MOAB: Mesh Oriented datABase  (version 5.2.1)
ParallelComm.cpp
00001 #include "moab/Interface.hpp"
00002 #include "moab/ParallelComm.hpp"
00003 #include "moab/WriteUtilIface.hpp"
00004 #include "moab/ReadUtilIface.hpp"
00005 #include "SequenceManager.hpp"
00006 #include "moab/Error.hpp"
00007 #include "EntitySequence.hpp"
00008 #include "MBTagConventions.hpp"
00009 #include "moab/Skinner.hpp"
00010 #include "MBParallelConventions.h"
00011 #include "moab/Core.hpp"
00012 #include "ElementSequence.hpp"
00013 #include "moab/CN.hpp"
00014 #include "moab/RangeMap.hpp"
00015 #include "moab/MeshTopoUtil.hpp"
00016 #include "TagInfo.hpp"
00017 #include "DebugOutput.hpp"
00018 #include "SharedSetData.hpp"
00019 #include "moab/ScdInterface.hpp"
00020 #include "moab/TupleList.hpp"
00021 #include "moab/gs.hpp"
00022 
00023 #include <iostream>
00024 #include <sstream>
00025 #include <algorithm>
00026 #include <functional>
00027 #include <numeric>
00028 
00029 #include <math.h>
00030 #include <cstdlib>
00031 #include <assert.h>
00032 
00033 #ifdef MOAB_HAVE_MPI
00034 #include "moab_mpi.h"
00035 #endif
00036 #ifdef MOAB_HAVE_MPE
00037 #include "mpe.h"
00038 int IFACE_START, IFACE_END;
00039 int GHOST_START, GHOST_END;
00040 int SHAREDV_START, SHAREDV_END;
00041 int RESOLVE_START, RESOLVE_END;
00042 int ENTITIES_START, ENTITIES_END;
00043 int RHANDLES_START, RHANDLES_END;
00044 int OWNED_START, OWNED_END;
00045 #endif
00046 
00047 namespace moab
00048 {
00049 
00050 const unsigned int ParallelComm::INITIAL_BUFF_SIZE = 1024;
00051 
00052 const int MAX_BCAST_SIZE = ( 1 << 28 );
00053 
00054 std::vector< ParallelComm::Buffer* > msgs;
00055 unsigned int __PACK_num = 0, __UNPACK_num = 0, __PACK_count = 0, __UNPACK_count = 0;
00056 std::string __PACK_string, __UNPACK_string;
00057 
00058 #ifdef DEBUG_PACKING_TIMES
00059 #define PC( n, m )                                                            \
00060     {                                                                         \
00061         if( __PACK_num == (unsigned int)n && __PACK_string == m )             \
00062             __PACK_count++;                                                   \
00063         else                                                                  \
00064         {                                                                     \
00065             if( __PACK_count > 1 ) std::cerr << " (" << __PACK_count << "x)"; \
00066             __PACK_count  = 1;                                                \
00067             __PACK_string = m;                                                \
00068             __PACK_num    = n;                                                \
00069             std::cerr << std::endl << "PACK: " << n << m;                     \
00070         }                                                                     \
00071     }
00072 #define UPC( n, m )                                                              \
00073     {                                                                            \
00074         if( __UNPACK_num == (unsigned int)n && __UNPACK_string == m )            \
00075             __UNPACK_count++;                                                    \
00076         else                                                                     \
00077         {                                                                        \
00078             if( __UNPACK_count > 1 ) std::cerr << "(" << __UNPACK_count << "x)"; \
00079             __UNPACK_count  = 1;                                                 \
00080             __UNPACK_string = m;                                                 \
00081             __UNPACK_num    = n;                                                 \
00082             std::cerr << std::endl << "UNPACK: " << n << m;                      \
00083         }                                                                        \
00084     }
00085 #else
00086 #define PC( n, m )
00087 #define UPC( n, m )
00088 #endif
00089 
00090 template < typename T >
00091 static inline void UNPACK( unsigned char*& buff, T* val, size_t count )
00092 {
00093     memcpy( val, buff, count * sizeof( T ) );
00094     buff += count * sizeof( T );
00095 }
00096 
00097 template < typename T >
00098 static inline void PACK( unsigned char*& buff, const T* val, size_t count )
00099 {
00100     memcpy( buff, val, count * sizeof( T ) );
00101     buff += count * sizeof( T );
00102 }
00103 
00104 static inline void PACK_INTS( unsigned char*& buff, const int* int_val, size_t num )
00105 {
00106     PACK( buff, int_val, num );
00107     PC( num, " ints" );
00108 }
00109 
00110 static inline void PACK_INT( unsigned char*& buff, int int_val )
00111 {
00112     PACK_INTS( buff, &int_val, 1 );
00113 }
00114 
00115 static inline void PACK_DBLS( unsigned char*& buff, const double* dbl_val, size_t num )
00116 {
00117     PACK( buff, dbl_val, num );
00118     PC( num, " doubles" );
00119 }
00120 
00121 // static inline
00122 // void PACK_DBL(unsigned char*& buff, const double dbl_val)
00123 //{ PACK_DBLS(buff, &dbl_val, 1); }
00124 
00125 static inline void PACK_EH( unsigned char*& buff, const EntityHandle* eh_val, size_t num )
00126 {
00127     PACK( buff, eh_val, num );
00128     PC( num, " handles" );
00129 }
00130 
00131 // static inline
00132 // void PACK_CHAR_64(unsigned char*& buff, const char* str)
00133 //{
00134 //  memcpy(buff, str, 64);
00135 //  buff += 64;
00136 //  PC(64, " chars");
00137 //}
00138 
00139 static inline void PACK_VOID( unsigned char*& buff, const void* val, size_t num )
00140 {
00141     PACK( buff, reinterpret_cast< const unsigned char* >( val ), num );
00142     PC( num, " void" );
00143 }
00144 
00145 static inline void PACK_BYTES( unsigned char*& buff, const void* val, int num )
00146 {
00147     PACK_INT( buff, num );
00148     PACK_VOID( buff, val, num );
00149 }
00150 
00151 static inline void PACK_RANGE( unsigned char*& buff, const Range& rng )
00152 {
00153     PACK_INT( buff, rng.psize() );
00154     Range::const_pair_iterator cit;
00155     for( cit = rng.const_pair_begin(); cit != rng.const_pair_end(); ++cit )
00156     {
00157         EntityHandle eh[2] = { cit->first, cit->second };
00158         PACK_EH( buff, eh, 2 );
00159     }
00160     PC( rng.psize(), "-subranged range" );
00161 }
00162 
00163 static inline void UNPACK_INTS( unsigned char*& buff, int* int_val, size_t num )
00164 {
00165     UNPACK( buff, int_val, num );
00166     UPC( num, " ints" );
00167 }
00168 
00169 static inline void UNPACK_INT( unsigned char*& buff, int& int_val )
00170 {
00171     UNPACK_INTS( buff, &int_val, 1 );
00172 }
00173 
00174 static inline void UNPACK_DBLS( unsigned char*& buff, double* dbl_val, size_t num )
00175 {
00176     UNPACK( buff, dbl_val, num );
00177     UPC( num, " doubles" );
00178 }
00179 
00180 static inline void UNPACK_DBL( unsigned char*& buff, double& dbl_val )
00181 {
00182     UNPACK_DBLS( buff, &dbl_val, 1 );
00183 }
00184 
00185 static inline void UNPACK_EH( unsigned char*& buff, EntityHandle* eh_val, size_t num )
00186 {
00187     UNPACK( buff, eh_val, num );
00188     UPC( num, " handles" );
00189 }
00190 
00191 // static inline
00192 // void UNPACK_CHAR_64(unsigned char*& buff, char* char_val)
00193 //{
00194 //  memcpy(buff, char_val, 64);
00195 //  buff += 64;
00196 //  UPC(64, " chars");
00197 //}
00198 
00199 static inline void UNPACK_VOID( unsigned char*& buff, void* val, size_t num )
00200 {
00201     UNPACK( buff, reinterpret_cast< unsigned char* >( val ), num );
00202     UPC( num, " void" );
00203 }
00204 
00205 static inline void UNPACK_TYPE( unsigned char*& buff, EntityType& type )
00206 {
00207     int int_type = MBMAXTYPE;
00208     UNPACK_INT( buff, int_type );
00209     type = static_cast< EntityType >( int_type );
00210     assert( type >= MBVERTEX && type <= MBMAXTYPE );
00211 }
00212 
00213 static inline void UNPACK_RANGE( unsigned char*& buff, Range& rng )
00214 {
00215     int num_subs;
00216     EntityHandle eh[2];
00217     UNPACK_INT( buff, num_subs );
00218     for( int i = 0; i < num_subs; i++ )
00219     {
00220         UPC( num_subs, "-subranged range" );
00221         UNPACK_EH( buff, eh, 2 );
00222         rng.insert( eh[0], eh[1] );
00223     }
00224 }
00225 
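// --- Editorial example (not part of the original source) -------------------
// The PACK_*/UNPACK_* helpers above copy raw bytes and advance the buffer
// pointer they are given, so a round trip simply runs two cursors over the
// same storage. The 64-byte scratch buffer below is an assumption made only
// for this sketch.
//
//     unsigned char storage[64];
//     unsigned char* wr = storage;   // write cursor
//     unsigned char* rd = storage;   // read cursor
//     int out = 42, in = 0;
//     PACK_INT( wr, out );           // writes sizeof(int) bytes, advances wr
//     UNPACK_INT( rd, in );          // reads them back, advances rd
//     assert( in == 42 && rd == wr );
// ----------------------------------------------------------------------------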
00226 enum MBMessageTag
00227 {
00228     MB_MESG_ANY = MPI_ANY_TAG,
00229     MB_MESG_ENTS_ACK,
00230     MB_MESG_ENTS_SIZE,
00231     MB_MESG_ENTS_LARGE,
00232     MB_MESG_REMOTEH_ACK,
00233     MB_MESG_REMOTEH_SIZE,
00234     MB_MESG_REMOTEH_LARGE,
00235     MB_MESG_TAGS_ACK,
00236     MB_MESG_TAGS_SIZE,
00237     MB_MESG_TAGS_LARGE
00238 };
00239 
00240 static inline size_t RANGE_SIZE( const Range& rng )
00241 {
00242     return 2 * sizeof( EntityHandle ) * rng.psize() + sizeof( int );
00243 }
00244 
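// --- Editorial example (not part of the original source) -------------------
// RANGE_SIZE mirrors the layout written by PACK_RANGE above: one int for the
// pair count followed by two EntityHandles per pair, so it can be used to
// pre-reserve buffer space. The handle values are illustrative only.
//
//     Range rng;
//     rng.insert( 1, 10 );                // one contiguous pair of handles
//     size_t needed = RANGE_SIZE( rng );  // sizeof(int) + 2*sizeof(EntityHandle)
// ----------------------------------------------------------------------------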
00245 #define PRINT_DEBUG_ISEND( A, B, C, D, E )    print_debug_isend( ( A ), ( B ), ( C ), ( D ), ( E ) )
00246 #define PRINT_DEBUG_IRECV( A, B, C, D, E, F ) print_debug_irecv( ( A ), ( B ), ( C ), ( D ), ( E ), ( F ) )
00247 #define PRINT_DEBUG_RECD( A )                 print_debug_recd( ( A ) )
00248 #define PRINT_DEBUG_WAITANY( A, B, C )        print_debug_waitany( ( A ), ( B ), ( C ) )
00249 
00250 void ParallelComm::print_debug_isend( int from, int to, unsigned char* buff, int tag, int sz )
00251 {
00252     myDebug->tprintf( 3, "Isend, %d->%d, buffer ptr = %p, tag=%d, size=%d\n", from, to, (void*)buff, tag, sz );
00253 }
00254 
00255 void ParallelComm::print_debug_irecv( int to, int from, unsigned char* buff, int sz, int tag, int incoming )
00256 {
00257     myDebug->tprintf( 3, "Irecv, %d<-%d, buffer ptr = %p, tag=%d, size=%d", to, from, (void*)buff, tag, sz );
00258     if( tag < MB_MESG_REMOTEH_ACK )
00259         myDebug->printf( 3, ", incoming1=%d\n", incoming );
00260     else if( tag < MB_MESG_TAGS_ACK )
00261         myDebug->printf( 3, ", incoming2=%d\n", incoming );
00262     else
00263         myDebug->printf( 3, ", incoming=%d\n", incoming );
00264 }
00265 
00266 void ParallelComm::print_debug_recd( MPI_Status status )
00267 {
00268     if( myDebug->get_verbosity() == 3 )
00269     {
00270         int this_count;
00271         int success = MPI_Get_count( &status, MPI_UNSIGNED_CHAR, &this_count );
00272         if( MPI_SUCCESS != success ) this_count = -1;
00273         myDebug->tprintf( 3, "Received from %d, count = %d, tag = %d\n", status.MPI_SOURCE, this_count,
00274                           status.MPI_TAG );
00275     }
00276 }
00277 
00278 void ParallelComm::print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc )
00279 {
00280     if( myDebug->get_verbosity() == 3 )
00281     {
00282         myDebug->tprintf( 3, "Waitany, p=%d, ", proc );
00283         if( tag < MB_MESG_REMOTEH_ACK )
00284             myDebug->print( 3, ", recv_ent_reqs=" );
00285         else if( tag < MB_MESG_TAGS_ACK )
00286             myDebug->print( 3, ", recv_remoteh_reqs=" );
00287         else
00288             myDebug->print( 3, ", recv_tag_reqs=" );
00289         for( unsigned int i = 0; i < reqs.size(); i++ )
00290             myDebug->printf( 3, " %p", (void*)(intptr_t)reqs[i] );
00291         myDebug->print( 3, "\n" );
00292     }
00293 }
00294 
00295 /** Name of tag used to store the ParallelComm index on mesh partitioning sets */
00296 const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
00297 
00298 /** \brief Tag storing parallel communication objects
00299  *
00300  * This tag stores pointers to ParallelComm communication
00301  * objects; one of these is allocated for each different
00302  * communicator used to read a mesh. Each ParallelComm stores the
00303  * partition and interface sets corresponding to its parallel mesh.
00304  * By default, a parallel read uses the first ParallelComm object
00305  * on the interface instance; if ReadParallel is instantiated with a
00306  * ParallelComm, it adds that object to the interface instance as well.
00307  *
00308  * Tag type: opaque
00309  * Tag size: MAX_SHARING_PROCS*sizeof(ParallelComm*)
00310  */
00311 #define PARALLEL_COMM_TAG_NAME "__PARALLEL_COMM"
00312 
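// --- Editorial example (not part of the original source) -------------------
// Applications normally reach the ParallelComm objects stored behind this
// opaque tag through the static accessor ParallelComm::get_pcomm(Interface*,
// int) declared in ParallelComm.hpp, rather than reading the tag directly;
// the index is the pcommID reported at construction. Sketch (assumes MPI is
// initialized and assert() is available):
//
//     Core mb;
//     int pcomm_id = -1;
//     ParallelComm pcomm( &mb, MPI_COMM_WORLD, &pcomm_id );
//     ParallelComm* found = ParallelComm::get_pcomm( &mb, pcomm_id );
//     assert( found == &pcomm );
// ----------------------------------------------------------------------------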
00313 ParallelComm::ParallelComm( Interface* impl, MPI_Comm cm, int* id )
00314     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
00315       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
00316       myDebug( NULL )
00317 {
00318     initialize();
00319     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
00320     if( id ) *id = pcommID;
00321 }
00322 
00323 ParallelComm::ParallelComm( Interface* impl, std::vector< unsigned char >& /*tmp_buff*/, MPI_Comm cm, int* id )
00324     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
00325       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
00326       myDebug( NULL )
00327 {
00328     initialize();
00329     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
00330     if( id ) *id = pcommID;
00331 }
00332 
00333 ParallelComm::~ParallelComm()
00334 {
00335     remove_pcomm( this );
00336     delete_all_buffers();
00337     delete myDebug;
00338     delete sharedSetData;
00339 }
00340 
00341 void ParallelComm::initialize()
00342 {
00343     Core* core      = dynamic_cast< Core* >( mbImpl );
00344     sequenceManager = core->sequence_manager();
00345     mbImpl->query_interface( errorHandler );
00346 
00347     // Initialize MPI, if necessary
00348     int flag   = 1;
00349     int retval = MPI_Initialized( &flag );
00350     if( MPI_SUCCESS != retval || !flag )
00351     {
00352         int argc    = 0;
00353         char** argv = NULL;
00354 
00355         // mpi not initialized yet - initialize here
00356         retval = MPI_Init( &argc, &argv );
00357         assert( MPI_SUCCESS == retval );
00358     }
00359 
00360     // Reserve space for vectors
00361     buffProcs.reserve( MAX_SHARING_PROCS );
00362     localOwnedBuffs.reserve( MAX_SHARING_PROCS );
00363     remoteOwnedBuffs.reserve( MAX_SHARING_PROCS );
00364 
00365     pcommID = add_pcomm( this );
00366 
00367     if( !myDebug )
00368     {
00369         myDebug = new DebugOutput( "ParallelComm", std::cerr );
00370         myDebug->set_rank( procConfig.proc_rank() );
00371     }
00372 }
00373 
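// --- Editorial note (not part of the original source) ----------------------
// Because initialize() falls back to MPI_Init() when MPI is not yet running,
// a minimal driver only needs an Interface and a communicator; initializing
// MPI explicitly first is still the conventional pattern:
//
//     int initialized = 0;
//     MPI_Initialized( &initialized );
//     if( !initialized ) MPI_Init( &argc, &argv );  // argc/argv from main()
//     Core mb;
//     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
// ----------------------------------------------------------------------------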
00374 int ParallelComm::add_pcomm( ParallelComm* pc )
00375 {
00376     // Add this pcomm to instance tag
00377     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS, (ParallelComm*)NULL );
00378     Tag pc_tag = pcomm_tag( mbImpl, true );
00379     assert( 0 != pc_tag );
00380 
00381     const EntityHandle root = 0;
00382     ErrorCode result        = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00383     if( MB_SUCCESS != result && MB_TAG_NOT_FOUND != result ) return -1;
00384     int index = 0;
00385     while( index < MAX_SHARING_PROCS && pc_array[index] )
00386         index++;
00387     if( index == MAX_SHARING_PROCS )
00388     {
00389         index = -1;
00390         assert( false );
00391     }
00392     else
00393     {
00394         pc_array[index] = pc;
00395         mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00396     }
00397     return index;
00398 }
00399 
00400 void ParallelComm::remove_pcomm( ParallelComm* pc )
00401 {
00402     // Remove this pcomm from instance tag
00403     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS );
00404     Tag pc_tag = pcomm_tag( mbImpl, true );
00405 
00406     const EntityHandle root                      = 0;
00407     ErrorCode result                             = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00408     std::vector< ParallelComm* >::iterator pc_it = std::find( pc_array.begin(), pc_array.end(), pc );
00409     assert( MB_SUCCESS == result && pc_it != pc_array.end() );
00410     // Empty if test to get around compiler warning about unused var
00411     if( MB_SUCCESS == result ) {}
00412 
00413     *pc_it = NULL;
00414     mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00415 }
00416 
00417 //! Assign a global id space, for largest-dimension or all entities (and
00418 //! in either case for vertices too)
00419 ErrorCode ParallelComm::assign_global_ids( EntityHandle this_set, const int dimension, const int start_id,
00420                                            const bool largest_dim_only, const bool parallel, const bool owned_only )
00421 {
00422     Range entities[4];
00423     ErrorCode result;
00424     std::vector< unsigned char > pstatus;
00425     for( int dim = 0; dim <= dimension; dim++ )
00426     {
00427         if( dim == 0 || !largest_dim_only || dim == dimension )
00428         {
00429             result = mbImpl->get_entities_by_dimension( this_set, dim, entities[dim] );MB_CHK_SET_ERR( result, "Failed to get entities in assign_global_ids" );
00430         }
00431 
00432         // Need to filter out non-locally-owned entities!!!
00433         pstatus.resize( entities[dim].size() );
00434         result = mbImpl->tag_get_data( pstatus_tag(), entities[dim], &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus in assign_global_ids" );
00435 
00436         Range dum_range;
00437         Range::iterator rit;
00438         unsigned int i;
00439         for( rit = entities[dim].begin(), i = 0; rit != entities[dim].end(); ++rit, i++ )
00440             if( pstatus[i] & PSTATUS_NOT_OWNED ) dum_range.insert( *rit );
00441         entities[dim] = subtract( entities[dim], dum_range );
00442     }
00443 
00444     return assign_global_ids( entities, dimension, start_id, parallel, owned_only );
00445 }
00446 
00447 //! Assign a global id space, for largest-dimension or all entities (and
00448 //! in either case for vertices too)
00449 ErrorCode ParallelComm::assign_global_ids( Range entities[], const int dimension, const int start_id,
00450                                            const bool parallel, const bool owned_only )
00451 {
00452     int local_num_elements[4];
00453     ErrorCode result;
00454     for( int dim = 0; dim <= dimension; dim++ )
00455     {
00456         local_num_elements[dim] = entities[dim].size();
00457     }
00458 
00459     // Communicate numbers
00460     std::vector< int > num_elements( procConfig.proc_size() * 4 );
00461 #ifdef MOAB_HAVE_MPI
00462     if( procConfig.proc_size() > 1 && parallel )
00463     {
00464         int retval =
00465             MPI_Allgather( local_num_elements, 4, MPI_INT, &num_elements[0], 4, MPI_INT, procConfig.proc_comm() );
00466         if( 0 != retval ) return MB_FAILURE;
00467     }
00468     else
00469 #endif
00470         for( int dim = 0; dim < 4; dim++ )
00471             num_elements[dim] = local_num_elements[dim];
00472 
00473     // My entities start at one greater than total_elems[d]
00474     int total_elems[4] = { start_id, start_id, start_id, start_id };
00475 
00476     for( unsigned int proc = 0; proc < procConfig.proc_rank(); proc++ )
00477     {
00478         for( int dim = 0; dim < 4; dim++ )
00479             total_elems[dim] += num_elements[4 * proc + dim];
00480     }
00481 
00482     // Assign global ids now
00483     Tag gid_tag = mbImpl->globalId_tag();
00484 
00485     for( int dim = 0; dim < 4; dim++ )
00486     {
00487         if( entities[dim].empty() ) continue;
00488         num_elements.resize( entities[dim].size() );
00489         int i = 0;
00490         for( Range::iterator rit = entities[dim].begin(); rit != entities[dim].end(); ++rit )
00491             num_elements[i++] = total_elems[dim]++;
00492 
00493         result = mbImpl->tag_set_data( gid_tag, entities[dim], &num_elements[0] );MB_CHK_SET_ERR( result, "Failed to set global id tag in assign_global_ids" );
00494     }
00495 
00496     if( owned_only ) return MB_SUCCESS;
00497 
00498     // Exchange tags
00499     for( int dim = 1; dim < 4; dim++ )
00500         entities[0].merge( entities[dim] );
00501 
00502     return exchange_tags( gid_tag, entities[0] );
00503 }
00504 
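// --- Editorial example (not part of the original source) -------------------
// Typical use of the set-based assign_global_ids() overload above: assign ids
// over the whole mesh (set handle 0) for the highest dimension plus vertices.
// The chosen arguments are assumptions of this sketch.
//
//     Core mb;
//     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
//     // ... load a mesh in parallel into &mb ...
//     ErrorCode rval = pcomm.assign_global_ids( 0 /*root set*/, 3 /*dimension*/,
//                                               1 /*start_id*/, true /*largest_dim_only*/,
//                                               true /*parallel*/, false /*owned_only*/ );
//     assert( MB_SUCCESS == rval );
// ----------------------------------------------------------------------------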
00505 int ParallelComm::get_buffers( int to_proc, bool* is_new )
00506 {
00507     int ind                                   = -1;
00508     std::vector< unsigned int >::iterator vit = std::find( buffProcs.begin(), buffProcs.end(), to_proc );
00509     if( vit == buffProcs.end() )
00510     {
00511         assert( "shouldn't need buffer to myself" && to_proc != (int)procConfig.proc_rank() );
00512         ind = buffProcs.size();
00513         buffProcs.push_back( (unsigned int)to_proc );
00514         localOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
00515         remoteOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
00516         if( is_new ) *is_new = true;
00517     }
00518     else
00519     {
00520         ind = vit - buffProcs.begin();
00521         if( is_new ) *is_new = false;
00522     }
00523     assert( ind < MAX_SHARING_PROCS );
00524     return ind;
00525 }
00526 
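// --- Editorial note (not part of the original source) ----------------------
// get_buffers() maps a destination rank to a stable index into buffProcs,
// localOwnedBuffs and remoteOwnedBuffs, allocating the INITIAL_BUFF_SIZE
// buffer pair on first use. Hypothetical in-class usage (rank 1 is a
// placeholder):
//
//     bool is_new = false;
//     int idx = get_buffers( 1 /*to_proc*/, &is_new );
//     // buffProcs[idx] == 1; is_new is true only the first time rank 1 is seen.
// ----------------------------------------------------------------------------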
00527 ErrorCode ParallelComm::broadcast_entities( const int from_proc, Range& entities, const bool adjacencies,
00528                                             const bool tags )
00529 {
00530 #ifndef MOAB_HAVE_MPI
00531     return MB_FAILURE;
00532 #else
00533 
00534     ErrorCode result = MB_SUCCESS;
00535     int success;
00536     int buff_size;
00537 
00538     Buffer buff( INITIAL_BUFF_SIZE );
00539     buff.reset_ptr( sizeof( int ) );
00540     if( (int)procConfig.proc_rank() == from_proc )
00541     {
00542         result = add_verts( entities );MB_CHK_SET_ERR( result, "Failed to add adj vertices" );
00543 
00544         buff.reset_ptr( sizeof( int ) );
00545         result = pack_buffer( entities, adjacencies, tags, false, -1, &buff );MB_CHK_SET_ERR( result, "Failed to compute buffer size in broadcast_entities" );
00546         buff.set_stored_size();
00547         buff_size = buff.buff_ptr - buff.mem_ptr;
00548     }
00549 
00550     success = MPI_Bcast( &buff_size, 1, MPI_INT, from_proc, procConfig.proc_comm() );
00551     if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" ); }
00552 
00553     if( !buff_size )  // No data
00554         return MB_SUCCESS;
00555 
00556     if( (int)procConfig.proc_rank() != from_proc ) buff.reserve( buff_size );
00557 
00558     size_t offset = 0;
00559     while( buff_size )
00560     {
00561         int sz  = std::min( buff_size, MAX_BCAST_SIZE );
00562         success = MPI_Bcast( buff.mem_ptr + offset, sz, MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
00563         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer failed" ); }
00564 
00565         offset += sz;
00566         buff_size -= sz;
00567     }
00568 
00569     if( (int)procConfig.proc_rank() != from_proc )
00570     {
00571         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
00572         std::vector< std::vector< int > > dum1p;
00573         std::vector< EntityHandle > dum2, dum4;
00574         std::vector< unsigned int > dum3;
00575         buff.reset_ptr( sizeof( int ) );
00576         result = unpack_buffer( buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );MB_CHK_SET_ERR( result, "Failed to unpack buffer in broadcast_entities" );
00577         std::copy( dum4.begin(), dum4.end(), range_inserter( entities ) );
00578     }
00579 
00580     return MB_SUCCESS;
00581 #endif
00582 }
00583 
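// --- Editorial example (not part of the original source) -------------------
// Typical use of broadcast_entities() above: the root packs and broadcasts
// its entities, and every other rank receives them into the same Range. The
// dimension used to gather entities on the root is an assumption.
//
//     Range ents;
//     if( pcomm.rank() == 0 )
//         mb.get_entities_by_dimension( 0, 3, ents );
//     ErrorCode rval = pcomm.broadcast_entities( 0 /*from_proc*/, ents,
//                                                false /*adjacencies*/, true /*tags*/ );
// ----------------------------------------------------------------------------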
00584 ErrorCode ParallelComm::scatter_entities( const int from_proc, std::vector< Range >& entities, const bool adjacencies,
00585                                           const bool tags )
00586 {
00587 #ifndef MOAB_HAVE_MPI
00588     return MB_FAILURE;
00589 #else
00590     ErrorCode result = MB_SUCCESS;
00591     int i, success, buff_size, prev_size;
00592     int nProcs         = (int)procConfig.proc_size();
00593     int* sendCounts    = new int[nProcs];
00594     int* displacements = new int[nProcs];
00595     sendCounts[0]      = sizeof( int );
00596     displacements[0]   = 0;
00597     Buffer buff( INITIAL_BUFF_SIZE );
00598     buff.reset_ptr( sizeof( int ) );
00599     buff.set_stored_size();
00600     unsigned int my_proc = procConfig.proc_rank();
00601 
00602     // Get buffer size array for each remote processor
00603     if( my_proc == (unsigned int)from_proc )
00604     {
00605         for( i = 1; i < nProcs; i++ )
00606         {
00607             prev_size = buff.buff_ptr - buff.mem_ptr;
00608             buff.reset_ptr( prev_size + sizeof( int ) );
00609             result = add_verts( entities[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
00610 
00611             result = pack_buffer( entities[i], adjacencies, tags, false, -1, &buff );
00612             if( MB_SUCCESS != result )
00613             {
00614                 delete[] sendCounts;
00615                 delete[] displacements;
00616                 MB_SET_ERR( result, "Failed to pack buffer in scatter_entities" );
00617             }
00618 
00619             buff_size                               = buff.buff_ptr - buff.mem_ptr - prev_size;
00620             *( (int*)( buff.mem_ptr + prev_size ) ) = buff_size;
00621             sendCounts[i]                           = buff_size;
00622         }
00623     }
00624 
00625     // Broadcast buffer size array
00626     success = MPI_Bcast( sendCounts, nProcs, MPI_INT, from_proc, procConfig.proc_comm() );
00627     if( MPI_SUCCESS != success )
00628     {
00629         delete[] sendCounts;
00630         delete[] displacements;
00631         MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" );
00632     }
00633 
00634     for( i = 1; i < nProcs; i++ )
00635     {
00636         displacements[i] = displacements[i - 1] + sendCounts[i - 1];
00637     }
00638 
00639     Buffer rec_buff;
00640     rec_buff.reserve( sendCounts[my_proc] );
00641 
00642     // Scatter actual geometry
00643     success = MPI_Scatterv( buff.mem_ptr, sendCounts, displacements, MPI_UNSIGNED_CHAR, rec_buff.mem_ptr,
00644                             sendCounts[my_proc], MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
00645 
00646     if( MPI_SUCCESS != success )
00647     {
00648         delete[] sendCounts;
00649         delete[] displacements;
00650         MB_SET_ERR( MB_FAILURE, "MPI_Scatterv of buffer failed" );
00651     }
00652 
00653     // Unpack in remote processors
00654     if( my_proc != (unsigned int)from_proc )
00655     {
00656         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
00657         std::vector< std::vector< int > > dum1p;
00658         std::vector< EntityHandle > dum2, dum4;
00659         std::vector< unsigned int > dum3;
00660         rec_buff.reset_ptr( sizeof( int ) );
00661         result = unpack_buffer( rec_buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );
00662         if( MB_SUCCESS != result )
00663         {
00664             delete[] sendCounts;
00665             delete[] displacements;
00666             MB_SET_ERR( result, "Failed to unpack buffer in scatter_entities" );
00667         }
00668 
00669         std::copy( dum4.begin(), dum4.end(), range_inserter( entities[my_proc] ) );
00670     }
00671 
00672     delete[] sendCounts;
00673     delete[] displacements;
00674 
00675     return MB_SUCCESS;
00676 #endif
00677 }
00678 
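// --- Editorial note (not part of the original source) ----------------------
// For scatter_entities() above, the entities vector is indexed by destination
// rank: on the root, entities[p] holds what rank p should receive; non-root
// ranks only need the vector sized to the communicator. Sketch with an
// assumed partitioning step:
//
//     std::vector< Range > parts( pcomm.proc_config().proc_size() );
//     if( 0 == (int)pcomm.rank() )
//     {   /* fill parts[p] for each destination rank p */ }
//     ErrorCode rval = pcomm.scatter_entities( 0 /*from_proc*/, parts,
//                                              false /*adjacencies*/, true /*tags*/ );
// ----------------------------------------------------------------------------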
00679 ErrorCode ParallelComm::send_entities( const int to_proc, Range& orig_ents, const bool adjs, const bool tags,
00680                                        const bool store_remote_handles, const bool is_iface, Range& /*final_ents*/,
00681                                        int& incoming1, int& incoming2, TupleList& entprocs,
00682                                        std::vector< MPI_Request >& recv_remoteh_reqs, bool /*wait_all*/ )
00683 {
00684 #ifndef MOAB_HAVE_MPI
00685     return MB_FAILURE;
00686 #else
00687     // Pack entities to local buffer
00688     int ind = get_buffers( to_proc );
00689     localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
00690 
00691     // Add vertices
00692     ErrorCode result = add_verts( orig_ents );MB_CHK_SET_ERR( result, "Failed to add verts in send_entities" );
00693 
00694     // Filter out entities already shared with destination
00695     Range tmp_range;
00696     result = filter_pstatus( orig_ents, PSTATUS_SHARED, PSTATUS_AND, to_proc, &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
00697     if( !tmp_range.empty() ) { orig_ents = subtract( orig_ents, tmp_range ); }
00698 
00699     result = pack_buffer( orig_ents, adjs, tags, store_remote_handles, to_proc, localOwnedBuffs[ind], &entprocs );MB_CHK_SET_ERR( result, "Failed to pack buffer in send_entities" );
00700 
00701     // Send buffer
00702     result = send_buffer( to_proc, localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind], recvReqs[2 * ind + 1],
00703                           (int*)( remoteOwnedBuffs[ind]->mem_ptr ),
00704                           //&ackbuff,
00705                           incoming1, MB_MESG_REMOTEH_SIZE,
00706                           ( !is_iface && store_remote_handles ? localOwnedBuffs[ind] : NULL ),
00707                           &recv_remoteh_reqs[2 * ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to send buffer" );
00708 
00709     return MB_SUCCESS;
00710 #endif
00711 }
00712 
00713 ErrorCode ParallelComm::send_entities( std::vector< unsigned int >& send_procs, std::vector< Range* >& send_ents,
00714                                        int& incoming1, int& incoming2, const bool store_remote_handles )
00715 {
00716 #ifdef MOAB_HAVE_MPE
00717     if( myDebug->get_verbosity() == 2 )
00718     { MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_entities." ); }
00719 #endif
00720     myDebug->tprintf( 1, "Entering send_entities\n" );
00721     if( myDebug->get_verbosity() == 4 )
00722     {
00723         msgs.clear();
00724         msgs.reserve( MAX_SHARING_PROCS );
00725     }
00726 
00727     unsigned int i;
00728     int ind;
00729     ErrorCode result = MB_SUCCESS;
00730 
00731     // Set buffProcs with communicating procs
00732     unsigned int n_proc = send_procs.size();
00733     for( i = 0; i < n_proc; i++ )
00734     {
00735         ind    = get_buffers( send_procs[i] );
00736         result = add_verts( *send_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
00737 
00738         // Filter out entities already shared with destination
00739         Range tmp_range;
00740         result = filter_pstatus( *send_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
00741         if( !tmp_range.empty() ) { *send_ents[i] = subtract( *send_ents[i], tmp_range ); }
00742     }
00743 
00744     //===========================================
00745     // Get entities to be sent to neighbors
00746     // Need to get procs each entity is sent to
00747     //===========================================
00748     Range allsent, tmp_range;
00749     int npairs = 0;
00750     TupleList entprocs;
00751     for( i = 0; i < n_proc; i++ )
00752     {
00753         int n_ents = send_ents[i]->size();
00754         if( n_ents > 0 )
00755         {
00756             npairs += n_ents;  // Get the total # of proc/handle pairs
00757             allsent.merge( *send_ents[i] );
00758         }
00759     }
00760 
00761     // Allocate a TupleList of that size
00762     entprocs.initialize( 1, 0, 1, 0, npairs );
00763     entprocs.enableWriteAccess();
00764 
00765     // Put the proc/handle pairs in the list
00766     for( i = 0; i < n_proc; i++ )
00767     {
00768         for( Range::iterator rit = send_ents[i]->begin(); rit != send_ents[i]->end(); ++rit )
00769         {
00770             entprocs.vi_wr[entprocs.get_n()]  = send_procs[i];
00771             entprocs.vul_wr[entprocs.get_n()] = *rit;
00772             entprocs.inc_n();
00773         }
00774     }
00775 
00776     // Sort by handle
00777     moab::TupleList::buffer sort_buffer;
00778     sort_buffer.buffer_init( npairs );
00779     entprocs.sort( 1, &sort_buffer );
00780     entprocs.disableWriteAccess();
00781     sort_buffer.reset();
00782 
00783     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
00784                       (unsigned long)allsent.size() );
00785 
00786     //===========================================
00787     // Pack and send ents from this proc to others
00788     //===========================================
00789     for( i = 0; i < n_proc; i++ )
00790     {
00791         if( send_ents[i]->size() > 0 )
00792         {
00793             ind = get_buffers( send_procs[i] );
00794             myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", send_ents[i]->compactness(),
00795                               (unsigned long)send_ents[i]->size() );
00796             // Reserve space on front for size and for initial buff size
00797             localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
00798             result = pack_buffer( *send_ents[i], false, true, store_remote_handles, buffProcs[ind],
00799                                   localOwnedBuffs[ind], &entprocs, &allsent );
00800 
00801             if( myDebug->get_verbosity() == 4 )
00802             {
00803                 msgs.resize( msgs.size() + 1 );
00804                 msgs.back() = new Buffer( *localOwnedBuffs[ind] );
00805             }
00806 
00807             // Send the buffer (size stored in front in send_buffer)
00808             result = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind],
00809                                   recvReqs[2 * ind + 1], &ackbuff, incoming1, MB_MESG_REMOTEH_SIZE,
00810                                   ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recvRemotehReqs[2 * ind],
00811                                   &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost send" );
00812         }
00813     }
00814     entprocs.reset();
00815 
00816 #ifdef MOAB_HAVE_MPE
00817     if( myDebug->get_verbosity() == 2 )
00818     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_entities." ); }
00819 #endif
00820 
00821     return MB_SUCCESS;
00822 }
00823 
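// --- Editorial example (not part of the original source) -------------------
// Sketch of the proc/handle TupleList built in send_entities() above: each
// tuple stores one int (destination proc) and one unsigned long (entity
// handle); the values below are placeholders.
//
//     TupleList tl;
//     tl.initialize( 1, 0, 1, 0, 2 );  // 1 int, 0 long, 1 ulong, 0 real, 2 tuples
//     tl.enableWriteAccess();
//     tl.vi_wr[tl.get_n()] = 3;    tl.vul_wr[tl.get_n()] = 1001;  tl.inc_n();
//     tl.vi_wr[tl.get_n()] = 5;    tl.vul_wr[tl.get_n()] = 1002;  tl.inc_n();
//     TupleList::buffer sort_buf;
//     sort_buf.buffer_init( 2 );
//     tl.sort( 1, &sort_buf );         // sort by the handle column
//     tl.disableWriteAccess();
//     sort_buf.reset();
//     tl.reset();
// ----------------------------------------------------------------------------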
00824 /////////////////////////////////////////////////////////////////////////////////
00825 // Send and Receive routines for a sequence of entities: use case UMR
00826 /////////////////////////////////////////////////////////////////////////////////
00827 void print_buff( unsigned char* ch, int size )
00828 {
00829     for( int i = 0; i < size; i++ )
00830         std::cout << ch[i];
00831     std::cout << "\n";
00832 }
00833 ErrorCode ParallelComm::send_recv_entities( std::vector< int >& send_procs, std::vector< std::vector< int > >& msgsizes,
00834                                             std::vector< std::vector< EntityHandle > >& senddata,
00835                                             std::vector< std::vector< EntityHandle > >& recvdata )
00836 {
00837 #ifdef USE_MPE
00838     if( myDebug->get_verbosity() == 2 )
00839     { MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_recv_entities." ); }
00840 #endif
00841     myDebug->tprintf( 1, "Entering send_recv_entities\n" );
00842     if( myDebug->get_verbosity() == 4 )
00843     {
00844         msgs.clear();
00845         msgs.reserve( MAX_SHARING_PROCS );
00846     }
00847 
00848     // unsigned int i;
00849     int i, ind, success;
00850     ErrorCode error = MB_SUCCESS;
00851 
00852     //===========================================
00853     // Pack and send ents from this proc to others
00854     //===========================================
00855 
00856     // std::cout<<"resetting all buffers"<<std::endl;
00857 
00858     reset_all_buffers();
00859     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
00860     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
00861     int ack_buff;
00862     int incoming = 0;
00863 
00864     std::vector< unsigned int >::iterator sit;
00865 
00866     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
00867     {
00868         incoming++;
00869         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
00870                            MB_MESG_ENTS_SIZE, incoming );
00871 
00872         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
00873                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
00874         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in send_recv_entities" ); }
00875     }
00876 
00877     //  std::set<unsigned int>::iterator it;
00878     for( i = 0; i < (int)send_procs.size(); i++ )
00879     {
00880         // Get index of the shared processor in the local buffer
00881         ind = get_buffers( send_procs[i] );
00882         localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
00883 
00884         int buff_size = msgsizes[i].size() * sizeof( int ) + senddata[i].size() * sizeof( EntityHandle );
00885         localOwnedBuffs[ind]->check_space( buff_size );
00886 
00887         // Pack entities
00888         std::vector< int > msg;
00889         msg.insert( msg.end(), msgsizes[i].begin(), msgsizes[i].end() );
00890         PACK_INTS( localOwnedBuffs[ind]->buff_ptr, &msg[0], msg.size() );
00891 
00892         std::vector< EntityHandle > entities;
00893         entities.insert( entities.end(), senddata[i].begin(), senddata[i].end() );
00894         PACK_EH( localOwnedBuffs[ind]->buff_ptr, &entities[0], entities.size() );
00895         localOwnedBuffs[ind]->set_stored_size();
00896 
00897         if( myDebug->get_verbosity() == 4 )
00898         {
00899             msgs.resize( msgs.size() + 1 );
00900             msgs.back() = new Buffer( *localOwnedBuffs[ind] );
00901         }
00902 
00903         // Send the buffer (size stored in front in send_buffer)
00904         error = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
00905                              recv_ent_reqs[3 * ind + 2], &ack_buff, incoming );MB_CHK_SET_ERR( error, "Failed to Isend in send_recv_entities" );
00906     }
00907 
00908     //===========================================
00909     // Receive and unpack ents from received data
00910     //===========================================
00911 
00912     while( incoming )
00913     {
00914 
00915         MPI_Status status;
00916         int index_in_recv_requests;
00917 
00918         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
00919         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &index_in_recv_requests, &status );
00920         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in send_recv_entities" ); }
00921 
00922         // Processor index in the list is divided by 3
00923         ind = index_in_recv_requests / 3;
00924 
00925         PRINT_DEBUG_RECD( status );
00926 
00927         // OK, received something; decrement incoming counter
00928         incoming--;
00929 
00930         bool done = false;
00931 
00932         error = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind],
00933                              recv_ent_reqs[3 * ind + 1],  // This is for receiving the second message
00934                              recv_ent_reqs[3 * ind + 2],  // This would be for ack, but it is not
00935                                                           // used; consider removing it
00936                              incoming, localOwnedBuffs[ind],
00937                              sendReqs[3 * ind + 1],  // Send request for sending the second message
00938                              sendReqs[3 * ind + 2],  // This is for sending the ack
00939                              done );MB_CHK_SET_ERR( error, "Failed to resize recv buffer" );
00940 
00941         if( done )
00942         {
00943             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
00944 
00945             int from_proc = status.MPI_SOURCE;
00946             int idx       = std::find( send_procs.begin(), send_procs.end(), from_proc ) - send_procs.begin();
00947 
00948             int msg = msgsizes[idx].size();
00949             std::vector< int > recvmsg( msg );
00950             int ndata = senddata[idx].size();
00951             std::vector< EntityHandle > dum_vec( ndata );
00952 
00953             UNPACK_INTS( remoteOwnedBuffs[ind]->buff_ptr, &recvmsg[0], msg );
00954             UNPACK_EH( remoteOwnedBuffs[ind]->buff_ptr, &dum_vec[0], ndata );
00955 
00956             recvdata[idx].insert( recvdata[idx].end(), dum_vec.begin(), dum_vec.end() );
00957         }
00958     }
00959 
00960 #ifdef USE_MPE
00961     if( myDebug->get_verbosity() == 2 )
00962     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_recv_entities." ); }
00963 #endif
00964 
00965     return MB_SUCCESS;
00966 }
00967 
00968 ErrorCode ParallelComm::update_remote_data( EntityHandle entity, std::vector< int >& procs,
00969                                             std::vector< EntityHandle >& handles )
00970 {
00971     ErrorCode error;
00972     unsigned char pstatus = PSTATUS_INTERFACE;
00973 
00974     int procmin = *std::min_element( procs.begin(), procs.end() );
00975 
00976     if( (int)rank() > procmin )
00977         pstatus |= PSTATUS_NOT_OWNED;
00978     else
00979         procmin = rank();
00980 
00981     // DBG
00982     // std::cout<<"entity = "<<entity<<std::endl;
00983     // for (int j=0; j<procs.size(); j++)
00984     // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
00985     // DBG
00986 
00987     if( (int)procs.size() > 1 )
00988     {
00989         procs.push_back( rank() );
00990         handles.push_back( entity );
00991 
00992         int idx = std::find( procs.begin(), procs.end(), procmin ) - procs.begin();
00993 
00994         std::iter_swap( procs.begin(), procs.begin() + idx );
00995         std::iter_swap( handles.begin(), handles.begin() + idx );
00996 
00997         // DBG
00998         //  std::cout<<"entity = "<<entity<<std::endl;
00999         // for (int j=0; j<procs.size(); j++)
01000         // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
01001         // DBG
01002     }
01003 
01004     // if ((entity == 10388) && (rank()==1))
01005     //    std::cout<<"Here"<<std::endl;
01006 
01007     error = update_remote_data( entity, &procs[0], &handles[0], procs.size(), pstatus );MB_CHK_ERR( error );
01008 
01009     return MB_SUCCESS;
01010 }
01011 
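// --- Editorial worked example (not part of the original source) ------------
// For the vector-based update_remote_data() above, with rank() == 2 and
// procs = {3, 1} (handles {hA, hB}), procmin is 1, so PSTATUS_NOT_OWNED is
// set; this rank and the local handle h are appended, and the minimum-rank
// owner is swapped to the front before the pointer-based overload is called:
//
//     procs   : {3, 1}   -> {3, 1, 2}    -> {1, 3, 2}
//     handles : {hA, hB} -> {hA, hB, h}  -> {hB, hA, h}
// ----------------------------------------------------------------------------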
01012 ErrorCode ParallelComm::get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc )
01013 {
01014     ErrorCode error;
01015     std::vector< EntityHandle > newents;
01016     error = get_remote_handles( true, local_vec, rem_vec, num_ents, to_proc, newents );MB_CHK_ERR( error );
01017 
01018     return MB_SUCCESS;
01019 }
01020 
01021 //////////////////////////////////////////////////////////////////
01022 
01023 ErrorCode ParallelComm::recv_entities( const int from_proc, const bool store_remote_handles, const bool is_iface,
01024                                        Range& final_ents, int& incoming1, int& incoming2,
01025                                        std::vector< std::vector< EntityHandle > >& L1hloc,
01026                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01027                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01028                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01029                                        std::vector< MPI_Request >& recv_remoteh_reqs, bool /*wait_all*/ )
01030 {
01031 #ifndef MOAB_HAVE_MPI
01032     return MB_FAILURE;
01033 #else
01034     // Non-blocking receive for the first message (having size info)
01035     int ind1 = get_buffers( from_proc );
01036     incoming1++;
01037     PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
01038                        MB_MESG_ENTS_SIZE, incoming1 );
01039     int success = MPI_Irecv( remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc,
01040                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind1] );
01041     if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" ); }
01042 
01043     // Receive messages in while loop
01044     return recv_messages( from_proc, store_remote_handles, is_iface, final_ents, incoming1, incoming2, L1hloc, L1hrem,
01045                           L1p, L2hloc, L2hrem, L2p, recv_remoteh_reqs );
01046 #endif
01047 }
01048 
01049 ErrorCode ParallelComm::recv_entities( std::set< unsigned int >& recv_procs, int incoming1, int incoming2,
01050                                        const bool store_remote_handles, const bool migrate )
01051 {
01052     //===========================================
01053     // Receive/unpack new entities
01054     //===========================================
01055     // Number of incoming messages is the number of procs we communicate with
01056     int success, ind, i;
01057     ErrorCode result;
01058     MPI_Status status;
01059     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
01060     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
01061     std::vector< std::vector< int > > L1p( buffProcs.size() );
01062     std::vector< EntityHandle > L2hloc, L2hrem;
01063     std::vector< unsigned int > L2p;
01064     std::vector< EntityHandle > new_ents;
01065 
01066     while( incoming1 )
01067     {
01068         // Wait for all recvs of ents before proceeding to sending remote handles,
01069         // b/c some procs may have sent to a 3rd proc ents owned by me;
01070         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
01071 
01072         success = MPI_Waitany( 2 * buffProcs.size(), &recvReqs[0], &ind, &status );
01073         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
01074 
01075         PRINT_DEBUG_RECD( status );
01076 
01077         // OK, received something; decrement incoming counter
01078         incoming1--;
01079         bool done = false;
01080 
01081         // In case ind is for ack, we need index of one before it
01082         unsigned int base_ind = 2 * ( ind / 2 );
01083         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 2], recvReqs[ind], recvReqs[ind + 1],
01084                               incoming1, localOwnedBuffs[ind / 2], sendReqs[base_ind], sendReqs[base_ind + 1], done,
01085                               ( store_remote_handles ? localOwnedBuffs[ind / 2] : NULL ), MB_MESG_REMOTEH_SIZE,
01086                               &recvRemotehReqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
01087 
01088         if( done )
01089         {
01090             if( myDebug->get_verbosity() == 4 )
01091             {
01092                 msgs.resize( msgs.size() + 1 );
01093                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 2] );
01094             }
01095 
01096             // Message completely received - process buffer that was sent
01097             remoteOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
01098             result = unpack_buffer( remoteOwnedBuffs[ind / 2]->buff_ptr, store_remote_handles, buffProcs[ind / 2],
01099                                     ind / 2, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
01100             if( MB_SUCCESS != result )
01101             {
01102                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
01103                 print_buffer( remoteOwnedBuffs[ind / 2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 2], false );
01104                 return result;
01105             }
01106 
01107             if( recvReqs.size() != 2 * buffProcs.size() )
01108             {
01109                 // Post irecv's for remote handles from new proc
01110                 recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01111                 for( i = recvReqs.size(); i < (int)( 2 * buffProcs.size() ); i += 2 )
01112                 {
01113                     localOwnedBuffs[i / 2]->reset_buffer();
01114                     incoming2++;
01115                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 2], localOwnedBuffs[i / 2]->mem_ptr,
01116                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
01117                     success = MPI_Irecv( localOwnedBuffs[i / 2]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
01118                                          buffProcs[i / 2], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
01119                                          &recvRemotehReqs[i] );
01120                     if( success != MPI_SUCCESS )
01121                     { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" ); }
01122                 }
01123                 recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01124                 sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01125             }
01126         }
01127     }
01128 
01129     // Assign and remove newly created elements from/to receive processor
01130     result = assign_entities_part( new_ents, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to assign entities to part" );
01131     if( migrate )
01132     {
01133         // result = remove_entities_part(allsent, procConfig.proc_rank());MB_CHK_SET_ERR(result,
01134         // "Failed to remove entities from part");
01135     }
01136 
01137     // Add requests for any new addl procs
01138     if( recvReqs.size() != 2 * buffProcs.size() )
01139     {
01140         // Shouldn't get here...
01141         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in entity exchange" );
01142     }
01143 
01144 #ifdef MOAB_HAVE_MPE
01145     if( myDebug->get_verbosity() == 2 )
01146     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending recv entities." ); }
01147 #endif
01148 
01149     //===========================================
01150     // Send local handles for new entity to owner
01151     //===========================================
01152     std::set< unsigned int >::iterator it  = recv_procs.begin();
01153     std::set< unsigned int >::iterator eit = recv_procs.end();
01154     for( ; it != eit; ++it )
01155     {
01156         ind = get_buffers( *it );
01157         // Reserve space on front for size and for initial buff size
01158         remoteOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
01159 
01160         result = pack_remote_handles( L1hloc[ind], L1hrem[ind], L1p[ind], buffProcs[ind], remoteOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
01161         remoteOwnedBuffs[ind]->set_stored_size();
01162 
01163         if( myDebug->get_verbosity() == 4 )
01164         {
01165             msgs.resize( msgs.size() + 1 );
01166             msgs.back() = new Buffer( *remoteOwnedBuffs[ind] );
01167         }
01168         result = send_buffer( buffProcs[ind], remoteOwnedBuffs[ind], MB_MESG_REMOTEH_SIZE, sendReqs[2 * ind],
01169                               recvRemotehReqs[2 * ind + 1], &ackbuff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
01170     }
01171 
01172     //===========================================
01173     // Process remote handles of my ghosteds
01174     //===========================================
01175     while( incoming2 )
01176     {
01177         PRINT_DEBUG_WAITANY( recvRemotehReqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
01178         success = MPI_Waitany( 2 * buffProcs.size(), &recvRemotehReqs[0], &ind, &status );
01179         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
01180 
01181         // OK, received something; decrement incoming counter
01182         incoming2--;
01183 
01184         PRINT_DEBUG_RECD( status );
01185         bool done             = false;
01186         unsigned int base_ind = 2 * ( ind / 2 );
01187         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 2], recvRemotehReqs[ind],
01188                               recvRemotehReqs[ind + 1], incoming2, remoteOwnedBuffs[ind / 2], sendReqs[base_ind],
01189                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
01190         if( done )
01191         {
01192             // Incoming remote handles
01193             if( myDebug->get_verbosity() == 4 )
01194             {
01195                 msgs.resize( msgs.size() + 1 );
01196                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 2] );
01197             }
01198 
01199             localOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
01200             result =
01201                 unpack_remote_handles( buffProcs[ind / 2], localOwnedBuffs[ind / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
01202         }
01203     }
01204 
01205 #ifdef MOAB_HAVE_MPE
01206     if( myDebug->get_verbosity() == 2 )
01207     {
01208         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
01209         MPE_Log_event( OWNED_END, procConfig.proc_rank(), "Ending recv entities (still doing checks)." );
01210     }
01211 #endif
01212     myDebug->tprintf( 1, "Exiting recv_entities.\n" );
01213 
01214     return MB_SUCCESS;
01215 }
01216 
01217 ErrorCode ParallelComm::recv_messages( const int from_proc, const bool store_remote_handles, const bool is_iface,
01218                                        Range& final_ents, int& incoming1, int& incoming2,
01219                                        std::vector< std::vector< EntityHandle > >& L1hloc,
01220                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01221                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01222                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01223                                        std::vector< MPI_Request >& recv_remoteh_reqs )
01224 {
01225 #ifndef MOAB_HAVE_MPI
01226     return MB_FAILURE;
01227 #else
01228     MPI_Status status;
01229     ErrorCode result;
01230     int ind1 = get_buffers( from_proc );
01231     int success, ind2;
01232     std::vector< EntityHandle > new_ents;
01233 
01234     // Wait and receive messages
01235     while( incoming1 )
01236     {
01237         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
01238         success = MPI_Waitany( 2, &recvReqs[2 * ind1], &ind2, &status );
01239         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_messages" ); }
01240 
01241         PRINT_DEBUG_RECD( status );
01242 
01243         // OK, received something; decrement incoming counter
01244         incoming1--;
01245         bool done = false;
01246 
01247         // In case ind is for ack, we need index of one before it
01248         ind2 += 2 * ind1;
01249         unsigned int base_ind = 2 * ( ind2 / 2 );
01250 
01251         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind2 / 2],
01252                               // recvbuff,
01253                               recvReqs[ind2], recvReqs[ind2 + 1], incoming1, localOwnedBuffs[ind2 / 2],
01254                               sendReqs[base_ind], sendReqs[base_ind + 1], done,
01255                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind2 / 2] : NULL ),
01256                               MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
01257 
01258         if( done )
01259         {
01260             // If it is done, unpack buffer
01261             remoteOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
01262             result = unpack_buffer( remoteOwnedBuffs[ind2 / 2]->buff_ptr, store_remote_handles, from_proc, ind2 / 2,
01263                                     L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );MB_CHK_SET_ERR( result, "Failed to unpack buffer in recv_messages" );
01264 
01265             std::copy( new_ents.begin(), new_ents.end(), range_inserter( final_ents ) );
01266 
01267             // Send local handles for new elements to owner
01268             // Reserve space on front for size and for initial buff size
01269             remoteOwnedBuffs[ind2 / 2]->reset_buffer( sizeof( int ) );
01270 
01271             result = pack_remote_handles( L1hloc[ind2 / 2], L1hrem[ind2 / 2], L1p[ind2 / 2], from_proc,
01272                                           remoteOwnedBuffs[ind2 / 2] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
01273             remoteOwnedBuffs[ind2 / 2]->set_stored_size();
01274 
01275             result = send_buffer( buffProcs[ind2 / 2], remoteOwnedBuffs[ind2 / 2], MB_MESG_REMOTEH_SIZE, sendReqs[ind2],
01276                                   recv_remoteh_reqs[ind2 + 1], (int*)( localOwnedBuffs[ind2 / 2]->mem_ptr ),
01277                                   //&ackbuff,
01278                                   incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
01279         }
01280     }
01281 
01282     return MB_SUCCESS;
01283 #endif
01284 }
01285 
01286 ErrorCode ParallelComm::recv_remote_handle_messages( const int from_proc, int& incoming2,
01287                                                      std::vector< EntityHandle >& L2hloc,
01288                                                      std::vector< EntityHandle >& L2hrem,
01289                                                      std::vector< unsigned int >& L2p,
01290                                                      std::vector< MPI_Request >& recv_remoteh_reqs )
01291 {
01292 #ifndef MOAB_HAVE_MPI
01293     return MB_FAILURE;
01294 #else
01295     MPI_Status status;
01296     ErrorCode result;
01297     int ind1 = get_buffers( from_proc );
01298     int success, ind2;
01299 
01300     while( incoming2 )
01301     {
01302         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
01303         success = MPI_Waitany( 2, &recv_remoteh_reqs[2 * ind1], &ind2, &status );
01304         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_remote_handle_messages" ); }
01305 
01306         // OK, received something; decrement incoming counter
01307         incoming2--;
01308 
01309         PRINT_DEBUG_RECD( status );
01310 
01311         bool done = false;
01312         ind2 += 2 * ind1;
01313         unsigned int base_ind = 2 * ( ind2 / 2 );
01314         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind2 / 2], recv_remoteh_reqs[ind2],
01315                               recv_remoteh_reqs[ind2 + 1], incoming2, remoteOwnedBuffs[ind2 / 2], sendReqs[base_ind],
01316                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
01317         if( done )
01318         {
01319             // Incoming remote handles
01320             localOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
01321             result =
01322                 unpack_remote_handles( buffProcs[ind2 / 2], localOwnedBuffs[ind2 / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
01323         }
01324     }
01325 
01326     return MB_SUCCESS;
01327 #endif
01328 }
01329 
01330 ErrorCode ParallelComm::pack_buffer( Range& orig_ents, const bool /*adjacencies*/, const bool tags,
01331                                      const bool store_remote_handles, const int to_proc, Buffer* buff,
01332                                      TupleList* entprocs, Range* allsent )
01333 {
01334     // Pack the buffer with the entity ranges, adjacencies, and tags sections
01335     //
01336     // Note: new entities used in subsequent connectivity lists, sets, or tags,
01337     // are referred to as (MBMAXTYPE + index), where index is into vector
01338     // of new entities, 0-based
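    //
    // Concretely, get_remote_handles encodes such an entity as CREATE_HANDLE(MBMAXTYPE, i),
    // where i is the entity's position in the (sorted) new-entity list, and the receiver
    // resolves that placeholder back to a local handle when it unpacks connectivity.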
01339     ErrorCode result;
01340 
01341     Range set_range;
01342     std::vector< Tag > all_tags;
01343     std::vector< Range > tag_ranges;
01344 
01345     Range::const_iterator rit;
01346 
01347     // Entities
01348     result = pack_entities( orig_ents, buff, store_remote_handles, to_proc, false, entprocs, allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
01349 
01350     // Sets
01351     result = pack_sets( orig_ents, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing sets (count) failed" );
01352 
01353     // Tags
01354     Range final_ents;
01355     if( tags )
01356     {
01357         result = get_tag_send_list( orig_ents, all_tags, tag_ranges );MB_CHK_SET_ERR( result, "Failed to get tagged entities" );
01358         result = pack_tags( orig_ents, all_tags, all_tags, tag_ranges, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing tags (count) failed" );
01359     }
01360     else
01361     {  // No tags: pack a zero tag count
01362         buff->check_space( sizeof( int ) );
01363         PACK_INT( buff->buff_ptr, 0 );
01364         buff->set_stored_size();
01365     }
01366 
01367     return result;
01368 }
01369 
01370 ErrorCode ParallelComm::unpack_buffer( unsigned char* buff_ptr, const bool store_remote_handles, const int from_proc,
01371                                        const int ind, std::vector< std::vector< EntityHandle > >& L1hloc,
01372                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01373                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01374                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01375                                        std::vector< EntityHandle >& new_ents, const bool created_iface )
01376 {
01377     unsigned char* tmp_buff = buff_ptr;
01378     ErrorCode result;
01379     result = unpack_entities( buff_ptr, store_remote_handles, ind, false, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
01380                               new_ents, created_iface );MB_CHK_SET_ERR( result, "Unpacking entities failed" );
01381     if( myDebug->get_verbosity() == 3 )
01382     {
01383         myDebug->tprintf( 4, "unpack_entities buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01384         tmp_buff = buff_ptr;
01385     }
01386     result = unpack_sets( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking sets failed" );
01387     if( myDebug->get_verbosity() == 3 )
01388     {
01389         myDebug->tprintf( 4, "unpack_sets buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01390         tmp_buff = buff_ptr;
01391     }
01392     result = unpack_tags( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking tags failed" );
01393     if( myDebug->get_verbosity() == 3 )
01394     {
01395         myDebug->tprintf( 4, "unpack_tags buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01396         // tmp_buff = buff_ptr;
01397     }
01398 
01399     if( myDebug->get_verbosity() == 3 ) myDebug->print( 4, "\n" );
01400 
01401     return MB_SUCCESS;
01402 }
01403 
01404 int ParallelComm::estimate_ents_buffer_size( Range& entities, const bool store_remote_handles )
01405 {
01406     int buff_size = 0;
01407     std::vector< EntityHandle > dum_connect_vec;
01408     const EntityHandle* connect;
01409     int num_connect;
01410 
01411     int num_verts = entities.num_of_type( MBVERTEX );
01412     // # verts + coords + handles
01413     buff_size += 2 * sizeof( int ) + 3 * sizeof( double ) * num_verts;
01414     if( store_remote_handles ) buff_size += sizeof( EntityHandle ) * num_verts;
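    // Worked example (assuming a typical LP64 build with sizeof(int) == 4,
    // sizeof(double) == 8 and sizeof(EntityHandle) == 8): 1000 vertices with remote
    // handles contribute 2*4 + 3*8*1000 + 8*1000 = 32008 bytes to the estimate.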
01415 
01416     // Do a rough count by looking at first entity of each type
01417     for( EntityType t = MBEDGE; t < MBENTITYSET; t++ )
01418     {
01419         const Range::iterator rit = entities.lower_bound( t );
01420         if( rit == entities.end() || TYPE_FROM_HANDLE( *rit ) != t ) continue;
01421 
01422         ErrorCode result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect_vec );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get connectivity to estimate buffer size", -1 );
01423 
01424         // Number, type, nodes per entity
01425         buff_size += 3 * sizeof( int );
01426         int num_ents = entities.num_of_type( t );
01427         // Connectivity, handle for each ent
01428         buff_size += ( num_connect + 1 ) * sizeof( EntityHandle ) * num_ents;
01429     }
01430 
01431     // Extra entity type at end, passed as int
01432     buff_size += sizeof( int );
01433 
01434     return buff_size;
01435 }
01436 
01437 int ParallelComm::estimate_sets_buffer_size( Range& entities, const bool /*store_remote_handles*/ )
01438 {
01439     // Number of sets
01440     int buff_size = sizeof( int );
01441 
01442     // Walk the entity sets, summing the space needed for each set's contents, parents, and children
01443     Range::iterator rit = entities.lower_bound( MBENTITYSET );
01444     ErrorCode result;
01445 
01446     for( ; rit != entities.end(); ++rit )
01447     {
01448         unsigned int options;
01449         result = mbImpl->get_meshset_options( *rit, options );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get meshset options", -1 );
01450 
01451         buff_size += sizeof( int );
01452 
01453         Range set_range;
01454         if( options & MESHSET_SET )
01455         {
01456             // Range-based set; count the subranges
01457             result = mbImpl->get_entities_by_handle( *rit, set_range );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get set entities", -1 );
01458 
01459             // Set range
01460             buff_size += RANGE_SIZE( set_range );
01461         }
01462         else if( options & MESHSET_ORDERED )
01463         {
01464             // Just get the number of entities in the set
01465             int num_ents;
01466             result = mbImpl->get_number_entities_by_handle( *rit, num_ents );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get number entities in ordered set", -1 );
01467 
01468             // Set vec
01469             buff_size += sizeof( EntityHandle ) * num_ents + sizeof( int );
01470         }
01471 
01472         // Get numbers of parents/children
01473         int num_par, num_ch;
01474         result = mbImpl->num_child_meshsets( *rit, &num_ch );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num children", -1 );
01475         result = mbImpl->num_parent_meshsets( *rit, &num_par );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num parents", -1 );
01476 
01477         buff_size += ( num_ch + num_par ) * sizeof( EntityHandle ) + 2 * sizeof( int );
01478     }
01479 
01480     return buff_size;
01481 }
01482 
01483 ErrorCode ParallelComm::pack_entities( Range& entities, Buffer* buff, const bool store_remote_handles,
01484                                        const int to_proc, const bool /*is_iface*/, TupleList* entprocs,
01485                                        Range* /*allsent*/ )
01486 {
01487     // Packed information:
01488     // 1. # entities = E
01489     // 2. for e in E
01490     //   a. # procs sharing e, incl. sender and receiver = P
01491     //   b. for p in P (procs sharing e)
01492     //   c. for p in P (handle for e on p) (Note1)
01493     // 3. vertex/entity info
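    //
    // Illustrative wire layout for step 2, for one entity with P = 2 sharing procs:
    //   [int P=2][ints: p0 p1][handles: h0 h1]
    // where a zero handle means that proc's local handle for the entity is not yet
    // known (see build_sharedhps_list below).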
01494 
01495     // Get an estimate of the buffer size & pre-allocate buffer size
01496     int buff_size = estimate_ents_buffer_size( entities, store_remote_handles );
01497     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate ents buffer size" );
01498     buff->check_space( buff_size );
01499     myDebug->tprintf( 3, "estimate buffer size for %d entities: %d \n", (int)entities.size(), buff_size );
01500 
01501     unsigned int num_ents;
01502     ErrorCode result;
01503 
01504     std::vector< EntityHandle > entities_vec( entities.size() );
01505     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
01506 
01507     // First pack procs/handles sharing this ent, not including this dest but including
01508     // others (with zero handles)
01509     if( store_remote_handles )
01510     {
01511         // Buff space is at least proc + handle for each entity; use avg of 4 other procs
01512         // to estimate buff size, but check later
01513         buff->check_space( sizeof( int ) + ( 5 * sizeof( int ) + sizeof( EntityHandle ) ) * entities.size() );
01514 
01515         // 1. # entities = E
01516         PACK_INT( buff->buff_ptr, entities.size() );
01517 
01518         Range::iterator rit;
01519 
01520         // Pre-fetch sharedp and pstatus
01521         std::vector< int > sharedp_vals( entities.size() );
01522         result = mbImpl->tag_get_data( sharedp_tag(), entities, &sharedp_vals[0] );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
01523         std::vector< char > pstatus_vals( entities.size() );
01524         result = mbImpl->tag_get_data( pstatus_tag(), entities, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
01525 
01526         unsigned int i;
01527         int tmp_procs[MAX_SHARING_PROCS];
01528         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01529         std::set< unsigned int > dumprocs;
01530 
01531         // 2. for e in E
01532         for( rit = entities.begin(), i = 0; rit != entities.end(); ++rit, i++ )
01533         {
01534             unsigned int ind =
01535                 std::lower_bound( entprocs->vul_rd, entprocs->vul_rd + entprocs->get_n(), *rit ) - entprocs->vul_rd;
01536             assert( ind < entprocs->get_n() );
01537 
01538             while( ind < entprocs->get_n() && entprocs->vul_rd[ind] == *rit )
01539                 dumprocs.insert( entprocs->vi_rd[ind++] );
01540 
01541             result = build_sharedhps_list( *rit, pstatus_vals[i], sharedp_vals[i], dumprocs, num_ents, tmp_procs,
01542                                            tmp_handles );MB_CHK_SET_ERR( result, "Failed to build sharedhps" );
01543 
01544             dumprocs.clear();
01545 
01546             // Now pack them
01547             buff->check_space( ( num_ents + 1 ) * sizeof( int ) + num_ents * sizeof( EntityHandle ) );
01548             PACK_INT( buff->buff_ptr, num_ents );
01549             PACK_INTS( buff->buff_ptr, tmp_procs, num_ents );
01550             PACK_EH( buff->buff_ptr, tmp_handles, num_ents );
01551 
01552 #ifndef NDEBUG
01553             // Check for duplicates in proc list
01554             unsigned int dp = 0;
01555             for( ; dp < MAX_SHARING_PROCS && -1 != tmp_procs[dp]; dp++ )
01556                 dumprocs.insert( tmp_procs[dp] );
01557             assert( dumprocs.size() == dp );
01558             dumprocs.clear();
01559 #endif
01560         }
01561     }
01562 
01563     // Pack vertices
01564     Range these_ents = entities.subset_by_type( MBVERTEX );
01565     num_ents         = these_ents.size();
01566 
01567     if( num_ents )
01568     {
01569         buff_size = 2 * sizeof( int ) + 3 * num_ents * sizeof( double );
01570         buff->check_space( buff_size );
01571 
01572         // Type, # ents
01573         PACK_INT( buff->buff_ptr, ( (int)MBVERTEX ) );
01574         PACK_INT( buff->buff_ptr, ( (int)num_ents ) );
01575 
01576         std::vector< double > tmp_coords( 3 * num_ents );
01577         result = mbImpl->get_coords( these_ents, &tmp_coords[0] );MB_CHK_SET_ERR( result, "Failed to get vertex coordinates" );
01578         PACK_DBLS( buff->buff_ptr, &tmp_coords[0], 3 * num_ents );
01579 
01580         myDebug->tprintf( 4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01581                           CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01582     }
01583 
01584     // Now entities; go through range, packing by type and equal # verts per element
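    // (Note: the dereference of these_ents.rbegin() below assumes at least one vertex
    // was packed above, i.e. the entities being packed always include their vertices.)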
01585     Range::iterator start_rit = entities.find( *these_ents.rbegin() );
01586     ++start_rit;
01587     int last_nodes       = -1;
01588     EntityType last_type = MBMAXTYPE;
01589     these_ents.clear();
01590     Range::iterator end_rit = start_rit;
01591     EntitySequence* seq;
01592     ElementSequence* eseq;
01593 
01594     while( start_rit != entities.end() || !these_ents.empty() )
01595     {
01596         // Cases:
01597         // A: !end, last_type == MBMAXTYPE, seq: save contig sequence in these_ents
01598         // B: !end, last type & nodes same, seq: save contig sequence in these_ents
01599         // C: !end, last type & nodes different: pack these_ents, then save contig sequence in these_ents
01600         // D: end: pack these_ents
01601 
01602         // Find the sequence holding current start entity, if we're not at end
01603         eseq = NULL;
01604         if( start_rit != entities.end() )
01605         {
01606             result = sequenceManager->find( *start_rit, seq );MB_CHK_SET_ERR( result, "Failed to find entity sequence" );
01607             if( NULL == seq ) return MB_FAILURE;
01608             eseq = dynamic_cast< ElementSequence* >( seq );
01609         }
01610 
01611         // Pack the last batch if at end or next one is different
01612         if( !these_ents.empty() &&
01613             ( !eseq || eseq->type() != last_type || last_nodes != (int)eseq->nodes_per_element() ) )
01614         {
01615             result = pack_entity_seq( last_nodes, store_remote_handles, to_proc, these_ents, entities_vec, buff );MB_CHK_SET_ERR( result, "Failed to pack entities from a sequence" );
01616             these_ents.clear();
01617         }
01618 
01619         if( eseq )
01620         {
01621             // Continuation of current range, just save these entities
01622             // Get position in entities list one past end of this sequence
01623             end_rit = entities.lower_bound( start_rit, entities.end(), eseq->end_handle() + 1 );
01624 
01625             // Put these entities in the range
01626             std::copy( start_rit, end_rit, range_inserter( these_ents ) );
01627 
01628             last_type  = eseq->type();
01629             last_nodes = eseq->nodes_per_element();
01630         }
01631         else if( start_rit != entities.end() && TYPE_FROM_HANDLE( *start_rit ) == MBENTITYSET )
01632             break;
01633 
01634         start_rit = end_rit;
01635     }
01636 
01637     // Pack MBMAXTYPE to indicate end of ranges
01638     buff->check_space( sizeof( int ) );
01639     PACK_INT( buff->buff_ptr, ( (int)MBMAXTYPE ) );
01640 
01641     buff->set_stored_size();
01642     return MB_SUCCESS;
01643 }
01644 
01645 ErrorCode ParallelComm::build_sharedhps_list( const EntityHandle entity, const unsigned char pstatus,
01646                                               const int
01647 #ifndef NDEBUG
01648                                                   sharedp
01649 #endif
01650                                               ,
01651                                               const std::set< unsigned int >& procs, unsigned int& num_ents,
01652                                               int* tmp_procs, EntityHandle* tmp_handles )
01653 {
01654     num_ents = 0;
01655     unsigned char pstat;
01656     ErrorCode result = get_sharing_data( entity, tmp_procs, tmp_handles, pstat, num_ents );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
01657     assert( pstat == pstatus );
01658 
01659     // Build shared proc/handle lists
01660     // Start with multi-shared, since if it is the owner will be first
01661     if( pstatus & PSTATUS_MULTISHARED ) {}
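    // (Nothing to add here: for the multi-shared case get_sharing_data already returned
    // the complete proc/handle lists, with the owner first as noted above.)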
01662     else if( pstatus & PSTATUS_NOT_OWNED )
01663     {
01664         // If not multishared and not owned, other sharing proc is owner, put that
01665         // one first
01666         assert( "If not owned, I should be shared too" && pstatus & PSTATUS_SHARED && 1 == num_ents );
01667         tmp_procs[1]   = procConfig.proc_rank();
01668         tmp_handles[1] = entity;
01669         num_ents       = 2;
01670     }
01671     else if( pstatus & PSTATUS_SHARED )
01672     {
01673         // If not multishared and owned, I'm owner
01674         assert( "shared and owned, should be only 1 sharing proc" && 1 == num_ents );
01675         tmp_procs[1]   = tmp_procs[0];
01676         tmp_procs[0]   = procConfig.proc_rank();
01677         tmp_handles[1] = tmp_handles[0];
01678         tmp_handles[0] = entity;
01679         num_ents       = 2;
01680     }
01681     else
01682     {
01683         // Not shared yet, just add owner (me)
01684         tmp_procs[0]   = procConfig.proc_rank();
01685         tmp_handles[0] = entity;
01686         num_ents       = 1;
01687     }
01688 
01689 #ifndef NDEBUG
01690     int tmp_ps = num_ents;
01691 #endif
01692 
01693     // Now add others, with zero handle for now
01694     for( std::set< unsigned int >::iterator sit = procs.begin(); sit != procs.end(); ++sit )
01695     {
01696 #ifndef NDEBUG
01697         if( tmp_ps && std::find( tmp_procs, tmp_procs + tmp_ps, *sit ) != tmp_procs + tmp_ps )
01698         {
01699             std::cerr << "Trouble with something already in shared list on proc " << procConfig.proc_rank()
01700                       << ". Entity:" << std::endl;
01701             list_entities( &entity, 1 );
01702             std::cerr << "pstatus = " << (int)pstatus << ", sharedp = " << sharedp << std::endl;
01703             std::cerr << "tmp_ps = ";
01704             for( int i = 0; i < tmp_ps; i++ )
01705                 std::cerr << tmp_procs[i] << " ";
01706             std::cerr << std::endl;
01707             std::cerr << "procs = ";
01708             for( std::set< unsigned int >::iterator sit2 = procs.begin(); sit2 != procs.end(); ++sit2 )
01709                 std::cerr << *sit2 << " ";
01710             assert( false );
01711         }
01712 #endif
01713         tmp_procs[num_ents]   = *sit;
01714         tmp_handles[num_ents] = 0;
01715         num_ents++;
01716     }
01717 
01718     // Put -1 after procs and 0 after handles
01719     if( MAX_SHARING_PROCS > num_ents )
01720     {
01721         tmp_procs[num_ents]   = -1;
01722         tmp_handles[num_ents] = 0;
01723     }
01724 
01725     return MB_SUCCESS;
01726 }
01727 
01728 ErrorCode ParallelComm::pack_entity_seq( const int nodes_per_entity, const bool store_remote_handles, const int to_proc,
01729                                          Range& these_ents, std::vector< EntityHandle >& entities_vec, Buffer* buff )
01730 {
01731     int tmp_space = 3 * sizeof( int ) + nodes_per_entity * these_ents.size() * sizeof( EntityHandle );
01732     buff->check_space( tmp_space );
01733 
01734     // Pack the entity type
01735     PACK_INT( buff->buff_ptr, ( (int)TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01736 
01737     // Pack # ents
01738     PACK_INT( buff->buff_ptr, these_ents.size() );
01739 
01740     // Pack the nodes per entity
01741     PACK_INT( buff->buff_ptr, nodes_per_entity );
01742     myDebug->tprintf( 3, "buffer size after packing type/count/nodes_per_entity: %d\n", buff->get_current_size() );
01743 
01744     // Pack the connectivity
01745     std::vector< EntityHandle > connect;
01746     ErrorCode result = MB_SUCCESS;
01747     for( Range::const_iterator rit = these_ents.begin(); rit != these_ents.end(); ++rit )
01748     {
01749         connect.clear();
01750         result = mbImpl->get_connectivity( &( *rit ), 1, connect, false );MB_CHK_SET_ERR( result, "Failed to get connectivity" );
01751         assert( (int)connect.size() == nodes_per_entity );
01752         result =
01753             get_remote_handles( store_remote_handles, &connect[0], &connect[0], connect.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
01754         PACK_EH( buff->buff_ptr, &connect[0], connect.size() );
01755     }
01756 
01757     myDebug->tprintf( 3, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01758                       CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01759 
01760     return result;
01761 }
01762 
01763 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, EntityHandle* from_vec,
01764                                             EntityHandle* to_vec_tmp, int num_ents, int to_proc,
01765                                             const std::vector< EntityHandle >& new_ents )
01766 {
01767     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE RANGE-BASED VERSION, NO REUSE
01768     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01769     // OTHER VERSION TOO!!!
01770     if( 0 == num_ents ) return MB_SUCCESS;
01771 
01772     // Use a local destination ptr in case we're doing an in-place copy
01773     std::vector< EntityHandle > tmp_vector;
01774     EntityHandle* to_vec = to_vec_tmp;
01775     if( to_vec == from_vec )
01776     {
01777         tmp_vector.resize( num_ents );
01778         to_vec = &tmp_vector[0];
01779     }
01780 
01781     if( !store_remote_handles )
01782     {
01783         int err;
01784         // In this case, substitute position in new_ents list
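        // (The receiver recognizes these MBMAXTYPE-encoded placeholders and resolves
        // index i back to the i'th entry of its own new-entity list; see the note at
        // the top of pack_buffer.)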
01785         for( int i = 0; i < num_ents; i++ )
01786         {
01787             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
01788             assert( new_ents[ind] == from_vec[i] );
01789             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
01790             assert( to_vec[i] != 0 && !err && -1 != ind );
01791         }
01792     }
01793     else
01794     {
01795         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
01796         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
01797 
01798         // Get single-proc destination handles and shared procs
01799         std::vector< int > sharing_procs( num_ents );
01800         result = mbImpl->tag_get_data( shh_tag, from_vec, num_ents, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
01801         result = mbImpl->tag_get_data( shp_tag, from_vec, num_ents, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
01802         for( int j = 0; j < num_ents; j++ )
01803         {
01804             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
01805         }
01806 
01807         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01808         int tmp_procs[MAX_SHARING_PROCS];
01809         int i;
01810         // Go through results, and for 0-valued ones, look for multiple shared proc
01811         for( i = 0; i < num_ents; i++ )
01812         {
01813             if( !to_vec[i] )
01814             {
01815                 result = mbImpl->tag_get_data( shps_tag, from_vec + i, 1, tmp_procs );
01816                 if( MB_SUCCESS == result )
01817                 {
01818                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
01819                     {
01820                         if( -1 == tmp_procs[j] )
01821                             break;
01822                         else if( tmp_procs[j] == to_proc )
01823                         {
01824                             result = mbImpl->tag_get_data( shhs_tag, from_vec + i, 1, tmp_handles );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
01825                             to_vec[i] = tmp_handles[j];
01826                             assert( to_vec[i] );
01827                             break;
01828                         }
01829                     }
01830                 }
01831                 if( !to_vec[i] )
01832                 {
01833                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
01834                     if( (int)new_ents.size() == j )
01835                     {
01836                         std::cout << "Failed to find new entity in send list, proc " << procConfig.proc_rank()
01837                                   << std::endl;
01838                         for( int k = 0; k < num_ents; k++ )
01839                             std::cout << k << ": " << from_vec[k] << " " << to_vec[k] << std::endl;
01840                         MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
01841                     }
01842                     int err;
01843                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
01844                     if( err ) { MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" ); }
01845                 }
01846             }
01847         }
01848     }
01849 
01850     // memcpy over results if from_vec and to_vec are the same
01851     if( to_vec_tmp == from_vec ) memcpy( from_vec, to_vec, num_ents * sizeof( EntityHandle ) );
01852 
01853     return MB_SUCCESS;
01854 }
01855 
01856 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, const Range& from_range,
01857                                             EntityHandle* to_vec, int to_proc,
01858                                             const std::vector< EntityHandle >& new_ents )
01859 {
01860     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE VECTOR-BASED VERSION, NO REUSE
01861     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01862     // OTHER VERSION TOO!!!
01863     if( from_range.empty() ) return MB_SUCCESS;
01864 
01865     if( !store_remote_handles )
01866     {
01867         int err;
01868         // In this case, substitute position in new_ents list
01869         Range::iterator rit;
01870         unsigned int i;
01871         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
01872         {
01873             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
01874             assert( new_ents[ind] == *rit );
01875             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
01876             assert( to_vec[i] != 0 && !err && -1 != ind );
01877         }
01878     }
01879     else
01880     {
01881         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
01882         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
01883 
01884         // Get single-proc destination handles and shared procs
01885         std::vector< int > sharing_procs( from_range.size() );
01886         result = mbImpl->tag_get_data( shh_tag, from_range, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
01887         result = mbImpl->tag_get_data( shp_tag, from_range, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
01888         for( unsigned int j = 0; j < from_range.size(); j++ )
01889         {
01890             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
01891         }
01892 
01893         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01894         int tmp_procs[MAX_SHARING_PROCS];
01895         // Go through results, and for 0-valued ones, look for multiple shared proc
01896         Range::iterator rit;
01897         unsigned int i;
01898         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
01899         {
01900             if( !to_vec[i] )
01901             {
01902                 result = mbImpl->tag_get_data( shhs_tag, &( *rit ), 1, tmp_handles );
01903                 if( MB_SUCCESS == result )
01904                 {
01905                     result = mbImpl->tag_get_data( shps_tag, &( *rit ), 1, tmp_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
01906                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
01907                         if( tmp_procs[j] == to_proc )
01908                         {
01909                             to_vec[i] = tmp_handles[j];
01910                             break;
01911                         }
01912                 }
01913 
01914                 if( !to_vec[i] )
01915                 {
01916                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
01917                     if( (int)new_ents.size() == j )
01918                     { MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" ); }
01919                     int err;
01920                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
01921                     if( err ) { MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" ); }
01922                 }
01923             }
01924         }
01925     }
01926 
01927     return MB_SUCCESS;
01928 }
01929 
01930 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, const Range& from_range, Range& to_range,
01931                                             int to_proc, const std::vector< EntityHandle >& new_ents )
01932 {
01933     std::vector< EntityHandle > to_vector( from_range.size() );
01934 
01935     ErrorCode result = get_remote_handles( store_remote_handles, from_range, &to_vector[0], to_proc, new_ents );MB_CHK_SET_ERR( result, "Failed to get remote handles" );
01936     std::copy( to_vector.begin(), to_vector.end(), range_inserter( to_range ) );
01937     return result;
01938 }
01939 
01940 ErrorCode ParallelComm::unpack_entities( unsigned char*& buff_ptr, const bool store_remote_handles,
01941                                          const int /*from_ind*/, const bool is_iface,
01942                                          std::vector< std::vector< EntityHandle > >& L1hloc,
01943                                          std::vector< std::vector< EntityHandle > >& L1hrem,
01944                                          std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01945                                          std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01946                                          std::vector< EntityHandle >& new_ents, const bool created_iface )
01947 {
01948     // General algorithm:
01949     // - unpack # entities
01950     // - save start of remote handle info, then scan forward to entity definition data
01951     // - for all vertices or entities w/ same # verts:
01952     //   . get entity type, num ents, and (if !vert) # verts
01953     //   . for each ent:
01954     //      o get # procs/handles in remote handle info
01955     //      o if # procs/handles > 2, check for already-created entity:
01956     //        x get index of owner proc (1st in proc list), resize L1 list if nec
01957     //        x look for already-arrived entity in L2 by owner handle
01958     //      o if no existing entity:
01959     //        x if iface, look for existing entity with same connect & type
01960     //        x if none found, create vertex or element
01961     //        x if !iface & multi-shared, save on L2
01962     //        x if !iface, put new entity on new_ents list
01963     //      o update proc/handle, pstatus tags, adjusting to put owner first if iface
01964     //      o if !iface, save new handle on L1 for all sharing procs
01965 
01966     // Lists of handles/procs to return to sending/other procs
01967     // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
01968     //         and h' is either the remote proc handle (if that is known) or
01969     //         the owner proc handle (otherwise);
01970     // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
01971     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
01972     //         remote handles are on owning proc
01973     // L2p: owning procs for handles in L2hrem
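    //
    // Example: suppose this proc unpacks a ghost vertex v owned by proc 3, whose handle
    // on proc 3 is h3, and whose handle on another sharing proc 7 is not yet known.
    // For the message going back to proc 7, the entries are L1hloc += v, L1hrem += h3,
    // L1p += 3 (h3 is the owner's handle, not proc 7's); and if v is shared by more
    // than two procs it is also recorded as L2hloc += v, L2hrem += h3, L2p += 3 so a
    // later arrival of the same entity can be found by its owner handle.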
01974 
01975     ErrorCode result;
01976     bool done         = false;
01977     ReadUtilIface* ru = NULL;
01978 
01979     result = mbImpl->query_interface( ru );MB_CHK_SET_ERR( result, "Failed to get ReadUtilIface" );
01980 
01981     // 1. # entities = E
01982     int num_ents             = 0;
01983     unsigned char* buff_save = buff_ptr;
01984     int i, j;
01985 
01986     if( store_remote_handles )
01987     {
01988         UNPACK_INT( buff_ptr, num_ents );
01989 
01990         buff_save = buff_ptr;
01991 
01992         // Save place where remote handle info starts, then scan forward to ents
01993         for( i = 0; i < num_ents; i++ )
01994         {
01995             UNPACK_INT( buff_ptr, j );
01996             if( j < 0 )
01997             {
01998                 std::cout << "Should be non-negative # proc/handles." << std::endl;
01999                 return MB_FAILURE;
02000             }
02001 
02002             buff_ptr += j * ( sizeof( int ) + sizeof( EntityHandle ) );
02003         }
02004     }
02005 
02006     std::vector< EntityHandle > msg_ents;
02007 
02008     while( !done )
02009     {
02010         EntityType this_type = MBMAXTYPE;
02011         UNPACK_TYPE( buff_ptr, this_type );
02012         assert( this_type != MBENTITYSET );
02013 
02014         // MBMAXTYPE signifies end of entities data
02015         if( MBMAXTYPE == this_type ) break;
02016 
02017         // Get the number of ents
02018         int num_ents2, verts_per_entity = 0;
02019         UNPACK_INT( buff_ptr, num_ents2 );
02020 
02021         // Unpack the nodes per entity
02022         if( MBVERTEX != this_type && num_ents2 ) { UNPACK_INT( buff_ptr, verts_per_entity ); }
02023 
02024         std::vector< int > ps( MAX_SHARING_PROCS, -1 );
02025         std::vector< EntityHandle > hs( MAX_SHARING_PROCS, 0 );
02026         for( int e = 0; e < num_ents2; e++ )
02027         {
02028             // Check for existing entity, otherwise make new one
02029             EntityHandle new_h = 0;
02030             EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02031             double coords[3];
02032             int num_ps = -1;
02033 
02034             //=======================================
02035             // Unpack all the data at once, to make sure the buffer pointers
02036             // are tracked correctly
02037             //=======================================
02038             if( store_remote_handles )
02039             {
02040                 // Pointers to other procs/handles
02041                 UNPACK_INT( buff_save, num_ps );
02042                 if( 0 >= num_ps )
02043                 {
02044                     std::cout << "Shouldn't ever be fewer than 1 proc here." << std::endl;
02045                     return MB_FAILURE;
02046                 }
02047 
02048                 UNPACK_INTS( buff_save, &ps[0], num_ps );
02049                 UNPACK_EH( buff_save, &hs[0], num_ps );
02050             }
02051 
02052             if( MBVERTEX == this_type ) { UNPACK_DBLS( buff_ptr, coords, 3 ); }
02053             else
02054             {
02055                 assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
02056                 UNPACK_EH( buff_ptr, connect, verts_per_entity );
02057 
02058                 // Update connectivity to local handles
02059                 result = get_local_handles( connect, verts_per_entity, msg_ents );MB_CHK_SET_ERR( result, "Failed to get local handles" );
02060             }
02061 
02062             //=======================================
02063             // Now, process that data; begin by finding an identical
02064             // entity, if there is one
02065             //=======================================
02066             if( store_remote_handles )
02067             {
02068                 result = find_existing_entity( is_iface, ps[0], hs[0], num_ps, connect, verts_per_entity, this_type,
02069                                                L2hloc, L2hrem, L2p, new_h );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
02070             }
02071 
02072             //=======================================
02073             // If we didn't find one, we'll have to create one
02074             //=======================================
02075             bool created_here = false;
02076             if( !new_h && !is_iface )
02077             {
02078                 if( MBVERTEX == this_type )
02079                 {
02080                     // Create a vertex
02081                     result = mbImpl->create_vertex( coords, new_h );MB_CHK_SET_ERR( result, "Failed to make new vertex" );
02082                 }
02083                 else
02084                 {
02085                     // Create the element
02086                     result = mbImpl->create_element( this_type, connect, verts_per_entity, new_h );MB_CHK_SET_ERR( result, "Failed to make new element" );
02087 
02088                     // Update adjacencies
02089                     result = ru->update_adjacencies( new_h, 1, verts_per_entity, connect );MB_CHK_SET_ERR( result, "Failed to update adjacencies" );
02090                 }
02091 
02092                 // Should have a new handle now
02093                 assert( new_h );
02094 
02095                 created_here = true;
02096             }
02097 
02098             //=======================================
02099             // Take care of sharing data
02100             //=======================================
02101 
02102             // Need to save entities found in order, for interpretation of
02103             // later parts of this message
02104             if( !is_iface )
02105             {
02106                 assert( new_h );
02107                 msg_ents.push_back( new_h );
02108             }
02109 
02110             if( created_here ) new_ents.push_back( new_h );
02111 
02112             if( new_h && store_remote_handles )
02113             {
02114                 unsigned char new_pstat = 0x0;
02115                 if( is_iface )
02116                 {
02117                     new_pstat = PSTATUS_INTERFACE;
02118                     // Here, lowest rank proc should be first
02119                     int idx = std::min_element( &ps[0], &ps[0] + num_ps ) - &ps[0];
02120                     if( idx )
02121                     {
02122                         std::swap( ps[0], ps[idx] );
02123                         std::swap( hs[0], hs[idx] );
02124                     }
02125                     // Set ownership based on lowest rank; can't be in update_remote_data, because
02126                     // there we don't know whether it resulted from ghosting or not
02127                     if( ( num_ps > 1 && ps[0] != (int)rank() ) ) new_pstat |= PSTATUS_NOT_OWNED;
02128                 }
02129                 else if( created_here )
02130                 {
02131                     if( created_iface )
02132                         new_pstat = PSTATUS_NOT_OWNED;
02133                     else
02134                         new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
02135                 }
02136 
02137                 // Update sharing data and pstatus, adjusting order if iface
02138                 result = update_remote_data( new_h, &ps[0], &hs[0], num_ps, new_pstat );MB_CHK_SET_ERR( result, "Failed to update remote data in unpack_entities" );
02139 
02140                 // If a new multi-shared entity, save owner for subsequent lookup in L2 lists
02141                 if( store_remote_handles && !is_iface && num_ps > 2 )
02142                 {
02143                     L2hrem.push_back( hs[0] );
02144                     L2hloc.push_back( new_h );
02145                     L2p.push_back( ps[0] );
02146                 }
02147 
02148                 // Need to send this new handle to all sharing procs
02149                 if( !is_iface )
02150                 {
02151                     for( j = 0; j < num_ps; j++ )
02152                     {
02153                         if( ps[j] == (int)procConfig.proc_rank() ) continue;
02154                         int idx = get_buffers( ps[j] );
02155                         if( idx == (int)L1hloc.size() )
02156                         {
02157                             L1hloc.resize( idx + 1 );
02158                             L1hrem.resize( idx + 1 );
02159                             L1p.resize( idx + 1 );
02160                         }
02161 
02162                         // Don't bother adding if it's already in the list
02163                         std::vector< EntityHandle >::iterator vit =
02164                             std::find( L1hloc[idx].begin(), L1hloc[idx].end(), new_h );
02165                         if( vit != L1hloc[idx].end() )
02166                         {
02167                             // If it's in the list but remote handle isn't known but we know
02168                             // it, replace in the list
02169                             if( L1p[idx][vit - L1hloc[idx].begin()] != -1 && hs[j] )
02170                             {
02171                                 L1hrem[idx][vit - L1hloc[idx].begin()] = hs[j];
02172                                 L1p[idx][vit - L1hloc[idx].begin()]    = -1;
02173                             }
02174                             else
02175                                 continue;
02176                         }
02177                         else
02178                         {
02179                             if( !hs[j] )
02180                             {
02181                                 assert( -1 != ps[0] && num_ps > 2 );
02182                                 L1p[idx].push_back( ps[0] );
02183                                 L1hrem[idx].push_back( hs[0] );
02184                             }
02185                             else
02186                             {
02187                                 assert(
02188                                     "either this remote handle isn't in the remote list, or "
02189                                     "it's for another proc" &&
02190                                     ( std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) == L1hrem[idx].end() ||
02191                                       L1p[idx][std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) -
02192                                                L1hrem[idx].begin()] != -1 ) );
02193                                 L1p[idx].push_back( -1 );
02194                                 L1hrem[idx].push_back( hs[j] );
02195                             }
02196                             L1hloc[idx].push_back( new_h );
02197                         }
02198                     }
02199                 }
02200 
02201                 assert( "Shouldn't be here for non-shared entities" && -1 != num_ps );
02202                 std::fill( &ps[0], &ps[num_ps], -1 );
02203                 std::fill( &hs[0], &hs[num_ps], 0 );
02204             }
02205         }
02206 
02207         myDebug->tprintf( 4, "Unpacked %d ents of type %s\n", num_ents2, CN::EntityTypeName( this_type ) );
02208     }
02209 
02210     myDebug->tprintf( 4, "Done unpacking entities.\n" );
02211 
02212     // Need to sort here, to enable searching
02213     std::sort( new_ents.begin(), new_ents.end() );
02214 
02215     return MB_SUCCESS;
02216 }
02217 
02218 ErrorCode ParallelComm::print_buffer( unsigned char* buff_ptr, int mesg_tag, int from_proc, bool sent )
02219 {
02220     std::cerr << procConfig.proc_rank();
02221     if( sent )
02222         std::cerr << " sent";
02223     else
02224         std::cerr << " received";
02225     std::cerr << " message type " << mesg_tag << " to/from proc " << from_proc << "; contents:" << std::endl;
02226 
02227     int msg_length, num_ents;
02228     unsigned char* orig_ptr = buff_ptr;
02229     UNPACK_INT( buff_ptr, msg_length );
02230     std::cerr << msg_length << " bytes..." << std::endl;
02231 
02232     if( MB_MESG_ENTS_SIZE == mesg_tag || MB_MESG_ENTS_LARGE == mesg_tag )
02233     {
02234         // 1. # entities = E
02235         int i, j, k;
02236         std::vector< int > ps;
02237         std::vector< EntityHandle > hs;
02238 
02239         UNPACK_INT( buff_ptr, num_ents );
02240         std::cerr << num_ents << " entities..." << std::endl;
02241 
02242         // Print the sharing procs/handles recorded for each entity
02243         for( i = 0; i < num_ents; i++ )
02244         {
02245             UNPACK_INT( buff_ptr, j );
02246             if( 0 > j ) return MB_FAILURE;
02247             ps.resize( j );
02248             hs.resize( j );
02249             std::cerr << "Entity " << i << ", # procs = " << j << std::endl;
02250             UNPACK_INTS( buff_ptr, &ps[0], j );
02251             UNPACK_EH( buff_ptr, &hs[0], j );
02252             std::cerr << "   Procs: ";
02253             for( k = 0; k < j; k++ )
02254                 std::cerr << ps[k] << " ";
02255             std::cerr << std::endl;
02256             std::cerr << "   Handles: ";
02257             for( k = 0; k < j; k++ )
02258                 std::cerr << hs[k] << " ";
02259             std::cerr << std::endl;
02260 
02261             if( buff_ptr - orig_ptr > msg_length )
02262             {
02263                 std::cerr << "End of buffer..." << std::endl;
02264                 std::cerr.flush();
02265                 return MB_FAILURE;
02266             }
02267         }
02268 
02269         while( true )
02270         {
02271             EntityType this_type = MBMAXTYPE;
02272             UNPACK_TYPE( buff_ptr, this_type );
02273             assert( this_type != MBENTITYSET );
02274 
02275             // MBMAXTYPE signifies end of entities data
02276             if( MBMAXTYPE == this_type ) break;
02277 
02278             // Get the number of ents
02279             int num_ents2, verts_per_entity = 0;
02280             UNPACK_INT( buff_ptr, num_ents2 );
02281 
02282             // Unpack the nodes per entity
02283             if( MBVERTEX != this_type && num_ents2 ) { UNPACK_INT( buff_ptr, verts_per_entity ); }
02284 
02285             std::cerr << "Type: " << CN::EntityTypeName( this_type ) << "; num_ents = " << num_ents2;
02286             if( MBVERTEX != this_type ) std::cerr << "; verts_per_ent = " << verts_per_entity;
02287             std::cerr << std::endl;
02288             if( num_ents2 < 0 || num_ents2 > msg_length )
02289             {
02290                 std::cerr << "Wrong number of entities, returning." << std::endl;
02291                 return MB_FAILURE;
02292             }
02293 
02294             for( int e = 0; e < num_ents2; e++ )
02295             {
02296                 // Check for existing entity, otherwise make new one
02297                 if( MBVERTEX == this_type )
02298                 {
02299                     double coords[3];
02300                     UNPACK_DBLS( buff_ptr, coords, 3 );
02301                     std::cerr << "xyz = " << coords[0] << ", " << coords[1] << ", " << coords[2] << std::endl;
02302                 }
02303                 else
02304                 {
02305                     EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02306                     assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
02307                     UNPACK_EH( buff_ptr, connect, verts_per_entity );
02308 
02309                     // Update connectivity to local handles
02310                     std::cerr << "Connectivity: ";
02311                     for( k = 0; k < verts_per_entity; k++ )
02312                         std::cerr << connect[k] << " ";
02313                     std::cerr << std::endl;
02314                 }
02315 
02316                 if( buff_ptr - orig_ptr > msg_length )
02317                 {
02318                     std::cerr << "End of buffer..." << std::endl;
02319                     std::cerr.flush();
02320                     return MB_FAILURE;
02321                 }
02322             }
02323         }
02324     }
02325     else if( MB_MESG_REMOTEH_SIZE == mesg_tag || MB_MESG_REMOTEH_LARGE == mesg_tag )
02326     {
02327         UNPACK_INT( buff_ptr, num_ents );
02328         std::cerr << num_ents << " entities..." << std::endl;
02329         if( 0 > num_ents || num_ents > msg_length )
02330         {
02331             std::cerr << "Wrong number of entities, returning." << std::endl;
02332             return MB_FAILURE;
02333         }
02334         std::vector< EntityHandle > L1hloc( num_ents ), L1hrem( num_ents );
02335         std::vector< int > L1p( num_ents );
02336         UNPACK_INTS( buff_ptr, &L1p[0], num_ents );
02337         UNPACK_EH( buff_ptr, &L1hrem[0], num_ents );
02338         UNPACK_EH( buff_ptr, &L1hloc[0], num_ents );
02339         std::cerr << num_ents << " Entity pairs; hremote/hlocal/proc: " << std::endl;
02340         for( int i = 0; i < num_ents; i++ )
02341         {
02342             EntityType etype = TYPE_FROM_HANDLE( L1hloc[i] );
02343             std::cerr << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hrem[i] ) << ", "
02344                       << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hloc[i] ) << ", " << L1p[i] << std::endl;
02345         }
02346 
02347         if( buff_ptr - orig_ptr > msg_length )
02348         {
02349             std::cerr << "End of buffer..." << std::endl;
02350             std::cerr.flush();
02351             return MB_FAILURE;
02352         }
02353     }
02354     else if( mesg_tag == MB_MESG_TAGS_SIZE || mesg_tag == MB_MESG_TAGS_LARGE )
02355     {
02356         int num_tags, dum1, data_type, tag_size;
02357         UNPACK_INT( buff_ptr, num_tags );
02358         std::cerr << "Number of tags = " << num_tags << std::endl;
02359         for( int i = 0; i < num_tags; i++ )
02360         {
02361             std::cerr << "Tag " << i << ":" << std::endl;
02362             UNPACK_INT( buff_ptr, tag_size );
02363             UNPACK_INT( buff_ptr, dum1 );
02364             UNPACK_INT( buff_ptr, data_type );
02365             std::cerr << "Tag size, type, data type = " << tag_size << ", " << dum1 << ", " << data_type << std::endl;
02366             UNPACK_INT( buff_ptr, dum1 );
02367             std::cerr << "Default value size = " << dum1 << std::endl;
02368             buff_ptr += dum1;
02369             UNPACK_INT( buff_ptr, dum1 );
02370             std::string name( (char*)buff_ptr, dum1 );
02371             std::cerr << "Tag name = " << name.c_str() << std::endl;
02372             buff_ptr += dum1;
02373             UNPACK_INT( buff_ptr, num_ents );
02374             std::cerr << "Number of ents = " << num_ents << std::endl;
02375             std::vector< EntityHandle > tmp_buff( num_ents );
02376             UNPACK_EH( buff_ptr, &tmp_buff[0], num_ents );
02377             int tot_length = 0;
02378             for( int j = 0; j < num_ents; j++ )
02379             {
02380                 EntityType etype = TYPE_FROM_HANDLE( tmp_buff[j] );
02381                 std::cerr << CN::EntityTypeName( etype ) << " " << ID_FROM_HANDLE( tmp_buff[j] ) << ", tag = ";
02382                 if( tag_size == MB_VARIABLE_LENGTH )
02383                 {
02384                     UNPACK_INT( buff_ptr, dum1 );
02385                     tot_length += dum1;
02386                     std::cerr << "(variable, length = " << dum1 << ")" << std::endl;
02387                 }
02388                 else if( data_type == MB_TYPE_DOUBLE )
02389                 {
02390                     double dum_dbl;
02391                     UNPACK_DBL( buff_ptr, dum_dbl );
02392                     std::cerr << dum_dbl << std::endl;
02393                 }
02394                 else if( data_type == MB_TYPE_INTEGER )
02395                 {
02396                     int dum_int;
02397                     UNPACK_INT( buff_ptr, dum_int );
02398                     std::cerr << dum_int << std::endl;
02399                 }
02400                 else if( data_type == MB_TYPE_OPAQUE )
02401                 {
02402                     std::cerr << "(opaque)" << std::endl;
02403                     buff_ptr += tag_size;
02404                 }
02405                 else if( data_type == MB_TYPE_HANDLE )
02406                 {
02407                     EntityHandle dum_eh;
02408                     UNPACK_EH( buff_ptr, &dum_eh, 1 );
02409                     std::cerr << dum_eh << std::endl;
02410                 }
02411                 else if( data_type == MB_TYPE_BIT )
02412                 {
02413                     std::cerr << "(bit)" << std::endl;
02414                     buff_ptr += tag_size;
02415                 }
02416             }
02417             if( tag_size == MB_VARIABLE_LENGTH ) buff_ptr += tot_length;
02418         }
02419     }
02420     else
02421     {
02422         assert( false );
02423         return MB_FAILURE;
02424     }
02425 
02426     std::cerr.flush();
02427 
02428     return MB_SUCCESS;
02429 }
02430 
02431 ErrorCode ParallelComm::list_entities( const EntityHandle* ents, int num_ents )
02432 {
02433     if( NULL == ents )
02434     {
02435         Range shared_ents;
02436         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( shared_ents ) );
02437         shared_ents.print( "Shared entities:\n" );
02438         return MB_SUCCESS;
02439     }
02440 
02441     unsigned char pstat;
02442     EntityHandle tmp_handles[MAX_SHARING_PROCS];
02443     int tmp_procs[MAX_SHARING_PROCS];
02444     unsigned int num_ps;
02445     ErrorCode result;
02446 
02447     for( int i = 0; i < num_ents; i++ )
02448     {
02449         result = mbImpl->list_entities( ents + i, 1 );MB_CHK_ERR( result );
02450         double coords[3];
02451         result = mbImpl->get_coords( ents + i, 1, coords );
02452         if( MB_SUCCESS == result ) std::cout << " coords: " << coords[0] << " " << coords[1] << " " << coords[2] << "\n";
02453 
02454         result = get_sharing_data( ents[i], tmp_procs, tmp_handles, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
02455 
02456         std::cout << "Pstatus: ";
02457         if( !num_ps )
02458             std::cout << "local " << std::endl;
02459         else
02460         {
02461             if( pstat & PSTATUS_NOT_OWNED ) std::cout << "NOT_OWNED; ";
02462             if( pstat & PSTATUS_SHARED ) std::cout << "SHARED; ";
02463             if( pstat & PSTATUS_MULTISHARED ) std::cout << "MULTISHARED; ";
02464             if( pstat & PSTATUS_INTERFACE ) std::cout << "INTERFACE; ";
02465             if( pstat & PSTATUS_GHOST ) std::cout << "GHOST; ";
02466             std::cout << std::endl;
02467             for( unsigned int j = 0; j < num_ps; j++ )
02468             {
02469                 std::cout << "  proc " << tmp_procs[j] << " id (handle) " << mbImpl->id_from_handle( tmp_handles[j] )
02470                           << "(" << tmp_handles[j] << ")" << std::endl;
02471             }
02472         }
02473         std::cout << std::endl;
02474     }
02475 
02476     return MB_SUCCESS;
02477 }
02478 
02479 ErrorCode ParallelComm::list_entities( const Range& ents )
02480 {
02481     for( Range::iterator rit = ents.begin(); rit != ents.end(); ++rit )
02482         list_entities( &( *rit ), 1 );
02483 
02484     return MB_SUCCESS;
02485 }
02486 
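// Range-based overload: walk the local and remote ranges in lockstep and update the sharing
// data for each handle pair via the single-entity version below.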
02487 ErrorCode ParallelComm::update_remote_data( Range& local_range, Range& remote_range, int other_proc,
02488                                             const unsigned char add_pstat )
02489 {
02490     Range::iterator rit, rit2;
02491     ErrorCode result = MB_SUCCESS;
02492 
02493     // For each pair of local/remote handles:
02494     for( rit = local_range.begin(), rit2 = remote_range.begin(); rit != local_range.end(); ++rit, ++rit2 )
02495     {
02496         result = update_remote_data( *rit, &other_proc, &( *rit2 ), 1, add_pstat );MB_CHK_ERR( result );
02497     }
02498 
02499     return MB_SUCCESS;
02500 }
02501 
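// Merge the passed-in sharing processors/handles and pstatus bits with the sharing data
// already tagged on new_h, insert this processor where needed, then write the combined
// lists back through set_sharing_data() and record the entity in sharedEnts if shared.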
02502 ErrorCode ParallelComm::update_remote_data( const EntityHandle new_h, const int* ps, const EntityHandle* hs,
02503                                             const int num_ps, const unsigned char add_pstat
02504                                             // The following lines left in for future debugging, at least until I trust
02505                                             // this function; tjt, 10/4/2013
02506                                             //                                           , int *new_ps,
02507                                             //                                           EntityHandle *new_hs,
02508                                             //                                           int &new_numps,
02509                                             //                                           unsigned char &new_pstat
02510 )
02511 {
02512     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02513     // in this function, so no need to initialize; sharing data does not include
02514     // this proc if shared with only one other
02515 
02516     // Following variables declared here to avoid compiler errors
02517     int new_numps;
02518     unsigned char new_pstat;
02519     std::vector< int > new_ps( MAX_SHARING_PROCS, -1 );
02520     std::vector< EntityHandle > new_hs( MAX_SHARING_PROCS, 0 );
02521 
02522     new_numps        = 0;
02523     ErrorCode result = get_sharing_data( new_h, &new_ps[0], &new_hs[0], new_pstat, new_numps );MB_CHK_SET_ERR( result, "Failed to get sharing data in update_remote_data" );
02524     int num_exist = new_numps;
02525 
02526     // Add new pstat info to the flag
02527     new_pstat |= add_pstat;
02528 
02529     /*
02530     #define plist(str, lst, siz)                                          \
02531         std::cout << str << "(";                                          \
02532         for (int i = 0; i < (int)siz; i++) std::cout << lst[i] << " ";    \
02533         std::cout << ") ";                                                \
02534 
02535         std::cout << "update_remote_data: rank = " << rank() << ", new_h = " << new_h << std::endl;
02536         std::string ostr;
02537         plist("ps", ps, num_ps);
02538         plist("hs", hs, num_ps);
02539         print_pstatus(add_pstat, ostr);
02540         std::cout << ", add_pstat = " << ostr.c_str() << std::endl;
02541         plist("tag_ps", new_ps, new_numps);
02542         plist("tag_hs", new_hs, new_numps);
02543         assert(new_numps <= size());
02544         print_pstatus(new_pstat, ostr);
02545         std::cout << ", tag_pstat=" << ostr.c_str() << std::endl;
02546     */
02547 
02548 #ifndef NDEBUG
02549     {
02550         // Check for duplicates in proc list
02551         std::set< unsigned int > dumprocs;
02552         unsigned int dp = 0;
02553         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
02554             dumprocs.insert( ps[dp] );
02555         assert( dp == dumprocs.size() );
02556     }
02557 #endif
02558 
02559     // If only one sharer and I'm the owner, insert myself in the list;
02560     // otherwise, my data is checked at the end
02561     if( 1 == new_numps && !( new_pstat & PSTATUS_NOT_OWNED ) )
02562     {
02563         new_hs[1] = new_hs[0];
02564         new_ps[1] = new_ps[0];
02565         new_hs[0] = new_h;
02566         new_ps[0] = rank();
02567         new_numps = 2;
02568     }
02569 
02570     // Now put passed-in data onto lists
02571     int idx;
02572     for( int i = 0; i < num_ps; i++ )
02573     {
02574         idx = std::find( &new_ps[0], &new_ps[0] + new_numps, ps[i] ) - &new_ps[0];
02575         if( idx < new_numps )
02576         {
02577             if( !new_hs[idx] && hs[i] )
02578                 // h on list is 0 and passed-in h is non-zero, replace it
02579                 new_hs[idx] = hs[i];
02580             else
02581                 assert( !hs[i] || new_hs[idx] == hs[i] );
02582         }
02583         else
02584         {
02585             if( new_numps + 1 == MAX_SHARING_PROCS )
02586             {
02587                 MB_SET_ERR( MB_FAILURE, "Exceeded MAX_SHARING_PROCS for "
02588                                             << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) ) << ' '
02589                                             << ID_FROM_HANDLE( new_h ) << " in process " << rank() );
02590             }
02591             new_ps[new_numps] = ps[i];
02592             new_hs[new_numps] = hs[i];
02593             new_numps++;
02594         }
02595     }
02596 
02597     // Add myself, if it isn't there already
02598     idx = std::find( &new_ps[0], &new_ps[0] + new_numps, rank() ) - &new_ps[0];
02599     if( idx == new_numps )
02600     {
02601         new_ps[new_numps] = rank();
02602         new_hs[new_numps] = new_h;
02603         new_numps++;
02604     }
02605     else if( !new_hs[idx] && new_numps > 2 )
02606         new_hs[idx] = new_h;
02607 
02608     // Proc list is complete; update for shared, multishared
02609     if( new_numps > 1 )
02610     {
02611         if( new_numps > 2 ) new_pstat |= PSTATUS_MULTISHARED;
02612         new_pstat |= PSTATUS_SHARED;
02613     }
02614 
02615     /*
02616         plist("new_ps", new_ps, new_numps);
02617         plist("new_hs", new_hs, new_numps);
02618         print_pstatus(new_pstat, ostr);
02619         std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
02620         std::cout << std::endl;
02621     */
02622 
02623     result = set_sharing_data( new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0] );MB_CHK_SET_ERR( result, "Failed to set sharing data in update_remote_data" );
02624 
02625     if( new_pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
02626 
02627     return MB_SUCCESS;
02628 }
02629 
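// Older variant of update_remote_data() that writes the sharedp/sharedh and
// sharedps/sharedhs tags directly instead of going through set_sharing_data().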
02630 ErrorCode ParallelComm::update_remote_data_old( const EntityHandle new_h, const int* ps, const EntityHandle* hs,
02631                                                 const int num_ps, const unsigned char add_pstat )
02632 {
02633     EntityHandle tag_hs[MAX_SHARING_PROCS];
02634     int tag_ps[MAX_SHARING_PROCS];
02635     unsigned char pstat;
02636     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02637     // in this function, so no need to initialize
02638     unsigned int num_exist;
02639     ErrorCode result = get_sharing_data( new_h, tag_ps, tag_hs, pstat, num_exist );MB_CHK_ERR( result );
02640 
02641 #ifndef NDEBUG
02642     {
02643         // Check for duplicates in proc list
02644         std::set< unsigned int > dumprocs;
02645         unsigned int dp = 0;
02646         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
02647             dumprocs.insert( ps[dp] );
02648         assert( dp == dumprocs.size() );
02649     }
02650 #endif
02651 
02652     // Add any new sharing data
02653     bool changed = false;
02654     int idx;
02655     if( !num_exist )
02656     {
02657         // Just take what caller passed
02658         memcpy( tag_ps, ps, num_ps * sizeof( int ) );
02659         memcpy( tag_hs, hs, num_ps * sizeof( EntityHandle ) );
02660         num_exist = num_ps;
02661         // If it's only one, hopefully I'm not there yet...
02662         assert( "I shouldn't be the only proc there." && ( 1 != num_exist || ps[0] != (int)procConfig.proc_rank() ) );
02663         changed = true;
02664     }
02665     else
02666     {
02667         for( int i = 0; i < num_ps; i++ )
02668         {
02669             idx = std::find( tag_ps, tag_ps + num_exist, ps[i] ) - tag_ps;
02670             if( idx == (int)num_exist )
02671             {
02672                 if( num_exist == MAX_SHARING_PROCS )
02673                 {
02674                     std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) )
02675                               << ' ' << ID_FROM_HANDLE( new_h ) << " in process " << proc_config().proc_rank()
02676                               << std::endl;
02677                     std::cerr.flush();
02678                     MPI_Abort( proc_config().proc_comm(), 66 );
02679                 }
02680 
02681                 // If there's only 1 sharing proc, and it's not me, then
02682                 // we'll end up with 3; add me to the front
02683                 if( !i && num_ps == 1 && num_exist == 1 && ps[0] != (int)procConfig.proc_rank() )
02684                 {
02685                     int j = 1;
02686                     // If I own this entity, put me at front, otherwise after first
02687                     if( !( pstat & PSTATUS_NOT_OWNED ) )
02688                     {
02689                         tag_ps[1] = tag_ps[0];
02690                         tag_hs[1] = tag_hs[0];
02691                         j         = 0;
02692                     }
02693                     tag_ps[j] = procConfig.proc_rank();
02694                     tag_hs[j] = new_h;
02695                     num_exist++;
02696                 }
02697 
02698                 tag_ps[num_exist] = ps[i];
02699                 tag_hs[num_exist] = hs[i];
02700                 num_exist++;
02701                 changed = true;
02702             }
02703             else if( 0 == tag_hs[idx] )
02704             {
02705                 tag_hs[idx] = hs[i];
02706                 changed     = true;
02707             }
02708             else if( 0 != hs[i] )
02709             {
02710                 assert( hs[i] == tag_hs[idx] );
02711             }
02712         }
02713     }
02714 
02715     // Adjust for interface layer if necessary
02716     if( add_pstat & PSTATUS_INTERFACE )
02717     {
02718         idx = std::min_element( tag_ps, tag_ps + num_exist ) - tag_ps;
02719         if( idx )
02720         {
02721             int tag_proc       = tag_ps[idx];
02722             tag_ps[idx]        = tag_ps[0];
02723             tag_ps[0]          = tag_proc;
02724             EntityHandle tag_h = tag_hs[idx];
02725             tag_hs[idx]        = tag_hs[0];
02726             tag_hs[0]          = tag_h;
02727             changed            = true;
02728             if( tag_ps[0] != (int)procConfig.proc_rank() ) pstat |= PSTATUS_NOT_OWNED;
02729         }
02730     }
02731 
02732     if( !changed ) return MB_SUCCESS;
02733 
02734     assert( "interface entities should have > 1 proc" && ( !( add_pstat & PSTATUS_INTERFACE ) || num_exist > 1 ) );
02735     assert( "ghost entities should have > 1 proc" && ( !( add_pstat & PSTATUS_GHOST ) || num_exist > 1 ) );
02736 
02737     // If it's multi-shared and we created the entity in this unpack,
02738     // local handle probably isn't in handle list yet
02739     if( num_exist > 2 )
02740     {
02741         idx = std::find( tag_ps, tag_ps + num_exist, procConfig.proc_rank() ) - tag_ps;
02742         assert( idx < (int)num_exist );
02743         if( !tag_hs[idx] ) tag_hs[idx] = new_h;
02744     }
02745 
02746     int tag_p;
02747     EntityHandle tag_h;
02748 
02749     // Update pstat
02750     pstat |= add_pstat;
02751 
02752     if( num_exist > 2 )
02753         pstat |= ( PSTATUS_MULTISHARED | PSTATUS_SHARED );
02754     else if( num_exist > 0 )
02755         pstat |= PSTATUS_SHARED;
02756 
02757     //    compare_remote_data(new_h, num_ps, hs, ps, add_pstat,
02758     //                        num_exist, tag_hs, tag_ps, pstat);
02759 
02760     // Reset single shared proc/handle if it was shared and is moving to multi-shared
02761     if( num_exist > 2 && !( pstat & PSTATUS_MULTISHARED ) && ( pstat & PSTATUS_SHARED ) )
02762     {
02763         // Must remove sharedp/h first, which really means set to default value
02764         tag_p  = -1;
02765         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, &tag_p );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
02766         tag_h  = 0;
02767         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, &tag_h );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
02768     }
02769 
02770     // Set sharing tags
02771     if( num_exist > 2 )
02772     {
02773         std::fill( tag_ps + num_exist, tag_ps + MAX_SHARING_PROCS, -1 );
02774         std::fill( tag_hs + num_exist, tag_hs + MAX_SHARING_PROCS, 0 );
02775         result = mbImpl->tag_set_data( sharedps_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedps tag data" );
02776         result = mbImpl->tag_set_data( sharedhs_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag data" );
02777 
02778 #ifndef NDEBUG
02779         {
02780             // Check for duplicates in proc list
02781             std::set< unsigned int > dumprocs;
02782             unsigned int dp = 0;
02783             for( ; dp < num_exist && -1 != tag_ps[dp]; dp++ )
02784                 dumprocs.insert( tag_ps[dp] );
02785             assert( dp == dumprocs.size() );
02786         }
02787 #endif
02788     }
02789     else if( num_exist == 2 || num_exist == 1 )
02790     {
02791         if( tag_ps[0] == (int)procConfig.proc_rank() )
02792         {
02793             assert( 2 == num_exist && tag_ps[1] != (int)procConfig.proc_rank() );
02794             tag_ps[0] = tag_ps[1];
02795             tag_hs[0] = tag_hs[1];
02796         }
02797         assert( tag_ps[0] != -1 && tag_hs[0] != 0 );
02798         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
02799         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
02800     }
02801 
02802     // Now set new pstatus
02803     result = mbImpl->tag_set_data( pstatus_tag(), &new_h, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
02804 
02805     if( pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
02806 
02807     return MB_SUCCESS;
02808 }
02809 
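// Illustrative usage sketch (editorial addition, not part of the original source), assuming
// 'pcomm' is an initialized ParallelComm and 'ents' is a Range of shared entities: find the
// processors that share every entity in the range.
//
//   std::set< int > common_procs;
//   ErrorCode rval = pcomm->get_sharing_data( ents, common_procs, Interface::INTERSECT );
//   if( MB_SUCCESS == rval && !common_procs.empty() )
//       std::cout << "All entities shared with " << common_procs.size() << " proc(s)" << std::endl;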
02810 ErrorCode ParallelComm::get_sharing_data( const Range& entities, std::set< int >& procs, int operation )
02811 {
02812     // Get the union or intersection of sharing data for multiple entities
02813     ErrorCode result;
02814     int sp2[MAX_SHARING_PROCS];
02815     int num_ps;
02816     unsigned char pstat;
02817     std::set< int > tmp_procs;
02818     procs.clear();
02819 
02820     for( Range::const_iterator rit = entities.begin(); rit != entities.end(); ++rit )
02821     {
02822         // Get sharing procs
02823         result = get_sharing_data( *rit, sp2, NULL, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_sharing_data" );
02824         if( !( pstat & PSTATUS_SHARED ) && Interface::INTERSECT == operation )
02825         {
02826             procs.clear();
02827             return MB_SUCCESS;
02828         }
02829 
02830         if( rit == entities.begin() ) { std::copy( sp2, sp2 + num_ps, std::inserter( procs, procs.begin() ) ); }
02831         else
02832         {
02833             std::sort( sp2, sp2 + num_ps );
02834             tmp_procs.clear();
02835             if( Interface::UNION == operation )
02836                 std::set_union( procs.begin(), procs.end(), sp2, sp2 + num_ps,
02837                                 std::inserter( tmp_procs, tmp_procs.end() ) );
02838             else if( Interface::INTERSECT == operation )
02839                 std::set_intersection( procs.begin(), procs.end(), sp2, sp2 + num_ps,
02840                                        std::inserter( tmp_procs, tmp_procs.end() ) );
02841             else
02842             {
02843                 assert( "Unknown operation." && false );
02844                 return MB_FAILURE;
02845             }
02846             procs.swap( tmp_procs );
02847         }
02848         if( Interface::INTERSECT == operation && procs.empty() ) return MB_SUCCESS;
02849     }
02850 
02851     return MB_SUCCESS;
02852 }
02853 
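// Single-entity query: read the pstatus tag and, depending on whether the entity is
// multishared or singly shared, fill ps/hs from the sharedps/sharedhs or sharedp/sharedh
// tags; num_ps reports the number of sharing processors (0 for purely local entities).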
02854 ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs, unsigned char& pstat,
02855                                           unsigned int& num_ps )
02856 {
02857     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
02858     if( pstat & PSTATUS_MULTISHARED )
02859     {
02860         result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
02861         if( hs )
02862         {
02863             result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
02864         }
02865         num_ps = std::find( ps, ps + MAX_SHARING_PROCS, -1 ) - ps;
02866     }
02867     else if( pstat & PSTATUS_SHARED )
02868     {
02869         result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
02870         if( hs )
02871         {
02872             result = mbImpl->tag_get_data( sharedh_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
02873             hs[1] = 0;
02874         }
02875         // Initialize past end of data
02876         ps[1]  = -1;
02877         num_ps = 1;
02878     }
02879     else
02880     {
02881         ps[0] = -1;
02882         if( hs ) hs[0] = 0;
02883         num_ps = 0;
02884     }
02885 
02886     assert( MAX_SHARING_PROCS >= num_ps );
02887 
02888     return MB_SUCCESS;
02889 }
02890 
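// Look for an entity that already exists locally: for multishared, non-interface cases match
// the owner's handle in the L2 lists; otherwise search adjacencies of the given connectivity.
// new_h is left zero when nothing is found.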
02891 ErrorCode ParallelComm::find_existing_entity( const bool is_iface, const int owner_p, const EntityHandle owner_h,
02892                                               const int num_ps, const EntityHandle* connect, const int num_connect,
02893                                               const EntityType this_type, std::vector< EntityHandle >& L2hloc,
02894                                               std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
02895                                               EntityHandle& new_h )
02896 {
02897     new_h = 0;
02898     if( !is_iface && num_ps > 2 )
02899     {
02900         for( unsigned int i = 0; i < L2hrem.size(); i++ )
02901         {
02902             if( L2hrem[i] == owner_h && owner_p == (int)L2p[i] )
02903             {
02904                 new_h = L2hloc[i];
02905                 return MB_SUCCESS;
02906             }
02907         }
02908     }
02909 
02910     // If we got here and it's a vertex, we don't need to look further
02911     if( MBVERTEX == this_type || !connect || !num_connect ) return MB_SUCCESS;
02912 
02913     Range tmp_range;
02914     ErrorCode result = mbImpl->get_adjacencies( connect, num_connect, CN::Dimension( this_type ), false, tmp_range );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
02915     if( !tmp_range.empty() )
02916     {
02917         // Found a corresponding entity - return target
02918         new_h = *tmp_range.begin();
02919     }
02920     else
02921     {
02922         new_h = 0;
02923     }
02924 
02925     return MB_SUCCESS;
02926 }
02927 
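// The get_local_handles() overloads translate packed handles back to local ones: a handle
// whose type decodes to MBMAXTYPE is really an index into the list of newly created
// entities and is replaced by the corresponding new handle.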
02928 ErrorCode ParallelComm::get_local_handles( const Range& remote_handles, Range& local_handles,
02929                                            const std::vector< EntityHandle >& new_ents )
02930 {
02931     std::vector< EntityHandle > rh_vec;
02932     rh_vec.reserve( remote_handles.size() );
02933     std::copy( remote_handles.begin(), remote_handles.end(), std::back_inserter( rh_vec ) );
02934     ErrorCode result = get_local_handles( &rh_vec[0], remote_handles.size(), new_ents );
02935     std::copy( rh_vec.begin(), rh_vec.end(), range_inserter( local_handles ) );
02936     return result;
02937 }
02938 
02939 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents )
02940 {
02941     std::vector< EntityHandle > tmp_ents;
02942     std::copy( new_ents.begin(), new_ents.end(), std::back_inserter( tmp_ents ) );
02943     return get_local_handles( from_vec, num_ents, tmp_ents );
02944 }
02945 
02946 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents,
02947                                            const std::vector< EntityHandle >& new_ents )
02948 {
02949     for( int i = 0; i < num_ents; i++ )
02950     {
02951         if( TYPE_FROM_HANDLE( from_vec[i] ) == MBMAXTYPE )
02952         {
02953             assert( ID_FROM_HANDLE( from_vec[i] ) < (int)new_ents.size() );
02954             from_vec[i] = new_ents[ID_FROM_HANDLE( from_vec[i] )];
02955         }
02956     }
02957 
02958     return MB_SUCCESS;
02959 }
02960 
02961 /*
02962 template <typename T> void
02963 insert_in_array(T* array, size_t array_size, size_t location, T value)
02964 {
02965   assert(location + 1 < array_size);
02966   for (size_t i = array_size - 1; i > location; i--)
02967     array[i] = array[i - 1];
02968   array[location] = value;
02969 }
02970 */
02971 
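// Build a handle map from remote (key) handles to a contiguous block of values starting at
// val_start, one key subrange at a time.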
02972 ErrorCode ParallelComm::pack_range_map( Range& key_range, EntityHandle val_start, HandleMap& handle_map )
02973 {
02974     for( Range::const_pair_iterator key_it = key_range.const_pair_begin(); key_it != key_range.const_pair_end();
02975          ++key_it )
02976     {
02977         int tmp_num = ( *key_it ).second - ( *key_it ).first + 1;
02978         handle_map.insert( ( *key_it ).first, val_start, tmp_num );
02979         val_start += tmp_num;
02980     }
02981 
02982     return MB_SUCCESS;
02983 }
02984 
02985 ErrorCode ParallelComm::pack_sets( Range& entities, Buffer* buff, const bool store_remote_handles, const int to_proc )
02986 {
02987     // SETS:
02988     // . #sets
02989     // . for each set:
02990     //   - options[#sets] (unsigned int)
02991     //   - if (unordered) set range
02992     //   - else if ordered
02993     //     . #ents in set
02994     //     . handles[#ents]
02995     //   - #parents
02996     //   - if (#parents) handles[#parents]
02997     //   - #children
02998     //   - if (#children) handles[#children]
02999 
03000     // Now the sets; assume any sets the application wants to pass are in the entities list
03001     ErrorCode result;
03002     Range all_sets = entities.subset_by_type( MBENTITYSET );
03003 
03004     int buff_size = estimate_sets_buffer_size( all_sets, store_remote_handles );
03005     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate sets buffer size" );
03006     buff->check_space( buff_size );
03007 
03008     // Number of sets
03009     PACK_INT( buff->buff_ptr, all_sets.size() );
03010 
03011     // Options for all sets
03012     std::vector< unsigned int > options( all_sets.size() );
03013     Range::iterator rit;
03014     std::vector< EntityHandle > members;
03015     int i;
03016     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03017     {
03018         result = mbImpl->get_meshset_options( *rit, options[i] );MB_CHK_SET_ERR( result, "Failed to get meshset options" );
03019     }
03020     buff->check_space( all_sets.size() * sizeof( unsigned int ) );
03021     PACK_VOID( buff->buff_ptr, &options[0], all_sets.size() * sizeof( unsigned int ) );
03022 
03023     // Pack parallel geometry unique id
03024     if( !all_sets.empty() )
03025     {
03026         Tag uid_tag;
03027         int n_sets  = all_sets.size();
03028         bool b_pack = false;
03029         std::vector< int > id_data( n_sets );
03030         result =
03031             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
03032 
03033         result = mbImpl->tag_get_data( uid_tag, all_sets, &id_data[0] );
03034         if( MB_TAG_NOT_FOUND != result )
03035         {
03036             if( MB_SUCCESS != result ) MB_SET_ERR( result, "Failed to get parallel geometry unique ids" );
03037             for( i = 0; i < n_sets; i++ )
03038             {
03039                 if( id_data[i] != 0 )
03040                 {
03041                     b_pack = true;
03042                     break;
03043                 }
03044             }
03045         }
03046 
03047         if( b_pack )
03048         {  // At least one set has a nonzero unique id; pack them all
03049             buff->check_space( ( n_sets + 1 ) * sizeof( int ) );
03050             PACK_INT( buff->buff_ptr, n_sets );
03051             PACK_INTS( buff->buff_ptr, &id_data[0], n_sets );
03052         }
03053         else
03054         {
03055             buff->check_space( sizeof( int ) );
03056             PACK_INT( buff->buff_ptr, 0 );
03057         }
03058     }
03059 
03060     // Vectors/ranges
03061     std::vector< EntityHandle > entities_vec( entities.size() );
03062     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
03063     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03064     {
03065         members.clear();
03066         result = mbImpl->get_entities_by_handle( *rit, members );MB_CHK_SET_ERR( result, "Failed to get entities in ordered set" );
03067         result =
03068             get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
03069         buff->check_space( members.size() * sizeof( EntityHandle ) + sizeof( int ) );
03070         PACK_INT( buff->buff_ptr, members.size() );
03071         PACK_EH( buff->buff_ptr, &members[0], members.size() );
03072     }
03073 
03074     // Pack parent/child sets
03075     if( !store_remote_handles )
03076     {  // Parent/child links are only packed when not storing remote handles
03077         // Pack numbers of parents/children
03078         unsigned int tot_pch = 0;
03079         int num_pch;
03080         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
03081         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03082         {
03083             // Pack parents
03084             result = mbImpl->num_parent_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num parents" );
03085             PACK_INT( buff->buff_ptr, num_pch );
03086             tot_pch += num_pch;
03087             result = mbImpl->num_child_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num children" );
03088             PACK_INT( buff->buff_ptr, num_pch );
03089             tot_pch += num_pch;
03090         }
03091 
03092         // Now pack actual parents/children
03093         members.clear();
03094         members.reserve( tot_pch );
03095         std::vector< EntityHandle > tmp_pch;
03096         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03097         {
03098             result = mbImpl->get_parent_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get parents" );
03099             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
03100             tmp_pch.clear();
03101             result = mbImpl->get_child_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get children" );
03102             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
03103             tmp_pch.clear();
03104         }
03105         assert( members.size() == tot_pch );
03106         if( !members.empty() )
03107         {
03108             result = get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc,
03109                                          entities_vec );MB_CHK_SET_ERR( result, "Failed to get remote handles for set parent/child sets" );
03110 #ifndef NDEBUG
03111             // Check that all handles are either sets or maxtype
03112             for( unsigned int __j = 0; __j < members.size(); __j++ )
03113                 assert( ( TYPE_FROM_HANDLE( members[__j] ) == MBMAXTYPE &&
03114                           ID_FROM_HANDLE( members[__j] ) < (int)entities.size() ) ||
03115                         TYPE_FROM_HANDLE( members[__j] ) == MBENTITYSET );
03116 #endif
03117             buff->check_space( members.size() * sizeof( EntityHandle ) );
03118             PACK_EH( buff->buff_ptr, &members[0], members.size() );
03119         }
03120     }
03121     else
03122     {
03123         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
03124         for( rit = all_sets.begin(); rit != all_sets.end(); ++rit )
03125         {
03126             PACK_INT( buff->buff_ptr, 0 );
03127             PACK_INT( buff->buff_ptr, 0 );
03128         }
03129     }
03130 
03131     // Pack the handles
03132     if( store_remote_handles && !all_sets.empty() )
03133     {
03134         buff_size = RANGE_SIZE( all_sets );
03135         buff->check_space( buff_size );
03136         PACK_RANGE( buff->buff_ptr, all_sets );
03137     }
03138 
03139     myDebug->tprintf( 4, "Done packing sets.\n" );
03140 
03141     buff->set_stored_size();
03142 
03143     return MB_SUCCESS;
03144 }
03145 
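// Inverse of pack_sets(): create (or look up via PARALLEL_UNIQUE_ID) each packed set,
// restore its contents and parent/child links using local handles, and update sharing data
// from the packed source handles when remote handles are stored.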
03146 ErrorCode ParallelComm::unpack_sets( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities,
03147                                      const bool store_remote_handles, const int from_proc )
03148 {
03149     // Now the sets; assume any sets the application wants to pass are in the entities list
03150     ErrorCode result;
03151 
03152     bool no_sets = ( entities.empty() || ( mbImpl->type_from_handle( *entities.rbegin() ) == MBENTITYSET ) );
03153 
03154     Range new_sets;
03155     int num_sets;
03156     UNPACK_INT( buff_ptr, num_sets );
03157 
03158     if( !num_sets ) return MB_SUCCESS;
03159 
03160     int i;
03161     Range::const_iterator rit;
03162     std::vector< EntityHandle > members;
03163     int num_ents;
03164     std::vector< unsigned int > options_vec( num_sets );
03165     // Option value
03166     if( num_sets ) UNPACK_VOID( buff_ptr, &options_vec[0], num_sets * sizeof( unsigned int ) );
03167 
03168     // Unpack parallel geometry unique id
03169     int n_uid;
03170     UNPACK_INT( buff_ptr, n_uid );
03171     if( n_uid > 0 && n_uid != num_sets )
03172     { std::cerr << "The number of parallel geometry unique ids should match the number of sets." << std::endl; }
03173 
03174     if( n_uid > 0 )
03175     {  // Parallel geometry unique ids were packed
03176         std::vector< int > uids( n_uid );
03177         UNPACK_INTS( buff_ptr, &uids[0], n_uid );
03178 
03179         Tag uid_tag;
03180         result =
03181             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
03182 
03183         // Find existing sets
03184         for( i = 0; i < n_uid; i++ )
03185         {
03186             EntityHandle set_handle;
03187             Range temp_sets;
03188             void* tag_vals[] = { &uids[i] };
03189             if( uids[i] > 0 )
03190             { result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &uid_tag, tag_vals, 1, temp_sets ); }
03191             if( !temp_sets.empty() )
03192             {  // Existing set
03193                 set_handle = *temp_sets.begin();
03194             }
03195             else
03196             {  // Create a new set
03197                 result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
03198                 result = mbImpl->tag_set_data( uid_tag, &set_handle, 1, &uids[i] );MB_CHK_SET_ERR( result, "Failed to set parallel geometry unique ids" );
03199             }
03200             new_sets.insert( set_handle );
03201         }
03202     }
03203     else
03204     {
03205         // Create sets
03206         for( i = 0; i < num_sets; i++ )
03207         {
03208             EntityHandle set_handle;
03209             result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
03210 
03211             // Make sure new set handles are monotonically increasing
03212             assert( new_sets.empty() || set_handle > *new_sets.rbegin() );
03213             new_sets.insert( set_handle );
03214         }
03215     }
03216 
03217     std::copy( new_sets.begin(), new_sets.end(), std::back_inserter( entities ) );
03218     // Only need to sort if we came in with no sets on the end
03219     if( !no_sets ) std::sort( entities.begin(), entities.end() );
03220 
03221     for( rit = new_sets.begin(), i = 0; rit != new_sets.end(); ++rit, i++ )
03222     {
03223         // Unpack entities as vector, with length
03224         UNPACK_INT( buff_ptr, num_ents );
03225         members.resize( num_ents );
03226         if( num_ents ) UNPACK_EH( buff_ptr, &members[0], num_ents );
03227         result = get_local_handles( &members[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for ordered set contents" );
03228         result = mbImpl->add_entities( *rit, &members[0], num_ents );MB_CHK_SET_ERR( result, "Failed to add ents to ordered set in unpack" );
03229     }
03230 
03231     std::vector< int > num_pch( 2 * new_sets.size() );
03232     std::vector< int >::iterator vit;
03233     int tot_pch = 0;
03234     for( vit = num_pch.begin(); vit != num_pch.end(); ++vit )
03235     {
03236         UNPACK_INT( buff_ptr, *vit );
03237         tot_pch += *vit;
03238     }
03239 
03240     members.resize( tot_pch );
03241     UNPACK_EH( buff_ptr, &members[0], tot_pch );
03242     result = get_local_handles( &members[0], tot_pch, entities );MB_CHK_SET_ERR( result, "Failed to get local handle for parent/child sets" );
03243 
03244     int num               = 0;
03245     EntityHandle* mem_ptr = &members[0];
03246     for( rit = new_sets.begin(); rit != new_sets.end(); ++rit )
03247     {
03248         // Unpack parents/children
03249         int num_par = num_pch[num++], num_child = num_pch[num++];
03250         if( num_par + num_child )
03251         {
03252             for( i = 0; i < num_par; i++ )
03253             {
03254                 assert( 0 != mem_ptr[i] );
03255                 result = mbImpl->add_parent_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add parent to set in unpack" );
03256             }
03257             mem_ptr += num_par;
03258             for( i = 0; i < num_child; i++ )
03259             {
03260                 assert( 0 != mem_ptr[i] );
03261                 result = mbImpl->add_child_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add child to set in unpack" );
03262             }
03263             mem_ptr += num_child;
03264         }
03265     }
03266 
03267     // Unpack source handles
03268     Range dum_range;
03269     if( store_remote_handles && !new_sets.empty() )
03270     {
03271         UNPACK_RANGE( buff_ptr, dum_range );
03272         result = update_remote_data( new_sets, dum_range, from_proc, 0 );MB_CHK_SET_ERR( result, "Failed to set sharing data for sets" );
03273     }
03274 
03275     myDebug->tprintf( 4, "Done unpacking sets." );
03276 
03277     return MB_SUCCESS;
03278 }
03279 
03280 ErrorCode ParallelComm::pack_adjacencies( Range& /*entities*/, Range::const_iterator& /*start_rit*/,
03281                                           Range& /*whole_range*/, unsigned char*& /*buff_ptr*/, int& /*count*/,
03282                                           const bool /*just_count*/, const bool /*store_handles*/,
03283                                           const int /*to_proc*/ )
03284 {
03285     return MB_FAILURE;
03286 }
03287 
03288 ErrorCode ParallelComm::unpack_adjacencies( unsigned char*& /*buff_ptr*/, Range& /*entities*/,
03289                                             const bool /*store_handles*/, const int /*from_proc*/ )
03290 {
03291     return MB_FAILURE;
03292 }
03293 
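// Pack the given source tags (written under their destination tag names) for the entities in
// the corresponding ranges: reserve space using packed_tag_size(), pack the tag count, then
// one pack_tag() record per tag.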
03294 ErrorCode ParallelComm::pack_tags( Range& entities, const std::vector< Tag >& src_tags,
03295                                    const std::vector< Tag >& dst_tags, const std::vector< Range >& tag_ranges,
03296                                    Buffer* buff, const bool store_remote_handles, const int to_proc )
03297 {
03298     ErrorCode result;
03299     std::vector< Tag >::const_iterator tag_it, dst_it;
03300     std::vector< Range >::const_iterator rit;
03301     int count = 0;
03302 
03303     for( tag_it = src_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end(); ++tag_it, ++rit )
03304     {
03305         result = packed_tag_size( *tag_it, *rit, count );
03306         if( MB_SUCCESS != result ) return result;
03307     }
03308 
03309     // Number of tags
03310     count += sizeof( int );
03311 
03312     buff->check_space( count );
03313 
03314     PACK_INT( buff->buff_ptr, src_tags.size() );
03315 
03316     std::vector< EntityHandle > entities_vec( entities.size() );
03317     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
03318 
03319     for( tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end();
03320          ++tag_it, ++dst_it, ++rit )
03321     {
03322         result = pack_tag( *tag_it, *dst_it, *rit, entities_vec, buff, store_remote_handles, to_proc );
03323         if( MB_SUCCESS != result ) return result;
03324     }
03325 
03326     myDebug->tprintf( 4, "Done packing tags." );
03327 
03328     buff->set_stored_size();
03329 
03330     return MB_SUCCESS;
03331 }
03332 
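// Estimate the number of bytes pack_tag() will need for one tag: default value, size/type
// descriptors, name, the tagged entity handles, and the (possibly variable-length) values.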
03333 ErrorCode ParallelComm::packed_tag_size( Tag tag, const Range& tagged_entities, int& count )
03334 {
03335     // For dense tags, compute size assuming all entities have that tag
03336     // For sparse tags, get number of entities w/ that tag to compute size
03337 
03338     std::vector< int > var_len_sizes;
03339     std::vector< const void* > var_len_values;
03340 
03341     // Default value
03342     count += sizeof( int );
03343     if( NULL != tag->get_default_value() ) count += tag->get_default_value_size();
03344 
03345     // Size, type, data type
03346     count += 3 * sizeof( int );
03347 
03348     // Name
03349     count += sizeof( int );
03350     count += tag->get_name().size();
03351 
03352     // Range of tag
03353     count += sizeof( int ) + tagged_entities.size() * sizeof( EntityHandle );
03354 
03355     if( tag->get_size() == MB_VARIABLE_LENGTH )
03356     {
03357         const int num_ent = tagged_entities.size();
03358         // Send a tag size for each entity
03359         count += num_ent * sizeof( int );
03360         // Send tag data for each entity
03361         var_len_sizes.resize( num_ent );
03362         var_len_values.resize( num_ent );
03363         ErrorCode result =
03364             tag->get_data( sequenceManager, errorHandler, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get lengths of variable-length tag values" );
03365         count += std::accumulate( var_len_sizes.begin(), var_len_sizes.end(), 0 );
03366     }
03367     else
03368     {
03369         // Tag data values for range or vector
03370         count += tagged_entities.size() * tag->get_size();
03371     }
03372 
03373     return MB_SUCCESS;
03374 }
03375 
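// Pack a single tag: descriptor (size, storage type, data type), default value, name, the
// tagged entities converted to remote handles, and finally the tag values themselves.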
03376 ErrorCode ParallelComm::pack_tag( Tag src_tag, Tag dst_tag, const Range& tagged_entities,
03377                                   const std::vector< EntityHandle >& whole_vec, Buffer* buff,
03378                                   const bool store_remote_handles, const int to_proc )
03379 {
03380     ErrorCode result;
03381     std::vector< int > var_len_sizes;
03382     std::vector< const void* > var_len_values;
03383 
03384     if( src_tag != dst_tag )
03385     {
03386         if( dst_tag->get_size() != src_tag->get_size() ) return MB_TYPE_OUT_OF_RANGE;
03387         if( dst_tag->get_data_type() != src_tag->get_data_type() && dst_tag->get_data_type() != MB_TYPE_OPAQUE &&
03388             src_tag->get_data_type() != MB_TYPE_OPAQUE )
03389             return MB_TYPE_OUT_OF_RANGE;
03390     }
03391 
03392     // Size, type, data type
03393     buff->check_space( 3 * sizeof( int ) );
03394     PACK_INT( buff->buff_ptr, src_tag->get_size() );
03395     TagType this_type;
03396     result = mbImpl->tag_get_type( dst_tag, this_type );
03397     PACK_INT( buff->buff_ptr, (int)this_type );
03398     DataType data_type = src_tag->get_data_type();
03399     PACK_INT( buff->buff_ptr, (int)data_type );
03400     int type_size = TagInfo::size_from_data_type( data_type );
03401 
03402     // Default value
03403     if( NULL == src_tag->get_default_value() )
03404     {
03405         buff->check_space( sizeof( int ) );
03406         PACK_INT( buff->buff_ptr, 0 );
03407     }
03408     else
03409     {
03410         buff->check_space( src_tag->get_default_value_size() );
03411         PACK_BYTES( buff->buff_ptr, src_tag->get_default_value(), src_tag->get_default_value_size() );
03412     }
03413 
03414     // Name
03415     buff->check_space( src_tag->get_name().size() );
03416     PACK_BYTES( buff->buff_ptr, dst_tag->get_name().c_str(), dst_tag->get_name().size() );
03417 
03418     myDebug->tprintf( 4, "Packing tag \"%s\"", src_tag->get_name().c_str() );
03419     if( src_tag != dst_tag ) myDebug->tprintf( 4, " (as tag \"%s\")", dst_tag->get_name().c_str() );
03420     myDebug->tprintf( 4, "\n" );
03421 
03422     // Pack entities
03423     buff->check_space( tagged_entities.size() * sizeof( EntityHandle ) + sizeof( int ) );
03424     PACK_INT( buff->buff_ptr, tagged_entities.size() );
03425     std::vector< EntityHandle > dum_tagged_entities( tagged_entities.size() );
03426     result = get_remote_handles( store_remote_handles, tagged_entities, &dum_tagged_entities[0], to_proc, whole_vec );
03427     if( MB_SUCCESS != result )
03428     {
03429         if( myDebug->get_verbosity() == 3 )
03430         {
03431             std::cerr << "Failed to get remote handles for tagged entities:" << std::endl;
03432             tagged_entities.print( "  " );
03433         }
03434         MB_SET_ERR( result, "Failed to get remote handles for tagged entities" );
03435     }
03436 
03437     PACK_EH( buff->buff_ptr, &dum_tagged_entities[0], dum_tagged_entities.size() );
03438 
03439     const size_t num_ent = tagged_entities.size();
03440     if( src_tag->get_size() == MB_VARIABLE_LENGTH )
03441     {
03442         var_len_sizes.resize( num_ent, 0 );
03443         var_len_values.resize( num_ent, 0 );
03444         result = mbImpl->tag_get_by_ptr( src_tag, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get variable-length tag data in pack_tags" );
03445         buff->check_space( num_ent * sizeof( int ) );
03446         PACK_INTS( buff->buff_ptr, &var_len_sizes[0], num_ent );
03447         for( unsigned int i = 0; i < num_ent; i++ )
03448         {
03449             buff->check_space( var_len_sizes[i] );
03450             PACK_VOID( buff->buff_ptr, var_len_values[i], type_size * var_len_sizes[i] );
03451         }
03452     }
03453     else
03454     {
03455         buff->check_space( num_ent * src_tag->get_size() );
03456         // Should be OK to read directly into buffer, since tags are untyped and
03457         // handled by memcpy
03458         result = mbImpl->tag_get_data( src_tag, tagged_entities, buff->buff_ptr );MB_CHK_SET_ERR( result, "Failed to get tag data in pack_tags" );
03459         buff->buff_ptr += num_ent * src_tag->get_size();
03460         PC( num_ent * src_tag->get_size(), " void" );
03461     }
03462 
03463     return MB_SUCCESS;
03464 }
03465 
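// Collect the tags worth sending: skip internal tags (names starting with "__") and tags
// whose tagged entities do not intersect whole_range; return the surviving tags along with
// the intersected ranges they apply to.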
03466 ErrorCode ParallelComm::get_tag_send_list( const Range& whole_range, std::vector< Tag >& all_tags,
03467                                            std::vector< Range >& tag_ranges )
03468 {
03469     std::vector< Tag > tmp_tags;
03470     ErrorCode result = mbImpl->tag_get_tags( tmp_tags );MB_CHK_SET_ERR( result, "Failed to get tags in pack_tags" );
03471 
03472     std::vector< Tag >::iterator tag_it;
03473     for( tag_it = tmp_tags.begin(); tag_it != tmp_tags.end(); ++tag_it )
03474     {
03475         std::string tag_name;
03476         result = mbImpl->tag_get_name( *tag_it, tag_name );
03477         if( tag_name.c_str()[0] == '_' && tag_name.c_str()[1] == '_' ) continue;
03478 
03479         Range tmp_range;
03480         result = ( *tag_it )->get_tagged_entities( sequenceManager, tmp_range );MB_CHK_SET_ERR( result, "Failed to get entities for tag in pack_tags" );
03481         tmp_range = intersect( tmp_range, whole_range );
03482 
03483         if( tmp_range.empty() ) continue;
03484 
03485         // OK, we'll be sending this tag
03486         all_tags.push_back( *tag_it );
03487         tag_ranges.push_back( Range() );
03488         tag_ranges.back().swap( tmp_range );
03489     }
03490 
03491     return MB_SUCCESS;
03492 }
03493 
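// Inverse of pack_tags(): recreate each tag if necessary, convert packed entity handles
// (and handle-valued data) to local handles, and store the unpacked values, optionally
// combining them with existing values through the given MPI reduction operation.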
03494 ErrorCode ParallelComm::unpack_tags( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities,
03495                                      const bool /*store_remote_handles*/, const int /*from_proc*/,
03496                                      const MPI_Op* const mpi_op )
03497 {
03498     // Tags
03499     // Get all the tags
03500     // For dense tags, compute size assuming all entities have that tag
03501     // For sparse tags, get number of entities w/ that tag to compute size
03502 
03503     ErrorCode result;
03504 
03505     int num_tags;
03506     UNPACK_INT( buff_ptr, num_tags );
03507     std::vector< const void* > var_len_vals;
03508     std::vector< unsigned char > dum_vals;
03509     std::vector< EntityHandle > dum_ehvals;
03510 
03511     for( int i = 0; i < num_tags; i++ )
03512     {
03513         // Tag handle
03514         Tag tag_handle;
03515 
03516         // Size, data type
03517         int tag_size, tag_data_type, tag_type;
03518         UNPACK_INT( buff_ptr, tag_size );
03519         UNPACK_INT( buff_ptr, tag_type );
03520         UNPACK_INT( buff_ptr, tag_data_type );
03521 
03522         // Default value
03523         int def_val_size;
03524         UNPACK_INT( buff_ptr, def_val_size );
03525         void* def_val_ptr = NULL;
03526         if( def_val_size )
03527         {
03528             def_val_ptr = buff_ptr;
03529             buff_ptr += def_val_size;
03530             UPC( tag_size, " void" );
03531         }
03532 
03533         // Name
03534         int name_len;
03535         UNPACK_INT( buff_ptr, name_len );
03536         std::string tag_name( reinterpret_cast< char* >( buff_ptr ), name_len );
03537         buff_ptr += name_len;
03538         UPC( 64, " chars" );
03539 
03540         myDebug->tprintf( 4, "Unpacking tag %s\n", tag_name.c_str() );
03541 
03542         // Create the tag
03543         if( tag_size == MB_VARIABLE_LENGTH )
03544             result = mbImpl->tag_get_handle( tag_name.c_str(), def_val_size, (DataType)tag_data_type, tag_handle,
03545                                              MB_TAG_VARLEN | MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
03546         else
03547             result = mbImpl->tag_get_handle( tag_name.c_str(), tag_size, (DataType)tag_data_type, tag_handle,
03548                                              MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
03549         if( MB_SUCCESS != result ) return result;
03550 
03551         // Get handles and convert to local handles
03552         int num_ents;
03553         UNPACK_INT( buff_ptr, num_ents );
03554         std::vector< EntityHandle > dum_ents( num_ents );
03555         UNPACK_EH( buff_ptr, &dum_ents[0], num_ents );
03556 
03557         // In this case handles are indices into new entity range; need to convert
03558         // to local handles
03559         result = get_local_handles( &dum_ents[0], num_ents, entities );MB_CHK_SET_ERR( result, "Unable to convert to local handles" );
03560 
03561         // If it's a handle type, also convert tag vals in-place in buffer
03562         if( MB_TYPE_HANDLE == tag_type )
03563         {
03564             dum_ehvals.resize( num_ents );
03565             UNPACK_EH( buff_ptr, &dum_ehvals[0], num_ents );
03566             result = get_local_handles( &dum_ehvals[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for tag vals" );
03567         }
03568 
03569         DataType data_type;
03570         mbImpl->tag_get_data_type( tag_handle, data_type );
03571         int type_size = TagInfo::size_from_data_type( data_type );
03572 
03573         if( !dum_ents.empty() )
03574         {
03575             if( tag_size == MB_VARIABLE_LENGTH )
03576             {
03577                 // Be careful of alignment here. If the integers are aligned
03578                 // in the buffer, we can use them directly. Otherwise we must
03579                 // copy them.
03580                 std::vector< int > var_lengths( num_ents );
03581                 UNPACK_INTS( buff_ptr, &var_lengths[0], num_ents );
03582                 UPC( sizeof( int ) * num_ents, " void" );
03583 
03584                 // Get pointers into buffer for each tag value
03585                 var_len_vals.resize( num_ents );
03586                 for( std::vector< EntityHandle >::size_type j = 0; j < (std::vector< EntityHandle >::size_type)num_ents;
03587                      j++ )
03588                 {
03589                     var_len_vals[j] = buff_ptr;
03590                     buff_ptr += var_lengths[j] * type_size;
03591                     UPC( var_lengths[j], " void" );
03592                 }
03593                 result =
03594                     mbImpl->tag_set_by_ptr( tag_handle, &dum_ents[0], num_ents, &var_len_vals[0], &var_lengths[0] );MB_CHK_SET_ERR( result, "Failed to set tag data when unpacking variable-length tag" );
03595             }
03596             else
03597             {
03598                 // Get existing values of dst tag
03599                 dum_vals.resize( tag_size * num_ents );
03600                 if( mpi_op )
03601                 {
03602                     int tag_length;
03603                     result = mbImpl->tag_get_length( tag_handle, tag_length );MB_CHK_SET_ERR( result, "Failed to get tag length" );
03604                     result = mbImpl->tag_get_data( tag_handle, &dum_ents[0], num_ents, &dum_vals[0] );MB_CHK_SET_ERR( result, "Failed to get existing value of dst tag on entities" );
03605                     result = reduce_void( tag_data_type, *mpi_op, tag_length * num_ents, &dum_vals[0], buff_ptr );MB_CHK_SET_ERR( result, "Failed to perform mpi op on dst tags" );
03606                 }
03607                 result = mbImpl->tag_set_data( tag_handle, &dum_ents[0], num_ents, buff_ptr );MB_CHK_SET_ERR( result, "Failed to set range-based tag data when unpacking tag" );
03608                 buff_ptr += num_ents * tag_size;
03609                 UPC( num_ents * tag_size, " void" );
03610             }
03611         }
03612     }
03613 
03614     myDebug->tprintf( 4, "Done unpacking tags.\n" );
03615 
03616     return MB_SUCCESS;
03617 }
03618 
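// Element-wise binary functors used by reduce() below to emulate the MPI reduction
// operations on unpacked tag values.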
03619 template < class T >
03620 T LAND( const T& arg1, const T& arg2 )
03621 {
03622     return arg1 && arg2;
03623 }
03624 template < class T >
03625 T LOR( const T& arg1, const T& arg2 )
03626 {
03627     return arg1 || arg2;
03628 }
03629 template < class T >
03630 T LXOR( const T& arg1, const T& arg2 )
03631 {
03632     return ( ( arg1 && !arg2 ) || ( !arg1 && arg2 ) );
03633 }
03634 template < class T >
03635 T MAX( const T& arg1, const T& arg2 )
03636 {
03637     return ( arg1 > arg2 ? arg1 : arg2 );
03638 }
03639 template < class T >
03640 T MIN( const T& arg1, const T& arg2 )
03641 {
03642     return ( arg1 < arg2 ? arg1 : arg2 );
03643 }
03644 template < class T >
03645 T ADD( const T& arg1, const T& arg2 )
03646 {
03647     return arg1 + arg2;
03648 }
03649 template < class T >
03650 T MULT( const T& arg1, const T& arg2 )
03651 {
03652     return arg1 * arg2;
03653 }
03654 
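// Apply the requested MPI reduction element-wise to num_ents values, combining old_vals into
// new_vals; bitwise operations are rejected, and any other unrecognized operation is an error.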
03655 template < class T >
03656 ErrorCode ParallelComm::reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals )
03657 {
03658     T* old_tmp = reinterpret_cast< T* >( old_vals );
03659     // T *new_tmp = reinterpret_cast<T*>(new_vals);
03660     // new_vals may not be properly aligned for type T; some compilers assume alignment when optimizing, so copy into an aligned buffer first
03661 
03662     std::vector< T > new_values;
03663     new_values.resize( num_ents );
03664     memcpy( &new_values[0], new_vals, num_ents * sizeof( T ) );
03665     T* new_tmp = &new_values[0];
03666 
03667     if( mpi_op == MPI_SUM )
03668         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, ADD< T > );
03669     else if( mpi_op == MPI_PROD )
03670         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MULT< T > );
03671     else if( mpi_op == MPI_MAX )
03672         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MAX< T > );
03673     else if( mpi_op == MPI_MIN )
03674         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MIN< T > );
03675     else if( mpi_op == MPI_LAND )
03676         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LAND< T > );
03677     else if( mpi_op == MPI_LOR )
03678         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LOR< T > );
03679     else if( mpi_op == MPI_LXOR )
03680         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LXOR< T > );
03681     else if( mpi_op == MPI_BAND || mpi_op == MPI_BOR || mpi_op == MPI_BXOR )
03682     {
03683         std::cerr << "Bitwise operations not allowed in tag reductions." << std::endl;
03684         return MB_FAILURE;
03685     }
03686     else if( mpi_op != MPI_OP_NULL )
03687     {
03688         std::cerr << "Unknown MPI operation type." << std::endl;
03689         return MB_TYPE_OUT_OF_RANGE;
03690     }
03691 
03692     // Now copy the result back where it belongs
03693     memcpy( new_vals, new_tmp, num_ents * sizeof( T ) );
03694     std::vector< T >().swap( new_values );  // swap with an empty vector to release the allocated memory
03695 
03696     return MB_SUCCESS;
03697 }
03698 
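// Dispatch reduce<T>() based on the tag's data type; opaque and handle tags are left unchanged.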
03699 ErrorCode ParallelComm::reduce_void( int tag_data_type, const MPI_Op mpi_op, int num_ents, void* old_vals,
03700                                      void* new_vals )
03701 {
03702     ErrorCode result;
03703     switch( tag_data_type )
03704     {
03705         case MB_TYPE_INTEGER:
03706             result = reduce< int >( mpi_op, num_ents, old_vals, new_vals );
03707             break;
03708         case MB_TYPE_DOUBLE:
03709             result = reduce< double >( mpi_op, num_ents, old_vals, new_vals );
03710             break;
03711         case MB_TYPE_BIT:
03712             result = reduce< unsigned char >( mpi_op, num_ents, old_vals, new_vals );
03713             break;
03714         default:
03715             result = MB_SUCCESS;
03716             break;
03717     }
03718 
03719     return result;
03720 }
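// Illustrative sketch (values and call below are examples, not from the original source):
// reduce_void dispatches on the MOAB tag data type and applies the MPI reduction element-wise
// via reduce<T>, leaving the combined result in new_vals. Conceptually, for MPI_MAX over three
// doubles:
//
//   double old_vals[3] = { 1.0, 5.0, 2.0 };   // values already held locally
//   double new_vals[3] = { 4.0, 3.0, 2.5 };   // values unpacked from a remote buffer
//   reduce_void( MB_TYPE_DOUBLE, MPI_MAX, 3, old_vals, new_vals );
//   // new_vals now holds { 4.0, 5.0, 2.5 }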
03721 
03722 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, int resolve_dim, int shared_dim, const Tag* id_tag )
03723 {
03724     ErrorCode result;
03725     Range proc_ents;
03726 
03727     // Check for structured mesh; if so, tag shared vertices through the ScdInterface instead
03728     ScdInterface* scdi;
03729     result = mbImpl->query_interface( scdi );
03730     if( scdi )
03731     {
03732         result = scdi->tag_shared_vertices( this, this_set );
03733         if( MB_SUCCESS == result )
03734         {
03735             myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
03736             return result;
03737         }
03738     }
03739 
03740     if( 0 == this_set )
03741     {
03742         // Get the entities in the partition sets
03743         for( Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit )
03744         {
03745             Range tmp_ents;
03746             result = mbImpl->get_entities_by_handle( *rit, tmp_ents, true );
03747             if( MB_SUCCESS != result ) return result;
03748             proc_ents.merge( tmp_ents );
03749         }
03750     }
03751     else
03752     {
03753         result = mbImpl->get_entities_by_handle( this_set, proc_ents, true );
03754         if( MB_SUCCESS != result ) return result;
03755     }
03756 
03757     // Resolve dim is maximal dim of entities in proc_ents
03758     if( -1 == resolve_dim )
03759     {
03760         if( !proc_ents.empty() ) resolve_dim = mbImpl->dimension_from_handle( *proc_ents.rbegin() );
03761     }
03762 
03763     // proc_ents should all be of same dimension
03764     if( resolve_dim > shared_dim &&
03765         mbImpl->dimension_from_handle( *proc_ents.rbegin() ) != mbImpl->dimension_from_handle( *proc_ents.begin() ) )
03766     {
03767         Range::iterator lower = proc_ents.lower_bound( CN::TypeDimensionMap[0].first ),
03768                         upper = proc_ents.upper_bound( CN::TypeDimensionMap[resolve_dim - 1].second );
03769         proc_ents.erase( lower, upper );
03770     }
03771 
03772     // Must call even if we don't have any entities, to make sure the
03773     // collective communication completes on all processes
03774     return resolve_shared_ents( this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag );
03775 }
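// Illustrative usage sketch (hypothetical names, not from the original source): after each rank
// has loaded its piece of the mesh, an application typically resolves shared entities over the
// whole instance (this_set = 0) for a 3D element-based partition with 2D interfaces, roughly as:
//
//   moab::ParallelComm* pcomm = new moab::ParallelComm( mb_iface, MPI_COMM_WORLD );
//   moab::ErrorCode rval      = pcomm->resolve_shared_ents( 0, 3, 2, NULL );
//   // Passing shared_dim = -1 instead lets the call infer it from the entity dimensions.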
03776 
03777 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, Range& proc_ents, int resolve_dim, int shared_dim,
03778                                              Range* skin_ents, const Tag* id_tag )
03779 {
03780 #ifdef MOAB_HAVE_MPE
03781     if( myDebug->get_verbosity() == 2 )
03782     {
03783         define_mpe();
03784         MPE_Log_event( RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents." );
03785     }
03786 #endif
03787 
03788     ErrorCode result;
03789     myDebug->tprintf( 1, "Resolving shared entities.\n" );
03790 
03791     if( resolve_dim < shared_dim )
03792     { MB_SET_ERR( MB_FAILURE, "MOAB does not support vertex-based partitions, only element-based ones" ); }
03793 
03794     if( -1 == shared_dim )
03795     {
03796         if( !proc_ents.empty() )
03797             shared_dim = mbImpl->dimension_from_handle( *proc_ents.begin() ) - 1;
03798         else if( resolve_dim == 3 )
03799             shared_dim = 2;
03800     }
03801     int max_global_resolve_dim = -1;
03802     int err = MPI_Allreduce( &resolve_dim, &max_global_resolve_dim, 1, MPI_INT, MPI_MAX, proc_config().proc_comm() );
03803     if( MPI_SUCCESS != err ) { MB_SET_ERR( MB_FAILURE, "Unable to guess global resolve_dim" ); }
03804     if( shared_dim < 0 || resolve_dim < 0 )
03805     {
03806         // MB_SET_ERR(MB_FAILURE, "Unable to guess shared_dim or resolve_dim");
03807         resolve_dim = max_global_resolve_dim;
03808         shared_dim  = resolve_dim - 1;
03809     }
03810 
03811     // No task has any mesh; nothing to resolve
03812     if( resolve_dim < 0 || shared_dim < 0 ) return MB_SUCCESS;
03813 
03814     // Get the skin entities by dimension
03815     Range tmp_skin_ents[4];
03816 
03817     // Get the entities to be skinned
03818     // Find the skin
03819     int skin_dim = resolve_dim - 1;
03820     if( !skin_ents )
03821     {
03822         skin_ents              = tmp_skin_ents;
03823         skin_ents[resolve_dim] = proc_ents;
03824         Skinner skinner( mbImpl );
03825         result =
03826             skinner.find_skin( this_set, skin_ents[skin_dim + 1], false, skin_ents[skin_dim], NULL, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
03827         myDebug->tprintf( 1, "Found skin:   skin_dim: %d resolve_dim: %d , now resolving.\n", skin_dim, resolve_dim );
03828         myDebug->tprintf( 3, "skin_ents[0].size(): %d skin_ents[1].size(): %d  \n", (int)skin_ents[0].size(),
03829                           (int)skin_ents[1].size() );
03830         // Get entities adjacent to skin ents from shared_dim down to zero
03831         for( int this_dim = skin_dim - 1; this_dim >= 0; this_dim-- )
03832         {
03833             result =
03834                 mbImpl->get_adjacencies( skin_ents[skin_dim], this_dim, true, skin_ents[this_dim], Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
03835 
03836             if( this_set && skin_dim == 2 && this_dim == 1 )
03837             {
03838                 result = mbImpl->add_entities( this_set, skin_ents[this_dim] );MB_CHK_ERR( result );
03839             }
03840         }
03841     }
03842     else if( skin_ents[resolve_dim].empty() )
03843         skin_ents[resolve_dim] = proc_ents;
03844 
03845     // Global id tag
03846     Tag gid_tag;
03847     if( id_tag )
03848         gid_tag = *id_tag;
03849     else
03850     {
03851         bool tag_created = false;
03852         int def_val      = -1;
03853         result = mbImpl->tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE | MB_TAG_CREAT,
03854                                          &def_val, &tag_created );
03855         if( MB_ALREADY_ALLOCATED != result && MB_SUCCESS != result )
03856         { MB_SET_ERR( result, "Failed to create/get gid tag handle" ); }
03857         else if( tag_created )
03858         {
03859             // Just created it, so we need global ids
03860             result = assign_global_ids( this_set, skin_dim + 1, true, true, true );MB_CHK_SET_ERR( result, "Failed to assign global ids" );
03861         }
03862     }
03863 
03864     DataType tag_type;
03865     result = mbImpl->tag_get_data_type( gid_tag, tag_type );MB_CHK_SET_ERR( result, "Failed to get tag data type" );
03866     int bytes_per_tag;
03867     result = mbImpl->tag_get_bytes( gid_tag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed to get number of bytes per tag" );
03868     // On 64-bit platforms, long and int have different sizes; on 32-bit platforms they do not.
03869     // If sizeof(long) is 8, assume a 64-bit machine.
03870 
03871     // Get gids for skin ents in a vector, to pass to gs
03872     std::vector< long > lgid_data( skin_ents[0].size() );
03873     // The id tag is stored either as long or as int,
03874     // i.e. 8 or 4 bytes on a 64-bit platform
03875     if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
03876     {  // It is a special id tag
03877         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &lgid_data[0] );MB_CHK_SET_ERR( result, "Couldn't get gid tag for skin vertices" );
03878     }
03879     else if( 4 == bytes_per_tag )
03880     {  // Must be the GLOBAL_ID tag, or some other 32-bit integer id tag
03881         std::vector< int > gid_data( lgid_data.size() );
03882         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &gid_data[0] );MB_CHK_SET_ERR( result, "Failed to get gid tag for skin vertices" );
03883         std::copy( gid_data.begin(), gid_data.end(), lgid_data.begin() );
03884     }
03885     else
03886     {
03887         // Unsupported id tag size/type combination
03888         MB_SET_ERR( MB_FAILURE, "Unsupported id tag" );
03889     }
03890 
03891     // Put handles in vector for passing to gs setup
03892     std::vector< Ulong > handle_vec;  // Assumes that we can do conversion from Ulong to EntityHandle
03893     std::copy( skin_ents[0].begin(), skin_ents[0].end(), std::back_inserter( handle_vec ) );
03894 
03895 #ifdef MOAB_HAVE_MPE
03896     if( myDebug->get_verbosity() == 2 )
03897     { MPE_Log_event( SHAREDV_START, procConfig.proc_rank(), "Creating crystal router." ); }
03898 #endif
03899 
03900     // Get a crystal router
03901     gs_data::crystal_data* cd = procConfig.crystal_router();
03902 
03903     /*
03904     // Get total number of entities; will overshoot highest global id, but
03905     // that's OK
03906     int num_total[2] = {0, 0}, num_local[2] = {0, 0};
03907     result = mbImpl->get_number_entities_by_dimension(this_set, 0, num_local);
03908     if (MB_SUCCESS != result)return result;
03909     int failure = MPI_Allreduce(num_local, num_total, 1,
03910     MPI_INT, MPI_SUM, procConfig.proc_comm());
03911     if (failure) {
03912       MB_SET_ERR(MB_FAILURE, "Allreduce for total number of shared ents failed");
03913     }
03914     */
03915     // Call gather-scatter to get shared ids & procs
03916     gs_data* gsd = new gs_data();
03917     // assert(sizeof(ulong_) == sizeof(EntityHandle));
03918     result = gsd->initialize( skin_ents[0].size(), &lgid_data[0], &handle_vec[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
03919 
03920     // Get shared proc tags
03921     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
03922     result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
03923 
03924     // Load shared verts into a tuple, then sort by index
03925     TupleList shared_verts;
03926     shared_verts.initialize( 2, 0, 1, 0, skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 ) );
03927     shared_verts.enableWriteAccess();
03928 
03929     unsigned int i = 0, j = 0;
03930     for( unsigned int p = 0; p < gsd->nlinfo->_np; p++ )
03931         for( unsigned int np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
03932         {
03933             shared_verts.vi_wr[i++] = gsd->nlinfo->_sh_ind[j];
03934             shared_verts.vi_wr[i++] = gsd->nlinfo->_target[p];
03935             shared_verts.vul_wr[j]  = gsd->nlinfo->_ulabels[j];
03936             j++;
03937             shared_verts.inc_n();
03938         }
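    // Note: each tuple now holds (index of the vertex in skin_ents[0], sharing proc rank) in its
    // two integer fields and the remote handle of that vertex in its Ulong field; the sort on
    // field 0 below groups all sharing procs of a given skin vertex together.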
03939 
03940     myDebug->tprintf( 3, " shared verts size %d \n", (int)shared_verts.get_n() );
03941 
03942     int max_size = skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 );
03943     moab::TupleList::buffer sort_buffer;
03944     sort_buffer.buffer_init( max_size );
03945     shared_verts.sort( 0, &sort_buffer );
03946     sort_buffer.reset();
03947 
03948     // Set sharing procs and handles tags on skin ents
03949     int maxp = -1;
03950     std::vector< int > sharing_procs( MAX_SHARING_PROCS );
03951     std::fill( sharing_procs.begin(), sharing_procs.end(), maxp );
03952     j = 0;
03953     i = 0;
03954 
03955     // Get ents shared by 1 or n procs
03956     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
03957     Range proc_verts;
03958     result = mbImpl->get_adjacencies( proc_ents, 0, false, proc_verts, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get proc_verts" );
03959 
03960     myDebug->print( 3, " resolve shared ents:  proc verts ", proc_verts );
03961     result = tag_shared_verts( shared_verts, skin_ents, proc_nvecs, proc_verts );MB_CHK_SET_ERR( result, "Failed to tag shared verts" );
03962 
03963 #ifdef MOAB_HAVE_MPE
03964     if( myDebug->get_verbosity() == 2 )
03965     { MPE_Log_event( SHAREDV_END, procConfig.proc_rank(), "Finished tag_shared_verts." ); }
03966 #endif
03967 
03968     // Get entities shared by 1 or n procs
03969     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );MB_CHK_SET_ERR( result, "Failed to tag shared entities" );
03970 
03971     shared_verts.reset();
03972 
03973     if( myDebug->get_verbosity() > 0 )
03974     {
03975         for( std::map< std::vector< int >, std::vector< EntityHandle > >::const_iterator mit = proc_nvecs.begin();
03976              mit != proc_nvecs.end(); ++mit )
03977         {
03978             myDebug->tprintf( 1, "Iface: " );
03979             for( std::vector< int >::const_iterator vit = ( mit->first ).begin(); vit != ( mit->first ).end(); ++vit )
03980                 myDebug->printf( 1, " %d", *vit );
03981             myDebug->print( 1, "\n" );
03982         }
03983     }
03984 
03985     // Create the sets for each interface; store them as tags on
03986     // the interface instance
03987     Range iface_sets;
03988     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
03989 
03990     // Establish comm procs and buffers for them
03991     std::set< unsigned int > procs;
03992     result = get_interface_procs( procs, true );MB_CHK_SET_ERR( result, "Failed to get interface procs" );
03993 
03994 #ifndef NDEBUG
03995     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Shared handle check failed after interface vertex exchange" );
03996 #endif
03997 
03998     // Resolve shared entity remote handles; implemented in ghost cell exchange
03999     // code because it's so similar
04000     result = exchange_ghost_cells( -1, -1, 0, 0, true, true );MB_CHK_SET_ERR( result, "Failed to resolve shared entity remote handles" );
04001 
04002     // Now build parent/child links for interface sets
04003     result = create_iface_pc_links();MB_CHK_SET_ERR( result, "Failed to create interface parent/child links" );
04004 
04005     gsd->reset();
04006     delete gsd;
04007 
04008 #ifdef MOAB_HAVE_MPE
04009     if( myDebug->get_verbosity() == 2 )
04010     { MPE_Log_event( RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents." ); }
04011 #endif
04012 
04013     // std::ostringstream ent_str;
04014     // ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
04015     // mbImpl->write_mesh(ent_str.str().c_str());
04016 
04017     // Done
04018     return result;
04019 }
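// Recap: the resolution above proceeds as skin extraction, then a gather-scatter of global ids
// over the skin vertices, then tagging of shared vertices and higher-dimensional entities, then
// interface set creation, then remote handle resolution via exchange_ghost_cells with zero ghost
// layers, and finally parent/child links between interface sets.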
04020 
04021 void ParallelComm::define_mpe()
04022 {
04023 #ifdef MOAB_HAVE_MPE
04024     if( myDebug->get_verbosity() == 2 )
04025     {
04026         // Define mpe states used for logging
04027         int success;
04028         MPE_Log_get_state_eventIDs( &IFACE_START, &IFACE_END );
04029         MPE_Log_get_state_eventIDs( &GHOST_START, &GHOST_END );
04030         MPE_Log_get_state_eventIDs( &SHAREDV_START, &SHAREDV_END );
04031         MPE_Log_get_state_eventIDs( &RESOLVE_START, &RESOLVE_END );
04032         MPE_Log_get_state_eventIDs( &ENTITIES_START, &ENTITIES_END );
04033         MPE_Log_get_state_eventIDs( &RHANDLES_START, &RHANDLES_END );
04034         MPE_Log_get_state_eventIDs( &OWNED_START, &OWNED_END );
04035         success = MPE_Describe_state( IFACE_START, IFACE_END, "Resolve interface ents", "green" );
04036         assert( MPE_LOG_OK == success );
04037         success = MPE_Describe_state( GHOST_START, GHOST_END, "Exchange ghost ents", "red" );
04038         assert( MPE_LOG_OK == success );
04039         success = MPE_Describe_state( SHAREDV_START, SHAREDV_END, "Resolve interface vertices", "blue" );
04040         assert( MPE_LOG_OK == success );
04041         success = MPE_Describe_state( RESOLVE_START, RESOLVE_END, "Resolve shared ents", "purple" );
04042         assert( MPE_LOG_OK == success );
04043         success = MPE_Describe_state( ENTITIES_START, ENTITIES_END, "Exchange shared ents", "yellow" );
04044         assert( MPE_LOG_OK == success );
04045         success = MPE_Describe_state( RHANDLES_START, RHANDLES_END, "Remote handles", "cyan" );
04046         assert( MPE_LOG_OK == success );
04047         success = MPE_Describe_state( OWNED_START, OWNED_END, "Exchange owned ents", "black" );
04048         assert( MPE_LOG_OK == success );
04049     }
04050 #endif
04051 }
04052 
04053 ErrorCode ParallelComm::resolve_shared_ents( ParallelComm** pc, const unsigned int np, EntityHandle this_set,
04054                                              const int part_dim )
04055 {
04056     std::vector< Range > verts( np );
04057     int tot_verts = 0;
04058     unsigned int p, i, j, v;
04059     ErrorCode rval;
04060     for( p = 0; p < np; p++ )
04061     {
04062         Skinner skinner( pc[p]->get_moab() );
04063         Range part_ents, skin_ents;
04064         rval = pc[p]->get_moab()->get_entities_by_dimension( this_set, part_dim, part_ents );
04065         if( MB_SUCCESS != rval ) return rval;
04066         rval = skinner.find_skin( this_set, part_ents, false, skin_ents, 0, true, true, true );
04067         if( MB_SUCCESS != rval ) return rval;
04068         rval = pc[p]->get_moab()->get_adjacencies( skin_ents, 0, true, verts[p], Interface::UNION );
04069         if( MB_SUCCESS != rval ) return rval;
04070         tot_verts += verts[p].size();
04071     }
04072 
04073     TupleList shared_ents;
04074     shared_ents.initialize( 2, 0, 1, 0, tot_verts );
04075     shared_ents.enableWriteAccess();
04076 
04077     i = 0;
04078     j = 0;
04079     std::vector< int > gids;
04080     Range::iterator rit;
04081     Tag gid_tag;
04082     for( p = 0; p < np; p++ )
04083     {
04084         gid_tag = pc[p]->get_moab()->globalId_tag();
04085 
04086         gids.resize( verts[p].size() );
04087         rval = pc[p]->get_moab()->tag_get_data( gid_tag, verts[p], &gids[0] );
04088         if( MB_SUCCESS != rval ) return rval;
04089 
04090         for( v = 0, rit = verts[p].begin(); v < gids.size(); v++, ++rit )
04091         {
04092             shared_ents.vi_wr[i++] = gids[v];
04093             shared_ents.vi_wr[i++] = p;
04094             shared_ents.vul_wr[j]  = *rit;
04095             j++;
04096             shared_ents.inc_n();
04097         }
04098     }
04099 
04100     moab::TupleList::buffer sort_buffer;
04101     sort_buffer.buffer_init( tot_verts );
04102     shared_ents.sort( 0, &sort_buffer );
04103     sort_buffer.reset();
04104 
04105     j = 0;
04106     i = 0;
04107     std::vector< EntityHandle > handles;
04108     std::vector< int > procs;
04109 
04110     while( i < shared_ents.get_n() )
04111     {
04112         handles.clear();
04113         procs.clear();
04114 
04115         // Count & accumulate sharing procs
04116         int this_gid = shared_ents.vi_rd[j];
04117         while( i < shared_ents.get_n() && shared_ents.vi_rd[j] == this_gid )
04118         {
04119             j++;
04120             procs.push_back( shared_ents.vi_rd[j++] );
04121             handles.push_back( shared_ents.vul_rd[i++] );
04122         }
04123         if( 1 == procs.size() ) continue;
04124 
04125         for( v = 0; v < procs.size(); v++ )
04126         {
04127             rval = pc[procs[v]]->update_remote_data( handles[v], &procs[0], &handles[0], procs.size(),
04128                                                      ( procs[0] == (int)pc[procs[v]]->rank()
04129                                                            ? PSTATUS_INTERFACE
04130                                                            : ( PSTATUS_NOT_OWNED | PSTATUS_INTERFACE ) ) );
04131             if( MB_SUCCESS != rval ) return rval;
04132         }
04133     }
04134 
04135     std::set< unsigned int > psets;
04136     for( p = 0; p < np; p++ )
04137     {
04138         rval = pc[p]->create_interface_sets( this_set, part_dim, part_dim - 1 );
04139         if( MB_SUCCESS != rval ) return rval;
04140         // Establish comm procs and buffers for them
04141         psets.clear();
04142         rval = pc[p]->get_interface_procs( psets, true );
04143         if( MB_SUCCESS != rval ) return rval;
04144     }
04145 
04146     shared_ents.reset();
04147 
04148     return MB_SUCCESS;
04149 }
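// Note: this overload resolves sharing across several ParallelComm instances held in the same
// process (pc[0] .. pc[np-1]); it mirrors the MPI-based path but applies sharing data directly
// through per-instance update_remote_data calls instead of routing tuples through a crystal
// router, and appears intended mainly for serial testing of the resolution logic.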
04150 
04151 ErrorCode ParallelComm::tag_iface_entities()
04152 {
04153     ErrorCode result = MB_SUCCESS;
04154     Range iface_ents, tmp_ents, rmv_ents;
04155     std::vector< unsigned char > pstat;
04156     unsigned char set_pstat;
04157     Range::iterator rit2;
04158     unsigned int i;
04159 
04160     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
04161     {
04162         iface_ents.clear();
04163 
04164         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get interface set contents" );
04165         pstat.resize( iface_ents.size() );
04166         result = mbImpl->tag_get_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set entities" );
04167         result = mbImpl->tag_get_data( pstatus_tag(), &( *rit ), 1, &set_pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set" );
04168         rmv_ents.clear();
04169         for( rit2 = iface_ents.begin(), i = 0; rit2 != iface_ents.end(); ++rit2, i++ )
04170         {
04171             if( !( pstat[i] & PSTATUS_INTERFACE ) )
04172             {
04173                 rmv_ents.insert( *rit2 );
04174                 pstat[i] = 0x0;
04175             }
04176         }
04177         result = mbImpl->remove_entities( *rit, rmv_ents );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
04178 
04179         if( !( set_pstat & PSTATUS_NOT_OWNED ) ) continue;
04180         // If we're here, we need to set the notowned status on (remaining) set contents
04181 
04182         // Remove rmv_ents from the contents list
04183         iface_ents = subtract( iface_ents, rmv_ents );
04184         // Compress the pstat vector (removing 0x0's)
04185         std::remove_if( pstat.begin(), pstat.end(),
04186                         std::bind( std::equal_to< unsigned char >(), std::placeholders::_1, 0x0 ) );
04187         // std::bind2nd(std::equal_to<unsigned char>(), 0x0));
04188         // https://stackoverflow.com/questions/32739018/a-replacement-for-stdbind2nd
04189         // Fold the not_owned bit into remaining values
04190         unsigned int sz = iface_ents.size();
04191         for( i = 0; i < sz; i++ )
04192             pstat[i] |= PSTATUS_NOT_OWNED;
04193 
04194         // Set the tag on the entities
04195         result = mbImpl->tag_set_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus values for interface set entities" );
04196     }
04197 
04198     return MB_SUCCESS;
04199 }
04200 
04201 ErrorCode ParallelComm::set_pstatus_entities( Range& pstatus_ents, unsigned char pstatus_val, bool lower_dim_ents,
04202                                               bool verts_too, int operation )
04203 {
04204     std::vector< unsigned char > pstatus_vals( pstatus_ents.size() );
04205     Range all_ents, *range_ptr = &pstatus_ents;
04206     ErrorCode result;
04207     if( lower_dim_ents || verts_too )
04208     {
04209         all_ents      = pstatus_ents;
04210         range_ptr     = &all_ents;
04211         int start_dim = ( lower_dim_ents ? mbImpl->dimension_from_handle( *pstatus_ents.rbegin() ) - 1 : 0 );
04212         for( ; start_dim >= 0; start_dim-- )
04213         {
04214             result = mbImpl->get_adjacencies( all_ents, start_dim, true, all_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get adjacencies for pstatus entities" );
04215         }
04216     }
04217     if( Interface::UNION == operation )
04218     {
04219         result = mbImpl->tag_get_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
04220         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
04221             pstatus_vals[i] |= pstatus_val;
04222     }
04223     else
04224     {
04225         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
04226             pstatus_vals[i] = pstatus_val;
04227     }
04228     result = mbImpl->tag_set_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
04229 
04230     return MB_SUCCESS;
04231 }
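// Illustrative usage sketch (hypothetical names, not from the original source): marking a range
// of received entities as not-owned ghosts, including their lower-dimensional adjacencies and
// vertices, might look like:
//
//   moab::Range recvd_ents;  // entities created from a remote rank's buffer
//   moab::ErrorCode rv = pcomm->set_pstatus_entities( recvd_ents, PSTATUS_GHOST | PSTATUS_NOT_OWNED,
//                                                     true /*lower_dim_ents*/, true /*verts_too*/,
//                                                     moab::Interface::UNION );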
04232 
04233 ErrorCode ParallelComm::set_pstatus_entities( EntityHandle* pstatus_ents, int num_ents, unsigned char pstatus_val,
04234                                               bool lower_dim_ents, bool verts_too, int operation )
04235 {
04236     std::vector< unsigned char > pstatus_vals( num_ents );
04237     ErrorCode result;
04238     if( lower_dim_ents || verts_too )
04239     {
04240         // In this case, call the range-based version
04241         Range tmp_range;
04242         std::copy( pstatus_ents, pstatus_ents + num_ents, range_inserter( tmp_range ) );
04243         return set_pstatus_entities( tmp_range, pstatus_val, lower_dim_ents, verts_too, operation );
04244     }
04245 
04246     if( Interface::UNION == operation )
04247     {
04248         result = mbImpl->tag_get_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
04249         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
04250             pstatus_vals[i] |= pstatus_val;
04251     }
04252     else
04253     {
04254         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
04255             pstatus_vals[i] = pstatus_val;
04256     }
04257     result = mbImpl->tag_set_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
04258 
04259     return MB_SUCCESS;
04260 }
04261 
04262 static size_t choose_owner_idx( const std::vector< unsigned >& proc_list )
04263 {
04264     // Try to assign owners pseudo-randomly so we get a good distribution
04265     // (note: all procs must compute the same seed so they select the same owner)
04266     unsigned val = 0;
04267     for( size_t i = 0; i < proc_list.size(); i++ )
04268         val ^= proc_list[i];
04269     srand( (int)( val ) );
04270     return rand() % proc_list.size();
04271 }
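// Note: every processor sharing a given set sees the same proc_list, so the XOR-derived seed,
// and hence the chosen owner index, is identical on all of them. For example, for
// proc_list = {2, 5, 7} every rank computes the seed 2 ^ 5 ^ 7 = 0 and picks the same
// rand() % 3 entry.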
04272 
04273 struct set_tuple
04274 {
04275     unsigned idx;
04276     unsigned proc;
04277     EntityHandle handle;
04278     inline bool operator<( set_tuple other ) const
04279     {
04280         return ( idx == other.idx ) ? ( proc < other.proc ) : ( idx < other.idx );
04281     }
04282 };
04283 
04284 ErrorCode ParallelComm::resolve_shared_sets( EntityHandle file, const Tag* idtag )
04285 {
04286     // Find all sets with any of the following tags:
04287     const char* const shared_set_tag_names[] = { GEOM_DIMENSION_TAG_NAME, MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME,
04288                                                  NEUMANN_SET_TAG_NAME, PARALLEL_PARTITION_TAG_NAME };
04289     int num_tags                             = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
04290     Range candidate_sets;
04291     ErrorCode result = MB_FAILURE;
04292 
04293     // If we're not given an ID tag to use to globally identify sets,
04294     // then fall back to using known tag values
04295     if( !idtag )
04296     {
04297         Tag gid, tag;
04298         gid = mbImpl->globalId_tag();
04299         if( NULL != gid ) result = mbImpl->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tag );
04300         if( MB_SUCCESS == result )
04301         {
04302             for( int d = 0; d < 4; d++ )
04303             {
04304                 candidate_sets.clear();
04305                 const void* vals[] = { &d };
04306                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, vals, 1, candidate_sets );
04307                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, gid );
04308             }
04309         }
04310 
04311         for( int i = 1; i < num_tags; i++ )
04312         {
04313             result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag );
04314             if( MB_SUCCESS == result )
04315             {
04316                 candidate_sets.clear();
04317                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets );
04318                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, tag );
04319             }
04320         }
04321 
04322         return MB_SUCCESS;
04323     }
04324 
04325     for( int i = 0; i < num_tags; i++ )
04326     {
04327         Tag tag;
04328         result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag, MB_TAG_ANY );
04329         if( MB_SUCCESS != result ) continue;
04330 
04331         mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets, Interface::UNION );
04332     }
04333 
04334     // Find any additional sets that contain shared entities
04335     Range::iterator hint = candidate_sets.begin();
04336     Range all_sets;
04337     mbImpl->get_entities_by_type( file, MBENTITYSET, all_sets );
04338     all_sets           = subtract( all_sets, candidate_sets );
04339     Range::iterator it = all_sets.begin();
04340     while( it != all_sets.end() )
04341     {
04342         Range contents;
04343         mbImpl->get_entities_by_handle( *it, contents );
04344         contents.erase( contents.lower_bound( MBENTITYSET ), contents.end() );
04345         filter_pstatus( contents, PSTATUS_SHARED, PSTATUS_OR );
04346         if( contents.empty() ) { ++it; }
04347         else
04348         {
04349             hint = candidate_sets.insert( hint, *it );
04350             it   = all_sets.erase( it );
04351         }
04352     }
04353 
04354     // Find any additional sets that contain or are parents of potential shared sets
04355     Range prev_list = candidate_sets;
04356     while( !prev_list.empty() )
04357     {
04358         it = all_sets.begin();
04359         Range new_list;
04360         hint = new_list.begin();
04361         while( it != all_sets.end() )
04362         {
04363             Range contents;
04364             mbImpl->get_entities_by_type( *it, MBENTITYSET, contents );
04365             if( !intersect( prev_list, contents ).empty() )
04366             {
04367                 hint = new_list.insert( hint, *it );
04368                 it   = all_sets.erase( it );
04369             }
04370             else
04371             {
04372                 contents.clear();  // reuse contents for the children; the accumulated new_list must be kept
04373                 mbImpl->get_child_meshsets( *it, contents );
04374                 if( !intersect( prev_list, contents ).empty() )
04375                 {
04376                     hint = new_list.insert( hint, *it );
04377                     it   = all_sets.erase( it );
04378                 }
04379                 else
04380                 {
04381                     ++it;
04382                 }
04383             }
04384         }
04385 
04386         candidate_sets.merge( new_list );
04387         prev_list.swap( new_list );
04388     }
04389 
04390     return resolve_shared_sets( candidate_sets, *idtag );
04391 }
04392 
04393 #ifndef NDEBUG
04394 bool is_sorted_unique( std::vector< unsigned >& v )
04395 {
04396     for( size_t i = 1; i < v.size(); i++ )
04397         if( v[i - 1] >= v[i] ) return false;
04398     return true;
04399 }
04400 #endif
04401 
04402 ErrorCode ParallelComm::resolve_shared_sets( Range& sets, Tag idtag )
04403 {
04404     ErrorCode result;
04405     const unsigned rk = proc_config().proc_rank();
04406     MPI_Comm cm       = proc_config().proc_comm();
04407 
04408     // Build sharing list for all sets
04409 
04410     // Get ids for sets in a vector, to pass to gs
04411     std::vector< long > larray;  // Set ids widened to long for the gather-scatter
04412     std::vector< Ulong > handles;
04413     Range tmp_sets;
04414     // The id tag can be 4 or 8 bytes in size;
04415     // based on that, convert to int or to long, just as we do
04416     // when resolving shared vertices.
04417     // This code must also work on 32-bit platforms, where long is 4 bytes,
04418     // so check the 4-byte case as well.
04419     DataType tag_type;
04420     result = mbImpl->tag_get_data_type( idtag, tag_type );MB_CHK_SET_ERR( result, "Failed getting tag data type" );
04421     int bytes_per_tag;
04422     result = mbImpl->tag_get_bytes( idtag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed getting number of bytes per tag" );
04423     // On 64-bit platforms, long and int have different sizes; on 32-bit platforms they do not.
04424     // If sizeof(long) is 8, assume a 64-bit machine.
04425 
04426     for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
04427     {
04428         if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
04429         {  // It is a special id tag
04430             long dum;
04431             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
04432             if( MB_SUCCESS == result )
04433             {
04434                 larray.push_back( dum );
04435                 handles.push_back( *rit );
04436                 tmp_sets.insert( tmp_sets.end(), *rit );
04437             }
04438         }
04439         else if( 4 == bytes_per_tag )
04440         {  // Must be GLOBAL_ID tag or MATERIAL_ID, etc
04441             int dum;
04442             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
04443             if( MB_SUCCESS == result )
04444             {
04445                 larray.push_back( dum );
04446                 handles.push_back( *rit );
04447                 tmp_sets.insert( tmp_sets.end(), *rit );
04448             }
04449         }
04450     }
04451 
04452     const size_t nsets = handles.size();
04453 
04454     // Get handle array for sets
04455     // Note: the assertion below is disabled because it fails on 64-bit Windows, where EntityHandle is 64 bits but long is 32
04456     // assert(sizeof(EntityHandle) <= sizeof(unsigned long));
04457 
04458     // Do communication of data
04459     gs_data::crystal_data* cd = procConfig.crystal_router();
04460     gs_data* gsd              = new gs_data();
04461     result                    = gsd->initialize( nsets, &larray[0], &handles[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
04462 
04463     // Convert from global IDs grouped by process rank to list
04464     // of <idx, rank> pairs so that we can sort primarily
04465     // by idx and secondarily by rank (we want lists of procs for each
04466     // idx, not lists of indices for each proc).
04467     size_t ntuple = 0;
04468     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
04469         ntuple += gsd->nlinfo->_nshared[p];
04470     std::vector< set_tuple > tuples;
04471     tuples.reserve( ntuple );
04472     size_t j = 0;
04473     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
04474     {
04475         for( unsigned np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
04476         {
04477             set_tuple t;
04478             t.idx    = gsd->nlinfo->_sh_ind[j];
04479             t.proc   = gsd->nlinfo->_target[p];
04480             t.handle = gsd->nlinfo->_ulabels[j];
04481             tuples.push_back( t );
04482             j++;
04483         }
04484     }
04485     std::sort( tuples.begin(), tuples.end() );
04486 
04487     // Release the gather-scatter data
04488     gsd->reset();
04489     delete gsd;
04490 
04491     // Storing sharing data for each set
04492     size_t ti    = 0;
04493     unsigned idx = 0;
04494     std::vector< unsigned > procs;
04495     Range::iterator si = tmp_sets.begin();
04496     while( si != tmp_sets.end() && ti < tuples.size() )
04497     {
04498         assert( idx <= tuples[ti].idx );
04499         if( idx < tuples[ti].idx ) si += ( tuples[ti].idx - idx );
04500         idx = tuples[ti].idx;
04501 
04502         procs.clear();
04503         size_t ti_init = ti;
04504         while( ti < tuples.size() && tuples[ti].idx == idx )
04505         {
04506             procs.push_back( tuples[ti].proc );
04507             ++ti;
04508         }
04509         assert( is_sorted_unique( procs ) );
04510 
04511         result = sharedSetData->set_sharing_procs( *si, procs );
04512         if( MB_SUCCESS != result )
04513         {
04514             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04515             std::cerr.flush();
04516             MPI_Abort( cm, 1 );
04517         }
04518 
04519         // Add this proc to list of sharing procs in correct position
04520         // so that all procs select owner based on same list
04521         std::vector< unsigned >::iterator it = std::lower_bound( procs.begin(), procs.end(), rk );
04522         assert( it == procs.end() || *it > rk );
04523         procs.insert( it, rk );
04524         size_t owner_idx = choose_owner_idx( procs );
04525         EntityHandle owner_handle;
04526         if( procs[owner_idx] == rk )
04527             owner_handle = *si;
04528         else if( procs[owner_idx] > rk )
04529             owner_handle = tuples[ti_init + owner_idx - 1].handle;
04530         else
04531             owner_handle = tuples[ti_init + owner_idx].handle;
04532         result = sharedSetData->set_owner( *si, procs[owner_idx], owner_handle );
04533         if( MB_SUCCESS != result )
04534         {
04535             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04536             std::cerr.flush();
04537             MPI_Abort( cm, 1 );
04538         }
04539 
04540         ++si;
04541         ++idx;
04542     }
04543 
04544     return MB_SUCCESS;
04545 }
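// Note: in the owner-selection loop above, 'tuples' lists only the other sharing processors,
// while 'procs' also contains this rank (rk) inserted in sorted order; that is why, when the
// chosen owner's rank is greater than rk, the matching tuple index is owner_idx - 1 relative to
// ti_init rather than owner_idx.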
04546 // populate sets with ghost entities, if necessary
04547 ErrorCode ParallelComm::augment_default_sets_with_ghosts( EntityHandle file_set )
04548 {
04549     // Gather all default sets we are interested in (material, Neumann, etc.).
04550     // We skip geometry sets because they are not uniquely identified by their tag value;
04551     // a category-like tag may be added for them later.
04552 
04553     if( procConfig.proc_size() < 2 ) return MB_SUCCESS;  // nothing to do in serial
04554     const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
04555                                                  PARALLEL_PARTITION_TAG_NAME };
04556 
04557     int num_tags = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
04558 
04559     Range* rangeSets = new Range[num_tags];
04560     Tag* tags        = new Tag[num_tags + 1];  // one extra for global id tag, which is an int, so far
04561 
04562     int my_rank   = rank();
04563     int** tagVals = new int*[num_tags];
04564     for( int i = 0; i < num_tags; i++ )
04565         tagVals[i] = NULL;
04566     ErrorCode rval;
04567 
04568     // For each tag we keep a local map from the tag value to the set with that value.
04569     // We assume the tag values are unique per set; otherwise we would not know
04570     // which set to add the entity to.
04571 
04572     typedef std::map< int, EntityHandle > MVal;
04573     typedef std::map< int, EntityHandle >::iterator itMVal;
04574     MVal* localMaps = new MVal[num_tags];
04575 
04576     for( int i = 0; i < num_tags; i++ )
04577     {
04578 
04579         rval = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tags[i], MB_TAG_ANY );
04580         if( MB_SUCCESS != rval ) continue;
04581         rval = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &( tags[i] ), 0, 1, rangeSets[i],
04582                                                      Interface::UNION );MB_CHK_SET_ERR( rval, "can't get sets with a tag" );
04583 
04584         if( rangeSets[i].size() > 0 )
04585         {
04586             tagVals[i] = new int[rangeSets[i].size()];
04587             // fill up with the tag values
04588             rval = mbImpl->tag_get_data( tags[i], rangeSets[i], tagVals[i] );MB_CHK_SET_ERR( rval, "can't get set tag values" );
04589             // now for inverse mapping:
04590             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
04591             {
04592                 localMaps[i][tagVals[i][j]] = rangeSets[i][j];
04593             }
04594         }
04595     }
04596     // get the global id tag too
04597     tags[num_tags] = mbImpl->globalId_tag();
04598 
04599     TupleList remoteEnts;
04600     // Tuple layout: processor to send to,
04601     //               tag type (0-material, 1-Dirichlet, 2-Neumann, 3-partition),
04602     //               tag value,
04603     //               remote handle
04604     //
04605     int initialSize = (int)sharedEnts.size();  // Estimate that, on average, each shared entity
04606     // is sent to one processor for one tag.
04607     // We actually send only entities that are owned locally, and of those
04608     // only the ones that carry a special tag (material, Neumann, etc.).
04609     // If we exceed the capacity, the tuple list is resized.
04610     remoteEnts.initialize( 3, 0, 1, 0, initialSize );
04611     remoteEnts.enableWriteAccess();
04612 
04613     // now, for each owned entity, get the remote handle(s) and Proc(s), and verify if it
04614     // belongs to one of the sets; if yes, create a tuple and append it
04615 
04616     std::set< EntityHandle > own_and_sha;
04617     int ir = 0, jr = 0;
04618     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
04619     {
04620         // ghosted entity handle
04621         EntityHandle geh = *vit;
04622         if( own_and_sha.find( geh ) != own_and_sha.end() )  // already encountered
04623             continue;
04624         int procs[MAX_SHARING_PROCS];
04625         EntityHandle handles[MAX_SHARING_PROCS];
04626         int nprocs;
04627         unsigned char pstat;
04628         rval = get_sharing_data( geh, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( rval, "Failed to get sharing data" );
04629         if( pstat & PSTATUS_NOT_OWNED ) continue;  // we will send info only for entities that we own
04630         own_and_sha.insert( geh );
04631         for( int i = 0; i < num_tags; i++ )
04632         {
04633             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
04634             {
04635                 EntityHandle specialSet = rangeSets[i][j];  // this set has tag i, value tagVals[i][j];
04636                 if( mbImpl->contains_entities( specialSet, &geh, 1 ) )
04637                 {
04638                     // this ghosted entity is in a special set, so form the tuple
04639                     // to send to the processors that do not own this
04640                     for( int k = 0; k < nprocs; k++ )
04641                     {
04642                         if( procs[k] != my_rank )
04643                         {
04644                             if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
04645                             {
04646                                 // resize, so we do not overflow
04647                                 int oldSize = remoteEnts.get_max();
04648                                 // increase the capacity by 50%
04649                                 remoteEnts.resize( oldSize + oldSize / 2 + 1 );
04650                             }
04651                             remoteEnts.vi_wr[ir++]  = procs[k];       // send to proc
04652                             remoteEnts.vi_wr[ir++]  = i;              // for the tags [i] (0-3)
04653                             remoteEnts.vi_wr[ir++]  = tagVals[i][j];  // actual value of the tag
04654                             remoteEnts.vul_wr[jr++] = handles[k];
04655                             remoteEnts.inc_n();
04656                         }
04657                     }
04658                 }
04659             }
04660         }
04661         // if the local entity has a global id, send it too, so we avoid
04662         // another "exchange_tags" for global id
04663         int gid;
04664         rval = mbImpl->tag_get_data( tags[num_tags], &geh, 1, &gid );MB_CHK_SET_ERR( rval, "Failed to get global id" );
04665         if( gid != 0 )
04666         {
04667             for( int k = 0; k < nprocs; k++ )
04668             {
04669                 if( procs[k] != my_rank )
04670                 {
04671                     if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
04672                     {
04673                         // resize, so we do not overflow
04674                         int oldSize = remoteEnts.get_max();
04675                         // increase the capacity by 50%
04676                         remoteEnts.resize( oldSize + oldSize / 2 + 1 );
04677                     }
04678                     remoteEnts.vi_wr[ir++]  = procs[k];  // send to proc
04679                     remoteEnts.vi_wr[ir++]  = num_tags;  // tag type num_tags (4) denotes the global id
04680                     remoteEnts.vi_wr[ir++]  = gid;       // actual value of the tag
04681                     remoteEnts.vul_wr[jr++] = handles[k];
04682                     remoteEnts.inc_n();
04683                 }
04684             }
04685         }
04686     }
04687 
04688 #ifndef NDEBUG
04689     if( my_rank == 1 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 1, before augment routing" );
04690     MPI_Barrier( procConfig.proc_comm() );
04691     int sentEnts = remoteEnts.get_n();
04692     assert( ( sentEnts == jr ) && ( 3 * sentEnts == ir ) );
04693 #endif
04694     // Exchange the info now, routing each tuple to its destination processor
04695     gs_data::crystal_data* cd = this->procConfig.crystal_router();
04696     // All communication happens here; no other mpi calls
04697     // Also, this is a collective call
04698     rval = cd->gs_transfer( 1, remoteEnts, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
04699 #ifndef NDEBUG
04700     if( my_rank == 0 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 0, after augment routing" );
04701     MPI_Barrier( procConfig.proc_comm() );
04702 #endif
04703 
04704     // now process the data received from other processors
04705     int received = remoteEnts.get_n();
04706     for( int i = 0; i < received; i++ )
04707     {
04708         // int from = ents_to_delete.vi_rd[i];
04709         EntityHandle geh = (EntityHandle)remoteEnts.vul_rd[i];
04710         int from_proc    = remoteEnts.vi_rd[3 * i];
04711         if( my_rank == from_proc )
04712             std::cout << " unexpected receive from my rank " << my_rank << " during augmenting with ghosts\n ";
04713         int tag_type = remoteEnts.vi_rd[3 * i + 1];
04714         assert( ( 0 <= tag_type ) && ( tag_type <= num_tags ) );
04715         int value = remoteEnts.vi_rd[3 * i + 2];
04716         if( tag_type == num_tags )
04717         {
04718             // it is global id
04719             rval = mbImpl->tag_set_data( tags[num_tags], &geh, 1, &value );MB_CHK_SET_ERR( rval, "Error in setting gid tag" );
04720         }
04721         else
04722         {
04723             // now, based on value and tag type, see if we have that value in the map
04724             MVal& lmap = localMaps[tag_type];
04725             itMVal itm = lmap.find( value );
04726             if( itm == lmap.end() )
04727             {
04728                 // the value was not found yet in the local map, so we have to create the set
04729                 EntityHandle newSet;
04730                 rval = mbImpl->create_meshset( MESHSET_SET, newSet );MB_CHK_SET_ERR( rval, "can't create new set" );
04731                 lmap[value] = newSet;
04732                 // set the tag value
04733                 rval = mbImpl->tag_set_data( tags[tag_type], &newSet, 1, &value );MB_CHK_SET_ERR( rval, "can't set tag for new set" );
04734 
04735                 // we also need to add the newly created set to the file set, if not null
04736                 if( file_set )
04737                 {
04738                     rval = mbImpl->add_entities( file_set, &newSet, 1 );MB_CHK_SET_ERR( rval, "can't add new set to the file set" );
04739                 }
04740             }
04741             // add the entity to the set pointed to by the map
04742             rval = mbImpl->add_entities( lmap[value], &geh, 1 );MB_CHK_SET_ERR( rval, "can't add ghost ent to the set" );
04743         }
04744     }
04745 
04746     for( int i = 0; i < num_tags; i++ )
04747         delete[] tagVals[i];
04748     delete[] tagVals;
04749     delete[] rangeSets;
04750     delete[] tags;
04751     delete[] localMaps;
04752     return MB_SUCCESS;
04753 }
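// Note: this routine is normally invoked after ghost entities have been exchanged, so that
// material, Dirichlet, Neumann and partition set membership (plus global ids) recorded on the
// owning rank is reflected onto the ranks that hold only ghost copies of those entities.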
04754 ErrorCode ParallelComm::create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim )
04755 {
04756     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
04757 
04758     // Build up the list of shared entities
04759     int procs[MAX_SHARING_PROCS];
04760     EntityHandle handles[MAX_SHARING_PROCS];
04761     ErrorCode result;
04762     int nprocs;
04763     unsigned char pstat;
04764     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
04765     {
04766         if( shared_dim != -1 && mbImpl->dimension_from_handle( *vit ) > shared_dim ) continue;
04767         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
04768         std::sort( procs, procs + nprocs );
04769         std::vector< int > tmp_procs( procs, procs + nprocs );
04770         assert( tmp_procs.size() != 2 );
04771         proc_nvecs[tmp_procs].push_back( *vit );
04772     }
04773 
04774     Skinner skinner( mbImpl );
04775     Range skin_ents[4];
04776     result = mbImpl->get_entities_by_dimension( this_set, resolve_dim, skin_ents[resolve_dim] );MB_CHK_SET_ERR( result, "Failed to get skin entities by dimension" );
04777     result =
04778         skinner.find_skin( this_set, skin_ents[resolve_dim], false, skin_ents[resolve_dim - 1], 0, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
04779     if( shared_dim > 1 )
04780     {
04781         result = mbImpl->get_adjacencies( skin_ents[resolve_dim - 1], resolve_dim - 2, true, skin_ents[resolve_dim - 2],
04782                                           Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
04783     }
04784 
04785     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );
04786 
04787     return create_interface_sets( proc_nvecs );
04788 }
04789 
04790 ErrorCode ParallelComm::create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
04791 {
04792     if( proc_nvecs.empty() ) return MB_SUCCESS;
04793 
04794     int proc_ids[MAX_SHARING_PROCS];
04795     EntityHandle proc_handles[MAX_SHARING_PROCS];
04796     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
04797     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in create_interface_sets" );
04798     Range::iterator rit;
04799 
04800     // Create interface sets, tag them, and tag their contents with iface set tag
04801     std::vector< unsigned char > pstatus;
04802     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator vit = proc_nvecs.begin();
04803          vit != proc_nvecs.end(); ++vit )
04804     {
04805         // Create the set
04806         EntityHandle new_set;
04807         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
04808         interfaceSets.insert( new_set );
04809 
04810         // Add entities
04811         assert( !vit->second.empty() );
04812         result = mbImpl->add_entities( new_set, &( vit->second )[0], ( vit->second ).size() );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
04813         // Tag set with the proc rank(s)
04814         if( vit->first.size() == 1 )
04815         {
04816             assert( ( vit->first )[0] != (int)procConfig.proc_rank() );
04817             result = mbImpl->tag_set_data( shp_tag, &new_set, 1, &( vit->first )[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04818             proc_handles[0] = 0;
04819             result          = mbImpl->tag_set_data( shh_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04820         }
04821         else
04822         {
04823             // Pad tag data out to MAX_SHARING_PROCS with -1
04824             if( vit->first.size() > MAX_SHARING_PROCS )
04825             {
04826                 std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_set ) )
04827                           << ' ' << ID_FROM_HANDLE( new_set ) << " on process " << proc_config().proc_rank()
04828                           << std::endl;
04829                 std::cerr.flush();
04830                 MPI_Abort( proc_config().proc_comm(), 66 );
04831             }
04832             // assert(vit->first.size() <= MAX_SHARING_PROCS);
04833             std::copy( vit->first.begin(), vit->first.end(), proc_ids );
04834             std::fill( proc_ids + vit->first.size(), proc_ids + MAX_SHARING_PROCS, -1 );
04835             result = mbImpl->tag_set_data( shps_tag, &new_set, 1, proc_ids );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04836             unsigned int ind = std::find( proc_ids, proc_ids + vit->first.size(), procConfig.proc_rank() ) - proc_ids;
04837             assert( ind < vit->first.size() );
04838             std::fill( proc_handles, proc_handles + MAX_SHARING_PROCS, 0 );
04839             proc_handles[ind] = new_set;
04840             result            = mbImpl->tag_set_data( shhs_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04841         }
04842 
04843         // Get the owning proc, then set the pstatus tag on iface set
04844         int min_proc       = ( vit->first )[0];
04845         unsigned char pval = ( PSTATUS_SHARED | PSTATUS_INTERFACE );
04846         if( min_proc < (int)procConfig.proc_rank() ) pval |= PSTATUS_NOT_OWNED;
04847         if( vit->first.size() > 1 ) pval |= PSTATUS_MULTISHARED;
04848         result = mbImpl->tag_set_data( pstat_tag, &new_set, 1, &pval );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
04849 
04850         // Tag the vertices with the same pstatus value
04851         pstatus.clear();
04852         std::vector< EntityHandle > verts;
04853         for( std::vector< EntityHandle >::iterator v2it = ( vit->second ).begin(); v2it != ( vit->second ).end();
04854              ++v2it )
04855             if( mbImpl->type_from_handle( *v2it ) == MBVERTEX ) verts.push_back( *v2it );
04856         pstatus.resize( verts.size(), pval );
04857         if( !verts.empty() )
04858         {
04859             result = mbImpl->tag_set_data( pstat_tag, &verts[0], verts.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set vertices with pstatus" );
04860         }
04861     }
04862 
04863     return MB_SUCCESS;
04864 }
04865 
04866 ErrorCode ParallelComm::create_iface_pc_links()
04867 {
04868     // Now that we've resolved the entities in the iface sets,
04869     // set parent/child links between the iface sets
04870 
04871     // First tag all entities in the iface sets
04872     Tag tmp_iface_tag;
04873     EntityHandle tmp_iface_set = 0;
04874     ErrorCode result           = mbImpl->tag_get_handle( "__tmp_iface", 1, MB_TYPE_HANDLE, tmp_iface_tag,
04875                                                MB_TAG_DENSE | MB_TAG_CREAT, &tmp_iface_set );MB_CHK_SET_ERR( result, "Failed to create temporary interface set tag" );
04876 
04877     Range iface_ents;
04878     std::vector< EntityHandle > tag_vals;
04879     Range::iterator rit;
04880 
04881     for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
04882     {
04883         // tag entities with interface set
04884         iface_ents.clear();
04885         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in interface set" );
04886 
04887         if( iface_ents.empty() ) continue;
04888 
04889         tag_vals.resize( iface_ents.size() );
04890         std::fill( tag_vals.begin(), tag_vals.end(), *rit );
04891         result = mbImpl->tag_set_data( tmp_iface_tag, iface_ents, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to tag iface entities with interface set" );
04892     }
04893 
04894     // Now go back through interface sets and add parent/child links
04895     Range tmp_ents2;
04896     for( int d = 2; d >= 0; d-- )
04897     {
04898         for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
04899         {
04900             // Get entities on this interface
04901             iface_ents.clear();
04902             result = mbImpl->get_entities_by_handle( *rit, iface_ents, true );MB_CHK_SET_ERR( result, "Failed to get entities by handle" );
04903             if( iface_ents.empty() || mbImpl->dimension_from_handle( *iface_ents.rbegin() ) != d ) continue;
04904 
04905             // Get higher-dimensional entities and their interface sets
04906             result = mbImpl->get_adjacencies( &( *iface_ents.begin() ), 1, d + 1, false, tmp_ents2 );MB_CHK_SET_ERR( result, "Failed to get adjacencies for interface sets" );
04907             tag_vals.resize( tmp_ents2.size() );
04908             result = mbImpl->tag_get_data( tmp_iface_tag, tmp_ents2, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to get tmp iface tag for interface sets" );
04909 
04910             // Go through and for any on interface make it a parent
04911             EntityHandle last_set = 0;
04912             for( unsigned int i = 0; i < tag_vals.size(); i++ )
04913             {
04914                 if( tag_vals[i] && tag_vals[i] != last_set )
04915                 {
04916                     result = mbImpl->add_parent_child( tag_vals[i], *rit );MB_CHK_SET_ERR( result, "Failed to add parent/child link for interface set" );
04917                     last_set = tag_vals[i];
04918                 }
04919             }
04920         }
04921     }
04922 
04923     // Delete the temporary tag
04924     result = mbImpl->tag_delete( tmp_iface_tag );MB_CHK_SET_ERR( result, "Failed to delete tmp iface tag" );
04925 
04926     return MB_SUCCESS;
04927 }
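// Illustrative sketch (not part of the original source): once the links above
// exist, a d-dimensional interface set can be walked up to the (d+1)-dimensional
// sets containing its entities, e.g.
//
//   std::vector< EntityHandle > parents;
//   ErrorCode rval = mb->get_parent_meshsets( iface_set, parents );
//
// where 'mb' is the moab::Interface* and 'iface_set' comes from interface_sets();
// add_parent_child() above makes the higher-dimensional set the parent.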
04928 
04929 ErrorCode ParallelComm::get_proc_nvecs( int resolve_dim, int shared_dim, Range* skin_ents,
04930                                         std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
04931 {
04932     // Set sharing procs tags on other skin ents
04933     ErrorCode result;
04934     const EntityHandle* connect;
04935     int num_connect;
04936     std::set< int > sharing_procs;
04937     std::vector< EntityHandle > dum_connect;
04938     std::vector< int > sp_vec;
04939 
04940     for( int d = 3; d > 0; d-- )
04941     {
04942         if( resolve_dim == d ) continue;
04943 
04944         for( Range::iterator rit = skin_ents[d].begin(); rit != skin_ents[d].end(); ++rit )
04945         {
04946             // Get connectivity
04947             result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect );MB_CHK_SET_ERR( result, "Failed to get connectivity on non-vertex skin entities" );
04948 
04949             int op = ( resolve_dim < shared_dim ? Interface::UNION : Interface::INTERSECT );
04950             result = get_sharing_data( connect, num_connect, sharing_procs, op );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_proc_nvecs" );
04951             if( sharing_procs.empty() ||
04952                 ( sharing_procs.size() == 1 && *sharing_procs.begin() == (int)procConfig.proc_rank() ) )
04953                 continue;
04954 
04955             // Need to specify sharing data correctly for entities or they will
04956             // end up in a different interface set than corresponding vertices
04957             if( sharing_procs.size() == 2 )
04958             {
04959                 std::set< int >::iterator it = sharing_procs.find( proc_config().proc_rank() );
04960                 assert( it != sharing_procs.end() );
04961                 sharing_procs.erase( it );
04962             }
04963 
04964             // Intersection is the owning proc(s) for this skin ent
04965             sp_vec.clear();
04966             std::copy( sharing_procs.begin(), sharing_procs.end(), std::back_inserter( sp_vec ) );
04967             assert( sp_vec.size() != 2 );
04968             proc_nvecs[sp_vec].push_back( *rit );
04969         }
04970     }
04971 
04972 #ifndef NDEBUG
04973     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
04974     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
04975          mit != proc_nvecs.end(); ++mit )
04976     {
04977         std::vector< EntityHandle > tmp_vec = ( mit->second );
04978         std::sort( tmp_vec.begin(), tmp_vec.end() );
04979         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
04980         assert( vit == tmp_vec.end() );
04981     }
04982 #endif
04983 
04984     return MB_SUCCESS;
04985 }
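// Note on the proc_nvecs keying convention used above and in tag_shared_verts():
// an entity shared with exactly one other rank is keyed by { other_rank } only
// (this rank is erased when the intersection has size 2), while a multishared
// entity is keyed by the full ascending list of sharing ranks including this
// one; hence the assert that no key ever has size 2. For example, on rank 2 an
// edge shared only with rank 5 is keyed by { 5 }, and a vertex shared by ranks
// 1, 2 and 7 is keyed by { 1, 2, 7 }.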
04986 
04987 // Overloaded form of tag_shared_verts
04988 // Tuple coming in is of form (arbitrary value, remoteProc, localHandle, remoteHandle)
04989 // Also will check for doubles in the list if the list is sorted
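// Layout assumed by the loop below: each tuple contributes i_extra leading ints
// plus the remote proc to vi_rd, and the pair (localHandle, remoteHandle) to
// vul_rd, so j advances two handles per tuple while i advances 1 + i_extra ints.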
04990 ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents,
04991                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
04992                                           Range& /*proc_verts*/, unsigned int i_extra )
04993 {
04994     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
04995     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
04996 
04997     unsigned int j = 0, i = 0;
04998     std::vector< int > sharing_procs, sharing_procs2, tag_procs;
04999     std::vector< EntityHandle > sharing_handles, sharing_handles2, tag_lhandles, tag_rhandles;
05000     std::vector< unsigned char > pstatus;
05001 
05002     // We're on tuple j/2
05003     if( i_extra ) i += i_extra;
05004     while( j < 2 * shared_ents.get_n() )
05005     {
05006         // Count & accumulate sharing procs
05007         EntityHandle this_ent = shared_ents.vul_rd[j], other_ent = 0;
05008         int other_proc = -1;
05009         while( j < 2 * shared_ents.get_n() && shared_ents.vul_rd[j] == this_ent )
05010         {
05011             j++;
05012             // Shouldn't have same proc
05013             assert( shared_ents.vi_rd[i] != (int)procConfig.proc_rank() );
05014             // Grab the remote data if it's not a duplicate
05015             if( shared_ents.vul_rd[j] != other_ent || shared_ents.vi_rd[i] != other_proc )
05016             {
05017                 assert( 0 != shared_ents.vul_rd[j] );
05018                 sharing_procs.push_back( shared_ents.vi_rd[i] );
05019                 sharing_handles.push_back( shared_ents.vul_rd[j] );
05020             }
05021             other_proc = shared_ents.vi_rd[i];
05022             other_ent  = shared_ents.vul_rd[j];
05023             j++;
05024             i += 1 + i_extra;
05025         }
05026 
05027         if( sharing_procs.size() > 1 )
05028         {
05029             // Add current proc/handle to list
05030             sharing_procs.push_back( procConfig.proc_rank() );
05031             sharing_handles.push_back( this_ent );
05032 
05033             // Sort sharing_procs and sharing_handles such that
05034             // sharing_procs is in ascending order. Use temporary
05035             // lists and binary search to re-order sharing_handles.
05036             sharing_procs2 = sharing_procs;
05037             std::sort( sharing_procs2.begin(), sharing_procs2.end() );
05038             sharing_handles2.resize( sharing_handles.size() );
05039             for( size_t k = 0; k < sharing_handles.size(); k++ )
05040             {
05041                 size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
05042                              sharing_procs2.begin();
05043                 sharing_handles2[idx] = sharing_handles[k];
05044             }
05045             sharing_procs.swap( sharing_procs2 );
05046             sharing_handles.swap( sharing_handles2 );
05047         }
05048 
05049         assert( sharing_procs.size() != 2 );
05050         proc_nvecs[sharing_procs].push_back( this_ent );
05051 
05052         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
05053         if( sharing_procs.size() == 1 )
05054         {
05055             tag_procs.push_back( sharing_procs[0] );
05056             tag_lhandles.push_back( this_ent );
05057             tag_rhandles.push_back( sharing_handles[0] );
05058             pstatus.push_back( share_flag );
05059         }
05060         else
05061         {
05062             // Pad lists
05063             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
05064             if( sharing_procs.size() > MAX_SHARING_PROCS )
05065             {
05066                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
05067                           << proc_config().proc_rank() << std::endl;
05068                 std::cerr.flush();
05069                 MPI_Abort( proc_config().proc_comm(), 66 );
05070             }
05071             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
05072             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
05073             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
05074             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
05075             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05076             sharedEnts.insert( this_ent );
05077         }
05078 
05079         // Reset sharing proc(s) tags
05080         sharing_procs.clear();
05081         sharing_handles.clear();
05082     }
05083 
05084     if( !tag_procs.empty() )
05085     {
05086         result = mbImpl->tag_set_data( shp_tag, &tag_lhandles[0], tag_procs.size(), &tag_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
05087         result = mbImpl->tag_set_data( shh_tag, &tag_lhandles[0], tag_procs.size(), &tag_rhandles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
05088         result = mbImpl->tag_set_data( pstat_tag, &tag_lhandles[0], tag_procs.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05089         for( std::vector< EntityHandle >::iterator vvt = tag_lhandles.begin(); vvt != tag_lhandles.end(); vvt++ )
05090             sharedEnts.insert( *vvt );
05091     }
05092 
05093 #ifndef NDEBUG
05094     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05095     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05096          mit != proc_nvecs.end(); ++mit )
05097     {
05098         std::vector< EntityHandle > tmp_vec = ( mit->second );
05099         std::sort( tmp_vec.begin(), tmp_vec.end() );
05100         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05101         assert( vit == tmp_vec.end() );
05102     }
05103 #endif
05104 
05105     return MB_SUCCESS;
05106 }
05107 
05108 ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents, Range* skin_ents,
05109                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
05110                                           Range& /*proc_verts*/ )
05111 {
05112     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
05113     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
05114 
05115     unsigned int j = 0, i = 0;
05116     std::vector< int > sharing_procs, sharing_procs2;
05117     std::vector< EntityHandle > sharing_handles, sharing_handles2, skin_verts( skin_ents[0].size() );
05118     for( Range::iterator rit = skin_ents[0].begin(); rit != skin_ents[0].end(); ++rit, i++ )
05119         skin_verts[i] = *rit;
05120     i = 0;
05121 
05122     while( j < 2 * shared_ents.get_n() )
05123     {
05124         // Count & accumulate sharing procs
05125         int this_idx          = shared_ents.vi_rd[j];
05126         EntityHandle this_ent = skin_verts[this_idx];
05127         while( j < 2 * shared_ents.get_n() && shared_ents.vi_rd[j] == this_idx )
05128         {
05129             j++;
05130             // Shouldn't have same proc
05131             assert( shared_ents.vi_rd[j] != (int)procConfig.proc_rank() );
05132             sharing_procs.push_back( shared_ents.vi_rd[j++] );
05133             sharing_handles.push_back( shared_ents.vul_rd[i++] );
05134         }
05135 
05136         if( sharing_procs.size() > 1 )
05137         {
05138             // Add current proc/handle to list
05139             sharing_procs.push_back( procConfig.proc_rank() );
05140             sharing_handles.push_back( this_ent );
05141         }
05142 
05143         // Sort sharing_procs and sharing_handles such that
05144         // sharing_procs is in ascending order. Use temporary
05145         // lists and binary search to re-order sharing_handles.
05146         sharing_procs2 = sharing_procs;
05147         std::sort( sharing_procs2.begin(), sharing_procs2.end() );
05148         sharing_handles2.resize( sharing_handles.size() );
05149         for( size_t k = 0; k < sharing_handles.size(); k++ )
05150         {
05151             size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
05152                          sharing_procs2.begin();
05153             sharing_handles2[idx] = sharing_handles[k];
05154         }
05155         sharing_procs.swap( sharing_procs2 );
05156         sharing_handles.swap( sharing_handles2 );
05157 
05158         assert( sharing_procs.size() != 2 );
05159         proc_nvecs[sharing_procs].push_back( this_ent );
05160 
05161         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
05162         if( sharing_procs.size() == 1 )
05163         {
05164             result = mbImpl->tag_set_data( shp_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
05165             result = mbImpl->tag_set_data( shh_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
05166             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &share_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05167             sharedEnts.insert( this_ent );
05168         }
05169         else
05170         {
05171             // Pad lists
05172             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
05173             if( sharing_procs.size() > MAX_SHARING_PROCS )
05174             {
05175                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
05176                           << proc_config().proc_rank() << std::endl;
05177                 std::cerr.flush();
05178                 MPI_Abort( proc_config().proc_comm(), 66 );
05179             }
05180             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
05181             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
05182             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
05183             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
05184             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05185             sharedEnts.insert( this_ent );
05186         }
05187 
05188         // Reset sharing proc(s) tags
05189         sharing_procs.clear();
05190         sharing_handles.clear();
05191     }
05192 
05193 #ifndef NDEBUG
05194     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05195     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05196          mit != proc_nvecs.end(); ++mit )
05197     {
05198         std::vector< EntityHandle > tmp_vec = ( mit->second );
05199         std::sort( tmp_vec.begin(), tmp_vec.end() );
05200         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05201         assert( vit == tmp_vec.end() );
05202     }
05203 #endif
05204 
05205     return MB_SUCCESS;
05206 }
05207 
05208 //! Get processors with which this processor communicates; sets are sorted by processor
05209 ErrorCode ParallelComm::get_interface_procs( std::set< unsigned int >& procs_set, bool get_buffs )
05210 {
05211     // Make sure the sharing procs vector is empty
05212     procs_set.clear();
05213 
05214     // Pre-load vector of single-proc tag values
05215     unsigned int i, j;
05216     std::vector< int > iface_proc( interfaceSets.size() );
05217     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), interfaceSets, &iface_proc[0] );MB_CHK_SET_ERR( result, "Failed to get iface_proc for iface sets" );
05218 
05219     // Get sharing procs either from single-proc vector or by getting
05220     // multi-proc tag value
05221     int tmp_iface_procs[MAX_SHARING_PROCS];
05222     std::fill( tmp_iface_procs, tmp_iface_procs + MAX_SHARING_PROCS, -1 );
05223     Range::iterator rit;
05224     for( rit = interfaceSets.begin(), i = 0; rit != interfaceSets.end(); ++rit, i++ )
05225     {
05226         if( -1 != iface_proc[i] )
05227         {
05228             assert( iface_proc[i] != (int)procConfig.proc_rank() );
05229             procs_set.insert( (unsigned int)iface_proc[i] );
05230         }
05231         else
05232         {
05233             // Get the sharing_procs tag
05234             result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, tmp_iface_procs );MB_CHK_SET_ERR( result, "Failed to get iface_procs for iface set" );
05235             for( j = 0; j < MAX_SHARING_PROCS; j++ )
05236             {
05237                 if( -1 != tmp_iface_procs[j] && tmp_iface_procs[j] != (int)procConfig.proc_rank() )
05238                     procs_set.insert( (unsigned int)tmp_iface_procs[j] );
05239                 else if( -1 == tmp_iface_procs[j] )
05240                 {
05241                     std::fill( tmp_iface_procs, tmp_iface_procs + j, -1 );
05242                     break;
05243                 }
05244             }
05245         }
05246     }
05247 
05248     if( get_buffs )
05249     {
05250         for( std::set< unsigned int >::iterator sit = procs_set.begin(); sit != procs_set.end(); ++sit )
05251             get_buffers( *sit );
05252     }
05253 
05254     return MB_SUCCESS;
05255 }
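// Illustrative sketch (not part of the original source): listing the ranks this
// process shares an interface with; 'pcomm' is assumed to be a ParallelComm*.
//
//   std::set< unsigned int > iface_procs;
//   ErrorCode rval = pcomm->get_interface_procs( iface_procs );
//   for( std::set< unsigned int >::iterator sit = iface_procs.begin(); sit != iface_procs.end(); ++sit )
//       std::cout << "Interface neighbor: rank " << *sit << std::endl;
//
// Passing get_buffs == true additionally pre-allocates message buffers for each
// of those ranks via get_buffers().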
05256 
05257 ErrorCode ParallelComm::get_pstatus( EntityHandle entity, unsigned char& pstatus_val )
05258 {
05259     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstatus_val );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
05260     return result;
05261 }
05262 
05263 ErrorCode ParallelComm::get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents )
05264 {
05265     Range ents;
05266     ErrorCode result;
05267 
05268     if( -1 == dim )
05269     {
05270         result = mbImpl->get_entities_by_handle( 0, ents );MB_CHK_SET_ERR( result, "Failed to get all entities" );
05271     }
05272     else
05273     {
05274         result = mbImpl->get_entities_by_dimension( 0, dim, ents );MB_CHK_SET_ERR( result, "Failed to get entities of dimension " << dim );
05275     }
05276 
05277     std::vector< unsigned char > pstatus( ents.size() );
05278     result = mbImpl->tag_get_data( pstatus_tag(), ents, &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
05279     Range::iterator rit = ents.begin();
05280     int i               = 0;
05281     if( pstatus_val )
05282     {
05283         for( ; rit != ents.end(); i++, ++rit )
05284         {
05285             if( pstatus[i] & pstatus_val && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
05286                 pstatus_ents.insert( *rit );
05287         }
05288     }
05289     else
05290     {
05291         for( ; rit != ents.end(); i++, ++rit )
05292         {
05293             if( !pstatus[i] && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
05294                 pstatus_ents.insert( *rit );
05295         }
05296     }
05297 
05298     return MB_SUCCESS;
05299 }
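// Illustrative sketch (not part of the original source; assumes the PSTATUS_*
// bits from MBParallelConventions.h): gathering all ghosted 3D entities.
//
//   Range ghosts;
//   ErrorCode rval = pcomm->get_pstatus_entities( 3, PSTATUS_GHOST, ghosts );
//
// A pstatus_val of 0 instead returns the entities with an empty pstatus, i.e.
// purely local ones, as handled by the else branch above.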
05300 
05301 ErrorCode ParallelComm::check_global_ids( EntityHandle this_set, const int dimension, const int start_id,
05302                                           const bool largest_dim_only, const bool parallel, const bool owned_only )
05303 {
05304     // Global id tag
05305     Tag gid_tag = mbImpl->globalId_tag();
05306     int def_val = -1;
05307     Range dum_range;
05308 
05309     void* tag_ptr    = &def_val;
05310     ErrorCode result = mbImpl->get_entities_by_type_and_tag( this_set, MBVERTEX, &gid_tag, &tag_ptr, 1, dum_range );MB_CHK_SET_ERR( result, "Failed to get entities by MBVERTEX type and gid tag" );
05311 
05312     if( !dum_range.empty() )
05313     {
05314         // Just created it, so we need global ids
05315         result = assign_global_ids( this_set, dimension, start_id, largest_dim_only, parallel, owned_only );MB_CHK_SET_ERR( result, "Failed assigning global ids" );
05316     }
05317 
05318     return MB_SUCCESS;
05319 }
05320 
05321 bool ParallelComm::is_iface_proc( EntityHandle this_set, int to_proc )
05322 {
05323     int sharing_procs[MAX_SHARING_PROCS];
05324     std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
05325     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), &this_set, 1, sharing_procs );
05326     if( MB_SUCCESS == result && to_proc == sharing_procs[0] ) return true;
05327 
05328     result = mbImpl->tag_get_data( sharedps_tag(), &this_set, 1, sharing_procs );
05329     if( MB_SUCCESS != result ) return false;
05330 
05331     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
05332     {
05333         if( to_proc == sharing_procs[i] )
05334             return true;
05335         else if( -1 == sharing_procs[i] )
05336             return false;
05337     }
05338 
05339     return false;
05340 }
05341 
05342 ErrorCode ParallelComm::filter_pstatus( Range& ents, unsigned char pstat, unsigned char op, int to_proc,
05343                                         Range* returned_ents )
05344 {
05345     Range tmp_ents;
05346 
05347     // assert(!ents.empty());
05348     if( ents.empty() )
05349     {
05350         if( returned_ents ) returned_ents->clear();
05351         return MB_SUCCESS;
05352     }
05353 
05354     // Put into tmp_ents any entities which are not owned locally or
05355     // who are already shared with to_proc
05356     std::vector< unsigned char > shared_flags( ents.size() ), shared_flags2;
05357     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), ents, &shared_flags[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus flag" );
05358     Range::const_iterator rit, hint = tmp_ents.begin();
05359
05360     int i;
05361     if( op == PSTATUS_OR )
05362     {
05363         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05364         {
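            // ((flags & ~pstat) ^ flags) & pstat is just flags & pstat:
            // keep the entity if any of the requested pstatus bits is set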
05365             if( ( ( shared_flags[i] & ~pstat ) ^ shared_flags[i] ) & pstat )
05366             {
05367                 hint = tmp_ents.insert( hint, *rit );
05368                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05369             }
05370         }
05371     }
05372     else if( op == PSTATUS_AND )
05373     {
05374         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05375         {
05376             if( ( shared_flags[i] & pstat ) == pstat )
05377             {
05378                 hint = tmp_ents.insert( hint, *rit );
05379                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05380             }
05381         }
05382     }
05383     else if( op == PSTATUS_NOT )
05384     {
05385         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05386         {
05387             if( !( shared_flags[i] & pstat ) )
05388             {
05389                 hint = tmp_ents.insert( hint, *rit );
05390                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05391             }
05392         }
05393     }
05394     else
05395     {
05396         assert( false );
05397         return MB_FAILURE;
05398     }
05399 
05400     if( -1 != to_proc )
05401     {
05402         int sharing_procs[MAX_SHARING_PROCS];
05403         std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
05404         Range tmp_ents2;
05405         hint = tmp_ents2.begin();
05406 
05407         for( rit = tmp_ents.begin(), i = 0; rit != tmp_ents.end(); ++rit, i++ )
05408         {
05409             // We need to check sharing procs
05410             if( shared_flags2[i] & PSTATUS_MULTISHARED )
05411             {
05412                 result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag" );
05413                 assert( -1 != sharing_procs[0] );
05414                 for( unsigned int j = 0; j < MAX_SHARING_PROCS; j++ )
05415                 {
05416                     // If to_proc shares this entity, add it to list
05417                     if( sharing_procs[j] == to_proc ) { hint = tmp_ents2.insert( hint, *rit ); }
05418                     else if( -1 == sharing_procs[j] )
05419                         break;
05420 
05421                     sharing_procs[j] = -1;
05422                 }
05423             }
05424             else if( shared_flags2[i] & PSTATUS_SHARED )
05425             {
05426                 result = mbImpl->tag_get_data( sharedp_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedp tag" );
05427                 assert( -1 != sharing_procs[0] );
05428                 if( sharing_procs[0] == to_proc ) hint = tmp_ents2.insert( hint, *rit );
05429                 sharing_procs[0] = -1;
05430             }
05431             else
05432                 assert( "should never get here" && false );
05433         }
05434 
05435         tmp_ents.swap( tmp_ents2 );
05436     }
05437 
05438     if( returned_ents )
05439         returned_ents->swap( tmp_ents );
05440     else
05441         ents.swap( tmp_ents );
05442 
05443     return MB_SUCCESS;
05444 }
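// Illustrative sketch (not part of the original source): the common idiom of
// restricting a Range to locally owned entities; 'pcomm' and 'ents' are assumed
// to already exist.
//
//   ErrorCode rval = pcomm->filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );
//
// i.e. drop everything whose pstatus has the NOT_OWNED bit set. Supplying a
// non-NULL returned_ents leaves 'ents' untouched and writes the result there.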
05445 
05446 ErrorCode ParallelComm::exchange_ghost_cells( int ghost_dim, int bridge_dim, int num_layers, int addl_ents,
05447                                               bool store_remote_handles, bool wait_all, EntityHandle* file_set )
05448 {
05449 #ifdef MOAB_HAVE_MPE
05450     if( myDebug->get_verbosity() == 2 )
05451     {
05452         if( !num_layers )
05453             MPE_Log_event( IFACE_START, procConfig.proc_rank(), "Starting interface exchange." );
05454         else
05455             MPE_Log_event( GHOST_START, procConfig.proc_rank(), "Starting ghost exchange." );
05456     }
05457 #endif
05458 
05459     myDebug->tprintf( 1, "Entering exchange_ghost_cells with num_layers = %d\n", num_layers );
05460     if( myDebug->get_verbosity() == 4 )
05461     {
05462         msgs.clear();
05463         msgs.reserve( MAX_SHARING_PROCS );
05464     }
05465 
05466     // If we're only finding out about existing ents, we have to be storing
05467     // remote handles too
05468     assert( num_layers > 0 || store_remote_handles );
05469 
05470     const bool is_iface = !num_layers;
05471 
05472     // Get the b-dimensional interface(s) with each communicating proc, where b = bridge_dim
05473 
05474     int success;
05475     ErrorCode result = MB_SUCCESS;
05476     int incoming1 = 0, incoming2 = 0;
05477 
05478     reset_all_buffers();
05479 
05480     // When this function is called, buffProcs should already have any
05481     // communicating procs
05482 
05483     //===========================================
05484     // Post ghost irecv's for ghost entities from all communicating procs
05485     //===========================================
05486 #ifdef MOAB_HAVE_MPE
05487     if( myDebug->get_verbosity() == 2 )
05488     { MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." ); }
05489 #endif
05490 
05491     // Index reqs the same as buffer/sharing procs indices
05492     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL ),
05493         recv_remoteh_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05494     std::vector< unsigned int >::iterator proc_it;
05495     int ind, p;
05496     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05497     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
05498     {
05499         incoming1++;
05500         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
05501                            MB_MESG_ENTS_SIZE, incoming1 );
05502         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
05503                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
05504         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" ); }
05505     }
05506 
05507     //===========================================
05508     // Get entities to be sent to neighbors
05509     //===========================================
05510     Range sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
05511     TupleList entprocs;
05512     int dum_ack_buff;
05513     result = get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents, allsent, entprocs );MB_CHK_SET_ERR( result, "get_sent_ents failed" );
05514 
05515     // augment file set with the entities to be sent
05516     // we might have created new entities if addl_ents>0, edges and/or faces
05517     if( addl_ents > 0 && file_set && !allsent.empty() )
05518     {
05519         result = mbImpl->add_entities( *file_set, allsent );MB_CHK_SET_ERR( result, "Failed to add new sub-entities to set" );
05520     }
05521     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
05522                       (unsigned long)allsent.size() );
05523 
05524     //===========================================
05525     // Pack and send ents from this proc to others
05526     //===========================================
05527     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
05528     {
05529         myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", sent_ents[p].compactness(),
05530                           (unsigned long)sent_ents[p].size() );
05531 
05532         // Reserve space on front for size and for initial buff size
05533         localOwnedBuffs[p]->reset_buffer( sizeof( int ) );
05534 
05535         // Entities
05536         result = pack_entities( sent_ents[p], localOwnedBuffs[p], store_remote_handles, buffProcs[p], is_iface,
05537                                 &entprocs, &allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
05538 
05539         if( myDebug->get_verbosity() == 4 )
05540         {
05541             msgs.resize( msgs.size() + 1 );
05542             msgs.back() = new Buffer( *localOwnedBuffs[p] );
05543         }
05544 
05545         // Send the buffer (size stored in front in send_buffer)
05546         result = send_buffer( *proc_it, localOwnedBuffs[p], MB_MESG_ENTS_SIZE, sendReqs[3 * p],
05547                               recv_ent_reqs[3 * p + 2], &dum_ack_buff, incoming1, MB_MESG_REMOTEH_SIZE,
05548                               ( !is_iface && store_remote_handles ?  // this used for ghosting only
05549                                     localOwnedBuffs[p]
05550                                                                   : NULL ),
05551                               &recv_remoteh_reqs[3 * p], &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost exchange" );
05552     }
05553 
05554     entprocs.reset();
05555 
05556     //===========================================
05557     // Receive/unpack new entities
05558     //===========================================
05559     // Number of incoming messages for ghosts is the number of procs we
05560     // communicate with; for iface, it's the number of those with lower rank
05561     MPI_Status status;
05562     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
05563     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
05564     std::vector< std::vector< int > > L1p( buffProcs.size() );
05565     std::vector< EntityHandle > L2hloc, L2hrem;
05566     std::vector< unsigned int > L2p;
05567     std::vector< EntityHandle > new_ents;
05568 
05569     while( incoming1 )
05570     {
05571         // Wait for all recvs of ghost ents before proceeding to sending remote handles,
05572         // b/c some procs may have sent to a 3rd proc ents owned by me;
05573         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
05574 
05575         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &ind, &status );
05576         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" ); }
05577 
05578         PRINT_DEBUG_RECD( status );
05579 
05580         // OK, received something; decrement incoming counter
05581         incoming1--;
05582         bool done = false;
05583 
05584         // In case ind is for ack, we need index of one before it
05585         unsigned int base_ind = 3 * ( ind / 3 );
05586         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 3], recv_ent_reqs[base_ind + 1],
05587                               recv_ent_reqs[base_ind + 2], incoming1, localOwnedBuffs[ind / 3], sendReqs[base_ind + 1],
05588                               sendReqs[base_ind + 2], done,
05589                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind / 3] : NULL ),
05590                               MB_MESG_REMOTEH_SIZE,  // maybe base_ind+1?
05591                               &recv_remoteh_reqs[base_ind + 1], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
05592 
05593         if( done )
05594         {
05595             if( myDebug->get_verbosity() == 4 )
05596             {
05597                 msgs.resize( msgs.size() + 1 );
05598                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 3] );
05599             }
05600 
05601             // Message completely received - process buffer that was sent
05602             remoteOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
05603             result = unpack_entities( remoteOwnedBuffs[ind / 3]->buff_ptr, store_remote_handles, ind / 3, is_iface,
05604                                       L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );
05605             if( MB_SUCCESS != result )
05606             {
05607                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
05608                 print_buffer( remoteOwnedBuffs[ind / 3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 3], false );
05609                 return result;
05610             }
05611 
05612             if( recv_ent_reqs.size() != 3 * buffProcs.size() )
05613             {
05614                 // Post irecv's for remote handles from new proc; shouldn't be iface,
05615                 // since we know about all procs we share with
05616                 assert( !is_iface );
05617                 recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05618                 for( unsigned int i = recv_ent_reqs.size(); i < 3 * buffProcs.size(); i += 3 )
05619                 {
05620                     localOwnedBuffs[i / 3]->reset_buffer();
05621                     incoming2++;
05622                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 3], localOwnedBuffs[i / 3]->mem_ptr,
05623                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
05624                     success = MPI_Irecv( localOwnedBuffs[i / 3]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
05625                                          buffProcs[i / 3], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
05626                                          &recv_remoteh_reqs[i] );
05627                     if( success != MPI_SUCCESS )
05628                     { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" ); }
05629                 }
05630                 recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05631                 sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05632             }
05633         }
05634     }
05635 
05636     // Add requests for any new addl procs
05637     if( recv_ent_reqs.size() != 3 * buffProcs.size() )
05638     {
05639         // Shouldn't get here...
05640         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in ghost exchange" );
05641     }
05642 
05643 #ifdef MOAB_HAVE_MPE
05644     if( myDebug->get_verbosity() == 2 )
05645     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange." ); }
05646 #endif
05647 
05648     if( is_iface )
05649     {
05650         // Need to check over entities I sent and make sure I received
05651         // handles for them from all expected procs; if not, need to clean
05652         // them up
05653         result = check_clean_iface( allsent );
05654         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05655 
05656         // Now set the shared/interface tag on non-vertex entities on interface
05657         result = tag_iface_entities();MB_CHK_SET_ERR( result, "Failed to tag iface entities" );
05658 
05659 #ifndef NDEBUG
05660         result = check_sent_ents( allsent );
05661         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05662         result = check_all_shared_handles( true );
05663         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05664 #endif
05665 
05666 #ifdef MOAB_HAVE_MPE
05667         if( myDebug->get_verbosity() == 2 )
05668         { MPE_Log_event( IFACE_END, procConfig.proc_rank(), "Ending interface exchange." ); }
05669 #endif
05670 
05671         //===========================================
05672         // Wait if requested
05673         //===========================================
05674         if( wait_all )
05675         {
05676             if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
05677             else
05678             {
05679                 MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05680                 success = MPI_Waitall( 3 * buffProcs.size(), &recv_ent_reqs[0], mult_status );
05681                 if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05682                 success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05683                 if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05684                 /*success = MPI_Waitall(3*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
05685                 if (MPI_SUCCESS != success) {
05686                   MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
05687                 }*/
05688             }
05689         }
05690 
05691         myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
05692         myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==true \n" );
05693 
05694         return MB_SUCCESS;
05695     }
05696 
05697     // we still need to wait on sendReqs, if they are not fulfilled yet
05698     if( wait_all )
05699     {
05700         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
05701         else
05702         {
05703             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05704             success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05705             if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05706         }
05707     }
05708     //===========================================
05709     // Send local handles for new ghosts to owner, then add
05710     // those to ghost list for that owner
05711     //===========================================
05712     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
05713     {
05714 
05715         // Reserve space on front for size and for initial buff size
05716         remoteOwnedBuffs[p]->reset_buffer( sizeof( int ) );
05717 
05718         result = pack_remote_handles( L1hloc[p], L1hrem[p], L1p[p], *proc_it, remoteOwnedBuffs[p] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
05719         remoteOwnedBuffs[p]->set_stored_size();
05720 
05721         if( myDebug->get_verbosity() == 4 )
05722         {
05723             msgs.resize( msgs.size() + 1 );
05724             msgs.back() = new Buffer( *remoteOwnedBuffs[p] );
05725         }
05726         result = send_buffer( buffProcs[p], remoteOwnedBuffs[p], MB_MESG_REMOTEH_SIZE, sendReqs[3 * p],
05727                               recv_remoteh_reqs[3 * p + 2], &dum_ack_buff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
05728     }
05729 
05730     //===========================================
05731     // Process remote handles of my ghosteds
05732     //===========================================
05733     while( incoming2 )
05734     {
05735         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
05736         success = MPI_Waitany( 3 * buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status );
05737         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" ); }
05738 
05739         // OK, received something; decrement incoming counter
05740         incoming2--;
05741 
05742         PRINT_DEBUG_RECD( status );
05743 
05744         bool done             = false;
05745         unsigned int base_ind = 3 * ( ind / 3 );
05746         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 3], recv_remoteh_reqs[base_ind + 1],
05747                               recv_remoteh_reqs[base_ind + 2], incoming2, remoteOwnedBuffs[ind / 3],
05748                               sendReqs[base_ind + 1], sendReqs[base_ind + 2], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
05749         if( done )
05750         {
05751             // Incoming remote handles
05752             if( myDebug->get_verbosity() == 4 )
05753             {
05754                 msgs.resize( msgs.size() + 1 );
05755                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 3] );
05756             }
05757             localOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
05758             result =
05759                 unpack_remote_handles( buffProcs[ind / 3], localOwnedBuffs[ind / 3]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
05760         }
05761     }
05762 
05763 #ifdef MOAB_HAVE_MPE
05764     if( myDebug->get_verbosity() == 2 )
05765     {
05766         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
05767         MPE_Log_event( GHOST_END, procConfig.proc_rank(), "Ending ghost exchange (still doing checks)." );
05768     }
05769 #endif
05770 
05771     //===========================================
05772     // Wait if requested
05773     //===========================================
05774     if( wait_all )
05775     {
05776         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
05777         else
05778         {
05779             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05780             success = MPI_Waitall( 3 * buffProcs.size(), &recv_remoteh_reqs[0], mult_status );
05781             if( MPI_SUCCESS == success ) success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05782         }
05783         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05784     }
05785 
05786 #ifndef NDEBUG
05787     result = check_sent_ents( allsent );MB_CHK_SET_ERR( result, "Failed check on shared entities" );
05788     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Failed check on all shared handles" );
05789 #endif
05790 
05791     if( file_set && !new_ents.empty() )
05792     {
05793         result = mbImpl->add_entities( *file_set, &new_ents[0], new_ents.size() );MB_CHK_SET_ERR( result, "Failed to add new entities to set" );
05794     }
05795 
05796     myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
05797     myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==false \n" );
05798 
05799     return MB_SUCCESS;
05800 }
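// Illustrative sketch (parameter values are an assumption, not taken from this
// file): one layer of 3D ghost elements bridged through vertices, storing
// remote handles so later exchanges can address the ghosts on their owners.
//
//   ErrorCode rval = pcomm->exchange_ghost_cells( 3 /*ghost_dim*/, 0 /*bridge_dim*/,
//                                                 1 /*num_layers*/, 0 /*addl_ents*/,
//                                                 true /*store_remote_handles*/ );
//
// Calling with num_layers == 0 runs the interface-only path (is_iface == true).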
05801 
05802 ErrorCode ParallelComm::send_buffer( const unsigned int to_proc, Buffer* send_buff, int mesg_tag, MPI_Request& send_req,
05803                                      MPI_Request& ack_req, int* ack_buff, int& this_incoming, int next_mesg_tag,
05804                                      Buffer* next_recv_buff, MPI_Request* next_recv_req, int* next_incoming )
05805 {
05806     ErrorCode result = MB_SUCCESS;
05807     int success;
05808 
05809     // If small message, post recv for remote handle message
05810     if( send_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE && next_recv_buff )
05811     {
05812         ( *next_incoming )++;
05813         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, next_mesg_tag,
05814                            *next_incoming );
05815         success = MPI_Irecv( next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, to_proc, next_mesg_tag,
05816                              procConfig.proc_comm(), next_recv_req );
05817         if( success != MPI_SUCCESS )
05818         { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for next message in ghost exchange" ); }
05819     }
05820     // If large, we'll need an ack before sending the rest
05821     else if( send_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
05822     {
05823         this_incoming++;
05824         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff, sizeof( int ), mesg_tag - 1,
05825                            this_incoming );
05826         success = MPI_Irecv( (void*)ack_buff, sizeof( int ), MPI_UNSIGNED_CHAR, to_proc, mesg_tag - 1,
05827                              procConfig.proc_comm(), &ack_req );
05828         if( success != MPI_SUCCESS )
05829         { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for entity ack in ghost exchange" ); }
05830     }
05831 
05832     // Send the buffer
05833     PRINT_DEBUG_ISEND( procConfig.proc_rank(), to_proc, send_buff->mem_ptr, mesg_tag,
05834                        std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ) );
05835     assert( 0 <= send_buff->get_stored_size() && send_buff->get_stored_size() <= (int)send_buff->alloc_size );
05836     success = MPI_Isend( send_buff->mem_ptr, std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ),
05837                          MPI_UNSIGNED_CHAR, to_proc, mesg_tag, procConfig.proc_comm(), &send_req );
05838     if( success != MPI_SUCCESS ) return MB_FAILURE;
05839 
05840     return result;
05841 }
05842 
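// Note on the handshake implemented by send_buffer() above and recv_buffer()
// below: a buffer that fits in INITIAL_BUFF_SIZE goes out in a single MPI_Isend
// on mesg_tag. A larger buffer sends only its first INITIAL_BUFF_SIZE bytes
// (which include the total stored size); the receiver then reserves the full
// size, posts an irecv on mesg_tag + 1 for the remainder and replies with an
// ack on mesg_tag - 1, and on receiving that ack the sender ships the rest on
// mesg_tag + 1.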
05843 ErrorCode ParallelComm::recv_buffer( int mesg_tag_expected, const MPI_Status& mpi_status, Buffer* recv_buff,
05844                                      MPI_Request& recv_req, MPI_Request& /*ack_recvd_req*/, int& this_incoming,
05845                                      Buffer* send_buff, MPI_Request& send_req, MPI_Request& sent_ack_req, bool& done,
05846                                      Buffer* next_buff, int next_tag, MPI_Request* next_req, int* next_incoming )
05847 {
05848     // Process a received message; if there will be more coming,
05849     // post a receive for 2nd part then send an ack message
05850     int from_proc = mpi_status.MPI_SOURCE;
05851     int success;
05852 
05853     // Set the buff_ptr on the recv_buffer; needs to point beyond any
05854     // valid data already in the buffer
05855     recv_buff->reset_ptr( std::min( recv_buff->get_stored_size(), (int)recv_buff->alloc_size ) );
05856 
05857     if( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
05858     {
05859         // 1st message & large - allocate buffer, post irecv for 2nd message,
05860         // then send ack
05861         recv_buff->reserve( recv_buff->get_stored_size() );
05862         assert( recv_buff->alloc_size > INITIAL_BUFF_SIZE );
05863 
05864         // Will expect a 2nd message
05865         this_incoming++;
05866 
05867         PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr + INITIAL_BUFF_SIZE,
05868                            recv_buff->get_stored_size() - INITIAL_BUFF_SIZE, mesg_tag_expected + 1, this_incoming );
05869         success = MPI_Irecv( recv_buff->mem_ptr + INITIAL_BUFF_SIZE, recv_buff->get_stored_size() - INITIAL_BUFF_SIZE,
05870                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &recv_req );
05871         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post 2nd iRecv in ghost exchange" ); }
05872 
05873         // Send ack, doesn't matter what data actually is
05874         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr, mesg_tag_expected - 1,
05875                            sizeof( int ) );
05876         success = MPI_Isend( recv_buff->mem_ptr, sizeof( int ), MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected - 1,
05877                              procConfig.proc_comm(), &sent_ack_req );
05878         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to send ack in ghost exchange" ); }
05879     }
05880     else if( mpi_status.MPI_TAG == mesg_tag_expected - 1 )
05881     {
05882         // Got an ack back, send the 2nd half of message
05883 
05884         // Should be a large message if we got this
05885         assert( *( (size_t*)send_buff->mem_ptr ) > INITIAL_BUFF_SIZE );
05886 
05887         // Post irecv for next message, then send 2nd message
05888         if( next_buff )
05889         {
05890             // We'll expect a return message
05891             ( *next_incoming )++;
05892             PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, next_buff->mem_ptr, INITIAL_BUFF_SIZE, next_tag,
05893                                *next_incoming );
05894 
05895             success = MPI_Irecv( next_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc, next_tag,
05896                                  procConfig.proc_comm(), next_req );
05897             if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post next irecv in ghost exchange" ); }
05898         }
05899 
05900         // Send 2nd message
05901         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, send_buff->mem_ptr + INITIAL_BUFF_SIZE,
05902                            mesg_tag_expected + 1, send_buff->get_stored_size() - INITIAL_BUFF_SIZE );
05903 
05904         assert( send_buff->get_stored_size() - INITIAL_BUFF_SIZE < send_buff->alloc_size &&
05905                 0 <= send_buff->get_stored_size() );
05906         success = MPI_Isend( send_buff->mem_ptr + INITIAL_BUFF_SIZE, send_buff->get_stored_size() - INITIAL_BUFF_SIZE,
05907                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &send_req );
05908         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to send 2nd message in ghost exchange" ); }
05909     }
05910     else if( ( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE ) ||
05911              mpi_status.MPI_TAG == mesg_tag_expected + 1 )
05912     {
05913         // Message completely received - signal that we're done
05914         done = true;
05915     }
05916 
05917     return MB_SUCCESS;
05918 }
05919 
05920 struct ProcList
05921 {
05922     int procs[MAX_SHARING_PROCS];
05923 };
05924 static bool operator<( const ProcList& a, const ProcList& b )
05925 {
05926     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
05927     {
05928         if( a.procs[i] < b.procs[i] )
05929             return true;
05930         else if( b.procs[i] < a.procs[i] )
05931             return false;
05932         else if( a.procs[i] < 0 )
05933             return false;
05934     }
05935     return false;
05936 }
05937 
05938 ErrorCode ParallelComm::check_clean_iface( Range& allsent )
05939 {
05940     // allsent is all entities I think are on interface; go over them, looking
05941     // for zero-valued handles, and fix any I find
05942 
05943     // Keep lists of entities for which the sharing data changed, grouped
05944     // by set of sharing procs.
05945     typedef std::map< ProcList, Range > procmap_t;
05946     procmap_t old_procs, new_procs;
05947 
05948     ErrorCode result = MB_SUCCESS;
05949     Range::iterator rit;
05950     Range::reverse_iterator rvit;
05951     unsigned char pstatus;
05952     int nump;
05953     ProcList sharedp;
05954     EntityHandle sharedh[MAX_SHARING_PROCS];
05955     for( rvit = allsent.rbegin(); rvit != allsent.rend(); ++rvit )
05956     {
05957         result = get_sharing_data( *rvit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
05958         assert( "Should be shared with at least one other proc" &&
05959                 ( nump > 1 || sharedp.procs[0] != (int)procConfig.proc_rank() ) );
05960         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
05961 
05962         // Look for first null handle in list
05963         int idx = std::find( sharedh, sharedh + nump, (EntityHandle)0 ) - sharedh;
05964         if( idx == nump ) continue;  // All handles are valid
05965 
05966         ProcList old_list( sharedp );
05967         std::sort( old_list.procs, old_list.procs + nump );
05968         old_procs[old_list].insert( *rvit );
05969 
05970         // Remove null handles and corresponding proc ranks from lists
05971         int new_nump       = idx;
05972         bool removed_owner = !idx;
05973         for( ++idx; idx < nump; ++idx )
05974         {
05975             if( sharedh[idx] )
05976             {
05977                 sharedh[new_nump]       = sharedh[idx];
05978                 sharedp.procs[new_nump] = sharedp.procs[idx];
05979                 ++new_nump;
05980             }
05981         }
05982         sharedp.procs[new_nump] = -1;
05983 
05984         if( removed_owner && new_nump > 1 )
05985         {
05986             // The proc that we choose as the entity owner isn't sharing the
05987             // entity (doesn't have a copy of it). We need to pick a different
05988             // owner. Choose the proc with lowest rank.
05989             idx = std::min_element( sharedp.procs, sharedp.procs + new_nump ) - sharedp.procs;
05990             std::swap( sharedp.procs[0], sharedp.procs[idx] );
05991             std::swap( sharedh[0], sharedh[idx] );
05992             if( sharedp.procs[0] == (int)proc_config().proc_rank() ) pstatus &= ~PSTATUS_NOT_OWNED;
05993         }
05994 
05995         result = set_sharing_data( *rvit, pstatus, nump, new_nump, sharedp.procs, sharedh );MB_CHK_SET_ERR( result, "Failed to set sharing data in check_clean_iface" );
05996 
05997         if( new_nump > 1 )
05998         {
05999             if( new_nump == 2 )
06000             {
06001                 if( sharedp.procs[1] != (int)proc_config().proc_rank() )
06002                 {
06003                     assert( sharedp.procs[0] == (int)proc_config().proc_rank() );
06004                     sharedp.procs[0] = sharedp.procs[1];
06005                 }
06006                 sharedp.procs[1] = -1;
06007             }
06008             else
06009             {
06010                 std::sort( sharedp.procs, sharedp.procs + new_nump );
06011             }
06012             new_procs[sharedp].insert( *rvit );
06013         }
06014     }
06015 
06016     if( old_procs.empty() )
06017     {
06018         assert( new_procs.empty() );
06019         return MB_SUCCESS;
06020     }
06021 
06022     // Update interface sets
06023     procmap_t::iterator pmit;
06024     // std::vector<unsigned char> pstatus_list;
06025     rit = interface_sets().begin();
06026     while( rit != interface_sets().end() )
06027     {
06028         result = get_sharing_data( *rit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data for interface set" );
06029         assert( nump != 2 );
06030         std::sort( sharedp.procs, sharedp.procs + nump );
06031         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
06032 
06033         pmit = old_procs.find( sharedp );
06034         if( pmit != old_procs.end() )
06035         {
06036             result = mbImpl->remove_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
06037         }
06038 
06039         pmit = new_procs.find( sharedp );
06040         if( pmit == new_procs.end() )
06041         {
06042             int count;
06043             result = mbImpl->get_number_entities_by_handle( *rit, count );MB_CHK_SET_ERR( result, "Failed to get number of entities in interface set" );
06044             if( !count )
06045             {
06046                 result = mbImpl->delete_entities( &*rit, 1 );MB_CHK_SET_ERR( result, "Failed to delete entities from interface set" );
06047                 rit = interface_sets().erase( rit );
06048             }
06049             else
06050             {
06051                 ++rit;
06052             }
06053         }
06054         else
06055         {
06056             result = mbImpl->add_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
06057 
06058             // Remove those that we've processed so that we know which ones
06059             // are new.
06060             new_procs.erase( pmit );
06061             ++rit;
06062         }
06063     }
06064 
06065     // Create interface sets for new proc id combinations
06066     std::fill( sharedh, sharedh + MAX_SHARING_PROCS, 0 );
06067     for( pmit = new_procs.begin(); pmit != new_procs.end(); ++pmit )
06068     {
06069         EntityHandle new_set;
06070         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
06071         interfaceSets.insert( new_set );
06072 
06073         // Add entities
06074         result = mbImpl->add_entities( new_set, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
06075         // Tag set with the proc rank(s)
06076         assert( pmit->first.procs[0] >= 0 );
06077         pstatus = PSTATUS_SHARED | PSTATUS_INTERFACE;
06078         if( pmit->first.procs[1] == -1 )
06079         {
06080             int other = pmit->first.procs[0];
06081             assert( other != (int)procConfig.proc_rank() );
06082             result = mbImpl->tag_set_data( sharedp_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06083             sharedh[0] = 0;
06084             result     = mbImpl->tag_set_data( sharedh_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06085             if( other < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
06086         }
06087         else
06088         {
06089             result = mbImpl->tag_set_data( sharedps_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06090             result = mbImpl->tag_set_data( sharedhs_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06091             pstatus |= PSTATUS_MULTISHARED;
06092             if( pmit->first.procs[0] < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
06093         }
06094 
06095         result = mbImpl->tag_set_data( pstatus_tag(), &new_set, 1, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
06096 
06097         // Set pstatus on all interface entities in set
06098         result = mbImpl->tag_clear_data( pstatus_tag(), pmit->second, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface entities with pstatus" );
06099     }
06100 
06101     return MB_SUCCESS;
06102 }
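// Illustrative sketch (not part of the build) of the null-handle compaction performed
// above: zero-valued remote handles and their matching proc ranks are dropped from the
// two parallel arrays before the lists are re-sorted. The function and array names
// below are placeholders, not ParallelComm members.
//
//   int compact_sharing_lists( EntityHandle* handles, int* procs, int nump )
//   {
//       int new_nump = 0;
//       for( int i = 0; i < nump; i++ )
//       {
//           if( handles[i] )  // keep only entries with a valid remote handle
//           {
//               handles[new_nump] = handles[i];
//               procs[new_nump]   = procs[i];
//               ++new_nump;
//           }
//       }
//       if( new_nump < MAX_SHARING_PROCS ) procs[new_nump] = -1;  // terminate the proc list
//       return new_nump;
//   }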
06103 
06104 ErrorCode ParallelComm::set_sharing_data( EntityHandle ent, unsigned char pstatus, int old_nump, int new_nump, int* ps,
06105                                           EntityHandle* hs )
06106 {
06107     // If new_nump is less than 3, the entity is no longer multishared
06108     if( old_nump > 2 && ( pstatus & PSTATUS_MULTISHARED ) && new_nump < 3 )
06109     {
06110         // Unset multishared flag
06111         pstatus ^= PSTATUS_MULTISHARED;
06112     }
06113 
06114     // Check for consistency in input data
06115     // DBG
06116     /*  bool con1 = ((new_nump == 2 && pstatus&PSTATUS_SHARED && !(pstatus&PSTATUS_MULTISHARED)) ||
06117       (new_nump > 2 && pstatus&PSTATUS_SHARED && pstatus&PSTATUS_MULTISHARED)); bool con2 =
06118       (!(pstatus&PSTATUS_GHOST) || pstatus&PSTATUS_SHARED); bool con3 = (new_nump < 3 ||
06119       (pstatus&PSTATUS_NOT_OWNED && ps[0] != (int)rank()) || (!(pstatus&PSTATUS_NOT_OWNED) && ps[0]
06120       == (int)rank())); std::cout<<"current rank = "<<rank()<<std::endl; std::cout<<"condition
06121       1::"<<con1<<std::endl; std::cout<<"condition 2::"<<con2<<std::endl; std::cout<<"condition
06122       3::"<<con3<<std::endl;*/
06123 
06124     // DBG
06125 
06126     assert( new_nump > 1 &&
06127             ( ( new_nump == 2 && pstatus & PSTATUS_SHARED &&
06128                 !( pstatus & PSTATUS_MULTISHARED ) ) ||  // If <= 2 must not be multishared
06129               ( new_nump > 2 && pstatus & PSTATUS_SHARED &&
06130                 pstatus & PSTATUS_MULTISHARED ) ) &&                         // If > 2 procs, must be multishared
06131             ( !( pstatus & PSTATUS_GHOST ) || pstatus & PSTATUS_SHARED ) &&  // If ghost, it must also be shared
06132             ( new_nump < 3 ||
06133               ( pstatus & PSTATUS_NOT_OWNED && ps[0] != (int)rank() ) ||      // I'm not owner and first proc not me
06134               ( !( pstatus & PSTATUS_NOT_OWNED ) && ps[0] == (int)rank() ) )  // I'm owner and first proc is me
06135     );
06136 
06137 #ifndef NDEBUG
06138     {
06139         // Check for duplicates in proc list
06140         std::set< unsigned int > dumprocs;
06141         int dp = 0;
06142         for( ; dp < old_nump && -1 != ps[dp]; dp++ )
06143             dumprocs.insert( ps[dp] );
06144         assert( dp == (int)dumprocs.size() );
06145     }
06146 #endif
06147 
06148     ErrorCode result;
06149     // Reset any old tag data that no longer applies
06150     if( old_nump > 2 && new_nump < 3 )
06151     {
06152         // Need to remove multishared tags
06153         result = mbImpl->tag_delete_data( sharedps_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:1" );
06154         result = mbImpl->tag_delete_data( sharedhs_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:2" );
06155         //    if (new_nump < 2)
06156         //      pstatus = 0x0;
06157         //    else if (ps[0] != (int)proc_config().proc_rank())
06158         //      pstatus |= PSTATUS_NOT_OWNED;
06159     }
06160     else if( ( old_nump < 3 && new_nump > 2 ) || ( old_nump > 1 && new_nump == 1 ) )
06161     {
06162         // Reset sharedp and sharedh tags
06163         int tmp_p          = -1;
06164         EntityHandle tmp_h = 0;
06165         result             = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, &tmp_p );MB_CHK_SET_ERR( result, "set_sharing_data:3" );
06166         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, &tmp_h );MB_CHK_SET_ERR( result, "set_sharing_data:4" );
06167     }
06168 
06169     assert( "check for multishared/owner I'm first proc" &&
06170             ( !( pstatus & PSTATUS_MULTISHARED ) || ( pstatus & ( PSTATUS_NOT_OWNED | PSTATUS_GHOST ) ) ||
06171               ( ps[0] == (int)rank() ) ) &&
06172             "interface entities should have > 1 proc" && ( !( pstatus & PSTATUS_INTERFACE ) || new_nump > 1 ) &&
06173             "ghost entities should have > 1 proc" && ( !( pstatus & PSTATUS_GHOST ) || new_nump > 1 ) );
06174 
06175     // Now set new data
06176     if( new_nump > 2 )
06177     {
06178         result = mbImpl->tag_set_data( sharedps_tag(), &ent, 1, ps );MB_CHK_SET_ERR( result, "set_sharing_data:5" );
06179         result = mbImpl->tag_set_data( sharedhs_tag(), &ent, 1, hs );MB_CHK_SET_ERR( result, "set_sharing_data:6" );
06180     }
06181     else
06182     {
06183         unsigned int j = ( ps[0] == (int)procConfig.proc_rank() ? 1 : 0 );
06184         assert( -1 != ps[j] );
06185         result = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, ps + j );MB_CHK_SET_ERR( result, "set_sharing_data:7" );
06186         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, hs + j );MB_CHK_SET_ERR( result, "set_sharing_data:8" );
06187     }
06188 
06189     result = mbImpl->tag_set_data( pstatus_tag(), &ent, 1, &pstatus );MB_CHK_SET_ERR( result, "set_sharing_data:9" );
06190 
06191     if( old_nump > 1 && new_nump < 2 ) sharedEnts.erase( ent );
06192 
06193     return result;
06194 }
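// Illustrative sketch (not part of the build): a caller demoting an entity from
// three-way to two-way sharing might invoke set_sharing_data as below. The entity and
// remote values are placeholders, and this rank is assumed to differ from the remaining
// sharing proc. With old_nump = 3 and new_nump = 2, the multishared sharedps/sharedhs
// tags are deleted and the single-valued sharedp/sharedh tags receive the one remaining
// remote proc/handle (skipping this rank's own entry).
//
//   int procs[MAX_SHARING_PROCS]            = { (int)rank(), 2, -1 };       // this rank listed first
//   EntityHandle handles[MAX_SHARING_PROCS] = { ent, remote_handle, 0 };
//   unsigned char pstat                     = PSTATUS_SHARED;               // no longer multishared
//   ErrorCode rval = set_sharing_data( ent, pstat, 3 /*old_nump*/, 2 /*new_nump*/, procs, handles );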
06195 
06196 ErrorCode ParallelComm::get_sent_ents( const bool is_iface, const int bridge_dim, const int ghost_dim,
06197                                        const int num_layers, const int addl_ents, Range* sent_ents, Range& allsent,
06198                                        TupleList& entprocs )
06199 {
06200     ErrorCode result;
06201     unsigned int ind;
06202     std::vector< unsigned int >::iterator proc_it;
06203     Range tmp_range;
06204 
06205     // Done in a separate loop over procs because sometimes later procs
06206     // need to add info to earlier procs' messages
06207     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
06208     {
06209         if( !is_iface )
06210         {
06211             result =
06212                 get_ghosted_entities( bridge_dim, ghost_dim, buffProcs[ind], num_layers, addl_ents, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get ghost layers" );
06213         }
06214         else
06215         {
06216             result = get_iface_entities( buffProcs[ind], -1, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get interface layers" );
06217         }
06218 
06219         // Filter out entities already shared with destination
06220         tmp_range.clear();
06221         result = filter_pstatus( sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
06222         if( !tmp_range.empty() ) sent_ents[ind] = subtract( sent_ents[ind], tmp_range );
06223 
06224         allsent.merge( sent_ents[ind] );
06225     }
06226 
06227     //===========================================
06228     // Need to get procs each entity is sent to
06229     //===========================================
06230 
06231     // Get the total # of proc/handle pairs
06232     int npairs = 0;
06233     for( ind = 0; ind < buffProcs.size(); ind++ )
06234         npairs += sent_ents[ind].size();
06235 
06236     // Allocate a TupleList of that size
06237     entprocs.initialize( 1, 0, 1, 0, npairs );
06238     entprocs.enableWriteAccess();
06239 
06240     // Put the proc/handle pairs in the list
06241     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
06242     {
06243         for( Range::iterator rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); ++rit )
06244         {
06245             entprocs.vi_wr[entprocs.get_n()]  = *proc_it;
06246             entprocs.vul_wr[entprocs.get_n()] = *rit;
06247             entprocs.inc_n();
06248         }
06249     }
06250     // Sort by handle
06251     moab::TupleList::buffer sort_buffer;
06252     sort_buffer.buffer_init( npairs );
06253     entprocs.sort( 1, &sort_buffer );
06254 
06255     entprocs.disableWriteAccess();
06256     sort_buffer.reset();
06257 
06258     return MB_SUCCESS;
06259 }
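// Illustrative sketch (not part of the build): entprocs.initialize( 1, 0, 1, 0, npairs )
// lays out npairs tuples of one int (destination proc, written through vi_wr) and one
// unsigned long (entity handle, written through vul_wr); sorting on field 1 groups all
// destinations of a given handle together. Assuming TupleList's read pointers
// vi_rd/vul_rd mirror the write pointers used above, reading the sorted list back
// might look like:
//
//   for( unsigned int t = 0; t < entprocs.get_n(); t++ )
//   {
//       unsigned int to_proc = entprocs.vi_rd[t];
//       EntityHandle handle  = (EntityHandle)entprocs.vul_rd[t];
//       // consecutive tuples with equal handles list every proc this entity is sent to
//   }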
06260 
06261 ErrorCode ParallelComm::exchange_ghost_cells( ParallelComm** pcs, unsigned int num_procs, int ghost_dim, int bridge_dim,
06262                                               int num_layers, int addl_ents, bool store_remote_handles,
06263                                               EntityHandle* file_sets )
06264 {
06265     // Static version of function, exchanging info through buffers rather
06266     // than through messages
06267 
06268     // If we're only finding out about existing ents, we have to be storing
06269     // remote handles too
06270     assert( num_layers > 0 || store_remote_handles );
06271 
06272     const bool is_iface = !num_layers;
06273 
06274     unsigned int ind;
06275     ParallelComm* pc;
06276     ErrorCode result = MB_SUCCESS;
06277 
06278     std::vector< Error* > ehs( num_procs );
06279     for( unsigned int i = 0; i < num_procs; i++ )
06280     {
06281         result = pcs[i]->get_moab()->query_interface( ehs[i] );
06282         assert( MB_SUCCESS == result );
06283     }
06284 
06285     // When this function is called, buffProcs should already have any
06286     // communicating procs
06287 
06288     //===========================================
06289     // Get entities to be sent to neighbors
06290     //===========================================
06291 
06292     // Done in a separate loop over procs because sometimes later procs
06293     // need to add info to earlier procs' messages
06294     Range sent_ents[MAX_SHARING_PROCS][MAX_SHARING_PROCS], allsent[MAX_SHARING_PROCS];
06295 
06296     //===========================================
06297     // Get entities to be sent to each neighbor and pack them into buffers
06298     //===========================================
06299     TupleList entprocs[MAX_SHARING_PROCS];
06300     for( unsigned int p = 0; p < num_procs; p++ )
06301     {
06302         pc     = pcs[p];
06303         result = pc->get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents[p], allsent[p],
06304                                     entprocs[p] );MB_CHK_SET_ERR( result, "p = " << p << ", get_sent_ents failed" );
06305 
06306         //===========================================
06307         // Pack entities into buffers
06308         //===========================================
06309         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
06310         {
06311             // Entities
06312             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06313             result = pc->pack_entities( sent_ents[p][ind], pc->localOwnedBuffs[ind], store_remote_handles,
06314                                         pc->buffProcs[ind], is_iface, &entprocs[p], &allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", packing entities failed" );
06315         }
06316 
06317         entprocs[p].reset();
06318     }
06319 
06320     //===========================================
06321     // Receive/unpack new entities
06322     //===========================================
06323     // Number of incoming messages for ghosts is the number of procs we
06324     // communicate with; for iface, it's the number of those with lower rank
06325     std::vector< std::vector< EntityHandle > > L1hloc[MAX_SHARING_PROCS], L1hrem[MAX_SHARING_PROCS];
06326     std::vector< std::vector< int > > L1p[MAX_SHARING_PROCS];
06327     std::vector< EntityHandle > L2hloc[MAX_SHARING_PROCS], L2hrem[MAX_SHARING_PROCS];
06328     std::vector< unsigned int > L2p[MAX_SHARING_PROCS];
06329     std::vector< EntityHandle > new_ents[MAX_SHARING_PROCS];
06330 
06331     for( unsigned int p = 0; p < num_procs; p++ )
06332     {
06333         L1hloc[p].resize( pcs[p]->buffProcs.size() );
06334         L1hrem[p].resize( pcs[p]->buffProcs.size() );
06335         L1p[p].resize( pcs[p]->buffProcs.size() );
06336     }
06337 
06338     for( unsigned int p = 0; p < num_procs; p++ )
06339     {
06340         pc = pcs[p];
06341 
06342         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
06343         {
06344             // Incoming ghost entities; unpack; returns entities received
06345             // both from sending proc and from owning proc (which may be different)
06346 
06347             // Buffer could be empty, which means there isn't any message to
06348             // unpack (due to this comm proc getting added as a result of indirect
06349             // communication); just skip this unpack
06350             if( pc->localOwnedBuffs[ind]->get_stored_size() == 0 ) continue;
06351 
06352             unsigned int to_p = pc->buffProcs[ind];
06353             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06354             result = pcs[to_p]->unpack_entities( pc->localOwnedBuffs[ind]->buff_ptr, store_remote_handles, ind,
06355                                                  is_iface, L1hloc[to_p], L1hrem[to_p], L1p[to_p], L2hloc[to_p],
06356                                                  L2hrem[to_p], L2p[to_p], new_ents[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack entities" );
06357         }
06358     }
06359 
06360     if( is_iface )
06361     {
06362         // Need to check over entities I sent and make sure I received
06363         // handles for them from all expected procs; if not, need to clean
06364         // them up
06365         for( unsigned int p = 0; p < num_procs; p++ )
06366         {
06367             result = pcs[p]->check_clean_iface( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06368         }
06369 
06370 #ifndef NDEBUG
06371         for( unsigned int p = 0; p < num_procs; p++ )
06372         {
06373             result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06374         }
06375         result = check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
06376 #endif
06377         return MB_SUCCESS;
06378     }
06379 
06380     //===========================================
06381     // Send local handles for new ghosts to owner, then add
06382     // those to ghost list for that owner
06383     //===========================================
06384     std::vector< unsigned int >::iterator proc_it;
06385     for( unsigned int p = 0; p < num_procs; p++ )
06386     {
06387         pc = pcs[p];
06388 
06389         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
06390         {
06391             // Pack the local handles of new ghosts to return to the sending proc
06392             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06393             result = pc->pack_remote_handles( L1hloc[p][ind], L1hrem[p][ind], L1p[p][ind], *proc_it,
06394                                               pc->localOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to pack remote handles" );
06395         }
06396     }
06397 
06398     //===========================================
06399     // Process remote handles of my ghosteds
06400     //===========================================
06401     for( unsigned int p = 0; p < num_procs; p++ )
06402     {
06403         pc = pcs[p];
06404 
06405         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
06406         {
06407             // Incoming remote handles
06408             unsigned int to_p = pc->buffProcs[ind];
06409             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06410             result = pcs[to_p]->unpack_remote_handles( p, pc->localOwnedBuffs[ind]->buff_ptr, L2hloc[to_p],
06411                                                        L2hrem[to_p], L2p[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack remote handles" );
06412         }
06413     }
06414 
06415 #ifndef NDEBUG
06416     for( unsigned int p = 0; p < num_procs; p++ )
06417     {
06418         result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06419     }
06420 
06421     result = ParallelComm::check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
06422 #endif
06423 
06424     if( file_sets )
06425     {
06426         for( unsigned int p = 0; p < num_procs; p++ )
06427         {
06428             if( new_ents[p].empty() ) continue;
06429             result = pcs[p]->get_moab()->add_entities( file_sets[p], &new_ents[p][0], new_ents[p].size() );MB_CHK_SET_ERR( result, "p = " << p << ", failed to add new entities to set" );
06430         }
06431     }
06432 
06433     return MB_SUCCESS;
06434 }
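// Illustrative sketch (not part of the build): this static overload is meant for the
// case where several ParallelComm instances live in one process and exchange through
// buffers rather than MPI messages (e.g., in a serial test). A hypothetical driver
// might look like the following; pc_a, pc_b, set_a and set_b are placeholders.
//
//   ParallelComm* pcs[2]      = { pc_a, pc_b };
//   EntityHandle file_sets[2] = { set_a, set_b };   // optional per-instance file sets
//   ErrorCode rval = ParallelComm::exchange_ghost_cells( pcs, 2, /*ghost_dim*/ 3,
//                                                        /*bridge_dim*/ 0, /*num_layers*/ 1,
//                                                        /*addl_ents*/ 0,
//                                                        /*store_remote_handles*/ true,
//                                                        file_sets );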
06435 
06436 ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& exchange_procs )
06437 {
06438     // Set buffers
06439     int n_proc = exchange_procs.size();
06440     for( int i = 0; i < n_proc; i++ )
06441         get_buffers( exchange_procs[i] );
06442     reset_all_buffers();
06443 
06444     // Post ghost irecv's for entities from all communicating procs
06445     // Index requests the same as buffer/sharing procs indices
06446     int success;
06447     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06448     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06449     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06450 
06451     int incoming = 0;
06452     for( int i = 0; i < n_proc; i++ )
06453     {
06454         int ind = get_buffers( exchange_procs[i] );
06455         incoming++;
06456         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
06457                            MB_MESG_ENTS_SIZE, incoming );
06458         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06459                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
06460         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
06461     }
06462 
06463     return MB_SUCCESS;
06464 }
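// Illustrative sketch (not part of the build): a hypothetical caller that already knows
// its neighbor ranks could pre-post receives before packing and sending its own data:
//
//   std::vector< unsigned int > neighbors;  // placeholder: ranks this proc will exchange with
//   // ... fill neighbors ...
//   ErrorCode rval = pcomm->post_irecv( neighbors );
//
// Two request slots are resized per buffer proc, but only the first of each pair
// (recvReqs[2*ind]) is filled here, with a receive of INITIAL_BUFF_SIZE bytes.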
06465 
06466 ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs )
06467 {
06468     // Set buffers
06469     int num = shared_procs.size();
06470     for( int i = 0; i < num; i++ )
06471         get_buffers( shared_procs[i] );
06472     reset_all_buffers();
06473     num = remoteOwnedBuffs.size();
06474     for( int i = 0; i < num; i++ )
06475         remoteOwnedBuffs[i]->set_stored_size();
06476     num = localOwnedBuffs.size();
06477     for( int i = 0; i < num; i++ )
06478         localOwnedBuffs[i]->set_stored_size();
06479 
06480     // Post ghost irecv's for entities from all communicating procs
06481     // Index requests the same as buffer/sharing procs indices
06482     int success;
06483     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06484     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06485     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06486 
06487     int incoming                           = 0;
06488     std::set< unsigned int >::iterator it  = recv_procs.begin();
06489     std::set< unsigned int >::iterator eit = recv_procs.end();
06490     for( ; it != eit; ++it )
06491     {
06492         int ind = get_buffers( *it );
06493         incoming++;
06494         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
06495                            MB_MESG_ENTS_SIZE, incoming );
06496         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06497                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
06498         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
06499     }
06500 
06501     return MB_SUCCESS;
06502 }
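// Note on the overload above: shared_procs determines which buffers are allocated and
// reset (and has their stored sizes set), while recv_procs determines the possibly
// smaller set of ranks for which an initial-size receive is actually posted.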
06503 
06504 ErrorCode ParallelComm::exchange_owned_meshs( std::vector< unsigned int >& exchange_procs,
06505                                               std::vector< Range* >& exchange_ents,
06506                                               std::vector< MPI_Request >& recv_ent_reqs,
06507                                               std::vector< MPI_Request >& recv_remoteh_reqs, bool store_remote_handles,
06508                                               bool wait_all, bool migrate, int dim )
06509 {
06510     // Filter out entities already shared with destination
06511     // Exchange twice for entities and sets
06512     ErrorCode result;
06513     std::vector< unsigned int > exchange_procs_sets;
06514     std::vector< Range* > exchange_sets;
06515     int n_proc = exchange_procs.size();
06516     for( int i = 0; i < n_proc; i++ )
06517     {
06518         Range set_range   = exchange_ents[i]->subset_by_type( MBENTITYSET );
06519         *exchange_ents[i] = subtract( *exchange_ents[i], set_range );
06520         Range* tmp_range  = new Range( set_range );
06521         exchange_sets.push_back( tmp_range );
06522         exchange_procs_sets.push_back( exchange_procs[i] );
06523     }
06524 
06525     if( dim == 2 )
06526     {
06527         // Exchange entities first
06528         result = exchange_owned_mesh( exchange_procs, exchange_ents, recvReqs, recvRemotehReqs, true,
06529                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
06530 
06531         // Exchange sets
06532         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recvReqs, recvRemotehReqs, false,
06533                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh sets" );
06534     }
06535     else
06536     {
06537         // Exchange entities first
06538         result = exchange_owned_mesh( exchange_procs, exchange_ents, recv_ent_reqs, recv_remoteh_reqs, false,
06539                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
06540 
06541         // Exchange sets
06542         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recv_ent_reqs, recv_remoteh_reqs, false,
06543                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh sets" );
06544     }
06545 
06546     for( int i = 0; i < n_proc; i++ )
06547         delete exchange_sets[i];
06548 
06549     // Build up the list of shared entities
06550     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
06551     int procs[MAX_SHARING_PROCS];
06552     EntityHandle handles[MAX_SHARING_PROCS];
06553     int nprocs;
06554     unsigned char pstat;
06555     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
06556     {
06557         if( mbImpl->dimension_from_handle( *vit ) > 2 ) continue;
06558         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data in exchange_owned_meshs" );
06559         std::sort( procs, procs + nprocs );
06560         std::vector< int > tmp_procs( procs, procs + nprocs );
06561         assert( tmp_procs.size() != 2 );
06562         proc_nvecs[tmp_procs].push_back( *vit );
06563     }
06564 
06565     // Create interface sets from shared entities
06566     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
06567 
06568     return MB_SUCCESS;
06569 }
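// Illustrative sketch (not part of the build) of the entity/set split used above, which
// relies on Range::subset_by_type and the free function subtract; the non-set entities
// are exchanged first and the sets afterwards:
//
//   Range ents;                                 // placeholder: everything to migrate
//   Range sets    = ents.subset_by_type( MBENTITYSET );
//   Range nonsets = subtract( ents, sets );     // everything that is not an entity set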
06570 
06571 ErrorCode ParallelComm::exchange_owned_mesh( std::vector< unsigned int >& exchange_procs,
06572                                              std::vector< Range* >& exchange_ents,
06573                                              std::vector< MPI_Request >& recv_ent_reqs,
06574                                              std::vector< MPI_Request >& recv_remoteh_reqs, const bool recv_posted,
06575                                              bool store_remote_handles, bool wait_all, bool migrate )
06576 {
06577 #ifdef MOAB_HAVE_MPE
06578     if( myDebug->get_verbosity() == 2 )
06579     { MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting owned ents exchange." ); }
06580 #endif
06581 
06582     myDebug->tprintf( 1, "Entering exchange_owned_mesh\n" );
06583     if( myDebug->get_verbosity() == 4 )
06584     {
06585         msgs.clear();
06586         msgs.reserve( MAX_SHARING_PROCS );
06587     }
06588     unsigned int i;
06589     int ind, success;
06590     ErrorCode result = MB_SUCCESS;
06591     int incoming1 = 0, incoming2 = 0;
06592 
06593     // Set buffProcs with communicating procs
06594     unsigned int n_proc = exchange_procs.size();
06595     for( i = 0; i < n_proc; i++ )
06596     {
06597         ind    = get_buffers( exchange_procs[i] );
06598         result = add_verts( *exchange_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
06599 
06600         // Filter out entities already shared with destination
06601         Range tmp_range;
06602         result = filter_pstatus( *exchange_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
06603         if( !tmp_range.empty() ) { *exchange_ents[i] = subtract( *exchange_ents[i], tmp_range ); }
06604     }
06605 
06606     //===========================================
06607     // Post ghost irecv's for entities from all communicating procs
06608     //===========================================
06609 #ifdef MOAB_HAVE_MPE
06610     if( myDebug->get_verbosity() == 2 )
06611     { MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." ); }
06612 #endif
06613 
06614     // Index reqs the same as buffer/sharing procs indices
06615     if( !recv_posted )
06616     {
06617         reset_all_buffers();
06618         recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06619         recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06620         sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06621 
06622         for( i = 0; i < n_proc; i++ )
06623         {
06624             ind = get_buffers( exchange_procs[i] );
06625             incoming1++;
06626             PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr,
06627                                INITIAL_BUFF_SIZE, MB_MESG_ENTS_SIZE, incoming1 );
06628             success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06629                                  MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
06630             if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
06631         }
06632     }
06633     else
06634         incoming1 += n_proc;
06635 
06636     //===========================================
06637     // Get entities to be sent to neighbors
06638     // Need to get procs each entity is sent to
06639     //===========================================
06640     Range allsent, tmp_range;
06641     int dum_ack_buff;
06642     int npairs = 0;
06643     TupleList entprocs;
06644     for( i = 0; i < n_proc; i++ )
06645     {
06646         int n_ents = exchange_ents[i]->size();
06647         if( n_ents > 0 )
06648         {
06649             npairs += n_ents;  // Get the total # of proc/handle pairs
06650             allsent.merge( *exchange_ents[i] );
06651         }
06652     }
06653 
06654     // Allocate a TupleList of that size
06655     entprocs.initialize( 1, 0, 1, 0, npairs );
06656     entprocs.enableWriteAccess();
06657 
06658     // Put the proc/handle pairs in the list
06659     for( i = 0; i < n_proc; i++ )
06660     {
06661         for( Range::iterator rit = exchange_ents[i]->begin(); rit != exchange_ents[i]->end(); ++rit )
06662         {
06663             entprocs.vi_wr[entprocs.get_n()]  = exchange_procs[i];
06664             entprocs.vul_wr[entprocs.get_n()] = *rit;
06665             entprocs.inc_n();
06666         }
06667     }
06668 
06669     // Sort by handle
06670     moab::TupleList::buffer sort_buffer;
06671     sort_buffer.buffer_init( npairs );
06672     entprocs.sort( 1, &sort_buffer );
06673     sort_buffer.reset();
06674 
06675     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
06676                       (unsigned long)allsent.size() );
06677 
06678     //===========================================
06679     // Pack and send ents from this proc to others
06680     //===========================================
06681     for( i = 0; i < n_proc; i++ )
06682     {
06683         ind = get_buffers( exchange_procs[i] );
06684         myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", exchange_ents[i]->compactness(),
06685                           (unsigned long)exchange_ents[i]->size() );
06686         // Reserve space on front for size and for initial buff size
06687         localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
06688         result = pack_buffer( *exchange_ents[i], false, true, store_remote_handles, buffProcs[ind],
06689                               localOwnedBuffs[ind], &entprocs, &allsent );MB_CHK_SET_ERR( result, "Failed to pack entities in owned mesh exchange" );
06690 
06691         if( myDebug->get_verbosity() == 4 )
06692         {
06693             msgs.resize( msgs.size() + 1 );
06694             msgs.back() = new Buffer( *localOwnedBuffs[ind] );
06695         }
06696 
06697         // Send the buffer (size stored in front in send_buffer)
06698         result = send_buffer( exchange_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
06699                               recv_ent_reqs[3 * ind + 2], &dum_ack_buff, incoming1, MB_MESG_REMOTEH_SIZE,
06700                               ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recv_remoteh_reqs[3 * ind],
06701                               &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost exchange" );
06702     }
06703 
06704     entprocs.reset();
06705 
06706     //===========================================
06707     // Receive/unpack new entities
06708     //===========================================
06709     // Number of incoming messages is the number of procs we communicate with
06710     MPI_Status status;
06711     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
06712     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
06713     std::vector< std::vector< int > > L1p( buffProcs.size() );
06714     std::vector< EntityHandle > L2hloc, L2hrem;
06715     std::vector< unsigned int > L2p;
06716     std::vector< EntityHandle > new_ents;
06717 
06718     while( incoming1 )
06719     {
06720         // Wait for all recvs of ents before proceeding to sending remote handles,
06721         // b/c some procs may have sent to a 3rd proc ents owned by me;
06722         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
06723 
06724         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &ind, &status );
06725         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
06726 
06727         PRINT_DEBUG_RECD( status );
06728 
06729         // OK, received something; decrement incoming counter
06730         incoming1--;
06731         bool done = false;
06732 
06733         // ind may refer to any of this proc's three request slots; compute the base index of the triple
06734         unsigned int base_ind = 3 * ( ind / 3 );
06735         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 3], recv_ent_reqs[base_ind + 1],
06736                               recv_ent_reqs[base_ind + 2], incoming1, localOwnedBuffs[ind / 3], sendReqs[base_ind + 1],
06737                               sendReqs[base_ind + 2], done, ( store_remote_handles ? localOwnedBuffs[ind / 3] : NULL ),
06738                               MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind + 1], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
06739 
06740         if( done )
06741         {
06742             if( myDebug->get_verbosity() == 4 )
06743             {
06744                 msgs.resize( msgs.size() + 1 );
06745                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 3] );
06746             }
06747 
06748             // Message completely received - process buffer that was sent
06749             remoteOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
06750             result = unpack_buffer( remoteOwnedBuffs[ind / 3]->buff_ptr, store_remote_handles, buffProcs[ind / 3],
06751                                     ind / 3, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
06752             if( MB_SUCCESS != result )
06753             {
06754                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
06755                 print_buffer( remoteOwnedBuffs[ind / 3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 3], false );