LCOV - code coverage report
Current view: top level - src/parallel - ParallelComm.cpp (source / functions)
Test:         coverage_sk.info
Date:         2020-12-16 07:07:30

                    Hit      Total   Coverage
Lines:              482       4215     11.4 %
Functions:           42        202     20.8 %
Branches:           550      16144      3.4 %

           Branch data     Line data    Source code
       1                 :            : #include "moab/Interface.hpp"
       2                 :            : #include "moab/ParallelComm.hpp"
       3                 :            : #include "moab/WriteUtilIface.hpp"
       4                 :            : #include "moab/ReadUtilIface.hpp"
       5                 :            : #include "SequenceManager.hpp"
       6                 :            : #include "moab/Error.hpp"
       7                 :            : #include "EntitySequence.hpp"
       8                 :            : #include "MBTagConventions.hpp"
       9                 :            : #include "moab/Skinner.hpp"
      10                 :            : #include "MBParallelConventions.h"
      11                 :            : #include "moab/Core.hpp"
      12                 :            : #include "ElementSequence.hpp"
      13                 :            : #include "moab/CN.hpp"
      14                 :            : #include "moab/RangeMap.hpp"
      15                 :            : #include "moab/MeshTopoUtil.hpp"
      16                 :            : #include "TagInfo.hpp"
      17                 :            : #include "DebugOutput.hpp"
      18                 :            : #include "SharedSetData.hpp"
      19                 :            : #include "moab/ScdInterface.hpp"
      20                 :            : #include "moab/TupleList.hpp"
      21                 :            : #include "moab/gs.hpp"
      22                 :            : 
      23                 :            : #include <iostream>
      24                 :            : #include <sstream>
      25                 :            : #include <algorithm>
      26                 :            : #include <functional>
      27                 :            : #include <numeric>
      28                 :            : 
      29                 :            : #include <math.h>
      30                 :            : #include <cstdlib>
      31                 :            : #include <assert.h>
      32                 :            : 
      33                 :            : #ifdef MOAB_HAVE_MPI
      34                 :            : #include "moab_mpi.h"
      35                 :            : #endif
      36                 :            : #ifdef MOAB_HAVE_MPE
      37                 :            : #include "mpe.h"
      38                 :            : int IFACE_START, IFACE_END;
      39                 :            : int GHOST_START, GHOST_END;
      40                 :            : int SHAREDV_START, SHAREDV_END;
      41                 :            : int RESOLVE_START, RESOLVE_END;
      42                 :            : int ENTITIES_START, ENTITIES_END;
      43                 :            : int RHANDLES_START, RHANDLES_END;
      44                 :            : int OWNED_START, OWNED_END;
      45                 :            : #endif
      46                 :            : 
      47                 :            : namespace moab
      48                 :            : {
      49                 :            : 
      50                 :            : const unsigned int ParallelComm::INITIAL_BUFF_SIZE = 1024;
      51                 :            : 
      52                 :            : const int MAX_BCAST_SIZE = ( 1 << 28 );
      53                 :            : 
      54                 :         57 : std::vector< ParallelComm::Buffer* > msgs;
      55                 :            : unsigned int __PACK_num = 0, __UNPACK_num = 0, __PACK_count = 0, __UNPACK_count = 0;
      56                 :         57 : std::string __PACK_string, __UNPACK_string;
      57                 :            : 
      58                 :            : #ifdef DEBUG_PACKING_TIMES
      59                 :            : #define PC( n, m )                                                            \
      60                 :            :     {                                                                         \
      61                 :            :         if( __PACK_num == (unsigned int)n && __PACK_string == m )             \
      62                 :            :             __PACK_count++;                                                   \
      63                 :            :         else                                                                  \
      64                 :            :         {                                                                     \
      65                 :            :             if( __PACK_count > 1 ) std::cerr << " (" << __PACK_count << "x)"; \
      66                 :            :             __PACK_count  = 1;                                                \
      67                 :            :             __PACK_string = m;                                                \
      68                 :            :             __PACK_num    = n;                                                \
      69                 :            :             std::cerr << std::endl << "PACK: " << n << m;                     \
      70                 :            :         }                                                                     \
      71                 :            :     }
      72                 :            : #define UPC( n, m )                                                              \
      73                 :            :     {                                                                            \
      74                 :            :         if( __UNPACK_num == (unsigned int)n && __UNPACK_string == m )            \
      75                 :            :             __UNPACK_count++;                                                    \
      76                 :            :         else                                                                     \
      77                 :            :         {                                                                        \
      78                 :            :             if( __UNPACK_count > 1 ) std::cerr << "(" << __UNPACK_count << "x)"; \
      79                 :            :             __UNPACK_count  = 1;                                                 \
      80                 :            :             __UNPACK_string = m;                                                 \
      81                 :            :             __UNPACK_num    = n;                                                 \
      82                 :            :             std::cerr << std::endl << "UNPACK: " << n << m;                      \
      83                 :            :         }                                                                        \
      84                 :            :     }
      85                 :            : #else
      86                 :            : #define PC( n, m )
      87                 :            : #define UPC( n, m )
      88                 :            : #endif
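
When DEBUG_PACKING_TIMES is defined, the PC()/UPC() macros above trace every pack/unpack
operation to stderr and collapse runs of identical operations into a single "(Nx)" suffix
instead of printing one line per call. Below is a minimal, self-contained sketch of that
run-length-collapsing trace pattern (illustrative only; trace_pack and its globals are not
MOAB names):

    #include <iostream>
    #include <string>

    static unsigned trace_num = 0, trace_count = 0;
    static std::string trace_label;

    // Same idea as PC( n, m ): consecutive identical calls only bump a counter;
    // the repeat count is flushed as " (Nx)" when a different operation arrives.
    static void trace_pack( unsigned n, const std::string& label )
    {
        if( n == trace_num && label == trace_label )
        {
            ++trace_count;
            return;
        }
        if( trace_count > 1 ) std::cerr << " (" << trace_count << "x)";
        trace_count = 1;
        trace_label = label;
        trace_num   = n;
        std::cerr << std::endl << "PACK: " << n << label;
    }

    int main()
    {
        trace_pack( 1, " ints" );
        trace_pack( 8, " handles" );
        trace_pack( 8, " handles" );  // folded into the previous line as " (2x)"
        trace_pack( 3, " doubles" );
        std::cerr << std::endl;
        return 0;
    }
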
      89                 :            : 
      90                 :            : template < typename T >
      91                 :          0 : static inline void UNPACK( unsigned char*& buff, T* val, size_t count )
      92                 :            : {
      93                 :          0 :     memcpy( val, buff, count * sizeof( T ) );
      94                 :          0 :     buff += count * sizeof( T );
      95                 :          0 : }
      96                 :            : 
      97                 :            : template < typename T >
      98                 :          0 : static inline void PACK( unsigned char*& buff, const T* val, size_t count )
      99                 :            : {
     100                 :          0 :     memcpy( buff, val, count * sizeof( T ) );
     101                 :          0 :     buff += count * sizeof( T );
     102                 :          0 : }
     103                 :            : 
     104                 :          0 : static inline void PACK_INTS( unsigned char*& buff, const int* int_val, size_t num )
     105                 :            : {
     106                 :          0 :     PACK( buff, int_val, num );
     107                 :            :     PC( num, " ints" );
     108                 :          0 : }
     109                 :            : 
     110                 :          0 : static inline void PACK_INT( unsigned char*& buff, int int_val )
     111                 :            : {
     112                 :          0 :     PACK_INTS( buff, &int_val, 1 );
     113                 :          0 : }
     114                 :            : 
     115                 :          0 : static inline void PACK_DBLS( unsigned char*& buff, const double* dbl_val, size_t num )
     116                 :            : {
     117                 :          0 :     PACK( buff, dbl_val, num );
     118                 :            :     PC( num, " doubles" );
     119                 :          0 : }
     120                 :            : 
     121                 :            : // static inline
     122                 :            : // void PACK_DBL(unsigned char*& buff, const double dbl_val)
     123                 :            : //{ PACK_DBLS(buff, &dbl_val, 1); }
     124                 :            : 
     125                 :          0 : static inline void PACK_EH( unsigned char*& buff, const EntityHandle* eh_val, size_t num )
     126                 :            : {
     127                 :          0 :     PACK( buff, eh_val, num );
     128                 :            :     PC( num, " handles" );
     129                 :          0 : }
     130                 :            : 
     131                 :            : // static inline
     132                 :            : // void PACK_CHAR_64(unsigned char*& buff, const char* str)
     133                 :            : //{
     134                 :            : //  memcpy(buff, str, 64);
     135                 :            : //  buff += 64;
     136                 :            : //  PC(64, " chars");
     137                 :            : //}
     138                 :            : 
     139                 :          0 : static inline void PACK_VOID( unsigned char*& buff, const void* val, size_t num )
     140                 :            : {
     141                 :          0 :     PACK( buff, reinterpret_cast< const unsigned char* >( val ), num );
     142                 :            :     PC( num, " void" );
     143                 :          0 : }
     144                 :            : 
     145                 :          0 : static inline void PACK_BYTES( unsigned char*& buff, const void* val, int num )
     146                 :            : {
     147                 :          0 :     PACK_INT( buff, num );
     148                 :          0 :     PACK_VOID( buff, val, num );
     149                 :          0 : }
     150                 :            : 
     151                 :          0 : static inline void PACK_RANGE( unsigned char*& buff, const Range& rng )
     152                 :            : {
     153 [ #  # ][ #  # ]:          0 :     PACK_INT( buff, rng.psize() );
     154         [ #  # ]:          0 :     Range::const_pair_iterator cit;
     155 [ #  # ][ #  # ]:          0 :     for( cit = rng.const_pair_begin(); cit != rng.const_pair_end(); ++cit )
         [ #  # ][ #  # ]
                 [ #  # ]
     156                 :            :     {
     157 [ #  # ][ #  # ]:          0 :         EntityHandle eh[2] = { cit->first, cit->second };
     158         [ #  # ]:          0 :         PACK_EH( buff, eh, 2 );
     159                 :            :     }
     160                 :            :     PC( rng.psize(), "-subranged range" );
     161                 :          0 : }
     162                 :            : 
     163                 :          0 : static inline void UNPACK_INTS( unsigned char*& buff, int* int_val, size_t num )
     164                 :            : {
     165                 :          0 :     UNPACK( buff, int_val, num );
     166                 :            :     UPC( num, " ints" );
     167                 :          0 : }
     168                 :            : 
     169                 :          0 : static inline void UNPACK_INT( unsigned char*& buff, int& int_val )
     170                 :            : {
     171                 :          0 :     UNPACK_INTS( buff, &int_val, 1 );
     172                 :          0 : }
     173                 :            : 
     174                 :          0 : static inline void UNPACK_DBLS( unsigned char*& buff, double* dbl_val, size_t num )
     175                 :            : {
     176                 :          0 :     UNPACK( buff, dbl_val, num );
     177                 :            :     UPC( num, " doubles" );
     178                 :          0 : }
     179                 :            : 
     180                 :          0 : static inline void UNPACK_DBL( unsigned char*& buff, double& dbl_val )
     181                 :            : {
     182                 :          0 :     UNPACK_DBLS( buff, &dbl_val, 1 );
     183                 :          0 : }
     184                 :            : 
     185                 :          0 : static inline void UNPACK_EH( unsigned char*& buff, EntityHandle* eh_val, size_t num )
     186                 :            : {
     187                 :          0 :     UNPACK( buff, eh_val, num );
     188                 :            :     UPC( num, " handles" );
     189                 :          0 : }
     190                 :            : 
     191                 :            : // static inline
     192                 :            : // void UNPACK_CHAR_64(unsigned char*& buff, char* char_val)
     193                 :            : //{
     194                 :            : //  memcpy(buff, char_val, 64);
     195                 :            : //  buff += 64;
     196                 :            : //  UPC(64, " chars");
     197                 :            : //}
     198                 :            : 
     199                 :          0 : static inline void UNPACK_VOID( unsigned char*& buff, void* val, size_t num )
     200                 :            : {
     201                 :          0 :     UNPACK( buff, reinterpret_cast< unsigned char* >( val ), num );
     202                 :            :     UPC( num, " void" );
     203                 :          0 : }
     204                 :            : 
     205                 :          0 : static inline void UNPACK_TYPE( unsigned char*& buff, EntityType& type )
     206                 :            : {
     207                 :          0 :     int int_type = MBMAXTYPE;
     208         [ #  # ]:          0 :     UNPACK_INT( buff, int_type );
     209                 :          0 :     type = static_cast< EntityType >( int_type );
     210 [ #  # ][ #  # ]:          0 :     assert( type >= MBVERTEX && type <= MBMAXTYPE );
     211                 :          0 : }
     212                 :            : 
     213                 :          0 : static inline void UNPACK_RANGE( unsigned char*& buff, Range& rng )
     214                 :            : {
     215                 :            :     int num_subs;
     216                 :            :     EntityHandle eh[2];
     217         [ #  # ]:          0 :     UNPACK_INT( buff, num_subs );
     218         [ #  # ]:          0 :     for( int i = 0; i < num_subs; i++ )
     219                 :            :     {
     220                 :            :         UPC( num_subs, "-subranged range" );
     221         [ #  # ]:          0 :         UNPACK_EH( buff, eh, 2 );
     222         [ #  # ]:          0 :         rng.insert( eh[0], eh[1] );
     223                 :            :     }
     224                 :          0 : }
     225                 :            : 
     226                 :            : enum MBMessageTag
     227                 :            : {
     228                 :            :     MB_MESG_ANY = MPI_ANY_TAG,
     229                 :            :     MB_MESG_ENTS_ACK,
     230                 :            :     MB_MESG_ENTS_SIZE,
     231                 :            :     MB_MESG_ENTS_LARGE,
     232                 :            :     MB_MESG_REMOTEH_ACK,
     233                 :            :     MB_MESG_REMOTEH_SIZE,
     234                 :            :     MB_MESG_REMOTEH_LARGE,
     235                 :            :     MB_MESG_TAGS_ACK,
     236                 :            :     MB_MESG_TAGS_SIZE,
     237                 :            :     MB_MESG_TAGS_LARGE
     238                 :            : };
     239                 :            : 
     240                 :          0 : static inline size_t RANGE_SIZE( const Range& rng )
     241                 :            : {
     242                 :          0 :     return 2 * sizeof( EntityHandle ) * rng.psize() + sizeof( int );
     243                 :            : }
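
As a worked example of the layout these helpers agree on (assuming a 4-byte int and an
8-byte EntityHandle): a Range holding the intervals 1-5, 9, and 12-14 has rng.psize() == 3,
so PACK_RANGE writes

    [ int 3 ][ EH 1 ][ EH 5 ][ EH 9 ][ EH 9 ][ EH 12 ][ EH 14 ]

which occupies RANGE_SIZE( rng ) = 2 * sizeof( EntityHandle ) * 3 + sizeof( int )
= 48 + 4 = 52 bytes, and UNPACK_RANGE reads the same pairs back via rng.insert( eh[0], eh[1] ).
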
     244                 :            : 
     245                 :            : #define PRINT_DEBUG_ISEND( A, B, C, D, E )    print_debug_isend( ( A ), ( B ), ( C ), ( D ), ( E ) )
     246                 :            : #define PRINT_DEBUG_IRECV( A, B, C, D, E, F ) print_debug_irecv( ( A ), ( B ), ( C ), ( D ), ( E ), ( F ) )
     247                 :            : #define PRINT_DEBUG_RECD( A )                 print_debug_recd( ( A ) )
     248                 :            : #define PRINT_DEBUG_WAITANY( A, B, C )        print_debug_waitany( ( A ), ( B ), ( C ) )
     249                 :            : 
     250                 :          0 : void ParallelComm::print_debug_isend( int from, int to, unsigned char* buff, int tag, int sz )
     251                 :            : {
     252                 :          0 :     myDebug->tprintf( 3, "Isend, %d->%d, buffer ptr = %p, tag=%d, size=%d\n", from, to, (void*)buff, tag, sz );
     253                 :          0 : }
     254                 :            : 
     255                 :          0 : void ParallelComm::print_debug_irecv( int to, int from, unsigned char* buff, int sz, int tag, int incoming )
     256                 :            : {
     257                 :          0 :     myDebug->tprintf( 3, "Irecv, %d<-%d, buffer ptr = %p, tag=%d, size=%d", to, from, (void*)buff, tag, sz );
     258         [ #  # ]:          0 :     if( tag < MB_MESG_REMOTEH_ACK )
     259                 :          0 :         myDebug->printf( 3, ", incoming1=%d\n", incoming );
     260         [ #  # ]:          0 :     else if( tag < MB_MESG_TAGS_ACK )
     261                 :          0 :         myDebug->printf( 3, ", incoming2=%d\n", incoming );
     262                 :            :     else
     263                 :          0 :         myDebug->printf( 3, ", incoming=%d\n", incoming );
     264                 :          0 : }
     265                 :            : 
     266                 :          0 : void ParallelComm::print_debug_recd( MPI_Status status )
     267                 :            : {
     268         [ #  # ]:          0 :     if( myDebug->get_verbosity() == 3 )
     269                 :            :     {
     270                 :            :         int this_count;
     271         [ #  # ]:          0 :         int success = MPI_Get_count( &status, MPI_UNSIGNED_CHAR, &this_count );
     272         [ #  # ]:          0 :         if( MPI_SUCCESS != success ) this_count = -1;
     273                 :            :         myDebug->tprintf( 3, "Received from %d, count = %d, tag = %d\n", status.MPI_SOURCE, this_count,
     274         [ #  # ]:          0 :                           status.MPI_TAG );
     275                 :            :     }
     276                 :          0 : }
     277                 :            : 
     278                 :          0 : void ParallelComm::print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc )
     279                 :            : {
     280         [ #  # ]:          0 :     if( myDebug->get_verbosity() == 3 )
     281                 :            :     {
     282                 :          0 :         myDebug->tprintf( 3, "Waitany, p=%d, ", proc );
     283         [ #  # ]:          0 :         if( tag < MB_MESG_REMOTEH_ACK )
     284                 :          0 :             myDebug->print( 3, ", recv_ent_reqs=" );
     285         [ #  # ]:          0 :         else if( tag < MB_MESG_TAGS_ACK )
     286                 :          0 :             myDebug->print( 3, ", recv_remoteh_reqs=" );
     287                 :            :         else
     288                 :          0 :             myDebug->print( 3, ", recv_tag_reqs=" );
     289         [ #  # ]:          0 :         for( unsigned int i = 0; i < reqs.size(); i++ )
     290                 :          0 :             myDebug->printf( 3, " %p", (void*)(intptr_t)reqs[i] );
     291                 :          0 :         myDebug->print( 3, "\n" );
     292                 :            :     }
     293                 :          0 : }
     294                 :            : 
      295                 :            : /** Name of tag used to store the ParallelComm index on mesh partitioning sets */
     296                 :            : const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
     297                 :            : 
     298                 :            : /** \brief Tag storing parallel communication objects
     299                 :            :  *
      300                 :            :  * This tag stores pointers to ParallelComm communication
      301                 :            :  * objects; one is allocated for each distinct communicator
      302                 :            :  * used to read a mesh. Each ParallelComm stores the
      303                 :            :  * partition and interface sets corresponding to its parallel mesh.
      304                 :            :  * By default, a parallel read uses the first ParallelComm object
      305                 :            :  * on the interface instance; if ReadParallel is instantiated with
      306                 :            :  * a ParallelComm, it also adds that object to the interface instance.
     307                 :            :  *
     308                 :            :  * Tag type: opaque
     309                 :            :  * Tag size: MAX_SHARING_PROCS*sizeof(ParallelComm*)
     310                 :            :  */
     311                 :            : #define PARALLEL_COMM_TAG_NAME "__PARALLEL_COMM"
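
A minimal sketch of how this tag is consumed (it mirrors add_pcomm()/remove_pcomm() below
and is not a public ParallelComm API; it assumes MOAB's Interface::tag_get_handle and
tag_get_data calls): the root set (handle 0) carries an opaque tag whose value is an array
of MAX_SHARING_PROCS ParallelComm pointers, one slot per registered instance.

    // mbImpl: a moab::Interface* (e.g. the address of a moab::Core);
    // MAX_SHARING_PROCS comes from MBParallelConventions.h.
    std::vector< moab::ParallelComm* > pc_array( MAX_SHARING_PROCS, NULL );
    moab::Tag pc_tag;
    moab::ErrorCode rval = mbImpl->tag_get_handle( PARALLEL_COMM_TAG_NAME,
                                                   MAX_SHARING_PROCS * sizeof( moab::ParallelComm* ),
                                                   moab::MB_TYPE_OPAQUE, pc_tag );
    const moab::EntityHandle root = 0;
    if( moab::MB_SUCCESS == rval ) rval = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
    // pc_array[i] now holds the ParallelComm registered with pcommID == i, or NULL if slot i is free.
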
     312                 :            : 
     313                 :         45 : ParallelComm::ParallelComm( Interface* impl, MPI_Comm cm, int* id )
     314                 :            :     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
     315                 :            :       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
     316 [ +  - ][ +  - ]:         45 :       myDebug( NULL )
         [ +  - ][ +  - ]
         [ +  - ][ +  - ]
         [ +  - ][ +  - ]
         [ +  - ][ +  - ]
     317                 :            : {
     318         [ +  - ]:         45 :     initialize();
     319 [ +  - ][ +  - ]:         45 :     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
                 [ +  - ]
     320         [ +  + ]:         45 :     if( id ) *id = pcommID;
     321                 :         45 : }
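
Typical construction, as a hedged usage sketch: one ParallelComm is created per communicator,
after MPI is initialized, and the optional id output reports the pcommID under which the
object was registered on the interface instance.

    #include "moab/Core.hpp"
    #include "moab/ParallelComm.hpp"
    #include "moab_mpi.h"

    int main( int argc, char** argv )
    {
        MPI_Init( &argc, &argv );
        {
            moab::Core mb;
            int pcomm_id = -1;
            moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD, &pcomm_id );
            // ... load the mesh, resolve shared entities, exchange ghosts, etc. ...
        }  // pcomm (and its registration on the instance) is torn down before MPI_Finalize
        MPI_Finalize();
        return 0;
    }
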
     322                 :            : 
     323                 :          0 : ParallelComm::ParallelComm( Interface* impl, std::vector< unsigned char >& /*tmp_buff*/, MPI_Comm cm, int* id )
     324                 :            :     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
     325                 :            :       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
     326 [ #  # ][ #  # ]:          0 :       myDebug( NULL )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
     327                 :            : {
     328         [ #  # ]:          0 :     initialize();
     329 [ #  # ][ #  # ]:          0 :     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
                 [ #  # ]
     330         [ #  # ]:          0 :     if( id ) *id = pcommID;
     331                 :          0 : }
     332                 :            : 
     333                 :         84 : ParallelComm::~ParallelComm()
     334                 :            : {
     335                 :         42 :     remove_pcomm( this );
     336                 :         42 :     delete_all_buffers();
     337         [ +  - ]:         42 :     delete myDebug;
     338         [ +  - ]:         42 :     delete sharedSetData;
     339                 :         42 : }
     340                 :            : 
     341                 :         45 : void ParallelComm::initialize()
     342                 :            : {
     343         [ -  + ]:         45 :     Core* core      = dynamic_cast< Core* >( mbImpl );
     344         [ +  - ]:         45 :     sequenceManager = core->sequence_manager();
     345         [ +  - ]:         45 :     mbImpl->query_interface( errorHandler );
     346                 :            : 
     347                 :            :     // Initialize MPI, if necessary
     348                 :         45 :     int flag   = 1;
     349         [ +  - ]:         45 :     int retval = MPI_Initialized( &flag );
     350 [ +  - ][ -  + ]:         45 :     if( MPI_SUCCESS != retval || !flag )
     351                 :            :     {
     352                 :          0 :         int argc    = 0;
     353                 :          0 :         char** argv = NULL;
     354                 :            : 
     355                 :            :         // mpi not initialized yet - initialize here
     356         [ #  # ]:          0 :         retval = MPI_Init( &argc, &argv );
     357         [ #  # ]:          0 :         assert( MPI_SUCCESS == retval );
     358                 :            :     }
     359                 :            : 
     360                 :            :     // Reserve space for vectors
     361         [ +  - ]:         45 :     buffProcs.reserve( MAX_SHARING_PROCS );
     362         [ +  - ]:         45 :     localOwnedBuffs.reserve( MAX_SHARING_PROCS );
     363         [ +  - ]:         45 :     remoteOwnedBuffs.reserve( MAX_SHARING_PROCS );
     364                 :            : 
     365         [ +  - ]:         45 :     pcommID = add_pcomm( this );
     366                 :            : 
     367         [ +  - ]:         45 :     if( !myDebug )
     368                 :            :     {
     369 [ +  - ][ +  - ]:         45 :         myDebug = new DebugOutput( "ParallelComm", std::cerr );
     370 [ +  - ][ +  - ]:         45 :         myDebug->set_rank( procConfig.proc_rank() );
     371                 :            :     }
     372                 :         45 : }
     373                 :            : 
     374                 :         45 : int ParallelComm::add_pcomm( ParallelComm* pc )
     375                 :            : {
     376                 :            :     // Add this pcomm to instance tag
     377         [ +  - ]:         45 :     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS, (ParallelComm*)NULL );
     378         [ +  - ]:         45 :     Tag pc_tag = pcomm_tag( mbImpl, true );
     379         [ -  + ]:         45 :     assert( 0 != pc_tag );
     380                 :            : 
     381                 :         45 :     const EntityHandle root = 0;
     382 [ +  - ][ +  - ]:         45 :     ErrorCode result        = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
     383 [ +  + ][ -  + ]:         45 :     if( MB_SUCCESS != result && MB_TAG_NOT_FOUND != result ) return -1;
     384                 :         45 :     int index = 0;
     385 [ +  - ][ +  - ]:         46 :     while( index < MAX_SHARING_PROCS && pc_array[index] )
         [ +  + ][ +  + ]
     386                 :          1 :         index++;
     387         [ -  + ]:         45 :     if( index == MAX_SHARING_PROCS )
     388                 :            :     {
     389                 :          0 :         index = -1;
     390                 :          0 :         assert( false );
     391                 :            :     }
     392                 :            :     else
     393                 :            :     {
     394         [ +  - ]:         45 :         pc_array[index] = pc;
     395 [ +  - ][ +  - ]:         45 :         mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
     396                 :            :     }
     397                 :         45 :     return index;
     398                 :            : }
     399                 :            : 
     400                 :         42 : void ParallelComm::remove_pcomm( ParallelComm* pc )
     401                 :            : {
     402                 :            :     // Remove this pcomm from instance tag
     403         [ +  - ]:         42 :     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS );
     404         [ +  - ]:         42 :     Tag pc_tag = pcomm_tag( mbImpl, true );
     405                 :            : 
     406                 :         42 :     const EntityHandle root                      = 0;
     407 [ +  - ][ +  - ]:         42 :     ErrorCode result                             = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
     408         [ +  - ]:         42 :     std::vector< ParallelComm* >::iterator pc_it = std::find( pc_array.begin(), pc_array.end(), pc );
     409 [ +  - ][ +  - ]:         42 :     assert( MB_SUCCESS == result && pc_it != pc_array.end() );
         [ -  + ][ +  - ]
                 [ #  # ]
     410                 :            :     // Empty if test to get around compiler warning about unused var
     411                 :            :     if( MB_SUCCESS == result ) {}
     412                 :            : 
     413         [ +  - ]:         42 :     *pc_it = NULL;
     414 [ +  - ][ +  - ]:         42 :     mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
     415                 :         42 : }
     416                 :            : 
      417                 :            : //! Assign a global ID space, either to entities of the largest dimension only or
      418                 :            : //! to entities of all dimensions (and, in either case, to vertices as well)
     419                 :          0 : ErrorCode ParallelComm::assign_global_ids( EntityHandle this_set, const int dimension, const int start_id,
     420                 :            :                                            const bool largest_dim_only, const bool parallel, const bool owned_only )
     421                 :            : {
     422 [ #  # ][ #  # ]:          0 :     Range entities[4];
                 [ #  # ]
     423                 :            :     ErrorCode result;
     424         [ #  # ]:          0 :     std::vector< unsigned char > pstatus;
     425         [ #  # ]:          0 :     for( int dim = 0; dim <= dimension; dim++ )
     426                 :            :     {
     427 [ #  # ][ #  # ]:          0 :         if( dim == 0 || !largest_dim_only || dim == dimension )
                 [ #  # ]
     428                 :            :         {
      429 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_dimension( this_set, dim, entities[dim] );MB_CHK_SET_ERR( result, "Failed to get entities in assign_global_ids" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     430                 :            :         }
     431                 :            : 
     432                 :            :         // Need to filter out non-locally-owned entities!!!
     433 [ #  # ][ #  # ]:          0 :         pstatus.resize( entities[dim].size() );
     434 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( pstatus_tag(), entities[dim], &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus in assign_global_ids" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     435                 :            : 
     436         [ #  # ]:          0 :         Range dum_range;
     437         [ #  # ]:          0 :         Range::iterator rit;
     438                 :            :         unsigned int i;
     439 [ #  # ][ #  # ]:          0 :         for( rit = entities[dim].begin(), i = 0; rit != entities[dim].end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
     440 [ #  # ][ #  # ]:          0 :             if( pstatus[i] & PSTATUS_NOT_OWNED ) dum_range.insert( *rit );
         [ #  # ][ #  # ]
     441 [ #  # ][ #  # ]:          0 :         entities[dim] = subtract( entities[dim], dum_range );
     442                 :          0 :     }
     443                 :            : 
     444 [ #  # ][ #  # ]:          0 :     return assign_global_ids( entities, dimension, start_id, parallel, owned_only );
           [ #  #  #  # ]
     445                 :            : }
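
A hedged usage sketch (reusing the mb/pcomm setup sketched earlier): number all owned
entities of dimensions 0 through 3 in the root set starting at ID 1, then exchange the IDs
so shared but non-owned copies receive the owner's value.

    moab::ErrorCode rval = pcomm.assign_global_ids( 0,      // this_set: the root set
                                                    3,      // dimension: up to 3D entities
                                                    1,      // start_id
                                                    false,  // largest_dim_only: number every dimension
                                                    true,   // parallel: offset IDs by lower ranks' counts
                                                    false   // owned_only: also push IDs to non-owned copies
    );
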
     446                 :            : 
      447                 :            : //! Assign a global ID space, either to entities of the largest dimension only or
      448                 :            : //! to entities of all dimensions (and, in either case, to vertices as well)
     449                 :          0 : ErrorCode ParallelComm::assign_global_ids( Range entities[], const int dimension, const int start_id,
     450                 :            :                                            const bool parallel, const bool owned_only )
     451                 :            : {
     452                 :            :     int local_num_elements[4];
     453                 :            :     ErrorCode result;
     454         [ #  # ]:          0 :     for( int dim = 0; dim <= dimension; dim++ )
     455                 :            :     {
     456         [ #  # ]:          0 :         local_num_elements[dim] = entities[dim].size();
     457                 :            :     }
     458                 :            : 
     459                 :            :     // Communicate numbers
     460 [ #  # ][ #  # ]:          0 :     std::vector< int > num_elements( procConfig.proc_size() * 4 );
     461                 :            : #ifdef MOAB_HAVE_MPI
     462 [ #  # ][ #  # ]:          0 :     if( procConfig.proc_size() > 1 && parallel )
         [ #  # ][ #  # ]
     463                 :            :     {
     464                 :            :         int retval =
     465 [ #  # ][ #  # ]:          0 :             MPI_Allgather( local_num_elements, 4, MPI_INT, &num_elements[0], 4, MPI_INT, procConfig.proc_comm() );
                 [ #  # ]
     466         [ #  # ]:          0 :         if( 0 != retval ) return MB_FAILURE;
     467                 :            :     }
     468                 :            :     else
     469                 :            : #endif
     470         [ #  # ]:          0 :         for( int dim = 0; dim < 4; dim++ )
     471         [ #  # ]:          0 :             num_elements[dim] = local_num_elements[dim];
     472                 :            : 
     473                 :            :     // My entities start at one greater than total_elems[d]
     474                 :          0 :     int total_elems[4] = { start_id, start_id, start_id, start_id };
     475                 :            : 
     476 [ #  # ][ #  # ]:          0 :     for( unsigned int proc = 0; proc < procConfig.proc_rank(); proc++ )
     477                 :            :     {
     478         [ #  # ]:          0 :         for( int dim = 0; dim < 4; dim++ )
     479         [ #  # ]:          0 :             total_elems[dim] += num_elements[4 * proc + dim];
     480                 :            :     }
     481                 :            : 
     482                 :            :     // Assign global ids now
     483         [ #  # ]:          0 :     Tag gid_tag = mbImpl->globalId_tag();
     484                 :            : 
     485         [ #  # ]:          0 :     for( int dim = 0; dim < 4; dim++ )
     486                 :            :     {
     487 [ #  # ][ #  # ]:          0 :         if( entities[dim].empty() ) continue;
     488 [ #  # ][ #  # ]:          0 :         num_elements.resize( entities[dim].size() );
     489                 :          0 :         int i = 0;
     490 [ #  # ][ #  # ]:          0 :         for( Range::iterator rit = entities[dim].begin(); rit != entities[dim].end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
     491         [ #  # ]:          0 :             num_elements[i++] = total_elems[dim]++;
     492                 :            : 
     493 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( gid_tag, entities[dim], &num_elements[0] );MB_CHK_SET_ERR( result, "Failed to set global id tag in assign_global_ids" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
     494                 :            :     }
     495                 :            : 
     496         [ #  # ]:          0 :     if( owned_only ) return MB_SUCCESS;
     497                 :            : 
     498                 :            :     // Exchange tags
     499         [ #  # ]:          0 :     for( int dim = 1; dim < 4; dim++ )
     500         [ #  # ]:          0 :         entities[0].merge( entities[dim] );
     501                 :            : 
     502         [ #  # ]:          0 :     return exchange_tags( gid_tag, entities[0] );
     503                 :            : }
     504                 :            : 
     505                 :          0 : int ParallelComm::get_buffers( int to_proc, bool* is_new )
     506                 :            : {
     507                 :          0 :     int ind                                   = -1;
     508         [ #  # ]:          0 :     std::vector< unsigned int >::iterator vit = std::find( buffProcs.begin(), buffProcs.end(), to_proc );
     509 [ #  # ][ #  # ]:          0 :     if( vit == buffProcs.end() )
     510                 :            :     {
     511 [ #  # ][ #  # ]:          0 :         assert( "shouldn't need buffer to myself" && to_proc != (int)procConfig.proc_rank() );
     512                 :          0 :         ind = buffProcs.size();
     513         [ #  # ]:          0 :         buffProcs.push_back( (unsigned int)to_proc );
     514 [ #  # ][ #  # ]:          0 :         localOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
                 [ #  # ]
     515 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
                 [ #  # ]
     516         [ #  # ]:          0 :         if( is_new ) *is_new = true;
     517                 :            :     }
     518                 :            :     else
     519                 :            :     {
     520         [ #  # ]:          0 :         ind = vit - buffProcs.begin();
     521         [ #  # ]:          0 :         if( is_new ) *is_new = false;
     522                 :            :     }
     523         [ #  # ]:          0 :     assert( ind < MAX_SHARING_PROCS );
     524                 :          0 :     return ind;
     525                 :            : }
     526                 :            : 
     527                 :          0 : ErrorCode ParallelComm::broadcast_entities( const int from_proc, Range& entities, const bool adjacencies,
     528                 :            :                                             const bool tags )
     529                 :            : {
     530                 :            : #ifndef MOAB_HAVE_MPI
     531                 :            :     return MB_FAILURE;
     532                 :            : #else
     533                 :            : 
     534                 :          0 :     ErrorCode result = MB_SUCCESS;
     535                 :            :     int success;
     536                 :            :     int buff_size;
     537                 :            : 
     538         [ #  # ]:          0 :     Buffer buff( INITIAL_BUFF_SIZE );
     539         [ #  # ]:          0 :     buff.reset_ptr( sizeof( int ) );
     540 [ #  # ][ #  # ]:          0 :     if( (int)procConfig.proc_rank() == from_proc )
     541                 :            :     {
     542 [ #  # ][ #  # ]:          0 :         result = add_verts( entities );MB_CHK_SET_ERR( result, "Failed to add adj vertices" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     543                 :            : 
     544         [ #  # ]:          0 :         buff.reset_ptr( sizeof( int ) );
     545 [ #  # ][ #  # ]:          0 :         result = pack_buffer( entities, adjacencies, tags, false, -1, &buff );MB_CHK_SET_ERR( result, "Failed to compute buffer size in broadcast_entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     546         [ #  # ]:          0 :         buff.set_stored_size();
     547                 :          0 :         buff_size = buff.buff_ptr - buff.mem_ptr;
     548                 :            :     }
     549                 :            : 
     550 [ #  # ][ #  # ]:          0 :     success = MPI_Bcast( &buff_size, 1, MPI_INT, from_proc, procConfig.proc_comm() );
     551 [ #  # ][ #  # ]:          0 :     if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
     552                 :            : 
     553         [ #  # ]:          0 :     if( !buff_size )  // No data
     554                 :          0 :         return MB_SUCCESS;
     555                 :            : 
     556 [ #  # ][ #  # ]:          0 :     if( (int)procConfig.proc_rank() != from_proc ) buff.reserve( buff_size );
                 [ #  # ]
     557                 :            : 
     558                 :          0 :     size_t offset = 0;
     559         [ #  # ]:          0 :     while( buff_size )
     560                 :            :     {
     561         [ #  # ]:          0 :         int sz  = std::min( buff_size, MAX_BCAST_SIZE );
     562 [ #  # ][ #  # ]:          0 :         success = MPI_Bcast( buff.mem_ptr + offset, sz, MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
     563 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer failed" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
     564                 :            : 
     565                 :          0 :         offset += sz;
     566                 :          0 :         buff_size -= sz;
     567                 :            :     }
     568                 :            : 
     569 [ #  # ][ #  # ]:          0 :     if( (int)procConfig.proc_rank() != from_proc )
     570                 :            :     {
     571 [ #  # ][ #  # ]:          0 :         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
                 [ #  # ]
     572 [ #  # ][ #  # ]:          0 :         std::vector< std::vector< int > > dum1p;
     573 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > dum2, dum4;
         [ #  # ][ #  # ]
     574 [ #  # ][ #  # ]:          0 :         std::vector< unsigned int > dum3;
     575         [ #  # ]:          0 :         buff.reset_ptr( sizeof( int ) );
     576 [ #  # ][ #  # ]:          0 :         result = unpack_buffer( buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );MB_CHK_SET_ERR( result, "Failed to unpack buffer in broadcast_entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     577 [ #  # ][ #  # ]:          0 :         std::copy( dum4.begin(), dum4.end(), range_inserter( entities ) );
                 [ #  # ]
     578                 :            :     }
     579                 :            : 
     580                 :          0 :     return MB_SUCCESS;
     581                 :            : #endif
     582                 :            : }
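
A hedged usage sketch (same mb/pcomm assumptions as above): the root rank packs its 3D
entities and broadcasts them in MAX_BCAST_SIZE chunks; every other rank unpacks the stream
and accumulates the received entities into ents.

    moab::Range ents;
    int rank = 0;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( 0 == rank )
    {
        moab::ErrorCode rv = mb.get_entities_by_dimension( 0, 3, ents );
        // error handling of rv omitted for brevity
    }
    moab::ErrorCode rval = pcomm.broadcast_entities( 0 /*from_proc*/, ents,
                                                     false /*adjacencies*/, true /*tags*/ );
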
     583                 :            : 
     584                 :          0 : ErrorCode ParallelComm::scatter_entities( const int from_proc, std::vector< Range >& entities, const bool adjacencies,
     585                 :            :                                           const bool tags )
     586                 :            : {
     587                 :            : #ifndef MOAB_HAVE_MPI
     588                 :            :     return MB_FAILURE;
     589                 :            : #else
     590                 :          0 :     ErrorCode result = MB_SUCCESS;
     591                 :            :     int i, success, buff_size, prev_size;
     592         [ #  # ]:          0 :     int nProcs         = (int)procConfig.proc_size();
     593 [ #  # ][ #  # ]:          0 :     int* sendCounts    = new int[nProcs];
     594 [ #  # ][ #  # ]:          0 :     int* displacements = new int[nProcs];
     595                 :          0 :     sendCounts[0]      = sizeof( int );
     596                 :          0 :     displacements[0]   = 0;
     597         [ #  # ]:          0 :     Buffer buff( INITIAL_BUFF_SIZE );
     598         [ #  # ]:          0 :     buff.reset_ptr( sizeof( int ) );
     599         [ #  # ]:          0 :     buff.set_stored_size();
     600         [ #  # ]:          0 :     unsigned int my_proc = procConfig.proc_rank();
     601                 :            : 
     602                 :            :     // Get buffer size array for each remote processor
     603         [ #  # ]:          0 :     if( my_proc == (unsigned int)from_proc )
     604                 :            :     {
     605         [ #  # ]:          0 :         for( i = 1; i < nProcs; i++ )
     606                 :            :         {
     607                 :          0 :             prev_size = buff.buff_ptr - buff.mem_ptr;
     608         [ #  # ]:          0 :             buff.reset_ptr( prev_size + sizeof( int ) );
     609 [ #  # ][ #  # ]:          0 :             result = add_verts( entities[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
     610                 :            : 
     611 [ #  # ][ #  # ]:          0 :             result = pack_buffer( entities[i], adjacencies, tags, false, -1, &buff );
     612         [ #  # ]:          0 :             if( MB_SUCCESS != result )
     613                 :            :             {
     614         [ #  # ]:          0 :                 delete[] sendCounts;
     615         [ #  # ]:          0 :                 delete[] displacements;
     616 [ #  # ][ #  # ]:          0 :                 MB_SET_ERR( result, "Failed to pack buffer in scatter_entities" );
         [ #  # ][ #  # ]
                 [ #  # ]
     617                 :            :             }
     618                 :            : 
     619                 :          0 :             buff_size                               = buff.buff_ptr - buff.mem_ptr - prev_size;
     620                 :          0 :             *( (int*)( buff.mem_ptr + prev_size ) ) = buff_size;
     621                 :          0 :             sendCounts[i]                           = buff_size;
     622                 :            :         }
     623                 :            :     }
     624                 :            : 
     625                 :            :     // Broadcast buffer size array
     626 [ #  # ][ #  # ]:          0 :     success = MPI_Bcast( sendCounts, nProcs, MPI_INT, from_proc, procConfig.proc_comm() );
     627         [ #  # ]:          0 :     if( MPI_SUCCESS != success )
     628                 :            :     {
     629         [ #  # ]:          0 :         delete[] sendCounts;
     630         [ #  # ]:          0 :         delete[] displacements;
     631 [ #  # ][ #  # ]:          0 :         MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" );
         [ #  # ][ #  # ]
                 [ #  # ]
     632                 :            :     }
     633                 :            : 
     634         [ #  # ]:          0 :     for( i = 1; i < nProcs; i++ )
     635                 :            :     {
     636                 :          0 :         displacements[i] = displacements[i - 1] + sendCounts[i - 1];
     637                 :            :     }
     638                 :            : 
     639         [ #  # ]:          0 :     Buffer rec_buff;
     640         [ #  # ]:          0 :     rec_buff.reserve( sendCounts[my_proc] );
     641                 :            : 
     642                 :            :     // Scatter actual geometry
     643                 :            :     success = MPI_Scatterv( buff.mem_ptr, sendCounts, displacements, MPI_UNSIGNED_CHAR, rec_buff.mem_ptr,
     644 [ #  # ][ #  # ]:          0 :                             sendCounts[my_proc], MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
     645                 :            : 
     646         [ #  # ]:          0 :     if( MPI_SUCCESS != success )
     647                 :            :     {
     648         [ #  # ]:          0 :         delete[] sendCounts;
     649         [ #  # ]:          0 :         delete[] displacements;
     650 [ #  # ][ #  # ]:          0 :         MB_SET_ERR( MB_FAILURE, "MPI_Scatterv of buffer failed" );
         [ #  # ][ #  # ]
                 [ #  # ]
     651                 :            :     }
     652                 :            : 
     653                 :            :     // Unpack in remote processors
     654         [ #  # ]:          0 :     if( my_proc != (unsigned int)from_proc )
     655                 :            :     {
     656 [ #  # ][ #  # ]:          0 :         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
                 [ #  # ]
     657 [ #  # ][ #  # ]:          0 :         std::vector< std::vector< int > > dum1p;
     658 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > dum2, dum4;
         [ #  # ][ #  # ]
     659 [ #  # ][ #  # ]:          0 :         std::vector< unsigned int > dum3;
     660         [ #  # ]:          0 :         rec_buff.reset_ptr( sizeof( int ) );
     661         [ #  # ]:          0 :         result = unpack_buffer( rec_buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );
     662         [ #  # ]:          0 :         if( MB_SUCCESS != result )
     663                 :            :         {
     664         [ #  # ]:          0 :             delete[] sendCounts;
     665         [ #  # ]:          0 :             delete[] displacements;
     666 [ #  # ][ #  # ]:          0 :             MB_SET_ERR( result, "Failed to unpack buffer in scatter_entities" );
         [ #  # ][ #  # ]
                 [ #  # ]
     667                 :            :         }
     668                 :            : 
     669 [ #  # ][ #  # ]:          0 :         std::copy( dum4.begin(), dum4.end(), range_inserter( entities[my_proc] ) );
         [ #  # ][ #  # ]
     670                 :            :     }
     671                 :            : 
     672         [ #  # ]:          0 :     delete[] sendCounts;
     673         [ #  # ]:          0 :     delete[] displacements;
     674                 :            : 
     675                 :          0 :     return MB_SUCCESS;
     676                 :            : #endif
     677                 :            : }
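
A hedged usage sketch (same assumptions as above): the root rank fills one Range per
destination rank, and after the call each non-root rank finds its received entities in its
own slot of the vector.

    int nprocs = 1, rank = 0;
    MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    std::vector< moab::Range > per_rank( nprocs );
    if( 0 == rank )
    {
        // ... partition the root's entities into per_rank[1] .. per_rank[nprocs - 1] ...
    }
    moab::ErrorCode rval = pcomm.scatter_entities( 0 /*from_proc*/, per_rank,
                                                   false /*adjacencies*/, true /*tags*/ );
    // On a receiving rank r, per_rank[r] now holds the unpacked entities.
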
     678                 :            : 
     679                 :          0 : ErrorCode ParallelComm::send_entities( const int to_proc, Range& orig_ents, const bool adjs, const bool tags,
     680                 :            :                                        const bool store_remote_handles, const bool is_iface, Range& /*final_ents*/,
     681                 :            :                                        int& incoming1, int& incoming2, TupleList& entprocs,
     682                 :            :                                        std::vector< MPI_Request >& recv_remoteh_reqs, bool /*wait_all*/ )
     683                 :            : {
     684                 :            : #ifndef MOAB_HAVE_MPI
     685                 :            :     return MB_FAILURE;
     686                 :            : #else
     687                 :            :     // Pack entities to local buffer
     688         [ #  # ]:          0 :     int ind = get_buffers( to_proc );
     689 [ #  # ][ #  # ]:          0 :     localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
     690                 :            : 
     691                 :            :     // Add vertices
     692 [ #  # ][ #  # ]:          0 :     ErrorCode result = add_verts( orig_ents );MB_CHK_SET_ERR( result, "Failed to add verts in send_entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     693                 :            : 
     694                 :            :     // Filter out entities already shared with destination
     695         [ #  # ]:          0 :     Range tmp_range;
     696 [ #  # ][ #  # ]:          0 :     result = filter_pstatus( orig_ents, PSTATUS_SHARED, PSTATUS_AND, to_proc, &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     697 [ #  # ][ #  # ]:          0 :     if( !tmp_range.empty() ) { orig_ents = subtract( orig_ents, tmp_range ); }
         [ #  # ][ #  # ]
     698                 :            : 
     699 [ #  # ][ #  # ]:          0 :     result = pack_buffer( orig_ents, adjs, tags, store_remote_handles, to_proc, localOwnedBuffs[ind], &entprocs );MB_CHK_SET_ERR( result, "Failed to pack buffer in send_entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
     700                 :            : 
     701                 :            :     // Send buffer
     702 [ #  # ][ #  # ]:          0 :     result = send_buffer( to_proc, localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind], recvReqs[2 * ind + 1],
                 [ #  # ]
     703         [ #  # ]:          0 :                           (int*)( remoteOwnedBuffs[ind]->mem_ptr ),
     704                 :            :                           //&ackbuff,
     705                 :            :                           incoming1, MB_MESG_REMOTEH_SIZE,
     706 [ #  # ][ #  # ]:          0 :                           ( !is_iface && store_remote_handles ? localOwnedBuffs[ind] : NULL ),
     707 [ #  # ][ #  # ]:          0 :                           &recv_remoteh_reqs[2 * ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to send buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     708                 :            : 
     709                 :          0 :     return MB_SUCCESS;
     710                 :            : #endif
     711                 :            : }
     712                 :            : 
     713                 :          0 : ErrorCode ParallelComm::send_entities( std::vector< unsigned int >& send_procs, std::vector< Range* >& send_ents,
     714                 :            :                                        int& incoming1, int& incoming2, const bool store_remote_handles )
     715                 :            : {
     716                 :            : #ifdef MOAB_HAVE_MPE
     717                 :            :     if( myDebug->get_verbosity() == 2 )
     718                 :            :     { MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_entities." ); }
     719                 :            : #endif
     720         [ #  # ]:          0 :     myDebug->tprintf( 1, "Entering send_entities\n" );
     721 [ #  # ][ #  # ]:          0 :     if( myDebug->get_verbosity() == 4 )
     722                 :            :     {
     723                 :          0 :         msgs.clear();
     724         [ #  # ]:          0 :         msgs.reserve( MAX_SHARING_PROCS );
     725                 :            :     }
     726                 :            : 
     727                 :            :     unsigned int i;
     728                 :            :     int ind;
     729                 :          0 :     ErrorCode result = MB_SUCCESS;
     730                 :            : 
     731                 :            :     // Set buffProcs with communicating procs
     732                 :          0 :     unsigned int n_proc = send_procs.size();
     733         [ #  # ]:          0 :     for( i = 0; i < n_proc; i++ )
     734                 :            :     {
     735 [ #  # ][ #  # ]:          0 :         ind    = get_buffers( send_procs[i] );
     736 [ #  # ][ #  # ]:          0 :         result = add_verts( *send_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
     737                 :            : 
     738                 :            :         // Filter out entities already shared with destination
     739         [ #  # ]:          0 :         Range tmp_range;
     740 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( *send_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on shared status" );
     741 [ #  # ][ #  # ]:          0 :         if( !tmp_range.empty() ) { *send_ents[i] = subtract( *send_ents[i], tmp_range ); }
     742                 :          0 :     }
     743                 :            : 
     744                 :            :     //===========================================
     745                 :            :     // Get entities to be sent to neighbors
     746                 :            :     // Need to get procs each entity is sent to
     747                 :            :     //===========================================
     748 [ #  # ][ #  # ]:          0 :     Range allsent, tmp_range;
     749                 :          0 :     int npairs = 0;
     750         [ #  # ]:          0 :     TupleList entprocs;
     751         [ #  # ]:          0 :     for( i = 0; i < n_proc; i++ )
     752                 :            :     {
     753 [ #  # ][ #  # ]:          0 :         int n_ents = send_ents[i]->size();
     754         [ #  # ]:          0 :         if( n_ents > 0 )
     755                 :            :         {
     756                 :          0 :             npairs += n_ents;  // Get the total # of proc/handle pairs
     757 [ #  # ][ #  # ]:          0 :             allsent.merge( *send_ents[i] );
     758                 :            :         }
     759                 :            :     }
     760                 :            : 
     761                 :            :     // Allocate a TupleList of that size
     762         [ #  # ]:          0 :     entprocs.initialize( 1, 0, 1, 0, npairs );
     763         [ #  # ]:          0 :     entprocs.enableWriteAccess();
     764                 :            : 
     765                 :            :     // Put the proc/handle pairs in the list
     766         [ #  # ]:          0 :     for( i = 0; i < n_proc; i++ )
     767                 :            :     {
     768 [ #  # ][ #  # ]:          0 :         for( Range::iterator rit = send_ents[i]->begin(); rit != send_ents[i]->end(); ++rit )
     769                 :            :         {
     770 [ #  # ][ #  # ]:          0 :             entprocs.vi_wr[entprocs.get_n()]  = send_procs[i];
     771 [ #  # ][ #  # ]:          0 :             entprocs.vul_wr[entprocs.get_n()] = *rit;
     772         [ #  # ]:          0 :             entprocs.inc_n();
     773                 :            :         }
     774                 :            :     }
     775                 :            : 
     776                 :            :     // Sort by handle
     777         [ #  # ]:          0 :     moab::TupleList::buffer sort_buffer;
     778         [ #  # ]:          0 :     sort_buffer.buffer_init( npairs );
     779         [ #  # ]:          0 :     entprocs.sort( 1, &sort_buffer );
     780         [ #  # ]:          0 :     entprocs.disableWriteAccess();
     781         [ #  # ]:          0 :     sort_buffer.reset();
     782                 :            : 
     783                 :            :     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
     784 [ #  # ][ #  # ]:          0 :                       (unsigned long)allsent.size() );
     785                 :            : 
     786                 :            :     //===========================================
     787                 :            :     // Pack and send ents from this proc to others
     788                 :            :     //===========================================
     789         [ #  # ]:          0 :     for( i = 0; i < n_proc; i++ )
     790                 :            :     {
     791 [ #  # ][ #  # ]:          0 :         if( send_ents[i]->size() > 0 )
     792                 :            :         {
     793 [ #  # ][ #  # ]:          0 :             ind = get_buffers( send_procs[i] );
     794         [ #  # ]:          0 :             myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", send_ents[i]->compactness(),
     795 [ #  # ][ #  # ]:          0 :                               (unsigned long)send_ents[i]->size() );
     796                 :            :             // Reserve space on front for size and for initial buff size
     797 [ #  # ][ #  # ]:          0 :             localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
     798 [ #  # ][ #  # ]:          0 :             result = pack_buffer( *send_ents[i], false, true, store_remote_handles, buffProcs[ind],
     799 [ #  # ][ #  # ]:          0 :                                   localOwnedBuffs[ind], &entprocs, &allsent );
     800                 :            : 
     801 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 4 )
     802                 :            :             {
     803         [ #  # ]:          0 :                 msgs.resize( msgs.size() + 1 );
     804 [ #  # ][ #  # ]:          0 :                 msgs.back() = new Buffer( *localOwnedBuffs[ind] );
     805                 :            :             }
     806                 :            : 
     807                 :            :             // Send the buffer (size stored in front in send_buffer)
     808 [ #  # ][ #  # ]:          0 :             result = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind],
     809         [ #  # ]:          0 :                                   recvReqs[2 * ind + 1], &ackbuff, incoming1, MB_MESG_REMOTEH_SIZE,
     810 [ #  # ][ #  # ]:          0 :                                   ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recvRemotehReqs[2 * ind],
     811 [ #  # ][ #  # ]:          0 :                                   &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost send" );
     812                 :            :         }
     813                 :            :     }
     814         [ #  # ]:          0 :     entprocs.reset();
     815                 :            : 
     816                 :            : #ifdef MOAB_HAVE_MPE
     817                 :            :     if( myDebug->get_verbosity() == 2 )
     818                 :            :     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_entities." ); }
     819                 :            : #endif
     820                 :            : 
     821                 :          0 :     return MB_SUCCESS;
     822                 :            : }
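The entprocs TupleList assembled above is, in effect, a flat list of (destination proc, entity handle) pairs, sized up front from the per-processor ranges and then sorted by handle so that pack_buffer can find every destination of a given entity in one contiguous run. A rough equivalent with standard containers (illustrative only; the real code stores one int and one handle per tuple in moab::TupleList and sorts with its own buffer):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using Handle = std::uint64_t;  // stand-in for moab::EntityHandle

    struct EntProc
    {
        Handle handle;  // entity being sent
        int proc;       // destination rank
    };

    // Gather one (handle, proc) pair per destination of each entity, then sort by handle.
    std::vector< EntProc > build_entprocs( const std::vector< int >& send_procs,
                                           const std::vector< std::vector< Handle > >& send_ents )
    {
        std::size_t npairs = 0;
        for( std::size_t i = 0; i < send_ents.size(); ++i )
            npairs += send_ents[i].size();  // total # of proc/handle pairs

        std::vector< EntProc > entprocs;
        entprocs.reserve( npairs );
        for( std::size_t i = 0; i < send_procs.size(); ++i )
            for( Handle h : send_ents[i] )
                entprocs.push_back( EntProc{ h, send_procs[i] } );

        // Sort by handle so all destinations of an entity are contiguous.
        std::sort( entprocs.begin(), entprocs.end(),
                   []( const EntProc& a, const EntProc& b ) { return a.handle < b.handle; } );
        return entprocs;
    }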
     823                 :            : 
     824                 :            : /////////////////////////////////////////////////////////////////////////////////
     825                 :            : // Send and Receive routines for a sequence of entities: use case UMR
     826                 :            : /////////////////////////////////////////////////////////////////////////////////
     827                 :          0 : void print_buff( unsigned char* ch, int size )
     828                 :            : {
     829         [ #  # ]:          0 :     for( int i = 0; i < size; i++ )
     830                 :          0 :         std::cout << ch[i];
     831                 :          0 :     std::cout << "\n";
     832                 :          0 : }
     833                 :          0 : ErrorCode ParallelComm::send_recv_entities( std::vector< int >& send_procs, std::vector< std::vector< int > >& msgsizes,
     834                 :            :                                             std::vector< std::vector< EntityHandle > >& senddata,
     835                 :            :                                             std::vector< std::vector< EntityHandle > >& recvdata )
     836                 :            : {
     837                 :            : #ifdef MOAB_HAVE_MPE
     838                 :            :     if( myDebug->get_verbosity() == 2 )
     839                 :            :     { MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_recv_entities." ); }
     840                 :            : #endif
     841         [ #  # ]:          0 :     myDebug->tprintf( 1, "Entering send_recv_entities\n" );
     842 [ #  # ][ #  # ]:          0 :     if( myDebug->get_verbosity() == 4 )
     843                 :            :     {
     844                 :          0 :         msgs.clear();
     845         [ #  # ]:          0 :         msgs.reserve( MAX_SHARING_PROCS );
     846                 :            :     }
     847                 :            : 
     848                 :            :     // unsigned int i;
     849                 :            :     int i, ind, success;
     850                 :          0 :     ErrorCode error = MB_SUCCESS;
     851                 :            : 
     852                 :            :     //===========================================
     853                 :            :     // Pack and send ents from this proc to others
     854                 :            :     //===========================================
     855                 :            : 
     856                 :            :     // std::cout<<"resetting all buffers"<<std::endl;
     857                 :            : 
     858         [ #  # ]:          0 :     reset_all_buffers();
     859         [ #  # ]:          0 :     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
     860         [ #  # ]:          0 :     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
     861                 :            :     int ack_buff;
     862                 :          0 :     int incoming = 0;
     863                 :            : 
     864                 :          0 :     std::vector< unsigned int >::iterator sit;
     865                 :            : 
     866 [ #  # ][ #  # ]:          0 :     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
     867                 :            :     {
     868                 :          0 :         incoming++;
     869 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
     870         [ #  # ]:          0 :                            MB_MESG_ENTS_SIZE, incoming );
     871                 :            : 
     872 [ #  # ][ #  # ]:          0 :         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
     873 [ #  # ][ #  # ]:          0 :                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
     874 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in send_recv_entities" ); }
     875                 :            :     }
     876                 :            : 
     877                 :            :     //  std::set<unsigned int>::iterator it;
     878         [ #  # ]:          0 :     for( i = 0; i < (int)send_procs.size(); i++ )
     879                 :            :     {
     880                 :            :         // Get index of the shared processor in the local buffer
     881 [ #  # ][ #  # ]:          0 :         ind = get_buffers( send_procs[i] );
     882 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
     883                 :            : 
     884 [ #  # ][ #  # ]:          0 :         int buff_size = msgsizes[i].size() * sizeof( int ) + senddata[i].size() * sizeof( EntityHandle );
     885 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[ind]->check_space( buff_size );
     886                 :            : 
     887                 :            :         // Pack entities
     888         [ #  # ]:          0 :         std::vector< int > msg;
     889 [ #  # ][ #  # ]:          0 :         msg.insert( msg.end(), msgsizes[i].begin(), msgsizes[i].end() );
     890 [ #  # ][ #  # ]:          0 :         PACK_INTS( localOwnedBuffs[ind]->buff_ptr, &msg[0], msg.size() );
     891                 :            : 
     892 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > entities;
     893 [ #  # ][ #  # ]:          0 :         entities.insert( entities.end(), senddata[i].begin(), senddata[i].end() );
     894 [ #  # ][ #  # ]:          0 :         PACK_EH( localOwnedBuffs[ind]->buff_ptr, &entities[0], entities.size() );
     895 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[ind]->set_stored_size();
     896                 :            : 
     897 [ #  # ][ #  # ]:          0 :         if( myDebug->get_verbosity() == 4 )
     898                 :            :         {
     899         [ #  # ]:          0 :             msgs.resize( msgs.size() + 1 );
     900 [ #  # ][ #  # ]:          0 :             msgs.back() = new Buffer( *localOwnedBuffs[ind] );
     901                 :            :         }
     902                 :            : 
     903                 :            :         // Send the buffer (size stored in front in send_buffer)
     904 [ #  # ][ #  # ]:          0 :         error = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
     905 [ #  # ][ #  # ]:          0 :                              recv_ent_reqs[3 * ind + 2], &ack_buff, incoming );MB_CHK_SET_ERR( error, "Failed to Isend in send_recv_entities" );
     906                 :          0 :     }
     907                 :            : 
     908                 :            :     //===========================================
     909                 :            :     // Receive and unpack ents from received data
     910                 :            :     //===========================================
     911                 :            : 
     912         [ #  # ]:          0 :     while( incoming )
     913                 :            :     {
     914                 :            : 
     915                 :            :         MPI_Status status;
     916                 :            :         int index_in_recv_requests;
     917                 :            : 
     918 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
     919 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &index_in_recv_requests, &status );
     920 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in send_recv_entities" ); }
     921                 :            : 
     922                 :            :         // Processor index in the list is the request index divided by 3
     923                 :          0 :         ind = index_in_recv_requests / 3;
     924                 :            : 
     925         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
     926                 :            : 
     927                 :            :         // OK, received something; decrement incoming counter
     928                 :          0 :         incoming--;
     929                 :            : 
     930                 :          0 :         bool done = false;
     931                 :            : 
     932         [ #  # ]:          0 :         error = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind],
     933         [ #  # ]:          0 :                              recv_ent_reqs[3 * ind + 1],  // This is for receiving the second message
     934         [ #  # ]:          0 :                              recv_ent_reqs[3 * ind + 2],  // This would be for ack, but it is not
     935                 :            :                                                           // used; consider removing it
     936         [ #  # ]:          0 :                              incoming, localOwnedBuffs[ind],
     937         [ #  # ]:          0 :                              sendReqs[3 * ind + 1],  // Send request for sending the second message
     938         [ #  # ]:          0 :                              sendReqs[3 * ind + 2],  // This is for sending the ack
     939 [ #  # ][ #  # ]:          0 :                              done );MB_CHK_SET_ERR( error, "Failed to resize recv buffer" );
     940                 :            : 
     941         [ #  # ]:          0 :         if( done )
     942                 :            :         {
     943 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
     944                 :            : 
     945                 :          0 :             int from_proc = status.MPI_SOURCE;
     946 [ #  # ][ #  # ]:          0 :             int idx       = std::find( send_procs.begin(), send_procs.end(), from_proc ) - send_procs.begin();
     947                 :            : 
     948         [ #  # ]:          0 :             int msg = msgsizes[idx].size();
     949         [ #  # ]:          0 :             std::vector< int > recvmsg( msg );
     950         [ #  # ]:          0 :             int ndata = senddata[idx].size();
     951         [ #  # ]:          0 :             std::vector< EntityHandle > dum_vec( ndata );
     952                 :            : 
     953 [ #  # ][ #  # ]:          0 :             UNPACK_INTS( remoteOwnedBuffs[ind]->buff_ptr, &recvmsg[0], msg );
     954 [ #  # ][ #  # ]:          0 :             UNPACK_EH( remoteOwnedBuffs[ind]->buff_ptr, &dum_vec[0], ndata );
     955                 :            : 
     956 [ #  # ][ #  # ]:          0 :             recvdata[idx].insert( recvdata[idx].end(), dum_vec.begin(), dum_vec.end() );
     957                 :            :         }
     958                 :            :     }
     959                 :            : 
     960                 :            : #ifdef MOAB_HAVE_MPE
     961                 :            :     if( myDebug->get_verbosity() == 2 )
     962                 :            :     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_recv_entities." ); }
     963                 :            : #endif
     964                 :            : 
     965                 :          0 :     return MB_SUCCESS;
     966                 :            : }
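send_recv_entities above follows the usual nonblocking exchange skeleton: pre-post one MPI_Irecv per peer, pack and MPI_Isend a buffer to each peer, then drain completions with MPI_Waitany until the incoming counter reaches zero. A stripped-down sketch of that skeleton in plain MPI (fixed-size messages, no second message or ack, so it shows only the shape of the loop, not ParallelComm's actual protocol):

    #include <mpi.h>
    #include <vector>

    // Exchange one fixed-size buffer with each peer using Irecv/Isend + Waitany.
    // Each sendbufs[i] is assumed to hold at least bufsize bytes.
    void exchange_fixed( MPI_Comm comm, const std::vector< int >& peers,
                         std::vector< std::vector< char > >& sendbufs,
                         std::vector< std::vector< char > >& recvbufs, int bufsize, int tag )
    {
        const int n = (int)peers.size();
        std::vector< MPI_Request > recv_reqs( n ), send_reqs( n );
        recvbufs.assign( n, std::vector< char >( bufsize ) );

        // Pre-post all receives first, as the routine above does.
        for( int i = 0; i < n; ++i )
            MPI_Irecv( &recvbufs[i][0], bufsize, MPI_CHAR, peers[i], tag, comm, &recv_reqs[i] );

        // Send a buffer to every peer.
        for( int i = 0; i < n; ++i )
            MPI_Isend( &sendbufs[i][0], bufsize, MPI_CHAR, peers[i], tag, comm, &send_reqs[i] );

        // Drain receives in completion order, mirroring the while( incoming ) loop.
        int incoming = n;
        while( incoming )
        {
            int idx;
            MPI_Status status;
            MPI_Waitany( n, &recv_reqs[0], &idx, &status );
            // recvbufs[idx] is now complete and would be unpacked here.
            incoming--;
        }
        MPI_Waitall( n, &send_reqs[0], MPI_STATUSES_IGNORE );
    }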
     967                 :            : 
     968                 :          0 : ErrorCode ParallelComm::update_remote_data( EntityHandle entity, std::vector< int >& procs,
     969                 :            :                                             std::vector< EntityHandle >& handles )
     970                 :            : {
     971                 :            :     ErrorCode error;
     972                 :          0 :     unsigned char pstatus = PSTATUS_INTERFACE;
     973                 :            : 
     974 [ #  # ][ #  # ]:          0 :     int procmin = *std::min_element( procs.begin(), procs.end() );
     975                 :            : 
     976 [ #  # ][ #  # ]:          0 :     if( (int)rank() > procmin )
     977                 :          0 :         pstatus |= PSTATUS_NOT_OWNED;
     978                 :            :     else
     979         [ #  # ]:          0 :         procmin = rank();
     980                 :            : 
     981                 :            :     // DBG
     982                 :            :     // std::cout<<"entity = "<<entity<<std::endl;
     983                 :            :     // for (int j=0; j<procs.size(); j++)
     984                 :            :     // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
     985                 :            :     // DBG
     986                 :            : 
     987         [ #  # ]:          0 :     if( (int)procs.size() > 1 )
     988                 :            :     {
     989 [ #  # ][ #  # ]:          0 :         procs.push_back( rank() );
     990         [ #  # ]:          0 :         handles.push_back( entity );
     991                 :            : 
     992 [ #  # ][ #  # ]:          0 :         int idx = std::find( procs.begin(), procs.end(), procmin ) - procs.begin();
     993                 :            : 
     994 [ #  # ][ #  # ]:          0 :         std::iter_swap( procs.begin(), procs.begin() + idx );
     995 [ #  # ][ #  # ]:          0 :         std::iter_swap( handles.begin(), handles.begin() + idx );
     996                 :            : 
     997                 :            :         // DBG
     998                 :            :         //  std::cout<<"entity = "<<entity<<std::endl;
     999                 :            :         // for (int j=0; j<procs.size(); j++)
    1000                 :            :         // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
    1001                 :            :         // DBG
    1002                 :            :     }
    1003                 :            : 
    1004                 :            :     // if ((entity == 10388) && (rank()==1))
    1005                 :            :     //    std::cout<<"Here"<<std::endl;
    1006                 :            : 
    1007 [ #  # ][ #  # ]:          0 :     error = update_remote_data( entity, &procs[0], &handles[0], procs.size(), pstatus );MB_CHK_ERR( error );
    1008                 :            : 
    1009                 :          0 :     return MB_SUCCESS;
    1010                 :            : }
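update_remote_data above encodes the ownership convention used throughout ParallelComm: the lowest rank sharing an entity owns it, every other rank gets PSTATUS_NOT_OWNED, and the owner's (proc, handle) pair is swapped to position 0 of the sharing lists before they are stored. A small stand-alone illustration of that reordering step on plain vectors:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using Handle = std::uint64_t;  // stand-in for moab::EntityHandle

    // Move the entry for the minimum rank to the front of both parallel vectors,
    // mirroring the std::find / std::iter_swap pair in update_remote_data.
    void owner_to_front( std::vector< int >& procs, std::vector< Handle >& handles )
    {
        if( procs.empty() || procs.size() != handles.size() ) return;
        int procmin     = *std::min_element( procs.begin(), procs.end() );
        std::size_t idx = std::find( procs.begin(), procs.end(), procmin ) - procs.begin();
        std::iter_swap( procs.begin(), procs.begin() + idx );
        std::iter_swap( handles.begin(), handles.begin() + idx );
    }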
    1011                 :            : 
    1012                 :          0 : ErrorCode ParallelComm::get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc )
    1013                 :            : {
    1014                 :            :     ErrorCode error;
    1015         [ #  # ]:          0 :     std::vector< EntityHandle > newents;
    1016 [ #  # ][ #  # ]:          0 :     error = get_remote_handles( true, local_vec, rem_vec, num_ents, to_proc, newents );MB_CHK_ERR( error );
    1017                 :            : 
    1018                 :          0 :     return MB_SUCCESS;
    1019                 :            : }
    1020                 :            : 
    1021                 :            : //////////////////////////////////////////////////////////////////
    1022                 :            : 
    1023                 :          0 : ErrorCode ParallelComm::recv_entities( const int from_proc, const bool store_remote_handles, const bool is_iface,
    1024                 :            :                                        Range& final_ents, int& incoming1, int& incoming2,
    1025                 :            :                                        std::vector< std::vector< EntityHandle > >& L1hloc,
    1026                 :            :                                        std::vector< std::vector< EntityHandle > >& L1hrem,
    1027                 :            :                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
    1028                 :            :                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
    1029                 :            :                                        std::vector< MPI_Request >& recv_remoteh_reqs, bool /*wait_all*/ )
    1030                 :            : {
    1031                 :            : #ifndef MOAB_HAVE_MPI
    1032                 :            :     return MB_FAILURE;
    1033                 :            : #else
    1034                 :            :     // Non-blocking receive for the first message (having size info)
    1035                 :          0 :     int ind1 = get_buffers( from_proc );
    1036                 :          0 :     incoming1++;
    1037                 :          0 :     PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
    1038                 :          0 :                        MB_MESG_ENTS_SIZE, incoming1 );
    1039                 :          0 :     int success = MPI_Irecv( remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc,
    1040                 :          0 :                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind1] );
    1041 [ #  # ][ #  # ]:          0 :     if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" ); }
    1042                 :            : 
    1043                 :            :     // Receive messages in while loop
    1044                 :            :     return recv_messages( from_proc, store_remote_handles, is_iface, final_ents, incoming1, incoming2, L1hloc, L1hrem,
    1045                 :          0 :                           L1p, L2hloc, L2hrem, L2p, recv_remoteh_reqs );
    1046                 :            : #endif
    1047                 :            : }
    1048                 :            : 
    1049                 :          0 : ErrorCode ParallelComm::recv_entities( std::set< unsigned int >& recv_procs, int incoming1, int incoming2,
    1050                 :            :                                        const bool store_remote_handles, const bool migrate )
    1051                 :            : {
    1052                 :            :     //===========================================
    1053                 :            :     // Receive/unpack new entities
    1054                 :            :     //===========================================
    1055                 :            :     // Number of incoming messages is the number of procs we communicate with
    1056                 :            :     int success, ind, i;
    1057                 :            :     ErrorCode result;
    1058                 :            :     MPI_Status status;
    1059         [ #  # ]:          0 :     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
    1060 [ #  # ][ #  # ]:          0 :     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
    1061         [ #  # ]:          0 :     std::vector< std::vector< int > > L1p( buffProcs.size() );
    1062 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > L2hloc, L2hrem;
    1063         [ #  # ]:          0 :     std::vector< unsigned int > L2p;
    1064         [ #  # ]:          0 :     std::vector< EntityHandle > new_ents;
    1065                 :            : 
    1066         [ #  # ]:          0 :     while( incoming1 )
    1067                 :            :     {
    1068                 :            :         // Wait for all recvs of ents before proceeding to sending remote handles,
    1069                 :            :         // because some procs may have sent entities owned by me to a third proc
    1070 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
    1071                 :            : 
    1072 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 2 * buffProcs.size(), &recvReqs[0], &ind, &status );
    1073 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
    1074                 :            : 
    1075         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    1076                 :            : 
    1077                 :            :         // OK, received something; decrement incoming counter
    1078                 :          0 :         incoming1--;
    1079                 :          0 :         bool done = false;
    1080                 :            : 
    1081                 :            :         // In case ind is for the ack, we need the index of the request before it
    1082                 :          0 :         unsigned int base_ind = 2 * ( ind / 2 );
    1083 [ #  # ][ #  # ]:          0 :         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 2], recvReqs[ind], recvReqs[ind + 1],
    1084 [ #  # ][ #  # ]:          0 :                               incoming1, localOwnedBuffs[ind / 2], sendReqs[base_ind], sendReqs[base_ind + 1], done,
    1085         [ #  # ]:          0 :                               ( store_remote_handles ? localOwnedBuffs[ind / 2] : NULL ), MB_MESG_REMOTEH_SIZE,
    1086 [ #  # ][ #  # ]:          0 :                               &recvRemotehReqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
    1087                 :            : 
    1088         [ #  # ]:          0 :         if( done )
    1089                 :            :         {
    1090 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 4 )
    1091                 :            :             {
    1092         [ #  # ]:          0 :                 msgs.resize( msgs.size() + 1 );
    1093 [ #  # ][ #  # ]:          0 :                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 2] );
    1094                 :            :             }
    1095                 :            : 
    1096                 :            :             // Message completely received - process buffer that was sent
    1097 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
    1098 [ #  # ][ #  # ]:          0 :             result = unpack_buffer( remoteOwnedBuffs[ind / 2]->buff_ptr, store_remote_handles, buffProcs[ind / 2],
    1099         [ #  # ]:          0 :                                     ind / 2, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
    1100         [ #  # ]:          0 :             if( MB_SUCCESS != result )
    1101                 :            :             {
    1102 [ #  # ][ #  # ]:          0 :                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
    1103 [ #  # ][ #  # ]:          0 :                 print_buffer( remoteOwnedBuffs[ind / 2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 2], false );
    1104                 :          0 :                 return result;
    1105                 :            :             }
    1106                 :            : 
    1107         [ #  # ]:          0 :             if( recvReqs.size() != 2 * buffProcs.size() )
    1108                 :            :             {
    1109                 :            :                 // Post irecv's for remote handles from new proc
    1110         [ #  # ]:          0 :                 recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    1111         [ #  # ]:          0 :                 for( i = recvReqs.size(); i < (int)( 2 * buffProcs.size() ); i += 2 )
    1112                 :            :                 {
    1113 [ #  # ][ #  # ]:          0 :                     localOwnedBuffs[i / 2]->reset_buffer();
    1114                 :          0 :                     incoming2++;
    1115 [ #  # ][ #  # ]:          0 :                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 2], localOwnedBuffs[i / 2]->mem_ptr,
    1116         [ #  # ]:          0 :                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
    1117         [ #  # ]:          0 :                     success = MPI_Irecv( localOwnedBuffs[i / 2]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
    1118         [ #  # ]:          0 :                                          buffProcs[i / 2], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
    1119 [ #  # ][ #  # ]:          0 :                                          &recvRemotehReqs[i] );
    1120         [ #  # ]:          0 :                     if( success != MPI_SUCCESS )
    1121 [ #  # ][ #  # ]:          0 :                     { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" ); }
    1122                 :            :                 }
    1123         [ #  # ]:          0 :                 recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    1124         [ #  # ]:          0 :                 sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    1125                 :            :             }
    1126                 :            :         }
    1127                 :            :     }
    1128                 :            : 
    1129                 :            :     // Assign newly created elements to the receiving processor's part (and remove migrated ones)
    1130 [ #  # ][ #  # ]:          0 :     result = assign_entities_part( new_ents, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to assign entities to part" );
    1131                 :            :     if( migrate )
    1132                 :            :     {
    1133                 :            :         // result = remove_entities_part(allsent, procConfig.proc_rank());MB_CHK_SET_ERR(result,
    1134                 :            :         // "Failed to remove entities from part");
    1135                 :            :     }
    1136                 :            : 
    1137                 :            :     // Add requests for any newly added procs
    1138         [ #  # ]:          0 :     if( recvReqs.size() != 2 * buffProcs.size() )
    1139                 :            :     {
    1140                 :            :         // Shouldn't get here...
    1141 [ #  # ][ #  # ]:          0 :         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in entity exchange" );
    1142                 :            :     }
    1143                 :            : 
    1144                 :            : #ifdef MOAB_HAVE_MPE
    1145                 :            :     if( myDebug->get_verbosity() == 2 )
    1146                 :            :     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending recv entities." ); }
    1147                 :            : #endif
    1148                 :            : 
    1149                 :            :     //===========================================
    1150                 :            :     // Send local handles for new entities to owner
    1151                 :            :     //===========================================
    1152                 :          0 :     std::set< unsigned int >::iterator it  = recv_procs.begin();
    1153                 :          0 :     std::set< unsigned int >::iterator eit = recv_procs.end();
    1154 [ #  # ][ #  # ]:          0 :     for( ; it != eit; ++it )
    1155                 :            :     {
    1156 [ #  # ][ #  # ]:          0 :         ind = get_buffers( *it );
    1157                 :            :         // Reserve space on front for size and for initial buff size
    1158 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
    1159                 :            : 
    1160 [ #  # ][ #  # ]:          0 :         result = pack_remote_handles( L1hloc[ind], L1hrem[ind], L1p[ind], buffProcs[ind], remoteOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
    1161 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs[ind]->set_stored_size();
    1162                 :            : 
    1163 [ #  # ][ #  # ]:          0 :         if( myDebug->get_verbosity() == 4 )
    1164                 :            :         {
    1165         [ #  # ]:          0 :             msgs.resize( msgs.size() + 1 );
    1166 [ #  # ][ #  # ]:          0 :             msgs.back() = new Buffer( *remoteOwnedBuffs[ind] );
    1167                 :            :         }
    1168 [ #  # ][ #  # ]:          0 :         result = send_buffer( buffProcs[ind], remoteOwnedBuffs[ind], MB_MESG_REMOTEH_SIZE, sendReqs[2 * ind],
    1169 [ #  # ][ #  # ]:          0 :                               recvRemotehReqs[2 * ind + 1], &ackbuff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
    1170                 :            :     }
    1171                 :            : 
    1172                 :            :     //===========================================
    1173                 :            :     // Process remote handles of my ghosteds
    1174                 :            :     //===========================================
    1175         [ #  # ]:          0 :     while( incoming2 )
    1176                 :            :     {
    1177 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recvRemotehReqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
    1178 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 2 * buffProcs.size(), &recvRemotehReqs[0], &ind, &status );
    1179 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
    1180                 :            : 
    1181                 :            :         // OK, received something; decrement incoming counter
    1182                 :          0 :         incoming2--;
    1183                 :            : 
    1184         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    1185                 :          0 :         bool done             = false;
    1186                 :          0 :         unsigned int base_ind = 2 * ( ind / 2 );
    1187 [ #  # ][ #  # ]:          0 :         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 2], recvRemotehReqs[ind],
    1188 [ #  # ][ #  # ]:          0 :                               recvRemotehReqs[ind + 1], incoming2, remoteOwnedBuffs[ind / 2], sendReqs[base_ind],
    1189 [ #  # ][ #  # ]:          0 :                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
    1190         [ #  # ]:          0 :         if( done )
    1191                 :            :         {
    1192                 :            :             // Incoming remote handles
    1193 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 4 )
    1194                 :            :             {
    1195         [ #  # ]:          0 :                 msgs.resize( msgs.size() + 1 );
    1196 [ #  # ][ #  # ]:          0 :                 msgs.back() = new Buffer( *localOwnedBuffs[ind] );
    1197                 :            :             }
    1198                 :            : 
    1199 [ #  # ][ #  # ]:          0 :             localOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
    1200                 :            :             result =
    1201 [ #  # ][ #  # ]:          0 :                 unpack_remote_handles( buffProcs[ind / 2], localOwnedBuffs[ind / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
    1202                 :            :         }
    1203                 :            :     }
    1204                 :            : 
    1205                 :            : #ifdef MOAB_HAVE_MPE
    1206                 :            :     if( myDebug->get_verbosity() == 2 )
    1207                 :            :     {
    1208                 :            :         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
    1209                 :            :         MPE_Log_event( OWNED_END, procConfig.proc_rank(), "Ending recv entities (still doing checks)." );
    1210                 :            :     }
    1211                 :            : #endif
    1212         [ #  # ]:          0 :     myDebug->tprintf( 1, "Exiting recv_entities.\n" );
    1213                 :            : 
    1214                 :          0 :     return MB_SUCCESS;
    1215                 :            : }
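The receive loops above keep two request slots per peer in one flat array (size message plus second/ack message), so the index returned by MPI_Waitany has to be mapped back to a peer: ind / 2 is the peer's position in buffProcs, and 2 * ( ind / 2 ) is the first slot of that peer's pair. A tiny sketch of that bookkeeping (the struct and function names here are illustrative):

    #include <cstddef>

    // With two request slots per peer laid out as [p0_a, p0_b, p1_a, p1_b, ...],
    // recover which peer and which pair an MPI_Waitany index refers to.
    struct ReqSlot
    {
        std::size_t peer;      // index into the buffProcs-like array
        std::size_t base_ind;  // first slot of this peer's pair
        bool is_second;        // true if the completed request was the second slot
    };

    inline ReqSlot decode_waitany_index( int ind )
    {
        ReqSlot s;
        s.peer      = static_cast< std::size_t >( ind ) / 2;
        s.base_ind  = 2 * s.peer;
        s.is_second = ( ind % 2 ) != 0;
        return s;
    }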
    1216                 :            : 
    1217                 :          0 : ErrorCode ParallelComm::recv_messages( const int from_proc, const bool store_remote_handles, const bool is_iface,
    1218                 :            :                                        Range& final_ents, int& incoming1, int& incoming2,
    1219                 :            :                                        std::vector< std::vector< EntityHandle > >& L1hloc,
    1220                 :            :                                        std::vector< std::vector< EntityHandle > >& L1hrem,
    1221                 :            :                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
    1222                 :            :                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
    1223                 :            :                                        std::vector< MPI_Request >& recv_remoteh_reqs )
    1224                 :            : {
    1225                 :            : #ifndef MOAB_HAVE_MPI
    1226                 :            :     return MB_FAILURE;
    1227                 :            : #else
    1228                 :            :     MPI_Status status;
    1229                 :            :     ErrorCode result;
    1230         [ #  # ]:          0 :     int ind1 = get_buffers( from_proc );
    1231                 :            :     int success, ind2;
    1232         [ #  # ]:          0 :     std::vector< EntityHandle > new_ents;
    1233                 :            : 
    1234                 :            :     // Wait and receive messages
    1235         [ #  # ]:          0 :     while( incoming1 )
    1236                 :            :     {
    1237 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
    1238 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 2, &recvReqs[2 * ind1], &ind2, &status );
    1239 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_messages" ); }
    1240                 :            : 
    1241         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    1242                 :            : 
    1243                 :            :         // OK, received something; decrement incoming counter
    1244                 :          0 :         incoming1--;
    1245                 :          0 :         bool done = false;
    1246                 :            : 
    1247                 :            :         // In case ind is for the ack, we need the index of the request before it
    1248                 :          0 :         ind2 += 2 * ind1;
    1249                 :          0 :         unsigned int base_ind = 2 * ( ind2 / 2 );
    1250                 :            : 
    1251         [ #  # ]:          0 :         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind2 / 2],
    1252                 :            :                               // recvbuff,
    1253 [ #  # ][ #  # ]:          0 :                               recvReqs[ind2], recvReqs[ind2 + 1], incoming1, localOwnedBuffs[ind2 / 2],
    1254 [ #  # ][ #  # ]:          0 :                               sendReqs[base_ind], sendReqs[base_ind + 1], done,
    1255 [ #  # ][ #  # ]:          0 :                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind2 / 2] : NULL ),
    1256 [ #  # ][ #  # ]:          0 :                               MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
    1257                 :            : 
    1258         [ #  # ]:          0 :         if( done )
    1259                 :            :         {
    1260                 :            :             // If it is done, unpack buffer
    1261 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
    1262         [ #  # ]:          0 :             result = unpack_buffer( remoteOwnedBuffs[ind2 / 2]->buff_ptr, store_remote_handles, from_proc, ind2 / 2,
    1263 [ #  # ][ #  # ]:          0 :                                     L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );MB_CHK_SET_ERR( result, "Failed to unpack buffer in recv_messages" );
    1264                 :            : 
    1265 [ #  # ][ #  # ]:          0 :             std::copy( new_ents.begin(), new_ents.end(), range_inserter( final_ents ) );
    1266                 :            : 
    1267                 :            :             // Send local handles for new elements to owner
    1268                 :            :             // Reserve space on front for size and for initial buff size
    1269 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind2 / 2]->reset_buffer( sizeof( int ) );
    1270                 :            : 
    1271 [ #  # ][ #  # ]:          0 :             result = pack_remote_handles( L1hloc[ind2 / 2], L1hrem[ind2 / 2], L1p[ind2 / 2], from_proc,
    1272 [ #  # ][ #  # ]:          0 :                                           remoteOwnedBuffs[ind2 / 2] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
    1273 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind2 / 2]->set_stored_size();
    1274                 :            : 
    1275 [ #  # ][ #  # ]:          0 :             result = send_buffer( buffProcs[ind2 / 2], remoteOwnedBuffs[ind2 / 2], MB_MESG_REMOTEH_SIZE, sendReqs[ind2],
    1276 [ #  # ][ #  # ]:          0 :                                   recv_remoteh_reqs[ind2 + 1], (int*)( localOwnedBuffs[ind2 / 2]->mem_ptr ),
    1277                 :            :                                   //&ackbuff,
    1278 [ #  # ][ #  # ]:          0 :                                   incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
    1279                 :            :         }
    1280                 :            :     }
    1281                 :            : 
    1282                 :          0 :     return MB_SUCCESS;
    1283                 :            : #endif
    1284                 :            : }
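send_buffer / recv_buffer, used by every routine above, implement a two-stage protocol: the first message has a fixed size (INITIAL_BUFF_SIZE) and carries the total packed size in its leading int; only when the packed data does not fit is a second message received into a reallocated buffer (the extra request slots per peer exist for that second message and for the ack). A simplified, self-contained sketch of the size-then-payload idea in plain blocking MPI (an illustration of the pattern, not ParallelComm's exact wire format, tags, or ack handling):

    #include <mpi.h>
    #include <algorithm>
    #include <cstring>
    #include <vector>

    const int INIT_SIZE = 1024;  // stand-in for INITIAL_BUFF_SIZE

    // Sender: total size goes in the leading int; any overflow goes in a second message.
    void send_two_stage( std::vector< char >& payload, int dest, int tag, MPI_Comm comm )
    {
        std::vector< char > first( INIT_SIZE, 0 );
        int total = (int)( payload.size() + sizeof( int ) );
        std::memcpy( &first[0], &total, sizeof( int ) );
        int in_first = std::min( (int)payload.size(), (int)( INIT_SIZE - sizeof( int ) ) );
        if( in_first > 0 ) std::memcpy( &first[sizeof( int )], &payload[0], in_first );
        MPI_Send( &first[0], INIT_SIZE, MPI_CHAR, dest, tag, comm );
        if( total > INIT_SIZE )
            MPI_Send( &payload[in_first], (int)payload.size() - in_first, MPI_CHAR, dest, tag + 1, comm );
    }

    // Receiver: read the size from the first message, then grow the buffer and
    // receive the remainder only if the stored size exceeds the initial size.
    std::vector< char > recv_two_stage( int src, int tag, MPI_Comm comm )
    {
        std::vector< char > buff( INIT_SIZE );
        MPI_Recv( &buff[0], INIT_SIZE, MPI_CHAR, src, tag, comm, MPI_STATUS_IGNORE );
        int total = 0;
        std::memcpy( &total, &buff[0], sizeof( int ) );
        if( total > INIT_SIZE )
        {
            buff.resize( total );
            MPI_Recv( &buff[INIT_SIZE], total - INIT_SIZE, MPI_CHAR, src, tag + 1, comm, MPI_STATUS_IGNORE );
        }
        return buff;  // leading int holds the stored size, as with Buffer::mem_ptr
    }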
    1285                 :            : 
    1286                 :          0 : ErrorCode ParallelComm::recv_remote_handle_messages( const int from_proc, int& incoming2,
    1287                 :            :                                                      std::vector< EntityHandle >& L2hloc,
    1288                 :            :                                                      std::vector< EntityHandle >& L2hrem,
    1289                 :            :                                                      std::vector< unsigned int >& L2p,
    1290                 :            :                                                      std::vector< MPI_Request >& recv_remoteh_reqs )
    1291                 :            : {
    1292                 :            : #ifndef MOAB_HAVE_MPI
    1293                 :            :     return MB_FAILURE;
    1294                 :            : #else
    1295                 :            :     MPI_Status status;
    1296                 :            :     ErrorCode result;
    1297         [ #  # ]:          0 :     int ind1 = get_buffers( from_proc );
    1298                 :            :     int success, ind2;
    1299                 :            : 
    1300         [ #  # ]:          0 :     while( incoming2 )
    1301                 :            :     {
    1302 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
    1303 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 2, &recv_remoteh_reqs[2 * ind1], &ind2, &status );
    1304 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_remote_handle_messages" ); }
    1305                 :            : 
    1306                 :            :         // OK, received something; decrement incoming counter
    1307                 :          0 :         incoming2--;
    1308                 :            : 
    1309         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    1310                 :            : 
    1311                 :          0 :         bool done = false;
    1312                 :          0 :         ind2 += 2 * ind1;
    1313                 :          0 :         unsigned int base_ind = 2 * ( ind2 / 2 );
    1314 [ #  # ][ #  # ]:          0 :         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind2 / 2], recv_remoteh_reqs[ind2],
    1315 [ #  # ][ #  # ]:          0 :                               recv_remoteh_reqs[ind2 + 1], incoming2, remoteOwnedBuffs[ind2 / 2], sendReqs[base_ind],
    1316 [ #  # ][ #  # ]:          0 :                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
    1317         [ #  # ]:          0 :         if( done )
    1318                 :            :         {
    1319                 :            :             // Incoming remote handles
    1320 [ #  # ][ #  # ]:          0 :             localOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
    1321                 :            :             result =
    1322 [ #  # ][ #  # ]:          0 :                 unpack_remote_handles( buffProcs[ind2 / 2], localOwnedBuffs[ind2 / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
    1323                 :            :         }
    1324                 :            :     }
    1325                 :            : 
    1326                 :          0 :     return MB_SUCCESS;
    1327                 :            : #endif
    1328                 :            : }
    1329                 :            : 
    1330                 :          0 : ErrorCode ParallelComm::pack_buffer( Range& orig_ents, const bool /*adjacencies*/, const bool tags,
    1331                 :            :                                      const bool store_remote_handles, const int to_proc, Buffer* buff,
    1332                 :            :                                      TupleList* entprocs, Range* allsent )
    1333                 :            : {
    1334                 :            :     // Pack the buffer with the entity ranges, adjacencies, and tags sections
    1335                 :            :     //
    1336                 :            :     // Note: new entities used in subsequent connectivity lists, sets, or tags,
    1337                 :            :     // are referred to as (MBMAXTYPE + index), where index is into vector
    1338                 :            :     // of new entities, 0-based
    1339                 :            :     ErrorCode result;
    1340                 :            : 
    1341         [ #  # ]:          0 :     Range set_range;
    1342         [ #  # ]:          0 :     std::vector< Tag > all_tags;
    1343         [ #  # ]:          0 :     std::vector< Range > tag_ranges;
    1344                 :            : 
    1345         [ #  # ]:          0 :     Range::const_iterator rit;
    1346                 :            : 
    1347                 :            :     // Entities
    1348 [ #  # ][ #  # ]:          0 :     result = pack_entities( orig_ents, buff, store_remote_handles, to_proc, false, entprocs, allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
    1349                 :            : 
    1350                 :            :     // Sets
    1351 [ #  # ][ #  # ]:          0 :     result = pack_sets( orig_ents, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing sets (count) failed" );
    1352                 :            : 
    1353                 :            :     // Tags
    1354         [ #  # ]:          0 :     Range final_ents;
    1355         [ #  # ]:          0 :     if( tags )
    1356                 :            :     {
    1357 [ #  # ][ #  # ]:          0 :         result = get_tag_send_list( orig_ents, all_tags, tag_ranges );MB_CHK_SET_ERR( result, "Failed to get tagged entities" );
    1358 [ #  # ][ #  # ]:          0 :         result = pack_tags( orig_ents, all_tags, all_tags, tag_ranges, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing tags (count) failed" );
    1359                 :            :     }
    1360                 :            :     else
    1361                 :            :     {  // Set tag size to 0
    1362         [ #  # ]:          0 :         buff->check_space( sizeof( int ) );
    1363         [ #  # ]:          0 :         PACK_INT( buff->buff_ptr, 0 );
    1364         [ #  # ]:          0 :         buff->set_stored_size();
    1365                 :            :     }
    1366                 :            : 
    1367                 :          0 :     return result;
    1368                 :            : }
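A small illustration of the section ordering pack_buffer produces; unpack_buffer below consumes the same three sections in the same order. The pack_int helper is a hypothetical stand-in for the PACK_INT macro used above (assumed to be a plain copy-and-advance helper), not MOAB API.

    #include <cstring>

    // Hypothetical stand-in for the PACK_INT macro: copy an int and advance the pointer.
    static void pack_int( unsigned char*& ptr, int value )
    {
        std::memcpy( ptr, &value, sizeof( int ) );
        ptr += sizeof( int );
    }

    // Buffer layout written by pack_buffer and read back by unpack_buffer:
    //   [ entities section ]  pack_entities / unpack_entities
    //   [ sets section     ]  pack_sets     / unpack_sets
    //   [ tags section     ]  pack_tags     / unpack_tags
    // When 'tags' is false the tags section degenerates to a single zero int
    // (the tag size), so the receiver can read all three sections unconditionally:
    static void write_empty_tag_section( unsigned char*& buff_ptr )
    {
        pack_int( buff_ptr, 0 );
    }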
    1369                 :            : 
    1370                 :          0 : ErrorCode ParallelComm::unpack_buffer( unsigned char* buff_ptr, const bool store_remote_handles, const int from_proc,
    1371                 :            :                                        const int ind, std::vector< std::vector< EntityHandle > >& L1hloc,
    1372                 :            :                                        std::vector< std::vector< EntityHandle > >& L1hrem,
    1373                 :            :                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
    1374                 :            :                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
    1375                 :            :                                        std::vector< EntityHandle >& new_ents, const bool created_iface )
    1376                 :            : {
    1377                 :          0 :     unsigned char* tmp_buff = buff_ptr;
    1378                 :            :     ErrorCode result;
    1379                 :            :     result = unpack_entities( buff_ptr, store_remote_handles, ind, false, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
    1380 [ #  # ][ #  # ]:          0 :                               new_ents, created_iface );MB_CHK_SET_ERR( result, "Unpacking entities failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1381         [ #  # ]:          0 :     if( myDebug->get_verbosity() == 3 )
    1382                 :            :     {
    1383                 :          0 :         myDebug->tprintf( 4, "unpack_entities buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
    1384                 :          0 :         tmp_buff = buff_ptr;
    1385                 :            :     }
    1386 [ #  # ][ #  # ]:          0 :     result = unpack_sets( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking sets failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1387         [ #  # ]:          0 :     if( myDebug->get_verbosity() == 3 )
    1388                 :            :     {
    1389                 :          0 :         myDebug->tprintf( 4, "unpack_sets buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
    1390                 :          0 :         tmp_buff = buff_ptr;
    1391                 :            :     }
    1392 [ #  # ][ #  # ]:          0 :     result = unpack_tags( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking tags failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1393         [ #  # ]:          0 :     if( myDebug->get_verbosity() == 3 )
    1394                 :            :     {
    1395                 :          0 :         myDebug->tprintf( 4, "unpack_tags buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
    1396                 :            :         // tmp_buff = buff_ptr;
    1397                 :            :     }
    1398                 :            : 
    1399         [ #  # ]:          0 :     if( myDebug->get_verbosity() == 3 ) myDebug->print( 4, "\n" );
    1400                 :            : 
    1401                 :          0 :     return MB_SUCCESS;
    1402                 :            : }
    1403                 :            : 
    1404                 :          0 : int ParallelComm::estimate_ents_buffer_size( Range& entities, const bool store_remote_handles )
    1405                 :            : {
    1406                 :          0 :     int buff_size = 0;
    1407         [ #  # ]:          0 :     std::vector< EntityHandle > dum_connect_vec;
    1408                 :            :     const EntityHandle* connect;
    1409                 :            :     int num_connect;
    1410                 :            : 
    1411         [ #  # ]:          0 :     int num_verts = entities.num_of_type( MBVERTEX );
    1412                 :            :     // # verts + coords + handles
    1413                 :          0 :     buff_size += 2 * sizeof( int ) + 3 * sizeof( double ) * num_verts;
    1414         [ #  # ]:          0 :     if( store_remote_handles ) buff_size += sizeof( EntityHandle ) * num_verts;
    1415                 :            : 
    1416                 :            :     // Do a rough count by looking at first entity of each type
    1417 [ #  # ][ #  # ]:          0 :     for( EntityType t = MBEDGE; t < MBENTITYSET; t++ )
    1418                 :            :     {
    1419         [ #  # ]:          0 :         const Range::iterator rit = entities.lower_bound( t );
    1420 [ #  # ][ #  # ]:          0 :         if( TYPE_FROM_HANDLE( *rit ) != t ) continue;
                 [ #  # ]
    1421                 :            : 
    1422 [ #  # ][ #  # ]:          0 :         ErrorCode result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect_vec );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get connectivity to estimate buffer size", -1 );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1423                 :            : 
    1424                 :            :         // Number, type, nodes per entity
    1425                 :          0 :         buff_size += 3 * sizeof( int );
    1426         [ #  # ]:          0 :         int num_ents = entities.num_of_type( t );
    1427                 :            :         // Connectivity, handle for each ent
    1428                 :          0 :         buff_size += ( num_connect + 1 ) * sizeof( EntityHandle ) * num_ents;
    1429                 :            :     }
    1430                 :            : 
    1431                 :            :     // Extra entity type at end, passed as int
    1432                 :          0 :     buff_size += sizeof( int );
    1433                 :            : 
    1434                 :          0 :     return buff_size;
    1435                 :            : }
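A worked example of the estimate above, assuming an 8-byte EntityHandle and 8-byte double (a typical 64-bit build); the mesh counts are hypothetical. It mirrors the arithmetic for 100 vertices and 50 eight-node elements with store_remote_handles == false.

    #include <cstdio>

    int main()
    {
        const int num_verts   = 100;  // hypothetical vertex count
        const int num_hexes   = 50;   // hypothetical 8-node elements
        const int num_connect = 8;
        const int handle_size = 8;    // assumed sizeof(EntityHandle)

        int buff_size = 0;
        buff_size += 2 * (int)sizeof( int ) + 3 * (int)sizeof( double ) * num_verts;  // 2408: counts + coords
        buff_size += 3 * (int)sizeof( int );                                          //   12: number, type, nodes per entity
        buff_size += ( num_connect + 1 ) * handle_size * num_hexes;                   // 3600: connectivity + handle per element
        buff_size += (int)sizeof( int );                                              //    4: trailing MBMAXTYPE marker

        std::printf( "estimated buffer size: %d bytes\n", buff_size );  // prints 6024
        return 0;
    }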
    1436                 :            : 
    1437                 :          0 : int ParallelComm::estimate_sets_buffer_size( Range& entities, const bool /*store_remote_handles*/ )
    1438                 :            : {
    1439                 :            :     // Number of sets
    1440                 :          0 :     int buff_size = sizeof( int );
    1441                 :            : 
    1442                 :            :     // Walk each entity set and add a rough per-set size estimate
    1443         [ #  # ]:          0 :     Range::iterator rit = entities.lower_bound( MBENTITYSET );
    1444                 :            :     ErrorCode result;
    1445                 :            : 
    1446 [ #  # ][ #  # ]:          0 :     for( ; rit != entities.end(); ++rit )
         [ #  # ][ #  # ]
    1447                 :            :     {
    1448                 :            :         unsigned int options;
    1449 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_meshset_options( *rit, options );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get meshset options", -1 );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1450                 :            : 
    1451                 :          0 :         buff_size += sizeof( int );
    1452                 :            : 
    1453         [ #  # ]:          0 :         Range set_range;
    1454         [ #  # ]:          0 :         if( options & MESHSET_SET )
    1455                 :            :         {
    1456                 :            :             // Range-based set; count the subranges
    1457 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_handle( *rit, set_range );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get set entities", -1 );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1458                 :            : 
    1459                 :            :             // Set range
    1460         [ #  # ]:          0 :             buff_size += RANGE_SIZE( set_range );
    1461                 :            :         }
    1462         [ #  # ]:          0 :         else if( options & MESHSET_ORDERED )
    1463                 :            :         {
    1464                 :            :             // Just get the number of entities in the set
    1465                 :            :             int num_ents;
    1466 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_number_entities_by_handle( *rit, num_ents );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get number entities in ordered set", -1 );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1467                 :            : 
    1468                 :            :             // Set vec
    1469                 :          0 :             buff_size += sizeof( EntityHandle ) * num_ents + sizeof( int );
    1470                 :            :         }
    1471                 :            : 
    1472                 :            :         // Get numbers of parents/children
    1473                 :            :         int num_par, num_ch;
    1474 [ #  # ][ #  # ]:          0 :         result = mbImpl->num_child_meshsets( *rit, &num_ch );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num children", -1 );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1475 [ #  # ][ #  # ]:          0 :         result = mbImpl->num_parent_meshsets( *rit, &num_par );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num parents", -1 );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1476                 :            : 
    1477         [ #  # ]:          0 :         buff_size += ( num_ch + num_par ) * sizeof( EntityHandle ) + 2 * sizeof( int );
    1478                 :          0 :     }
    1479                 :            : 
    1480                 :          0 :     return buff_size;
    1481                 :            : }
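By the same logic, a hypothetical worked example for a single MESHSET_ORDERED set holding 20 entities with 1 parent and 2 children, again assuming an 8-byte EntityHandle:

    #include <cstddef>

    constexpr std::size_t kHandleSize = 8;               // assumed sizeof(EntityHandle)
    constexpr std::size_t set_bytes =
        sizeof( int )                                    // per-set header int            (4)
        + 20 * kHandleSize + sizeof( int )               // ordered contents + count      (160 + 4)
        + ( 1 + 2 ) * kHandleSize + 2 * sizeof( int );   // parent/child handles + counts (24 + 8)
    static_assert( set_bytes == 200, "in addition to the leading 'number of sets' int" );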
    1482                 :            : 
    1483                 :          0 : ErrorCode ParallelComm::pack_entities( Range& entities, Buffer* buff, const bool store_remote_handles,
    1484                 :            :                                        const int to_proc, const bool /*is_iface*/, TupleList* entprocs,
    1485                 :            :                                        Range* /*allsent*/ )
    1486                 :            : {
    1487                 :            :     // Packed information:
    1488                 :            :     // 1. # entities = E
    1489                 :            :     // 2. for e in E
    1490                 :            :     //   a. # procs sharing e, incl. sender and receiver = P
    1491                 :            :     //   b. for p in P (procs sharing e)
    1492                 :            :     //   c. for p in P (handle for e on p) (Note1)
    1493                 :            :     // 3. vertex/entity info
    1494                 :            : 
    1495                 :            :     // Get an estimate of the buffer size & pre-allocate buffer size
    1496         [ #  # ]:          0 :     int buff_size = estimate_ents_buffer_size( entities, store_remote_handles );
    1497 [ #  # ][ #  # ]:          0 :     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate ents buffer size" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1498         [ #  # ]:          0 :     buff->check_space( buff_size );
    1499 [ #  # ][ #  # ]:          0 :     myDebug->tprintf( 3, "estimate buffer size for %d entities: %d \n", (int)entities.size(), buff_size );
    1500                 :            : 
    1501                 :            :     unsigned int num_ents;
    1502                 :            :     ErrorCode result;
    1503                 :            : 
    1504 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > entities_vec( entities.size() );
    1505 [ #  # ][ #  # ]:          0 :     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
                 [ #  # ]
    1506                 :            : 
    1507                 :            :     // First pack procs/handles sharing this ent, not including this dest but including
    1508                 :            :     // others (with zero handles)
    1509         [ #  # ]:          0 :     if( store_remote_handles )
    1510                 :            :     {
    1511                 :            :         // Buff space is at least proc + handle for each entity; use avg of 4 other procs
    1512                 :            :         // to estimate buff size, but check later
    1513 [ #  # ][ #  # ]:          0 :         buff->check_space( sizeof( int ) + ( 5 * sizeof( int ) + sizeof( EntityHandle ) ) * entities.size() );
    1514                 :            : 
    1515                 :            :         // 1. # entities = E
    1516 [ #  # ][ #  # ]:          0 :         PACK_INT( buff->buff_ptr, entities.size() );
    1517                 :            : 
    1518         [ #  # ]:          0 :         Range::iterator rit;
    1519                 :            : 
    1520                 :            :         // Pre-fetch sharedp and pstatus
    1521 [ #  # ][ #  # ]:          0 :         std::vector< int > sharedp_vals( entities.size() );
    1522 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( sharedp_tag(), entities, &sharedp_vals[0] );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1523 [ #  # ][ #  # ]:          0 :         std::vector< char > pstatus_vals( entities.size() );
                 [ #  # ]
    1524 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( pstatus_tag(), entities, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1525                 :            : 
    1526                 :            :         unsigned int i;
    1527                 :            :         int tmp_procs[MAX_SHARING_PROCS];
    1528                 :            :         EntityHandle tmp_handles[MAX_SHARING_PROCS];
    1529 [ #  # ][ #  # ]:          0 :         std::set< unsigned int > dumprocs;
    1530                 :            : 
    1531                 :            :         // 2. for e in E
    1532 [ #  # ][ #  # ]:          0 :         for( rit = entities.begin(), i = 0; rit != entities.end(); ++rit, i++ )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1533                 :            :         {
    1534                 :            :             unsigned int ind =
    1535 [ #  # ][ #  # ]:          0 :                 std::lower_bound( entprocs->vul_rd, entprocs->vul_rd + entprocs->get_n(), *rit ) - entprocs->vul_rd;
                 [ #  # ]
    1536 [ #  # ][ #  # ]:          0 :             assert( ind < entprocs->get_n() );
    1537                 :            : 
    1538 [ #  # ][ #  # ]:          0 :             while( ind < entprocs->get_n() && entprocs->vul_rd[ind] == *rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    1539         [ #  # ]:          0 :                 dumprocs.insert( entprocs->vi_rd[ind++] );
    1540                 :            : 
    1541 [ #  # ][ #  # ]:          0 :             result = build_sharedhps_list( *rit, pstatus_vals[i], sharedp_vals[i], dumprocs, num_ents, tmp_procs,
                 [ #  # ]
    1542 [ #  # ][ #  # ]:          0 :                                            tmp_handles );MB_CHK_SET_ERR( result, "Failed to build sharedhps" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1543                 :            : 
    1544                 :          0 :             dumprocs.clear();
    1545                 :            : 
    1546                 :            :             // Now pack them
    1547         [ #  # ]:          0 :             buff->check_space( ( num_ents + 1 ) * sizeof( int ) + num_ents * sizeof( EntityHandle ) );
    1548         [ #  # ]:          0 :             PACK_INT( buff->buff_ptr, num_ents );
    1549         [ #  # ]:          0 :             PACK_INTS( buff->buff_ptr, tmp_procs, num_ents );
    1550         [ #  # ]:          0 :             PACK_EH( buff->buff_ptr, tmp_handles, num_ents );
    1551                 :            : 
    1552                 :            : #ifndef NDEBUG
    1553                 :            :             // Check for duplicates in proc list
    1554                 :          0 :             unsigned int dp = 0;
    1555 [ #  # ][ #  # ]:          0 :             for( ; dp < MAX_SHARING_PROCS && -1 != tmp_procs[dp]; dp++ )
    1556         [ #  # ]:          0 :                 dumprocs.insert( tmp_procs[dp] );
    1557         [ #  # ]:          0 :             assert( dumprocs.size() == dp );
    1558                 :          0 :             dumprocs.clear();
    1559                 :            : #endif
    1560                 :          0 :         }
    1561                 :            :     }
    1562                 :            : 
    1563                 :            :     // Pack vertices
    1564         [ #  # ]:          0 :     Range these_ents = entities.subset_by_type( MBVERTEX );
    1565         [ #  # ]:          0 :     num_ents         = these_ents.size();
    1566                 :            : 
    1567         [ #  # ]:          0 :     if( num_ents )
    1568                 :            :     {
    1569                 :          0 :         buff_size = 2 * sizeof( int ) + 3 * num_ents * sizeof( double );
    1570         [ #  # ]:          0 :         buff->check_space( buff_size );
    1571                 :            : 
    1572                 :            :         // Type, # ents
    1573         [ #  # ]:          0 :         PACK_INT( buff->buff_ptr, ( (int)MBVERTEX ) );
    1574         [ #  # ]:          0 :         PACK_INT( buff->buff_ptr, ( (int)num_ents ) );
    1575                 :            : 
    1576         [ #  # ]:          0 :         std::vector< double > tmp_coords( 3 * num_ents );
    1577 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_coords( these_ents, &tmp_coords[0] );MB_CHK_SET_ERR( result, "Failed to get vertex coordinates" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1578 [ #  # ][ #  # ]:          0 :         PACK_DBLS( buff->buff_ptr, &tmp_coords[0], 3 * num_ents );
    1579                 :            : 
    1580                 :            :         myDebug->tprintf( 4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
    1581 [ #  # ][ #  # ]:          0 :                           CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1582                 :            :     }
    1583                 :            : 
    1584                 :            :     // Now entities; go through range, packing by type and equal # verts per element
    1585 [ #  # ][ #  # ]:          0 :     Range::iterator start_rit = entities.find( *these_ents.rbegin() );
                 [ #  # ]
    1586         [ #  # ]:          0 :     ++start_rit;
    1587                 :          0 :     int last_nodes       = -1;
    1588                 :          0 :     EntityType last_type = MBMAXTYPE;
    1589         [ #  # ]:          0 :     these_ents.clear();
    1590                 :          0 :     Range::iterator end_rit = start_rit;
    1591                 :            :     EntitySequence* seq;
    1592                 :            :     ElementSequence* eseq;
    1593                 :            : 
    1594 [ #  # ][ #  # ]:          0 :     while( start_rit != entities.end() || !these_ents.empty() )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
           [ #  #  #  # ]
    1595                 :            :     {
    1596                 :            :         // Cases:
    1597                 :            :         // A: !end, last_type == MBMAXTYPE, seq: save contig sequence in these_ents
    1598                 :            :         // B: !end, last type & nodes same, seq: save contig sequence in these_ents
    1599                 :            :         // C: !end, last type & nodes different: pack these_ents, then save contig sequence in these_ents
    1600                 :            :         // D: end: pack these_ents
    1601                 :            : 
    1602                 :            :         // Find the sequence holding current start entity, if we're not at end
    1603                 :          0 :         eseq = NULL;
    1604 [ #  # ][ #  # ]:          0 :         if( start_rit != entities.end() )
                 [ #  # ]
    1605                 :            :         {
    1606 [ #  # ][ #  # ]:          0 :             result = sequenceManager->find( *start_rit, seq );MB_CHK_SET_ERR( result, "Failed to find entity sequence" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1607         [ #  # ]:          0 :             if( NULL == seq ) return MB_FAILURE;
    1608         [ #  # ]:          0 :             eseq = dynamic_cast< ElementSequence* >( seq );
    1609                 :            :         }
    1610                 :            : 
    1611                 :            :         // Pack the last batch if at end or next one is different
    1612 [ #  # ][ #  # ]:          0 :         if( !these_ents.empty() &&
         [ #  # ][ #  # ]
    1613 [ #  # ][ #  # ]:          0 :             ( !eseq || eseq->type() != last_type || last_nodes != (int)eseq->nodes_per_element() ) )
         [ #  # ][ #  # ]
    1614                 :            :         {
    1615 [ #  # ][ #  # ]:          0 :             result = pack_entity_seq( last_nodes, store_remote_handles, to_proc, these_ents, entities_vec, buff );MB_CHK_SET_ERR( result, "Failed to pack entities from a sequence" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1616         [ #  # ]:          0 :             these_ents.clear();
    1617                 :            :         }
    1618                 :            : 
    1619         [ #  # ]:          0 :         if( eseq )
    1620                 :            :         {
    1621                 :            :             // Continuation of current range, just save these entities
    1622                 :            :             // Get position in entities list one past end of this sequence
    1623 [ #  # ][ #  # ]:          0 :             end_rit = entities.lower_bound( start_rit, entities.end(), eseq->end_handle() + 1 );
                 [ #  # ]
    1624                 :            : 
    1625                 :            :             // Put these entities in the range
    1626 [ #  # ][ #  # ]:          0 :             std::copy( start_rit, end_rit, range_inserter( these_ents ) );
    1627                 :            : 
    1628         [ #  # ]:          0 :             last_type  = eseq->type();
    1629         [ #  # ]:          0 :             last_nodes = eseq->nodes_per_element();
    1630                 :            :         }
    1631 [ #  # ][ #  # ]:          0 :         else if( start_rit != entities.end() && TYPE_FROM_HANDLE( *start_rit ) == MBENTITYSET )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
           [ #  #  #  # ]
    1632                 :          0 :             break;
    1633                 :            : 
    1634                 :          0 :         start_rit = end_rit;
    1635                 :            :     }
    1636                 :            : 
    1637                 :            :     // Pack MBMAXTYPE to indicate end of ranges
    1638         [ #  # ]:          0 :     buff->check_space( sizeof( int ) );
    1639         [ #  # ]:          0 :     PACK_INT( buff->buff_ptr, ( (int)MBMAXTYPE ) );
    1640                 :            : 
    1641         [ #  # ]:          0 :     buff->set_stored_size();
    1642                 :          0 :     return MB_SUCCESS;
    1643                 :            : }
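A minimal sketch of walking the shared proc/handle header described in items 1-2 above. It assumes the PACK_*/UNPACK_* macros are plain copy-and-advance helpers, stands EntityHandle in with unsigned long, and uses a hypothetical read_one helper rather than MOAB API.

    #include <cstring>
    #include <vector>

    typedef unsigned long Handle;  // stand-in for moab::EntityHandle

    template < typename T >
    T read_one( unsigned char*& ptr )
    {
        T value;
        std::memcpy( &value, ptr, sizeof( T ) );
        ptr += sizeof( T );
        return value;
    }

    void walk_shared_header( unsigned char* ptr )
    {
        const int num_entities = read_one< int >( ptr );    // 1. # entities = E
        for( int e = 0; e < num_entities; e++ )
        {
            const int num_procs = read_one< int >( ptr );   // 2a. # procs sharing e = P
            std::vector< int > procs( num_procs );
            std::vector< Handle > handles( num_procs );
            for( int p = 0; p < num_procs; p++ )            // 2b. sharing procs (owner first)
                procs[p] = read_one< int >( ptr );
            for( int p = 0; p < num_procs; p++ )            // 2c. handle of e on each proc (0 if unknown)
                handles[p] = read_one< Handle >( ptr );
        }
    }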
    1644                 :            : 
    1645                 :          0 : ErrorCode ParallelComm::build_sharedhps_list( const EntityHandle entity, const unsigned char pstatus,
    1646                 :            :                                               const int
    1647                 :            : #ifndef NDEBUG
    1648                 :            :                                                   sharedp
    1649                 :            : #endif
    1650                 :            :                                               ,
    1651                 :            :                                               const std::set< unsigned int >& procs, unsigned int& num_ents,
    1652                 :            :                                               int* tmp_procs, EntityHandle* tmp_handles )
    1653                 :            : {
    1654                 :          0 :     num_ents = 0;
    1655                 :            :     unsigned char pstat;
    1656 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_sharing_data( entity, tmp_procs, tmp_handles, pstat, num_ents );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1657         [ #  # ]:          0 :     assert( pstat == pstatus );
    1658                 :            : 
    1659                 :            :     // Build shared proc/handle lists
    1660                 :            :     // Start with multi-shared, since if it is the owner will be first
    1661         [ #  # ]:          0 :     if( pstatus & PSTATUS_MULTISHARED ) {}
    1662         [ #  # ]:          0 :     else if( pstatus & PSTATUS_NOT_OWNED )
    1663                 :            :     {
    1664                 :            :         // If not multishared and not owned, other sharing proc is owner, put that
    1665                 :            :         // one first
    1666 [ #  # ][ #  # ]:          0 :         assert( "If not owned, I should be shared too" && pstatus & PSTATUS_SHARED && 1 == num_ents );
    1667         [ #  # ]:          0 :         tmp_procs[1]   = procConfig.proc_rank();
    1668                 :          0 :         tmp_handles[1] = entity;
    1669                 :          0 :         num_ents       = 2;
    1670                 :            :     }
    1671         [ #  # ]:          0 :     else if( pstatus & PSTATUS_SHARED )
    1672                 :            :     {
    1673                 :            :         // If not multishared and owned, I'm owner
    1674         [ #  # ]:          0 :         assert( "shared and owned, should be only 1 sharing proc" && 1 == num_ents );
    1675                 :          0 :         tmp_procs[1]   = tmp_procs[0];
    1676         [ #  # ]:          0 :         tmp_procs[0]   = procConfig.proc_rank();
    1677                 :          0 :         tmp_handles[1] = tmp_handles[0];
    1678                 :          0 :         tmp_handles[0] = entity;
    1679                 :          0 :         num_ents       = 2;
    1680                 :            :     }
    1681                 :            :     else
    1682                 :            :     {
    1683                 :            :         // Not shared yet, just add owner (me)
    1684         [ #  # ]:          0 :         tmp_procs[0]   = procConfig.proc_rank();
    1685                 :          0 :         tmp_handles[0] = entity;
    1686                 :          0 :         num_ents       = 1;
    1687                 :            :     }
    1688                 :            : 
    1689                 :            : #ifndef NDEBUG
    1690                 :          0 :     int tmp_ps = num_ents;
    1691                 :            : #endif
    1692                 :            : 
    1693                 :            :     // Now add others, with zero handle for now
    1694 [ #  # ][ #  # ]:          0 :     for( std::set< unsigned int >::iterator sit = procs.begin(); sit != procs.end(); ++sit )
                 [ #  # ]
    1695                 :            :     {
    1696                 :            : #ifndef NDEBUG
    1697 [ #  # ][ #  # ]:          0 :         if( tmp_ps && std::find( tmp_procs, tmp_procs + tmp_ps, *sit ) != tmp_procs + tmp_ps )
         [ #  # ][ #  # ]
                 [ #  # ]
    1698                 :            :         {
    1699 [ #  # ][ #  # ]:          0 :             std::cerr << "Trouble with something already in shared list on proc " << procConfig.proc_rank()
                 [ #  # ]
    1700 [ #  # ][ #  # ]:          0 :                       << ". Entity:" << std::endl;
    1701         [ #  # ]:          0 :             list_entities( &entity, 1 );
    1702 [ #  # ][ #  # ]:          0 :             std::cerr << "pstatus = " << (int)pstatus << ", sharedp = " << sharedp << std::endl;
         [ #  # ][ #  # ]
                 [ #  # ]
    1703         [ #  # ]:          0 :             std::cerr << "tmp_ps = ";
    1704         [ #  # ]:          0 :             for( int i = 0; i < tmp_ps; i++ )
    1705 [ #  # ][ #  # ]:          0 :                 std::cerr << tmp_procs[i] << " ";
    1706         [ #  # ]:          0 :             std::cerr << std::endl;
    1707         [ #  # ]:          0 :             std::cerr << "procs = ";
    1708 [ #  # ][ #  # ]:          0 :             for( std::set< unsigned int >::iterator sit2 = procs.begin(); sit2 != procs.end(); ++sit2 )
                 [ #  # ]
    1709 [ #  # ][ #  # ]:          0 :                 std::cerr << *sit2 << " ";
                 [ #  # ]
    1710                 :          0 :             assert( false );
    1711                 :            :         }
    1712                 :            : #endif
    1713         [ #  # ]:          0 :         tmp_procs[num_ents]   = *sit;
    1714                 :          0 :         tmp_handles[num_ents] = 0;
    1715                 :          0 :         num_ents++;
    1716                 :            :     }
    1717                 :            : 
    1718                 :            :     // Put -1 after procs and 0 after handles
    1719         [ #  # ]:          0 :     if( MAX_SHARING_PROCS > num_ents )
    1720                 :            :     {
    1721                 :          0 :         tmp_procs[num_ents]   = -1;
    1722                 :          0 :         tmp_handles[num_ents] = 0;
    1723                 :            :     }
    1724                 :            : 
    1725                 :          0 :     return MB_SUCCESS;
    1726                 :            : }
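A hypothetical example of the lists this routine builds (ranks and handles invented for illustration): rank 5 owns 'entity' and already shares it with rank 2, whose remote handle is h2; the destination set 'procs' adds rank 7, which has no copy yet.

    tmp_procs   = { 5, 2, 7, -1, ... }       // owner (this rank) first, then existing sharer, then new sharer, -1 terminator
    tmp_handles = { entity, h2, 0, 0, ... }  // zero handle for the proc that has no copy yet
    num_ents    = 3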
    1727                 :            : 
    1728                 :          0 : ErrorCode ParallelComm::pack_entity_seq( const int nodes_per_entity, const bool store_remote_handles, const int to_proc,
    1729                 :            :                                          Range& these_ents, std::vector< EntityHandle >& entities_vec, Buffer* buff )
    1730                 :            : {
    1731         [ #  # ]:          0 :     int tmp_space = 3 * sizeof( int ) + nodes_per_entity * these_ents.size() * sizeof( EntityHandle );
    1732         [ #  # ]:          0 :     buff->check_space( tmp_space );
    1733                 :            : 
    1734                 :            :     // Pack the entity type
    1735 [ #  # ][ #  # ]:          0 :     PACK_INT( buff->buff_ptr, ( (int)TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
         [ #  # ][ #  # ]
    1736                 :            : 
    1737                 :            :     // Pack # ents
    1738 [ #  # ][ #  # ]:          0 :     PACK_INT( buff->buff_ptr, these_ents.size() );
    1739                 :            : 
    1740                 :            :     // Pack the nodes per entity
    1741         [ #  # ]:          0 :     PACK_INT( buff->buff_ptr, nodes_per_entity );
    1742 [ #  # ][ #  # ]:          0 :     myDebug->tprintf( 3, "after some pack int  %d \n", buff->get_current_size() );
    1743                 :            : 
    1744                 :            :     // Pack the connectivity
    1745         [ #  # ]:          0 :     std::vector< EntityHandle > connect;
    1746                 :          0 :     ErrorCode result = MB_SUCCESS;
    1747 [ #  # ][ #  # ]:          0 :     for( Range::const_iterator rit = these_ents.begin(); rit != these_ents.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    1748                 :            :     {
    1749                 :          0 :         connect.clear();
    1750 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_connectivity( &( *rit ), 1, connect, false );MB_CHK_SET_ERR( result, "Failed to get connectivity" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1751         [ #  # ]:          0 :         assert( (int)connect.size() == nodes_per_entity );
    1752                 :            :         result =
    1753 [ #  # ][ #  # ]:          0 :             get_remote_handles( store_remote_handles, &connect[0], &connect[0], connect.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1754 [ #  # ][ #  # ]:          0 :         PACK_EH( buff->buff_ptr, &connect[0], connect.size() );
    1755                 :            :     }
    1756                 :            : 
    1757                 :            :     myDebug->tprintf( 3, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
    1758 [ #  # ][ #  # ]:          0 :                       CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1759                 :            : 
    1760                 :          0 :     return result;
    1761                 :            : }
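Putting pack_entities and pack_entity_seq together, the entity-data portion of the stream (item 3 in the format notes above) looks like this, as reconstructed from the PACK_* calls; unpack_entities reads it back in the same order:

    [ int MBVERTEX ][ int num_verts ][ double coords[3 * num_verts] ]
    ... one block per (type, nodes_per_entity) run of elements:
    [ int type ][ int num_ents ][ int nodes_per_entity ]
    [ EntityHandle connect[nodes_per_entity] ] x num_ents
    [ int MBMAXTYPE ]    <- end-of-entity-data marker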
    1762                 :            : 
    1763                 :          0 : ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, EntityHandle* from_vec,
    1764                 :            :                                             EntityHandle* to_vec_tmp, int num_ents, int to_proc,
    1765                 :            :                                             const std::vector< EntityHandle >& new_ents )
    1766                 :            : {
    1767                 :            :     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE RANGE-BASED VERSION, NO REUSE
    1768                 :            :     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
    1769                 :            :     // OTHER VERSION TOO!!!
    1770         [ #  # ]:          0 :     if( 0 == num_ents ) return MB_SUCCESS;
    1771                 :            : 
    1772                 :            :     // Use a local destination ptr in case we're doing an in-place copy
    1773         [ #  # ]:          0 :     std::vector< EntityHandle > tmp_vector;
    1774                 :          0 :     EntityHandle* to_vec = to_vec_tmp;
    1775         [ #  # ]:          0 :     if( to_vec == from_vec )
    1776                 :            :     {
    1777         [ #  # ]:          0 :         tmp_vector.resize( num_ents );
    1778         [ #  # ]:          0 :         to_vec = &tmp_vector[0];
    1779                 :            :     }
    1780                 :            : 
    1781         [ #  # ]:          0 :     if( !store_remote_handles )
    1782                 :            :     {
    1783                 :            :         int err;
    1784                 :            :         // In this case, substitute position in new_ents list
    1785         [ #  # ]:          0 :         for( int i = 0; i < num_ents; i++ )
    1786                 :            :         {
    1787 [ #  # ][ #  # ]:          0 :             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
    1788 [ #  # ][ #  # ]:          0 :             assert( new_ents[ind] == from_vec[i] );
    1789         [ #  # ]:          0 :             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
    1790 [ #  # ][ #  # ]:          0 :             assert( to_vec[i] != 0 && !err && -1 != ind );
                 [ #  # ]
    1791                 :            :         }
    1792                 :            :     }
    1793                 :            :     else
    1794                 :            :     {
    1795                 :            :         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
    1796 [ #  # ][ #  # ]:          0 :         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1797                 :            : 
    1798                 :            :         // Get single-proc destination handles and shared procs
    1799         [ #  # ]:          0 :         std::vector< int > sharing_procs( num_ents );
    1800 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( shh_tag, from_vec, num_ents, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1801 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( shp_tag, from_vec, num_ents, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1802         [ #  # ]:          0 :         for( int j = 0; j < num_ents; j++ )
    1803                 :            :         {
    1804 [ #  # ][ #  # ]:          0 :             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
         [ #  # ][ #  # ]
    1805                 :            :         }
    1806                 :            : 
    1807                 :            :         EntityHandle tmp_handles[MAX_SHARING_PROCS];
    1808                 :            :         int tmp_procs[MAX_SHARING_PROCS];
    1809                 :            :         int i;
    1810                 :            :         // Go through results, and for 0-valued ones, look for multiple shared proc
    1811 [ #  # ][ #  # ]:          0 :         for( i = 0; i < num_ents; i++ )
    1812                 :            :         {
    1813         [ #  # ]:          0 :             if( !to_vec[i] )
    1814                 :            :             {
    1815         [ #  # ]:          0 :                 result = mbImpl->tag_get_data( shps_tag, from_vec + i, 1, tmp_procs );
    1816         [ #  # ]:          0 :                 if( MB_SUCCESS == result )
    1817                 :            :                 {
    1818         [ #  # ]:          0 :                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
    1819                 :            :                     {
    1820         [ #  # ]:          0 :                         if( -1 == tmp_procs[j] )
    1821                 :          0 :                             break;
    1822         [ #  # ]:          0 :                         else if( tmp_procs[j] == to_proc )
    1823                 :            :                         {
    1824 [ #  # ][ #  # ]:          0 :                             result = mbImpl->tag_get_data( shhs_tag, from_vec + i, 1, tmp_handles );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1825                 :          0 :                             to_vec[i] = tmp_handles[j];
    1826         [ #  # ]:          0 :                             assert( to_vec[i] );
    1827                 :          0 :                             break;
    1828                 :            :                         }
    1829                 :            :                     }
    1830                 :            :                 }
    1831         [ #  # ]:          0 :                 if( !to_vec[i] )
    1832                 :            :                 {
    1833 [ #  # ][ #  # ]:          0 :                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
    1834         [ #  # ]:          0 :                     if( (int)new_ents.size() == j )
    1835                 :            :                     {
    1836 [ #  # ][ #  # ]:          0 :                         std::cout << "Failed to find new entity in send list, proc " << procConfig.proc_rank()
                 [ #  # ]
    1837         [ #  # ]:          0 :                                   << std::endl;
    1838         [ #  # ]:          0 :                         for( int k = 0; k < num_ents; k++ )
    1839 [ #  # ][ #  # ]:          0 :                             std::cout << k << ": " << from_vec[k] << " " << to_vec[k] << std::endl;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1840 [ #  # ][ #  # ]:          0 :                         MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
         [ #  # ][ #  # ]
                 [ #  # ]
    1841                 :            :                     }
    1842                 :            :                     int err;
    1843         [ #  # ]:          0 :                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
    1844 [ #  # ][ #  # ]:          0 :                     if( err ) { MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1845                 :            :                 }
    1846                 :            :             }
    1847                 :          0 :         }
    1848                 :            :     }
    1849                 :            : 
    1850                 :            :     // memcpy over results if from_vec and to_vec are the same
    1851         [ #  # ]:          0 :     if( to_vec_tmp == from_vec ) memcpy( from_vec, to_vec, num_ents * sizeof( EntityHandle ) );
    1852                 :            : 
    1853                 :          0 :     return MB_SUCCESS;
    1854                 :            : }
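A small sketch of the "substitute position in new_ents" fallback used when no remote handle is stored: the handle is looked up in the sorted new_ents vector and its index is encoded as a pseudo-handle of type MBMAXTYPE (the code above uses CREATE_HANDLE for the encoding). position_in_new_ents is a hypothetical helper, and Handle stands in for moab::EntityHandle.

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    typedef unsigned long Handle;  // stand-in for moab::EntityHandle

    // new_ents is kept sorted, so the index can be found by binary search.
    std::size_t position_in_new_ents( const std::vector< Handle >& new_ents, Handle h )
    {
        std::vector< Handle >::const_iterator it =
            std::lower_bound( new_ents.begin(), new_ents.end(), h );
        assert( it != new_ents.end() && *it == h );  // entity must be in the send list
        return static_cast< std::size_t >( it - new_ents.begin() );
    }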
    1855                 :            : 
    1856                 :          0 : ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, const Range& from_range,
    1857                 :            :                                             EntityHandle* to_vec, int to_proc,
    1858                 :            :                                             const std::vector< EntityHandle >& new_ents )
    1859                 :            : {
    1860                 :            :     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE VECTOR-BASED VERSION, NO REUSE
    1861                 :            :     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
    1862                 :            :     // OTHER VERSION TOO!!!
    1863         [ #  # ]:          0 :     if( from_range.empty() ) return MB_SUCCESS;
    1864                 :            : 
    1865         [ #  # ]:          0 :     if( !store_remote_handles )
    1866                 :            :     {
    1867                 :            :         int err;
    1868                 :            :         // In this case, substitute position in new_ents list
    1869         [ #  # ]:          0 :         Range::iterator rit;
    1870                 :            :         unsigned int i;
    1871 [ #  # ][ #  # ]:          0 :         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    1872                 :            :         {
    1873 [ #  # ][ #  # ]:          0 :             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
                 [ #  # ]
    1874 [ #  # ][ #  # ]:          0 :             assert( new_ents[ind] == *rit );
                 [ #  # ]
    1875         [ #  # ]:          0 :             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
    1876 [ #  # ][ #  # ]:          0 :             assert( to_vec[i] != 0 && !err && -1 != ind );
                 [ #  # ]
    1877                 :            :         }
    1878                 :            :     }
    1879                 :            :     else
    1880                 :            :     {
    1881                 :            :         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
    1882 [ #  # ][ #  # ]:          0 :         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1883                 :            : 
    1884                 :            :         // Get single-proc destination handles and shared procs
    1885 [ #  # ][ #  # ]:          0 :         std::vector< int > sharing_procs( from_range.size() );
    1886 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( shh_tag, from_range, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1887 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( shp_tag, from_range, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1888 [ #  # ][ #  # ]:          0 :         for( unsigned int j = 0; j < from_range.size(); j++ )
    1889                 :            :         {
    1890 [ #  # ][ #  # ]:          0 :             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
         [ #  # ][ #  # ]
    1891                 :            :         }
    1892                 :            : 
    1893                 :            :         EntityHandle tmp_handles[MAX_SHARING_PROCS];
    1894                 :            :         int tmp_procs[MAX_SHARING_PROCS];
    1895                 :            :         // Go through results, and for 0-valued ones, look for multiple shared proc
    1896         [ #  # ]:          0 :         Range::iterator rit;
    1897                 :            :         unsigned int i;
    1898 [ #  # ][ #  # ]:          0 :         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1899                 :            :         {
    1900         [ #  # ]:          0 :             if( !to_vec[i] )
    1901                 :            :             {
    1902 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_get_data( shhs_tag, &( *rit ), 1, tmp_handles );
    1903         [ #  # ]:          0 :                 if( MB_SUCCESS == result )
    1904                 :            :                 {
    1905 [ #  # ][ #  # ]:          0 :                     result = mbImpl->tag_get_data( shps_tag, &( *rit ), 1, tmp_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1906         [ #  # ]:          0 :                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
    1907         [ #  # ]:          0 :                         if( tmp_procs[j] == to_proc )
    1908                 :            :                         {
    1909                 :          0 :                             to_vec[i] = tmp_handles[j];
    1910                 :          0 :                             break;
    1911                 :            :                         }
    1912                 :            :                 }
    1913                 :            : 
    1914         [ #  # ]:          0 :                 if( !to_vec[i] )
    1915                 :            :                 {
    1916 [ #  # ][ #  # ]:          0 :                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
                 [ #  # ]
    1917         [ #  # ]:          0 :                     if( (int)new_ents.size() == j )
    1918 [ #  # ][ #  # ]:          0 :                     { MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    1919                 :            :                     int err;
    1920         [ #  # ]:          0 :                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
    1921 [ #  # ][ #  # ]:          0 :                     if( err ) { MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1922                 :            :                 }
    1923                 :            :             }
    1924                 :          0 :         }
    1925                 :            :     }
    1926                 :            : 
    1927                 :          0 :     return MB_SUCCESS;
    1928                 :            : }
    1929                 :            : 
    1930                 :          0 : ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, const Range& from_range, Range& to_range,
    1931                 :            :                                             int to_proc, const std::vector< EntityHandle >& new_ents )
    1932                 :            : {
    1933 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > to_vector( from_range.size() );
    1934                 :            : 
    1935 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_remote_handles( store_remote_handles, from_range, &to_vector[0], to_proc, new_ents );MB_CHK_SET_ERR( result, "Failed to get remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1936 [ #  # ][ #  # ]:          0 :     std::copy( to_vector.begin(), to_vector.end(), range_inserter( to_range ) );
    1937                 :          0 :     return result;
    1938                 :            : }
    1939                 :            : 
    1940                 :          0 : ErrorCode ParallelComm::unpack_entities( unsigned char*& buff_ptr, const bool store_remote_handles,
    1941                 :            :                                          const int /*from_ind*/, const bool is_iface,
    1942                 :            :                                          std::vector< std::vector< EntityHandle > >& L1hloc,
    1943                 :            :                                          std::vector< std::vector< EntityHandle > >& L1hrem,
    1944                 :            :                                          std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
    1945                 :            :                                          std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
    1946                 :            :                                          std::vector< EntityHandle >& new_ents, const bool created_iface )
    1947                 :            : {
    1948                 :            :     // General algorithm:
    1949                 :            :     // - unpack # entities
    1950                 :            :     // - save start of remote handle info, then scan forward to entity definition data
    1951                 :            :     // - for all vertices or entities w/ same # verts:
    1952                 :            :     //   . get entity type, num ents, and (if !vert) # verts
    1953                 :            :     //   . for each ent:
    1954                 :            :     //      o get # procs/handles in remote handle info
    1955                 :            :     //      o if # procs/handles > 2, check for already-created entity:
    1956                 :            :     //        x get index of owner proc (1st in proc list), resize L1 list if nec
    1957                 :            :     //        x look for already-arrived entity in L2 by owner handle
    1958                 :            :     //      o if no existing entity:
    1959                 :            :     //        x if iface, look for existing entity with same connect & type
    1960                 :            :     //        x if none found, create vertex or element
    1961                 :            :     //        x if !iface & multi-shared, save on L2
    1962                 :            :     //        x if !iface, put new entity on new_ents list
    1963                 :            :     //      o update proc/handle, pstatus tags, adjusting to put owner first if iface
    1964                 :            :     //      o if !iface, save new handle on L1 for all sharing procs
    1965                 :            : 
    1966                 :            :     // Lists of handles/procs to return to sending/other procs
    1967                 :            :     // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
    1968                 :            :     //         and h' is either the remote proc handle (if that is known) or
    1969                 :            :     //         the owner proc handle (otherwise);
    1970                 :            :     // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
    1971                 :            :     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
    1972                 :            :     //         remote handles are on owning proc
    1973                 :            :     // L2p: owning procs for handles in L2hrem
    1974                 :            : 
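    // A concrete (illustrative) example of the bookkeeping above: if proc 3 sent
    // us an entity with remote handle h_rem and we create local handle h_loc for
    // it, the pair (h_loc, h_rem) is recorded on L1hloc/L1hrem for that proc with
    // -1 in L1p; if only the owner's handle is known instead, that handle is
    // stored and L1p records the owner's rank. Entities shared by more than two
    // procs are additionally recorded in L2hloc/L2hrem, with the owning rank
    // appended to L2p.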
    1975                 :            :     ErrorCode result;
    1976                 :          0 :     bool done         = false;
    1977                 :          0 :     ReadUtilIface* ru = NULL;
    1978                 :            : 
    1979 [ #  # ][ #  # ]:          0 :     result = mbImpl->query_interface( ru );MB_CHK_SET_ERR( result, "Failed to get ReadUtilIface" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1980                 :            : 
    1981                 :            :     // 1. # entities = E
    1982                 :          0 :     int num_ents             = 0;
    1983                 :          0 :     unsigned char* buff_save = buff_ptr;
    1984                 :            :     int i, j;
    1985                 :            : 
    1986         [ #  # ]:          0 :     if( store_remote_handles )
    1987                 :            :     {
    1988         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, num_ents );
    1989                 :            : 
    1990                 :          0 :         buff_save = buff_ptr;
    1991                 :            : 
    1992                 :            :         // Save place where remote handle info starts, then scan forward to ents
    1993         [ #  # ]:          0 :         for( i = 0; i < num_ents; i++ )
    1994                 :            :         {
    1995         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, j );
    1996         [ #  # ]:          0 :             if( j < 0 )
    1997                 :            :             {
    1998         [ #  # ]:          0 :                 std::cout << "Should be non-negative # proc/handles.";
    1999                 :          0 :                 return MB_FAILURE;
    2000                 :            :             }
    2001                 :            : 
    2002                 :          0 :             buff_ptr += j * ( sizeof( int ) + sizeof( EntityHandle ) );
    2003                 :            :         }
    2004                 :            :     }
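    // Illustrative note (not from the original source): with store_remote_handles, the
    // packed message places one sharing record per entity ahead of the entity data:
    //
    //   [int j][j proc ranks (int)][j handles (EntityHandle)]   repeated num_ents times
    //
    // buff_save is left pointing at the first record while buff_ptr is advanced past all
    // of them, so in the loop below the two pointers move in lockstep: buff_save yields
    // each entity's sharing procs/handles and buff_ptr its coordinates or connectivity.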
    2005                 :            : 
    2006         [ #  # ]:          0 :     std::vector< EntityHandle > msg_ents;
    2007                 :            : 
    2008         [ #  # ]:          0 :     while( !done )
    2009                 :            :     {
    2010                 :          0 :         EntityType this_type = MBMAXTYPE;
    2011         [ #  # ]:          0 :         UNPACK_TYPE( buff_ptr, this_type );
    2012         [ #  # ]:          0 :         assert( this_type != MBENTITYSET );
    2013                 :            : 
    2014                 :            :         // MBMAXTYPE signifies end of entities data
    2015         [ #  # ]:          0 :         if( MBMAXTYPE == this_type ) break;
    2016                 :            : 
    2017                 :            :         // Get the number of ents
    2018                 :          0 :         int num_ents2, verts_per_entity = 0;
    2019         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, num_ents2 );
    2020                 :            : 
    2021                 :            :         // Unpack the nodes per entity
    2022 [ #  # ][ #  # ]:          0 :         if( MBVERTEX != this_type && num_ents2 ) { UNPACK_INT( buff_ptr, verts_per_entity ); }
                 [ #  # ]
    2023                 :            : 
    2024         [ #  # ]:          0 :         std::vector< int > ps( MAX_SHARING_PROCS, -1 );
    2025 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > hs( MAX_SHARING_PROCS, 0 );
    2026         [ #  # ]:          0 :         for( int e = 0; e < num_ents2; e++ )
    2027                 :            :         {
    2028                 :            :             // Check for existing entity, otherwise make new one
    2029                 :          0 :             EntityHandle new_h = 0;
    2030                 :            :             EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
    2031                 :            :             double coords[3];
    2032                 :          0 :             int num_ps = -1;
    2033                 :            : 
    2034                 :            :             //=======================================
    2035                 :            :             // Unpack all the data at once, to make sure the buffer pointers
    2036                 :            :             // are tracked correctly
    2037                 :            :             //=======================================
    2038         [ #  # ]:          0 :             if( store_remote_handles )
    2039                 :            :             {
    2040                 :            :                 // Pointers to other procs/handles
    2041         [ #  # ]:          0 :                 UNPACK_INT( buff_save, num_ps );
    2042         [ #  # ]:          0 :                 if( 0 >= num_ps )
    2043                 :            :                 {
    2044 [ #  # ][ #  # ]:          0 :                     std::cout << "Should never be fewer than 1 proc here." << std::endl;
    2045                 :          0 :                     return MB_FAILURE;
    2046                 :            :                 }
    2047                 :            : 
    2048 [ #  # ][ #  # ]:          0 :                 UNPACK_INTS( buff_save, &ps[0], num_ps );
    2049 [ #  # ][ #  # ]:          0 :                 UNPACK_EH( buff_save, &hs[0], num_ps );
    2050                 :            :             }
    2051                 :            : 
    2052 [ #  # ][ #  # ]:          0 :             if( MBVERTEX == this_type ) { UNPACK_DBLS( buff_ptr, coords, 3 ); }
    2053                 :            :             else
    2054                 :            :             {
    2055         [ #  # ]:          0 :                 assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
    2056         [ #  # ]:          0 :                 UNPACK_EH( buff_ptr, connect, verts_per_entity );
    2057                 :            : 
    2058                 :            :                 // Update connectivity to local handles
    2059 [ #  # ][ #  # ]:          0 :                 result = get_local_handles( connect, verts_per_entity, msg_ents );MB_CHK_SET_ERR( result, "Failed to get local handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2060                 :            :             }
    2061                 :            : 
    2062                 :            :             //=======================================
    2063                 :            :             // Now, process that data; begin by finding an identical
    2064                 :            :             // entity, if there is one
    2065                 :            :             //=======================================
    2066         [ #  # ]:          0 :             if( store_remote_handles )
    2067                 :            :             {
    2068 [ #  # ][ #  # ]:          0 :                 result = find_existing_entity( is_iface, ps[0], hs[0], num_ps, connect, verts_per_entity, this_type,
    2069 [ #  # ][ #  # ]:          0 :                                                L2hloc, L2hrem, L2p, new_h );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2070                 :            :             }
    2071                 :            : 
    2072                 :            :             //=======================================
    2073                 :            :             // If we didn't find one, we'll have to create one
    2074                 :            :             //=======================================
    2075                 :          0 :             bool created_here = false;
    2076 [ #  # ][ #  # ]:          0 :             if( !new_h && !is_iface )
    2077                 :            :             {
    2078         [ #  # ]:          0 :                 if( MBVERTEX == this_type )
    2079                 :            :                 {
    2080                 :            :                     // Create a vertex
    2081 [ #  # ][ #  # ]:          0 :                     result = mbImpl->create_vertex( coords, new_h );MB_CHK_SET_ERR( result, "Failed to make new vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2082                 :            :                 }
    2083                 :            :                 else
    2084                 :            :                 {
    2085                 :            :                     // Create the element
    2086 [ #  # ][ #  # ]:          0 :                     result = mbImpl->create_element( this_type, connect, verts_per_entity, new_h );MB_CHK_SET_ERR( result, "Failed to make new element" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2087                 :            : 
    2088                 :            :                     // Update adjacencies
    2089 [ #  # ][ #  # ]:          0 :                     result = ru->update_adjacencies( new_h, 1, verts_per_entity, connect );MB_CHK_SET_ERR( result, "Failed to update adjacencies" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2090                 :            :                 }
    2091                 :            : 
    2092                 :            :                 // Should have a new handle now
    2093         [ #  # ]:          0 :                 assert( new_h );
    2094                 :            : 
    2095                 :          0 :                 created_here = true;
    2096                 :            :             }
    2097                 :            : 
    2098                 :            :             //=======================================
    2099                 :            :             // Take care of sharing data
    2100                 :            :             //=======================================
    2101                 :            : 
    2102                 :            :             // Need to save entities found in order, for interpretation of
    2103                 :            :             // later parts of this message
    2104         [ #  # ]:          0 :             if( !is_iface )
    2105                 :            :             {
    2106         [ #  # ]:          0 :                 assert( new_h );
    2107         [ #  # ]:          0 :                 msg_ents.push_back( new_h );
    2108                 :            :             }
    2109                 :            : 
    2110 [ #  # ][ #  # ]:          0 :             if( created_here ) new_ents.push_back( new_h );
    2111                 :            : 
    2112 [ #  # ][ #  # ]:          0 :             if( new_h && store_remote_handles )
    2113                 :            :             {
    2114                 :          0 :                 unsigned char new_pstat = 0x0;
    2115         [ #  # ]:          0 :                 if( is_iface )
    2116                 :            :                 {
    2117                 :          0 :                     new_pstat = PSTATUS_INTERFACE;
    2118                 :            :                     // Here, lowest rank proc should be first
    2119 [ #  # ][ #  # ]:          0 :                     int idx = std::min_element( &ps[0], &ps[0] + num_ps ) - &ps[0];
         [ #  # ][ #  # ]
    2120         [ #  # ]:          0 :                     if( idx )
    2121                 :            :                     {
    2122 [ #  # ][ #  # ]:          0 :                         std::swap( ps[0], ps[idx] );
    2123 [ #  # ][ #  # ]:          0 :                         std::swap( hs[0], hs[idx] );
    2124                 :            :                     }
    2125                 :            :                     // Set ownership based on lowest rank; can't be in update_remote_data, because
    2126                 :            :                     // there we don't know whether it resulted from ghosting or not
    2127 [ #  # ][ #  # ]:          0 :                     if( ( num_ps > 1 && ps[0] != (int)rank() ) ) new_pstat |= PSTATUS_NOT_OWNED;
         [ #  # ][ #  # ]
                 [ #  # ]
    2128                 :            :                 }
    2129         [ #  # ]:          0 :                 else if( created_here )
    2130                 :            :                 {
    2131         [ #  # ]:          0 :                     if( created_iface )
    2132                 :          0 :                         new_pstat = PSTATUS_NOT_OWNED;
    2133                 :            :                     else
    2134                 :          0 :                         new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
    2135                 :            :                 }
    2136                 :            : 
    2137                 :            :                 // Update sharing data and pstatus, adjusting order if iface
    2138 [ #  # ][ #  # ]:          0 :                 result = update_remote_data( new_h, &ps[0], &hs[0], num_ps, new_pstat );MB_CHK_SET_ERR( result, "unpack_entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2139                 :            : 
    2140                 :            :                 // If a new multi-shared entity, save owner for subsequent lookup in L2 lists
    2141 [ #  # ][ #  # ]:          0 :                 if( store_remote_handles && !is_iface && num_ps > 2 )
                 [ #  # ]
    2142                 :            :                 {
    2143 [ #  # ][ #  # ]:          0 :                     L2hrem.push_back( hs[0] );
    2144         [ #  # ]:          0 :                     L2hloc.push_back( new_h );
    2145 [ #  # ][ #  # ]:          0 :                     L2p.push_back( ps[0] );
    2146                 :            :                 }
    2147                 :            : 
    2148                 :            :                 // Need to send this new handle to all sharing procs
    2149         [ #  # ]:          0 :                 if( !is_iface )
    2150                 :            :                 {
    2151         [ #  # ]:          0 :                     for( j = 0; j < num_ps; j++ )
    2152                 :            :                     {
    2153 [ #  # ][ #  # ]:          0 :                         if( ps[j] == (int)procConfig.proc_rank() ) continue;
                 [ #  # ]
    2154 [ #  # ][ #  # ]:          0 :                         int idx = get_buffers( ps[j] );
    2155         [ #  # ]:          0 :                         if( idx == (int)L1hloc.size() )
    2156                 :            :                         {
    2157         [ #  # ]:          0 :                             L1hloc.resize( idx + 1 );
    2158         [ #  # ]:          0 :                             L1hrem.resize( idx + 1 );
    2159         [ #  # ]:          0 :                             L1p.resize( idx + 1 );
    2160                 :            :                         }
    2161                 :            : 
    2162                 :            :                         // Don't bother adding if it's already in the list
    2163                 :            :                         std::vector< EntityHandle >::iterator vit =
    2164 [ #  # ][ #  # ]:          0 :                             std::find( L1hloc[idx].begin(), L1hloc[idx].end(), new_h );
                 [ #  # ]
    2165 [ #  # ][ #  # ]:          0 :                         if( vit != L1hloc[idx].end() )
                 [ #  # ]
    2166                 :            :                         {
    2167                 :            :                             // If it's in the list but the remote handle isn't known yet, and
    2168                 :            :                             // we now know it, replace the entry in the list
    2169 [ #  # ][ #  # ]:          0 :                             if( L1p[idx][vit - L1hloc[idx].begin()] != -1 && hs[j] )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
           [ #  #  #  # ]
    2170                 :            :                             {
    2171 [ #  # ][ #  # ]:          0 :                                 L1hrem[idx][vit - L1hloc[idx].begin()] = hs[j];
         [ #  # ][ #  # ]
                 [ #  # ]
    2172 [ #  # ][ #  # ]:          0 :                                 L1p[idx][vit - L1hloc[idx].begin()]    = -1;
         [ #  # ][ #  # ]
    2173                 :            :                             }
    2174                 :            :                             else
    2175                 :          0 :                                 continue;
    2176                 :            :                         }
    2177                 :            :                         else
    2178                 :            :                         {
    2179 [ #  # ][ #  # ]:          0 :                             if( !hs[j] )
    2180                 :            :                             {
    2181 [ #  # ][ #  # ]:          0 :                                 assert( -1 != ps[0] && num_ps > 2 );
                 [ #  # ]
    2182 [ #  # ][ #  # ]:          0 :                                 L1p[idx].push_back( ps[0] );
                 [ #  # ]
    2183 [ #  # ][ #  # ]:          0 :                                 L1hrem[idx].push_back( hs[0] );
                 [ #  # ]
    2184                 :            :                             }
    2185                 :            :                             else
    2186                 :            :                             {
    2187 [ #  # ][ #  # ]:          0 :                                 assert(
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  #  
          #  #  #  #  #  
                      # ]
    2188                 :            :                                     "either this remote handle isn't in the remote list, or "
    2189                 :            :                                     "it's for another proc" &&
    2190                 :            :                                     ( std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) == L1hrem[idx].end() ||
    2191                 :            :                                       L1p[idx][std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) -
    2192         [ #  # ]:          0 :                                                L1hrem[idx].begin()] != -1 ) );
    2193 [ #  # ][ #  # ]:          0 :                                 L1p[idx].push_back( -1 );
    2194 [ #  # ][ #  # ]:          0 :                                 L1hrem[idx].push_back( hs[j] );
                 [ #  # ]
    2195                 :            :                             }
    2196 [ #  # ][ #  # ]:          0 :                             L1hloc[idx].push_back( new_h );
    2197                 :            :                         }
    2198                 :            :                     }
    2199                 :            :                 }
    2200                 :            : 
    2201         [ #  # ]:          0 :                 assert( "Shouldn't be here for non-shared entities" && -1 != num_ps );
    2202 [ #  # ][ #  # ]:          0 :                 std::fill( &ps[0], &ps[num_ps], -1 );
                 [ #  # ]
    2203 [ #  # ][ #  # ]:          0 :                 std::fill( &hs[0], &hs[num_ps], 0 );
                 [ #  # ]
    2204                 :            :             }
    2205                 :            :         }
    2206                 :            : 
    2207 [ #  # ][ #  # ]:          0 :         myDebug->tprintf( 4, "Unpacked %d ents of type %s", num_ents2, CN::EntityTypeName( this_type ) );
                 [ #  # ]
    2208                 :          0 :     }
    2209                 :            : 
    2210         [ #  # ]:          0 :     myDebug->tprintf( 4, "Done unpacking entities.\n" );
    2211                 :            : 
    2212                 :            :     // Need to sort here, to enable searching
    2213         [ #  # ]:          0 :     std::sort( new_ents.begin(), new_ents.end() );
    2214                 :            : 
    2215                 :          0 :     return MB_SUCCESS;
    2216                 :            : }
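The UNPACK_* macros used throughout unpack_entities behave like typed reads that advance the buffer pointer. A minimal stand-alone sketch of that behaviour, together with the per-type block layout the loop above consumes, follows; unpack_one is an illustrative stand-in, not MOAB's actual macro definition.

    #include <cstring>

    // Illustrative stand-in for the UNPACK_* idiom: copy a value of type T out of the
    // buffer and advance the buffer pointer past it.
    template < typename T >
    void unpack_one( unsigned char*& buf, T& val )
    {
        std::memcpy( &val, buf, sizeof( T ) );
        buf += sizeof( T );
    }

    // Each type block consumed above is laid out as
    //   [EntityType][int num_ents2][int verts_per_entity (non-vertex only)]
    // followed per entity by 3 doubles (vertex coords) or verts_per_entity handles
    // (element connectivity); a type of MBMAXTYPE terminates the stream.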
    2217                 :            : 
    2218                 :          0 : ErrorCode ParallelComm::print_buffer( unsigned char* buff_ptr, int mesg_tag, int from_proc, bool sent )
    2219                 :            : {
    2220 [ #  # ][ #  # ]:          0 :     std::cerr << procConfig.proc_rank();
    2221         [ #  # ]:          0 :     if( sent )
    2222         [ #  # ]:          0 :         std::cerr << " sent";
    2223                 :            :     else
    2224         [ #  # ]:          0 :         std::cerr << " received";
    2225 [ #  # ][ #  # ]:          0 :     std::cerr << " message type " << mesg_tag << " to/from proc " << from_proc << "; contents:" << std::endl;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2226                 :            : 
    2227                 :            :     int msg_length, num_ents;
    2228                 :          0 :     unsigned char* orig_ptr = buff_ptr;
    2229         [ #  # ]:          0 :     UNPACK_INT( buff_ptr, msg_length );
    2230 [ #  # ][ #  # ]:          0 :     std::cerr << msg_length << " bytes..." << std::endl;
                 [ #  # ]
    2231                 :            : 
    2232 [ #  # ][ #  # ]:          0 :     if( MB_MESG_ENTS_SIZE == mesg_tag || MB_MESG_ENTS_LARGE == mesg_tag )
    2233                 :            :     {
    2234                 :            :         // 1. # entities = E
    2235                 :            :         int i, j, k;
    2236         [ #  # ]:          0 :         std::vector< int > ps;
    2237 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > hs;
    2238                 :            : 
    2239         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, num_ents );
    2240 [ #  # ][ #  # ]:          0 :         std::cerr << num_ents << " entities..." << std::endl;
                 [ #  # ]
    2241                 :            : 
    2242                 :            :         // Unpack and print the proc/handle info stored for each entity
    2243         [ #  # ]:          0 :         for( i = 0; i < num_ents; i++ )
    2244                 :            :         {
    2245         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, j );
    2246         [ #  # ]:          0 :             if( 0 > j ) return MB_FAILURE;
    2247         [ #  # ]:          0 :             ps.resize( j );
    2248         [ #  # ]:          0 :             hs.resize( j );
    2249 [ #  # ][ #  # ]:          0 :             std::cerr << "Entity " << i << ", # procs = " << j << std::endl;
         [ #  # ][ #  # ]
                 [ #  # ]
    2250 [ #  # ][ #  # ]:          0 :             UNPACK_INTS( buff_ptr, &ps[0], j );
    2251 [ #  # ][ #  # ]:          0 :             UNPACK_EH( buff_ptr, &hs[0], j );
    2252         [ #  # ]:          0 :             std::cerr << "   Procs: ";
    2253         [ #  # ]:          0 :             for( k = 0; k < j; k++ )
    2254 [ #  # ][ #  # ]:          0 :                 std::cerr << ps[k] << " ";
                 [ #  # ]
    2255         [ #  # ]:          0 :             std::cerr << std::endl;
    2256         [ #  # ]:          0 :             std::cerr << "   Handles: ";
    2257         [ #  # ]:          0 :             for( k = 0; k < j; k++ )
    2258 [ #  # ][ #  # ]:          0 :                 std::cerr << hs[k] << " ";
                 [ #  # ]
    2259         [ #  # ]:          0 :             std::cerr << std::endl;
    2260                 :            : 
    2261         [ #  # ]:          0 :             if( buff_ptr - orig_ptr > msg_length )
    2262                 :            :             {
    2263 [ #  # ][ #  # ]:          0 :                 std::cerr << "End of buffer..." << std::endl;
    2264         [ #  # ]:          0 :                 std::cerr.flush();
    2265                 :          0 :                 return MB_FAILURE;
    2266                 :            :             }
    2267                 :            :         }
    2268                 :            : 
    2269                 :            :         while( true )
    2270                 :            :         {
    2271                 :          0 :             EntityType this_type = MBMAXTYPE;
    2272         [ #  # ]:          0 :             UNPACK_TYPE( buff_ptr, this_type );
    2273         [ #  # ]:          0 :             assert( this_type != MBENTITYSET );
    2274                 :            : 
    2275                 :            :             // MBMAXTYPE signifies end of entities data
    2276         [ #  # ]:          0 :             if( MBMAXTYPE == this_type ) break;
    2277                 :            : 
    2278                 :            :             // Get the number of ents
    2279                 :          0 :             int num_ents2, verts_per_entity = 0;
    2280         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, num_ents2 );
    2281                 :            : 
    2282                 :            :             // Unpack the nodes per entity
    2283 [ #  # ][ #  # ]:          0 :             if( MBVERTEX != this_type && num_ents2 ) { UNPACK_INT( buff_ptr, verts_per_entity ); }
                 [ #  # ]
    2284                 :            : 
    2285 [ #  # ][ #  # ]:          0 :             std::cerr << "Type: " << CN::EntityTypeName( this_type ) << "; num_ents = " << num_ents2;
         [ #  # ][ #  # ]
                 [ #  # ]
    2286 [ #  # ][ #  # ]:          0 :             if( MBVERTEX != this_type ) std::cerr << "; verts_per_ent = " << verts_per_entity;
                 [ #  # ]
    2287         [ #  # ]:          0 :             std::cerr << std::endl;
    2288 [ #  # ][ #  # ]:          0 :             if( num_ents2 < 0 || num_ents2 > msg_length )
    2289                 :            :             {
    2290 [ #  # ][ #  # ]:          0 :                 std::cerr << "Wrong number of entities, returning." << std::endl;
    2291         [ #  # ]:          0 :                 return MB_FAILURE;
    2292                 :            :             }
    2293                 :            : 
    2294         [ #  # ]:          0 :             for( int e = 0; e < num_ents2; e++ )
    2295                 :            :             {
    2296                 :            :                 // Check for existing entity, otherwise make new one
    2297         [ #  # ]:          0 :                 if( MBVERTEX == this_type )
    2298                 :            :                 {
    2299                 :            :                     double coords[3];
    2300         [ #  # ]:          0 :                     UNPACK_DBLS( buff_ptr, coords, 3 );
    2301 [ #  # ][ #  # ]:          0 :                     std::cerr << "xyz = " << coords[0] << ", " << coords[1] << ", " << coords[2] << std::endl;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2302                 :            :                 }
    2303                 :            :                 else
    2304                 :            :                 {
    2305                 :            :                     EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
    2306         [ #  # ]:          0 :                     assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
    2307         [ #  # ]:          0 :                     UNPACK_EH( buff_ptr, connect, verts_per_entity );
    2308                 :            : 
    2309                 :            :                     // Print the connectivity as packed in the buffer
    2310         [ #  # ]:          0 :                     std::cerr << "Connectivity: ";
    2311         [ #  # ]:          0 :                     for( k = 0; k < verts_per_entity; k++ )
    2312 [ #  # ][ #  # ]:          0 :                         std::cerr << connect[k] << " ";
    2313         [ #  # ]:          0 :                     std::cerr << std::endl;
    2314                 :            :                 }
    2315                 :            : 
    2316         [ #  # ]:          0 :                 if( buff_ptr - orig_ptr > msg_length )
    2317                 :            :                 {
    2318 [ #  # ][ #  # ]:          0 :                     std::cerr << "End of buffer..." << std::endl;
    2319         [ #  # ]:          0 :                     std::cerr.flush();
    2320                 :          0 :                     return MB_FAILURE;
    2321                 :            :                 }
    2322                 :            :             }
    2323                 :          0 :         }
    2324                 :            :     }
    2325 [ #  # ][ #  # ]:          0 :     else if( MB_MESG_REMOTEH_SIZE == mesg_tag || MB_MESG_REMOTEH_LARGE == mesg_tag )
    2326                 :            :     {
    2327         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, num_ents );
    2328 [ #  # ][ #  # ]:          0 :         std::cerr << num_ents << " entities..." << std::endl;
                 [ #  # ]
    2329 [ #  # ][ #  # ]:          0 :         if( 0 > num_ents || num_ents > msg_length )
    2330                 :            :         {
    2331 [ #  # ][ #  # ]:          0 :             std::cerr << "Wrong number of entities, returning." << std::endl;
    2332                 :          0 :             return MB_FAILURE;
    2333                 :            :         }
    2334 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > L1hloc( num_ents ), L1hrem( num_ents );
                 [ #  # ]
    2335 [ #  # ][ #  # ]:          0 :         std::vector< int > L1p( num_ents );
    2336 [ #  # ][ #  # ]:          0 :         UNPACK_INTS( buff_ptr, &L1p[0], num_ents );
    2337 [ #  # ][ #  # ]:          0 :         UNPACK_EH( buff_ptr, &L1hrem[0], num_ents );
    2338 [ #  # ][ #  # ]:          0 :         UNPACK_EH( buff_ptr, &L1hloc[0], num_ents );
    2339 [ #  # ][ #  # ]:          0 :         std::cerr << num_ents << " Entity pairs; hremote/hlocal/proc: " << std::endl;
                 [ #  # ]
    2340         [ #  # ]:          0 :         for( int i = 0; i < num_ents; i++ )
    2341                 :            :         {
    2342 [ #  # ][ #  # ]:          0 :             EntityType etype = TYPE_FROM_HANDLE( L1hloc[i] );
    2343 [ #  # ][ #  # ]:          0 :             std::cerr << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hrem[i] ) << ", "
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2344 [ #  # ][ #  # ]:          0 :                       << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hloc[i] ) << ", " << L1p[i] << std::endl;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2345                 :            :         }
    2346                 :            : 
    2347         [ #  # ]:          0 :         if( buff_ptr - orig_ptr > msg_length )
    2348                 :            :         {
    2349 [ #  # ][ #  # ]:          0 :             std::cerr << "End of buffer..." << std::endl;
    2350         [ #  # ]:          0 :             std::cerr.flush();
    2351         [ #  # ]:          0 :             return MB_FAILURE;
    2352                 :          0 :         }
    2353                 :            :     }
    2354 [ #  # ][ #  # ]:          0 :     else if( mesg_tag == MB_MESG_TAGS_SIZE || mesg_tag == MB_MESG_TAGS_LARGE )
    2355                 :            :     {
    2356                 :            :         int num_tags, dum1, data_type, tag_size;
    2357         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, num_tags );
    2358 [ #  # ][ #  # ]:          0 :         std::cerr << "Number of tags = " << num_tags << std::endl;
                 [ #  # ]
    2359         [ #  # ]:          0 :         for( int i = 0; i < num_tags; i++ )
    2360                 :            :         {
    2361 [ #  # ][ #  # ]:          0 :             std::cerr << "Tag " << i << ":" << std::endl;
         [ #  # ][ #  # ]
    2362         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, tag_size );
    2363         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, dum1 );
    2364         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, data_type );
    2365 [ #  # ][ #  # ]:          0 :             std::cerr << "Tag size, type, data type = " << tag_size << ", " << dum1 << ", " << data_type << std::endl;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2366         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, dum1 );
    2367 [ #  # ][ #  # ]:          0 :             std::cerr << "Default value size = " << dum1 << std::endl;
                 [ #  # ]
    2368                 :          0 :             buff_ptr += dum1;
    2369         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, dum1 );
    2370         [ #  # ]:          0 :             std::string name( (char*)buff_ptr, dum1 );
    2371 [ #  # ][ #  # ]:          0 :             std::cerr << "Tag name = " << name.c_str() << std::endl;
                 [ #  # ]
    2372                 :          0 :             buff_ptr += dum1;
    2373         [ #  # ]:          0 :             UNPACK_INT( buff_ptr, num_ents );
    2374 [ #  # ][ #  # ]:          0 :             std::cerr << "Number of ents = " << num_ents << std::endl;
                 [ #  # ]
    2375         [ #  # ]:          0 :             std::vector< EntityHandle > tmp_buff( num_ents );
    2376 [ #  # ][ #  # ]:          0 :             UNPACK_EH( buff_ptr, &tmp_buff[0], num_ents );
    2377                 :          0 :             int tot_length = 0;
    2378         [ #  # ]:          0 :             for( int j = 0; j < num_ents; j++ )
    2379                 :            :             {
    2380 [ #  # ][ #  # ]:          0 :                 EntityType etype = TYPE_FROM_HANDLE( tmp_buff[j] );
    2381 [ #  # ][ #  # ]:          0 :                 std::cerr << CN::EntityTypeName( etype ) << " " << ID_FROM_HANDLE( tmp_buff[j] ) << ", tag = ";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2382         [ #  # ]:          0 :                 if( tag_size == MB_VARIABLE_LENGTH )
    2383                 :            :                 {
    2384         [ #  # ]:          0 :                     UNPACK_INT( buff_ptr, dum1 );
    2385                 :          0 :                     tot_length += dum1;
    2386 [ #  # ][ #  # ]:          0 :                     std::cerr << "(variable, length = " << dum1 << ")" << std::endl;
         [ #  # ][ #  # ]
    2387                 :            :                 }
    2388         [ #  # ]:          0 :                 else if( data_type == MB_TYPE_DOUBLE )
    2389                 :            :                 {
    2390                 :            :                     double dum_dbl;
    2391         [ #  # ]:          0 :                     UNPACK_DBL( buff_ptr, dum_dbl );
    2392 [ #  # ][ #  # ]:          0 :                     std::cerr << dum_dbl << std::endl;
    2393                 :            :                 }
    2394         [ #  # ]:          0 :                 else if( data_type == MB_TYPE_INTEGER )
    2395                 :            :                 {
    2396                 :            :                     int dum_int;
    2397         [ #  # ]:          0 :                     UNPACK_INT( buff_ptr, dum_int );
    2398 [ #  # ][ #  # ]:          0 :                     std::cerr << dum_int << std::endl;
    2399                 :            :                 }
    2400         [ #  # ]:          0 :                 else if( data_type == MB_TYPE_OPAQUE )
    2401                 :            :                 {
    2402 [ #  # ][ #  # ]:          0 :                     std::cerr << "(opaque)" << std::endl;
    2403                 :          0 :                     buff_ptr += tag_size;
    2404                 :            :                 }
    2405         [ #  # ]:          0 :                 else if( data_type == MB_TYPE_HANDLE )
    2406                 :            :                 {
    2407                 :            :                     EntityHandle dum_eh;
    2408         [ #  # ]:          0 :                     UNPACK_EH( buff_ptr, &dum_eh, 1 );
    2409 [ #  # ][ #  # ]:          0 :                     std::cerr << dum_eh << std::endl;
    2410                 :            :                 }
    2411         [ #  # ]:          0 :                 else if( data_type == MB_TYPE_BIT )
    2412                 :            :                 {
    2413 [ #  # ][ #  # ]:          0 :                     std::cerr << "(bit)" << std::endl;
    2414                 :          0 :                     buff_ptr += tag_size;
    2415                 :            :                 }
    2416                 :            :             }
    2417         [ #  # ]:          0 :             if( tag_size == MB_VARIABLE_LENGTH ) buff_ptr += tot_length;
    2418                 :          0 :         }
    2419                 :            :     }
    2420                 :            :     else
    2421                 :            :     {
    2422                 :          0 :         assert( false );
    2423                 :            :         return MB_FAILURE;
    2424                 :            :     }
    2425                 :            : 
    2426         [ #  # ]:          0 :     std::cerr.flush();
    2427                 :            : 
    2428                 :          0 :     return MB_SUCCESS;
    2429                 :            : }
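print_buffer recognizes the three message families used in this section: entity messages (MB_MESG_ENTS_SIZE/_LARGE), remote-handle messages (MB_MESG_REMOTEH_SIZE/_LARGE) and tag messages (MB_MESG_TAGS_SIZE/_LARGE); any other tag trips the assert. A hedged usage sketch follows, where pc, buff, mesg_tag and from_proc are assumed to be supplied by the calling code and mesg_tag is one of the values above.

    // Dump a just-received, length-prefixed message for debugging (illustrative only).
    ErrorCode rval = pc->print_buffer( buff, mesg_tag, from_proc, /*sent=*/false );
    if( MB_SUCCESS != rval ) std::cerr << "Buffer appears malformed or truncated." << std::endl;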
    2430                 :            : 
    2431                 :          0 : ErrorCode ParallelComm::list_entities( const EntityHandle* ents, int num_ents )
    2432                 :            : {
    2433         [ #  # ]:          0 :     if( NULL == ents )
    2434                 :            :     {
    2435         [ #  # ]:          0 :         Range shared_ents;
    2436 [ #  # ][ #  # ]:          0 :         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( shared_ents ) );
    2437         [ #  # ]:          0 :         shared_ents.print( "Shared entities:\n" );
    2438                 :          0 :         return MB_SUCCESS;
    2439                 :            :     }
    2440                 :            : 
    2441                 :            :     unsigned char pstat;
    2442                 :            :     EntityHandle tmp_handles[MAX_SHARING_PROCS];
    2443                 :            :     int tmp_procs[MAX_SHARING_PROCS];
    2444                 :            :     unsigned int num_ps;
    2445                 :            :     ErrorCode result;
    2446                 :            : 
    2447         [ #  # ]:          0 :     for( int i = 0; i < num_ents; i++ )
    2448                 :            :     {
    2449 [ #  # ][ #  # ]:          0 :         result = mbImpl->list_entities( ents + i, 1 );MB_CHK_ERR( result );
         [ #  # ][ #  # ]
    2450                 :            :         double coords[3];
    2451         [ #  # ]:          0 :         result = mbImpl->get_coords( ents + i, 1, coords );
    2452 [ #  # ][ #  # ]:          0 :         std::cout << " coords: " << coords[0] << " " << coords[1] << " " << coords[2] << "\n";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2453                 :            : 
    2454 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( ents[i], tmp_procs, tmp_handles, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2455                 :            : 
    2456         [ #  # ]:          0 :         std::cout << "Pstatus: ";
    2457         [ #  # ]:          0 :         if( !num_ps )
    2458 [ #  # ][ #  # ]:          0 :             std::cout << "local " << std::endl;
    2459                 :            :         else
    2460                 :            :         {
    2461 [ #  # ][ #  # ]:          0 :             if( pstat & PSTATUS_NOT_OWNED ) std::cout << "NOT_OWNED; ";
    2462 [ #  # ][ #  # ]:          0 :             if( pstat & PSTATUS_SHARED ) std::cout << "SHARED; ";
    2463 [ #  # ][ #  # ]:          0 :             if( pstat & PSTATUS_MULTISHARED ) std::cout << "MULTISHARED; ";
    2464 [ #  # ][ #  # ]:          0 :             if( pstat & PSTATUS_INTERFACE ) std::cout << "INTERFACE; ";
    2465 [ #  # ][ #  # ]:          0 :             if( pstat & PSTATUS_GHOST ) std::cout << "GHOST; ";
    2466         [ #  # ]:          0 :             std::cout << std::endl;
    2467         [ #  # ]:          0 :             for( unsigned int j = 0; j < num_ps; j++ )
    2468                 :            :             {
    2469 [ #  # ][ #  # ]:          0 :                 std::cout << "  proc " << tmp_procs[j] << " id (handle) " << mbImpl->id_from_handle( tmp_handles[j] )
         [ #  # ][ #  # ]
                 [ #  # ]
    2470 [ #  # ][ #  # ]:          0 :                           << "(" << tmp_handles[j] << ")" << std::endl;
         [ #  # ][ #  # ]
    2471                 :            :             }
    2472                 :            :         }
    2473         [ #  # ]:          0 :         std::cout << std::endl;
    2474                 :            :     }
    2475                 :            : 
    2476                 :          0 :     return MB_SUCCESS;
    2477                 :            : }
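A hedged usage sketch for the overload above, with pc an assumed ParallelComm* and some_handle an assumed entity handle: passing NULL lists every entity in sharedEnts, while a specific handle prints its coordinates, pstatus flags and sharing procs/handles.

    pc->list_entities( NULL, 0 );          // print all shared entities on this rank
    pc->list_entities( &some_handle, 1 );  // print one entity in detail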
    2478                 :            : 
    2479                 :          0 : ErrorCode ParallelComm::list_entities( const Range& ents )
    2480                 :            : {
    2481 [ #  # ][ #  # ]:          0 :     for( Range::iterator rit = ents.begin(); rit != ents.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    2482 [ #  # ][ #  # ]:          0 :         list_entities( &( *rit ), 1 );
    2483                 :            : 
    2484                 :          0 :     return MB_SUCCESS;
    2485                 :            : }
    2486                 :            : 
    2487                 :          0 : ErrorCode ParallelComm::update_remote_data( Range& local_range, Range& remote_range, int other_proc,
    2488                 :            :                                             const unsigned char add_pstat )
    2489                 :            : {
    2490 [ #  # ][ #  # ]:          0 :     Range::iterator rit, rit2;
    2491                 :          0 :     ErrorCode result = MB_SUCCESS;
    2492                 :            : 
    2493                 :            :     // For each pair of local/remote handles:
    2494 [ #  # ][ #  # ]:          0 :     for( rit = local_range.begin(), rit2 = remote_range.begin(); rit != local_range.end(); ++rit, ++rit2 )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2495                 :            :     {
    2496 [ #  # ][ #  # ]:          0 :         result = update_remote_data( *rit, &other_proc, &( *rit2 ), 1, add_pstat );MB_CHK_ERR( result );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2497                 :            :     }
    2498                 :            : 
    2499                 :          0 :     return MB_SUCCESS;
    2500                 :            : }
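Note that the range overload above iterates the two ranges in lockstep without checking their sizes, so local_range and remote_range must be the same length and pairwise aligned. A hedged call sketch, with pc, local_ents, remote_ents and other_rank assumed:

    // The i-th entry of local_ents on this rank corresponds to the i-th entry of
    // remote_ents on other_rank; each pair is marked as shared between the two procs.
    ErrorCode rval = pc->update_remote_data( local_ents, remote_ents, other_rank, PSTATUS_SHARED );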
    2501                 :            : 
    2502                 :          0 : ErrorCode ParallelComm::update_remote_data( const EntityHandle new_h, const int* ps, const EntityHandle* hs,
    2503                 :            :                                             const int num_ps, const unsigned char add_pstat
    2504                 :            :                                             // The following lines left in for future debugging, at least until I trust
    2505                 :            :                                             // this function; tjt, 10/4/2013
    2506                 :            :                                             //                                           , int *new_ps,
    2507                 :            :                                             //                                           EntityHandle *new_hs,
    2508                 :            :                                             //                                           int &new_numps,
    2509                 :            :                                             //                                           unsigned char &new_pstat
    2510                 :            : )
    2511                 :            : {
    2512                 :            :     // Get initial sharing data; new_ps and new_hs get terminated with -1 and 0
    2513                 :            :     // in this function, so no need to initialize; sharing data does not include
    2514                 :            :     // this proc if shared with only one other
    2515                 :            : 
    2516                 :            :     // Following variables declared here to avoid compiler errors
    2517                 :            :     int new_numps;
    2518                 :            :     unsigned char new_pstat;
    2519         [ #  # ]:          0 :     std::vector< int > new_ps( MAX_SHARING_PROCS, -1 );
    2520         [ #  # ]:          0 :     std::vector< EntityHandle > new_hs( MAX_SHARING_PROCS, 0 );
    2521                 :            : 
    2522                 :          0 :     new_numps        = 0;
    2523 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_sharing_data( new_h, &new_ps[0], &new_hs[0], new_pstat, new_numps );MB_CHK_SET_ERR( result, "Failed to get sharing data in update_remote_data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2524                 :          0 :     int num_exist = new_numps;
    2525                 :            : 
    2526                 :            :     // Add new pstat info to the flag
    2527                 :          0 :     new_pstat |= add_pstat;
    2528                 :            : 
    2529                 :            :     /*
    2530                 :            :     #define plist(str, lst, siz)                                          \
    2531                 :            :         std::cout << str << "(";                                          \
    2532                 :            :         for (int i = 0; i < (int)siz; i++) std::cout << lst[i] << " ";    \
    2533                 :            :         std::cout << ") ";                                                \
    2534                 :            : 
    2535                 :            :         std::cout << "update_remote_data: rank = " << rank() << ", new_h = " << new_h << std::endl;
    2536                 :            :         std::string ostr;
    2537                 :            :         plist("ps", ps, num_ps);
    2538                 :            :         plist("hs", hs, num_ps);
    2539                 :            :         print_pstatus(add_pstat, ostr);
    2540                 :            :         std::cout << ", add_pstat = " << ostr.c_str() << std::endl;
    2541                 :            :         plist("tag_ps", new_ps, new_numps);
    2542                 :            :         plist("tag_hs", new_hs, new_numps);
    2543                 :            :         assert(new_numps <= size());
    2544                 :            :         print_pstatus(new_pstat, ostr);
    2545                 :            :         std::cout << ", tag_pstat=" << ostr.c_str() << std::endl;
    2546                 :            :     */
    2547                 :            : 
    2548                 :            : #ifndef NDEBUG
    2549                 :            :     {
    2550                 :            :         // Check for duplicates in proc list
    2551         [ #  # ]:          0 :         std::set< unsigned int > dumprocs;
    2552                 :          0 :         unsigned int dp = 0;
    2553 [ #  # ][ #  # ]:          0 :         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
    2554         [ #  # ]:          0 :             dumprocs.insert( ps[dp] );
    2555         [ #  # ]:          0 :         assert( dp == dumprocs.size() );
    2556                 :            :     }
    2557                 :            : #endif
    2558                 :            : 
    2559                 :            :     // If only one sharer and I'm the owner, insert myself in the list;
    2560                 :            :     // otherwise, my data is checked at the end
    2561 [ #  # ][ #  # ]:          0 :     if( 1 == new_numps && !( new_pstat & PSTATUS_NOT_OWNED ) )
    2562                 :            :     {
    2563 [ #  # ][ #  # ]:          0 :         new_hs[1] = new_hs[0];
    2564 [ #  # ][ #  # ]:          0 :         new_ps[1] = new_ps[0];
    2565         [ #  # ]:          0 :         new_hs[0] = new_h;
    2566 [ #  # ][ #  # ]:          0 :         new_ps[0] = rank();
    2567                 :          0 :         new_numps = 2;
    2568                 :            :     }
    2569                 :            : 
    2570                 :            :     // Now put passed-in data onto lists
    2571                 :            :     int idx;
    2572         [ #  # ]:          0 :     for( int i = 0; i < num_ps; i++ )
    2573                 :            :     {
    2574 [ #  # ][ #  # ]:          0 :         idx = std::find( &new_ps[0], &new_ps[0] + new_numps, ps[i] ) - &new_ps[0];
         [ #  # ][ #  # ]
    2575         [ #  # ]:          0 :         if( idx < new_numps )
    2576                 :            :         {
    2577 [ #  # ][ #  # ]:          0 :             if( !new_hs[idx] && hs[i] )
         [ #  # ][ #  # ]
    2578                 :            :                 // h on list is 0 and passed-in h is non-zero, replace it
    2579         [ #  # ]:          0 :                 new_hs[idx] = hs[i];
    2580                 :            :             else
    2581 [ #  # ][ #  # ]:          0 :                 assert( !hs[i] || new_hs[idx] == hs[i] );
                 [ #  # ]
    2582                 :            :         }
    2583                 :            :         else
    2584                 :            :         {
    2585         [ #  # ]:          0 :             if( new_numps + 1 == MAX_SHARING_PROCS )
    2586                 :            :             {
    2587 [ #  # ][ #  # ]:          0 :                 MB_SET_ERR( MB_FAILURE, "Exceeded MAX_SHARING_PROCS for "
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2588                 :            :                                             << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) ) << ' '
    2589                 :            :                                             << ID_FROM_HANDLE( new_h ) << " in process " << rank() );
    2590                 :            :             }
    2591         [ #  # ]:          0 :             new_ps[new_numps] = ps[i];
    2592         [ #  # ]:          0 :             new_hs[new_numps] = hs[i];
    2593                 :          0 :             new_numps++;
    2594                 :            :         }
    2595                 :            :     }
    2596                 :            : 
    2597                 :            :     // Add myself, if it isn't there already
    2598 [ #  # ][ #  # ]:          0 :     idx = std::find( &new_ps[0], &new_ps[0] + new_numps, rank() ) - &new_ps[0];
         [ #  # ][ #  # ]
                 [ #  # ]
    2599         [ #  # ]:          0 :     if( idx == new_numps )
    2600                 :            :     {
    2601 [ #  # ][ #  # ]:          0 :         new_ps[new_numps] = rank();
    2602         [ #  # ]:          0 :         new_hs[new_numps] = new_h;
    2603                 :          0 :         new_numps++;
    2604                 :            :     }
    2605 [ #  # ][ #  # ]:          0 :     else if( !new_hs[idx] && new_numps > 2 )
         [ #  # ][ #  # ]
    2606         [ #  # ]:          0 :         new_hs[idx] = new_h;
    2607                 :            : 
    2608                 :            :     // Proc list is complete; update for shared, multishared
    2609         [ #  # ]:          0 :     if( new_numps > 1 )
    2610                 :            :     {
    2611         [ #  # ]:          0 :         if( new_numps > 2 ) new_pstat |= PSTATUS_MULTISHARED;
    2612                 :          0 :         new_pstat |= PSTATUS_SHARED;
    2613                 :            :     }
    2614                 :            : 
    2615                 :            :     /*
    2616                 :            :         plist("new_ps", new_ps, new_numps);
    2617                 :            :         plist("new_hs", new_hs, new_numps);
    2618                 :            :         print_pstatus(new_pstat, ostr);
    2619                 :            :         std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
    2620                 :            :         std::cout << std::endl;
    2621                 :            :     */
    2622                 :            : 
    2623 [ #  # ][ #  # ]:          0 :     result = set_sharing_data( new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0] );MB_CHK_SET_ERR( result, "Failed to set sharing data in update_remote_data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2624                 :            : 
    2625 [ #  # ][ #  # ]:          0 :     if( new_pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
    2626                 :            : 
    2627                 :          0 :     return MB_SUCCESS;
    2628                 :            : }
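A worked trace of the merge performed above, with all ranks and handles invented for illustration:

    //   rank()    == 1, new_h == hL (the local handle)
    //   existing  : new_numps = 1, new_ps = {0},    new_hs = {h0}
    //   passed in : num_ps    = 2, ps     = {0, 2}, hs     = {h0, h2}
    //   add_pstat : PSTATUS_GHOST | PSTATUS_NOT_OWNED
    //
    //   NOT_OWNED is set, so rank 1 is not inserted at the front of the list.
    //   Merging ps: proc 0 is already present (handle h0 kept), proc 2 is appended.
    //   Rank 1 is then appended with hL  ->  new_ps = {0, 2, 1}, new_hs = {h0, h2, hL}.
    //   new_numps == 3, so PSTATUS_SHARED | PSTATUS_MULTISHARED are added, the lists are
    //   stored via set_sharing_data, and hL is inserted into sharedEnts.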
    2629                 :            : 
    2630                 :          0 : ErrorCode ParallelComm::update_remote_data_old( const EntityHandle new_h, const int* ps, const EntityHandle* hs,
    2631                 :            :                                                 const int num_ps, const unsigned char add_pstat )
    2632                 :            : {
    2633                 :            :     EntityHandle tag_hs[MAX_SHARING_PROCS];
    2634                 :            :     int tag_ps[MAX_SHARING_PROCS];
    2635                 :            :     unsigned char pstat;
    2636                 :            :     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
    2637                 :            :     // in this function, so no need to initialize
    2638                 :            :     unsigned int num_exist;
    2639 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_sharing_data( new_h, tag_ps, tag_hs, pstat, num_exist );MB_CHK_ERR( result );
         [ #  # ][ #  # ]
    2640                 :            : 
    2641                 :            : #ifndef NDEBUG
    2642                 :            :     {
    2643                 :            :         // Check for duplicates in proc list
    2644         [ #  # ]:          0 :         std::set< unsigned int > dumprocs;
    2645                 :          0 :         unsigned int dp = 0;
    2646 [ #  # ][ #  # ]:          0 :         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
    2647         [ #  # ]:          0 :             dumprocs.insert( ps[dp] );
    2648         [ #  # ]:          0 :         assert( dp == dumprocs.size() );
    2649                 :            :     }
    2650                 :            : #endif
    2651                 :            : 
    2652                 :            :     // Add any new sharing data
    2653                 :          0 :     bool changed = false;
    2654                 :            :     int idx;
    2655         [ #  # ]:          0 :     if( !num_exist )
    2656                 :            :     {
    2657                 :            :         // Just take what caller passed
    2658                 :          0 :         memcpy( tag_ps, ps, num_ps * sizeof( int ) );
    2659                 :          0 :         memcpy( tag_hs, hs, num_ps * sizeof( EntityHandle ) );
    2660                 :          0 :         num_exist = num_ps;
    2661                 :            :         // If it's only one, hopefully I'm not there yet...
    2662 [ #  # ][ #  # ]:          0 :         assert( "I shouldn't be the only proc there." && ( 1 != num_exist || ps[0] != (int)procConfig.proc_rank() ) );
                 [ #  # ]
    2663                 :          0 :         changed = true;
    2664                 :            :     }
    2665                 :            :     else
    2666                 :            :     {
    2667         [ #  # ]:          0 :         for( int i = 0; i < num_ps; i++ )
    2668                 :            :         {
    2669         [ #  # ]:          0 :             idx = std::find( tag_ps, tag_ps + num_exist, ps[i] ) - tag_ps;
    2670         [ #  # ]:          0 :             if( idx == (int)num_exist )
    2671                 :            :             {
    2672         [ #  # ]:          0 :                 if( num_exist == MAX_SHARING_PROCS )
    2673                 :            :                 {
    2674 [ #  # ][ #  # ]:          0 :                     std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) )
         [ #  # ][ #  # ]
    2675 [ #  # ][ #  # ]:          0 :                               << ' ' << ID_FROM_HANDLE( new_h ) << " in process " << proc_config().proc_rank()
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    2676         [ #  # ]:          0 :                               << std::endl;
    2677         [ #  # ]:          0 :                     std::cerr.flush();
    2678 [ #  # ][ #  # ]:          0 :                     MPI_Abort( proc_config().proc_comm(), 66 );
                 [ #  # ]
    2679                 :            :                 }
    2680                 :            : 
    2681                 :            :                 // If there's only 1 sharing proc, and it's not me, then
    2682                 :            :                 // we'll end up with 3; add me to the front
    2683 [ #  # ][ #  # ]:          0 :                 if( !i && num_ps == 1 && num_exist == 1 && ps[0] != (int)procConfig.proc_rank() )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2684                 :            :                 {
    2685                 :          0 :                     int j = 1;
    2686                 :            :                     // If I own this entity, put me at front, otherwise after first
    2687         [ #  # ]:          0 :                     if( !( pstat & PSTATUS_NOT_OWNED ) )
    2688                 :            :                     {
    2689                 :          0 :                         tag_ps[1] = tag_ps[0];
    2690                 :          0 :                         tag_hs[1] = tag_hs[0];
    2691                 :          0 :                         j         = 0;
    2692                 :            :                     }
    2693         [ #  # ]:          0 :                     tag_ps[j] = procConfig.proc_rank();
    2694                 :          0 :                     tag_hs[j] = new_h;
    2695                 :          0 :                     num_exist++;
    2696                 :            :                 }
    2697                 :            : 
    2698                 :          0 :                 tag_ps[num_exist] = ps[i];
    2699                 :          0 :                 tag_hs[num_exist] = hs[i];
    2700                 :          0 :                 num_exist++;
    2701                 :          0 :                 changed = true;
    2702                 :            :             }
    2703         [ #  # ]:          0 :             else if( 0 == tag_hs[idx] )
    2704                 :            :             {
    2705                 :          0 :                 tag_hs[idx] = hs[i];
    2706                 :          0 :                 changed     = true;
    2707                 :            :             }
    2708         [ #  # ]:          0 :             else if( 0 != hs[i] )
    2709                 :            :             {
    2710         [ #  # ]:          0 :                 assert( hs[i] == tag_hs[idx] );
    2711                 :            :             }
    2712                 :            :         }
    2713                 :            :     }
    2714                 :            : 
    2715                 :            :     // Adjust for interface layer if necessary
    2716         [ #  # ]:          0 :     if( add_pstat & PSTATUS_INTERFACE )
    2717                 :            :     {
    2718         [ #  # ]:          0 :         idx = std::min_element( tag_ps, tag_ps + num_exist ) - tag_ps;
    2719         [ #  # ]:          0 :         if( idx )
    2720                 :            :         {
    2721                 :          0 :             int tag_proc       = tag_ps[idx];
    2722                 :          0 :             tag_ps[idx]        = tag_ps[0];
    2723                 :          0 :             tag_ps[0]          = tag_proc;
    2724                 :          0 :             EntityHandle tag_h = tag_hs[idx];
    2725                 :          0 :             tag_hs[idx]        = tag_hs[0];
    2726                 :          0 :             tag_hs[0]          = tag_h;
    2727                 :          0 :             changed            = true;
    2728 [ #  # ][ #  # ]:          0 :             if( tag_ps[0] != (int)procConfig.proc_rank() ) pstat |= PSTATUS_NOT_OWNED;
    2729                 :            :         }
    2730                 :            :     }
    2731                 :            : 
    2732         [ #  # ]:          0 :     if( !changed ) return MB_SUCCESS;
    2733                 :            : 
    2734 [ #  # ][ #  # ]:          0 :     assert( "interface entities should have > 1 proc" && ( !( add_pstat & PSTATUS_INTERFACE ) || num_exist > 1 ) );
    2735 [ #  # ][ #  # ]:          0 :     assert( "ghost entities should have > 1 proc" && ( !( add_pstat & PSTATUS_GHOST ) || num_exist > 1 ) );
    2736                 :            : 
    2737                 :            :     // If it's multi-shared and we created the entity in this unpack,
    2738                 :            :     // local handle probably isn't in handle list yet
    2739         [ #  # ]:          0 :     if( num_exist > 2 )
    2740                 :            :     {
    2741 [ #  # ][ #  # ]:          0 :         idx = std::find( tag_ps, tag_ps + num_exist, procConfig.proc_rank() ) - tag_ps;
    2742         [ #  # ]:          0 :         assert( idx < (int)num_exist );
    2743         [ #  # ]:          0 :         if( !tag_hs[idx] ) tag_hs[idx] = new_h;
    2744                 :            :     }
    2745                 :            : 
    2746                 :            :     int tag_p;
    2747                 :            :     EntityHandle tag_h;
    2748                 :            : 
    2749                 :            :     // Update pstat
    2750                 :          0 :     pstat |= add_pstat;
    2751                 :            : 
    2752         [ #  # ]:          0 :     if( num_exist > 2 )
    2753                 :          0 :         pstat |= ( PSTATUS_MULTISHARED | PSTATUS_SHARED );
    2754         [ #  # ]:          0 :     else if( num_exist > 0 )
    2755                 :          0 :         pstat |= PSTATUS_SHARED;
    2756                 :            : 
    2757                 :            :     //    compare_remote_data(new_h, num_ps, hs, ps, add_pstat,
    2758                 :            :     //                        num_exist, tag_hs, tag_ps, pstat);
    2759                 :            : 
     2760                 :            :     // Reset single shared proc/handle if it was shared and is moving to multi-shared
    2761 [ #  # ][ #  # ]:          0 :     if( num_exist > 2 && !( pstat & PSTATUS_MULTISHARED ) && ( pstat & PSTATUS_SHARED ) )
                 [ #  # ]
    2762                 :            :     {
    2763                 :            :         // Must remove sharedp/h first, which really means set to default value
    2764                 :          0 :         tag_p  = -1;
    2765 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, &tag_p );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2766                 :          0 :         tag_h  = 0;
    2767 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, &tag_h );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2768                 :            :     }
    2769                 :            : 
    2770                 :            :     // Set sharing tags
    2771         [ #  # ]:          0 :     if( num_exist > 2 )
    2772                 :            :     {
    2773         [ #  # ]:          0 :         std::fill( tag_ps + num_exist, tag_ps + MAX_SHARING_PROCS, -1 );
    2774         [ #  # ]:          0 :         std::fill( tag_hs + num_exist, tag_hs + MAX_SHARING_PROCS, 0 );
    2775 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedps_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedps tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2776 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedhs_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2777                 :            : 
    2778                 :            : #ifndef NDEBUG
    2779                 :            :         {
    2780                 :            :             // Check for duplicates in proc list
    2781         [ #  # ]:          0 :             std::set< unsigned int > dumprocs;
    2782                 :          0 :             unsigned int dp = 0;
    2783 [ #  # ][ #  # ]:          0 :             for( ; dp < num_exist && -1 != tag_ps[dp]; dp++ )
    2784         [ #  # ]:          0 :                 dumprocs.insert( tag_ps[dp] );
    2785         [ #  # ]:          0 :             assert( dp == dumprocs.size() );
    2786                 :            :         }
    2787                 :            : #endif
    2788                 :            :     }
    2789 [ #  # ][ #  # ]:          0 :     else if( num_exist == 2 || num_exist == 1 )
    2790                 :            :     {
    2791 [ #  # ][ #  # ]:          0 :         if( tag_ps[0] == (int)procConfig.proc_rank() )
    2792                 :            :         {
    2793 [ #  # ][ #  # ]:          0 :             assert( 2 == num_exist && tag_ps[1] != (int)procConfig.proc_rank() );
                 [ #  # ]
    2794                 :          0 :             tag_ps[0] = tag_ps[1];
    2795                 :          0 :             tag_hs[0] = tag_hs[1];
    2796                 :            :         }
    2797 [ #  # ][ #  # ]:          0 :         assert( tag_ps[0] != -1 && tag_hs[0] != 0 );
    2798 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2799 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2800                 :            :     }
    2801                 :            : 
    2802                 :            :     // Now set new pstatus
    2803 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_set_data( pstatus_tag(), &new_h, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2804                 :            : 
    2805 [ #  # ][ #  # ]:          0 :     if( pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
    2806                 :            : 
    2807                 :          0 :     return MB_SUCCESS;
    2808                 :            : }
    2809                 :            : 
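As an illustration of the sharing state that update_remote_data_old() maintains, the following hedged sketch (not part of ParallelComm.cpp; `pc` and `ent` are placeholder names for a valid ParallelComm pointer and an entity handle) reads the resulting tags back through the public get_sharing_data() overload defined below:

    int ps[MAX_SHARING_PROCS];
    EntityHandle hs[MAX_SHARING_PROCS];
    unsigned char pstat;
    unsigned int num_ps;
    ErrorCode rval = pc->get_sharing_data( ent, ps, hs, pstat, num_ps );
    if( MB_SUCCESS == rval && ( pstat & PSTATUS_SHARED ) )
    {
        // ps[0..num_ps-1] hold the sharing ranks and hs[i] the corresponding remote handles;
        // PSTATUS_MULTISHARED is additionally set once more than two ranks share the entity.
    }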
    2810                 :          0 : ErrorCode ParallelComm::get_sharing_data( const Range& entities, std::set< int >& procs, int operation )
    2811                 :            : {
    2812                 :            :     // Get the union or intersection of sharing data for multiple entities
    2813                 :            :     ErrorCode result;
    2814                 :            :     int sp2[MAX_SHARING_PROCS];
    2815                 :            :     int num_ps;
    2816                 :            :     unsigned char pstat;
    2817         [ #  # ]:          0 :     std::set< int > tmp_procs;
    2818                 :          0 :     procs.clear();
    2819                 :            : 
    2820 [ #  # ][ #  # ]:          0 :     for( Range::const_iterator rit = entities.begin(); rit != entities.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    2821                 :            :     {
    2822                 :            :         // Get sharing procs
    2823 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( *rit, sp2, NULL, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_sharing_data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2824 [ #  # ][ #  # ]:          0 :         if( !( pstat & PSTATUS_SHARED ) && Interface::INTERSECT == operation )
    2825                 :            :         {
    2826                 :          0 :             procs.clear();
    2827                 :          0 :             return MB_SUCCESS;
    2828                 :            :         }
    2829                 :            : 
    2830 [ #  # ][ #  # ]:          0 :         if( rit == entities.begin() ) { std::copy( sp2, sp2 + num_ps, std::inserter( procs, procs.begin() ) ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    2831                 :            :         else
    2832                 :            :         {
    2833         [ #  # ]:          0 :             std::sort( sp2, sp2 + num_ps );
    2834                 :          0 :             tmp_procs.clear();
    2835         [ #  # ]:          0 :             if( Interface::UNION == operation )
    2836                 :            :                 std::set_union( procs.begin(), procs.end(), sp2, sp2 + num_ps,
    2837 [ #  # ][ #  # ]:          0 :                                 std::inserter( tmp_procs, tmp_procs.end() ) );
    2838         [ #  # ]:          0 :             else if( Interface::INTERSECT == operation )
    2839                 :            :                 std::set_intersection( procs.begin(), procs.end(), sp2, sp2 + num_ps,
    2840 [ #  # ][ #  # ]:          0 :                                        std::inserter( tmp_procs, tmp_procs.end() ) );
    2841                 :            :             else
    2842                 :            :             {
    2843                 :          0 :                 assert( "Unknown operation." && false );
    2844                 :            :                 return MB_FAILURE;
    2845                 :            :             }
    2846         [ #  # ]:          0 :             procs.swap( tmp_procs );
    2847                 :            :         }
    2848 [ #  # ][ #  # ]:          0 :         if( Interface::INTERSECT == operation && procs.empty() ) return MB_SUCCESS;
                 [ #  # ]
    2849                 :            :     }
    2850                 :            : 
    2851                 :          0 :     return MB_SUCCESS;
    2852                 :            : }
    2853                 :            : 
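A hedged usage sketch of the Range overload above (placeholder names `pc` and `shared_ents`; not part of this file): with Interface::INTERSECT the result is the set of ranks that share every entity in the range, with Interface::UNION the ranks that share at least one of them.

    std::set< int > common_procs, any_procs;
    ErrorCode rval = pc->get_sharing_data( shared_ents, common_procs, Interface::INTERSECT );
    if( MB_SUCCESS == rval )
        rval = pc->get_sharing_data( shared_ents, any_procs, Interface::UNION );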
    2854                 :          0 : ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs, unsigned char& pstat,
    2855                 :            :                                           unsigned int& num_ps )
    2856                 :            : {
    2857 [ #  # ][ #  # ]:          0 :     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2858         [ #  # ]:          0 :     if( pstat & PSTATUS_MULTISHARED )
    2859                 :            :     {
    2860 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2861         [ #  # ]:          0 :         if( hs )
    2862                 :            :         {
    2863 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2864                 :            :         }
    2865         [ #  # ]:          0 :         num_ps = std::find( ps, ps + MAX_SHARING_PROCS, -1 ) - ps;
    2866                 :            :     }
    2867         [ #  # ]:          0 :     else if( pstat & PSTATUS_SHARED )
    2868                 :            :     {
    2869 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2870         [ #  # ]:          0 :         if( hs )
    2871                 :            :         {
    2872 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( sharedh_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2873                 :          0 :             hs[1] = 0;
    2874                 :            :         }
    2875                 :            :         // Initialize past end of data
    2876                 :          0 :         ps[1]  = -1;
    2877                 :          0 :         num_ps = 1;
    2878                 :            :     }
    2879                 :            :     else
    2880                 :            :     {
    2881                 :          0 :         ps[0] = -1;
    2882         [ #  # ]:          0 :         if( hs ) hs[0] = 0;
    2883                 :          0 :         num_ps = 0;
    2884                 :            :     }
    2885                 :            : 
    2886         [ #  # ]:          0 :     assert( MAX_SHARING_PROCS >= num_ps );
    2887                 :            : 
    2888                 :          0 :     return MB_SUCCESS;
    2889                 :            : }
    2890                 :            : 
    2891                 :          0 : ErrorCode ParallelComm::find_existing_entity( const bool is_iface, const int owner_p, const EntityHandle owner_h,
    2892                 :            :                                               const int num_ps, const EntityHandle* connect, const int num_connect,
    2893                 :            :                                               const EntityType this_type, std::vector< EntityHandle >& L2hloc,
    2894                 :            :                                               std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
    2895                 :            :                                               EntityHandle& new_h )
    2896                 :            : {
    2897                 :          0 :     new_h = 0;
    2898 [ #  # ][ #  # ]:          0 :     if( !is_iface && num_ps > 2 )
    2899                 :            :     {
    2900         [ #  # ]:          0 :         for( unsigned int i = 0; i < L2hrem.size(); i++ )
    2901                 :            :         {
    2902 [ #  # ][ #  # ]:          0 :             if( L2hrem[i] == owner_h && owner_p == (int)L2p[i] )
         [ #  # ][ #  # ]
                 [ #  # ]
    2903                 :            :             {
    2904         [ #  # ]:          0 :                 new_h = L2hloc[i];
    2905                 :          0 :                 return MB_SUCCESS;
    2906                 :            :             }
    2907                 :            :         }
    2908                 :            :     }
    2909                 :            : 
    2910                 :            :     // If we got here and it's a vertex, we don't need to look further
    2911 [ #  # ][ #  # ]:          0 :     if( MBVERTEX == this_type || !connect || !num_connect ) return MB_SUCCESS;
                 [ #  # ]
    2912                 :            : 
    2913         [ #  # ]:          0 :     Range tmp_range;
    2914 [ #  # ][ #  # ]:          0 :     ErrorCode result = mbImpl->get_adjacencies( connect, num_connect, CN::Dimension( this_type ), false, tmp_range );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    2915 [ #  # ][ #  # ]:          0 :     if( !tmp_range.empty() )
    2916                 :            :     {
    2917                 :            :         // Found a corresponding entity - return target
    2918 [ #  # ][ #  # ]:          0 :         new_h = *tmp_range.begin();
    2919                 :            :     }
    2920                 :            :     else
    2921                 :            :     {
    2922                 :          0 :         new_h = 0;
    2923                 :            :     }
    2924                 :            : 
    2925                 :          0 :     return MB_SUCCESS;
    2926                 :            : }
    2927                 :            : 
    2928                 :          0 : ErrorCode ParallelComm::get_local_handles( const Range& remote_handles, Range& local_handles,
    2929                 :            :                                            const std::vector< EntityHandle >& new_ents )
    2930                 :            : {
    2931         [ #  # ]:          0 :     std::vector< EntityHandle > rh_vec;
    2932 [ #  # ][ #  # ]:          0 :     rh_vec.reserve( remote_handles.size() );
    2933 [ #  # ][ #  # ]:          0 :     std::copy( remote_handles.begin(), remote_handles.end(), std::back_inserter( rh_vec ) );
         [ #  # ][ #  # ]
    2934 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_local_handles( &rh_vec[0], remote_handles.size(), new_ents );
                 [ #  # ]
    2935 [ #  # ][ #  # ]:          0 :     std::copy( rh_vec.begin(), rh_vec.end(), range_inserter( local_handles ) );
    2936                 :          0 :     return result;
    2937                 :            : }
    2938                 :            : 
    2939                 :          0 : ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents )
    2940                 :            : {
    2941         [ #  # ]:          0 :     std::vector< EntityHandle > tmp_ents;
    2942 [ #  # ][ #  # ]:          0 :     std::copy( new_ents.begin(), new_ents.end(), std::back_inserter( tmp_ents ) );
         [ #  # ][ #  # ]
    2943         [ #  # ]:          0 :     return get_local_handles( from_vec, num_ents, tmp_ents );
    2944                 :            : }
    2945                 :            : 
    2946                 :          0 : ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents,
    2947                 :            :                                            const std::vector< EntityHandle >& new_ents )
    2948                 :            : {
    2949         [ #  # ]:          0 :     for( int i = 0; i < num_ents; i++ )
    2950                 :            :     {
    2951         [ #  # ]:          0 :         if( TYPE_FROM_HANDLE( from_vec[i] ) == MBMAXTYPE )
    2952                 :            :         {
    2953         [ #  # ]:          0 :             assert( ID_FROM_HANDLE( from_vec[i] ) < (int)new_ents.size() );
    2954                 :          0 :             from_vec[i] = new_ents[ID_FROM_HANDLE( from_vec[i] )];
    2955                 :            :         }
    2956                 :            :     }
    2957                 :            : 
    2958                 :          0 :     return MB_SUCCESS;
    2959                 :            : }
    2960                 :            : 
    2961                 :            : /*
    2962                 :            : template <typename T> void
    2963                 :            : insert_in_array(T* array, size_t array_size, size_t location, T value)
    2964                 :            : {
    2965                 :            :   assert(location + 1 < array_size);
    2966                 :            :   for (size_t i = array_size - 1; i > location; i--)
    2967                 :            :     array[i] = array[i - 1];
    2968                 :            :   array[location] = value;
    2969                 :            : }
    2970                 :            : */
    2971                 :            : 
    2972                 :          0 : ErrorCode ParallelComm::pack_range_map( Range& key_range, EntityHandle val_start, HandleMap& handle_map )
    2973                 :            : {
    2974 [ #  # ][ #  # ]:          0 :     for( Range::const_pair_iterator key_it = key_range.const_pair_begin(); key_it != key_range.const_pair_end();
         [ #  # ][ #  # ]
                 [ #  # ]
    2975                 :            :          ++key_it )
    2976                 :            :     {
    2977 [ #  # ][ #  # ]:          0 :         int tmp_num = ( *key_it ).second - ( *key_it ).first + 1;
    2978 [ #  # ][ #  # ]:          0 :         handle_map.insert( ( *key_it ).first, val_start, tmp_num );
    2979                 :          0 :         val_start += tmp_num;
    2980                 :            :     }
    2981                 :            : 
    2982                 :          0 :     return MB_SUCCESS;
    2983                 :            : }
    2984                 :            : 
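To make the mapping concrete, a hedged worked example (values chosen purely for illustration): if key_range contains the handle runs [10, 14] and [20, 22] and val_start is 100, pack_range_map records 10→100 ... 14→104 for the first run and then, with val_start advanced by that run's 5 handles, 20→105 ... 22→107 for the second.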
    2985                 :          0 : ErrorCode ParallelComm::pack_sets( Range& entities, Buffer* buff, const bool store_remote_handles, const int to_proc )
    2986                 :            : {
    2987                 :            :     // SETS:
    2988                 :            :     // . #sets
    2989                 :            :     // . for each set:
    2990                 :            :     //   - options[#sets] (unsigned int)
    2991                 :            :     //   - if (unordered) set range
    2992                 :            :     //   - else if ordered
    2993                 :            :     //     . #ents in set
    2994                 :            :     //     . handles[#ents]
    2995                 :            :     //   - #parents
    2996                 :            :     //   - if (#parents) handles[#parents]
    2997                 :            :     //   - #children
    2998                 :            :     //   - if (#children) handles[#children]
    2999                 :            : 
    3000                 :            :     // Now the sets; assume any sets the application wants to pass are in the entities list
    3001                 :            :     ErrorCode result;
    3002         [ #  # ]:          0 :     Range all_sets = entities.subset_by_type( MBENTITYSET );
    3003                 :            : 
    3004         [ #  # ]:          0 :     int buff_size = estimate_sets_buffer_size( all_sets, store_remote_handles );
    3005 [ #  # ][ #  # ]:          0 :     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate sets buffer size" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3006         [ #  # ]:          0 :     buff->check_space( buff_size );
    3007                 :            : 
    3008                 :            :     // Number of sets
    3009 [ #  # ][ #  # ]:          0 :     PACK_INT( buff->buff_ptr, all_sets.size() );
    3010                 :            : 
    3011                 :            :     // Options for all sets
    3012 [ #  # ][ #  # ]:          0 :     std::vector< unsigned int > options( all_sets.size() );
    3013         [ #  # ]:          0 :     Range::iterator rit;
    3014         [ #  # ]:          0 :     std::vector< EntityHandle > members;
    3015                 :            :     int i;
    3016 [ #  # ][ #  # ]:          0 :     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    3017                 :            :     {
    3018 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_meshset_options( *rit, options[i] );MB_CHK_SET_ERR( result, "Failed to get meshset options" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3019                 :            :     }
    3020 [ #  # ][ #  # ]:          0 :     buff->check_space( all_sets.size() * sizeof( unsigned int ) );
    3021 [ #  # ][ #  # ]:          0 :     PACK_VOID( buff->buff_ptr, &options[0], all_sets.size() * sizeof( unsigned int ) );
                 [ #  # ]
    3022                 :            : 
    3023                 :            :     // Pack parallel geometry unique id
    3024 [ #  # ][ #  # ]:          0 :     if( !all_sets.empty() )
    3025                 :            :     {
    3026                 :            :         Tag uid_tag;
    3027         [ #  # ]:          0 :         int n_sets  = all_sets.size();
    3028                 :          0 :         bool b_pack = false;
    3029         [ #  # ]:          0 :         std::vector< int > id_data( n_sets );
    3030                 :            :         result =
    3031 [ #  # ][ #  # ]:          0 :             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3032                 :            : 
    3033 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( uid_tag, all_sets, &id_data[0] );
    3034         [ #  # ]:          0 :         if( MB_TAG_NOT_FOUND != result )
    3035                 :            :         {
    3036 [ #  # ][ #  # ]:          0 :             if( MB_SUCCESS != result ) MB_SET_ERR( result, "Failed to get parallel geometry unique ids" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3037         [ #  # ]:          0 :             for( i = 0; i < n_sets; i++ )
    3038                 :            :             {
    3039 [ #  # ][ #  # ]:          0 :                 if( id_data[i] != 0 )
    3040                 :            :                 {
    3041                 :          0 :                     b_pack = true;
    3042                 :          0 :                     break;
    3043                 :            :                 }
    3044                 :            :             }
    3045                 :            :         }
    3046                 :            : 
    3047         [ #  # ]:          0 :         if( b_pack )
     3048                 :            :         {  // If any unique id was found, pack them all
    3049         [ #  # ]:          0 :             buff->check_space( ( n_sets + 1 ) * sizeof( int ) );
    3050         [ #  # ]:          0 :             PACK_INT( buff->buff_ptr, n_sets );
    3051 [ #  # ][ #  # ]:          0 :             PACK_INTS( buff->buff_ptr, &id_data[0], n_sets );
    3052                 :            :         }
    3053                 :            :         else
    3054                 :            :         {
    3055         [ #  # ]:          0 :             buff->check_space( sizeof( int ) );
    3056 [ #  # ][ #  # ]:          0 :             PACK_INT( buff->buff_ptr, 0 );
    3057                 :          0 :         }
    3058                 :            :     }
    3059                 :            : 
    3060                 :            :     // Vectors/ranges
    3061 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > entities_vec( entities.size() );
    3062 [ #  # ][ #  # ]:          0 :     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
                 [ #  # ]
    3063 [ #  # ][ #  # ]:          0 :     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    3064                 :            :     {
    3065                 :          0 :         members.clear();
    3066 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_entities_by_handle( *rit, members );MB_CHK_SET_ERR( result, "Failed to get entities in ordered set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3067                 :            :         result =
    3068 [ #  # ][ #  # ]:          0 :             get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3069         [ #  # ]:          0 :         buff->check_space( members.size() * sizeof( EntityHandle ) + sizeof( int ) );
    3070         [ #  # ]:          0 :         PACK_INT( buff->buff_ptr, members.size() );
    3071 [ #  # ][ #  # ]:          0 :         PACK_EH( buff->buff_ptr, &members[0], members.size() );
    3072                 :            :     }
    3073                 :            : 
    3074                 :            :     // Pack parent/child sets
    3075         [ #  # ]:          0 :     if( !store_remote_handles )
     3076                 :            :     {  // Only applies when not storing remote handles
    3077                 :            :         // Pack numbers of parents/children
    3078                 :          0 :         unsigned int tot_pch = 0;
    3079                 :            :         int num_pch;
    3080 [ #  # ][ #  # ]:          0 :         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
    3081 [ #  # ][ #  # ]:          0 :         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    3082                 :            :         {
    3083                 :            :             // Pack parents
    3084 [ #  # ][ #  # ]:          0 :             result = mbImpl->num_parent_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num parents" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3085         [ #  # ]:          0 :             PACK_INT( buff->buff_ptr, num_pch );
    3086                 :          0 :             tot_pch += num_pch;
    3087 [ #  # ][ #  # ]:          0 :             result = mbImpl->num_child_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num children" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3088         [ #  # ]:          0 :             PACK_INT( buff->buff_ptr, num_pch );
    3089                 :          0 :             tot_pch += num_pch;
    3090                 :            :         }
    3091                 :            : 
    3092                 :            :         // Now pack actual parents/children
    3093                 :          0 :         members.clear();
    3094         [ #  # ]:          0 :         members.reserve( tot_pch );
    3095         [ #  # ]:          0 :         std::vector< EntityHandle > tmp_pch;
    3096 [ #  # ][ #  # ]:          0 :         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    3097                 :            :         {
    3098 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_parent_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get parents" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3099 [ #  # ][ #  # ]:          0 :             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
    3100                 :          0 :             tmp_pch.clear();
    3101 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_child_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get children" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3102 [ #  # ][ #  # ]:          0 :             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
    3103                 :          0 :             tmp_pch.clear();
    3104                 :            :         }
    3105         [ #  # ]:          0 :         assert( members.size() == tot_pch );
    3106         [ #  # ]:          0 :         if( !members.empty() )
    3107                 :            :         {
    3108 [ #  # ][ #  # ]:          0 :             result = get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc,
    3109 [ #  # ][ #  # ]:          0 :                                          entities_vec );MB_CHK_SET_ERR( result, "Failed to get remote handles for set parent/child sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3110                 :            : #ifndef NDEBUG
    3111                 :            :             // Check that all handles are either sets or maxtype
    3112         [ #  # ]:          0 :             for( unsigned int __j = 0; __j < members.size(); __j++ )
    3113 [ #  # ][ #  # ]:          0 :                 assert( ( TYPE_FROM_HANDLE( members[__j] ) == MBMAXTYPE &&
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3114                 :            :                           ID_FROM_HANDLE( members[__j] ) < (int)entities.size() ) ||
    3115         [ #  # ]:          0 :                         TYPE_FROM_HANDLE( members[__j] ) == MBENTITYSET );
    3116                 :            : #endif
    3117         [ #  # ]:          0 :             buff->check_space( members.size() * sizeof( EntityHandle ) );
    3118 [ #  # ][ #  # ]:          0 :             PACK_EH( buff->buff_ptr, &members[0], members.size() );
                 [ #  # ]
    3119                 :          0 :         }
    3120                 :            :     }
    3121                 :            :     else
    3122                 :            :     {
    3123 [ #  # ][ #  # ]:          0 :         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
    3124 [ #  # ][ #  # ]:          0 :         for( rit = all_sets.begin(); rit != all_sets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    3125                 :            :         {
    3126         [ #  # ]:          0 :             PACK_INT( buff->buff_ptr, 0 );
    3127         [ #  # ]:          0 :             PACK_INT( buff->buff_ptr, 0 );
    3128                 :            :         }
    3129                 :            :     }
    3130                 :            : 
    3131                 :            :     // Pack the handles
    3132 [ #  # ][ #  # ]:          0 :     if( store_remote_handles && !all_sets.empty() )
         [ #  # ][ #  # ]
    3133                 :            :     {
    3134         [ #  # ]:          0 :         buff_size = RANGE_SIZE( all_sets );
    3135         [ #  # ]:          0 :         buff->check_space( buff_size );
    3136         [ #  # ]:          0 :         PACK_RANGE( buff->buff_ptr, all_sets );
    3137                 :            :     }
    3138                 :            : 
    3139         [ #  # ]:          0 :     myDebug->tprintf( 4, "Done packing sets.\n" );
    3140                 :            : 
    3141         [ #  # ]:          0 :     buff->set_stored_size();
    3142                 :            : 
    3143                 :          0 :     return MB_SUCCESS;
    3144                 :            : }
    3145                 :            : 
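A hedged sketch (not part of this file) of reading the first two fields that pack_sets() writes, mirroring the layout comment at the top of the function: the set count followed by one option word per set. UNPACK_INT and UNPACK_VOID are the helpers used throughout this file; `buff_ptr` is a placeholder pointer into a received buffer.

    int num_sets;
    UNPACK_INT( buff_ptr, num_sets );
    std::vector< unsigned int > opts( num_sets );
    if( num_sets ) UNPACK_VOID( buff_ptr, &opts[0], num_sets * sizeof( unsigned int ) );
    // Per-set contents, parent/child lists and source handles follow, in the order
    // read back by unpack_sets() below.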
    3146                 :          0 : ErrorCode ParallelComm::unpack_sets( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities,
    3147                 :            :                                      const bool store_remote_handles, const int from_proc )
    3148                 :            : {
    3149                 :            :     // Now the sets; assume any sets the application wants to pass are in the entities list
    3150                 :            :     ErrorCode result;
    3151                 :            : 
    3152 [ #  # ][ #  # ]:          0 :     bool no_sets = ( entities.empty() || ( mbImpl->type_from_handle( *entities.rbegin() ) == MBENTITYSET ) );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3153                 :            : 
    3154         [ #  # ]:          0 :     Range new_sets;
    3155                 :            :     int num_sets;
    3156         [ #  # ]:          0 :     UNPACK_INT( buff_ptr, num_sets );
    3157                 :            : 
    3158         [ #  # ]:          0 :     if( !num_sets ) return MB_SUCCESS;
    3159                 :            : 
    3160                 :            :     int i;
    3161         [ #  # ]:          0 :     Range::const_iterator rit;
    3162         [ #  # ]:          0 :     std::vector< EntityHandle > members;
    3163                 :            :     int num_ents;
    3164         [ #  # ]:          0 :     std::vector< unsigned int > options_vec( num_sets );
     3165                 :            :     // Option values, one per set
    3166 [ #  # ][ #  # ]:          0 :     if( num_sets ) UNPACK_VOID( buff_ptr, &options_vec[0], num_sets * sizeof( unsigned int ) );
                 [ #  # ]
    3167                 :            : 
    3168                 :            :     // Unpack parallel geometry unique id
    3169                 :            :     int n_uid;
    3170         [ #  # ]:          0 :     UNPACK_INT( buff_ptr, n_uid );
    3171 [ #  # ][ #  # ]:          0 :     if( n_uid > 0 && n_uid != num_sets )
     3172 [ #  # ][ #  # ]:          0 :     { std::cerr << "The number of Parallel geometry unique ids should be the same." << std::endl; }
    3173                 :            : 
    3174         [ #  # ]:          0 :     if( n_uid > 0 )
     3175                 :            :     {  // If parallel geometry unique ids were packed
    3176         [ #  # ]:          0 :         std::vector< int > uids( n_uid );
    3177 [ #  # ][ #  # ]:          0 :         UNPACK_INTS( buff_ptr, &uids[0], n_uid );
    3178                 :            : 
    3179                 :            :         Tag uid_tag;
    3180                 :            :         result =
    3181 [ #  # ][ #  # ]:          0 :             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3182                 :            : 
    3183                 :            :         // Find existing sets
    3184 [ #  # ][ #  # ]:          0 :         for( i = 0; i < n_uid; i++ )
    3185                 :            :         {
    3186                 :            :             EntityHandle set_handle;
    3187         [ #  # ]:          0 :             Range temp_sets;
    3188         [ #  # ]:          0 :             void* tag_vals[] = { &uids[i] };
    3189 [ #  # ][ #  # ]:          0 :             if( uids[i] > 0 )
    3190         [ #  # ]:          0 :             { result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &uid_tag, tag_vals, 1, temp_sets ); }
    3191 [ #  # ][ #  # ]:          0 :             if( !temp_sets.empty() )
    3192                 :            :             {  // Existing set
    3193 [ #  # ][ #  # ]:          0 :                 set_handle = *temp_sets.begin();
    3194                 :            :             }
    3195                 :            :             else
    3196                 :            :             {  // Create a new set
    3197 [ #  # ][ #  # ]:          0 :                 result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3198 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_set_data( uid_tag, &set_handle, 1, &uids[i] );MB_CHK_SET_ERR( result, "Failed to set parallel geometry unique ids" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3199                 :            :             }
    3200 [ #  # ][ #  # ]:          0 :             new_sets.insert( set_handle );
    3201                 :          0 :         }
    3202                 :            :     }
    3203                 :            :     else
    3204                 :            :     {
    3205                 :            :         // Create sets
    3206         [ #  # ]:          0 :         for( i = 0; i < num_sets; i++ )
    3207                 :            :         {
    3208                 :            :             EntityHandle set_handle;
    3209 [ #  # ][ #  # ]:          0 :             result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3210                 :            : 
     3211                 :            :             // Make sure new set handles are monotonically increasing
    3212 [ #  # ][ #  # ]:          0 :             assert( set_handle > *new_sets.rbegin() );
                 [ #  # ]
    3213         [ #  # ]:          0 :             new_sets.insert( set_handle );
    3214                 :            :         }
    3215                 :            :     }
    3216                 :            : 
    3217 [ #  # ][ #  # ]:          0 :     std::copy( new_sets.begin(), new_sets.end(), std::back_inserter( entities ) );
         [ #  # ][ #  # ]
    3218                 :            :     // Only need to sort if we came in with no sets on the end
    3219 [ #  # ][ #  # ]:          0 :     if( !no_sets ) std::sort( entities.begin(), entities.end() );
    3220                 :            : 
    3221 [ #  # ][ #  # ]:          0 :     for( rit = new_sets.begin(), i = 0; rit != new_sets.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    3222                 :            :     {
    3223                 :            :         // Unpack entities as vector, with length
    3224         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, num_ents );
    3225         [ #  # ]:          0 :         members.resize( num_ents );
    3226 [ #  # ][ #  # ]:          0 :         if( num_ents ) UNPACK_EH( buff_ptr, &members[0], num_ents );
                 [ #  # ]
    3227 [ #  # ][ #  # ]:          0 :         result = get_local_handles( &members[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for ordered set contents" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3228 [ #  # ][ #  # ]:          0 :         result = mbImpl->add_entities( *rit, &members[0], num_ents );MB_CHK_SET_ERR( result, "Failed to add ents to ordered set in unpack" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3229                 :            :     }
    3230                 :            : 
    3231 [ #  # ][ #  # ]:          0 :     std::vector< int > num_pch( 2 * new_sets.size() );
    3232                 :          0 :     std::vector< int >::iterator vit;
    3233                 :          0 :     int tot_pch = 0;
    3234 [ #  # ][ #  # ]:          0 :     for( vit = num_pch.begin(); vit != num_pch.end(); ++vit )
                 [ #  # ]
    3235                 :            :     {
    3236 [ #  # ][ #  # ]:          0 :         UNPACK_INT( buff_ptr, *vit );
    3237         [ #  # ]:          0 :         tot_pch += *vit;
    3238                 :            :     }
    3239                 :            : 
    3240         [ #  # ]:          0 :     members.resize( tot_pch );
    3241 [ #  # ][ #  # ]:          0 :     UNPACK_EH( buff_ptr, &members[0], tot_pch );
    3242 [ #  # ][ #  # ]:          0 :     result = get_local_handles( &members[0], tot_pch, entities );MB_CHK_SET_ERR( result, "Failed to get local handle for parent/child sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3243                 :            : 
    3244                 :          0 :     int num               = 0;
    3245         [ #  # ]:          0 :     EntityHandle* mem_ptr = &members[0];
    3246 [ #  # ][ #  # ]:          0 :     for( rit = new_sets.begin(); rit != new_sets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    3247                 :            :     {
    3248                 :            :         // Unpack parents/children
    3249 [ #  # ][ #  # ]:          0 :         int num_par = num_pch[num++], num_child = num_pch[num++];
    3250         [ #  # ]:          0 :         if( num_par + num_child )
    3251                 :            :         {
    3252         [ #  # ]:          0 :             for( i = 0; i < num_par; i++ )
    3253                 :            :             {
    3254         [ #  # ]:          0 :                 assert( 0 != mem_ptr[i] );
    3255 [ #  # ][ #  # ]:          0 :                 result = mbImpl->add_parent_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add parent to set in unpack" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3256                 :            :             }
    3257                 :          0 :             mem_ptr += num_par;
    3258         [ #  # ]:          0 :             for( i = 0; i < num_child; i++ )
    3259                 :            :             {
    3260         [ #  # ]:          0 :                 assert( 0 != mem_ptr[i] );
    3261 [ #  # ][ #  # ]:          0 :                 result = mbImpl->add_child_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add child to set in unpack" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3262                 :            :             }
    3263                 :          0 :             mem_ptr += num_child;
    3264                 :            :         }
    3265                 :            :     }
    3266                 :            : 
    3267                 :            :     // Unpack source handles
    3268         [ #  # ]:          0 :     Range dum_range;
    3269 [ #  # ][ #  # ]:          0 :     if( store_remote_handles && !new_sets.empty() )
         [ #  # ][ #  # ]
    3270                 :            :     {
    3271         [ #  # ]:          0 :         UNPACK_RANGE( buff_ptr, dum_range );
    3272 [ #  # ][ #  # ]:          0 :         result = update_remote_data( new_sets, dum_range, from_proc, 0 );MB_CHK_SET_ERR( result, "Failed to set sharing data for sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3273                 :            :     }
    3274                 :            : 
    3275         [ #  # ]:          0 :     myDebug->tprintf( 4, "Done unpacking sets." );
    3276                 :            : 
    3277                 :          0 :     return MB_SUCCESS;
    3278                 :            : }
    3279                 :            : 
    3280                 :          0 : ErrorCode ParallelComm::pack_adjacencies( Range& /*entities*/, Range::const_iterator& /*start_rit*/,
    3281                 :            :                                           Range& /*whole_range*/, unsigned char*& /*buff_ptr*/, int& /*count*/,
    3282                 :            :                                           const bool /*just_count*/, const bool /*store_handles*/,
    3283                 :            :                                           const int /*to_proc*/ )
    3284                 :            : {
    3285                 :          0 :     return MB_FAILURE;
    3286                 :            : }
    3287                 :            : 
    3288                 :          0 : ErrorCode ParallelComm::unpack_adjacencies( unsigned char*& /*buff_ptr*/, Range& /*entities*/,
    3289                 :            :                                             const bool /*store_handles*/, const int /*from_proc*/ )
    3290                 :            : {
    3291                 :          0 :     return MB_FAILURE;
    3292                 :            : }
    3293                 :            : 
    3294                 :          0 : ErrorCode ParallelComm::pack_tags( Range& entities, const std::vector< Tag >& src_tags,
    3295                 :            :                                    const std::vector< Tag >& dst_tags, const std::vector< Range >& tag_ranges,
    3296                 :            :                                    Buffer* buff, const bool store_remote_handles, const int to_proc )
    3297                 :            : {
    3298                 :            :     ErrorCode result;
    3299                 :          0 :     std::vector< Tag >::const_iterator tag_it, dst_it;
    3300                 :          0 :     std::vector< Range >::const_iterator rit;
    3301                 :          0 :     int count = 0;
    3302                 :            : 
    3303 [ #  # ][ #  # ]:          0 :     for( tag_it = src_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end(); ++tag_it, ++rit )
         [ #  # ][ #  # ]
    3304                 :            :     {
    3305 [ #  # ][ #  # ]:          0 :         result = packed_tag_size( *tag_it, *rit, count );
                 [ #  # ]
    3306         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return result;
    3307                 :            :     }
    3308                 :            : 
    3309                 :            :     // Number of tags
    3310                 :          0 :     count += sizeof( int );
    3311                 :            : 
    3312         [ #  # ]:          0 :     buff->check_space( count );
    3313                 :            : 
    3314         [ #  # ]:          0 :     PACK_INT( buff->buff_ptr, src_tags.size() );
    3315                 :            : 
    3316 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > entities_vec( entities.size() );
    3317 [ #  # ][ #  # ]:          0 :     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
                 [ #  # ]
    3318                 :            : 
    3319 [ #  # ][ #  # ]:          0 :     for( tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end();
         [ #  # ][ #  # ]
                 [ #  # ]
    3320                 :            :          ++tag_it, ++dst_it, ++rit )
    3321                 :            :     {
    3322 [ #  # ][ #  # ]:          0 :         result = pack_tag( *tag_it, *dst_it, *rit, entities_vec, buff, store_remote_handles, to_proc );
         [ #  # ][ #  # ]
    3323         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return result;
    3324                 :            :     }
    3325                 :            : 
    3326         [ #  # ]:          0 :     myDebug->tprintf( 4, "Done packing tags." );
    3327                 :            : 
    3328         [ #  # ]:          0 :     buff->set_stored_size();
    3329                 :            : 
    3330                 :          0 :     return MB_SUCCESS;
    3331                 :            : }
    3332                 :            : 
    3333                 :          0 : ErrorCode ParallelComm::packed_tag_size( Tag tag, const Range& tagged_entities, int& count )
    3334                 :            : {
    3335                 :            :     // For dense tags, compute size assuming all entities have that tag
    3336                 :            :     // For sparse tags, get number of entities w/ that tag to compute size
    3337                 :            : 
    3338         [ #  # ]:          0 :     std::vector< int > var_len_sizes;
    3339         [ #  # ]:          0 :     std::vector< const void* > var_len_values;
    3340                 :            : 
    3341                 :            :     // Default value
    3342                 :          0 :     count += sizeof( int );
    3343 [ #  # ][ #  # ]:          0 :     if( NULL != tag->get_default_value() ) count += tag->get_default_value_size();
                 [ #  # ]
    3344                 :            : 
    3345                 :            :     // Size, type, data type
    3346                 :          0 :     count += 3 * sizeof( int );
    3347                 :            : 
    3348                 :            :     // Name
    3349                 :          0 :     count += sizeof( int );
    3350         [ #  # ]:          0 :     count += tag->get_name().size();
    3351                 :            : 
    3352                 :            :     // Range of tag
    3353         [ #  # ]:          0 :     count += sizeof( int ) + tagged_entities.size() * sizeof( EntityHandle );
    3354                 :            : 
    3355 [ #  # ][ #  # ]:          0 :     if( tag->get_size() == MB_VARIABLE_LENGTH )
    3356                 :            :     {
    3357         [ #  # ]:          0 :         const int num_ent = tagged_entities.size();
    3358                 :            :         // Send a tag size for each entity
    3359                 :          0 :         count += num_ent * sizeof( int );
    3360                 :            :         // Send tag data for each entity
    3361         [ #  # ]:          0 :         var_len_sizes.resize( num_ent );
    3362         [ #  # ]:          0 :         var_len_values.resize( num_ent );
    3363                 :            :         ErrorCode result =
    3364 [ #  # ][ #  # ]:          0 :             tag->get_data( sequenceManager, errorHandler, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get lengths of variable-length tag values" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3365         [ #  # ]:          0 :         count += std::accumulate( var_len_sizes.begin(), var_len_sizes.end(), 0 );
    3366                 :            :     }
    3367                 :            :     else
    3368                 :            :     {
    3369                 :            :         // Tag data values for range or vector
    3370 [ #  # ][ #  # ]:          0 :         count += tagged_entities.size() * tag->get_size();
    3371                 :            :     }
    3372                 :            : 
    3373                 :          0 :     return MB_SUCCESS;
    3374                 :            : }
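// Illustrative sketch only (not part of ParallelComm): the fixed-length branch above
// adds up the same fields for a hypothetical dense integer tag named "MY_TAG" (6
// characters, no default value) on 100 entities, assuming 8-byte entity handles.
static int example_packed_tag_size()
{
    const int name_len = 6;    // strlen( "MY_TAG" )
    const int num_ent  = 100;
    int count = 0;
    count += sizeof( int );                  // default-value size field (no default stored)
    count += 3 * sizeof( int );              // tag size, storage type, data type
    count += sizeof( int ) + name_len;       // name length + name characters
    count += sizeof( int ) + num_ent * 8;    // entity count + packed entity handles
    count += num_ent * sizeof( int );        // one 4-byte value per entity
    return count;                            // = 4 + 12 + 10 + 804 + 400 = 1230 bytes
}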
    3375                 :            : 
    3376                 :          0 : ErrorCode ParallelComm::pack_tag( Tag src_tag, Tag dst_tag, const Range& tagged_entities,
    3377                 :            :                                   const std::vector< EntityHandle >& whole_vec, Buffer* buff,
    3378                 :            :                                   const bool store_remote_handles, const int to_proc )
    3379                 :            : {
    3380                 :            :     ErrorCode result;
    3381         [ #  # ]:          0 :     std::vector< int > var_len_sizes;
    3382         [ #  # ]:          0 :     std::vector< const void* > var_len_values;
    3383                 :            : 
    3384         [ #  # ]:          0 :     if( src_tag != dst_tag )
    3385                 :            :     {
    3386 [ #  # ][ #  # ]:          0 :         if( dst_tag->get_size() != src_tag->get_size() ) return MB_TYPE_OUT_OF_RANGE;
                 [ #  # ]
    3387 [ #  # ][ #  # ]:          0 :         if( dst_tag->get_data_type() != src_tag->get_data_type() && dst_tag->get_data_type() != MB_TYPE_OPAQUE &&
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3388         [ #  # ]:          0 :             src_tag->get_data_type() != MB_TYPE_OPAQUE )
    3389                 :          0 :             return MB_TYPE_OUT_OF_RANGE;
    3390                 :            :     }
    3391                 :            : 
    3392                 :            :     // Size, type, data type
    3393         [ #  # ]:          0 :     buff->check_space( 3 * sizeof( int ) );
    3394 [ #  # ][ #  # ]:          0 :     PACK_INT( buff->buff_ptr, src_tag->get_size() );
    3395                 :            :     TagType this_type;
    3396         [ #  # ]:          0 :     result = mbImpl->tag_get_type( dst_tag, this_type );
    3397         [ #  # ]:          0 :     PACK_INT( buff->buff_ptr, (int)this_type );
    3398         [ #  # ]:          0 :     DataType data_type = src_tag->get_data_type();
    3399         [ #  # ]:          0 :     PACK_INT( buff->buff_ptr, (int)data_type );
    3400         [ #  # ]:          0 :     int type_size = TagInfo::size_from_data_type( data_type );
    3401                 :            : 
    3402                 :            :     // Default value
    3403 [ #  # ][ #  # ]:          0 :     if( NULL == src_tag->get_default_value() )
    3404                 :            :     {
    3405         [ #  # ]:          0 :         buff->check_space( sizeof( int ) );
    3406         [ #  # ]:          0 :         PACK_INT( buff->buff_ptr, 0 );
    3407                 :            :     }
    3408                 :            :     else
    3409                 :            :     {
    3410 [ #  # ][ #  # ]:          0 :         buff->check_space( src_tag->get_default_value_size() );
    3411 [ #  # ][ #  # ]:          0 :         PACK_BYTES( buff->buff_ptr, src_tag->get_default_value(), src_tag->get_default_value_size() );
                 [ #  # ]
    3412                 :            :     }
    3413                 :            : 
    3414                 :            :     // Name
    3415 [ #  # ][ #  # ]:          0 :     buff->check_space( src_tag->get_name().size() );
    3416 [ #  # ][ #  # ]:          0 :     PACK_BYTES( buff->buff_ptr, dst_tag->get_name().c_str(), dst_tag->get_name().size() );
                 [ #  # ]
    3417                 :            : 
    3418 [ #  # ][ #  # ]:          0 :     myDebug->tprintf( 4, "Packing tag \"%s\"", src_tag->get_name().c_str() );
    3419 [ #  # ][ #  # ]:          0 :     if( src_tag != dst_tag ) myDebug->tprintf( 4, " (as tag \"%s\")", dst_tag->get_name().c_str() );
                 [ #  # ]
    3420         [ #  # ]:          0 :     myDebug->tprintf( 4, "\n" );
    3421                 :            : 
    3422                 :            :     // Pack entities
    3423 [ #  # ][ #  # ]:          0 :     buff->check_space( tagged_entities.size() * sizeof( EntityHandle ) + sizeof( int ) );
    3424 [ #  # ][ #  # ]:          0 :     PACK_INT( buff->buff_ptr, tagged_entities.size() );
    3425 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > dum_tagged_entities( tagged_entities.size() );
    3426 [ #  # ][ #  # ]:          0 :     result = get_remote_handles( store_remote_handles, tagged_entities, &dum_tagged_entities[0], to_proc, whole_vec );
    3427         [ #  # ]:          0 :     if( MB_SUCCESS != result )
    3428                 :            :     {
    3429 [ #  # ][ #  # ]:          0 :         if( myDebug->get_verbosity() == 3 )
    3430                 :            :         {
    3431 [ #  # ][ #  # ]:          0 :             std::cerr << "Failed to get remote handles for tagged entities:" << std::endl;
    3432         [ #  # ]:          0 :             tagged_entities.print( "  " );
    3433                 :            :         }
    3434 [ #  # ][ #  # ]:          0 :         MB_SET_ERR( result, "Failed to get remote handles for tagged entities" );
         [ #  # ][ #  # ]
                 [ #  # ]
    3435                 :            :     }
    3436                 :            : 
    3437 [ #  # ][ #  # ]:          0 :     PACK_EH( buff->buff_ptr, &dum_tagged_entities[0], dum_tagged_entities.size() );
    3438                 :            : 
    3439         [ #  # ]:          0 :     const size_t num_ent = tagged_entities.size();
    3440 [ #  # ][ #  # ]:          0 :     if( src_tag->get_size() == MB_VARIABLE_LENGTH )
    3441                 :            :     {
    3442         [ #  # ]:          0 :         var_len_sizes.resize( num_ent, 0 );
    3443         [ #  # ]:          0 :         var_len_values.resize( num_ent, 0 );
    3444 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_by_ptr( src_tag, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get variable-length tag data in pack_tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3445         [ #  # ]:          0 :         buff->check_space( num_ent * sizeof( int ) );
    3446 [ #  # ][ #  # ]:          0 :         PACK_INTS( buff->buff_ptr, &var_len_sizes[0], num_ent );
    3447         [ #  # ]:          0 :         for( unsigned int i = 0; i < num_ent; i++ )
    3448                 :            :         {
    3449 [ #  # ][ #  # ]:          0 :             buff->check_space( var_len_sizes[i] );
    3450 [ #  # ][ #  # ]:          0 :             PACK_VOID( buff->buff_ptr, var_len_values[i], type_size * var_len_sizes[i] );
                 [ #  # ]
    3451                 :            :         }
    3452                 :            :     }
    3453                 :            :     else
    3454                 :            :     {
    3455 [ #  # ][ #  # ]:          0 :         buff->check_space( num_ent * src_tag->get_size() );
    3456                 :            :         // Should be OK to read directly into buffer, since tags are untyped and
    3457                 :            :         // handled by memcpy
    3458 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( src_tag, tagged_entities, buff->buff_ptr );MB_CHK_SET_ERR( result, "Failed to get tag data in pack_tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3459         [ #  # ]:          0 :         buff->buff_ptr += num_ent * src_tag->get_size();
    3460                 :            :         PC( num_ent * src_tag->get_size(), " void" );
    3461                 :            :     }
    3462                 :            : 
    3463                 :          0 :     return MB_SUCCESS;
    3464                 :            : }
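// Sketch of the per-tag record that pack_tag writes (derived from the PACK_* calls
// above). The helper below only walks the fixed header fields and is illustrative,
// not part of the ParallelComm API; it uses memcpy just as the rest of this file does.
struct PackedTagHeaderSketch
{
    int tag_size;        // byte size of the tag, or MB_VARIABLE_LENGTH
    int storage_type;    // TagType of the destination tag
    int data_type;       // DataType of the values
    int default_size;    // 0 if no default value, else number of default-value bytes that follow
};
static const unsigned char* example_read_packed_tag_header( const unsigned char* ptr, PackedTagHeaderSketch& hdr )
{
    memcpy( &hdr.tag_size, ptr, sizeof( int ) );
    ptr += sizeof( int );
    memcpy( &hdr.storage_type, ptr, sizeof( int ) );
    ptr += sizeof( int );
    memcpy( &hdr.data_type, ptr, sizeof( int ) );
    ptr += sizeof( int );
    memcpy( &hdr.default_size, ptr, sizeof( int ) );
    ptr += sizeof( int ) + hdr.default_size;  // skip the default-value bytes, if any
    // What follows (not read here): name length + name characters, entity count +
    // remote handles, then either per-entity sizes + values (variable length) or
    // num_ent * tag_size bytes of fixed-size values.
    return ptr;
}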
    3465                 :            : 
    3466                 :          0 : ErrorCode ParallelComm::get_tag_send_list( const Range& whole_range, std::vector< Tag >& all_tags,
    3467                 :            :                                            std::vector< Range >& tag_ranges )
    3468                 :            : {
    3469         [ #  # ]:          0 :     std::vector< Tag > tmp_tags;
    3470 [ #  # ][ #  # ]:          0 :     ErrorCode result = mbImpl->tag_get_tags( tmp_tags );MB_CHK_SET_ERR( result, "Failed to get tags in pack_tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3471                 :            : 
    3472                 :          0 :     std::vector< Tag >::iterator tag_it;
    3473 [ #  # ][ #  # ]:          0 :     for( tag_it = tmp_tags.begin(); tag_it != tmp_tags.end(); ++tag_it )
                 [ #  # ]
    3474                 :            :     {
    3475         [ #  # ]:          0 :         std::string tag_name;
    3476 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_name( *tag_it, tag_name );
    3477 [ #  # ][ #  # ]:          0 :         if( tag_name.c_str()[0] == '_' && tag_name.c_str()[1] == '_' ) continue;
                 [ #  # ]
    3478                 :            : 
    3479         [ #  # ]:          0 :         Range tmp_range;
              [ #  #  # ]
    3480 [ #  # ][ #  # ]:          0 :         result = ( *tag_it )->get_tagged_entities( sequenceManager, tmp_range );MB_CHK_SET_ERR( result, "Failed to get entities for tag in pack_tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3481 [ #  # ][ #  # ]:          0 :         tmp_range = intersect( tmp_range, whole_range );
    3482                 :            : 
    3483 [ #  # ][ #  # ]:          0 :         if( tmp_range.empty() ) continue;
    3484                 :            : 
    3485                 :            :         // OK, we'll be sending this tag
    3486 [ #  # ][ #  # ]:          0 :         all_tags.push_back( *tag_it );
    3487 [ #  # ][ #  # ]:          0 :         tag_ranges.push_back( Range() );
    3488 [ #  # ][ #  # ]:          0 :         tag_ranges.back().swap( tmp_range );
              [ #  #  # ]
    3489                 :          0 :     }
    3490                 :            : 
    3491                 :          0 :     return MB_SUCCESS;
    3492                 :            : }
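// Minimal sketch of the name filter used above: MOAB's internal tags are named with
// a leading "__" and are skipped when building the send list.
static bool example_is_internal_tag_name( const std::string& tag_name )
{
    return tag_name.size() >= 2 && tag_name[0] == '_' && tag_name[1] == '_';
}
// e.g. example_is_internal_tag_name( "__PARALLEL_SHARED_PROC" ) == true,
//      example_is_internal_tag_name( "GLOBAL_ID" ) == false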
    3493                 :            : 
    3494                 :          0 : ErrorCode ParallelComm::unpack_tags( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities,
    3495                 :            :                                      const bool /*store_remote_handles*/, const int /*from_proc*/,
    3496                 :            :                                      const MPI_Op* const mpi_op )
    3497                 :            : {
    3498                 :            :     // Tags
    3499                 :            :     // Get all the tags
    3500                 :            :     // For dense tags, compute size assuming all entities have that tag
    3501                 :            :     // For sparse tags, get number of entities w/ that tag to compute size
    3502                 :            : 
    3503                 :            :     ErrorCode result;
    3504                 :            : 
    3505                 :            :     int num_tags;
    3506         [ #  # ]:          0 :     UNPACK_INT( buff_ptr, num_tags );
    3507         [ #  # ]:          0 :     std::vector< const void* > var_len_vals;
    3508         [ #  # ]:          0 :     std::vector< unsigned char > dum_vals;
    3509         [ #  # ]:          0 :     std::vector< EntityHandle > dum_ehvals;
    3510                 :            : 
    3511         [ #  # ]:          0 :     for( int i = 0; i < num_tags; i++ )
    3512                 :            :     {
    3513                 :            :         // Tag handle
    3514                 :            :         Tag tag_handle;
    3515                 :            : 
    3516                 :            :         // Size, data type
    3517                 :            :         int tag_size, tag_data_type, tag_type;
    3518         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, tag_size );
    3519         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, tag_type );
    3520         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, tag_data_type );
    3521                 :            : 
    3522                 :            :         // Default value
    3523                 :            :         int def_val_size;
    3524         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, def_val_size );
    3525                 :          0 :         void* def_val_ptr = NULL;
    3526         [ #  # ]:          0 :         if( def_val_size )
    3527                 :            :         {
    3528                 :          0 :             def_val_ptr = buff_ptr;
    3529                 :          0 :             buff_ptr += def_val_size;
    3530                 :            :             UPC( def_val_size, " void" );
    3531                 :            :         }
    3532                 :            : 
    3533                 :            :         // Name
    3534                 :            :         int name_len;
    3535         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, name_len );
    3536         [ #  # ]:          0 :         std::string tag_name( reinterpret_cast< char* >( buff_ptr ), name_len );
    3537                 :          0 :         buff_ptr += name_len;
    3538                 :            :         UPC( name_len, " chars" );
    3539                 :            : 
    3540         [ #  # ]:          0 :         myDebug->tprintf( 4, "Unpacking tag %s\n", tag_name.c_str() );
    3541                 :            : 
    3542                 :            :         // Create the tag
    3543         [ #  # ]:          0 :         if( tag_size == MB_VARIABLE_LENGTH )
    3544                 :            :             result = mbImpl->tag_get_handle( tag_name.c_str(), def_val_size, (DataType)tag_data_type, tag_handle,
    3545         [ #  # ]:          0 :                                              MB_TAG_VARLEN | MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
    3546                 :            :         else
    3547                 :            :             result = mbImpl->tag_get_handle( tag_name.c_str(), tag_size, (DataType)tag_data_type, tag_handle,
    3548         [ #  # ]:          0 :                                              MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
    3549         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return result;
    3550                 :            : 
    3551                 :            :         // Get handles and convert to local handles
    3552                 :            :         int num_ents;
    3553         [ #  # ]:          0 :         UNPACK_INT( buff_ptr, num_ents );
    3554 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > dum_ents( num_ents );
    3555 [ #  # ][ #  # ]:          0 :         UNPACK_EH( buff_ptr, &dum_ents[0], num_ents );
    3556                 :            : 
    3557                 :            :         // In this case handles are indices into new entity range; need to convert
    3558                 :            :         // to local handles
    3559 [ #  # ][ #  # ]:          0 :         result = get_local_handles( &dum_ents[0], num_ents, entities );MB_CHK_SET_ERR( result, "Unable to convert to local handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3560                 :            : 
    3561                 :            :         // If it's a handle type, also convert tag vals in-place in buffer
    3562         [ #  # ]:          0 :         if( MB_TYPE_HANDLE == tag_type )
    3563                 :            :         {
    3564         [ #  # ]:          0 :             dum_ehvals.resize( num_ents );
    3565 [ #  # ][ #  # ]:          0 :             UNPACK_EH( buff_ptr, &dum_ehvals[0], num_ents );
    3566 [ #  # ][ #  # ]:          0 :             result = get_local_handles( &dum_ehvals[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for tag vals" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3567                 :            :         }
    3568                 :            : 
    3569                 :            :         DataType data_type;
    3570         [ #  # ]:          0 :         mbImpl->tag_get_data_type( tag_handle, data_type );
    3571         [ #  # ]:          0 :         int type_size = TagInfo::size_from_data_type( data_type );
    3572                 :            : 
    3573         [ #  # ]:          0 :         if( !dum_ents.empty() )
    3574                 :            :         {
    3575         [ #  # ]:          0 :             if( tag_size == MB_VARIABLE_LENGTH )
    3576                 :            :             {
    3577                 :            :                 // Be careful of alignment here. If the integers are aligned
    3578                 :            :                 // in the buffer, we can use them directly. Otherwise we must
    3579                 :            :                 // copy them.
    3580         [ #  # ]:          0 :                 std::vector< int > var_lengths( num_ents );
    3581 [ #  # ][ #  # ]:          0 :                 UNPACK_INTS( buff_ptr, &var_lengths[0], num_ents );
    3582                 :            :                 UPC( sizeof( int ) * num_ents, " void" );
    3583                 :            : 
    3584                 :            :                 // Get pointers into buffer for each tag value
    3585         [ #  # ]:          0 :                 var_len_vals.resize( num_ents );
    3586         [ #  # ]:          0 :                 for( std::vector< EntityHandle >::size_type j = 0; j < (std::vector< EntityHandle >::size_type)num_ents;
    3587                 :            :                      j++ )
    3588                 :            :                 {
    3589         [ #  # ]:          0 :                     var_len_vals[j] = buff_ptr;
    3590         [ #  # ]:          0 :                     buff_ptr += var_lengths[j] * type_size;
    3591                 :            :                     UPC( var_lengths[j], " void" );
    3592                 :            :                 }
    3593                 :            :                 result =
    3594 [ #  # ][ #  # ]:          0 :                     mbImpl->tag_set_by_ptr( tag_handle, &dum_ents[0], num_ents, &var_len_vals[0], &var_lengths[0] );MB_CHK_SET_ERR( result, "Failed to set tag data when unpacking variable-length tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3595                 :            :             }
    3596                 :            :             else
    3597                 :            :             {
    3598                 :            :                 // Get existing values of dst tag
    3599         [ #  # ]:          0 :                 dum_vals.resize( tag_size * num_ents );
    3600         [ #  # ]:          0 :                 if( mpi_op )
    3601                 :            :                 {
    3602                 :            :                     int tag_length;
    3603 [ #  # ][ #  # ]:          0 :                     result = mbImpl->tag_get_length( tag_handle, tag_length );MB_CHK_SET_ERR( result, "Failed to get tag length" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3604 [ #  # ][ #  # ]:          0 :                     result = mbImpl->tag_get_data( tag_handle, &dum_ents[0], num_ents, &dum_vals[0] );MB_CHK_SET_ERR( result, "Failed to get existing value of dst tag on entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3605 [ #  # ][ #  # ]:          0 :                     result = reduce_void( tag_data_type, *mpi_op, tag_length * num_ents, &dum_vals[0], buff_ptr );MB_CHK_SET_ERR( result, "Failed to perform mpi op on dst tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3606                 :            :                 }
    3607 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_set_data( tag_handle, &dum_ents[0], num_ents, buff_ptr );MB_CHK_SET_ERR( result, "Failed to set range-based tag data when unpacking tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3608         [ #  # ]:          0 :                 buff_ptr += num_ents * tag_size;
    3609                 :            :                 UPC( num_ents * tag_size, " void" );
    3610                 :            :             }
    3611                 :            :         }
    3612                 :          0 :     }
    3613                 :            : 
    3614         [ #  # ]:          0 :     myDebug->tprintf( 4, "Done unpacking tags.\n" );
    3615                 :            : 
    3616                 :          0 :     return MB_SUCCESS;
    3617                 :            : }
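// Usage sketch (the tag name and default value are assumptions for illustration):
// this mirrors how unpack_tags re-creates a fixed-size tag on the receiving side,
// passing the size in bytes because MB_TAG_BYTES is part of the creation flags.
static ErrorCode example_recreate_int_tag( Interface& mb, Tag& tag_handle )
{
    int def_val = -1;
    return mb.tag_get_handle( "EXAMPLE_TAG", sizeof( int ), MB_TYPE_INTEGER, tag_handle,
                              MB_TAG_CREAT | MB_TAG_BYTES | MB_TAG_DENSE, &def_val );
}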
    3618                 :            : 
    3619                 :            : template < class T >
    3620                 :          0 : T LAND( const T& arg1, const T& arg2 )
    3621                 :            : {
    3622 [ #  # ][ #  # ]:          0 :     return arg1 && arg2;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3623                 :            : }
    3624                 :            : template < class T >
    3625                 :          0 : T LOR( const T& arg1, const T& arg2 )
    3626                 :            : {
    3627 [ #  # ][ #  # ]:          0 :     return arg1 || arg2;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3628                 :            : }
    3629                 :            : template < class T >
    3630                 :          0 : T LXOR( const T& arg1, const T& arg2 )
    3631                 :            : {
    3632 [ #  # ][ #  # ]:          0 :     return ( ( arg1 && !arg2 ) || ( !arg1 && arg2 ) );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3633                 :            : }
    3634                 :            : template < class T >
    3635                 :          0 : T MAX( const T& arg1, const T& arg2 )
    3636                 :            : {
    3637 [ #  # ][ #  # ]:          0 :     return ( arg1 > arg2 ? arg1 : arg2 );
                 [ #  # ]
    3638                 :            : }
    3639                 :            : template < class T >
    3640                 :          0 : T MIN( const T& arg1, const T& arg2 )
    3641                 :            : {
    3642 [ #  # ][ #  # ]:          0 :     return ( arg1 < arg2 ? arg1 : arg2 );
                 [ #  # ]
    3643                 :            : }
    3644                 :            : template < class T >
    3645                 :          0 : T ADD( const T& arg1, const T& arg2 )
    3646                 :            : {
    3647                 :          0 :     return arg1 + arg2;
    3648                 :            : }
    3649                 :            : template < class T >
    3650                 :          0 : T MULT( const T& arg1, const T& arg2 )
    3651                 :            : {
    3652                 :          0 :     return arg1 * arg2;
    3653                 :            : }
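// Standalone sketch of how these helpers are applied in reduce<T> below: combine an
// "old" and a "new" value array element-wise with std::transform (included at the
// top of this file), writing the combined result over the new values.
static void example_elementwise_max( const int* old_vals, int* new_vals, int num_ents )
{
    // new_vals[i] = MAX( old_vals[i], new_vals[i] ) for each of the num_ents values
    std::transform( old_vals, old_vals + num_ents, new_vals, new_vals, MAX< int > );
}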
    3654                 :            : 
    3655                 :            : template < class T >
    3656                 :          0 : ErrorCode ParallelComm::reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals )
    3657                 :            : {
    3658                 :          0 :     T* old_tmp = reinterpret_cast< T* >( old_vals );
    3659                 :            :     // T *new_tmp = reinterpret_cast<T*>(new_vals);
    3660                 :            :     // new_vals may not be suitably aligned for T; some compilers optimize assuming alignment, so copy into an aligned temporary first
    3661                 :            : 
    3662 [ #  # ][ #  # ]:          0 :     std::vector< T > new_values;
                 [ #  # ]
    3663 [ #  # ][ #  # ]:          0 :     new_values.resize( num_ents );
                 [ #  # ]
    3664 [ #  # ][ #  # ]:          0 :     memcpy( &new_values[0], new_vals, num_ents * sizeof( T ) );
                 [ #  # ]
    3665 [ #  # ][ #  # ]:          0 :     T* new_tmp = &new_values[0];
                 [ #  # ]
    3666                 :            : 
    3667 [ #  # ][ #  # ]:          0 :     if( mpi_op == MPI_SUM )
                 [ #  # ]
    3668 [ #  # ][ #  # ]:          0 :         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, ADD< T > );
                 [ #  # ]
    3669 [ #  # ][ #  # ]:          0 :     else if( mpi_op == MPI_PROD )
                 [ #  # ]
    3670 [ #  # ][ #  # ]:          0 :         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MULT< T > );
                 [ #  # ]
    3671 [ #  # ][ #  # ]:          0 :     else if( mpi_op == MPI_MAX )
                 [ #  # ]
    3672 [ #  # ][ #  # ]:          0 :         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MAX< T > );
                 [ #  # ]
    3673 [ #  # ][ #  # ]:          0 :     else if( mpi_op == MPI_MIN )
                 [ #  # ]
    3674 [ #  # ][ #  # ]:          0 :         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MIN< T > );
                 [ #  # ]
    3675 [ #  # ][ #  # ]:          0 :     else if( mpi_op == MPI_LAND )
                 [ #  # ]
    3676 [ #  # ][ #  # ]:          0 :         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LAND< T > );
                 [ #  # ]
    3677 [ #  # ][ #  # ]:          0 :     else if( mpi_op == MPI_LOR )
                 [ #  # ]
    3678 [ #  # ][ #  # ]:          0 :         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LOR< T > );
                 [ #  # ]
    3679 [ #  # ][ #  # ]:          0 :     else if( mpi_op == MPI_LXOR )
                 [ #  # ]
    3680 [ #  # ][ #  # ]:          0 :         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LXOR< T > );
                 [ #  # ]
    3681 [ #  # ][ #  # ]:          0 :     else if( mpi_op == MPI_BAND || mpi_op == MPI_BOR || mpi_op == MPI_BXOR )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3682                 :            :     {
    3683 [ #  # ][ #  # ]:          0 :         std::cerr << "Bitwise operations not allowed in tag reductions." << std::endl;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3684                 :          0 :         return MB_FAILURE;
    3685                 :            :     }
    3686 [ #  # ][ #  # ]:          0 :     else if( mpi_op != MPI_OP_NULL )
                 [ #  # ]
    3687                 :            :     {
    3688 [ #  # ][ #  # ]:          0 :         std::cerr << "Unknown MPI operation type." << std::endl;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3689                 :          0 :         return MB_TYPE_OUT_OF_RANGE;
    3690                 :            :     }
    3691                 :            : 
    3692                 :            :     // Copy the result back into the caller's output buffer
    3693                 :          0 :     memcpy( new_vals, new_tmp, num_ents * sizeof( T ) );
    3694 [ #  # ][ #  # ]:          0 :     std::vector< T >().swap( new_values );  // way to release allocated vector
                 [ #  # ]
    3695                 :            : 
    3696                 :          0 :     return MB_SUCCESS;
    3697                 :            : }
    3698                 :            : 
    3699                 :          0 : ErrorCode ParallelComm::reduce_void( int tag_data_type, const MPI_Op mpi_op, int num_ents, void* old_vals,
    3700                 :            :                                      void* new_vals )
    3701                 :            : {
    3702                 :            :     ErrorCode result;
    3703   [ #  #  #  # ]:          0 :     switch( tag_data_type )
    3704                 :            :     {
    3705                 :            :         case MB_TYPE_INTEGER:
    3706                 :          0 :             result = reduce< int >( mpi_op, num_ents, old_vals, new_vals );
    3707                 :          0 :             break;
    3708                 :            :         case MB_TYPE_DOUBLE:
    3709                 :          0 :             result = reduce< double >( mpi_op, num_ents, old_vals, new_vals );
    3710                 :          0 :             break;
    3711                 :            :         case MB_TYPE_BIT:
    3712                 :          0 :             result = reduce< unsigned char >( mpi_op, num_ents, old_vals, new_vals );
    3713                 :          0 :             break;
    3714                 :            :         default:
    3715                 :          0 :             result = MB_SUCCESS;
    3716                 :          0 :             break;
    3717                 :            :     }
    3718                 :            : 
    3719                 :          0 :     return result;
    3720                 :            : }
    3721                 :            : 
    3722                 :          0 : ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, int resolve_dim, int shared_dim, const Tag* id_tag )
    3723                 :            : {
    3724                 :            :     ErrorCode result;
    3725         [ #  # ]:          0 :     Range proc_ents;
    3726                 :            : 
    3727                 :            :     // Check for structured mesh, and do it differently if it is
    3728                 :            :     ScdInterface* scdi;
    3729         [ #  # ]:          0 :     result = mbImpl->query_interface( scdi );
    3730         [ #  # ]:          0 :     if( scdi )
    3731                 :            :     {
    3732         [ #  # ]:          0 :         result = scdi->tag_shared_vertices( this, this_set );
    3733         [ #  # ]:          0 :         if( MB_SUCCESS == result )
    3734                 :            :         {
    3735         [ #  # ]:          0 :             myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
    3736                 :          0 :             return result;
    3737                 :            :         }
    3738                 :            :     }
    3739                 :            : 
    3740         [ #  # ]:          0 :     if( 0 == this_set )
    3741                 :            :     {
    3742                 :            :         // Get the entities in the partition sets
    3743 [ #  # ][ #  # ]:          0 :         for( Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    3744                 :            :         {
    3745         [ #  # ]:          0 :             Range tmp_ents;
    3746 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_handle( *rit, tmp_ents, true );
    3747         [ #  # ]:          0 :             if( MB_SUCCESS != result ) return result;
    3748 [ #  # ][ #  # ]:          0 :             proc_ents.merge( tmp_ents );
    3749                 :          0 :         }
    3750                 :            :     }
    3751                 :            :     else
    3752                 :            :     {
    3753         [ #  # ]:          0 :         result = mbImpl->get_entities_by_handle( this_set, proc_ents, true );
    3754         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return result;
    3755                 :            :     }
    3756                 :            : 
    3757                 :            :     // Resolve dim is maximal dim of entities in proc_ents
    3758         [ #  # ]:          0 :     if( -1 == resolve_dim )
    3759                 :            :     {
    3760 [ #  # ][ #  # ]:          0 :         if( !proc_ents.empty() ) resolve_dim = mbImpl->dimension_from_handle( *proc_ents.rbegin() );
         [ #  # ][ #  # ]
                 [ #  # ]
    3761                 :            :     }
    3762                 :            : 
    3763                 :            :     // proc_ents should all be of same dimension
    3764 [ #  # ][ #  # ]:          0 :     if( resolve_dim > shared_dim &&
                 [ #  # ]
    3765 [ #  # ][ #  # ]:          0 :         mbImpl->dimension_from_handle( *proc_ents.rbegin() ) != mbImpl->dimension_from_handle( *proc_ents.begin() ) )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
           [ #  #  #  # ]
    3766                 :            :     {
    3767         [ #  # ]:          0 :         Range::iterator lower = proc_ents.lower_bound( CN::TypeDimensionMap[0].first ),
    3768         [ #  # ]:          0 :                         upper = proc_ents.upper_bound( CN::TypeDimensionMap[resolve_dim - 1].second );
    3769         [ #  # ]:          0 :         proc_ents.erase( lower, upper );
    3770                 :            :     }
    3771                 :            : 
    3772                 :            :     // Must call even if we don't have any entities, to make sure
    3773                 :            :     // collective communication works
    3774         [ #  # ]:          0 :     return resolve_shared_ents( this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag );
    3775                 :            : }
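// Hedged usage sketch (the set handle, dimensions, and reliance on the default
// id_tag argument are assumptions for illustration): after each rank has loaded its
// part of the mesh into file_set, an application resolves sharing for an
// element-based 3D partition roughly like this.
static ErrorCode example_resolve_sharing( ParallelComm& pcomm, EntityHandle file_set )
{
    // resolve_dim = 3: elements define the partition; shared_dim = 2: faces, edges
    // and vertices on the inter-processor skin may end up shared.
    return pcomm.resolve_shared_ents( file_set, 3, 2 );
}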
    3776                 :            : 
    3777                 :          0 : ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, Range& proc_ents, int resolve_dim, int shared_dim,
    3778                 :            :                                              Range* skin_ents, const Tag* id_tag )
    3779                 :            : {
    3780                 :            : #ifdef MOAB_HAVE_MPE
    3781                 :            :     if( myDebug->get_verbosity() == 2 )
    3782                 :            :     {
    3783                 :            :         define_mpe();
    3784                 :            :         MPE_Log_event( RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents." );
    3785                 :            :     }
    3786                 :            : #endif
    3787                 :            : 
    3788                 :            :     ErrorCode result;
    3789         [ #  # ]:          0 :     myDebug->tprintf( 1, "Resolving shared entities.\n" );
    3790                 :            : 
    3791         [ #  # ]:          0 :     if( resolve_dim < shared_dim )
    3792 [ #  # ][ #  # ]:          0 :     { MB_SET_ERR( MB_FAILURE, "MOAB does not support vertex-based partitions, only element-based ones" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    3793                 :            : 
    3794         [ #  # ]:          0 :     if( -1 == shared_dim )
    3795                 :            :     {
    3796 [ #  # ][ #  # ]:          0 :         if( !proc_ents.empty() )
    3797 [ #  # ][ #  # ]:          0 :             shared_dim = mbImpl->dimension_from_handle( *proc_ents.begin() ) - 1;
                 [ #  # ]
    3798         [ #  # ]:          0 :         else if( resolve_dim == 3 )
    3799                 :          0 :             shared_dim = 2;
    3800                 :            :     }
    3801                 :          0 :     int max_global_resolve_dim = -1;
    3802 [ #  # ][ #  # ]:          0 :     int err = MPI_Allreduce( &resolve_dim, &max_global_resolve_dim, 1, MPI_INT, MPI_MAX, proc_config().proc_comm() );
                 [ #  # ]
    3803 [ #  # ][ #  # ]:          0 :     if( MPI_SUCCESS != err ) { MB_SET_ERR( MB_FAILURE, "Unable to guess global resolve_dim" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3804 [ #  # ][ #  # ]:          0 :     if( shared_dim < 0 || resolve_dim < 0 )
    3805                 :            :     {
    3806                 :            :         // MB_SET_ERR(MB_FAILURE, "Unable to guess shared_dim or resolve_dim");
    3807                 :          0 :         resolve_dim = max_global_resolve_dim;
    3808                 :          0 :         shared_dim  = resolve_dim - 1;
    3809                 :            :     }
    3810                 :            : 
    3811 [ #  # ][ #  # ]:          0 :     if( resolve_dim < 0 || shared_dim < 0 ) return MB_SUCCESS;
    3812                 :            :     // no task has any mesh, get out
    3813                 :            : 
    3814                 :            :     // Get the skin entities by dimension
    3815 [ #  # ][ #  # ]:          0 :     Range tmp_skin_ents[4];
                 [ #  # ]
    3816                 :            : 
    3817                 :            :     // Get the entities to be skinned
    3818                 :            :     // Find the skin
    3819                 :          0 :     int skin_dim = resolve_dim - 1;
    3820         [ #  # ]:          0 :     if( !skin_ents )
    3821                 :            :     {
    3822                 :          0 :         skin_ents              = tmp_skin_ents;
    3823         [ #  # ]:          0 :         skin_ents[resolve_dim] = proc_ents;
    3824         [ #  # ]:          0 :         Skinner skinner( mbImpl );
    3825                 :            :         result =
    3826 [ #  # ][ #  # ]:          0 :             skinner.find_skin( this_set, skin_ents[skin_dim + 1], false, skin_ents[skin_dim], NULL, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3827         [ #  # ]:          0 :         myDebug->tprintf( 1, "Found skin:   skin_dim: %d resolve_dim: %d , now resolving.\n", skin_dim, resolve_dim );
    3828         [ #  # ]:          0 :         myDebug->tprintf( 3, "skin_ents[0].size(): %d skin_ents[1].size(): %d  \n", (int)skin_ents[0].size(),
    3829 [ #  # ][ #  # ]:          0 :                           (int)skin_ents[1].size() );
    3830                 :            :         // Get entities adjacent to skin ents from shared_dim down to zero
    3831 [ #  # ][ #  # ]:          0 :         for( int this_dim = skin_dim - 1; this_dim >= 0; this_dim-- )
    3832                 :            :         {
    3833                 :            :             result =
    3834 [ #  # ][ #  # ]:          0 :                 mbImpl->get_adjacencies( skin_ents[skin_dim], this_dim, true, skin_ents[this_dim], Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3835                 :            : 
    3836 [ #  # ][ #  # ]:          0 :             if( this_set && skin_dim == 2 && this_dim == 1 )
                 [ #  # ]
    3837                 :            :             {
    3838 [ #  # ][ #  # ]:          0 :                 result = mbImpl->add_entities( this_set, skin_ents[this_dim] );MB_CHK_ERR( result );
         [ #  # ][ #  # ]
    3839                 :            :             }
    3840                 :          0 :         }
    3841                 :            :     }
    3842 [ #  # ][ #  # ]:          0 :     else if( skin_ents[resolve_dim].empty() )
    3843         [ #  # ]:          0 :         skin_ents[resolve_dim] = proc_ents;
    3844                 :            : 
    3845                 :            :     // Global id tag
    3846                 :            :     Tag gid_tag;
    3847         [ #  # ]:          0 :     if( id_tag )
    3848                 :          0 :         gid_tag = *id_tag;
    3849                 :            :     else
    3850                 :            :     {
    3851                 :          0 :         bool tag_created = false;
    3852                 :          0 :         int def_val      = -1;
    3853                 :            :         result = mbImpl->tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE | MB_TAG_CREAT,
    3854         [ #  # ]:          0 :                                          &def_val, &tag_created );
    3855 [ #  # ][ #  # ]:          0 :         if( MB_ALREADY_ALLOCATED != result && MB_SUCCESS != result )
    3856 [ #  # ][ #  # ]:          0 :         { MB_SET_ERR( result, "Failed to create/get gid tag handle" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    3857         [ #  # ]:          0 :         else if( tag_created )
    3858                 :            :         {
    3859                 :            :             // Just created it, so we need global ids
    3860 [ #  # ][ #  # ]:          0 :             result = assign_global_ids( this_set, skin_dim + 1, true, true, true );MB_CHK_SET_ERR( result, "Failed to assign global ids" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3861                 :            :         }
    3862                 :            :     }
    3863                 :            : 
    3864                 :            :     DataType tag_type;
    3865 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data_type( gid_tag, tag_type );MB_CHK_SET_ERR( result, "Failed to get tag data type" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3866                 :            :     int bytes_per_tag;
    3867 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_bytes( gid_tag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed to get number of bytes per tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3868                 :            :     // On 64 bits, long and int are different
    3869                 :            :     // On 32 bits, they are not; if size of long is 8, it is a 64 bit machine (really?)
    3870                 :            : 
    3871                 :            :     // Get gids for skin ents in a vector, to pass to gs
    3872 [ #  # ][ #  # ]:          0 :     std::vector< long > lgid_data( skin_ents[0].size() );
    3873                 :            :     // The id is stored as either long or int
    3874                 :            :     // (8 or 4 bytes, respectively, on 64-bit platforms)
    3875 [ #  # ][ #  # ]:          0 :     if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
                 [ #  # ]
    3876                 :            :     {  // It is a special id tag
    3877 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &lgid_data[0] );MB_CHK_SET_ERR( result, "Couldn't get gid tag for skin vertices" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3878                 :            :     }
    3879         [ #  # ]:          0 :     else if( 4 == bytes_per_tag )
    3880                 :            :     {  // Must be GLOBAL_ID tag or 32 bits ...
    3881         [ #  # ]:          0 :         std::vector< int > gid_data( lgid_data.size() );
    3882 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &gid_data[0] );MB_CHK_SET_ERR( result, "Failed to get gid tag for skin vertices" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3883 [ #  # ][ #  # ]:          0 :         std::copy( gid_data.begin(), gid_data.end(), lgid_data.begin() );
    3884                 :            :     }
    3885                 :            :     else
    3886                 :            :     {
    3887                 :            :         // Not supported flag
    3888 [ #  # ][ #  # ]:          0 :         MB_SET_ERR( MB_FAILURE, "Unsupported id tag" );
         [ #  # ][ #  # ]
                 [ #  # ]
    3889                 :            :     }
    3890                 :            : 
    3891                 :            :     // Put handles in vector for passing to gs setup
    3892         [ #  # ]:          0 :     std::vector< Ulong > handle_vec;  // Assumes that we can do conversion from Ulong to EntityHandle
    3893 [ #  # ][ #  # ]:          0 :     std::copy( skin_ents[0].begin(), skin_ents[0].end(), std::back_inserter( handle_vec ) );
         [ #  # ][ #  # ]
    3894                 :            : 
    3895                 :            : #ifdef MOAB_HAVE_MPE
    3896                 :            :     if( myDebug->get_verbosity() == 2 )
    3897                 :            :     { MPE_Log_event( SHAREDV_START, procConfig.proc_rank(), "Creating crystal router." ); }
    3898                 :            : #endif
    3899                 :            : 
    3900                 :            :     // Get a crystal router
    3901         [ #  # ]:          0 :     gs_data::crystal_data* cd = procConfig.crystal_router();
    3902                 :            : 
    3903                 :            :     /*
    3904                 :            :     // Get total number of entities; will overshoot highest global id, but
    3905                 :            :     // that's OK
    3906                 :            :     int num_total[2] = {0, 0}, num_local[2] = {0, 0};
    3907                 :            :     result = mbImpl->get_number_entities_by_dimension(this_set, 0, num_local);
    3908                 :            :     if (MB_SUCCESS != result)return result;
    3909                 :            :     int failure = MPI_Allreduce(num_local, num_total, 1,
    3910                 :            :     MPI_INTEGER, MPI_SUM, procConfig.proc_comm());
    3911                 :            :     if (failure) {
    3912                 :            :       MB_SET_ERR(MB_FAILURE, "Allreduce for total number of shared ents failed");
    3913                 :            :     }
    3914                 :            :     */
    3915                 :            :     // Call gather-scatter to get shared ids & procs
    3916 [ #  # ][ #  # ]:          0 :     gs_data* gsd = new gs_data();
    3917                 :            :     // assert(sizeof(ulong_) == sizeof(EntityHandle));
    3918 [ #  # ][ #  # ]:          0 :     result = gsd->initialize( skin_ents[0].size(), &lgid_data[0], &handle_vec[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    3919                 :            : 
    3920                 :            :     // Get shared proc tags
    3921                 :            :     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
    3922 [ #  # ][ #  # ]:          0 :     result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3923                 :            : 
    3924                 :            :     // Load shared verts into a tuple, then sort by index
    3925         [ #  # ]:          0 :     TupleList shared_verts;
    3926 [ #  # ][ #  # ]:          0 :     shared_verts.initialize( 2, 0, 1, 0, skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 ) );
    3927         [ #  # ]:          0 :     shared_verts.enableWriteAccess();
    3928                 :            : 
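    // Each tuple holds two ints and one ulong: the ints are (index of the vertex
    // within skin_ents[0], rank of the sharing proc) and the ulong is that vertex's
    // handle on the sharing proc, copied from the gs_data nonlocal info below.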
    3929                 :          0 :     unsigned int i = 0, j = 0;
    3930         [ #  # ]:          0 :     for( unsigned int p = 0; p < gsd->nlinfo->_np; p++ )
    3931         [ #  # ]:          0 :         for( unsigned int np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
    3932                 :            :         {
    3933                 :          0 :             shared_verts.vi_wr[i++] = gsd->nlinfo->_sh_ind[j];
    3934                 :          0 :             shared_verts.vi_wr[i++] = gsd->nlinfo->_target[p];
    3935                 :          0 :             shared_verts.vul_wr[j]  = gsd->nlinfo->_ulabels[j];
    3936                 :          0 :             j++;
    3937         [ #  # ]:          0 :             shared_verts.inc_n();
    3938                 :            :         }
    3939                 :            : 
    3940 [ #  # ][ #  # ]:          0 :     myDebug->tprintf( 3, " shared verts size %d \n", (int)shared_verts.get_n() );
    3941                 :            : 
    3942         [ #  # ]:          0 :     int max_size = skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 );
    3943         [ #  # ]:          0 :     moab::TupleList::buffer sort_buffer;
    3944         [ #  # ]:          0 :     sort_buffer.buffer_init( max_size );
    3945         [ #  # ]:          0 :     shared_verts.sort( 0, &sort_buffer );
    3946         [ #  # ]:          0 :     sort_buffer.reset();
    3947                 :            : 
    3948                 :            :     // Set sharing procs and handles tags on skin ents
    3949                 :          0 :     int maxp = -1;
    3950         [ #  # ]:          0 :     std::vector< int > sharing_procs( MAX_SHARING_PROCS );
    3951         [ #  # ]:          0 :     std::fill( sharing_procs.begin(), sharing_procs.end(), maxp );
    3952                 :          0 :     j = 0;
    3953                 :          0 :     i = 0;
    3954                 :            : 
    3955                 :            :     // Get ents shared by 1 or n procs
    3956         [ #  # ]:          0 :     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
    3957         [ #  # ]:          0 :     Range proc_verts;
    3958 [ #  # ][ #  # ]:          0 :     result = mbImpl->get_adjacencies( proc_ents, 0, false, proc_verts, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get proc_verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3959                 :            : 
    3960         [ #  # ]:          0 :     myDebug->print( 3, " resolve shared ents:  proc verts ", proc_verts );
    3961 [ #  # ][ #  # ]:          0 :     result = tag_shared_verts( shared_verts, skin_ents, proc_nvecs, proc_verts );MB_CHK_SET_ERR( result, "Failed to tag shared verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3962                 :            : 
    3963                 :            : #ifdef MOAB_HAVE_MPE
    3964                 :            :     if( myDebug->get_verbosity() == 2 )
    3965                 :            :     { MPE_Log_event( SHAREDV_END, procConfig.proc_rank(), "Finished tag_shared_verts." ); }
    3966                 :            : #endif
    3967                 :            : 
    3968                 :            :     // Get entities shared by 1 or n procs
    3969 [ #  # ][ #  # ]:          0 :     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );MB_CHK_SET_ERR( result, "Failed to tag shared entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3970                 :            : 
    3971         [ #  # ]:          0 :     shared_verts.reset();
    3972                 :            : 
    3973 [ #  # ][ #  # ]:          0 :     if( myDebug->get_verbosity() > 0 )
    3974                 :            :     {
    3975 [ #  # ][ #  # ]:          0 :         for( std::map< std::vector< int >, std::vector< EntityHandle > >::const_iterator mit = proc_nvecs.begin();
         [ #  # ][ #  # ]
    3976         [ #  # ]:          0 :              mit != proc_nvecs.end(); ++mit )
    3977                 :            :         {
    3978         [ #  # ]:          0 :             myDebug->tprintf( 1, "Iface: " );
    3979 [ #  # ][ #  # ]:          0 :             for( std::vector< int >::const_iterator vit = ( mit->first ).begin(); vit != ( mit->first ).end(); ++vit )
         [ #  # ][ #  # ]
                 [ #  # ]
    3980 [ #  # ][ #  # ]:          0 :                 myDebug->printf( 1, " %d", *vit );
    3981         [ #  # ]:          0 :             myDebug->print( 1, "\n" );
    3982                 :            :         }
    3983                 :            :     }
    3984                 :            : 
    3985                 :            :     // Create the sets for each interface; store them as tags on
    3986                 :            :     // the interface instance
    3987         [ #  # ]:          0 :     Range iface_sets;
    3988 [ #  # ][ #  # ]:          0 :     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3989                 :            : 
    3990                 :            :     // Establish comm procs and buffers for them
    3991         [ #  # ]:          0 :     std::set< unsigned int > procs;
    3992 [ #  # ][ #  # ]:          0 :     result = get_interface_procs( procs, true );MB_CHK_SET_ERR( result, "Failed to get interface procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3993                 :            : 
    3994                 :            : #ifndef NDEBUG
    3995 [ #  # ][ #  # ]:          0 :     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Shared handle check failed after interface vertex exchange" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    3996                 :            : #endif
    3997                 :            : 
    3998                 :            :     // Resolve shared entity remote handles; implemented in ghost cell exchange
    3999                 :            :     // code because it's so similar
    4000 [ #  # ][ #  # ]:          0 :     result = exchange_ghost_cells( -1, -1, 0, 0, true, true );MB_CHK_SET_ERR( result, "Failed to resolve shared entity remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4001                 :            : 
    4002                 :            :     // Now build parent/child links for interface sets
    4003 [ #  # ][ #  # ]:          0 :     result = create_iface_pc_links();MB_CHK_SET_ERR( result, "Failed to create interface parent/child links" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4004                 :            : 
    4005         [ #  # ]:          0 :     gsd->reset();
    4006         [ #  # ]:          0 :     delete gsd;
    4007                 :            : 
    4008                 :            : #ifdef MOAB_HAVE_MPE
    4009                 :            :     if( myDebug->get_verbosity() == 2 )
    4010                 :            :     { MPE_Log_event( RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents." ); }
    4011                 :            : #endif
    4012                 :            : 
    4013                 :            :     // std::ostringstream ent_str;
    4014                 :            :     // ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
    4015                 :            :     // mbImpl->write_mesh(ent_str.str().c_str());
    4016                 :            : 
    4017                 :            :     // Done
    4018 [ #  # ][ #  # ]:          0 :     return result;
    4019                 :            : }
    4020                 :            : 
    4021                 :          0 : void ParallelComm::define_mpe()
    4022                 :            : {
    4023                 :            : #ifdef MOAB_HAVE_MPE
    4024                 :            :     if( myDebug->get_verbosity() == 2 )
    4025                 :            :     {
    4026                 :            :         // Define mpe states used for logging
    4027                 :            :         int success;
    4028                 :            :         MPE_Log_get_state_eventIDs( &IFACE_START, &IFACE_END );
    4029                 :            :         MPE_Log_get_state_eventIDs( &GHOST_START, &GHOST_END );
    4030                 :            :         MPE_Log_get_state_eventIDs( &SHAREDV_START, &SHAREDV_END );
    4031                 :            :         MPE_Log_get_state_eventIDs( &RESOLVE_START, &RESOLVE_END );
    4032                 :            :         MPE_Log_get_state_eventIDs( &ENTITIES_START, &ENTITIES_END );
    4033                 :            :         MPE_Log_get_state_eventIDs( &RHANDLES_START, &RHANDLES_END );
    4034                 :            :         MPE_Log_get_state_eventIDs( &OWNED_START, &OWNED_END );
    4035                 :            :         success = MPE_Describe_state( IFACE_START, IFACE_END, "Resolve interface ents", "green" );
    4036                 :            :         assert( MPE_LOG_OK == success );
    4037                 :            :         success = MPE_Describe_state( GHOST_START, GHOST_END, "Exchange ghost ents", "red" );
    4038                 :            :         assert( MPE_LOG_OK == success );
    4039                 :            :         success = MPE_Describe_state( SHAREDV_START, SHAREDV_END, "Resolve interface vertices", "blue" );
    4040                 :            :         assert( MPE_LOG_OK == success );
    4041                 :            :         success = MPE_Describe_state( RESOLVE_START, RESOLVE_END, "Resolve shared ents", "purple" );
    4042                 :            :         assert( MPE_LOG_OK == success );
    4043                 :            :         success = MPE_Describe_state( ENTITIES_START, ENTITIES_END, "Exchange shared ents", "yellow" );
    4044                 :            :         assert( MPE_LOG_OK == success );
    4045                 :            :         success = MPE_Describe_state( RHANDLES_START, RHANDLES_END, "Remote handles", "cyan" );
    4046                 :            :         assert( MPE_LOG_OK == success );
    4047                 :            :         success = MPE_Describe_state( OWNED_START, OWNED_END, "Exchange owned ents", "black" );
    4048                 :            :         assert( MPE_LOG_OK == success );
    4049                 :            :     }
    4050                 :            : #endif
    4051                 :          0 : }
    4052                 :            : 
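An illustrative sketch (not part of ParallelComm.cpp) of how the MPE states defined in define_mpe() are consumed: each instrumented phase brackets its work with the matching START/END events, the same pattern as the MOAB_HAVE_MPE-guarded calls visible elsewhere in this file. The RESOLVE_START call is assumed here to mirror the RESOLVE_END call shown above.

    #ifdef MOAB_HAVE_MPE
        if( myDebug->get_verbosity() == 2 )
        { MPE_Log_event( RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents." ); }
        // ... resolve shared vertices, create interface sets, exchange remote handles ...
        if( myDebug->get_verbosity() == 2 )
        { MPE_Log_event( RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents." ); }
    #endif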
    4053                 :          0 : ErrorCode ParallelComm::resolve_shared_ents( ParallelComm** pc, const unsigned int np, EntityHandle this_set,
    4054                 :            :                                              const int part_dim )
    4055                 :            : {
    4056         [ #  # ]:          0 :     std::vector< Range > verts( np );
    4057                 :          0 :     int tot_verts = 0;
    4058                 :            :     unsigned int p, i, j, v;
    4059                 :            :     ErrorCode rval;
    4060         [ #  # ]:          0 :     for( p = 0; p < np; p++ )
    4061                 :            :     {
    4062 [ #  # ][ #  # ]:          0 :         Skinner skinner( pc[p]->get_moab() );
    4063 [ #  # ][ #  # ]:          0 :         Range part_ents, skin_ents;
         [ #  # ][ #  # ]
    4064 [ #  # ][ #  # ]:          0 :         rval = pc[p]->get_moab()->get_entities_by_dimension( this_set, part_dim, part_ents );
    4065         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    4066         [ #  # ]:          0 :         rval = skinner.find_skin( this_set, part_ents, false, skin_ents, 0, true, true, true );
    4067         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    4068 [ #  # ][ #  # ]:          0 :         rval = pc[p]->get_moab()->get_adjacencies( skin_ents, 0, true, verts[p], Interface::UNION );
                 [ #  # ]
    4069         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    4070 [ #  # ][ #  # ]:          0 :         tot_verts += verts[p].size();
                 [ #  # ]
    4071                 :          0 :     }
    4072                 :            : 
    4073         [ #  # ]:          0 :     TupleList shared_ents;
    4074         [ #  # ]:          0 :     shared_ents.initialize( 2, 0, 1, 0, tot_verts );
    4075         [ #  # ]:          0 :     shared_ents.enableWriteAccess();
    4076                 :            : 
    4077                 :          0 :     i = 0;
    4078                 :          0 :     j = 0;
    4079         [ #  # ]:          0 :     std::vector< int > gids;
    4080         [ #  # ]:          0 :     Range::iterator rit;
    4081                 :            :     Tag gid_tag;
    4082         [ #  # ]:          0 :     for( p = 0; p < np; p++ )
    4083                 :            :     {
    4084 [ #  # ][ #  # ]:          0 :         gid_tag = pc[p]->get_moab()->globalId_tag();
    4085                 :            : 
    4086 [ #  # ][ #  # ]:          0 :         gids.resize( verts[p].size() );
                 [ #  # ]
    4087 [ #  # ][ #  # ]:          0 :         rval = pc[p]->get_moab()->tag_get_data( gid_tag, verts[p], &gids[0] );
         [ #  # ][ #  # ]
    4088         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    4089                 :            : 
    4090 [ #  # ][ #  # ]:          0 :         for( v = 0, rit = verts[p].begin(); v < gids.size(); v++, ++rit )
         [ #  # ][ #  # ]
    4091                 :            :         {
    4092         [ #  # ]:          0 :             shared_ents.vi_wr[i++] = gids[v];
    4093                 :          0 :             shared_ents.vi_wr[i++] = p;
    4094         [ #  # ]:          0 :             shared_ents.vul_wr[j]  = *rit;
    4095                 :          0 :             j++;
    4096         [ #  # ]:          0 :             shared_ents.inc_n();
    4097                 :            :         }
    4098                 :            :     }
    4099                 :            : 
    4100         [ #  # ]:          0 :     moab::TupleList::buffer sort_buffer;
    4101         [ #  # ]:          0 :     sort_buffer.buffer_init( tot_verts );
    4102         [ #  # ]:          0 :     shared_ents.sort( 0, &sort_buffer );
    4103         [ #  # ]:          0 :     sort_buffer.reset();
    4104                 :            : 
    4105                 :          0 :     j = 0;
    4106                 :          0 :     i = 0;
    4107         [ #  # ]:          0 :     std::vector< EntityHandle > handles;
    4108         [ #  # ]:          0 :     std::vector< int > procs;
    4109                 :            : 
    4110 [ #  # ][ #  # ]:          0 :     while( i < shared_ents.get_n() )
    4111                 :            :     {
    4112                 :          0 :         handles.clear();
    4113                 :          0 :         procs.clear();
    4114                 :            : 
    4115                 :            :         // Count & accumulate sharing procs
    4116                 :          0 :         int this_gid = shared_ents.vi_rd[j];
    4117 [ #  # ][ #  # ]:          0 :         while( i < shared_ents.get_n() && shared_ents.vi_rd[j] == this_gid )
         [ #  # ][ #  # ]
    4118                 :            :         {
    4119                 :          0 :             j++;
    4120         [ #  # ]:          0 :             procs.push_back( shared_ents.vi_rd[j++] );
    4121         [ #  # ]:          0 :             handles.push_back( shared_ents.vul_rd[i++] );
    4122                 :            :         }
    4123         [ #  # ]:          0 :         if( 1 == procs.size() ) continue;
    4124                 :            : 
    4125         [ #  # ]:          0 :         for( v = 0; v < procs.size(); v++ )
    4126                 :            :         {
    4127 [ #  # ][ #  # ]:          0 :             rval = pc[procs[v]]->update_remote_data( handles[v], &procs[0], &handles[0], procs.size(),
         [ #  # ][ #  # ]
    4128 [ #  # ][ #  # ]:          0 :                                                      ( procs[0] == (int)pc[procs[v]]->rank()
                 [ #  # ]
    4129                 :            :                                                            ? PSTATUS_INTERFACE
    4130 [ #  # ][ #  # ]:          0 :                                                            : ( PSTATUS_NOT_OWNED | PSTATUS_INTERFACE ) ) );
    4131         [ #  # ]:          0 :             if( MB_SUCCESS != rval ) return rval;
    4132                 :            :         }
    4133                 :            :     }
    4134                 :            : 
    4135         [ #  # ]:          0 :     std::set< unsigned int > psets;
    4136         [ #  # ]:          0 :     for( p = 0; p < np; p++ )
    4137                 :            :     {
    4138         [ #  # ]:          0 :         rval = pc[p]->create_interface_sets( this_set, part_dim, part_dim - 1 );
    4139         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    4140                 :            :         // Establish comm procs and buffers for them
    4141                 :          0 :         psets.clear();
    4142         [ #  # ]:          0 :         rval = pc[p]->get_interface_procs( psets, true );
    4143         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    4144                 :            :     }
    4145                 :            : 
    4146         [ #  # ]:          0 :     shared_ents.reset();
    4147                 :            : 
    4148                 :          0 :     return MB_SUCCESS;
    4149                 :            : }
    4150                 :            : 
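A minimal STL-only sketch of the grouping pattern used by the routine above (plain std::sort and std::vector, not the moab::TupleList API): pack one record per (vertex, part) pair, sort by global id, and every run of equal ids is the complete sharing list for that vertex; a run of length one means the vertex is not shared.

    #include <algorithm>
    #include <vector>

    struct VertRec
    {
        int gid;               // global vertex id (primary sort key)
        int proc;              // part/processor holding this copy
        unsigned long handle;  // local handle of the copy on that part
    };

    void group_by_gid( std::vector< VertRec >& recs )
    {
        std::sort( recs.begin(), recs.end(),
                   []( const VertRec& a, const VertRec& b ) { return a.gid < b.gid; } );
        for( std::size_t i = 0; i < recs.size(); )
        {
            std::size_t j = i;
            while( j < recs.size() && recs[j].gid == recs[i].gid )
                ++j;
            // recs[i..j) now lists every (proc, handle) copy sharing this vertex;
            // j - i == 1 means the vertex lives on a single part only.
            i = j;
        }
    }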
    4151                 :          0 : ErrorCode ParallelComm::tag_iface_entities()
    4152                 :            : {
    4153                 :          0 :     ErrorCode result = MB_SUCCESS;
    4154 [ #  # ][ #  # ]:          0 :     Range iface_ents, tmp_ents, rmv_ents;
                 [ #  # ]
    4155         [ #  # ]:          0 :     std::vector< unsigned char > pstat;
    4156                 :            :     unsigned char set_pstat;
    4157         [ #  # ]:          0 :     Range::iterator rit2;
    4158                 :            :     unsigned int i;
    4159                 :            : 
    4160 [ #  # ][ #  # ]:          0 :     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    4161                 :            :     {
    4162         [ #  # ]:          0 :         iface_ents.clear();
    4163                 :            : 
    4164 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get interface set contents" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4165 [ #  # ][ #  # ]:          0 :         pstat.resize( iface_ents.size() );
    4166 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4167 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( pstatus_tag(), &( *rit ), 1, &set_pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4168         [ #  # ]:          0 :         rmv_ents.clear();
    4169 [ #  # ][ #  # ]:          0 :         for( rit2 = iface_ents.begin(), i = 0; rit2 != iface_ents.end(); ++rit2, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    4170                 :            :         {
    4171 [ #  # ][ #  # ]:          0 :             if( !( pstat[i] & PSTATUS_INTERFACE ) )
    4172                 :            :             {
    4173 [ #  # ][ #  # ]:          0 :                 rmv_ents.insert( *rit2 );
    4174         [ #  # ]:          0 :                 pstat[i] = 0x0;
    4175                 :            :             }
    4176                 :            :         }
    4177 [ #  # ][ #  # ]:          0 :         result = mbImpl->remove_entities( *rit, rmv_ents );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4178                 :            : 
    4179         [ #  # ]:          0 :         if( !( set_pstat & PSTATUS_NOT_OWNED ) ) continue;
    4180                 :            :         // If we're here, we need to set the PSTATUS_NOT_OWNED bit on the (remaining) set contents
    4181                 :            : 
    4182                 :            :         // Remove rmv_ents from the contents list
    4183 [ #  # ][ #  # ]:          0 :         iface_ents = subtract( iface_ents, rmv_ents );
    4184                 :            :         // Compress the pstat vector (removing 0x0's)
    4185                 :            :         std::remove_if( pstat.begin(), pstat.end(),
    4186 [ #  # ][ #  # ]:          0 :                         std::bind( std::equal_to< unsigned char >(), std::placeholders::_1, 0x0 ) );
    4187                 :            :         // std::bind2nd(std::equal_to<unsigned char>(), 0x0));
    4188                 :            :         // https://stackoverflow.com/questions/32739018/a-replacement-for-stdbind2nd
    4189                 :            :         // Fold the not_owned bit into remaining values
    4190         [ #  # ]:          0 :         unsigned int sz = iface_ents.size();
    4191         [ #  # ]:          0 :         for( i = 0; i < sz; i++ )
    4192         [ #  # ]:          0 :             pstat[i] |= PSTATUS_NOT_OWNED;
    4193                 :            : 
    4194                 :            :         // Set the tag on the entities
    4195 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus values for interface set entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4196                 :            :     }
    4197                 :            : 
    4198                 :          0 :     return MB_SUCCESS;
    4199                 :            : }
    4200                 :            : 
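A small standalone illustration of the std::remove_if compression step used in tag_iface_entities() above: remove_if only moves the kept values to the front and returns the new logical end without shrinking the vector, which is why the loop above touches only the first iface_ents.size() entries of pstat.

    #include <algorithm>
    #include <vector>

    int main()
    {
        std::vector< unsigned char > pstat = { 1, 0, 2, 0, 3 };
        std::vector< unsigned char >::iterator new_end =
            std::remove_if( pstat.begin(), pstat.end(),
                            []( unsigned char c ) { return c == 0x0; } );
        // pstat now begins { 1, 2, 3 }; new_end - pstat.begin() == 3, and only those
        // leading entries are meaningful afterwards (the tail is left unspecified).
        return 0;
    }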
    4201                 :          0 : ErrorCode ParallelComm::set_pstatus_entities( Range& pstatus_ents, unsigned char pstatus_val, bool lower_dim_ents,
    4202                 :            :                                               bool verts_too, int operation )
    4203                 :            : {
    4204 [ #  # ][ #  # ]:          0 :     std::vector< unsigned char > pstatus_vals( pstatus_ents.size() );
    4205         [ #  # ]:          0 :     Range all_ents, *range_ptr = &pstatus_ents;
    4206                 :            :     ErrorCode result;
    4207 [ #  # ][ #  # ]:          0 :     if( lower_dim_ents || verts_too )
    4208                 :            :     {
    4209         [ #  # ]:          0 :         all_ents      = pstatus_ents;
    4210                 :          0 :         range_ptr     = &all_ents;
    4211 [ #  # ][ #  # ]:          0 :         int start_dim = ( lower_dim_ents ? mbImpl->dimension_from_handle( *pstatus_ents.rbegin() ) - 1 : 0 );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4212         [ #  # ]:          0 :         for( ; start_dim >= 0; start_dim-- )
    4213                 :            :         {
    4214 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_adjacencies( all_ents, start_dim, true, all_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get adjacencies for pstatus entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4215                 :            :         }
    4216                 :            :     }
    4217         [ #  # ]:          0 :     if( Interface::UNION == operation )
    4218                 :            :     {
    4219 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4220         [ #  # ]:          0 :         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
    4221         [ #  # ]:          0 :             pstatus_vals[i] |= pstatus_val;
    4222                 :            :     }
    4223                 :            :     else
    4224                 :            :     {
    4225         [ #  # ]:          0 :         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
    4226         [ #  # ]:          0 :             pstatus_vals[i] = pstatus_val;
    4227                 :            :     }
    4228 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_set_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4229                 :            : 
    4230                 :          0 :     return MB_SUCCESS;
    4231                 :            : }
    4232                 :            : 
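A hypothetical call into the routine above (pcomm and ghost_cells are assumed names, not taken from this file): with Interface::UNION the given bit is OR-ed into each entity's existing pstatus byte, while any other operation value overwrites the byte with pstatus_val; passing verts_too also updates the entities' vertices.

    Range ghost_cells;  // assumed to be filled elsewhere
    ErrorCode rval = pcomm->set_pstatus_entities( ghost_cells, PSTATUS_GHOST,
                                                  false,               // lower_dim_ents: skip faces/edges
                                                  true,                // verts_too: also tag the vertices
                                                  Interface::UNION );  // OR the bit, keep existing bits
    if( MB_SUCCESS != rval ) return rval;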
    4233                 :          0 : ErrorCode ParallelComm::set_pstatus_entities( EntityHandle* pstatus_ents, int num_ents, unsigned char pstatus_val,
    4234                 :            :                                               bool lower_dim_ents, bool verts_too, int operation )
    4235                 :            : {
    4236         [ #  # ]:          0 :     std::vector< unsigned char > pstatus_vals( num_ents );
    4237                 :            :     ErrorCode result;
    4238 [ #  # ][ #  # ]:          0 :     if( lower_dim_ents || verts_too )
    4239                 :            :     {
    4240                 :            :         // In this case, call the range-based version
    4241         [ #  # ]:          0 :         Range tmp_range;
    4242 [ #  # ][ #  # ]:          0 :         std::copy( pstatus_ents, pstatus_ents + num_ents, range_inserter( tmp_range ) );
    4243         [ #  # ]:          0 :         return set_pstatus_entities( tmp_range, pstatus_val, lower_dim_ents, verts_too, operation );
    4244                 :            :     }
    4245                 :            : 
    4246         [ #  # ]:          0 :     if( Interface::UNION == operation )
    4247                 :            :     {
    4248 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4249         [ #  # ]:          0 :         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
    4250         [ #  # ]:          0 :             pstatus_vals[i] |= pstatus_val;
    4251                 :            :     }
    4252                 :            :     else
    4253                 :            :     {
    4254         [ #  # ]:          0 :         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
    4255         [ #  # ]:          0 :             pstatus_vals[i] = pstatus_val;
    4256                 :            :     }
    4257 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_set_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4258                 :            : 
    4259                 :          0 :     return MB_SUCCESS;
    4260                 :            : }
    4261                 :            : 
    4262                 :          0 : static size_t choose_owner_idx( const std::vector< unsigned >& proc_list )
    4263                 :            : {
    4264                 :            :     // Try to assign owners randomly so we get a good distribution,
    4265                 :            :     // (note: specifying the same seed on all procs is essential)
    4266                 :          0 :     unsigned val = 0;
    4267         [ #  # ]:          0 :     for( size_t i = 0; i < proc_list.size(); i++ )
    4268                 :          0 :         val ^= proc_list[i];
    4269                 :          0 :     srand( (int)( val ) );
    4270                 :          0 :     return rand() % proc_list.size();
    4271                 :            : }
    4272                 :            : 
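Because the sharing-proc vector passed to choose_owner_idx() is identical (sorted, duplicate-free) on every participating rank, the XOR-derived seed and therefore the chosen index are identical everywhere, so all ranks agree on the owner without any communication. A tiny illustration with assumed values:

    // Assumed example: ranks 2, 5 and 7 share a set, and each builds the same list.
    std::vector< unsigned > sharing_procs = { 2, 5, 7 };
    size_t owner_idx = choose_owner_idx( sharing_procs );
    // owner_idx is the same on all three ranks, so sharing_procs[owner_idx] is a
    // consistent owner choice with no message exchange.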
    4273                 :            : struct set_tuple
    4274                 :            : {
    4275                 :            :     unsigned idx;
    4276                 :            :     unsigned proc;
    4277                 :            :     EntityHandle handle;
    4278                 :          0 :     inline bool operator<( set_tuple other ) const
    4279                 :            :     {
    4280         [ #  # ]:          0 :         return ( idx == other.idx ) ? ( proc < other.proc ) : ( idx < other.idx );
    4281                 :            :     }
    4282                 :            : };
    4283                 :            : 
    4284                 :          0 : ErrorCode ParallelComm::resolve_shared_sets( EntityHandle file, const Tag* idtag )
    4285                 :            : {
    4286                 :            :     // Find all sets with any of the following tags:
    4287                 :            :     const char* const shared_set_tag_names[] = { GEOM_DIMENSION_TAG_NAME, MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME,
    4288                 :          0 :                                                  NEUMANN_SET_TAG_NAME, PARALLEL_PARTITION_TAG_NAME };
    4289                 :          0 :     int num_tags                             = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
    4290         [ #  # ]:          0 :     Range candidate_sets;
    4291                 :          0 :     ErrorCode result = MB_FAILURE;
    4292                 :            : 
    4293                 :            :     // If we're not given an ID tag to use to globally identify sets,
    4294                 :            :     // then fall back to using known tag values
    4295         [ #  # ]:          0 :     if( !idtag )
    4296                 :            :     {
    4297                 :            :         Tag gid, tag;
    4298         [ #  # ]:          0 :         gid = mbImpl->globalId_tag();
    4299 [ #  # ][ #  # ]:          0 :         if( NULL != gid ) result = mbImpl->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tag );
    4300         [ #  # ]:          0 :         if( MB_SUCCESS == result )
    4301                 :            :         {
    4302         [ #  # ]:          0 :             for( int d = 0; d < 4; d++ )
    4303                 :            :             {
    4304         [ #  # ]:          0 :                 candidate_sets.clear();
    4305                 :          0 :                 const void* vals[] = { &d };
    4306         [ #  # ]:          0 :                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, vals, 1, candidate_sets );
    4307 [ #  # ][ #  # ]:          0 :                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, gid );
    4308                 :            :             }
    4309                 :            :         }
    4310                 :            : 
    4311         [ #  # ]:          0 :         for( int i = 1; i < num_tags; i++ )
    4312                 :            :         {
    4313         [ #  # ]:          0 :             result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag );
    4314         [ #  # ]:          0 :             if( MB_SUCCESS == result )
    4315                 :            :             {
    4316         [ #  # ]:          0 :                 candidate_sets.clear();
    4317         [ #  # ]:          0 :                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets );
    4318 [ #  # ][ #  # ]:          0 :                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, tag );
    4319                 :            :             }
    4320                 :            :         }
    4321                 :            : 
    4322                 :          0 :         return MB_SUCCESS;
    4323                 :            :     }
    4324                 :            : 
    4325         [ #  # ]:          0 :     for( int i = 0; i < num_tags; i++ )
    4326                 :            :     {
    4327                 :            :         Tag tag;
    4328         [ #  # ]:          0 :         result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag, MB_TAG_ANY );
    4329         [ #  # ]:          0 :         if( MB_SUCCESS != result ) continue;
    4330                 :            : 
    4331         [ #  # ]:          0 :         mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets, Interface::UNION );
    4332                 :            :     }
    4333                 :            : 
    4334                 :            :     // Find any additional sets that contain shared entities
    4335         [ #  # ]:          0 :     Range::iterator hint = candidate_sets.begin();
    4336         [ #  # ]:          0 :     Range all_sets;
    4337         [ #  # ]:          0 :     mbImpl->get_entities_by_type( file, MBENTITYSET, all_sets );
    4338 [ #  # ][ #  # ]:          0 :     all_sets           = subtract( all_sets, candidate_sets );
    4339         [ #  # ]:          0 :     Range::iterator it = all_sets.begin();
    4340 [ #  # ][ #  # ]:          0 :     while( it != all_sets.end() )
                 [ #  # ]
    4341                 :            :     {
    4342         [ #  # ]:          0 :         Range contents;
    4343 [ #  # ][ #  # ]:          0 :         mbImpl->get_entities_by_handle( *it, contents );
    4344 [ #  # ][ #  # ]:          0 :         contents.erase( contents.lower_bound( MBENTITYSET ), contents.end() );
                 [ #  # ]
    4345         [ #  # ]:          0 :         filter_pstatus( contents, PSTATUS_SHARED, PSTATUS_OR );
    4346 [ #  # ][ #  # ]:          0 :         if( contents.empty() ) { ++it; }
                 [ #  # ]
    4347                 :            :         else
    4348                 :            :         {
    4349 [ #  # ][ #  # ]:          0 :             hint = candidate_sets.insert( hint, *it );
    4350         [ #  # ]:          0 :             it   = all_sets.erase( it );
    4351                 :            :         }
    4352                 :          0 :     }
    4353                 :            : 
    4354                 :            :     // Find any additional sets that contain or are parents of potential shared sets
    4355         [ #  # ]:          0 :     Range prev_list = candidate_sets;
    4356 [ #  # ][ #  # ]:          0 :     while( !prev_list.empty() )
    4357                 :            :     {
    4358         [ #  # ]:          0 :         it = all_sets.begin();
    4359         [ #  # ]:          0 :         Range new_list;
    4360         [ #  # ]:          0 :         hint = new_list.begin();
    4361 [ #  # ][ #  # ]:          0 :         while( it != all_sets.end() )
                 [ #  # ]
    4362                 :            :         {
    4363         [ #  # ]:          0 :             Range contents;
    4364 [ #  # ][ #  # ]:          0 :             mbImpl->get_entities_by_type( *it, MBENTITYSET, contents );
    4365 [ #  # ][ #  # ]:          0 :             if( !intersect( prev_list, contents ).empty() )
                 [ #  # ]
    4366                 :            :             {
    4367 [ #  # ][ #  # ]:          0 :                 hint = new_list.insert( hint, *it );
    4368         [ #  # ]:          0 :                 it   = all_sets.erase( it );
    4369                 :            :             }
    4370                 :            :             else
    4371                 :            :             {
    4372         [ #  # ]:          0 :                 new_list.clear();
    4373 [ #  # ][ #  # ]:          0 :                 mbImpl->get_child_meshsets( *it, contents );
    4374 [ #  # ][ #  # ]:          0 :                 if( !intersect( prev_list, contents ).empty() )
                 [ #  # ]
    4375                 :            :                 {
    4376 [ #  # ][ #  # ]:          0 :                     hint = new_list.insert( hint, *it );
    4377         [ #  # ]:          0 :                     it   = all_sets.erase( it );
    4378                 :            :                 }
    4379                 :            :                 else
    4380                 :            :                 {
    4381         [ #  # ]:          0 :                     ++it;
    4382                 :            :                 }
    4383                 :            :             }
    4384                 :          0 :         }
    4385                 :            : 
    4386         [ #  # ]:          0 :         candidate_sets.merge( new_list );
    4387         [ #  # ]:          0 :         prev_list.swap( new_list );
    4388                 :          0 :     }
    4389                 :            : 
    4390         [ #  # ]:          0 :     return resolve_shared_sets( candidate_sets, *idtag );
    4391                 :            : }
    4392                 :            : 
    4393                 :            : #ifndef NDEBUG
    4394                 :          0 : bool is_sorted_unique( std::vector< unsigned >& v )
    4395                 :            : {
    4396         [ #  # ]:          0 :     for( size_t i = 1; i < v.size(); i++ )
    4397         [ #  # ]:          0 :         if( v[i - 1] >= v[i] ) return false;
    4398                 :          0 :     return true;
    4399                 :            : }
    4400                 :            : #endif
    4401                 :            : 
    4402                 :          0 : ErrorCode ParallelComm::resolve_shared_sets( Range& sets, Tag idtag )
    4403                 :            : {
    4404                 :            :     ErrorCode result;
    4405 [ #  # ][ #  # ]:          0 :     const unsigned rk = proc_config().proc_rank();
    4406 [ #  # ][ #  # ]:          0 :     MPI_Comm cm       = proc_config().proc_comm();
    4407                 :            : 
    4408                 :            :     // Build sharing list for all sets
    4409                 :            : 
    4410                 :            :     // Get ids for sets in a vector, to pass to gs
    4411         [ #  # ]:          0 :     std::vector< long > larray;  // Allocate sufficient space for longs
    4412         [ #  # ]:          0 :     std::vector< Ulong > handles;
    4413         [ #  # ]:          0 :     Range tmp_sets;
    4414                 :            :     // The id tag can be size 4 or size 8
    4415                 :            :     // Based on that, convert to int or to long, similarly to what we do
    4416                 :            :     // for resolving shared vertices;
    4417                 :            :     // This code must also work on 32-bit builds, where long is 4 bytes,
    4418                 :            :     // so test for size 4 first; then we should be fine
    4419                 :            :     DataType tag_type;
    4420 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data_type( idtag, tag_type );MB_CHK_SET_ERR( result, "Failed getting tag data type" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4421                 :            :     int bytes_per_tag;
    4422 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_bytes( idtag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed getting number of bytes per tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4423                 :            :     // On 64-bit platforms, long and int have different sizes;
    4424                 :            :     // on 32-bit platforms they are both 4 bytes, so a long of 8 bytes indicates a 64-bit build
    4425                 :            : 
    4426 [ #  # ][ #  # ]:          0 :     for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    4427                 :            :     {
    4428 [ #  # ][ #  # ]:          0 :         if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
                 [ #  # ]
    4429                 :            :         {  // It is a special id tag
    4430                 :            :             long dum;
    4431 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
    4432         [ #  # ]:          0 :             if( MB_SUCCESS == result )
    4433                 :            :             {
    4434         [ #  # ]:          0 :                 larray.push_back( dum );
    4435 [ #  # ][ #  # ]:          0 :                 handles.push_back( *rit );
    4436 [ #  # ][ #  # ]:          0 :                 tmp_sets.insert( tmp_sets.end(), *rit );
                 [ #  # ]
    4437                 :          0 :             }
    4438                 :            :         }
    4439         [ #  # ]:          0 :         else if( 4 == bytes_per_tag )
    4440                 :            :         {  // Must be GLOBAL_ID tag or MATERIAL_ID, etc
    4441                 :            :             int dum;
    4442 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
    4443         [ #  # ]:          0 :             if( MB_SUCCESS == result )
    4444                 :            :             {
    4445         [ #  # ]:          0 :                 larray.push_back( dum );
    4446 [ #  # ][ #  # ]:          0 :                 handles.push_back( *rit );
    4447 [ #  # ][ #  # ]:          0 :                 tmp_sets.insert( tmp_sets.end(), *rit );
                 [ #  # ]
    4448                 :            :             }
    4449                 :            :         }
    4450                 :            :     }
    4451                 :            : 
    4452                 :          0 :     const size_t nsets = handles.size();
    4453                 :            : 
    4454                 :            :     // Get handle array for sets
    4455                 :            :     // Note: this does not hold on 64-bit Windows, where EntityHandle is 64 bits but long is 32:
    4456                 :            :     // assert(sizeof(EntityHandle) <= sizeof(unsigned long));
    4457                 :            : 
    4458                 :            :     // Do communication of data
    4459         [ #  # ]:          0 :     gs_data::crystal_data* cd = procConfig.crystal_router();
    4460 [ #  # ][ #  # ]:          0 :     gs_data* gsd              = new gs_data();
    4461 [ #  # ][ #  # ]:          0 :     result                    = gsd->initialize( nsets, &larray[0], &handles[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4462                 :            : 
    4463                 :            :     // Convert from global IDs grouped by process rank to list
    4464                 :            :     // of <idx, rank> pairs so that we can sort primarily
    4465                 :            :     // by idx and secondarily by rank (we want lists of procs for each
    4466                 :            :     // idx, not lists of indices for each proc).
    4467                 :          0 :     size_t ntuple = 0;
    4468         [ #  # ]:          0 :     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
    4469                 :          0 :         ntuple += gsd->nlinfo->_nshared[p];
    4470         [ #  # ]:          0 :     std::vector< set_tuple > tuples;
    4471         [ #  # ]:          0 :     tuples.reserve( ntuple );
    4472                 :          0 :     size_t j = 0;
    4473         [ #  # ]:          0 :     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
    4474                 :            :     {
    4475         [ #  # ]:          0 :         for( unsigned np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
    4476                 :            :         {
    4477                 :            :             set_tuple t;
    4478                 :          0 :             t.idx    = gsd->nlinfo->_sh_ind[j];
    4479                 :          0 :             t.proc   = gsd->nlinfo->_target[p];
    4480                 :          0 :             t.handle = gsd->nlinfo->_ulabels[j];
    4481         [ #  # ]:          0 :             tuples.push_back( t );
    4482                 :          0 :             j++;
    4483                 :            :         }
    4484                 :            :     }
    4485         [ #  # ]:          0 :     std::sort( tuples.begin(), tuples.end() );
    4486                 :            : 
    4487                 :            :     // Release crystal router stuff
    4488         [ #  # ]:          0 :     gsd->reset();
    4489         [ #  # ]:          0 :     delete gsd;
    4490                 :            : 
    4491                 :            :     // Storing sharing data for each set
    4492                 :          0 :     size_t ti    = 0;
    4493                 :          0 :     unsigned idx = 0;
    4494         [ #  # ]:          0 :     std::vector< unsigned > procs;
    4495         [ #  # ]:          0 :     Range::iterator si = tmp_sets.begin();
    4496 [ #  # ][ #  # ]:          0 :     while( si != tmp_sets.end() && ti < tuples.size() )
         [ #  # ][ #  # ]
                 [ #  # ]
           [ #  #  #  # ]
    4497                 :            :     {
    4498 [ #  # ][ #  # ]:          0 :         assert( idx <= tuples[ti].idx );
    4499 [ #  # ][ #  # ]:          0 :         if( idx < tuples[ti].idx ) si += ( tuples[ti].idx - idx );
         [ #  # ][ #  # ]
    4500         [ #  # ]:          0 :         idx = tuples[ti].idx;
    4501                 :            : 
    4502                 :          0 :         procs.clear();
    4503                 :          0 :         size_t ti_init = ti;
    4504 [ #  # ][ #  # ]:          0 :         while( ti < tuples.size() && tuples[ti].idx == idx )
         [ #  # ][ #  # ]
    4505                 :            :         {
    4506 [ #  # ][ #  # ]:          0 :             procs.push_back( tuples[ti].proc );
    4507                 :          0 :             ++ti;
    4508                 :            :         }
    4509 [ #  # ][ #  # ]:          0 :         assert( is_sorted_unique( procs ) );
    4510                 :            : 
    4511 [ #  # ][ #  # ]:          0 :         result = sharedSetData->set_sharing_procs( *si, procs );
    4512         [ #  # ]:          0 :         if( MB_SUCCESS != result )
    4513                 :            :         {
    4514 [ #  # ][ #  # ]:          0 :             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
                 [ #  # ]
    4515         [ #  # ]:          0 :             std::cerr.flush();
    4516         [ #  # ]:          0 :             MPI_Abort( cm, 1 );
    4517                 :            :         }
    4518                 :            : 
    4519                 :            :         // Add this proc to list of sharing procs in correct position
    4520                 :            :         // so that all procs select owner based on same list
    4521         [ #  # ]:          0 :         std::vector< unsigned >::iterator it = std::lower_bound( procs.begin(), procs.end(), rk );
    4522 [ #  # ][ #  # ]:          0 :         assert( it == procs.end() || *it > rk );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4523         [ #  # ]:          0 :         procs.insert( it, rk );
    4524         [ #  # ]:          0 :         size_t owner_idx = choose_owner_idx( procs );
    4525                 :            :         EntityHandle owner_handle;
    4526 [ #  # ][ #  # ]:          0 :         if( procs[owner_idx] == rk )
    4527         [ #  # ]:          0 :             owner_handle = *si;
    4528 [ #  # ][ #  # ]:          0 :         else if( procs[owner_idx] > rk )
    4529         [ #  # ]:          0 :             owner_handle = tuples[ti_init + owner_idx - 1].handle;
    4530                 :            :         else
    4531         [ #  # ]:          0 :             owner_handle = tuples[ti_init + owner_idx].handle;
    4532 [ #  # ][ #  # ]:          0 :         result = sharedSetData->set_owner( *si, procs[owner_idx], owner_handle );
                 [ #  # ]
    4533         [ #  # ]:          0 :         if( MB_SUCCESS != result )
    4534                 :            :         {
    4535 [ #  # ][ #  # ]:          0 :             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
                 [ #  # ]
    4536         [ #  # ]:          0 :             std::cerr.flush();
    4537         [ #  # ]:          0 :             MPI_Abort( cm, 1 );
    4538                 :            :         }
    4539                 :            : 
    4540         [ #  # ]:          0 :         ++si;
    4541                 :          0 :         ++idx;
    4542                 :            :     }
    4543                 :            : 
    4544                 :          0 :     return MB_SUCCESS;
    4545                 :            : }
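A worked example (assumed values) of the owner-handle lookup above: the tuples describe only the remote copies of a set, so after the local rank rk is inserted into procs, the tuple index must be shifted down by one whenever the chosen owner's rank is greater than rk.

    // Assumed values: rk = 3, tuples for this set came from procs {1, 5}, so after the
    // insertion procs = {1, 3, 5} and ti_init points at the proc-1 tuple.
    //   owner_idx == 0 -> owner is proc 1,    owner_handle = tuples[ti_init + 0].handle  (1 < rk)
    //   owner_idx == 1 -> owner is this rank, owner_handle = *si                         (local set)
    //   owner_idx == 2 -> owner is proc 5,    owner_handle = tuples[ti_init + 1].handle  (5 > rk,
    //                     shifted by one because the local rank contributed no tuple)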
    4546                 :            : // populate sets with ghost entities, if necessary
    4547                 :          0 : ErrorCode ParallelComm::augment_default_sets_with_ghosts( EntityHandle file_set )
    4548                 :            : {
    4549                 :            :     // gather all default sets we are interested in, material, neumann, etc
    4550                 :            :     // we will skip geometry sets, because they are not uniquely identified with their tag value
    4551                 :            :     // maybe we will add another tag, like category
    4552                 :            : 
    4553 [ #  # ][ #  # ]:          0 :     if( procConfig.proc_size() < 2 ) return MB_SUCCESS;  // nothing to augment in a serial run
    4554                 :            :     const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
    4555                 :          0 :                                                  PARALLEL_PARTITION_TAG_NAME };
    4556                 :            : 
    4557                 :          0 :     int num_tags = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
    4558                 :            : 
    4559 [ #  # ][ #  # ]:          0 :     Range* rangeSets = new Range[num_tags];
          [ #  # ][ #  #  #  #  #  # ]
    4560 [ #  # ][ #  # ]:          0 :     Tag* tags        = new Tag[num_tags + 1];  // one extra for global id tag, which is an int, so far
    4561                 :            : 
    4562         [ #  # ]:          0 :     int my_rank   = rank();
    4563 [ #  # ][ #  # ]:          0 :     int** tagVals = new int*[num_tags];
    4564         [ #  # ]:          0 :     for( int i = 0; i < num_tags; i++ )
    4565                 :          0 :         tagVals[i] = NULL;
    4566                 :            :     ErrorCode rval;
    4567                 :            : 
    4568                 :            :     // for each tag, we keep a local map, from the value to the actual set with that value
    4569                 :            :     // we assume that the tag values are unique, for a given set, otherwise we
    4570                 :            :     // do not know to which set to add the entity
    4571                 :            : 
    4572                 :            :     typedef std::map< int, EntityHandle > MVal;
    4573                 :            :     typedef std::map< int, EntityHandle >::iterator itMVal;
    4574 [ #  # ][ #  # ]:          0 :     MVal* localMaps = new MVal[num_tags];
                 [ #  # ]
           [ #  #  #  # ]
    4575                 :            : 
    4576         [ #  # ]:          0 :     for( int i = 0; i < num_tags; i++ )
    4577                 :            :     {
    4578                 :            : 
    4579         [ #  # ]:          0 :         rval = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tags[i], MB_TAG_ANY );
    4580         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) continue;
    4581                 :          0 :         rval = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &( tags[i] ), 0, 1, rangeSets[i],
    4582 [ #  # ][ #  # ]:          0 :                                                      Interface::UNION );MB_CHK_SET_ERR( rval, "can't get sets with a tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4583                 :            : 
    4584 [ #  # ][ #  # ]:          0 :         if( rangeSets[i].size() > 0 )
    4585                 :            :         {
    4586 [ #  # ][ #  # ]:          0 :             tagVals[i] = new int[rangeSets[i].size()];
                 [ #  # ]
    4587                 :            :             // fill up with the tag values
    4588 [ #  # ][ #  # ]:          0 :             rval = mbImpl->tag_get_data( tags[i], rangeSets[i], tagVals[i] );MB_CHK_SET_ERR( rval, "can't get set tag values" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4589                 :            :             // now for inverse mapping:
    4590 [ #  # ][ #  # ]:          0 :             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
    4591                 :            :             {
    4592 [ #  # ][ #  # ]:          0 :                 localMaps[i][tagVals[i][j]] = rangeSets[i][j];
    4593                 :            :             }
    4594                 :            :         }
    4595                 :            :     }
    4596                 :            :     // get the global id tag too
    4597         [ #  # ]:          0 :     tags[num_tags] = mbImpl->globalId_tag();
    4598                 :            : 
    4599         [ #  # ]:          0 :     TupleList remoteEnts;
    4600                 :            :     // Tuple layout, per record: 3 ints + 1 handle:
    4601                 :            :     //   vi[0]  = processor to send to
    4602                 :            :     //   vi[1]  = tag type (0 - material, 1 - dirichlet, 2 - neumann, 3 - partition)
    4603                 :            :     //   vi[2]  = tag value
    4604                 :            :     //   vul[0] = remote handle on that processor
    4605                 :          0 :     int initialSize = (int)sharedEnts.size();  // estimate that on average, each shared ent
    4606                 :            :     // will be sent to one processor, for one tag
    4607                 :            :     // we will actually send only entities that are owned locally, and from those
    4608                 :            :     // only those that do have a special tag (material, neumann, etc)
    4609                 :            :     // if we exceed the capacity, we resize the tuple
    4610         [ #  # ]:          0 :     remoteEnts.initialize( 3, 0, 1, 0, initialSize );
    4611         [ #  # ]:          0 :     remoteEnts.enableWriteAccess();
    4612                 :            : 
    4613                 :            :     // Now, for each owned entity, get the remote handle(s) and proc(s), and check whether it
    4614                 :            :     // belongs to one of the sets; if so, create a tuple and append it
    4615                 :            : 
    4616         [ #  # ]:          0 :     std::set< EntityHandle > own_and_sha;
    4617                 :          0 :     int ir = 0, jr = 0;
    4618 [ #  # ][ #  # ]:          0 :     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
                 [ #  # ]
    4619                 :            :     {
    4620                 :            :         // ghosted entity handle
    4621         [ #  # ]:          0 :         EntityHandle geh = *vit;
    4622 [ #  # ][ #  # ]:          0 :         if( own_and_sha.find( geh ) != own_and_sha.end() )  // already encountered
                 [ #  # ]
    4623                 :          0 :             continue;
    4624                 :            :         int procs[MAX_SHARING_PROCS];
    4625                 :            :         EntityHandle handles[MAX_SHARING_PROCS];
    4626                 :            :         int nprocs;
    4627                 :            :         unsigned char pstat;
    4628 [ #  # ][ #  # ]:          0 :         rval = get_sharing_data( geh, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( rval, "Failed to get sharing data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4629         [ #  # ]:          0 :         if( pstat & PSTATUS_NOT_OWNED ) continue;  // we will send info only for entities that we own
    4630         [ #  # ]:          0 :         own_and_sha.insert( geh );
    4631         [ #  # ]:          0 :         for( int i = 0; i < num_tags; i++ )
    4632                 :            :         {
    4633 [ #  # ][ #  # ]:          0 :             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
    4634                 :            :             {
    4635         [ #  # ]:          0 :                 EntityHandle specialSet = rangeSets[i][j];  // this set has tag i, value tagVals[i][j];
    4636 [ #  # ][ #  # ]:          0 :                 if( mbImpl->contains_entities( specialSet, &geh, 1 ) )
    4637                 :            :                 {
    4638                 :            :                     // this ghosted entity is in a special set, so form the tuple
    4639                 :            :                     // to send to the processors that do not own this
    4640         [ #  # ]:          0 :                     for( int k = 0; k < nprocs; k++ )
    4641                 :            :                     {
    4642         [ #  # ]:          0 :                         if( procs[k] != my_rank )
    4643                 :            :                         {
    4644 [ #  # ][ #  # ]:          0 :                             if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
                 [ #  # ]
    4645                 :            :                             {
    4646                 :            :                                 // resize, so we do not overflow
    4647         [ #  # ]:          0 :                                 int oldSize = remoteEnts.get_max();
    4648                 :            :                                 // increase with 50% the capacity
    4649         [ #  # ]:          0 :                                 remoteEnts.resize( oldSize + oldSize / 2 + 1 );
    4650                 :            :                             }
    4651                 :          0 :                             remoteEnts.vi_wr[ir++]  = procs[k];       // send to proc
    4652                 :          0 :                             remoteEnts.vi_wr[ir++]  = i;              // for the tags [i] (0-3)
    4653                 :          0 :                             remoteEnts.vi_wr[ir++]  = tagVals[i][j];  // actual value of the tag
    4654                 :          0 :                             remoteEnts.vul_wr[jr++] = handles[k];
    4655         [ #  # ]:          0 :                             remoteEnts.inc_n();
    4656                 :            :                         }
    4657                 :            :                     }
    4658                 :            :                 }
    4659                 :            :             }
    4660                 :            :         }
    4661                 :            :         // if the local entity has a global id, send it too, so we avoid
    4662                 :            :         // another "exchange_tags" for global id
    4663                 :            :         int gid;
    4664 [ #  # ][ #  # ]:          0 :         rval = mbImpl->tag_get_data( tags[num_tags], &geh, 1, &gid );MB_CHK_SET_ERR( rval, "Failed to get global id" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4665         [ #  # ]:          0 :         if( gid != 0 )
    4666                 :            :         {
    4667         [ #  # ]:          0 :             for( int k = 0; k < nprocs; k++ )
    4668                 :            :             {
    4669         [ #  # ]:          0 :                 if( procs[k] != my_rank )
    4670                 :            :                 {
    4671 [ #  # ][ #  # ]:          0 :                     if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
                 [ #  # ]
    4672                 :            :                     {
    4673                 :            :                         // resize, so we do not overflow
    4674         [ #  # ]:          0 :                         int oldSize = remoteEnts.get_max();
    4675                 :            :                         // Grow the capacity by 50%
    4676         [ #  # ]:          0 :                         remoteEnts.resize( oldSize + oldSize / 2 + 1 );
    4677                 :            :                     }
    4678                 :          0 :                     remoteEnts.vi_wr[ir++]  = procs[k];  // send to proc
    4679                 :          0 :                     remoteEnts.vi_wr[ir++]  = num_tags;  // for the tags [j] (4)
    4680                 :          0 :                     remoteEnts.vi_wr[ir++]  = gid;       // actual value of the tag
    4681                 :          0 :                     remoteEnts.vul_wr[jr++] = handles[k];
    4682         [ #  # ]:          0 :                     remoteEnts.inc_n();
    4683                 :            :                 }
    4684                 :            :             }
    4685                 :            :         }
    4686                 :            :     }
    4687                 :            : 
    4688                 :            : #ifndef NDEBUG
    4689 [ #  # ][ #  # ]:          0 :     if( my_rank == 1 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 1, before augment routing" );
         [ #  # ][ #  # ]
                 [ #  # ]
    4690 [ #  # ][ #  # ]:          0 :     MPI_Barrier( procConfig.proc_comm() );
    4691         [ #  # ]:          0 :     int sentEnts = remoteEnts.get_n();
    4692 [ #  # ][ #  # ]:          0 :     assert( ( sentEnts == jr ) && ( 3 * sentEnts == ir ) );
    4693                 :            : #endif
    4694                 :            :     // Exchange the info now, routing each tuple to its destination processor
    4695         [ #  # ]:          0 :     gs_data::crystal_data* cd = this->procConfig.crystal_router();
    4696                 :            :     // All communication happens here; no other MPI calls
    4697                 :            :     // Also, this is a collective call
    4698 [ #  # ][ #  # ]:          0 :     rval = cd->gs_transfer( 1, remoteEnts, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4699                 :            : #ifndef NDEBUG
    4700 [ #  # ][ #  # ]:          0 :     if( my_rank == 0 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 0, after augment routing" );
         [ #  # ][ #  # ]
                 [ #  # ]
    4701 [ #  # ][ #  # ]:          0 :     MPI_Barrier( procConfig.proc_comm() );
    4702                 :            : #endif
    4703                 :            : 
    4704                 :            :     // now process the data received from other processors
    4705         [ #  # ]:          0 :     int received = remoteEnts.get_n();
    4706         [ #  # ]:          0 :     for( int i = 0; i < received; i++ )
    4707                 :            :     {
    4708                 :            :         // int from = ents_to_delete.vi_rd[i];
    4709                 :          0 :         EntityHandle geh = (EntityHandle)remoteEnts.vul_rd[i];
    4710                 :          0 :         int from_proc    = remoteEnts.vi_rd[3 * i];
    4711         [ #  # ]:          0 :         if( my_rank == from_proc )
    4712 [ #  # ][ #  # ]:          0 :             std::cout << " unexpected receive from my rank " << my_rank << " during augmenting with ghosts\n ";
                 [ #  # ]
    4713                 :          0 :         int tag_type = remoteEnts.vi_rd[3 * i + 1];
    4714 [ #  # ][ #  # ]:          0 :         assert( ( 0 <= tag_type ) && ( tag_type <= num_tags ) );
    4715                 :          0 :         int value = remoteEnts.vi_rd[3 * i + 2];
    4716         [ #  # ]:          0 :         if( tag_type == num_tags )
    4717                 :            :         {
    4718                 :            :             // It is the global id
    4719 [ #  # ][ #  # ]:          0 :             rval = mbImpl->tag_set_data( tags[num_tags], &geh, 1, &value );MB_CHK_SET_ERR( rval, "Error in setting gid tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4720                 :            :         }
    4721                 :            :         else
    4722                 :            :         {
    4723                 :            :             // now, based on value and tag type, see if we have that value in the map
    4724                 :          0 :             MVal& lmap = localMaps[tag_type];
    4725         [ #  # ]:          0 :             itMVal itm = lmap.find( value );
    4726 [ #  # ][ #  # ]:          0 :             if( itm == lmap.end() )
    4727                 :            :             {
    4728                 :            :                 // the value was not found yet in the local map, so we have to create the set
    4729                 :            :                 EntityHandle newSet;
    4730 [ #  # ][ #  # ]:          0 :                 rval = mbImpl->create_meshset( MESHSET_SET, newSet );MB_CHK_SET_ERR( rval, "can't create new set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4731         [ #  # ]:          0 :                 lmap[value] = newSet;
    4732                 :            :                 // set the tag value
    4733 [ #  # ][ #  # ]:          0 :                 rval = mbImpl->tag_set_data( tags[tag_type], &newSet, 1, &value );MB_CHK_SET_ERR( rval, "can't set tag for new set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4734                 :            : 
    4735                 :            :                 // We also need to add the newly created set to the file set, if it is not null
    4736         [ #  # ]:          0 :                 if( file_set )
    4737                 :            :                 {
    4738 [ #  # ][ #  # ]:          0 :                     rval = mbImpl->add_entities( file_set, &newSet, 1 );MB_CHK_SET_ERR( rval, "can't add new set to the file set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4739                 :            :                 }
    4740                 :            :             }
    4741                 :            :             // add the entity to the set pointed to by the map
    4742 [ #  # ][ #  # ]:          0 :             rval = mbImpl->add_entities( lmap[value], &geh, 1 );MB_CHK_SET_ERR( rval, "can't add ghost ent to the set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4743                 :            :         }
    4744                 :            :     }
    4745                 :            : 
    4746         [ #  # ]:          0 :     for( int i = 0; i < num_tags; i++ )
    4747         [ #  # ]:          0 :         delete[] tagVals[i];
    4748         [ #  # ]:          0 :     delete[] tagVals;
    4749 [ #  # ][ #  # ]:          0 :     delete[] rangeSets;
    4750         [ #  # ]:          0 :     delete[] tags;
    4751 [ #  # ][ #  # ]:          0 :     delete[] localMaps;
    4752         [ #  # ]:          0 :     return MB_SUCCESS;
    4753                 :            : }
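
The routine above follows a reusable pattern: pack (destination rank, payload) tuples into a TupleList, grow the list when it nears capacity, route everything with one collective crystal-router call, then read back whatever other ranks addressed to this one. The sketch below shows only that pattern; route_handles is a hypothetical member added for illustration (so procConfig is in scope), and it assumes tl was initialized earlier with one int and one entity handle per tuple.

    // Minimal sketch of the fill / route / read pattern; not part of ParallelComm.
    ErrorCode ParallelComm::route_handles( TupleList& tl,
                                           const std::vector< std::pair< int, EntityHandle > >& outgoing )
    {
        for( size_t p = 0; p < outgoing.size(); p++ )
        {
            if( tl.get_n() >= tl.get_max() - 1 )
            {
                // Grow by roughly 50% so repeated pushes stay cheap
                int old_size = tl.get_max();
                tl.resize( old_size + old_size / 2 + 1 );
            }
            unsigned int n = tl.get_n();
            tl.vi_wr[n]    = outgoing[p].first;   // destination rank; the router rewrites it to the sender rank
            tl.vul_wr[n]   = outgoing[p].second;  // payload: an entity handle
            tl.inc_n();
        }
        // Collective call: every rank must participate; afterwards tl holds the tuples sent to this rank
        gs_data::crystal_data* cd = procConfig.crystal_router();
        ErrorCode rval            = cd->gs_transfer( 1, tl, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
        for( unsigned int t = 0; t < tl.get_n(); t++ )
        {
            int from_rank       = tl.vi_rd[t];                 // rank that sent this tuple
            EntityHandle handle = (EntityHandle)tl.vul_rd[t];  // handle as packed by that rank
            (void)from_rank;
            (void)handle;  // a real caller would act on the tuple here
        }
        return MB_SUCCESS;
    }
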
    4754                 :          0 : ErrorCode ParallelComm::create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim )
    4755                 :            : {
    4756         [ #  # ]:          0 :     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
    4757                 :            : 
    4758                 :            :     // Build up the list of shared entities
    4759                 :            :     int procs[MAX_SHARING_PROCS];
    4760                 :            :     EntityHandle handles[MAX_SHARING_PROCS];
    4761                 :            :     ErrorCode result;
    4762                 :            :     int nprocs;
    4763                 :            :     unsigned char pstat;
    4764 [ #  # ][ #  # ]:          0 :     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
           [ #  #  #  # ]
    4765                 :            :     {
    4766 [ #  # ][ #  # ]:          0 :         if( shared_dim != -1 && mbImpl->dimension_from_handle( *vit ) > shared_dim ) continue;
         [ #  # ][ #  # ]
                 [ #  # ]
    4767 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4768         [ #  # ]:          0 :         std::sort( procs, procs + nprocs );
    4769         [ #  # ]:          0 :         std::vector< int > tmp_procs( procs, procs + nprocs );
    4770         [ #  # ]:          0 :         assert( tmp_procs.size() != 2 );
    4771 [ #  # ][ #  # ]:          0 :         proc_nvecs[tmp_procs].push_back( *vit );
                 [ #  # ]
    4772                 :          0 :     }
    4773                 :            : 
    4774         [ #  # ]:          0 :     Skinner skinner( mbImpl );
    4775 [ #  # ][ #  # ]:          0 :     Range skin_ents[4];
                 [ #  # ]
           [ #  #  #  # ]
    4776 [ #  # ][ #  # ]:          0 :     result = mbImpl->get_entities_by_dimension( this_set, resolve_dim, skin_ents[resolve_dim] );MB_CHK_SET_ERR( result, "Failed to get skin entities by dimension" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4777                 :            :     result =
    4778 [ #  # ][ #  # ]:          0 :         skinner.find_skin( this_set, skin_ents[resolve_dim], false, skin_ents[resolve_dim - 1], 0, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4779         [ #  # ]:          0 :     if( shared_dim > 1 )
    4780                 :            :     {
    4781                 :          0 :         result = mbImpl->get_adjacencies( skin_ents[resolve_dim - 1], resolve_dim - 2, true, skin_ents[resolve_dim - 2],
    4782 [ #  # ][ #  # ]:          0 :                                           Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4783                 :            :     }
    4784                 :            : 
    4785         [ #  # ]:          0 :     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );
    4786                 :            : 
    4787         [ #  # ]:          0 :     return create_interface_sets( proc_nvecs );
    4788                 :            : }
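
The skin-building sequence in this routine (entities of the resolve dimension, then their skin, then lower-dimensional adjacencies) is the standard way interface candidates are gathered. Below is a small, hedged sketch of that sequence using only calls that appear above; mesh_set is a placeholder handle, and the snippet is assumed to run where mbImpl is in scope.

    // Sketch: gather the boundary ("skin") faces and edges of the 3D entities in a set.
    Range cells, skin_faces, skin_edges;
    ErrorCode rval = mbImpl->get_entities_by_dimension( mesh_set, 3, cells );MB_CHK_SET_ERR( rval, "Failed to get 3D entities" );
    Skinner skinner( mbImpl );
    rval = skinner.find_skin( mesh_set, cells, false, skin_faces, 0, true, true, true );MB_CHK_SET_ERR( rval, "Failed to find skin" );
    // Edges bounding the skin faces; created on demand (third argument) and merged with UNION
    rval = mbImpl->get_adjacencies( skin_faces, 1, true, skin_edges, Interface::UNION );MB_CHK_SET_ERR( rval, "Failed to get skin edges" );
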
    4789                 :            : 
    4790                 :          1 : ErrorCode ParallelComm::create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
    4791                 :            : {
    4792         [ +  - ]:          1 :     if( proc_nvecs.empty() ) return MB_SUCCESS;
    4793                 :            : 
    4794                 :            :     int proc_ids[MAX_SHARING_PROCS];
    4795                 :            :     EntityHandle proc_handles[MAX_SHARING_PROCS];
    4796                 :            :     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
    4797 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in create_interface_sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4798         [ #  # ]:          0 :     Range::iterator rit;
    4799                 :            : 
    4800                 :            :     // Create interface sets, tag them, and tag their contents with iface set tag
    4801         [ #  # ]:          0 :     std::vector< unsigned char > pstatus;
    4802 [ #  # ][ #  # ]:          0 :     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator vit = proc_nvecs.begin();
                 [ #  # ]
    4803                 :          0 :          vit != proc_nvecs.end(); ++vit )
    4804                 :            :     {
    4805                 :            :         // Create the set
    4806                 :            :         EntityHandle new_set;
    4807 [ #  # ][ #  # ]:          0 :         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4808         [ #  # ]:          0 :         interfaceSets.insert( new_set );
    4809                 :            : 
    4810                 :            :         // Add entities
    4811 [ #  # ][ #  # ]:          0 :         assert( !vit->second.empty() );
    4812 [ #  # ][ #  # ]:          0 :         result = mbImpl->add_entities( new_set, &( vit->second )[0], ( vit->second ).size() );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4813                 :            :         // Tag set with the proc rank(s)
    4814 [ #  # ][ #  # ]:          0 :         if( vit->first.size() == 1 )
    4815                 :            :         {
    4816 [ #  # ][ #  # ]:          0 :             assert( ( vit->first )[0] != (int)procConfig.proc_rank() );
         [ #  # ][ #  # ]
    4817 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shp_tag, &new_set, 1, &( vit->first )[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4818                 :          0 :             proc_handles[0] = 0;
    4819 [ #  # ][ #  # ]:          0 :             result          = mbImpl->tag_set_data( shh_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4820                 :            :         }
    4821                 :            :         else
    4822                 :            :         {
    4823                 :            :             // Pad tag data out to MAX_SHARING_PROCS with -1
    4824 [ #  # ][ #  # ]:          0 :             if( vit->first.size() > MAX_SHARING_PROCS )
    4825                 :            :             {
    4826 [ #  # ][ #  # ]:          0 :                 std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_set ) )
         [ #  # ][ #  # ]
    4827 [ #  # ][ #  # ]:          0 :                           << ' ' << ID_FROM_HANDLE( new_set ) << " on process " << proc_config().proc_rank()
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4828         [ #  # ]:          0 :                           << std::endl;
    4829         [ #  # ]:          0 :                 std::cerr.flush();
    4830 [ #  # ][ #  # ]:          0 :                 MPI_Abort( proc_config().proc_comm(), 66 );
                 [ #  # ]
    4831                 :            :             }
    4832                 :            :             // assert(vit->first.size() <= MAX_SHARING_PROCS);
    4833 [ #  # ][ #  # ]:          0 :             std::copy( vit->first.begin(), vit->first.end(), proc_ids );
                 [ #  # ]
    4834 [ #  # ][ #  # ]:          0 :             std::fill( proc_ids + vit->first.size(), proc_ids + MAX_SHARING_PROCS, -1 );
    4835 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shps_tag, &new_set, 1, proc_ids );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4836 [ #  # ][ #  # ]:          0 :             unsigned int ind = std::find( proc_ids, proc_ids + vit->first.size(), procConfig.proc_rank() ) - proc_ids;
                 [ #  # ]
    4837 [ #  # ][ #  # ]:          0 :             assert( ind < vit->first.size() );
    4838         [ #  # ]:          0 :             std::fill( proc_handles, proc_handles + MAX_SHARING_PROCS, 0 );
    4839                 :          0 :             proc_handles[ind] = new_set;
    4840 [ #  # ][ #  # ]:          0 :             result            = mbImpl->tag_set_data( shhs_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4841                 :            :         }
    4842                 :            : 
    4843                 :            :         // Get the owning proc, then set the pstatus tag on iface set
    4844 [ #  # ][ #  # ]:          0 :         int min_proc       = ( vit->first )[0];
    4845                 :          0 :         unsigned char pval = ( PSTATUS_SHARED | PSTATUS_INTERFACE );
    4846 [ #  # ][ #  # ]:          0 :         if( min_proc < (int)procConfig.proc_rank() ) pval |= PSTATUS_NOT_OWNED;
    4847 [ #  # ][ #  # ]:          0 :         if( vit->first.size() > 1 ) pval |= PSTATUS_MULTISHARED;
    4848 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( pstat_tag, &new_set, 1, &pval );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4849                 :            : 
    4850                 :            :         // Tag the contained vertices with the same pstatus value
    4851                 :          0 :         pstatus.clear();
    4852         [ #  # ]:          0 :         std::vector< EntityHandle > verts;
    4853 [ #  # ][ #  # ]:          0 :         for( std::vector< EntityHandle >::iterator v2it = ( vit->second ).begin(); v2it != ( vit->second ).end();
         [ #  # ][ #  # ]
                 [ #  # ]
    4854                 :            :              ++v2it )
    4855 [ #  # ][ #  # ]:          0 :             if( mbImpl->type_from_handle( *v2it ) == MBVERTEX ) verts.push_back( *v2it );
         [ #  # ][ #  # ]
                 [ #  # ]
    4856         [ #  # ]:          0 :         pstatus.resize( verts.size(), pval );
    4857         [ #  # ]:          0 :         if( !verts.empty() )
    4858                 :            :         {
    4859 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( pstat_tag, &verts[0], verts.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set vertices with pstatus" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4860                 :            :         }
    4861                 :          0 :     }
    4862                 :            : 
    4863                 :          1 :     return MB_SUCCESS;
    4864                 :            : }
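
The pstatus value written above is a plain bit mask, so ownership and sharing questions on any entity reduce to bitwise tests against the PSTATUS_* constants. A small hedged illustration follows; the helper name is made up, and it assumes pcomm->pstatus_tag() is accessible, as it is elsewhere in this file.

    // Hypothetical helper: true if the entity lies on an interface, is shared with exactly
    // one other rank, and is owned by the local rank, judged only from its pstatus byte.
    bool is_singly_shared_interface_owner( Interface* mb, ParallelComm* pcomm, EntityHandle ent )
    {
        unsigned char pstat = 0;
        if( MB_SUCCESS != mb->tag_get_data( pcomm->pstatus_tag(), &ent, 1, &pstat ) ) return false;
        return ( pstat & PSTATUS_INTERFACE ) && ( pstat & PSTATUS_SHARED ) &&
               !( pstat & PSTATUS_NOT_OWNED ) && !( pstat & PSTATUS_MULTISHARED );
    }
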
    4865                 :            : 
    4866                 :          0 : ErrorCode ParallelComm::create_iface_pc_links()
    4867                 :            : {
    4868                 :            :     // Now that we've resolved the entities in the iface sets,
    4869                 :            :     // set parent/child links between the iface sets
    4870                 :            : 
    4871                 :            :     // First tag all entities in the iface sets
    4872                 :            :     Tag tmp_iface_tag;
    4873                 :          0 :     EntityHandle tmp_iface_set = 0;
    4874                 :            :     ErrorCode result           = mbImpl->tag_get_handle( "__tmp_iface", 1, MB_TYPE_HANDLE, tmp_iface_tag,
    4875 [ #  # ][ #  # ]:          0 :                                                MB_TAG_DENSE | MB_TAG_CREAT, &tmp_iface_set );MB_CHK_SET_ERR( result, "Failed to create temporary interface set tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4876                 :            : 
    4877         [ #  # ]:          0 :     Range iface_ents;
    4878         [ #  # ]:          0 :     std::vector< EntityHandle > tag_vals;
    4879         [ #  # ]:          0 :     Range::iterator rit;
    4880                 :            : 
    4881 [ #  # ][ #  # ]:          0 :     for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    4882                 :            :     {
    4883                 :            :         // tag entities with interface set
    4884         [ #  # ]:          0 :         iface_ents.clear();
    4885 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4886                 :            : 
    4887 [ #  # ][ #  # ]:          0 :         if( iface_ents.empty() ) continue;
    4888                 :            : 
    4889 [ #  # ][ #  # ]:          0 :         tag_vals.resize( iface_ents.size() );
    4890 [ #  # ][ #  # ]:          0 :         std::fill( tag_vals.begin(), tag_vals.end(), *rit );
    4891 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( tmp_iface_tag, iface_ents, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to tag iface entities with interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4892                 :            :     }
    4893                 :            : 
    4894                 :            :     // Now go back through interface sets and add parent/child links
    4895         [ #  # ]:          0 :     Range tmp_ents2;
    4896         [ #  # ]:          0 :     for( int d = 2; d >= 0; d-- )
    4897                 :            :     {
    4898 [ #  # ][ #  # ]:          0 :         for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    4899                 :            :         {
    4900                 :            :             // Get entities on this interface
    4901         [ #  # ]:          0 :             iface_ents.clear();
    4902 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_handle( *rit, iface_ents, true );MB_CHK_SET_ERR( result, "Failed to get entities by handle" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4903 [ #  # ][ #  # ]:          0 :             if( iface_ents.empty() || mbImpl->dimension_from_handle( *iface_ents.rbegin() ) != d ) continue;
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4904                 :            : 
    4905                 :            :             // Get higher-dimensional entities and their interface sets
    4906 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_adjacencies( &( *iface_ents.begin() ), 1, d + 1, false, tmp_ents2 );MB_CHK_SET_ERR( result, "Failed to get adjacencies for interface sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4907 [ #  # ][ #  # ]:          0 :             tag_vals.resize( tmp_ents2.size() );
    4908 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( tmp_iface_tag, tmp_ents2, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to get tmp iface tag for interface sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4909                 :            : 
    4910                 :            :             // Go through and for any on interface make it a parent
    4911                 :          0 :             EntityHandle last_set = 0;
    4912         [ #  # ]:          0 :             for( unsigned int i = 0; i < tag_vals.size(); i++ )
    4913                 :            :             {
    4914 [ #  # ][ #  # ]:          0 :                 if( tag_vals[i] && tag_vals[i] != last_set )
         [ #  # ][ #  # ]
                 [ #  # ]
    4915                 :            :                 {
    4916 [ #  # ][ #  # ]:          0 :                     result = mbImpl->add_parent_child( tag_vals[i], *rit );MB_CHK_SET_ERR( result, "Failed to add parent/child link for interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4917         [ #  # ]:          0 :                     last_set = tag_vals[i];
    4918                 :            :                 }
    4919                 :            :             }
    4920                 :            :         }
    4921                 :            :     }
    4922                 :            : 
    4923                 :            :     // Delete the temporary tag
    4924 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_delete( tmp_iface_tag );MB_CHK_SET_ERR( result, "Failed to delete tmp iface tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4925                 :            : 
    4926                 :          0 :     return MB_SUCCESS;
    4927                 :            : }
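
create_iface_pc_links leans on a throwaway dense tag to map each interface entity back to its set, deleting the tag once the parent/child links are in place. That create/use/delete pattern is sketched below on its own; the function name and arguments are hypothetical, and only Interface calls already used above appear.

    // Sketch of the temporary dense-tag pattern: create a handle-valued tag with a zero
    // default, mark some entities with it, then remove the tag when done.
    ErrorCode mark_with_temporary_tag( Interface* mb, const Range& ents, EntityHandle marker )
    {
        Tag tmp_tag;
        EntityHandle def_val = 0;  // entities that are never tagged read back as 0
        ErrorCode rval       = mb->tag_get_handle( "__tmp_marker", 1, MB_TYPE_HANDLE, tmp_tag,
                                                   MB_TAG_DENSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Failed to create temporary tag" );
        if( !ents.empty() )
        {
            std::vector< EntityHandle > vals( ents.size(), marker );
            rval = mb->tag_set_data( tmp_tag, ents, &vals[0] );MB_CHK_SET_ERR( rval, "Failed to set temporary tag" );
        }
        // ... query tmp_tag on adjacent entities here, exactly as the routine above does ...
        rval = mb->tag_delete( tmp_tag );MB_CHK_SET_ERR( rval, "Failed to delete temporary tag" );
        return MB_SUCCESS;
    }
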
    4928                 :            : 
    4929                 :          0 : ErrorCode ParallelComm::get_proc_nvecs( int resolve_dim, int shared_dim, Range* skin_ents,
    4930                 :            :                                         std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
    4931                 :            : {
    4932                 :            :     // Set sharing procs tags on other skin ents
    4933                 :            :     ErrorCode result;
    4934                 :            :     const EntityHandle* connect;
    4935                 :            :     int num_connect;
    4936         [ #  # ]:          0 :     std::set< int > sharing_procs;
    4937         [ #  # ]:          0 :     std::vector< EntityHandle > dum_connect;
    4938         [ #  # ]:          0 :     std::vector< int > sp_vec;
    4939                 :            : 
    4940         [ #  # ]:          0 :     for( int d = 3; d > 0; d-- )
    4941                 :            :     {
    4942         [ #  # ]:          0 :         if( resolve_dim == d ) continue;
    4943                 :            : 
    4944 [ #  # ][ #  # ]:          0 :         for( Range::iterator rit = skin_ents[d].begin(); rit != skin_ents[d].end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    4945                 :            :         {
    4946                 :            :             // Get connectivity
    4947 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect );MB_CHK_SET_ERR( result, "Failed to get connectivity on non-vertex skin entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    4948                 :            : 
    4949         [ #  # ]:          0 :             int op = ( resolve_dim < shared_dim ? Interface::UNION : Interface::INTERSECT );
    4950 [ #  # ][ #  # ]:          0 :             result = get_sharing_data( connect, num_connect, sharing_procs, op );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_proc_nvecs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4951   [ #  #  #  # ]:          0 :             if( sharing_procs.empty() ||
                 [ #  # ]
    4952 [ #  # ][ #  # ]:          0 :                 ( sharing_procs.size() == 1 && *sharing_procs.begin() == (int)procConfig.proc_rank() ) )
         [ #  # ][ #  # ]
                 [ #  # ]
    4953                 :          0 :                 continue;
    4954                 :            : 
    4955                 :            :             // Need to specify sharing data correctly for entities or they will
    4956                 :            :             // end up in a different interface set than corresponding vertices
    4957         [ #  # ]:          0 :             if( sharing_procs.size() == 2 )
    4958                 :            :             {
    4959 [ #  # ][ #  # ]:          0 :                 std::set< int >::iterator it = sharing_procs.find( proc_config().proc_rank() );
                 [ #  # ]
    4960 [ #  # ][ #  # ]:          0 :                 assert( it != sharing_procs.end() );
    4961         [ #  # ]:          0 :                 sharing_procs.erase( it );
    4962                 :            :             }
    4963                 :            : 
    4964                 :            :             // Intersection is the owning proc(s) for this skin ent
    4965                 :          0 :             sp_vec.clear();
    4966 [ #  # ][ #  # ]:          0 :             std::copy( sharing_procs.begin(), sharing_procs.end(), std::back_inserter( sp_vec ) );
    4967         [ #  # ]:          0 :             assert( sp_vec.size() != 2 );
    4968 [ #  # ][ #  # ]:          0 :             proc_nvecs[sp_vec].push_back( *rit );
                 [ #  # ]
    4969                 :            :         }
    4970                 :            :     }
    4971                 :            : 
    4972                 :            : #ifndef NDEBUG
    4973                 :            :     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
    4974   [ #  #  #  # ]:          0 :     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
                 [ #  # ]
    4975                 :          0 :          mit != proc_nvecs.end(); ++mit )
    4976                 :            :     {
    4977 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > tmp_vec = ( mit->second );
    4978         [ #  # ]:          0 :         std::sort( tmp_vec.begin(), tmp_vec.end() );
    4979         [ #  # ]:          0 :         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
    4980 [ #  # ][ #  # ]:          0 :         assert( vit == tmp_vec.end() );
    4981                 :          0 :     }
    4982                 :            : #endif
    4983                 :            : 
    4984                 :          0 :     return MB_SUCCESS;
    4985                 :            : }
    4986                 :            : 
    4987                 :            : // Overloaded form of tag_shared_verts
    4988                 :            : // Tuple coming in is of form (arbitrary value, remoteProc, localHandle, remoteHandle)
    4989                 :            : // Also will check for doubles in the list if the list is sorted
    4990                 :          0 : ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents,
    4991                 :            :                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
    4992                 :            :                                           Range& /*proc_verts*/, unsigned int i_extra )
    4993                 :            : {
    4994                 :            :     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
    4995 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    4996                 :            : 
    4997                 :          0 :     unsigned int j = 0, i = 0;
    4998 [ #  # ][ #  # ]:          0 :     std::vector< int > sharing_procs, sharing_procs2, tag_procs;
                 [ #  # ]
    4999 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > sharing_handles, sharing_handles2, tag_lhandles, tag_rhandles;
         [ #  # ][ #  # ]
    5000         [ #  # ]:          0 :     std::vector< unsigned char > pstatus;
    5001                 :            : 
    5002                 :            :     // Were on tuple j/2
    5003         [ #  # ]:          0 :     if( i_extra ) i += i_extra;
    5004 [ #  # ][ #  # ]:          0 :     while( j < 2 * shared_ents.get_n() )
    5005                 :            :     {
    5006                 :            :         // Count & accumulate sharing procs
    5007                 :          0 :         EntityHandle this_ent = shared_ents.vul_rd[j], other_ent = 0;
    5008                 :          0 :         int other_proc = -1;
    5009 [ #  # ][ #  # ]:          0 :         while( j < 2 * shared_ents.get_n() && shared_ents.vul_rd[j] == this_ent )
         [ #  # ][ #  # ]
    5010                 :            :         {
    5011                 :          0 :             j++;
    5012                 :            :             // Shouldn't have same proc
    5013 [ #  # ][ #  # ]:          0 :             assert( shared_ents.vi_rd[i] != (int)procConfig.proc_rank() );
    5014                 :            :             // Grab the remote data if it's not a duplicate
    5015 [ #  # ][ #  # ]:          0 :             if( shared_ents.vul_rd[j] != other_ent || shared_ents.vi_rd[i] != other_proc )
    5016                 :            :             {
    5017         [ #  # ]:          0 :                 assert( 0 != shared_ents.vul_rd[j] );
    5018         [ #  # ]:          0 :                 sharing_procs.push_back( shared_ents.vi_rd[i] );
    5019         [ #  # ]:          0 :                 sharing_handles.push_back( shared_ents.vul_rd[j] );
    5020                 :            :             }
    5021                 :          0 :             other_proc = shared_ents.vi_rd[i];
    5022                 :          0 :             other_ent  = shared_ents.vul_rd[j];
    5023                 :          0 :             j++;
    5024                 :          0 :             i += 1 + i_extra;
    5025                 :            :         }
    5026                 :            : 
    5027         [ #  # ]:          0 :         if( sharing_procs.size() > 1 )
    5028                 :            :         {
    5029                 :            :             // Add current proc/handle to list
    5030 [ #  # ][ #  # ]:          0 :             sharing_procs.push_back( procConfig.proc_rank() );
    5031         [ #  # ]:          0 :             sharing_handles.push_back( this_ent );
    5032                 :            : 
    5033                 :            :             // Sort sharing_procs and sharing_handles such that
    5034                 :            :             // sharing_procs is in ascending order. Use temporary
    5035                 :            :             // lists and binary search to re-order sharing_handles.
    5036         [ #  # ]:          0 :             sharing_procs2 = sharing_procs;
    5037         [ #  # ]:          0 :             std::sort( sharing_procs2.begin(), sharing_procs2.end() );
    5038         [ #  # ]:          0 :             sharing_handles2.resize( sharing_handles.size() );
    5039         [ #  # ]:          0 :             for( size_t k = 0; k < sharing_handles.size(); k++ )
    5040                 :            :             {
    5041 [ #  # ][ #  # ]:          0 :                 size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
                 [ #  # ]
    5042                 :          0 :                              sharing_procs2.begin();
    5043 [ #  # ][ #  # ]:          0 :                 sharing_handles2[idx] = sharing_handles[k];
    5044                 :            :             }
    5045                 :          0 :             sharing_procs.swap( sharing_procs2 );
    5046                 :          0 :             sharing_handles.swap( sharing_handles2 );
    5047                 :            :         }
    5048                 :            : 
    5049         [ #  # ]:          0 :         assert( sharing_procs.size() != 2 );
    5050 [ #  # ][ #  # ]:          0 :         proc_nvecs[sharing_procs].push_back( this_ent );
    5051                 :            : 
    5052                 :          0 :         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
    5053         [ #  # ]:          0 :         if( sharing_procs.size() == 1 )
    5054                 :            :         {
    5055 [ #  # ][ #  # ]:          0 :             tag_procs.push_back( sharing_procs[0] );
    5056         [ #  # ]:          0 :             tag_lhandles.push_back( this_ent );
    5057 [ #  # ][ #  # ]:          0 :             tag_rhandles.push_back( sharing_handles[0] );
    5058         [ #  # ]:          0 :             pstatus.push_back( share_flag );
    5059                 :            :         }
    5060                 :            :         else
    5061                 :            :         {
    5062                 :            :             // Pad lists
    5063                 :            :             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
    5064         [ #  # ]:          0 :             if( sharing_procs.size() > MAX_SHARING_PROCS )
    5065                 :            :             {
    5066 [ #  # ][ #  # ]:          0 :                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
                 [ #  # ]
    5067 [ #  # ][ #  # ]:          0 :                           << proc_config().proc_rank() << std::endl;
         [ #  # ][ #  # ]
    5068         [ #  # ]:          0 :                 std::cerr.flush();
    5069 [ #  # ][ #  # ]:          0 :                 MPI_Abort( proc_config().proc_comm(), 66 );
                 [ #  # ]
    5070                 :            :             }
    5071         [ #  # ]:          0 :             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
    5072         [ #  # ]:          0 :             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
    5073 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5074 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5075 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5076         [ #  # ]:          0 :             sharedEnts.insert( this_ent );
    5077                 :            :         }
    5078                 :            : 
    5079                 :            :         // Reset the sharing proc/handle lists for the next entity
    5080                 :          0 :         sharing_procs.clear();
    5081                 :          0 :         sharing_handles.clear();
    5082                 :            :     }
    5083                 :            : 
    5084         [ #  # ]:          0 :     if( !tag_procs.empty() )
    5085                 :            :     {
    5086 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( shp_tag, &tag_lhandles[0], tag_procs.size(), &tag_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5087 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( shh_tag, &tag_lhandles[0], tag_procs.size(), &tag_rhandles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5088 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( pstat_tag, &tag_lhandles[0], tag_procs.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5089 [ #  # ][ #  # ]:          0 :         for( std::vector< EntityHandle >::iterator vvt = tag_lhandles.begin(); vvt != tag_lhandles.end(); vvt++ )
                 [ #  # ]
    5090 [ #  # ][ #  # ]:          0 :             sharedEnts.insert( *vvt );
    5091                 :            :     }
    5092                 :            : 
    5093                 :            : #ifndef NDEBUG
    5094                 :            :     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
    5095   [ #  #  #  # ]:          0 :     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
                 [ #  # ]
    5096                 :          0 :          mit != proc_nvecs.end(); ++mit )
    5097                 :            :     {
    5098 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > tmp_vec = ( mit->second );
    5099         [ #  # ]:          0 :         std::sort( tmp_vec.begin(), tmp_vec.end() );
    5100         [ #  # ]:          0 :         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
    5101 [ #  # ][ #  # ]:          0 :         assert( vit == tmp_vec.end() );
    5102                 :          0 :     }
    5103                 :            : #endif
    5104                 :            : 
    5105                 :          0 :     return MB_SUCCESS;
    5106                 :            : }
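
The binary-search re-ordering above preserves one invariant worth stating explicitly: after the swap, sharing_handles[k] still belongs to sharing_procs[k]. A tiny self-contained illustration with made-up values:

    // Illustration only: ranks {7, 2, 5} with handles {70, 20, 50} end up as
    // ranks {2, 5, 7} with handles {20, 50, 70}, so each handle follows its rank.
    std::vector< int > procs( { 7, 2, 5 } );
    std::vector< EntityHandle > hdls( { 70, 20, 50 } );
    std::vector< int > procs_sorted = procs;
    std::sort( procs_sorted.begin(), procs_sorted.end() );
    std::vector< EntityHandle > hdls_sorted( hdls.size() );
    for( size_t k = 0; k < hdls.size(); k++ )
    {
        size_t idx       = std::lower_bound( procs_sorted.begin(), procs_sorted.end(), procs[k] ) - procs_sorted.begin();
        hdls_sorted[idx] = hdls[k];
    }
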
    5107                 :            : 
    5108                 :          0 : ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents, Range* skin_ents,
    5109                 :            :                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
    5110                 :            :                                           Range& /*proc_verts*/ )
    5111                 :            : {
    5112                 :            :     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
    5113 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5114                 :            : 
    5115                 :          0 :     unsigned int j = 0, i = 0;
    5116 [ #  # ][ #  # ]:          0 :     std::vector< int > sharing_procs, sharing_procs2;
    5117 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > sharing_handles, sharing_handles2, skin_verts( skin_ents[0].size() );
         [ #  # ][ #  # ]
    5118 [ #  # ][ #  # ]:          0 :     for( Range::iterator rit = skin_ents[0].begin(); rit != skin_ents[0].end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    5119 [ #  # ][ #  # ]:          0 :         skin_verts[i] = *rit;
    5120                 :          0 :     i = 0;
    5121                 :            : 
    5122 [ #  # ][ #  # ]:          0 :     while( j < 2 * shared_ents.get_n() )
    5123                 :            :     {
    5124                 :            :         // Count & accumulate sharing procs
    5125                 :          0 :         int this_idx          = shared_ents.vi_rd[j];
    5126         [ #  # ]:          0 :         EntityHandle this_ent = skin_verts[this_idx];
    5127 [ #  # ][ #  # ]:          0 :         while( j < 2 * shared_ents.get_n() && shared_ents.vi_rd[j] == this_idx )
         [ #  # ][ #  # ]
    5128                 :            :         {
    5129                 :          0 :             j++;
    5130                 :            :             // Shouldn't have same proc
    5131 [ #  # ][ #  # ]:          0 :             assert( shared_ents.vi_rd[j] != (int)procConfig.proc_rank() );
    5132         [ #  # ]:          0 :             sharing_procs.push_back( shared_ents.vi_rd[j++] );
    5133         [ #  # ]:          0 :             sharing_handles.push_back( shared_ents.vul_rd[i++] );
    5134                 :            :         }
    5135                 :            : 
    5136         [ #  # ]:          0 :         if( sharing_procs.size() > 1 )
    5137                 :            :         {
    5138                 :            :             // Add current proc/handle to list
    5139 [ #  # ][ #  # ]:          0 :             sharing_procs.push_back( procConfig.proc_rank() );
    5140         [ #  # ]:          0 :             sharing_handles.push_back( this_ent );
    5141                 :            :         }
    5142                 :            : 
    5143                 :            :         // Sort sharing_procs and sharing_handles such that
    5144                 :            :         // sharing_procs is in ascending order. Use temporary
    5145                 :            :         // lists and binary search to re-order sharing_handles.
    5146         [ #  # ]:          0 :         sharing_procs2 = sharing_procs;
    5147         [ #  # ]:          0 :         std::sort( sharing_procs2.begin(), sharing_procs2.end() );
    5148         [ #  # ]:          0 :         sharing_handles2.resize( sharing_handles.size() );
    5149         [ #  # ]:          0 :         for( size_t k = 0; k < sharing_handles.size(); k++ )
    5150                 :            :         {
    5151 [ #  # ][ #  # ]:          0 :             size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
                 [ #  # ]
    5152                 :          0 :                          sharing_procs2.begin();
    5153 [ #  # ][ #  # ]:          0 :             sharing_handles2[idx] = sharing_handles[k];
    5154                 :            :         }
    5155                 :          0 :         sharing_procs.swap( sharing_procs2 );
    5156                 :          0 :         sharing_handles.swap( sharing_handles2 );
    5157                 :            : 
    5158         [ #  # ]:          0 :         assert( sharing_procs.size() != 2 );
    5159 [ #  # ][ #  # ]:          0 :         proc_nvecs[sharing_procs].push_back( this_ent );
    5160                 :            : 
    5161                 :          0 :         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
    5162         [ #  # ]:          0 :         if( sharing_procs.size() == 1 )
    5163                 :            :         {
    5164 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shp_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5165 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shh_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5166 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &share_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5167         [ #  # ]:          0 :             sharedEnts.insert( this_ent );
    5168                 :            :         }
    5169                 :            :         else
    5170                 :            :         {
    5171                 :            :             // Pad lists
    5172                 :            :             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
    5173         [ #  # ]:          0 :             if( sharing_procs.size() > MAX_SHARING_PROCS )
    5174                 :            :             {
    5175 [ #  # ][ #  # ]:          0 :                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
                 [ #  # ]
    5176 [ #  # ][ #  # ]:          0 :                           << proc_config().proc_rank() << std::endl;
         [ #  # ][ #  # ]
    5177         [ #  # ]:          0 :                 std::cerr.flush();
    5178 [ #  # ][ #  # ]:          0 :                 MPI_Abort( proc_config().proc_comm(), 66 );
                 [ #  # ]
    5179                 :            :             }
    5180         [ #  # ]:          0 :             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
    5181         [ #  # ]:          0 :             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
    5182 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5183 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5184 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5185         [ #  # ]:          0 :             sharedEnts.insert( this_ent );
    5186                 :            :         }
    5187                 :            : 
    5188                 :            :         // Reset the sharing proc/handle lists for the next entity
    5189                 :          0 :         sharing_procs.clear();
    5190                 :          0 :         sharing_handles.clear();
    5191                 :            :     }
    5192                 :            : 
    5193                 :            : #ifndef NDEBUG
    5194                 :            :     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
    5195   [ #  #  #  # ]:          0 :     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
                 [ #  # ]
    5196                 :          0 :          mit != proc_nvecs.end(); ++mit )
    5197                 :            :     {
    5198 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > tmp_vec = ( mit->second );
    5199         [ #  # ]:          0 :         std::sort( tmp_vec.begin(), tmp_vec.end() );
    5200         [ #  # ]:          0 :         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
    5201 [ #  # ][ #  # ]:          0 :         assert( vit == tmp_vec.end() );
    5202                 :          0 :     }
    5203                 :            : #endif
    5204                 :            : 
    5205                 :          0 :     return MB_SUCCESS;
    5206                 :            : }
    5207                 :            : 
    5208                 :            : //! Get processors with which this processor communicates; sets are sorted by processor
    5209                 :          4 : ErrorCode ParallelComm::get_interface_procs( std::set< unsigned int >& procs_set, bool get_buffs )
    5210                 :            : {
    5211                 :            :     // Make sure the sharing procs vector is empty
    5212                 :          4 :     procs_set.clear();
    5213                 :            : 
    5214                 :            :     // Pre-load vector of single-proc tag values
    5215                 :            :     unsigned int i, j;
    5216 [ +  - ][ +  - ]:          4 :     std::vector< int > iface_proc( interfaceSets.size() );
    5217 [ +  - ][ +  - ]:          4 :     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), interfaceSets, &iface_proc[0] );MB_CHK_SET_ERR( result, "Failed to get iface_proc for iface sets" );
         [ +  - ][ -  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5218                 :            : 
    5219                 :            :     // Get sharing procs either from single-proc vector or by getting
    5220                 :            :     // multi-proc tag value
    5221                 :            :     int tmp_iface_procs[MAX_SHARING_PROCS];
    5222         [ +  - ]:          4 :     std::fill( tmp_iface_procs, tmp_iface_procs + MAX_SHARING_PROCS, -1 );
    5223         [ +  - ]:          4 :     Range::iterator rit;
    5224 [ +  - ][ #  # ]:          4 :     for( rit = interfaceSets.begin(), i = 0; rit != interfaceSets.end(); ++rit, i++ )
         [ +  - ][ +  - ]
                 [ -  + ]
    5225                 :            :     {
    5226 [ #  # ][ #  # ]:          0 :         if( -1 != iface_proc[i] )
    5227                 :            :         {
    5228 [ #  # ][ #  # ]:          0 :             assert( iface_proc[i] != (int)procConfig.proc_rank() );
                 [ #  # ]
    5229 [ #  # ][ #  # ]:          0 :             procs_set.insert( (unsigned int)iface_proc[i] );
    5230                 :            :         }
    5231                 :            :         else
    5232                 :            :         {
    5233                 :            :             // Get the sharing_procs tag
    5234 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, tmp_iface_procs );MB_CHK_SET_ERR( result, "Failed to get iface_procs for iface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5235         [ #  # ]:          0 :             for( j = 0; j < MAX_SHARING_PROCS; j++ )
    5236                 :            :             {
    5237 [ #  # ][ #  # ]:          0 :                 if( -1 != tmp_iface_procs[j] && tmp_iface_procs[j] != (int)procConfig.proc_rank() )
         [ #  # ][ #  # ]
    5238         [ #  # ]:          0 :                     procs_set.insert( (unsigned int)tmp_iface_procs[j] );
    5239         [ #  # ]:          0 :                 else if( -1 == tmp_iface_procs[j] )
    5240                 :            :                 {
    5241         [ #  # ]:          0 :                     std::fill( tmp_iface_procs, tmp_iface_procs + j, -1 );
    5242                 :          0 :                     break;
    5243                 :            :                 }
    5244                 :            :             }
    5245                 :            :         }
    5246                 :            :     }
    5247                 :            : 
    5248         [ -  + ]:          4 :     if( get_buffs )
    5249                 :            :     {
    5250 [ #  # ][ #  # ]:          0 :         for( std::set< unsigned int >::iterator sit = procs_set.begin(); sit != procs_set.end(); ++sit )
                 [ #  # ]
    5251 [ #  # ][ #  # ]:          0 :             get_buffers( *sit );
    5252                 :            :     }
    5253                 :            : 
    5254                 :          4 :     return MB_SUCCESS;
    5255                 :            : }
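
A typical caller uses get_interface_procs simply to learn which ranks it will exchange interface data with. A minimal hedged usage sketch, where pc is assumed to be an existing ParallelComm instance:

    // Sketch: list the ranks this processor shares an interface with.
    std::set< unsigned int > neighbors;
    ErrorCode rval = pc->get_interface_procs( neighbors, false );  // false: do not pre-allocate buffers
    if( MB_SUCCESS == rval )
    {
        for( std::set< unsigned int >::iterator it = neighbors.begin(); it != neighbors.end(); ++it )
            std::cout << "rank " << pc->proc_config().proc_rank() << " neighbors rank " << *it << "\n";
    }
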
    5256                 :            : 
    5257                 :          0 : ErrorCode ParallelComm::get_pstatus( EntityHandle entity, unsigned char& pstatus_val )
    5258                 :            : {
    5259 [ #  # ][ #  # ]:          0 :     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstatus_val );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5260                 :          0 :     return result;
    5261                 :            : }
    5262                 :            : 
    5263                 :          0 : ErrorCode ParallelComm::get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents )
    5264                 :            : {
    5265         [ #  # ]:          0 :     Range ents;
    5266                 :            :     ErrorCode result;
    5267                 :            : 
    5268         [ #  # ]:          0 :     if( -1 == dim )
    5269                 :            :     {
    5270 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_entities_by_handle( 0, ents );MB_CHK_SET_ERR( result, "Failed to get all entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5271                 :            :     }
    5272                 :            :     else
    5273                 :            :     {
    5274 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_entities_by_dimension( 0, dim, ents );MB_CHK_SET_ERR( result, "Failed to get entities of dimension " << dim );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5275                 :            :     }
    5276                 :            : 
    5277 [ #  # ][ #  # ]:          0 :     std::vector< unsigned char > pstatus( ents.size() );
    5278 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data( pstatus_tag(), ents, &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5279         [ #  # ]:          0 :     Range::iterator rit = ents.begin();
    5280                 :          0 :     int i               = 0;
    5281         [ #  # ]:          0 :     if( pstatus_val )
    5282                 :            :     {
    5283 [ #  # ][ #  # ]:          0 :         for( ; rit != ents.end(); i++, ++rit )
         [ #  # ][ #  # ]
    5284                 :            :         {
    5285 [ #  # ][ #  # ]:          0 :             if( pstatus[i] & pstatus_val && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5286 [ #  # ][ #  # ]:          0 :                 pstatus_ents.insert( *rit );
    5287                 :            :         }
    5288                 :            :     }
    5289                 :            :     else
    5290                 :            :     {
    5291 [ #  # ][ #  # ]:          0 :         for( ; rit != ents.end(); i++, ++rit )
         [ #  # ][ #  # ]
    5292                 :            :         {
    5293 [ #  # ][ #  # ]:          0 :             if( !pstatus[i] && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5294 [ #  # ][ #  # ]:          0 :                 pstatus_ents.insert( *rit );
    5295                 :            :         }
    5296                 :            :     }
    5297                 :            : 
    5298                 :          0 :     return MB_SUCCESS;
    5299                 :            : }
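
Note the two branches above: a nonzero pstatus_val selects entities carrying any of the requested bits, while pstatus_val == 0 selects entities whose status byte is entirely clear, i.e. purely local, unshared entities. A usage sketch under the same assumptions as before (pcomm is an initialized moab::ParallelComm*):

    moab::Range shared_verts, local_cells;
    // Vertices (dim 0) that carry the SHARED bit ...
    moab::ErrorCode rval = pcomm->get_pstatus_entities( 0, PSTATUS_SHARED, shared_verts );
    // ... and 3D elements with no parallel status bits at all.
    if( moab::MB_SUCCESS == rval )
        rval = pcomm->get_pstatus_entities( 3, 0, local_cells );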
    5300                 :            : 
    5301                 :         19 : ErrorCode ParallelComm::check_global_ids( EntityHandle this_set, const int dimension, const int start_id,
    5302                 :            :                                           const bool largest_dim_only, const bool parallel, const bool owned_only )
    5303                 :            : {
    5304                 :            :     // Global id tag
    5305         [ +  - ]:         19 :     Tag gid_tag = mbImpl->globalId_tag();
    5306                 :         19 :     int def_val = -1;
    5307         [ +  - ]:         19 :     Range dum_range;
    5308                 :            : 
    5309                 :         19 :     void* tag_ptr    = &def_val;
    5310 [ +  - ][ -  + ]:         19 :     ErrorCode result = mbImpl->get_entities_by_type_and_tag( this_set, MBVERTEX, &gid_tag, &tag_ptr, 1, dum_range );MB_CHK_SET_ERR( result, "Failed to get entities by MBVERTEX type and gid tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5311                 :            : 
    5312 [ +  - ][ -  + ]:         19 :     if( !dum_range.empty() )
    5313                 :            :     {
    5314                 :            :         // Some vertices still carry the default (-1) global id, so we need to assign global ids
    5315 [ #  # ][ #  # ]:          0 :         result = assign_global_ids( this_set, dimension, start_id, largest_dim_only, parallel, owned_only );MB_CHK_SET_ERR( result, "Failed assigning global ids" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5316                 :            :     }
    5317                 :            : 
    5318                 :         19 :     return MB_SUCCESS;
    5319                 :            : }
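
Because check_global_ids only assigns ids when some vertices still carry the default (-1) value, it is cheap to call defensively before operations that match entities across ranks by id. A hedged sketch; resolve_shared_ents is part of the ParallelComm public API, the literal dimensions are illustrative, and set handle 0 means the whole mesh:

    moab::ErrorCode rval = pcomm->check_global_ids( 0, 3 /*dimension*/, 1 /*start_id*/,
                                                    true /*largest_dim_only*/, true /*parallel*/,
                                                    true /*owned_only*/ );
    // With valid ids in place, work out which entities are shared across ranks.
    if( moab::MB_SUCCESS == rval )
        rval = pcomm->resolve_shared_ents( 0, 3 /*resolve_dim*/, 2 /*shared_dim*/ );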
    5320                 :            : 
    5321                 :          0 : bool ParallelComm::is_iface_proc( EntityHandle this_set, int to_proc )
    5322                 :            : {
    5323                 :            :     int sharing_procs[MAX_SHARING_PROCS];
    5324         [ #  # ]:          0 :     std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
    5325 [ #  # ][ #  # ]:          0 :     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), &this_set, 1, sharing_procs );
    5326 [ #  # ][ #  # ]:          0 :     if( MB_SUCCESS == result && to_proc == sharing_procs[0] ) return true;
    5327                 :            : 
    5328 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data( sharedps_tag(), &this_set, 1, sharing_procs );
    5329         [ #  # ]:          0 :     if( MB_SUCCESS != result ) return false;
    5330                 :            : 
    5331         [ #  # ]:          0 :     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
    5332                 :            :     {
    5333         [ #  # ]:          0 :         if( to_proc == sharing_procs[i] )
    5334                 :          0 :             return true;
    5335         [ #  # ]:          0 :         else if( -1 == sharing_procs[i] )
    5336                 :          0 :             return false;
    5337                 :            :     }
    5338                 :            : 
    5339                 :          0 :     return false;
    5340                 :            : }
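
is_iface_proc relies on the sharing-tag convention used throughout this file: an entity or set shared with exactly one other rank stores that rank in the sharedp tag, while a multi-shared one stores up to MAX_SHARING_PROCS ranks, terminated by -1, in the sharedps tag. A sketch of reading that convention directly; it assumes the tag accessors are publicly reachable on pcomm, and that mb (moab::Interface*) and eh (moab::EntityHandle) exist:

    int one_proc = -1;
    int many_procs[MAX_SHARING_PROCS];
    std::fill( many_procs, many_procs + MAX_SHARING_PROCS, -1 );
    // Single-sharer convention: sharedp holds one rank, or -1 if unused.
    moab::ErrorCode rval = mb->tag_get_data( pcomm->sharedp_tag(), &eh, 1, &one_proc );
    // Multi-sharer convention: sharedps holds a -1 terminated list of ranks.
    if( moab::MB_SUCCESS == rval && -1 == one_proc )
        rval = mb->tag_get_data( pcomm->sharedps_tag(), &eh, 1, many_procs );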
    5341                 :            : 
    5342                 :         40 : ErrorCode ParallelComm::filter_pstatus( Range& ents, unsigned char pstat, unsigned char op, int to_proc,
    5343                 :            :                                         Range* returned_ents )
    5344                 :            : {
    5345         [ +  - ]:         40 :     Range tmp_ents;
    5346                 :            : 
    5347                 :            :     // assert(!ents.empty());
    5348 [ +  - ][ +  + ]:         40 :     if( ents.empty() )
    5349                 :            :     {
    5350 [ +  + ][ +  - ]:          6 :         if( returned_ents ) returned_ents->clear();
    5351                 :          6 :         return MB_SUCCESS;
    5352                 :            :     }
    5353                 :            : 
    5354                 :            :     // Put into tmp_ents any entities that are not owned locally or
    5355                 :            :     // that are already shared with to_proc
    5356 [ +  - ][ +  - ]:         68 :     std::vector< unsigned char > shared_flags( ents.size() ), shared_flags2;
                 [ +  - ]
    5357 [ +  - ][ +  - ]:         34 :     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), ents, &shared_flags[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus flag" );
         [ +  - ][ -  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5358 [ +  - ][ +  - ]:         34 :     Range::const_iterator rit, hint = tmp_ents.begin();
    5359                 :            :
    5360                 :            :     int i;
    5361         [ -  + ]:         34 :     if( op == PSTATUS_OR )
    5362                 :            :     {
    5363 [ #  # ][ #  # ]:          0 :         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    5364                 :            :         {
    5365 [ #  # ][ #  # ]:          0 :             if( ( ( shared_flags[i] & ~pstat ) ^ shared_flags[i] ) & pstat )
                 [ #  # ]
    5366                 :            :             {
    5367 [ #  # ][ #  # ]:          0 :                 hint = tmp_ents.insert( hint, *rit );
    5368 [ #  # ][ #  # ]:          0 :                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
                 [ #  # ]
    5369                 :            :             }
    5370                 :            :         }
    5371                 :            :     }
    5372         [ -  + ]:         34 :     else if( op == PSTATUS_AND )
    5373                 :            :     {
    5374 [ #  # ][ #  # ]:          0 :         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    5375                 :            :         {
    5376 [ #  # ][ #  # ]:          0 :             if( ( shared_flags[i] & pstat ) == pstat )
    5377                 :            :             {
    5378 [ #  # ][ #  # ]:          0 :                 hint = tmp_ents.insert( hint, *rit );
    5379 [ #  # ][ #  # ]:          0 :                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
                 [ #  # ]
    5380                 :            :             }
    5381                 :            :         }
    5382                 :            :     }
    5383         [ +  - ]:         34 :     else if( op == PSTATUS_NOT )
    5384                 :            :     {
    5385 [ +  - ][ +  - ]:       5426 :         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
         [ +  - ][ +  - ]
                 [ +  + ]
    5386                 :            :         {
    5387 [ +  - ][ +  - ]:       5392 :             if( !( shared_flags[i] & pstat ) )
    5388                 :            :             {
    5389 [ +  - ][ +  - ]:       5392 :                 hint = tmp_ents.insert( hint, *rit );
    5390 [ -  + ][ #  # ]:       5392 :                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
                 [ #  # ]
    5391                 :            :             }
    5392                 :            :         }
    5393                 :            :     }
    5394                 :            :     else
    5395                 :            :     {
    5396                 :          0 :         assert( false );
    5397                 :            :         return MB_FAILURE;
    5398                 :            :     }
    5399                 :            : 
    5400         [ -  + ]:         34 :     if( -1 != to_proc )
    5401                 :            :     {
    5402                 :            :         int sharing_procs[MAX_SHARING_PROCS];
    5403         [ #  # ]:          0 :         std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
    5404         [ #  # ]:          0 :         Range tmp_ents2;
    5405         [ #  # ]:          0 :         hint = tmp_ents2.begin();
    5406                 :            : 
    5407 [ #  # ][ #  # ]:          0 :         for( rit = tmp_ents.begin(), i = 0; rit != tmp_ents.end(); ++rit, i++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    5408                 :            :         {
    5409                 :            :             // We need to check sharing procs
    5410 [ #  # ][ #  # ]:          0 :             if( shared_flags2[i] & PSTATUS_MULTISHARED )
    5411                 :            :             {
    5412 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5413         [ #  # ]:          0 :                 assert( -1 != sharing_procs[0] );
    5414         [ #  # ]:          0 :                 for( unsigned int j = 0; j < MAX_SHARING_PROCS; j++ )
    5415                 :            :                 {
    5416                 :            :                     // If to_proc shares this entity, add it to list
    5417 [ #  # ][ #  # ]:          0 :                     if( sharing_procs[j] == to_proc ) { hint = tmp_ents2.insert( hint, *rit ); }
                 [ #  # ]
    5418         [ #  # ]:          0 :                     else if( -1 == sharing_procs[j] )
    5419                 :          0 :                         break;
    5420                 :            : 
    5421                 :          0 :                     sharing_procs[j] = -1;
    5422                 :            :                 }
    5423                 :            :             }
    5424 [ #  # ][ #  # ]:          0 :             else if( shared_flags2[i] & PSTATUS_SHARED )
    5425                 :            :             {
    5426 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_get_data( sharedp_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedp tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5427         [ #  # ]:          0 :                 assert( -1 != sharing_procs[0] );
    5428 [ #  # ][ #  # ]:          0 :                 if( sharing_procs[0] == to_proc ) hint = tmp_ents2.insert( hint, *rit );
                 [ #  # ]
    5429                 :          0 :                 sharing_procs[0] = -1;
    5430                 :            :             }
    5431                 :            :             else
    5432                 :          0 :                 assert( "should never get here" && false );
    5433                 :            :         }
    5434                 :            : 
    5435 [ #  # ][ #  # ]:          0 :         tmp_ents.swap( tmp_ents2 );
    5436                 :            :     }
    5437                 :            : 
    5438         [ +  + ]:         34 :     if( returned_ents )
    5439         [ +  - ]:         13 :         returned_ents->swap( tmp_ents );
    5440                 :            :     else
    5441         [ +  - ]:         21 :         ents.swap( tmp_ents );
    5442                 :            : 
    5443                 :         74 :     return MB_SUCCESS;
    5444                 :            : }
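
A note on the PSTATUS_OR branch above: ( ( flags & ~pstat ) ^ flags ) & pstat isolates exactly the bits of flags that are also set in pstat, so the test is equivalent to ( flags & pstat ) != 0, i.e. at least one requested bit is present. A common use of filter_pstatus is to strip everything this rank does not own from a range; a sketch under the same assumptions as the earlier examples (mb and pcomm initialized):

    moab::Range cells, owned_cells;
    moab::ErrorCode rval = mb->get_entities_by_dimension( 0, 3, cells );
    // Keep only entities whose NOT_OWNED bit is clear; the result goes to
    // owned_cells and the input range is left untouched.
    if( moab::MB_SUCCESS == rval )
        rval = pcomm->filter_pstatus( cells, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_cells );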
    5445                 :            : 
    5446                 :          3 : ErrorCode ParallelComm::exchange_ghost_cells( int ghost_dim, int bridge_dim, int num_layers, int addl_ents,
    5447                 :            :                                               bool store_remote_handles, bool wait_all, EntityHandle* file_set )
    5448                 :            : {
    5449                 :            : #ifdef MOAB_HAVE_MPE
    5450                 :            :     if( myDebug->get_verbosity() == 2 )
    5451                 :            :     {
    5452                 :            :         if( !num_layers )
    5453                 :            :             MPE_Log_event( IFACE_START, procConfig.proc_rank(), "Starting interface exchange." );
    5454                 :            :         else
    5455                 :            :             MPE_Log_event( GHOST_START, procConfig.proc_rank(), "Starting ghost exchange." );
    5456                 :            :     }
    5457                 :            : #endif
    5458                 :            : 
    5459         [ +  - ]:          3 :     myDebug->tprintf( 1, "Entering exchange_ghost_cells with num_layers = %d\n", num_layers );
    5460 [ +  - ][ -  + ]:          3 :     if( myDebug->get_verbosity() == 4 )
    5461                 :            :     {
    5462                 :          0 :         msgs.clear();
    5463         [ #  # ]:          0 :         msgs.reserve( MAX_SHARING_PROCS );
    5464                 :            :     }
    5465                 :            : 
    5466                 :            :     // If we're only finding out about existing ents, we have to be storing
    5467                 :            :     // remote handles too
    5468 [ -  + ][ #  # ]:          3 :     assert( num_layers > 0 || store_remote_handles );
    5469                 :            : 
    5470                 :          3 :     const bool is_iface = !num_layers;
    5471                 :            : 
    5472                 :            :     // Get the b-dimensional interface(s) with each communicating proc, where b = bridge_dim
    5473                 :            : 
    5474                 :            :     int success;
    5475                 :          3 :     ErrorCode result = MB_SUCCESS;
    5476                 :          3 :     int incoming1 = 0, incoming2 = 0;
    5477                 :            : 
    5478         [ +  - ]:          3 :     reset_all_buffers();
    5479                 :            : 
    5480                 :            :     // When this function is called, buffProcs should already have any
    5481                 :            :     // communicating procs
    5482                 :            : 
    5483                 :            :     //===========================================
    5484                 :            :     // Post ghost irecv's for ghost entities from all communicating procs
    5485                 :            :     //===========================================
    5486                 :            : #ifdef MOAB_HAVE_MPE
    5487                 :            :     if( myDebug->get_verbosity() == 2 )
    5488                 :            :     { MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." ); }
    5489                 :            : #endif
    5490                 :            : 
    5491                 :            :     // Index reqs the same as buffer/sharing procs indices
    5492         [ +  - ]:          3 :     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL ),
    5493         [ +  - ]:          6 :         recv_remoteh_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    5494                 :          3 :     std::vector< unsigned int >::iterator proc_it;
    5495                 :            :     int ind, p;
    5496         [ +  - ]:          3 :     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    5497 [ #  # ][ +  - ]:          3 :     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
           [ -  +  #  # ]
    5498                 :            :     {
    5499                 :          0 :         incoming1++;
    5500 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
                 [ #  # ]
    5501         [ #  # ]:          0 :                            MB_MESG_ENTS_SIZE, incoming1 );
    5502 [ #  # ][ #  # ]:          0 :         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
    5503 [ #  # ][ #  # ]:          0 :                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
                 [ #  # ]
    5504 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5505                 :            :     }
    5506                 :            : 
    5507                 :            :     //===========================================
    5508                 :            :     // Get entities to be sent to neighbors
    5509                 :            :     //===========================================
    5510 [ +  - ][ +  + ]:        390 :     Range sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
         [ +  - ][ +  - ]
                 [ +  + ]
           [ #  #  #  # ]
    5511         [ +  - ]:          6 :     TupleList entprocs;
    5512                 :            :     int dum_ack_buff;
    5513 [ +  - ][ -  + ]:          3 :     result = get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents, allsent, entprocs );MB_CHK_SET_ERR( result, "get_sent_ents failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5514                 :            : 
    5515                 :            :     // Augment the file set with the entities to be sent;
    5516                 :            :     // we might have created new entities (edges and/or faces) if addl_ents > 0
    5517 [ -  + ][ #  # ]:          3 :     if( addl_ents > 0 && file_set && !allsent.empty() )
         [ #  # ][ #  # ]
                 [ -  + ]
    5518                 :            :     {
    5519 [ #  # ][ #  # ]:          0 :         result = mbImpl->add_entities( *file_set, allsent );MB_CHK_SET_ERR( result, "Failed to add new sub-entities to set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5520                 :            :     }
    5521                 :            :     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
    5522 [ +  - ][ +  - ]:          3 :                       (unsigned long)allsent.size() );
                 [ +  - ]
    5523                 :            : 
    5524                 :            :     //===========================================
    5525                 :            :     // Pack and send ents from this proc to others
    5526                 :            :     //===========================================
    5527 [ #  # ][ +  - ]:          3 :     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
                 [ -  + ]
    5528                 :            :     {
    5529                 :            :         myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", sent_ents[p].compactness(),
    5530 [ #  # ][ #  # ]:          0 :                           (unsigned long)sent_ents[p].size() );
                 [ #  # ]
    5531                 :            : 
    5532                 :            :         // Reserve space on front for size and for initial buff size
    5533 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[p]->reset_buffer( sizeof( int ) );
    5534                 :            : 
    5535                 :            :         // Entities
    5536 [ #  # ][ #  # ]:          0 :         result = pack_entities( sent_ents[p], localOwnedBuffs[p], store_remote_handles, buffProcs[p], is_iface,
    5537 [ #  # ][ #  # ]:          0 :                                 &entprocs, &allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5538                 :            : 
    5539 [ #  # ][ #  # ]:          0 :         if( myDebug->get_verbosity() == 4 )
    5540                 :            :         {
    5541         [ #  # ]:          0 :             msgs.resize( msgs.size() + 1 );
    5542 [ #  # ][ #  # ]:          0 :             msgs.back() = new Buffer( *localOwnedBuffs[p] );
         [ #  # ][ #  # ]
    5543                 :            :         }
    5544                 :            : 
    5545                 :            :         // Send the buffer (size stored in front in send_buffer)
    5546 [ #  # ][ #  # ]:          0 :         result = send_buffer( *proc_it, localOwnedBuffs[p], MB_MESG_ENTS_SIZE, sendReqs[3 * p],
                 [ #  # ]
    5547         [ #  # ]:          0 :                               recv_ent_reqs[3 * p + 2], &dum_ack_buff, incoming1, MB_MESG_REMOTEH_SIZE,
    5548         [ #  # ]:          0 :                               ( !is_iface && store_remote_handles ?  // this used for ghosting only
    5549         [ #  # ]:          0 :                                     localOwnedBuffs[p]
    5550                 :            :                                                                   : NULL ),
    5551 [ #  # ][ #  # ]:          0 :                               &recv_remoteh_reqs[3 * p], &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost exchange" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5552                 :            :     }
    5553                 :            : 
    5554         [ +  - ]:          3 :     entprocs.reset();
    5555                 :            : 
    5556                 :            :     //===========================================
    5557                 :            :     // Receive/unpack new entities
    5558                 :            :     //===========================================
    5559                 :            :     // Number of incoming messages for ghosts is the number of procs we
    5560                 :            :     // communicate with; for iface, it's the number of those with lower rank
    5561                 :            :     MPI_Status status;
    5562         [ +  - ]:          6 :     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
    5563 [ +  - ][ +  - ]:          6 :     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
    5564         [ +  - ]:          6 :     std::vector< std::vector< int > > L1p( buffProcs.size() );
    5565 [ +  - ][ +  - ]:          6 :     std::vector< EntityHandle > L2hloc, L2hrem;
    5566         [ +  - ]:          6 :     std::vector< unsigned int > L2p;
    5567         [ +  - ]:          6 :     std::vector< EntityHandle > new_ents;
    5568                 :            : 
    5569         [ -  + ]:          3 :     while( incoming1 )
    5570                 :            :     {
    5571                 :            :         // Wait for all recvs of ghost ents before proceeding to sending remote handles,
    5572                 :            :         // because some procs may have sent ents owned by me to a 3rd proc
    5573 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
    5574                 :            : 
    5575 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &ind, &status );
    5576 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5577                 :            : 
    5578         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    5579                 :            : 
    5580                 :            :         // OK, received something; decrement incoming counter
    5581                 :          0 :         incoming1--;
    5582                 :          0 :         bool done = false;
    5583                 :            : 
    5584                 :            :         // In case ind is for ack, we need index of one before it
    5585                 :          0 :         unsigned int base_ind = 3 * ( ind / 3 );
    5586 [ #  # ][ #  # ]:          0 :         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 3], recv_ent_reqs[base_ind + 1],
    5587 [ #  # ][ #  # ]:          0 :                               recv_ent_reqs[base_ind + 2], incoming1, localOwnedBuffs[ind / 3], sendReqs[base_ind + 1],
                 [ #  # ]
    5588         [ #  # ]:          0 :                               sendReqs[base_ind + 2], done,
    5589 [ #  # ][ #  # ]:          0 :                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind / 3] : NULL ),
    5590                 :            :                               MB_MESG_REMOTEH_SIZE,  // maybe base_ind+1?
    5591 [ #  # ][ #  # ]:          0 :                               &recv_remoteh_reqs[base_ind + 1], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5592                 :            : 
    5593         [ #  # ]:          0 :         if( done )
    5594                 :            :         {
    5595 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 4 )
    5596                 :            :             {
    5597         [ #  # ]:          0 :                 msgs.resize( msgs.size() + 1 );
    5598 [ #  # ][ #  # ]:          0 :                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 3] );
         [ #  # ][ #  # ]
    5599                 :            :             }
    5600                 :            : 
    5601                 :            :             // Message completely received - process buffer that was sent
    5602 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
    5603         [ #  # ]:          0 :             result = unpack_entities( remoteOwnedBuffs[ind / 3]->buff_ptr, store_remote_handles, ind / 3, is_iface,
    5604         [ #  # ]:          0 :                                       L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );
    5605         [ #  # ]:          0 :             if( MB_SUCCESS != result )
    5606                 :            :             {
    5607 [ #  # ][ #  # ]:          0 :                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
    5608 [ #  # ][ #  # ]:          0 :                 print_buffer( remoteOwnedBuffs[ind / 3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 3], false );
                 [ #  # ]
    5609                 :          0 :                 return result;
    5610                 :            :             }
    5611                 :            : 
    5612         [ #  # ]:          0 :             if( recv_ent_reqs.size() != 3 * buffProcs.size() )
    5613                 :            :             {
    5614                 :            :                 // Post irecv's for remote handles from new proc; shouldn't be iface,
    5615                 :            :                 // since we know about all procs we share with
    5616         [ #  # ]:          0 :                 assert( !is_iface );
    5617         [ #  # ]:          0 :                 recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    5618         [ #  # ]:          0 :                 for( unsigned int i = recv_ent_reqs.size(); i < 3 * buffProcs.size(); i += 3 )
    5619                 :            :                 {
    5620 [ #  # ][ #  # ]:          0 :                     localOwnedBuffs[i / 3]->reset_buffer();
    5621                 :          0 :                     incoming2++;
    5622 [ #  # ][ #  # ]:          0 :                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 3], localOwnedBuffs[i / 3]->mem_ptr,
                 [ #  # ]
    5623         [ #  # ]:          0 :                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
    5624         [ #  # ]:          0 :                     success = MPI_Irecv( localOwnedBuffs[i / 3]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
    5625         [ #  # ]:          0 :                                          buffProcs[i / 3], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
    5626 [ #  # ][ #  # ]:          0 :                                          &recv_remoteh_reqs[i] );
                 [ #  # ]
    5627         [ #  # ]:          0 :                     if( success != MPI_SUCCESS )
    5628 [ #  # ][ #  # ]:          0 :                     { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    5629                 :            :                 }
    5630         [ #  # ]:          0 :                 recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    5631         [ #  # ]:          0 :                 sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    5632                 :            :             }
    5633                 :            :         }
    5634                 :            :     }
    5635                 :            : 
    5636                 :            :     // Add requests for any new addl procs
    5637         [ -  + ]:          3 :     if( recv_ent_reqs.size() != 3 * buffProcs.size() )
    5638                 :            :     {
    5639                 :            :         // Shouldn't get here...
    5640 [ #  # ][ #  # ]:          0 :         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in ghost exchange" );
         [ #  # ][ #  # ]
                 [ #  # ]
    5641                 :            :     }
    5642                 :            : 
    5643                 :            : #ifdef MOAB_HAVE_MPE
    5644                 :            :     if( myDebug->get_verbosity() == 2 )
    5645                 :            :     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange." ); }
    5646                 :            : #endif
    5647                 :            : 
    5648         [ -  + ]:          3 :     if( is_iface )
    5649                 :            :     {
    5650                 :            :         // Need to check over entities I sent and make sure I received
    5651                 :            :         // handles for them from all expected procs; if not, need to clean
    5652                 :            :         // them up
    5653         [ #  # ]:          0 :         result = check_clean_iface( allsent );
    5654 [ #  # ][ #  # ]:          0 :         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
                 [ #  # ]
    5655                 :            : 
    5656                 :            :         // Now set the shared/interface tag on non-vertex entities on interface
    5657 [ #  # ][ #  # ]:          0 :         result = tag_iface_entities();MB_CHK_SET_ERR( result, "Failed to tag iface entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5658                 :            : 
    5659                 :            : #ifndef NDEBUG
    5660         [ #  # ]:          0 :         result = check_sent_ents( allsent );
    5661 [ #  # ][ #  # ]:          0 :         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
                 [ #  # ]
    5662         [ #  # ]:          0 :         result = check_all_shared_handles( true );
    5663 [ #  # ][ #  # ]:          0 :         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
                 [ #  # ]
    5664                 :            : #endif
    5665                 :            : 
    5666                 :            : #ifdef MOAB_HAVE_MPE
    5667                 :            :         if( myDebug->get_verbosity() == 2 )
    5668                 :            :         { MPE_Log_event( IFACE_END, procConfig.proc_rank(), "Ending interface exchange." ); }
    5669                 :            : #endif
    5670                 :            : 
    5671                 :            :         //===========================================
    5672                 :            :         // Wait if requested
    5673                 :            :         //===========================================
    5674         [ #  # ]:          0 :         if( wait_all )
    5675                 :            :         {
    5676 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    5677                 :            :             else
    5678                 :            :             {
    5679                 :            :                 MPI_Status mult_status[3 * MAX_SHARING_PROCS];
    5680 [ #  # ][ #  # ]:          0 :                 success = MPI_Waitall( 3 * buffProcs.size(), &recv_ent_reqs[0], mult_status );
    5681 [ #  # ][ #  # ]:          0 :                 if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5682 [ #  # ][ #  # ]:          0 :                 success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
    5683 [ #  # ][ #  # ]:          0 :                 if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5684                 :            :                 /*success = MPI_Waitall(3*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
    5685                 :            :                 if (MPI_SUCCESS != success) {
    5686                 :            :                   MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
    5687                 :            :                 }*/
    5688                 :            :             }
    5689                 :            :         }
    5690                 :            : 
    5691         [ #  # ]:          0 :         myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
    5692         [ #  # ]:          0 :         myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==true \n" );
    5693                 :            : 
    5694                 :          0 :         return MB_SUCCESS;
    5695                 :            :     }
    5696                 :            : 
    5697                 :            :     // We still need to wait on sendReqs if they have not been fulfilled yet
    5698         [ +  - ]:          3 :     if( wait_all )
    5699                 :            :     {
    5700 [ +  - ][ -  + ]:          3 :         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    5701                 :            :         else
    5702                 :            :         {
    5703                 :            :             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
    5704 [ +  - ][ +  - ]:          3 :             success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
    5705 [ -  + ][ #  # ]:          3 :             if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5706                 :            :         }
    5707                 :            :     }
    5708                 :            :     //===========================================
    5709                 :            :     // Send local handles for new ghosts to owner, then add
    5710                 :            :     // those to ghost list for that owner
    5711                 :            :     //===========================================
    5712 [ #  # ][ +  - ]:          3 :     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
                 [ -  + ]
    5713                 :            :     {
    5714                 :            : 
    5715                 :            :         // Reserve space on front for size and for initial buff size
    5716 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs[p]->reset_buffer( sizeof( int ) );
    5717                 :            : 
    5718 [ #  # ][ #  # ]:          0 :         result = pack_remote_handles( L1hloc[p], L1hrem[p], L1p[p], *proc_it, remoteOwnedBuffs[p] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5719 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs[p]->set_stored_size();
    5720                 :            : 
    5721 [ #  # ][ #  # ]:          0 :         if( myDebug->get_verbosity() == 4 )
    5722                 :            :         {
    5723         [ #  # ]:          0 :             msgs.resize( msgs.size() + 1 );
    5724 [ #  # ][ #  # ]:          0 :             msgs.back() = new Buffer( *remoteOwnedBuffs[p] );
         [ #  # ][ #  # ]
    5725                 :            :         }
    5726 [ #  # ][ #  # ]:          0 :         result = send_buffer( buffProcs[p], remoteOwnedBuffs[p], MB_MESG_REMOTEH_SIZE, sendReqs[3 * p],
                 [ #  # ]
    5727 [ #  # ][ #  # ]:          0 :                               recv_remoteh_reqs[3 * p + 2], &dum_ack_buff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5728                 :            :     }
    5729                 :            : 
    5730                 :            :     //===========================================
    5731                 :            :     // Process remote handles of my ghosteds
    5732                 :            :     //===========================================
    5733         [ -  + ]:          3 :     while( incoming2 )
    5734                 :            :     {
    5735 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
    5736 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status );
    5737 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5738                 :            : 
    5739                 :            :         // OK, received something; decrement incoming counter
    5740                 :          0 :         incoming2--;
    5741                 :            : 
    5742         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    5743                 :            : 
    5744                 :          0 :         bool done             = false;
    5745                 :          0 :         unsigned int base_ind = 3 * ( ind / 3 );
    5746 [ #  # ][ #  # ]:          0 :         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 3], recv_remoteh_reqs[base_ind + 1],
    5747 [ #  # ][ #  # ]:          0 :                               recv_remoteh_reqs[base_ind + 2], incoming2, remoteOwnedBuffs[ind / 3],
    5748 [ #  # ][ #  # ]:          0 :                               sendReqs[base_ind + 1], sendReqs[base_ind + 2], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5749         [ #  # ]:          0 :         if( done )
    5750                 :            :         {
    5751                 :            :             // Incoming remote handles
    5752 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 4 )
    5753                 :            :             {
    5754         [ #  # ]:          0 :                 msgs.resize( msgs.size() + 1 );
    5755 [ #  # ][ #  # ]:          0 :                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 3] );
         [ #  # ][ #  # ]
    5756                 :            :             }
    5757 [ #  # ][ #  # ]:          0 :             localOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
    5758                 :            :             result =
    5759 [ #  # ][ #  # ]:          0 :                 unpack_remote_handles( buffProcs[ind / 3], localOwnedBuffs[ind / 3]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5760                 :            :         }
    5761                 :            :     }
    5762                 :            : 
    5763                 :            : #ifdef MOAB_HAVE_MPE
    5764                 :            :     if( myDebug->get_verbosity() == 2 )
    5765                 :            :     {
    5766                 :            :         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
    5767                 :            :         MPE_Log_event( GHOST_END, procConfig.proc_rank(), "Ending ghost exchange (still doing checks)." );
    5768                 :            :     }
    5769                 :            : #endif
    5770                 :            : 
    5771                 :            :     //===========================================
    5772                 :            :     // Wait if requested
    5773                 :            :     //===========================================
    5774         [ +  - ]:          3 :     if( wait_all )
    5775                 :            :     {
    5776 [ +  - ][ -  + ]:          3 :         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    5777                 :            :         else
    5778                 :            :         {
    5779                 :            :             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
    5780 [ +  - ][ +  - ]:          3 :             success = MPI_Waitall( 3 * buffProcs.size(), &recv_remoteh_reqs[0], mult_status );
    5781 [ +  - ][ +  - ]:          3 :             if( MPI_SUCCESS == success ) success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
                 [ +  - ]
    5782                 :            :         }
    5783 [ -  + ][ #  # ]:          3 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5784                 :            :     }
    5785                 :            : 
    5786                 :            : #ifndef NDEBUG
    5787 [ +  - ][ -  + ]:          3 :     result = check_sent_ents( allsent );MB_CHK_SET_ERR( result, "Failed check on shared entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5788 [ +  - ][ -  + ]:          3 :     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Failed check on all shared handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    5789                 :            : #endif
    5790                 :            : 
    5791 [ -  + ][ #  # ]:          3 :     if( file_set && !new_ents.empty() )
                 [ -  + ]
    5792                 :            :     {
    5793 [ #  # ][ #  # ]:          0 :         result = mbImpl->add_entities( *file_set, &new_ents[0], new_ents.size() );MB_CHK_SET_ERR( result, "Failed to add new entities to set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5794                 :            :     }
    5795                 :            : 
    5796         [ +  - ]:          3 :     myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
    5797         [ +  - ]:          3 :     myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==false \n" );
    5798                 :            : 
    5799                 :          6 :     return MB_SUCCESS;
    5800                 :            : }
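
A typical call matching the signature above: ghost 3D elements across vertex-bridged interfaces, one layer deep, keeping remote handles so later exchanges can address the ghosts; num_layers = 0 would instead take the interface-only path (is_iface above). The literal values are illustrative, and pcomm is assumed to be an initialized moab::ParallelComm* on which shared entities have already been resolved:

    moab::ErrorCode rval = pcomm->exchange_ghost_cells( 3,      // ghost_dim: ghost 3D elements
                                                        0,      // bridge_dim: bridge through vertices
                                                        1,      // num_layers: one layer of ghosts
                                                        0,      // addl_ents: no extra lower-dim entities
                                                        true,   // store_remote_handles
                                                        true,   // wait_all
                                                        NULL ); // file_set: none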
    5801                 :            : 
    5802                 :          0 : ErrorCode ParallelComm::send_buffer( const unsigned int to_proc, Buffer* send_buff, int mesg_tag, MPI_Request& send_req,
    5803                 :            :                                      MPI_Request& ack_req, int* ack_buff, int& this_incoming, int next_mesg_tag,
    5804                 :            :                                      Buffer* next_recv_buff, MPI_Request* next_recv_req, int* next_incoming )
    5805                 :            : {
    5806                 :          0 :     ErrorCode result = MB_SUCCESS;
    5807                 :            :     int success;
    5808                 :            : 
    5809                 :            :     // If small message, post recv for remote handle message
    5810 [ #  # ][ #  # ]:          0 :     if( send_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE && next_recv_buff )
                 [ #  # ]
    5811                 :            :     {
    5812                 :          0 :         ( *next_incoming )++;
    5813                 :          0 :         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, next_mesg_tag,
    5814                 :          0 :                            *next_incoming );
    5815                 :            :         success = MPI_Irecv( next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, to_proc, next_mesg_tag,
    5816                 :          0 :                              procConfig.proc_comm(), next_recv_req );
    5817         [ #  # ]:          0 :         if( success != MPI_SUCCESS )
    5818 [ #  # ][ #  # ]:          0 :         { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for next message in ghost exchange" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    5819                 :            :     }
    5820                 :            :     // If large, we'll need an ack before sending the rest
    5821         [ #  # ]:          0 :     else if( send_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
    5822                 :            :     {
    5823                 :          0 :         this_incoming++;
    5824                 :          0 :         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff, sizeof( int ), mesg_tag - 1,
    5825                 :          0 :                            this_incoming );
    5826                 :            :         success = MPI_Irecv( (void*)ack_buff, sizeof( int ), MPI_UNSIGNED_CHAR, to_proc, mesg_tag - 1,
    5827                 :          0 :                              procConfig.proc_comm(), &ack_req );
    5828         [ #  # ]:          0 :         if( success != MPI_SUCCESS )
    5829 [ #  # ][ #  # ]:          0 :         { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for entity ack in ghost exchange" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    5830                 :            :     }
    5831                 :            : 
    5832                 :            :     // Send the buffer
    5833 [ #  # ][ #  # ]:          0 :     PRINT_DEBUG_ISEND( procConfig.proc_rank(), to_proc, send_buff->mem_ptr, mesg_tag,
                 [ #  # ]
    5834         [ #  # ]:          0 :                        std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ) );
    5835 [ #  # ][ #  # ]:          0 :     assert( 0 <= send_buff->get_stored_size() && send_buff->get_stored_size() <= (int)send_buff->alloc_size );
    5836 [ #  # ][ #  # ]:          0 :     success = MPI_Isend( send_buff->mem_ptr, std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ),
    5837         [ #  # ]:          0 :                          MPI_UNSIGNED_CHAR, to_proc, mesg_tag, procConfig.proc_comm(), &send_req );
    5838         [ #  # ]:          0 :     if( success != MPI_SUCCESS ) return MB_FAILURE;
    5839                 :            : 
    5840                 :          0 :     return result;
    5841                 :            : }
    5842                 :            : 
    5843                 :          0 : ErrorCode ParallelComm::recv_buffer( int mesg_tag_expected, const MPI_Status& mpi_status, Buffer* recv_buff,
    5844                 :            :                                      MPI_Request& recv_req, MPI_Request& /*ack_recvd_req*/, int& this_incoming,
    5845                 :            :                                      Buffer* send_buff, MPI_Request& send_req, MPI_Request& sent_ack_req, bool& done,
    5846                 :            :                                      Buffer* next_buff, int next_tag, MPI_Request* next_req, int* next_incoming )
    5847                 :            : {
    5848                 :            :     // Process a received message; if there will be more coming,
    5849                 :            :     // post a receive for 2nd part then send an ack message
    5850                 :          0 :     int from_proc = mpi_status.MPI_SOURCE;
    5851                 :            :     int success;
    5852                 :            : 
    5853                 :            :     // Set the buff_ptr on the recv_buffer; needs to point beyond any
    5854                 :            :     // valid data already in the buffer
    5855 [ #  # ][ #  # ]:          0 :     recv_buff->reset_ptr( std::min( recv_buff->get_stored_size(), (int)recv_buff->alloc_size ) );
                 [ #  # ]
    5856                 :            : 
    5857 [ #  # ][ #  # ]:          0 :     if( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
                 [ #  # ]
    5858                 :            :     {
    5859                 :            :         // 1st message & large - allocate buffer, post irecv for 2nd message,
    5860                 :            :         // then send ack
    5861                 :          0 :         recv_buff->reserve( recv_buff->get_stored_size() );
    5862         [ #  # ]:          0 :         assert( recv_buff->alloc_size > INITIAL_BUFF_SIZE );
    5863                 :            : 
    5864                 :            :         // Will expect a 2nd message
    5865                 :          0 :         this_incoming++;
    5866                 :            : 
    5867                 :          0 :         PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr + INITIAL_BUFF_SIZE,
    5868                 :          0 :                            recv_buff->get_stored_size() - INITIAL_BUFF_SIZE, mesg_tag_expected + 1, this_incoming );
    5869                 :          0 :         success = MPI_Irecv( recv_buff->mem_ptr + INITIAL_BUFF_SIZE, recv_buff->get_stored_size() - INITIAL_BUFF_SIZE,
    5870                 :          0 :                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &recv_req );
    5871 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post 2nd iRecv in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5872                 :            : 
    5873                 :            :         // Send ack, doesn't matter what data actually is
    5874                 :          0 :         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr, mesg_tag_expected - 1,
    5875                 :          0 :                            sizeof( int ) );
    5876                 :            :         success = MPI_Isend( recv_buff->mem_ptr, sizeof( int ), MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected - 1,
    5877                 :          0 :                              procConfig.proc_comm(), &sent_ack_req );
    5878 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to send ack in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5879                 :            :     }
    5880         [ #  # ]:          0 :     else if( mpi_status.MPI_TAG == mesg_tag_expected - 1 )
    5881                 :            :     {
    5882                 :            :         // Got an ack back, send the 2nd half of message
    5883                 :            : 
    5884                 :            :         // Should be a large message if we got this
    5885         [ #  # ]:          0 :         assert( *( (size_t*)send_buff->mem_ptr ) > INITIAL_BUFF_SIZE );
    5886                 :            : 
    5887                 :            :         // Post irecv for next message, then send 2nd message
    5888         [ #  # ]:          0 :         if( next_buff )
    5889                 :            :         {
    5890                 :            :             // We'll expect a return message
    5891                 :          0 :             ( *next_incoming )++;
    5892                 :          0 :             PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, next_buff->mem_ptr, INITIAL_BUFF_SIZE, next_tag,
    5893                 :          0 :                                *next_incoming );
    5894                 :            : 
    5895                 :            :             success = MPI_Irecv( next_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc, next_tag,
    5896                 :          0 :                                  procConfig.proc_comm(), next_req );
    5897 [ #  # ][ #  # ]:          0 :             if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post next irecv in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5898                 :            :         }
    5899                 :            : 
    5900                 :            :         // Send 2nd message
    5901                 :          0 :         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, send_buff->mem_ptr + INITIAL_BUFF_SIZE,
    5902                 :          0 :                            mesg_tag_expected + 1, send_buff->get_stored_size() - INITIAL_BUFF_SIZE );
    5903                 :            : 
    5904         [ #  # ]:          0 :         assert( send_buff->get_stored_size() - INITIAL_BUFF_SIZE < send_buff->alloc_size &&
    5905         [ #  # ]:          0 :                 0 <= send_buff->get_stored_size() );
    5906                 :          0 :         success = MPI_Isend( send_buff->mem_ptr + INITIAL_BUFF_SIZE, send_buff->get_stored_size() - INITIAL_BUFF_SIZE,
    5907                 :          0 :                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &send_req );
    5908 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to send 2nd message in ghost exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5909                 :            :     }
    5910 [ #  # ][ #  # ]:          0 :     else if( ( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE ) ||
         [ #  # ][ #  # ]
    5911                 :          0 :              mpi_status.MPI_TAG == mesg_tag_expected + 1 )
    5912                 :            :     {
    5913                 :            :         // Message completely received - signal that we're done
    5914                 :          0 :         done = true;
    5915                 :            :     }
    5916                 :            : 
    5917                 :          0 :     return MB_SUCCESS;
    5918                 :            : }
    5919                 :            : 
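The branch logic above implements ParallelComm's two-fragment protocol for messages that outgrow the fixed INITIAL_BUFF_SIZE allocation: the first fragment carries the total stored size, the receiver grows its buffer, posts a second irecv for the remainder on tag+1, and acknowledges on tag-1; the sender releases the tail only after seeing that ack. Below is a minimal sketch of the same handshake on the receiving side in plain MPI; recv_large, INIT_SIZE, and the tag arithmetic are illustrative assumptions, not MOAB API.

    // Sketch only: receive a payload whose first fragment (<= INIT_SIZE bytes)
    // carries the total size at its front; fetch the remainder with a second
    // receive on tag+1 and acknowledge on tag-1 so the sender releases it.
    #include <mpi.h>
    #include <vector>
    #include <cstring>

    void recv_large( int from, int tag, std::vector< unsigned char >& buf, MPI_Comm comm )
    {
        const int INIT_SIZE = 1024;  // plays the role of INITIAL_BUFF_SIZE
        buf.resize( INIT_SIZE );
        MPI_Recv( buf.data(), INIT_SIZE, MPI_UNSIGNED_CHAR, from, tag, comm, MPI_STATUS_IGNORE );

        int total = 0;  // total stored size written at the front of the buffer
        std::memcpy( &total, buf.data(), sizeof( int ) );
        if( total > INIT_SIZE )
        {
            buf.resize( total );  // keep the first fragment, grow for the rest
            MPI_Request req;
            MPI_Irecv( buf.data() + INIT_SIZE, total - INIT_SIZE, MPI_UNSIGNED_CHAR,
                       from, tag + 1, comm, &req );          // second fragment
            int ack = 1;
            MPI_Send( &ack, 1, MPI_INT, from, tag - 1, comm );  // ack releases the tail
            MPI_Wait( &req, MPI_STATUS_IGNORE );
        }
    }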
    5920                 :            : struct ProcList
    5921                 :            : {
    5922                 :            :     int procs[MAX_SHARING_PROCS];
    5923                 :            : };
    5924                 :          0 : static bool operator<( const ProcList& a, const ProcList& b )
    5925                 :            : {
    5926         [ #  # ]:          0 :     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
    5927                 :            :     {
    5928         [ #  # ]:          0 :         if( a.procs[i] < b.procs[i] )
    5929                 :          0 :             return true;
    5930         [ #  # ]:          0 :         else if( b.procs[i] < a.procs[i] )
    5931                 :          0 :             return false;
    5932         [ #  # ]:          0 :         else if( a.procs[i] < 0 )
    5933                 :          0 :             return false;
    5934                 :            :     }
    5935                 :          0 :     return false;
    5936                 :            : }
    5937                 :            : 
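operator< above defines a lexicographic order over the -1-terminated proc array, which is what lets ProcList serve as a std::map key in check_clean_iface below, grouping entities by their set of sharing processors. A small sketch of that grouping pattern, assuming the MOAB types and headers already visible in this file; group_by_procs is a hypothetical helper, not part of ParallelComm.

    // Sketch only: group a range of entities under the key formed by their
    // sorted, -1-terminated list of sharing processors.
    static void group_by_procs( const Range& ents, const int* procs, int nump,
                                std::map< ProcList, Range >& groups )
    {
        ProcList key;
        std::fill( key.procs, key.procs + MAX_SHARING_PROCS, -1 );  // -1-terminate
        std::copy( procs, procs + nump, key.procs );
        std::sort( key.procs, key.procs + nump );                   // canonical order
        groups[key].merge( ents );
    }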
    5938                 :          0 : ErrorCode ParallelComm::check_clean_iface( Range& allsent )
    5939                 :            : {
    5940                 :            :     // allsent is all entities I think are on interface; go over them, looking
    5941                 :            :     // for zero-valued handles, and fix any I find
    5942                 :            : 
    5943                 :            :     // Keep lists of entities for which the sharing data changed, grouped
    5944                 :            :     // by set of sharing procs.
    5945                 :            :     typedef std::map< ProcList, Range > procmap_t;
    5946 [ #  # ][ #  # ]:          0 :     procmap_t old_procs, new_procs;
    5947                 :            : 
    5948                 :          0 :     ErrorCode result = MB_SUCCESS;
    5949         [ #  # ]:          0 :     Range::iterator rit;
    5950         [ #  # ]:          0 :     Range::reverse_iterator rvit;
    5951                 :            :     unsigned char pstatus;
    5952                 :            :     int nump;
    5953                 :            :     ProcList sharedp;
    5954                 :            :     EntityHandle sharedh[MAX_SHARING_PROCS];
    5955 [ #  # ][ #  # ]:          0 :     for( rvit = allsent.rbegin(); rvit != allsent.rend(); ++rvit )
         [ #  # ][ #  # ]
                 [ #  # ]
    5956                 :            :     {
    5957 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( *rvit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5958 [ #  # ][ #  # ]:          0 :         assert( "Should be shared with at least one other proc" &&
    5959         [ #  # ]:          0 :                 ( nump > 1 || sharedp.procs[0] != (int)procConfig.proc_rank() ) );
    5960 [ #  # ][ #  # ]:          0 :         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
    5961                 :            : 
    5962                 :            :         // Look for first null handle in list
    5963         [ #  # ]:          0 :         int idx = std::find( sharedh, sharedh + nump, (EntityHandle)0 ) - sharedh;
    5964         [ #  # ]:          0 :         if( idx == nump ) continue;  // All handles are valid
    5965                 :            : 
    5966                 :          0 :         ProcList old_list( sharedp );
    5967         [ #  # ]:          0 :         std::sort( old_list.procs, old_list.procs + nump );
    5968 [ #  # ][ #  # ]:          0 :         old_procs[old_list].insert( *rvit );
                 [ #  # ]
    5969                 :            : 
    5970                 :            :         // Remove null handles and corresponding proc ranks from lists
    5971                 :          0 :         int new_nump       = idx;
    5972                 :          0 :         bool removed_owner = !idx;
    5973         [ #  # ]:          0 :         for( ++idx; idx < nump; ++idx )
    5974                 :            :         {
    5975         [ #  # ]:          0 :             if( sharedh[idx] )
    5976                 :            :             {
    5977                 :          0 :                 sharedh[new_nump]       = sharedh[idx];
    5978                 :          0 :                 sharedp.procs[new_nump] = sharedp.procs[idx];
    5979                 :          0 :                 ++new_nump;
    5980                 :            :             }
    5981                 :            :         }
    5982                 :          0 :         sharedp.procs[new_nump] = -1;
    5983                 :            : 
    5984 [ #  # ][ #  # ]:          0 :         if( removed_owner && new_nump > 1 )
    5985                 :            :         {
    5986                 :            :             // The proc that we choose as the entity owner isn't sharing the
    5987                 :            :             // entity (doesn't have a copy of it). We need to pick a different
    5988                 :            :             // owner. Choose the proc with lowest rank.
    5989         [ #  # ]:          0 :             idx = std::min_element( sharedp.procs, sharedp.procs + new_nump ) - sharedp.procs;
    5990                 :          0 :             std::swap( sharedp.procs[0], sharedp.procs[idx] );
    5991                 :          0 :             std::swap( sharedh[0], sharedh[idx] );
    5992 [ #  # ][ #  # ]:          0 :             if( sharedp.procs[0] == (int)proc_config().proc_rank() ) pstatus &= ~PSTATUS_NOT_OWNED;
                 [ #  # ]
    5993                 :            :         }
    5994                 :            : 
    5995 [ #  # ][ #  # ]:          0 :         result = set_sharing_data( *rvit, pstatus, nump, new_nump, sharedp.procs, sharedh );MB_CHK_SET_ERR( result, "Failed to set sharing data in check_clean_iface" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    5996                 :            : 
    5997         [ #  # ]:          0 :         if( new_nump > 1 )
    5998                 :            :         {
    5999         [ #  # ]:          0 :             if( new_nump == 2 )
    6000                 :            :             {
    6001 [ #  # ][ #  # ]:          0 :                 if( sharedp.procs[1] != (int)proc_config().proc_rank() )
                 [ #  # ]
    6002                 :            :                 {
    6003 [ #  # ][ #  # ]:          0 :                     assert( sharedp.procs[0] == (int)proc_config().proc_rank() );
                 [ #  # ]
    6004                 :          0 :                     sharedp.procs[0] = sharedp.procs[1];
    6005                 :            :                 }
    6006                 :          0 :                 sharedp.procs[1] = -1;
    6007                 :            :             }
    6008                 :            :             else
    6009                 :            :             {
    6010         [ #  # ]:          0 :                 std::sort( sharedp.procs, sharedp.procs + new_nump );
    6011                 :            :             }
    6012 [ #  # ][ #  # ]:          0 :             new_procs[sharedp].insert( *rvit );
                 [ #  # ]
    6013                 :            :         }
    6014                 :            :     }
    6015                 :            : 
    6016         [ #  # ]:          0 :     if( old_procs.empty() )
    6017                 :            :     {
    6018         [ #  # ]:          0 :         assert( new_procs.empty() );
    6019                 :          0 :         return MB_SUCCESS;
    6020                 :            :     }
    6021                 :            : 
    6022                 :            :     // Update interface sets
    6023         [ #  # ]:          0 :     procmap_t::iterator pmit;
    6024                 :            :     // std::vector<unsigned char> pstatus_list;
    6025 [ #  # ][ #  # ]:          0 :     rit = interface_sets().begin();
    6026 [ #  # ][ #  # ]:          0 :     while( rit != interface_sets().end() )
         [ #  # ][ #  # ]
    6027                 :            :     {
    6028 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( *rit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data for interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6029         [ #  # ]:          0 :         assert( nump != 2 );
    6030         [ #  # ]:          0 :         std::sort( sharedp.procs, sharedp.procs + nump );
    6031 [ #  # ][ #  # ]:          0 :         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
    6032                 :            : 
    6033         [ #  # ]:          0 :         pmit = old_procs.find( sharedp );
    6034 [ #  # ][ #  # ]:          0 :         if( pmit != old_procs.end() )
    6035                 :            :         {
    6036 [ #  # ][ #  # ]:          0 :             result = mbImpl->remove_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6037                 :            :         }
    6038                 :            : 
    6039         [ #  # ]:          0 :         pmit = new_procs.find( sharedp );
    6040 [ #  # ][ #  # ]:          0 :         if( pmit == new_procs.end() )
    6041                 :            :         {
    6042                 :            :             int count;
    6043 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_number_entities_by_handle( *rit, count );MB_CHK_SET_ERR( result, "Failed to get number of entities in interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6044         [ #  # ]:          0 :             if( !count )
    6045                 :            :             {
    6046 [ #  # ][ #  # ]:          0 :                 result = mbImpl->delete_entities( &*rit, 1 );MB_CHK_SET_ERR( result, "Failed to delete entities from interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6047 [ #  # ][ #  # ]:          0 :                 rit = interface_sets().erase( rit );
    6048                 :            :             }
    6049                 :            :             else
    6050                 :            :             {
    6051         [ #  # ]:          0 :                 ++rit;
    6052                 :            :             }
    6053                 :            :         }
    6054                 :            :         else
    6055                 :            :         {
    6056 [ #  # ][ #  # ]:          0 :             result = mbImpl->add_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6057                 :            : 
    6058                 :            :             // Remove those that we've processed so that we know which ones
    6059                 :            :             // are new.
    6060         [ #  # ]:          0 :             new_procs.erase( pmit );
    6061         [ #  # ]:          0 :             ++rit;
    6062                 :            :         }
    6063                 :            :     }
    6064                 :            : 
    6065                 :            :     // Create interface sets for new proc id combinations
    6066         [ #  # ]:          0 :     std::fill( sharedh, sharedh + MAX_SHARING_PROCS, 0 );
    6067 [ #  # ][ #  # ]:          0 :     for( pmit = new_procs.begin(); pmit != new_procs.end(); ++pmit )
                 [ #  # ]
    6068                 :            :     {
    6069                 :            :         EntityHandle new_set;
    6070 [ #  # ][ #  # ]:          0 :         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6071         [ #  # ]:          0 :         interfaceSets.insert( new_set );
    6072                 :            : 
    6073                 :            :         // Add entities
    6074 [ #  # ][ #  # ]:          0 :         result = mbImpl->add_entities( new_set, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6075                 :            :         // Tag set with the proc rank(s)
    6076 [ #  # ][ #  # ]:          0 :         assert( pmit->first.procs[0] >= 0 );
    6077                 :          0 :         pstatus = PSTATUS_SHARED | PSTATUS_INTERFACE;
    6078 [ #  # ][ #  # ]:          0 :         if( pmit->first.procs[1] == -1 )
    6079                 :            :         {
    6080         [ #  # ]:          0 :             int other = pmit->first.procs[0];
    6081 [ #  # ][ #  # ]:          0 :             assert( other != (int)procConfig.proc_rank() );
    6082 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( sharedp_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6083                 :          0 :             sharedh[0] = 0;
    6084 [ #  # ][ #  # ]:          0 :             result     = mbImpl->tag_set_data( sharedh_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6085 [ #  # ][ #  # ]:          0 :             if( other < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
                 [ #  # ]
    6086                 :            :         }
    6087                 :            :         else
    6088                 :            :         {
    6089 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( sharedps_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6090 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_set_data( sharedhs_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6091                 :          0 :             pstatus |= PSTATUS_MULTISHARED;
    6092 [ #  # ][ #  # ]:          0 :             if( pmit->first.procs[0] < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
         [ #  # ][ #  # ]
    6093                 :            :         }
    6094                 :            : 
    6095 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( pstatus_tag(), &new_set, 1, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6096                 :            : 
    6097                 :            :         // Set pstatus on all interface entities in set
    6098 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_clear_data( pstatus_tag(), pmit->second, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface entities with pstatus" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6099                 :            :     }
    6100                 :            : 
    6101                 :          0 :     return MB_SUCCESS;
    6102                 :            : }
    6103                 :            : 
    6104                 :          0 : ErrorCode ParallelComm::set_sharing_data( EntityHandle ent, unsigned char pstatus, int old_nump, int new_nump, int* ps,
    6105                 :            :                                           EntityHandle* hs )
    6106                 :            : {
    6107                 :            :     // If new_nump is less than 3, the entity is no longer multishared
    6108 [ #  # ][ #  # ]:          0 :     if( old_nump > 2 && ( pstatus & PSTATUS_MULTISHARED ) && new_nump < 3 )
                 [ #  # ]
    6109                 :            :     {
    6110                 :            :         // Unset multishared flag
    6111                 :          0 :         pstatus ^= PSTATUS_MULTISHARED;
    6112                 :            :     }
    6113                 :            : 
    6114                 :            :     // Check for consistency in input data
    6115                 :            :     // DBG
    6116                 :            :     /*  bool con1 = ( ( new_nump == 2 && pstatus & PSTATUS_SHARED && !( pstatus & PSTATUS_MULTISHARED ) ) ||
    6117                 :            :                       ( new_nump > 2 && pstatus & PSTATUS_SHARED && pstatus & PSTATUS_MULTISHARED ) );
    6118                 :            :         bool con2 = ( !( pstatus & PSTATUS_GHOST ) || pstatus & PSTATUS_SHARED );
    6119                 :            :         bool con3 = ( new_nump < 3 || ( pstatus & PSTATUS_NOT_OWNED && ps[0] != (int)rank() ) ||
    6120                 :            :                       ( !( pstatus & PSTATUS_NOT_OWNED ) && ps[0] == (int)rank() ) );
    6121                 :            :         std::cout << "current rank = " << rank() << std::endl;  std::cout << "condition 1::" << con1 << std::endl;
    6122                 :            :         std::cout << "condition 2::" << con2 << std::endl;  std::cout << "condition 3::" << con3 << std::endl;  */
    6123                 :            : 
    6124                 :            :     // DBG
    6125                 :            : 
    6126 [ #  # ][ #  # ]:          0 :     assert( new_nump > 1 &&
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6127                 :            :             ( ( new_nump == 2 && pstatus & PSTATUS_SHARED &&
    6128                 :            :                 !( pstatus & PSTATUS_MULTISHARED ) ) ||  // If <= 2 must not be multishared
    6129                 :            :               ( new_nump > 2 && pstatus & PSTATUS_SHARED &&
    6130                 :            :                 pstatus & PSTATUS_MULTISHARED ) ) &&                         // If > 2 procs, must be multishared
    6131                 :            :             ( !( pstatus & PSTATUS_GHOST ) || pstatus & PSTATUS_SHARED ) &&  // If ghost, it must also be shared
    6132                 :            :             ( new_nump < 3 ||
    6133                 :            :               ( pstatus & PSTATUS_NOT_OWNED && ps[0] != (int)rank() ) ||      // I'm not owner and first proc not me
    6134                 :            :               ( !( pstatus & PSTATUS_NOT_OWNED ) && ps[0] == (int)rank() ) )  // I'm owner and first proc is me
    6135         [ #  # ]:          0 :     );
    6136                 :            : 
    6137                 :            : #ifndef NDEBUG
    6138                 :            :     {
    6139                 :            :         // Check for duplicates in proc list
    6140         [ #  # ]:          0 :         std::set< unsigned int > dumprocs;
    6141                 :          0 :         int dp = 0;
    6142 [ #  # ][ #  # ]:          0 :         for( ; dp < old_nump && -1 != ps[dp]; dp++ )
    6143         [ #  # ]:          0 :             dumprocs.insert( ps[dp] );
    6144         [ #  # ]:          0 :         assert( dp == (int)dumprocs.size() );
    6145                 :            :     }
    6146                 :            : #endif
    6147                 :            : 
    6148                 :            :     ErrorCode result;
    6149                 :            :     // Reset any old data that needs to be
    6150 [ #  # ][ #  # ]:          0 :     if( old_nump > 2 && new_nump < 3 )
    6151                 :            :     {
    6152                 :            :         // Need to remove multishared tags
    6153 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_delete_data( sharedps_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:1" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6154 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_delete_data( sharedhs_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:2" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6155                 :            :         //    if (new_nump < 2)
    6156                 :            :         //      pstatus = 0x0;
    6157                 :            :         //    else if (ps[0] != (int)proc_config().proc_rank())
    6158                 :            :         //      pstatus |= PSTATUS_NOT_OWNED;
    6159                 :            :     }
    6160 [ #  # ][ #  # ]:          0 :     else if( ( old_nump < 3 && new_nump > 2 ) || ( old_nump > 1 && new_nump == 1 ) )
         [ #  # ][ #  # ]
    6161                 :            :     {
    6162                 :            :         // Reset sharedp and sharedh tags
    6163                 :          0 :         int tmp_p          = -1;
    6164                 :          0 :         EntityHandle tmp_h = 0;
    6165 [ #  # ][ #  # ]:          0 :         result             = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, &tmp_p );MB_CHK_SET_ERR( result, "set_sharing_data:3" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6166 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, &tmp_h );MB_CHK_SET_ERR( result, "set_sharing_data:4" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6167                 :            :     }
    6168                 :            : 
    6169 [ #  # ][ #  # ]:          0 :     assert( "check for multishared/owner I'm first proc" &&
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6170                 :            :             ( !( pstatus & PSTATUS_MULTISHARED ) || ( pstatus & ( PSTATUS_NOT_OWNED | PSTATUS_GHOST ) ) ||
    6171                 :            :               ( ps[0] == (int)rank() ) ) &&
    6172                 :            :             "interface entities should have > 1 proc" && ( !( pstatus & PSTATUS_INTERFACE ) || new_nump > 1 ) &&
    6173         [ #  # ]:          0 :             "ghost entities should have > 1 proc" && ( !( pstatus & PSTATUS_GHOST ) || new_nump > 1 ) );
    6174                 :            : 
    6175                 :            :     // Now set new data
    6176         [ #  # ]:          0 :     if( new_nump > 2 )
    6177                 :            :     {
    6178 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedps_tag(), &ent, 1, ps );MB_CHK_SET_ERR( result, "set_sharing_data:5" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6179 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedhs_tag(), &ent, 1, hs );MB_CHK_SET_ERR( result, "set_sharing_data:6" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6180                 :            :     }
    6181                 :            :     else
    6182                 :            :     {
    6183         [ #  # ]:          0 :         unsigned int j = ( ps[0] == (int)procConfig.proc_rank() ? 1 : 0 );
    6184         [ #  # ]:          0 :         assert( -1 != ps[j] );
    6185 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, ps + j );MB_CHK_SET_ERR( result, "set_sharing_data:7" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6186 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, hs + j );MB_CHK_SET_ERR( result, "set_sharing_data:8" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6187                 :            :     }
    6188                 :            : 
    6189 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_set_data( pstatus_tag(), &ent, 1, &pstatus );MB_CHK_SET_ERR( result, "set_sharing_data:9" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6190                 :            : 
    6191 [ #  # ][ #  # ]:          0 :     if( old_nump > 1 && new_nump < 2 ) sharedEnts.erase( ent );
    6192                 :            : 
    6193                 :          0 :     return result;
    6194                 :            : }
    6195                 :            : 
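set_sharing_data is the one place where an entity's sharedp/sharedps, sharedh/sharedhs, and pstatus tags are updated together, so its asserts encode the invariants (shared implies a partner, multishared implies more than two procs, the owner is first in the list). A hedged sketch of a typical call, for the case check_clean_iface handles where a formerly multishared entity keeps only one remote copy; drop_to_pair and its arguments are hypothetical, and it assumes set_sharing_data is accessible to the caller.

    // Sketch only: downgrade an entity from multishared (3 procs) to shared with
    // one other proc, keeping this rank as owner. The caller supplies the surviving
    // remote proc and its handle; all values are illustrative.
    static ErrorCode drop_to_pair( ParallelComm& pc, EntityHandle ent,
                                   int other_proc, EntityHandle other_handle )
    {
        int procs[MAX_SHARING_PROCS]          = { (int)pc.rank(), other_proc, -1 };
        EntityHandle handles[MAX_SHARING_PROCS] = { ent, other_handle, 0 };
        unsigned char pstatus = PSTATUS_SHARED;  // multishared bit deliberately cleared
        return pc.set_sharing_data( ent, pstatus, /*old_nump*/ 3, /*new_nump*/ 2,
                                    procs, handles );
    }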
    6196                 :          3 : ErrorCode ParallelComm::get_sent_ents( const bool is_iface, const int bridge_dim, const int ghost_dim,
    6197                 :            :                                        const int num_layers, const int addl_ents, Range* sent_ents, Range& allsent,
    6198                 :            :                                        TupleList& entprocs )
    6199                 :            : {
    6200                 :            :     ErrorCode result;
    6201                 :            :     unsigned int ind;
    6202                 :          3 :     std::vector< unsigned int >::iterator proc_it;
    6203         [ +  - ]:          3 :     Range tmp_range;
    6204                 :            : 
    6205                 :            :     // Done in a separate loop over procs because sometimes later procs
    6206                 :            :     // need to add info to earlier procs' messages
    6207 [ #  # ][ +  - ]:          3 :     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
                 [ -  + ]
    6208                 :            :     {
    6209         [ #  # ]:          0 :         if( !is_iface )
    6210                 :            :         {
    6211                 :            :             result =
    6212 [ #  # ][ #  # ]:          0 :                 get_ghosted_entities( bridge_dim, ghost_dim, buffProcs[ind], num_layers, addl_ents, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get ghost layers" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6213                 :            :         }
    6214                 :            :         else
    6215                 :            :         {
    6216 [ #  # ][ #  # ]:          0 :             result = get_iface_entities( buffProcs[ind], -1, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get interface layers" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6217                 :            :         }
    6218                 :            : 
    6219                 :            :         // Filter out entities already shared with destination
    6220         [ #  # ]:          0 :         tmp_range.clear();
    6221 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6222 [ #  # ][ #  # ]:          0 :         if( !tmp_range.empty() ) sent_ents[ind] = subtract( sent_ents[ind], tmp_range );
         [ #  # ][ #  # ]
    6223                 :            : 
    6224         [ #  # ]:          0 :         allsent.merge( sent_ents[ind] );
    6225                 :            :     }
    6226                 :            : 
    6227                 :            :     //===========================================
    6228                 :            :     // Need to get procs each entity is sent to
    6229                 :            :     //===========================================
    6230                 :            : 
    6231                 :            :     // Get the total # of proc/handle pairs
    6232                 :          3 :     int npairs = 0;
    6233         [ -  + ]:          3 :     for( ind = 0; ind < buffProcs.size(); ind++ )
    6234         [ #  # ]:          0 :         npairs += sent_ents[ind].size();
    6235                 :            : 
    6236                 :            :     // Allocate a TupleList of that size
    6237         [ +  - ]:          3 :     entprocs.initialize( 1, 0, 1, 0, npairs );
    6238         [ +  - ]:          3 :     entprocs.enableWriteAccess();
    6239                 :            : 
    6240                 :            :     // Put the proc/handle pairs in the list
    6241 [ #  # ][ +  - ]:          3 :     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
                 [ -  + ]
    6242                 :            :     {
    6243 [ #  # ][ #  # ]:          0 :         for( Range::iterator rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    6244                 :            :         {
    6245 [ #  # ][ #  # ]:          0 :             entprocs.vi_wr[entprocs.get_n()]  = *proc_it;
    6246 [ #  # ][ #  # ]:          0 :             entprocs.vul_wr[entprocs.get_n()] = *rit;
    6247         [ #  # ]:          0 :             entprocs.inc_n();
    6248                 :            :         }
    6249                 :            :     }
    6250                 :            :     // Sort by handle
    6251         [ +  - ]:          6 :     moab::TupleList::buffer sort_buffer;
    6252         [ +  - ]:          3 :     sort_buffer.buffer_init( npairs );
    6253         [ +  - ]:          3 :     entprocs.sort( 1, &sort_buffer );
    6254                 :            : 
    6255         [ +  - ]:          3 :     entprocs.disableWriteAccess();
    6256         [ +  - ]:          3 :     sort_buffer.reset();
    6257                 :            : 
    6258                 :          3 :     return MB_SUCCESS;
    6259                 :            : }
    6260                 :            : 
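get_sent_ents records one (destination proc, entity handle) pair per sent entity in a TupleList and sorts it by handle before the packing stage consumes it. The fill/sort idiom in isolation, with made-up values; the field layout (1 int, 0 longs, 1 unsigned long, 0 reals) matches the initialize call above, everything else is illustrative.

    // Sketch only: TupleList with one int column (proc) and one ulong column (handle).
    TupleList pairs;
    pairs.initialize( 1, 0, 1, 0, 4 );          // #ints, #longs, #ulongs, #reals, capacity
    pairs.enableWriteAccess();
    for( int i = 0; i < 4; i++ )
    {
        pairs.vi_wr[pairs.get_n()]  = i % 2;    // destination proc (made up)
        pairs.vul_wr[pairs.get_n()] = 100 - i;  // entity handle (made up)
        pairs.inc_n();
    }
    TupleList::buffer sort_buf;
    sort_buf.buffer_init( 4 );
    pairs.sort( 1, &sort_buf );                 // key 1 = the handle column here
    pairs.disableWriteAccess();
    sort_buf.reset();
    pairs.reset();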
    6261                 :          0 : ErrorCode ParallelComm::exchange_ghost_cells( ParallelComm** pcs, unsigned int num_procs, int ghost_dim, int bridge_dim,
    6262                 :            :                                               int num_layers, int addl_ents, bool store_remote_handles,
    6263                 :            :                                               EntityHandle* file_sets )
    6264                 :            : {
    6265                 :            :     // Static version of function, exchanging info through buffers rather
    6266                 :            :     // than through messages
    6267                 :            : 
    6268                 :            :     // If we're only finding out about existing ents, we have to be storing
    6269                 :            :     // remote handles too
    6270 [ #  # ][ #  # ]:          0 :     assert( num_layers > 0 || store_remote_handles );
    6271                 :            : 
    6272                 :          0 :     const bool is_iface = !num_layers;
    6273                 :            : 
    6274                 :            :     unsigned int ind;
    6275                 :            :     ParallelComm* pc;
    6276                 :          0 :     ErrorCode result = MB_SUCCESS;
    6277                 :            : 
    6278 [ #  # ][ #  # ]:          0 :     std::vector< Error* > ehs( num_procs );
    6279         [ #  # ]:          0 :     for( unsigned int i = 0; i < num_procs; i++ )
    6280                 :            :     {
    6281 [ #  # ][ #  # ]:          0 :         result = pcs[i]->get_moab()->query_interface( ehs[i] );
                 [ #  # ]
    6282         [ #  # ]:          0 :         assert( MB_SUCCESS == result );
    6283                 :            :     }
    6284                 :            : 
    6285                 :            :     // When this function is called, buffProcs should already have any
    6286                 :            :     // communicating procs
    6287                 :            : 
    6288                 :            :     //===========================================
    6289                 :            :     // Get entities to be sent to neighbors
    6290                 :            :     //===========================================
    6291                 :            : 
    6292                 :            :     // Done in a separate loop over procs because sometimes later procs
    6293                 :            :     // need to add info to earlier procs' messages
    6294 [ #  # ][ #  # ]:          0 :     Range sent_ents[MAX_SHARING_PROCS][MAX_SHARING_PROCS], allsent[MAX_SHARING_PROCS];
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  #  
          #  #  #  #  #  
          #  #  #  #  #  
             #  #  #  # ]
    6295                 :            : 
    6296                 :            :     //===========================================
    6297                 :            :     // Get entities to be sent to neighbors
    6298                 :            :     //===========================================
    6299 [ #  # ][ #  # ]:          0 :     TupleList entprocs[MAX_SHARING_PROCS];
                 [ #  # ]
           [ #  #  #  # ]
    6300         [ #  # ]:          0 :     for( unsigned int p = 0; p < num_procs; p++ )
    6301                 :            :     {
    6302                 :          0 :         pc     = pcs[p];
    6303                 :            :         result = pc->get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents[p], allsent[p],
    6304 [ #  # ][ #  # ]:          0 :                                     entprocs[p] );MB_CHK_SET_ERR( result, "p = " << p << ", get_sent_ents failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6305                 :            : 
    6306                 :            :         //===========================================
    6307                 :            :         // Pack entities into buffers
    6308                 :            :         //===========================================
    6309         [ #  # ]:          0 :         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
    6310                 :            :         {
    6311                 :            :             // Entities
    6312 [ #  # ][ #  # ]:          0 :             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    6313         [ #  # ]:          0 :             result = pc->pack_entities( sent_ents[p][ind], pc->localOwnedBuffs[ind], store_remote_handles,
    6314 [ #  # ][ #  # ]:          0 :                                         pc->buffProcs[ind], is_iface, &entprocs[p], &allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", packing entities failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6315                 :            :         }
    6316                 :            : 
    6317         [ #  # ]:          0 :         entprocs[p].reset();
    6318                 :            :     }
    6319                 :            : 
    6320                 :            :     //===========================================
    6321                 :            :     // Receive/unpack new entities
    6322                 :            :     //===========================================
    6323                 :            :     // Number of incoming messages for ghosts is the number of procs we
    6324                 :            :     // communicate with; for iface, it's the number of those with lower rank
    6325 [ #  # ][ #  # ]:          0 :     std::vector< std::vector< EntityHandle > > L1hloc[MAX_SHARING_PROCS], L1hrem[MAX_SHARING_PROCS];
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
           [ #  #  #  #  
          #  #  #  #  #  
                #  #  # ]
    6326 [ #  # ][ #  # ]:          0 :     std::vector< std::vector< int > > L1p[MAX_SHARING_PROCS];
         [ #  # ][ #  #  
             #  #  #  # ]
    6327 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > L2hloc[MAX_SHARING_PROCS], L2hrem[MAX_SHARING_PROCS];
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
           [ #  #  #  #  
          #  #  #  #  #  
                #  #  # ]
    6328 [ #  # ][ #  # ]:          0 :     std::vector< unsigned int > L2p[MAX_SHARING_PROCS];
         [ #  # ][ #  #  
             #  #  #  # ]
    6329 [ #  # ][ #  # ]:          0 :     std::vector< EntityHandle > new_ents[MAX_SHARING_PROCS];
                 [ #  # ]
           [ #  #  #  # ]
    6330                 :            : 
    6331         [ #  # ]:          0 :     for( unsigned int p = 0; p < num_procs; p++ )
    6332                 :            :     {
    6333         [ #  # ]:          0 :         L1hloc[p].resize( pcs[p]->buffProcs.size() );
    6334         [ #  # ]:          0 :         L1hrem[p].resize( pcs[p]->buffProcs.size() );
    6335         [ #  # ]:          0 :         L1p[p].resize( pcs[p]->buffProcs.size() );
    6336                 :            :     }
    6337                 :            : 
    6338         [ #  # ]:          0 :     for( unsigned int p = 0; p < num_procs; p++ )
    6339                 :            :     {
    6340                 :          0 :         pc = pcs[p];
    6341                 :            : 
    6342         [ #  # ]:          0 :         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
    6343                 :            :         {
    6344                 :            :             // Incoming ghost entities; unpack; returns entities received
    6345                 :            :             // both from sending proc and from owning proc (which may be different)
    6346                 :            : 
    6347                 :            :             // Buffer could be empty, which means there isn't any message to
    6348                 :            :             // unpack (due to this comm proc getting added as a result of indirect
    6349                 :            :             // communication); just skip this unpack
    6350 [ #  # ][ #  # ]:          0 :             if( pc->localOwnedBuffs[ind]->get_stored_size() == 0 ) continue;
                 [ #  # ]
    6351                 :            : 
    6352         [ #  # ]:          0 :             unsigned int to_p = pc->buffProcs[ind];
    6353 [ #  # ][ #  # ]:          0 :             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    6354         [ #  # ]:          0 :             result = pcs[to_p]->unpack_entities( pc->localOwnedBuffs[ind]->buff_ptr, store_remote_handles, ind,
    6355                 :            :                                                  is_iface, L1hloc[to_p], L1hrem[to_p], L1p[to_p], L2hloc[to_p],
    6356 [ #  # ][ #  # ]:          0 :                                                  L2hrem[to_p], L2p[to_p], new_ents[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6357                 :            :         }
    6358                 :            :     }
    6359                 :            : 
    6360         [ #  # ]:          0 :     if( is_iface )
    6361                 :            :     {
    6362                 :            :         // Need to check over entities I sent and make sure I received
    6363                 :            :         // handles for them from all expected procs; if not, need to clean
    6364                 :            :         // them up
    6365         [ #  # ]:          0 :         for( unsigned int p = 0; p < num_procs; p++ )
    6366                 :            :         {
    6367 [ #  # ][ #  # ]:          0 :             result = pcs[p]->check_clean_iface( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6368                 :            :         }
    6369                 :            : 
    6370                 :            : #ifndef NDEBUG
    6371         [ #  # ]:          0 :         for( unsigned int p = 0; p < num_procs; p++ )
    6372                 :            :         {
    6373 [ #  # ][ #  # ]:          0 :             result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6374                 :            :         }
    6375 [ #  # ][ #  # ]:          0 :         result = check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6376                 :            : #endif
    6377                 :          0 :         return MB_SUCCESS;
    6378                 :            :     }
    6379                 :            : 
    6380                 :            :     //===========================================
    6381                 :            :     // Send local handles for new ghosts to owner, then add
    6382                 :            :     // those to ghost list for that owner
    6383                 :            :     //===========================================
    6384                 :          0 :     std::vector< unsigned int >::iterator proc_it;
    6385         [ #  # ]:          0 :     for( unsigned int p = 0; p < num_procs; p++ )
    6386                 :            :     {
    6387                 :          0 :         pc = pcs[p];
    6388                 :            : 
    6389 [ #  # ][ #  # ]:          0 :         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
                 [ #  # ]
    6390                 :            :         {
    6391                 :            :             // Skip if iface layer and higher-rank proc
    6392 [ #  # ][ #  # ]:          0 :             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    6393 [ #  # ][ #  # ]:          0 :             result = pc->pack_remote_handles( L1hloc[p][ind], L1hrem[p][ind], L1p[p][ind], *proc_it,
         [ #  # ][ #  # ]
    6394 [ #  # ][ #  # ]:          0 :                                               pc->localOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to pack remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6395                 :            :         }
    6396                 :            :     }
    6397                 :            : 
    6398                 :            :     //===========================================
    6399                 :            :     // Process remote handles of my ghosteds
    6400                 :            :     //===========================================
    6401         [ #  # ]:          0 :     for( unsigned int p = 0; p < num_procs; p++ )
    6402                 :            :     {
    6403                 :          0 :         pc = pcs[p];
    6404                 :            : 
    6405 [ #  # ][ #  # ]:          0 :         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
                 [ #  # ]
    6406                 :            :         {
    6407                 :            :             // Incoming remote handles
    6408         [ #  # ]:          0 :             unsigned int to_p = pc->buffProcs[ind];
    6409 [ #  # ][ #  # ]:          0 :             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    6410         [ #  # ]:          0 :             result = pcs[to_p]->unpack_remote_handles( p, pc->localOwnedBuffs[ind]->buff_ptr, L2hloc[to_p],
    6411 [ #  # ][ #  # ]:          0 :                                                        L2hrem[to_p], L2p[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6412                 :            :         }
    6413                 :            :     }
    6414                 :            : 
    6415                 :            : #ifndef NDEBUG
    6416         [ #  # ]:          0 :     for( unsigned int p = 0; p < num_procs; p++ )
    6417                 :            :     {
    6418 [ #  # ][ #  # ]:          0 :         result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6419                 :            :     }
    6420                 :            : 
    6421 [ #  # ][ #  # ]:          0 :     result = ParallelComm::check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6422                 :            : #endif
    6423                 :            : 
    6424         [ #  # ]:          0 :     if( file_sets )
    6425                 :            :     {
    6426         [ #  # ]:          0 :         for( unsigned int p = 0; p < num_procs; p++ )
    6427                 :            :         {
    6428         [ #  # ]:          0 :             if( new_ents[p].empty() ) continue;
    6429 [ #  # ][ #  # ]:          0 :             result = pcs[p]->get_moab()->add_entities( file_sets[p], &new_ents[p][0], new_ents[p].size() );MB_CHK_SET_ERR( result, "p = " << p << ", failed to add new entities to set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6430                 :            :         }
    6431                 :            :     }
    6432                 :            : 
    6433                 :          0 :     return MB_SUCCESS;
    6434                 :            : }
    6435                 :            : 
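This static overload drives the whole ghost exchange through in-memory buffers across several ParallelComm instances living in the same process (e.g. for serial testing of the parallel path), rather than through MPI messages. A hedged sketch of a call, assuming pc0 and pc1 are already-populated ParallelComm objects with resolved shared entities; the argument values are illustrative.

    // Sketch only: one layer of ghosts, buffer-based, two ParallelComm instances.
    moab::ParallelComm* pcs[2] = { pc0, pc1 };  // hypothetical, already set up
    moab::ErrorCode rval = moab::ParallelComm::exchange_ghost_cells(
        pcs, /*num_procs*/ 2, /*ghost_dim*/ 3, /*bridge_dim*/ 0, /*num_layers*/ 1,
        /*addl_ents*/ 0, /*store_remote_handles*/ true, /*file_sets*/ NULL );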
    6436                 :          0 : ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& exchange_procs )
    6437                 :            : {
    6438                 :            :     // Set buffers
    6439                 :          0 :     int n_proc = exchange_procs.size();
    6440         [ #  # ]:          0 :     for( int i = 0; i < n_proc; i++ )
    6441                 :          0 :         get_buffers( exchange_procs[i] );
    6442                 :          0 :     reset_all_buffers();
    6443                 :            : 
    6444                 :            :     // Post ghost irecv's for entities from all communicating procs
    6445                 :            :     // Index requests the same as buffer/sharing procs indices
    6446                 :            :     int success;
    6447         [ #  # ]:          0 :     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    6448         [ #  # ]:          0 :     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    6449         [ #  # ]:          0 :     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    6450                 :            : 
    6451                 :          0 :     int incoming = 0;
    6452         [ #  # ]:          0 :     for( int i = 0; i < n_proc; i++ )
    6453                 :            :     {
    6454                 :          0 :         int ind = get_buffers( exchange_procs[i] );
    6455                 :          0 :         incoming++;
    6456                 :          0 :         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
    6457                 :          0 :                            MB_MESG_ENTS_SIZE, incoming );
    6458                 :          0 :         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
    6459                 :          0 :                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
    6460 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6461                 :            :     }
    6462                 :            : 
    6463                 :          0 :     return MB_SUCCESS;
    6464                 :            : }
    6465                 :            : 
    6466                 :          0 : ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs )
    6467                 :            : {
    6468                 :            :     // Set buffers
    6469                 :          0 :     int num = shared_procs.size();
    6470         [ #  # ]:          0 :     for( int i = 0; i < num; i++ )
    6471 [ #  # ][ #  # ]:          0 :         get_buffers( shared_procs[i] );
    6472         [ #  # ]:          0 :     reset_all_buffers();
    6473                 :          0 :     num = remoteOwnedBuffs.size();
    6474         [ #  # ]:          0 :     for( int i = 0; i < num; i++ )
    6475 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs[i]->set_stored_size();
    6476                 :          0 :     num = localOwnedBuffs.size();
    6477         [ #  # ]:          0 :     for( int i = 0; i < num; i++ )
    6478 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[i]->set_stored_size();
    6479                 :            : 
    6480                 :            :     // Post ghost irecv's for entities from all communicating procs
    6481                 :            :     // Index requests the same as buffer/sharing procs indices
    6482                 :            :     int success;
    6483         [ #  # ]:          0 :     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    6484         [ #  # ]:          0 :     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    6485         [ #  # ]:          0 :     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
    6486                 :            : 
    6487                 :          0 :     int incoming                           = 0;
    6488                 :          0 :     std::set< unsigned int >::iterator it  = recv_procs.begin();
    6489                 :          0 :     std::set< unsigned int >::iterator eit = recv_procs.end();
    6490 [ #  # ][ #  # ]:          0 :     for( ; it != eit; ++it )
                 [ #  # ]
    6491                 :            :     {
    6492 [ #  # ][ #  # ]:          0 :         int ind = get_buffers( *it );
    6493                 :          0 :         incoming++;
    6494 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
                 [ #  # ]
    6495         [ #  # ]:          0 :                            MB_MESG_ENTS_SIZE, incoming );
    6496 [ #  # ][ #  # ]:          0 :         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
    6497 [ #  # ][ #  # ]:          0 :                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
                 [ #  # ]
    6498 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6499                 :            :     }
    6500                 :            : 
    6501                 :          0 :     return MB_SUCCESS;
    6502                 :            : }
    6503                 :            : 
    6504                 :          1 : ErrorCode ParallelComm::exchange_owned_meshs( std::vector< unsigned int >& exchange_procs,
    6505                 :            :                                               std::vector< Range* >& exchange_ents,
    6506                 :            :                                               std::vector< MPI_Request >& recv_ent_reqs,
    6507                 :            :                                               std::vector< MPI_Request >& recv_remoteh_reqs, bool store_remote_handles,
    6508                 :            :                                               bool wait_all, bool migrate, int dim )
    6509                 :            : {
    6510                 :            :     // Filter out entities already shared with destination
    6511                 :            :     // Exchange twice for entities and sets
    6512                 :            :     ErrorCode result;
    6513         [ +  - ]:          1 :     std::vector< unsigned int > exchange_procs_sets;
    6514         [ +  - ]:          2 :     std::vector< Range* > exchange_sets;
    6515                 :          1 :     int n_proc = exchange_procs.size();
    6516         [ -  + ]:          1 :     for( int i = 0; i < n_proc; i++ )
    6517                 :            :     {
    6518 [ #  # ][ #  # ]:          0 :         Range set_range   = exchange_ents[i]->subset_by_type( MBENTITYSET );
    6519 [ #  # ][ #  # ]:          0 :         *exchange_ents[i] = subtract( *exchange_ents[i], set_range );
         [ #  # ][ #  # ]
    6520 [ #  # ][ #  # ]:          0 :         Range* tmp_range  = new Range( set_range );
    6521         [ #  # ]:          0 :         exchange_sets.push_back( tmp_range );
    6522 [ #  # ][ #  # ]:          0 :         exchange_procs_sets.push_back( exchange_procs[i] );
    6523                 :          0 :     }
    6524                 :            : 
    6525         [ -  + ]:          1 :     if( dim == 2 )
    6526                 :            :     {
    6527                 :            :         // Exchange entities first
    6528                 :            :         result = exchange_owned_mesh( exchange_procs, exchange_ents, recvReqs, recvRemotehReqs, true,
    6529 [ #  # ][ #  # ]:          0 :                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6530                 :            : 
    6531                 :            :         // Exchange sets
    6532                 :            :         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recvReqs, recvRemotehReqs, false,
    6533         [ #  # ]:          0 :                                       store_remote_handles, wait_all, migrate );
    6534                 :            :     }
    6535                 :            :     else
    6536                 :            :     {
    6537                 :            :         // Exchange entities first
    6538                 :            :         result = exchange_owned_mesh( exchange_procs, exchange_ents, recv_ent_reqs, recv_remoteh_reqs, false,
    6539 [ +  - ][ -  + ]:          1 :                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6540                 :            : 
    6541                 :            :         // Exchange sets
    6542                 :            :         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recv_ent_reqs, recv_remoteh_reqs, false,
    6543 [ +  - ][ -  + ]:          1 :                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6544                 :            :     }
    6545                 :            : 
    6546         [ -  + ]:          1 :     for( int i = 0; i < n_proc; i++ )
    6547 [ #  # ][ #  # ]:          0 :         delete exchange_sets[i];
    6548                 :            : 
    6549                 :            :     // Build up the list of shared entities
    6550         [ +  - ]:          2 :     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
    6551                 :            :     int procs[MAX_SHARING_PROCS];
    6552                 :            :     EntityHandle handles[MAX_SHARING_PROCS];
    6553                 :            :     int nprocs;
    6554                 :            :     unsigned char pstat;
    6555 [ #  # ][ +  - ]:          1 :     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
                 [ -  + ]
    6556                 :            :     {
    6557 [ #  # ][ #  # ]:          0 :         if( mbImpl->dimension_from_handle( *vit ) > 2 ) continue;
                 [ #  # ]
    6558 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data in exchange_owned_meshs" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6559         [ #  # ]:          0 :         std::sort( procs, procs + nprocs );
    6560         [ #  # ]:          0 :         std::vector< int > tmp_procs( procs, procs + nprocs );
    6561         [ #  # ]:          0 :         assert( tmp_procs.size() != 2 );
    6562 [ #  # ][ #  # ]:          0 :         proc_nvecs[tmp_procs].push_back( *vit );
                 [ #  # ]
    6563                 :          0 :     }
    6564                 :            : 
    6565                 :            :     // Create interface sets from shared entities
    6566 [ +  - ][ -  + ]:          1 :     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6567                 :            : 
    6568                 :          2 :     return MB_SUCCESS;
    6569                 :            : }
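
Editor's note: a minimal caller-side sketch of exchange_owned_meshs, for readers following the flow above. The names pc (a ParallelComm*), neighbor (a destination rank) and to_send (a Range of locally owned elements) are assumptions for illustration, not part of this file.

    std::vector< unsigned int > procs( 1, neighbor );        // one destination rank
    Range owned = to_send;                                    // entities this rank owns and wants to push
    std::vector< Range* > ranges( 1, &owned );
    std::vector< MPI_Request > recv_ents, recv_handles;       // resized inside the call
    ErrorCode rval = pc->exchange_owned_meshs( procs, ranges, recv_ents, recv_handles,
                                               true /*store_remote_handles*/, true /*wait_all*/,
                                               true /*migrate*/, 0 /*dim*/ );
    if( MB_SUCCESS != rval ) { /* handle error */ }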
    6570                 :            : 
    6571                 :          2 : ErrorCode ParallelComm::exchange_owned_mesh( std::vector< unsigned int >& exchange_procs,
    6572                 :            :                                              std::vector< Range* >& exchange_ents,
    6573                 :            :                                              std::vector< MPI_Request >& recv_ent_reqs,
    6574                 :            :                                              std::vector< MPI_Request >& recv_remoteh_reqs, const bool recv_posted,
    6575                 :            :                                              bool store_remote_handles, bool wait_all, bool migrate )
    6576                 :            : {
    6577                 :            : #ifdef MOAB_HAVE_MPE
    6578                 :            :     if( myDebug->get_verbosity() == 2 )
    6579                 :            :     { MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting owned ents exchange." ); }
    6580                 :            : #endif
    6581                 :            : 
    6582         [ +  - ]:          2 :     myDebug->tprintf( 1, "Entering exchange_owned_mesh\n" );
    6583 [ +  - ][ -  + ]:          2 :     if( myDebug->get_verbosity() == 4 )
    6584                 :            :     {
    6585                 :          0 :         msgs.clear();
    6586         [ #  # ]:          0 :         msgs.reserve( MAX_SHARING_PROCS );
    6587                 :            :     }
    6588                 :            :     unsigned int i;
    6589                 :            :     int ind, success;
    6590                 :          2 :     ErrorCode result = MB_SUCCESS;
    6591                 :          2 :     int incoming1 = 0, incoming2 = 0;
    6592                 :            : 
    6593                 :            :     // Set buffProcs with communicating procs
    6594                 :          2 :     unsigned int n_proc = exchange_procs.size();
    6595         [ -  + ]:          2 :     for( i = 0; i < n_proc; i++ )
    6596                 :            :     {
    6597 [ #  # ][ #  # ]:          0 :         ind    = get_buffers( exchange_procs[i] );
    6598 [ #  # ][ #  # ]:          0 :         result = add_verts( *exchange_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6599                 :            : 
    6600                 :            :         // Filter out entities already shared with destination
    6601         [ #  # ]:          0 :         Range tmp_range;
    6602 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( *exchange_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6603 [ #  # ][ #  # ]:          0 :         if( !tmp_range.empty() ) { *exchange_ents[i] = subtract( *exchange_ents[i], tmp_range ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6604                 :          0 :     }
    6605                 :            : 
    6606                 :            :     //===========================================
    6607                 :            :     // Post ghost irecv's for entities from all communicating procs
    6608                 :            :     //===========================================
    6609                 :            : #ifdef MOAB_HAVE_MPE
    6610                 :            :     if( myDebug->get_verbosity() == 2 )
    6611                 :            :     { MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." ); }
    6612                 :            : #endif
    6613                 :            : 
    6614                 :            :     // Index reqs the same as buffer/sharing procs indices
    6615         [ +  - ]:          2 :     if( !recv_posted )
    6616                 :            :     {
    6617         [ +  - ]:          2 :         reset_all_buffers();
    6618         [ +  - ]:          2 :         recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    6619         [ +  - ]:          2 :         recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    6620         [ +  - ]:          2 :         sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    6621                 :            : 
    6622         [ -  + ]:          2 :         for( i = 0; i < n_proc; i++ )
    6623                 :            :         {
    6624 [ #  # ][ #  # ]:          0 :             ind = get_buffers( exchange_procs[i] );
    6625                 :          0 :             incoming1++;
    6626 [ #  # ][ #  # ]:          0 :             PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr,
                 [ #  # ]
    6627         [ #  # ]:          0 :                                INITIAL_BUFF_SIZE, MB_MESG_ENTS_SIZE, incoming1 );
    6628 [ #  # ][ #  # ]:          0 :             success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
    6629 [ #  # ][ #  # ]:          0 :                                  MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
                 [ #  # ]
    6630 [ #  # ][ #  # ]:          0 :             if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6631                 :            :         }
    6632                 :            :     }
    6633                 :            :     else
    6634                 :          0 :         incoming1 += n_proc;
    6635                 :            : 
    6636                 :            :     //===========================================
    6637                 :            :     // Get entities to be sent to neighbors
    6638                 :            :     // Need to get procs each entity is sent to
    6639                 :            :     //===========================================
    6640 [ +  - ][ +  - ]:          4 :     Range allsent, tmp_range;
    6641                 :            :     int dum_ack_buff;
    6642                 :          2 :     int npairs = 0;
    6643         [ +  - ]:          4 :     TupleList entprocs;
    6644         [ -  + ]:          2 :     for( i = 0; i < n_proc; i++ )
    6645                 :            :     {
    6646 [ #  # ][ #  # ]:          0 :         int n_ents = exchange_ents[i]->size();
    6647         [ #  # ]:          0 :         if( n_ents > 0 )
    6648                 :            :         {
    6649                 :          0 :             npairs += n_ents;  // Get the total # of proc/handle pairs
    6650 [ #  # ][ #  # ]:          0 :             allsent.merge( *exchange_ents[i] );
    6651                 :            :         }
    6652                 :            :     }
    6653                 :            : 
    6654                 :            :     // Allocate a TupleList of that size
    6655         [ +  - ]:          2 :     entprocs.initialize( 1, 0, 1, 0, npairs );
    6656         [ +  - ]:          2 :     entprocs.enableWriteAccess();
    6657                 :            : 
    6658                 :            :     // Put the proc/handle pairs in the list
    6659         [ -  + ]:          2 :     for( i = 0; i < n_proc; i++ )
    6660                 :            :     {
    6661 [ #  # ][ #  # ]:          0 :         for( Range::iterator rit = exchange_ents[i]->begin(); rit != exchange_ents[i]->end(); ++rit )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6662                 :            :         {
    6663 [ #  # ][ #  # ]:          0 :             entprocs.vi_wr[entprocs.get_n()]  = exchange_procs[i];
    6664 [ #  # ][ #  # ]:          0 :             entprocs.vul_wr[entprocs.get_n()] = *rit;
    6665         [ #  # ]:          0 :             entprocs.inc_n();
    6666                 :            :         }
    6667                 :            :     }
    6668                 :            : 
    6669                 :            :     // Sort by handle
    6670         [ +  - ]:          4 :     moab::TupleList::buffer sort_buffer;
    6671         [ +  - ]:          2 :     sort_buffer.buffer_init( npairs );
    6672         [ +  - ]:          2 :     entprocs.sort( 1, &sort_buffer );
    6673         [ +  - ]:          2 :     sort_buffer.reset();
    6674                 :            : 
    6675                 :            :     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
    6676 [ +  - ][ +  - ]:          2 :                       (unsigned long)allsent.size() );
                 [ +  - ]
    6677                 :            : 
    6678                 :            :     //===========================================
    6679                 :            :     // Pack and send ents from this proc to others
    6680                 :            :     //===========================================
    6681         [ -  + ]:          2 :     for( i = 0; i < n_proc; i++ )
    6682                 :            :     {
    6683 [ #  # ][ #  # ]:          0 :         ind = get_buffers( exchange_procs[i] );
    6684         [ #  # ]:          0 :         myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", exchange_ents[i]->compactness(),
    6685 [ #  # ][ #  # ]:          0 :                           (unsigned long)exchange_ents[i]->size() );
         [ #  # ][ #  # ]
    6686                 :            :         // Reserve space on front for size and for initial buff size
    6687 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
    6688 [ #  # ][ #  # ]:          0 :         result = pack_buffer( *exchange_ents[i], false, true, store_remote_handles, buffProcs[ind],
    6689 [ #  # ][ #  # ]:          0 :                               localOwnedBuffs[ind], &entprocs, &allsent );
    6690                 :            : 
    6691 [ #  # ][ #  # ]:          0 :         if( myDebug->get_verbosity() == 4 )
    6692                 :            :         {
    6693         [ #  # ]:          0 :             msgs.resize( msgs.size() + 1 );
    6694 [ #  # ][ #  # ]:          0 :             msgs.back() = new Buffer( *localOwnedBuffs[ind] );
         [ #  # ][ #  # ]
    6695                 :            :         }
    6696                 :            : 
    6697                 :            :         // Send the buffer (size stored in front in send_buffer)
    6698 [ #  # ][ #  # ]:          0 :         result = send_buffer( exchange_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
                 [ #  # ]
    6699         [ #  # ]:          0 :                               recv_ent_reqs[3 * ind + 2], &dum_ack_buff, incoming1, MB_MESG_REMOTEH_SIZE,
    6700 [ #  # ][ #  # ]:          0 :                               ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recv_remoteh_reqs[3 * ind],
    6701 [ #  # ][ #  # ]:          0 :                               &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost exchange" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6702                 :            :     }
    6703                 :            : 
    6704         [ +  - ]:          2 :     entprocs.reset();
    6705                 :            : 
    6706                 :            :     //===========================================
    6707                 :            :     // Receive/unpack new entities
    6708                 :            :     //===========================================
    6709                 :            :     // Number of incoming messages is the number of procs we communicate with
    6710                 :            :     MPI_Status status;
    6711         [ +  - ]:          4 :     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
    6712 [ +  - ][ +  - ]:          4 :     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
    6713         [ +  - ]:          4 :     std::vector< std::vector< int > > L1p( buffProcs.size() );
    6714 [ +  - ][ +  - ]:          4 :     std::vector< EntityHandle > L2hloc, L2hrem;
    6715         [ +  - ]:          4 :     std::vector< unsigned int > L2p;
    6716         [ +  - ]:          4 :     std::vector< EntityHandle > new_ents;
    6717                 :            : 
    6718         [ -  + ]:          2 :     while( incoming1 )
    6719                 :            :     {
    6720                 :            :         // Wait for all recvs of ents before proceeding to sending remote handles,
     6721                 :            :         // because some procs may have sent entities owned by me to a third proc
    6722 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
    6723                 :            : 
    6724 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &ind, &status );
    6725 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6726                 :            : 
    6727         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    6728                 :            : 
    6729                 :            :         // OK, received something; decrement incoming counter
    6730                 :          0 :         incoming1--;
    6731                 :          0 :         bool done = false;
    6732                 :            : 
     6733                 :            :         // In case ind is for an ack or follow-up request, get the first index of that proc's request triple
    6734                 :          0 :         unsigned int base_ind = 3 * ( ind / 3 );
    6735 [ #  # ][ #  # ]:          0 :         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 3], recv_ent_reqs[base_ind + 1],
    6736 [ #  # ][ #  # ]:          0 :                               recv_ent_reqs[base_ind + 2], incoming1, localOwnedBuffs[ind / 3], sendReqs[base_ind + 1],
                 [ #  # ]
    6737 [ #  # ][ #  # ]:          0 :                               sendReqs[base_ind + 2], done, ( store_remote_handles ? localOwnedBuffs[ind / 3] : NULL ),
    6738 [ #  # ][ #  # ]:          0 :                               MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind + 1], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6739                 :            : 
    6740         [ #  # ]:          0 :         if( done )
    6741                 :            :         {
    6742 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 4 )
    6743                 :            :             {
    6744         [ #  # ]:          0 :                 msgs.resize( msgs.size() + 1 );
    6745 [ #  # ][ #  # ]:          0 :                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 3] );
         [ #  # ][ #  # ]
    6746                 :            :             }
    6747                 :            : 
    6748                 :            :             // Message completely received - process buffer that was sent
    6749 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
    6750 [ #  # ][ #  # ]:          0 :             result = unpack_buffer( remoteOwnedBuffs[ind / 3]->buff_ptr, store_remote_handles, buffProcs[ind / 3],
    6751         [ #  # ]:          0 :                                     ind / 3, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
    6752         [ #  # ]:          0 :             if( MB_SUCCESS != result )
    6753                 :            :             {
    6754 [ #  # ][ #  # ]:          0 :                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
    6755 [ #  # ][ #  # ]:          0 :                 print_buffer( remoteOwnedBuffs[ind / 3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 3], false );
                 [ #  # ]
    6756                 :          0 :                 return result;
    6757                 :            :             }
    6758                 :            : 
    6759         [ #  # ]:          0 :             if( recv_ent_reqs.size() != 3 * buffProcs.size() )
    6760                 :            :             {
    6761                 :            :                 // Post irecv's for remote handles from new proc
    6762         [ #  # ]:          0 :                 recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    6763         [ #  # ]:          0 :                 for( i = recv_ent_reqs.size(); i < 3 * buffProcs.size(); i += 3 )
    6764                 :            :                 {
    6765 [ #  # ][ #  # ]:          0 :                     localOwnedBuffs[i / 3]->reset_buffer();
    6766                 :          0 :                     incoming2++;
    6767 [ #  # ][ #  # ]:          0 :                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 3], localOwnedBuffs[i / 3]->mem_ptr,
                 [ #  # ]
    6768         [ #  # ]:          0 :                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
    6769         [ #  # ]:          0 :                     success = MPI_Irecv( localOwnedBuffs[i / 3]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
    6770         [ #  # ]:          0 :                                          buffProcs[i / 3], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
    6771 [ #  # ][ #  # ]:          0 :                                          &recv_remoteh_reqs[i] );
                 [ #  # ]
    6772         [ #  # ]:          0 :                     if( success != MPI_SUCCESS )
    6773 [ #  # ][ #  # ]:          0 :                     { MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    6774                 :            :                 }
    6775         [ #  # ]:          0 :                 recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    6776         [ #  # ]:          0 :                 sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    6777                 :            :             }
    6778                 :            :         }
    6779                 :            :     }
    6780                 :            : 
     6781                 :            :     // Assign newly received entities to this proc's part; if migrating, remove the sent entities from it
    6782 [ +  - ][ +  - ]:          2 :     result = assign_entities_part( new_ents, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to assign entities to part" );
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6783         [ -  + ]:          2 :     if( migrate )
    6784                 :            :     {
     6785 [ #  # ][ #  # ]:          0 :         result = remove_entities_part( allsent, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to remove entities from part" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6786                 :            :     }
    6787                 :            : 
    6788                 :            :     // Add requests for any new addl procs
    6789         [ -  + ]:          2 :     if( recv_ent_reqs.size() != 3 * buffProcs.size() )
    6790                 :            :     {
    6791                 :            :         // Shouldn't get here...
    6792 [ #  # ][ #  # ]:          0 :         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in entity exchange" );
         [ #  # ][ #  # ]
                 [ #  # ]
    6793                 :            :     }
    6794                 :            : 
    6795                 :            : #ifdef MOAB_HAVE_MPE
    6796                 :            :     if( myDebug->get_verbosity() == 2 )
    6797                 :            :     { MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange." ); }
    6798                 :            : #endif
    6799                 :            : 
     6800                 :            :     // We still need to wait on sendReqs if they are not yet fulfilled
    6801         [ +  - ]:          2 :     if( wait_all )
    6802                 :            :     {
    6803 [ +  - ][ -  + ]:          2 :         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    6804                 :            :         else
    6805                 :            :         {
    6806                 :            :             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
    6807 [ +  - ][ +  - ]:          2 :             success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
    6808 [ -  + ][ #  # ]:          2 :             if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in exchange owned mesh" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6809                 :            :         }
    6810                 :            :     }
    6811                 :            : 
    6812                 :            :     //===========================================
    6813                 :            :     // Send local handles for new entity to owner
    6814                 :            :     //===========================================
    6815         [ -  + ]:          2 :     for( i = 0; i < n_proc; i++ )
    6816                 :            :     {
    6817 [ #  # ][ #  # ]:          0 :         ind = get_buffers( exchange_procs[i] );
    6818                 :            :         // Reserve space on front for size and for initial buff size
    6819 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
    6820                 :            : 
    6821 [ #  # ][ #  # ]:          0 :         result = pack_remote_handles( L1hloc[ind], L1hrem[ind], L1p[ind], buffProcs[ind], remoteOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6822 [ #  # ][ #  # ]:          0 :         remoteOwnedBuffs[ind]->set_stored_size();
    6823                 :            : 
    6824 [ #  # ][ #  # ]:          0 :         if( myDebug->get_verbosity() == 4 )
    6825                 :            :         {
    6826         [ #  # ]:          0 :             msgs.resize( msgs.size() + 1 );
    6827 [ #  # ][ #  # ]:          0 :             msgs.back() = new Buffer( *remoteOwnedBuffs[ind] );
         [ #  # ][ #  # ]
    6828                 :            :         }
    6829 [ #  # ][ #  # ]:          0 :         result = send_buffer( buffProcs[ind], remoteOwnedBuffs[ind], MB_MESG_REMOTEH_SIZE, sendReqs[3 * ind],
                 [ #  # ]
    6830 [ #  # ][ #  # ]:          0 :                               recv_remoteh_reqs[3 * ind + 2], &dum_ack_buff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6831                 :            :     }
    6832                 :            : 
    6833                 :            :     //===========================================
     6834                 :            :     // Process remote handles of my ghosted entities
    6835                 :            :     //===========================================
    6836         [ -  + ]:          2 :     while( incoming2 )
    6837                 :            :     {
    6838 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
    6839 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status );
    6840 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6841                 :            : 
    6842                 :            :         // OK, received something; decrement incoming counter
    6843                 :          0 :         incoming2--;
    6844                 :            : 
    6845         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    6846                 :            : 
    6847                 :          0 :         bool done             = false;
    6848                 :          0 :         unsigned int base_ind = 3 * ( ind / 3 );
    6849 [ #  # ][ #  # ]:          0 :         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 3], recv_remoteh_reqs[base_ind + 1],
    6850 [ #  # ][ #  # ]:          0 :                               recv_remoteh_reqs[base_ind + 2], incoming2, remoteOwnedBuffs[ind / 3],
    6851 [ #  # ][ #  # ]:          0 :                               sendReqs[base_ind + 1], sendReqs[base_ind + 2], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6852                 :            : 
    6853         [ #  # ]:          0 :         if( done )
    6854                 :            :         {
    6855                 :            :             // Incoming remote handles
    6856 [ #  # ][ #  # ]:          0 :             if( myDebug->get_verbosity() == 4 )
    6857                 :            :             {
    6858         [ #  # ]:          0 :                 msgs.resize( msgs.size() + 1 );
    6859 [ #  # ][ #  # ]:          0 :                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 3] );
         [ #  # ][ #  # ]
    6860                 :            :             }
    6861                 :            : 
    6862 [ #  # ][ #  # ]:          0 :             localOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
    6863                 :            :             result =
    6864 [ #  # ][ #  # ]:          0 :                 unpack_remote_handles( buffProcs[ind / 3], localOwnedBuffs[ind / 3]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6865                 :            :         }
    6866                 :            :     }
    6867                 :            : 
    6868                 :            : #ifdef MOAB_HAVE_MPE
    6869                 :            :     if( myDebug->get_verbosity() == 2 )
    6870                 :            :     {
    6871                 :            :         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
    6872                 :            :         MPE_Log_event( OWNED_END, procConfig.proc_rank(), "Ending ghost exchange (still doing checks)." );
    6873                 :            :     }
    6874                 :            : #endif
    6875                 :            : 
    6876                 :            :     //===========================================
    6877                 :            :     // Wait if requested
    6878                 :            :     //===========================================
    6879         [ +  - ]:          2 :     if( wait_all )
    6880                 :            :     {
    6881 [ +  - ][ -  + ]:          2 :         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    6882                 :            :         else
    6883                 :            :         {
    6884                 :            :             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
    6885 [ +  - ][ +  - ]:          2 :             success = MPI_Waitall( 3 * buffProcs.size(), &recv_remoteh_reqs[0], mult_status );
    6886 [ +  - ][ +  - ]:          2 :             if( MPI_SUCCESS == success ) success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
                 [ +  - ]
    6887                 :            :         }
    6888 [ -  + ][ #  # ]:          2 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in owned entity exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6889                 :            :     }
    6890                 :            : 
    6891                 :            : #ifndef NDEBUG
    6892 [ +  - ][ -  + ]:          2 :     result = check_sent_ents( allsent );MB_CHK_SET_ERR( result, "Failed check on shared entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6893                 :            : #endif
    6894         [ +  - ]:          2 :     myDebug->tprintf( 1, "Exiting exchange_owned_mesh\n" );
    6895                 :            : 
    6896                 :          4 :     return MB_SUCCESS;
    6897                 :            : }
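
Editor's note: one detail that is easy to miss above is the triple-slot layout of the request vectors. The summary and the hypothetical helpers below are a reading of the code, not documented API behavior.

    // For each communicating proc index 'ind', three MPI_Request slots appear to be reserved:
    //   recv_ent_reqs[3 * ind]      - initial INITIAL_BUFF_SIZE receive
    //   recv_ent_reqs[3 * ind + 1]  - follow-up receive when a message exceeds the initial buffer
    //   recv_ent_reqs[3 * ind + 2]  - ack used by the send_buffer/recv_buffer size handshake
    // MPI_Waitany returns a flat index, so the proc and the triple base are recovered with:
    inline unsigned int req_proc_index( unsigned int flat_ind ) { return flat_ind / 3; }
    inline unsigned int req_triple_base( unsigned int flat_ind ) { return 3 * ( flat_ind / 3 ); }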
    6898                 :            : 
    6899                 :          0 : ErrorCode ParallelComm::get_iface_entities( int other_proc, int dim, Range& iface_ents )
    6900                 :            : {
    6901         [ #  # ]:          0 :     Range iface_sets;
    6902                 :          0 :     ErrorCode result = MB_SUCCESS;
    6903                 :            : 
    6904 [ #  # ][ #  # ]:          0 :     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    6905                 :            :     {
    6906 [ #  # ][ #  # ]:          0 :         if( -1 != other_proc && !is_iface_proc( *rit, other_proc ) ) continue;
         [ #  # ][ #  # ]
                 [ #  # ]
    6907                 :            : 
    6908         [ #  # ]:          0 :         if( -1 == dim )
    6909                 :            :         {
    6910 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in iface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6911                 :            :         }
    6912                 :            :         else
    6913                 :            :         {
    6914 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_dimension( *rit, dim, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in iface set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6915                 :            :         }
    6916                 :            :     }
    6917                 :            : 
    6918                 :          0 :     return MB_SUCCESS;
    6919                 :            : }
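
Editor's note: a hedged usage sketch of get_iface_entities; pc and nbr_rank are assumptions. As the branches above show, passing -1 for other_proc takes every interface set, and -1 for dim takes entities of any dimension.

    Range iface_faces;
    ErrorCode rval = pc->get_iface_entities( nbr_rank, 2, iface_faces );  // 2D entities shared with nbr_rank
    if( MB_SUCCESS != rval ) { /* handle error */ }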
    6920                 :            : 
    6921                 :          2 : ErrorCode ParallelComm::assign_entities_part( std::vector< EntityHandle >& entities, const int proc )
    6922                 :            : {
    6923                 :            :     EntityHandle part_set;
    6924 [ +  - ][ -  + ]:          2 :     ErrorCode result = get_part_handle( proc, part_set );MB_CHK_SET_ERR( result, "Failed to get part handle" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6925                 :            : 
    6926         [ +  - ]:          2 :     if( part_set > 0 )
    6927                 :            :     {
    6928 [ +  - ][ +  - ]:          2 :         result = mbImpl->add_entities( part_set, &entities[0], entities.size() );MB_CHK_SET_ERR( result, "Failed to add entities to part set" );
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    6929                 :            :     }
    6930                 :            : 
    6931                 :          2 :     return MB_SUCCESS;
    6932                 :            : }
    6933                 :            : 
    6934                 :          0 : ErrorCode ParallelComm::remove_entities_part( Range& entities, const int proc )
    6935                 :            : {
    6936                 :            :     EntityHandle part_set;
    6937 [ #  # ][ #  # ]:          0 :     ErrorCode result = get_part_handle( proc, part_set );MB_CHK_SET_ERR( result, "Failed to get part handle" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6938                 :            : 
    6939         [ #  # ]:          0 :     if( part_set > 0 )
    6940                 :            :     {
    6941 [ #  # ][ #  # ]:          0 :         result = mbImpl->remove_entities( part_set, entities );MB_CHK_SET_ERR( result, "Failed to remove entities from part set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6942                 :            :     }
    6943                 :            : 
    6944                 :          0 :     return MB_SUCCESS;
    6945                 :            : }
    6946                 :            : 
    6947                 :          5 : ErrorCode ParallelComm::check_sent_ents( Range& allsent )
    6948                 :            : {
    6949                 :            :     // Check entities to make sure there are no zero-valued remote handles
    6950                 :            :     // where they shouldn't be
    6951 [ +  - ][ +  - ]:          5 :     std::vector< unsigned char > pstat( allsent.size() );
    6952 [ +  - ][ +  - ]:          5 :     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), allsent, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ +  - ][ -  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6953 [ +  - ][ +  - ]:         10 :     std::vector< EntityHandle > handles( allsent.size() );
    6954 [ +  - ][ +  - ]:          5 :     result = mbImpl->tag_get_data( sharedh_tag(), allsent, &handles[0] );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
         [ +  - ][ -  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6955 [ +  - ][ +  - ]:         10 :     std::vector< int > procs( allsent.size() );
    6956 [ +  - ][ +  - ]:          5 :     result = mbImpl->tag_get_data( sharedp_tag(), allsent, &procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
         [ +  - ][ -  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6957                 :            : 
    6958         [ +  - ]:         10 :     Range bad_entities;
    6959                 :            : 
    6960         [ +  - ]:          5 :     Range::iterator rit;
    6961                 :            :     unsigned int i;
    6962                 :            :     EntityHandle dum_hs[MAX_SHARING_PROCS];
    6963                 :            :     int dum_ps[MAX_SHARING_PROCS];
    6964                 :            : 
    6965 [ +  - ][ #  # ]:          5 :     for( rit = allsent.begin(), i = 0; rit != allsent.end(); ++rit, i++ )
         [ +  - ][ +  - ]
                 [ -  + ]
    6966                 :            :     {
    6967 [ #  # ][ #  # ]:          0 :         if( -1 != procs[i] && 0 == handles[i] )
         [ #  # ][ #  # ]
                 [ #  # ]
    6968 [ #  # ][ #  # ]:          0 :             bad_entities.insert( *rit );
    6969                 :            :         else
    6970                 :            :         {
    6971                 :            :             // Might be multi-shared...
    6972 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, dum_ps );
                 [ #  # ]
    6973         [ #  # ]:          0 :             if( MB_TAG_NOT_FOUND == result )
    6974                 :          0 :                 continue;
    6975         [ #  # ]:          0 :             else if( MB_SUCCESS != result )
    6976 [ #  # ][ #  # ]:          0 :                 MB_SET_ERR( result, "Failed to get sharedps tag data" );
         [ #  # ][ #  # ]
                 [ #  # ]
    6977 [ #  # ][ #  # ]:          0 :             result = mbImpl->tag_get_data( sharedhs_tag(), &( *rit ), 1, dum_hs );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    6978                 :            : 
    6979                 :            :             // Find first non-set proc
    6980         [ #  # ]:          0 :             int* ns_proc  = std::find( dum_ps, dum_ps + MAX_SHARING_PROCS, -1 );
    6981                 :          0 :             int num_procs = ns_proc - dum_ps;
    6982         [ #  # ]:          0 :             assert( num_procs <= MAX_SHARING_PROCS );
    6983                 :            :             // Now look for zero handles in active part of dum_hs
    6984         [ #  # ]:          0 :             EntityHandle* ns_handle = std::find( dum_hs, dum_hs + num_procs, 0 );
    6985                 :          0 :             int num_handles         = ns_handle - dum_hs;
    6986         [ #  # ]:          0 :             assert( num_handles <= num_procs );
    6987 [ #  # ][ #  # ]:          0 :             if( num_handles != num_procs ) bad_entities.insert( *rit );
                 [ #  # ]
    6988                 :            :         }
    6989                 :            :     }
    6990                 :            : 
    6991                 :         10 :     return MB_SUCCESS;
    6992                 :            : }
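
Editor's note: for context, the sharing tags queried above follow MOAB's parallel tag conventions; the summary below is a reading of how this routine uses them, not normative documentation.

    //   sharedp_tag()  : one int                            - the single sharing proc, or -1
    //   sharedh_tag()  : one EntityHandle                   - the handle on that proc, 0 if unknown
    //   sharedps_tag() : int[MAX_SHARING_PROCS]             - multi-shared proc list, -1 terminated
    //   sharedhs_tag() : EntityHandle[MAX_SHARING_PROCS]    - the corresponding remote handles
    // An entity lands in bad_entities when a proc entry exists without a matching nonzero handle.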
    6993                 :            : 
    6994                 :          0 : ErrorCode ParallelComm::pack_remote_handles( std::vector< EntityHandle >& L1hloc, std::vector< EntityHandle >& L1hrem,
    6995                 :            :                                              std::vector< int >& L1p, unsigned int /*to_proc*/, Buffer* buff )
    6996                 :            : {
    6997 [ #  # ][ #  # ]:          0 :     assert( std::find( L1hloc.begin(), L1hloc.end(), (EntityHandle)0 ) == L1hloc.end() );
                 [ #  # ]
    6998                 :            : 
    6999                 :            :     // 2 vectors of handles plus ints
    7000                 :          0 :     buff->check_space( ( ( L1p.size() + 1 ) * sizeof( int ) + ( L1hloc.size() + 1 ) * sizeof( EntityHandle ) +
    7001                 :          0 :                          ( L1hrem.size() + 1 ) * sizeof( EntityHandle ) ) );
    7002                 :            : 
    7003                 :            :     // Should be in pairs of handles
    7004                 :          0 :     PACK_INT( buff->buff_ptr, L1hloc.size() );
    7005                 :          0 :     PACK_INTS( buff->buff_ptr, &L1p[0], L1p.size() );
     7006                 :            :     // Pack handles in reverse order (remote, local), so that on the destination they
     7007                 :            :     // are read back as (local, remote)
    7008                 :          0 :     PACK_EH( buff->buff_ptr, &L1hrem[0], L1hrem.size() );
    7009                 :          0 :     PACK_EH( buff->buff_ptr, &L1hloc[0], L1hloc.size() );
    7010                 :            : 
    7011                 :          0 :     buff->set_stored_size();
    7012                 :            : 
    7013                 :          0 :     return MB_SUCCESS;
    7014                 :            : }
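
Editor's note: a sketch of the message layout written above, inferred from the PACK_* calls; the byte estimate mirrors, but does not exactly reproduce, the check_space computation, and the helper is hypothetical.

    // Body layout for n = L1hloc.size() handle pairs:
    //   [ int n ][ int proc[n] ][ EntityHandle remote[n] ][ EntityHandle local[n] ]
    static size_t remote_handle_bytes( size_t n )
    {
        return ( n + 1 ) * sizeof( int ) + 2 * ( n + 1 ) * sizeof( EntityHandle );
    }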
    7015                 :            : 
    7016                 :          0 : ErrorCode ParallelComm::unpack_remote_handles( unsigned int from_proc, unsigned char*& buff_ptr,
    7017                 :            :                                                std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
    7018                 :            :                                                std::vector< unsigned int >& L2p )
    7019                 :            : {
    7020                 :            :     // Incoming remote handles; use to set remote handles
    7021                 :            :     int num_eh;
    7022         [ #  # ]:          0 :     UNPACK_INT( buff_ptr, num_eh );
    7023                 :            : 
    7024                 :          0 :     unsigned char* buff_proc = buff_ptr;
    7025                 :          0 :     buff_ptr += num_eh * sizeof( int );
    7026                 :          0 :     unsigned char* buff_rem = buff_ptr + num_eh * sizeof( EntityHandle );
    7027                 :            :     ErrorCode result;
    7028                 :            :     EntityHandle hpair[2], new_h;
    7029                 :            :     int proc;
    7030         [ #  # ]:          0 :     for( int i = 0; i < num_eh; i++ )
    7031                 :            :     {
    7032         [ #  # ]:          0 :         UNPACK_INT( buff_proc, proc );
    7033                 :            :         // Handles packed (local, remote), though here local is either on this
    7034                 :            :         // proc or owner proc, depending on value of proc (-1 = here, otherwise owner);
    7035                 :            :         // this is decoded in find_existing_entity
    7036         [ #  # ]:          0 :         UNPACK_EH( buff_ptr, hpair, 1 );
    7037         [ #  # ]:          0 :         UNPACK_EH( buff_rem, hpair + 1, 1 );
    7038                 :            : 
    7039         [ #  # ]:          0 :         if( -1 != proc )
    7040                 :            :         {
    7041                 :          0 :             result = find_existing_entity( false, proc, hpair[0], 3, NULL, 0, mbImpl->type_from_handle( hpair[1] ),
    7042 [ #  # ][ #  # ]:          0 :                                            L2hloc, L2hrem, L2p, new_h );MB_CHK_SET_ERR( result, "Didn't get existing entity" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7043         [ #  # ]:          0 :             if( new_h )
    7044                 :          0 :                 hpair[0] = new_h;
    7045                 :            :             else
    7046                 :          0 :                 hpair[0] = 0;
    7047                 :            :         }
    7048 [ #  # ][ #  # ]:          0 :         if( !( hpair[0] && hpair[1] ) ) return MB_FAILURE;
    7049                 :          0 :         int this_proc = from_proc;
    7050 [ #  # ][ #  # ]:          0 :         result        = update_remote_data( hpair[0], &this_proc, hpair + 1, 1, 0 );MB_CHK_SET_ERR( result, "Failed to set remote data range on sent entities in ghost exchange" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7051                 :            :     }
    7052                 :            : 
    7053                 :          0 :     return MB_SUCCESS;
    7054                 :            : }
    7055                 :            : 
    7056                 :          0 : ErrorCode ParallelComm::get_ghosted_entities( int bridge_dim, int ghost_dim, int to_proc, int num_layers, int addl_ents,
    7057                 :            :                                               Range& ghosted_ents )
    7058                 :            : {
    7059                 :            :     // Get bridge ents on interface(s)
    7060         [ #  # ]:          0 :     Range from_ents;
    7061                 :          0 :     ErrorCode result = MB_SUCCESS;
    7062         [ #  # ]:          0 :     assert( 0 < num_layers );
    7063 [ #  # ][ #  # ]:          0 :     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    7064                 :            :     {
    7065 [ #  # ][ #  # ]:          0 :         if( !is_iface_proc( *rit, to_proc ) ) continue;
                 [ #  # ]
    7066                 :            : 
    7067                 :            :         // Get starting "from" entities
    7068         [ #  # ]:          0 :         if( bridge_dim == -1 )
    7069                 :            :         {
    7070 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_handle( *rit, from_ents );MB_CHK_SET_ERR( result, "Failed to get bridge ents in the set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7071                 :            :         }
    7072                 :            :         else
    7073                 :            :         {
    7074 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_dimension( *rit, bridge_dim, from_ents );MB_CHK_SET_ERR( result, "Failed to get bridge ents in the set" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7075                 :            :         }
    7076                 :            : 
    7077                 :            :         // Need to get layers of bridge-adj entities
    7078 [ #  # ][ #  # ]:          0 :         if( from_ents.empty() ) continue;
    7079                 :            :         result =
    7080 [ #  # ][ #  # ]:          0 :             MeshTopoUtil( mbImpl ).get_bridge_adjacencies( from_ents, bridge_dim, ghost_dim, ghosted_ents, num_layers );MB_CHK_SET_ERR( result, "Failed to get bridge adjacencies" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7081                 :            :     }
    7082                 :            : 
    7083 [ #  # ][ #  # ]:          0 :     result = add_verts( ghosted_ents );MB_CHK_SET_ERR( result, "Failed to add verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7084                 :            : 
    7085         [ #  # ]:          0 :     if( addl_ents )
    7086                 :            :     {
    7087                 :            :         // First get the ents of ghost_dim
    7088 [ #  # ][ #  # ]:          0 :         Range tmp_ents, tmp_owned, tmp_notowned;
         [ #  # ][ #  # ]
                 [ #  # ]
    7089 [ #  # ][ #  # ]:          0 :         tmp_owned = ghosted_ents.subset_by_dimension( ghost_dim );
    7090 [ #  # ][ #  # ]:          0 :         if( tmp_owned.empty() ) return result;
    7091                 :            : 
    7092         [ #  # ]:          0 :         tmp_notowned = tmp_owned;
    7093                 :            : 
    7094                 :            :         // Next, filter by pstatus; can only create adj entities for entities I own
    7095 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( tmp_owned, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &tmp_owned );MB_CHK_SET_ERR( result, "Failed to filter owned entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7096                 :            : 
    7097         [ #  # ]:          0 :         tmp_notowned -= tmp_owned;
    7098                 :            : 
    7099                 :            :         // Get edges first
    7100 [ #  # ][ #  # ]:          0 :         if( 1 == addl_ents || 3 == addl_ents )
    7101                 :            :         {
    7102 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_adjacencies( tmp_owned, 1, true, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get edge adjacencies for owned ghost entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7103 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_adjacencies( tmp_notowned, 1, false, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get edge adjacencies for notowned ghost entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7104                 :            :         }
    7105 [ #  # ][ #  # ]:          0 :         if( 2 == addl_ents || 3 == addl_ents )
    7106                 :            :         {
    7107 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_adjacencies( tmp_owned, 2, true, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get face adjacencies for owned ghost entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7108 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_adjacencies( tmp_notowned, 2, false, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get face adjacencies for notowned ghost entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7109                 :            :         }
    7110                 :            : 
    7111 [ #  # ][ #  # ]:          0 :         ghosted_ents.merge( tmp_ents );
    7112                 :            :     }
    7113                 :            : 
    7114                 :          0 :     return result;
    7115                 :            : }
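
Editor's note: the addl_ents flag handled above appears to encode which additional lower-dimensional entities accompany the ghosts. The helpers below express that reading; they are illustrative assumptions, not MOAB API.

    inline bool ghost_adds_edges( int addl_ents ) { return 1 == addl_ents || 3 == addl_ents; }
    inline bool ghost_adds_faces( int addl_ents ) { return 2 == addl_ents || 3 == addl_ents; }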
    7116                 :            : 
    7117                 :          0 : ErrorCode ParallelComm::add_verts( Range& sent_ents )
    7118                 :            : {
    7119                 :            :     // Get the verts adj to these entities, since we'll have to send those too
    7120                 :            : 
    7121                 :            :     // First check sets
    7122         [ #  # ]:          0 :     std::pair< Range::const_iterator, Range::const_iterator > set_range = sent_ents.equal_range( MBENTITYSET );
    7123                 :          0 :     ErrorCode result                                                    = MB_SUCCESS, tmp_result;
    7124 [ #  # ][ #  # ]:          0 :     for( Range::const_iterator rit = set_range.first; rit != set_range.second; ++rit )
                 [ #  # ]
    7125                 :            :     {
    7126 [ #  # ][ #  # ]:          0 :         tmp_result = mbImpl->get_entities_by_type( *rit, MBVERTEX, sent_ents );MB_CHK_SET_ERR( tmp_result, "Failed to get contained verts" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7127                 :            :     }
    7128                 :            : 
    7129                 :            :     // Now non-sets
    7130         [ #  # ]:          0 :     Range tmp_ents;
    7131 [ #  # ][ #  # ]:          0 :     std::copy( sent_ents.begin(), set_range.first, range_inserter( tmp_ents ) );
                 [ #  # ]
    7132 [ #  # ][ #  # ]:          0 :     result = mbImpl->get_adjacencies( tmp_ents, 0, false, sent_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get vertices adj to ghosted ents" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7133                 :            : 
    7134                 :            :     // If polyhedra are present, their faces must be added as well
    7135         [ #  # ]:          0 :     Range polyhedra = sent_ents.subset_by_type( MBPOLYHEDRON );
    7136                 :            :     // Get all faces adjacent to each polyhedron
    7137 [ #  # ][ #  # ]:          0 :     result = mbImpl->get_connectivity( polyhedra, sent_ents );MB_CHK_SET_ERR( result, "Failed to get polyhedra faces" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7138                 :          0 :     return result;
    7139                 :            : }
    7140                 :            : 
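A minimal sketch of calling add_verts before shipping a set of elements to another rank; the Range elems, the ParallelComm pointer pc, and the error handling are placeholders, not part of the source above:

    // elems: hypothetical range of elements already selected for sending
    moab::Range to_send = elems;
    moab::ErrorCode rval = pc->add_verts( to_send );  // appends adjacent vertices (and polyhedron faces)
    if( moab::MB_SUCCESS != rval ) return rval;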
    7141                 :          4 : ErrorCode ParallelComm::exchange_tags( const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags,
    7142                 :            :                                        const Range& entities_in )
    7143                 :            : {
    7144                 :            :     ErrorCode result;
    7145                 :            :     int success;
    7146                 :            : 
    7147         [ +  - ]:          4 :     myDebug->tprintf( 1, "Entering exchange_tags\n" );
    7148                 :            : 
    7149                 :            :     // Get all procs interfacing to this proc
    7150         [ +  - ]:          4 :     std::set< unsigned int > exch_procs;
    7151         [ +  - ]:          4 :     result = get_comm_procs( exch_procs );
    7152                 :            : 
    7153                 :            :     // Post ghost irecv's for all interface procs
    7154                 :            :     // Index requests the same as buffer/sharing procs indices
    7155         [ +  - ]:          8 :     std::vector< MPI_Request > recv_tag_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    7156                 :            :     // sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
    7157                 :          4 :     std::vector< unsigned int >::iterator sit;
    7158                 :            :     int ind;
    7159                 :            : 
    7160         [ +  - ]:          4 :     reset_all_buffers();
    7161                 :          4 :     int incoming = 0;
    7162                 :            : 
    7163 [ #  # ][ +  - ]:          4 :     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
                 [ -  + ]
    7164                 :            :     {
    7165                 :          0 :         incoming++;
    7166 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
                 [ #  # ]
    7167         [ #  # ]:          0 :                            MB_MESG_TAGS_SIZE, incoming );
    7168                 :            : 
    7169 [ #  # ][ #  # ]:          0 :         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
    7170 [ #  # ][ #  # ]:          0 :                              MB_MESG_TAGS_SIZE, procConfig.proc_comm(), &recv_tag_reqs[3 * ind] );
                 [ #  # ]
    7171 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in tag exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7172                 :            :     }
    7173                 :            : 
    7174                 :            :     // Pack and send tags from this proc to others
    7175                 :            :     // Make sendReqs vector to simplify initialization
    7176         [ +  - ]:          4 :     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    7177                 :            : 
    7178                 :            :     // Take all shared entities if incoming list is empty
    7179         [ +  - ]:          8 :     Range entities;
    7180 [ +  - ][ +  + ]:          4 :     if( entities_in.empty() )
    7181 [ +  - ][ +  - ]:          2 :         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( entities ) );
    7182                 :            :     else
    7183         [ +  - ]:          2 :         entities = entities_in;
    7184                 :            : 
    7185                 :            :     int dum_ack_buff;
    7186                 :            : 
    7187 [ #  # ][ +  - ]:          4 :     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
                 [ -  + ]
    7188                 :            :     {
    7189         [ #  # ]:          0 :         Range tag_ents = entities;
    7190                 :            : 
    7191                 :            :         // Get ents shared by proc *sit
    7192 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit );MB_CHK_SET_ERR( result, "Failed pstatus AND check" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7193                 :            : 
    7194                 :            :         // Remove nonowned entities
    7195 [ #  # ][ #  # ]:          0 :         if( !tag_ents.empty() )
    7196                 :            :         {
    7197 [ #  # ][ #  # ]:          0 :             result = filter_pstatus( tag_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );MB_CHK_SET_ERR( result, "Failed pstatus NOT check" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7198                 :            :         }
    7199                 :            : 
    7200                 :            :         // Pack-send; this also posts receives if store_remote_handles is true
    7201 [ #  # ][ #  # ]:          0 :         std::vector< Range > tag_ranges;
    7202 [ #  # ][ #  # ]:          0 :         for( std::vector< Tag >::const_iterator vit = src_tags.begin(); vit != src_tags.end(); ++vit )
                 [ #  # ]
    7203                 :            :         {
    7204                 :            :             const void* ptr;
    7205                 :            :             int sz;
    7206 [ #  # ][ #  # ]:          0 :             if( mbImpl->tag_get_default_value( *vit, ptr, sz ) != MB_SUCCESS )
                 [ #  # ]
    7207                 :            :             {
    7208         [ #  # ]:          0 :                 Range tagged_ents;
    7209 [ #  # ][ #  # ]:          0 :                 mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &*vit, 0, 1, tagged_ents );
    7210 [ #  # ][ #  # ]:          0 :                 tag_ranges.push_back( intersect( tag_ents, tagged_ents ) );
    7211                 :            :             }
    7212                 :            :             else
    7213                 :            :             {
    7214         [ #  # ]:          0 :                 tag_ranges.push_back( tag_ents );
    7215                 :            :             }
    7216                 :            :         }
    7217                 :            : 
    7218                 :            :         // Pack the data
    7219                 :            :         // Reserve space on front for size and for initial buff size
    7220 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    7221                 :            : 
    7222 [ #  # ][ #  # ]:          0 :         result = pack_tags( tag_ents, src_tags, dst_tags, tag_ranges, localOwnedBuffs[ind], true, *sit );MB_CHK_SET_ERR( result, "Failed to count buffer in pack_send_tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7223                 :            : 
    7224                 :            :         // Now send it
    7225 [ #  # ][ #  # ]:          0 :         result = send_buffer( *sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3 * ind],
                 [ #  # ]
    7226 [ #  # ][ #  # ]:          0 :                               recv_tag_reqs[3 * ind + 2], &dum_ack_buff, incoming );MB_CHK_SET_ERR( result, "Failed to send buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7227                 :          0 :     }
    7228                 :            : 
    7229                 :            :     // Receive/unpack tags
    7230         [ -  + ]:          4 :     while( incoming )
    7231                 :            :     {
    7232                 :            :         MPI_Status status;
    7233                 :            :         int index_in_recv_requests;
    7234 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
    7235 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status );
    7236 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in tag exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7237                 :            :         // Processor index in the list is divided by 3
    7238                 :          0 :         ind = index_in_recv_requests / 3;
    7239                 :            : 
    7240         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    7241                 :            : 
    7242                 :            :         // OK, received something; decrement incoming counter
    7243                 :          0 :         incoming--;
    7244                 :            : 
    7245                 :          0 :         bool done = false;
    7246         [ #  # ]:          0 :         std::vector< EntityHandle > dum_vec;
    7247         [ #  # ]:          0 :         result = recv_buffer( MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind],
    7248         [ #  # ]:          0 :                               recv_tag_reqs[3 * ind + 1],  // This is for receiving the second message
    7249         [ #  # ]:          0 :                               recv_tag_reqs[3 * ind + 2],  // This would be for ack, but it is not
    7250                 :            :                                                            // used; consider removing it
    7251         [ #  # ]:          0 :                               incoming, localOwnedBuffs[ind],
    7252         [ #  # ]:          0 :                               sendReqs[3 * ind + 1],  // Send request for sending the second message
    7253         [ #  # ]:          0 :                               sendReqs[3 * ind + 2],  // This is for sending the ack
    7254 [ #  # ][ #  # ]:          0 :                               done );MB_CHK_SET_ERR( result, "Failed to resize recv buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7255         [ #  # ]:          0 :         if( done )
    7256                 :            :         {
    7257 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    7258 [ #  # ][ #  # ]:          0 :             result = unpack_tags( remoteOwnedBuffs[ind]->buff_ptr, dum_vec, true, buffProcs[ind] );MB_CHK_SET_ERR( result, "Failed to recv-unpack-tag message" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7259                 :            :         }
    7260                 :          0 :     }
    7261                 :            : 
    7262                 :            :     // OK, now wait
    7263 [ +  - ][ -  + ]:          4 :     if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    7264                 :            :     else
    7265                 :            :     {
    7266                 :            :         MPI_Status status[3 * MAX_SHARING_PROCS];
    7267 [ +  - ][ +  - ]:          4 :         success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], status );
    7268                 :            :     }
    7269 [ -  + ][ #  # ]:          4 :     if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failure in waitall in tag exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7270                 :            : 
    7271                 :            :     // If source tag is not equal to destination tag, then
    7272                 :            :     // do local copy for owned entities (communicate w/ self)
    7273         [ -  + ]:          4 :     assert( src_tags.size() == dst_tags.size() );
    7274 [ +  - ][ +  - ]:          4 :     if( src_tags != dst_tags )
    7275                 :            :     {
    7276         [ +  - ]:          4 :         std::vector< unsigned char > data;
    7277 [ +  - ][ +  - ]:          8 :         Range owned_ents;
    7278 [ +  - ][ +  + ]:          4 :         if( entities_in.empty() )
    7279 [ +  - ][ +  - ]:          2 :             std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( owned_ents ) );
    7280                 :            :         else
    7281         [ +  - ]:          2 :             owned_ents = entities_in;
    7282 [ +  - ][ -  + ]:          4 :         result = filter_pstatus( owned_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );MB_CHK_SET_ERR( result, "Failure to get subset of owned entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7283                 :            : 
    7284 [ +  - ][ +  + ]:          4 :         if( !owned_ents.empty() )
    7285                 :            :         {  // Check this here, otherwise we get
    7286                 :            :             // Unexpected results from get_entities_by_type_and_tag w/ Interface::INTERSECT
    7287 [ +  + ][ +  - ]:          6 :             for( size_t i = 0; i < src_tags.size(); i++ )
    7288                 :            :             {
    7289 [ +  - ][ +  - ]:          2 :                 if( src_tags[i] == dst_tags[i] ) continue;
                 [ -  + ]
    7290                 :            : 
    7291         [ +  - ]:          2 :                 Range tagged_ents( owned_ents );
    7292         [ +  - ]:          2 :         result = mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &src_tags[i], 0, 1, tagged_ents,
    7293 [ +  - ][ -  + ]:          2 :                                                                Interface::INTERSECT );MB_CHK_SET_ERR( result, "get_entities_by_type_and_tag(type == MBMAXTYPE) failed" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7294                 :            : 
    7295                 :            :                 int sz, size2;
    7296 [ +  - ][ +  - ]:          2 :                 result = mbImpl->tag_get_bytes( src_tags[i], sz );MB_CHK_SET_ERR( result, "tag_get_size failed" );
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7297 [ +  - ][ +  - ]:          2 :                 result = mbImpl->tag_get_bytes( dst_tags[i], size2 );MB_CHK_SET_ERR( result, "tag_get_size failed" );
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7298 [ -  + ][ #  # ]:          2 :                 if( sz != size2 ) { MB_SET_ERR( MB_FAILURE, "tag sizes don't match" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7299                 :            : 
    7300 [ +  - ][ +  - ]:          2 :                 data.resize( sz * tagged_ents.size() );
    7301 [ +  - ][ +  - ]:          2 :                 result = mbImpl->tag_get_data( src_tags[i], tagged_ents, &data[0] );MB_CHK_SET_ERR( result, "tag_get_data failed" );
         [ +  - ][ -  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7302 [ +  - ][ +  - ]:          2 :                 result = mbImpl->tag_set_data( dst_tags[i], tagged_ents, &data[0] );MB_CHK_SET_ERR( result, "tag_set_data failed" );
         [ +  - ][ -  + ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ +  - ]
    7303                 :          2 :             }
    7304                 :          4 :         }
    7305                 :            :     }
    7306                 :            : 
    7307         [ +  - ]:          4 :     myDebug->tprintf( 1, "Exiting exchange_tags" );
    7308                 :            : 
    7309                 :          8 :     return MB_SUCCESS;
    7310                 :            : }
    7311                 :            : 
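A minimal usage sketch for the vector form of exchange_tags above, assuming the mesh was already loaded in parallel and shared entities were resolved; the Core instance mb, the tag name "MY_FIELD", and the handle names are illustrative only:

    moab::Core mb;
    // ... parallel load and shared-entity resolution happen before this point ...
    moab::ParallelComm* pc = moab::ParallelComm::get_pcomm( &mb, 0 );
    moab::Tag field;
    double def_val = 0.0;
    mb.tag_get_handle( "MY_FIELD", 1, moab::MB_TYPE_DOUBLE, field,
                       moab::MB_TAG_DENSE | moab::MB_TAG_CREAT, &def_val );
    std::vector< moab::Tag > tags( 1, field );
    moab::Range empty;                               // empty range -> all shared entities
    moab::ErrorCode rval = pc->exchange_tags( tags, tags, empty );

Passing the same vector for source and destination skips the local owned-entity copy at the end of the routine.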
    7312                 :          0 : ErrorCode ParallelComm::reduce_tags( const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags,
    7313                 :            :                                      const MPI_Op mpi_op, const Range& entities_in )
    7314                 :            : {
    7315                 :            :     ErrorCode result;
    7316                 :            :     int success;
    7317                 :            : 
    7318         [ #  # ]:          0 :     myDebug->tprintf( 1, "Entering reduce_tags\n" );
    7319                 :            : 
    7320                 :            :     // Check that restrictions are met: number of source/dst tags...
    7321         [ #  # ]:          0 :     if( src_tags.size() != dst_tags.size() )
    7322 [ #  # ][ #  # ]:          0 :     { MB_SET_ERR( MB_FAILURE, "Source and destination tag lists must have the same length for reduce_tags" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    7323                 :            : 
    7324                 :            :     // ... tag data types
    7325                 :          0 :     std::vector< Tag >::const_iterator vits, vitd;
    7326                 :            :     int tags_size, tagd_size;
    7327                 :            :     DataType tags_type, tagd_type;
    7328         [ #  # ]:          0 :     std::vector< unsigned char > vals;
    7329         [ #  # ]:          0 :     std::vector< int > tags_sizes;
    7330 [ #  # ][ #  # ]:          0 :     for( vits = src_tags.begin(), vitd = dst_tags.begin(); vits != src_tags.end(); ++vits, ++vitd )
         [ #  # ][ #  # ]
    7331                 :            :     {
    7332                 :            :         // Checks on tag characteristics
    7333 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data_type( *vits, tags_type );MB_CHK_SET_ERR( result, "Failed to get src tag data type" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7334 [ #  # ][ #  # ]:          0 :         if( tags_type != MB_TYPE_INTEGER && tags_type != MB_TYPE_DOUBLE && tags_type != MB_TYPE_BIT )
                 [ #  # ]
    7335 [ #  # ][ #  # ]:          0 :         { MB_SET_ERR( MB_FAILURE, "Src/dst tags must have integer, double, or bit data type" ); }
         [ #  # ][ #  # ]
                 [ #  # ]
    7336                 :            : 
    7337 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_bytes( *vits, tags_size );MB_CHK_SET_ERR( result, "Failed to get src tag bytes" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7338         [ #  # ]:          0 :         vals.resize( tags_size );
    7339 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_default_value( *vits, &vals[0] );MB_CHK_SET_ERR( result, "Src tag must have default value" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7340                 :            : 
    7341         [ #  # ]:          0 :         tags_sizes.push_back( tags_size );
    7342                 :            : 
    7343                 :            :         // OK, those passed; now check whether dest tags, if specified, agree with src tags
    7344 [ #  # ][ #  # ]:          0 :         if( *vits == *vitd ) continue;
                 [ #  # ]
    7345                 :            : 
    7346 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_bytes( *vitd, tagd_size );MB_CHK_SET_ERR( result, "Couldn't get dst tag bytes" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7347 [ #  # ][ #  # ]:          0 :         if( tags_size != tagd_size ) { MB_SET_ERR( MB_FAILURE, "Sizes between src and dst tags don't match" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7348 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data_type( *vitd, tagd_type );MB_CHK_SET_ERR( result, "Couldn't get dst tag data type" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7349 [ #  # ][ #  # ]:          0 :         if( tags_type != tagd_type ) { MB_SET_ERR( MB_FAILURE, "Src and dst tags must be of same data type" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7350                 :            :     }
    7351                 :            : 
    7352                 :            :     // Get all procs interfacing to this proc
    7353         [ #  # ]:          0 :     std::set< unsigned int > exch_procs;
    7354         [ #  # ]:          0 :     result = get_comm_procs( exch_procs );
    7355                 :            : 
    7356                 :            :     // Post ghost irecv's for all interface procs
    7357                 :            :     // Index requests the same as buffer/sharing procs indices
    7358         [ #  # ]:          0 :     std::vector< MPI_Request > recv_tag_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    7359                 :            : 
    7360                 :          0 :     std::vector< unsigned int >::iterator sit;
    7361                 :            :     int ind;
    7362                 :            : 
    7363         [ #  # ]:          0 :     reset_all_buffers();
    7364                 :          0 :     int incoming = 0;
    7365                 :            : 
    7366 [ #  # ][ #  # ]:          0 :     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
                 [ #  # ]
    7367                 :            :     {
    7368                 :          0 :         incoming++;
    7369 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
                 [ #  # ]
    7370         [ #  # ]:          0 :                            MB_MESG_TAGS_SIZE, incoming );
    7371                 :            : 
    7372 [ #  # ][ #  # ]:          0 :         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
    7373 [ #  # ][ #  # ]:          0 :                              MB_MESG_TAGS_SIZE, procConfig.proc_comm(), &recv_tag_reqs[3 * ind] );
                 [ #  # ]
    7374 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in tag exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7375                 :            :     }
    7376                 :            : 
    7377                 :            :     // Pack and send tags from this proc to others
    7378                 :            :     // Make sendReqs vector to simplify initialization
    7379         [ #  # ]:          0 :     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    7380                 :            : 
    7381                 :            :     // Take all shared entities if incoming list is empty
    7382         [ #  # ]:          0 :     Range entities;
    7383 [ #  # ][ #  # ]:          0 :     if( entities_in.empty() )
    7384 [ #  # ][ #  # ]:          0 :         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( entities ) );
    7385                 :            :     else
    7386         [ #  # ]:          0 :         entities = entities_in;
    7387                 :            : 
    7388                 :            :     // If the tags are different, copy the source to the dest tag locally
    7389                 :          0 :     std::vector< Tag >::const_iterator vit = src_tags.begin(), vit2 = dst_tags.begin();
    7390         [ #  # ]:          0 :     std::vector< int >::const_iterator vsizes = tags_sizes.begin();
    7391 [ #  # ][ #  # ]:          0 :     for( ; vit != src_tags.end(); ++vit, ++vit2, ++vsizes )
         [ #  # ][ #  # ]
                 [ #  # ]
    7392                 :            :     {
    7393 [ #  # ][ #  # ]:          0 :         if( *vit == *vit2 ) continue;
                 [ #  # ]
    7394 [ #  # ][ #  # ]:          0 :         vals.resize( entities.size() * ( *vsizes ) );
                 [ #  # ]
    7395 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( *vit, entities, &vals[0] );MB_CHK_SET_ERR( result, "Didn't get data properly" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7396 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_set_data( *vit2, entities, &vals[0] );MB_CHK_SET_ERR( result, "Didn't set data properly" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7397                 :            :     }
    7398                 :            : 
    7399                 :            :     int dum_ack_buff;
    7400                 :            : 
    7401 [ #  # ][ #  # ]:          0 :     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
                 [ #  # ]
    7402                 :            :     {
    7403         [ #  # ]:          0 :         Range tag_ents = entities;
    7404                 :            : 
    7405                 :            :         // Get ents shared by proc *sit
    7406 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit );MB_CHK_SET_ERR( result, "Failed pstatus AND check" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7407                 :            : 
    7408                 :            :         // Pack-send
    7409 [ #  # ][ #  # ]:          0 :         std::vector< Range > tag_ranges;
    7410 [ #  # ][ #  # ]:          0 :         for( vit = src_tags.begin(); vit != src_tags.end(); ++vit )
                 [ #  # ]
    7411                 :            :         {
    7412                 :            :             const void* ptr;
    7413                 :            :             int sz;
    7414 [ #  # ][ #  # ]:          0 :             if( mbImpl->tag_get_default_value( *vit, ptr, sz ) != MB_SUCCESS )
                 [ #  # ]
    7415                 :            :             {
    7416         [ #  # ]:          0 :                 Range tagged_ents;
    7417 [ #  # ][ #  # ]:          0 :                 mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &*vit, 0, 1, tagged_ents );
    7418 [ #  # ][ #  # ]:          0 :                 tag_ranges.push_back( intersect( tag_ents, tagged_ents ) );
    7419                 :            :             }
    7420                 :            :             else
    7421         [ #  # ]:          0 :                 tag_ranges.push_back( tag_ents );
    7422                 :            :         }
    7423                 :            : 
    7424                 :            :         // Pack the data
    7425                 :            :         // Reserve space on front for size and for initial buff size
    7426 [ #  # ][ #  # ]:          0 :         localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    7427                 :            : 
    7428 [ #  # ][ #  # ]:          0 :         result = pack_tags( tag_ents, src_tags, dst_tags, tag_ranges, localOwnedBuffs[ind], true, *sit );MB_CHK_SET_ERR( result, "Failed to count buffer in pack_send_tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7429                 :            : 
    7430                 :            :         // Now send it
    7431 [ #  # ][ #  # ]:          0 :         result = send_buffer( *sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3 * ind],
                 [ #  # ]
    7432 [ #  # ][ #  # ]:          0 :                               recv_tag_reqs[3 * ind + 2], &dum_ack_buff, incoming );MB_CHK_SET_ERR( result, "Failed to send buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7433                 :          0 :     }
    7434                 :            : 
    7435                 :            :     // Receive/unpack tags
    7436         [ #  # ]:          0 :     while( incoming )
    7437                 :            :     {
    7438                 :            :         MPI_Status status;
    7439                 :            :         int index_in_recv_requests;
    7440 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
    7441 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status );
    7442 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in tag exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7443                 :          0 :         ind = index_in_recv_requests / 3;
    7444                 :            : 
    7445         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    7446                 :            : 
    7447                 :            :         // OK, received something; decrement incoming counter
    7448                 :          0 :         incoming--;
    7449                 :            : 
    7450                 :          0 :         bool done = false;
    7451         [ #  # ]:          0 :         std::vector< EntityHandle > dum_vec;
    7452         [ #  # ]:          0 :         result = recv_buffer( MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind],
    7453         [ #  # ]:          0 :                               recv_tag_reqs[3 * ind + 1],  // This is for receiving the second message
    7454         [ #  # ]:          0 :                               recv_tag_reqs[3 * ind + 2],  // This would be for ack, but it is not
    7455                 :            :                                                            // used; consider removing it
    7456         [ #  # ]:          0 :                               incoming, localOwnedBuffs[ind],
    7457         [ #  # ]:          0 :                               sendReqs[3 * ind + 1],  // Send request for sending the second message
    7458         [ #  # ]:          0 :                               sendReqs[3 * ind + 2],  // This is for sending the ack
    7459 [ #  # ][ #  # ]:          0 :                               done );MB_CHK_SET_ERR( result, "Failed to resize recv buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    7460         [ #  # ]:          0 :         if( done )
    7461                 :            :         {
    7462 [ #  # ][ #  # ]:          0 :             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
    7463 [ #  # ][ #  # ]:          0 :             result = unpack_tags( remoteOwnedBuffs[ind]->buff_ptr, dum_vec, true, buffProcs[ind], &mpi_op );MB_CHK_SET_ERR( result, "Failed to recv-unpack-tag message" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7464                 :            :         }
    7465                 :          0 :     }
    7466                 :            : 
    7467                 :            :     // OK, now wait
    7468 [ #  # ][ #  # ]:          0 :     if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    7469                 :            :     else
    7470                 :            :     {
    7471                 :            :         MPI_Status status[3 * MAX_SHARING_PROCS];
    7472 [ #  # ][ #  # ]:          0 :         success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], status );
    7473                 :            :     }
    7474 [ #  # ][ #  # ]:          0 :     if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failure in waitall in tag exchange" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7475                 :            : 
    7476         [ #  # ]:          0 :     myDebug->tprintf( 1, "Exiting reduce_tags" );
    7477                 :            : 
    7478                 :          0 :     return MB_SUCCESS;
    7479                 :            : }
    7480                 :            : 
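A sketch of a typical reduce_tags call; as the checks above require, each source tag must have a default value and an integer, double, or bit data type. The tag handles contrib_tag and total_tag are hypothetical:

    std::vector< moab::Tag > src( 1, contrib_tag ), dst( 1, total_tag );
    moab::Range all_shared;                          // empty range -> all shared entities
    moab::ErrorCode rval = pc->reduce_tags( src, dst, MPI_SUM, all_shared );
    // On return, shared copies hold the reduced (here summed) values in total_tag.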
    7481                 :            : //! return sharedp tag
    7482                 :          9 : Tag ParallelComm::sharedp_tag()
    7483                 :            : {
    7484         [ +  + ]:          9 :     if( !sharedpTag )
    7485                 :            :     {
    7486                 :          4 :         int def_val      = -1;
    7487                 :            :         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedpTag,
    7488         [ +  - ]:          4 :                                                    MB_TAG_DENSE | MB_TAG_CREAT, &def_val );
    7489         [ -  + ]:          4 :         if( MB_SUCCESS != result ) return 0;
    7490                 :            :     }
    7491                 :            : 
    7492                 :          9 :     return sharedpTag;
    7493                 :            : }
    7494                 :            : 
    7495                 :            : //! return sharedps tag
    7496                 :          0 : Tag ParallelComm::sharedps_tag()
    7497                 :            : {
    7498         [ #  # ]:          0 :     if( !sharedpsTag )
    7499                 :            :     {
    7500                 :            :         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER,
    7501                 :          0 :                                                    sharedpsTag, MB_TAG_SPARSE | MB_TAG_CREAT );
    7502         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return 0;
    7503                 :            :     }
    7504                 :            : 
    7505                 :          0 :     return sharedpsTag;
    7506                 :            : }
    7507                 :            : 
    7508                 :            : //! return sharedh tag
    7509                 :          5 : Tag ParallelComm::sharedh_tag()
    7510                 :            : {
    7511         [ +  + ]:          5 :     if( !sharedhTag )
    7512                 :            :     {
    7513                 :          3 :         EntityHandle def_val = 0;
    7514                 :            :         ErrorCode result     = mbImpl->tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedhTag,
    7515         [ +  - ]:          3 :                                                    MB_TAG_DENSE | MB_TAG_CREAT, &def_val );
    7516         [ -  + ]:          3 :         if( MB_SUCCESS != result ) return 0;
    7517                 :            :     }
    7518                 :            : 
    7519                 :          5 :     return sharedhTag;
    7520                 :            : }
    7521                 :            : 
    7522                 :            : //! return sharedhs tag
    7523                 :          0 : Tag ParallelComm::sharedhs_tag()
    7524                 :            : {
    7525         [ #  # ]:          0 :     if( !sharedhsTag )
    7526                 :            :     {
    7527                 :            :         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE,
    7528                 :          0 :                                                    sharedhsTag, MB_TAG_SPARSE | MB_TAG_CREAT );
    7529         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return 0;
    7530                 :            :     }
    7531                 :            : 
    7532                 :          0 :     return sharedhsTag;
    7533                 :            : }
    7534                 :            : 
    7535                 :            : //! return pstatus tag
    7536                 :        186 : Tag ParallelComm::pstatus_tag()
    7537                 :            : {
    7538         [ +  + ]:        186 :     if( !pstatusTag )
    7539                 :            :     {
    7540                 :         22 :         unsigned char tmp_pstatus = 0;
    7541                 :            :         ErrorCode result          = mbImpl->tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatusTag,
    7542         [ +  - ]:         22 :                                                    MB_TAG_DENSE | MB_TAG_CREAT, &tmp_pstatus );
    7543         [ -  + ]:         22 :         if( MB_SUCCESS != result ) return 0;
    7544                 :            :     }
    7545                 :            : 
    7546                 :        186 :     return pstatusTag;
    7547                 :            : }
    7548                 :            : 
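The tag accessors above create the conventional parallel tags on first use. A short sketch of reading the per-entity parallel status through pstatus_tag (ent, mb, and pc are placeholder names):

    unsigned char pstat = 0;
    moab::ErrorCode rval = mb.tag_get_data( pc->pstatus_tag(), &ent, 1, &pstat );
    if( moab::MB_SUCCESS == rval && ( pstat & PSTATUS_SHARED ) && !( pstat & PSTATUS_NOT_OWNED ) )
    {
        // ent is shared with another rank and owned locally
    }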
    7549                 :            : //! return partition set tag
    7550                 :         19 : Tag ParallelComm::partition_tag()
    7551                 :            : {
    7552         [ +  - ]:         19 :     if( !partitionTag )
    7553                 :            :     {
    7554                 :         19 :         int dum_id       = -1;
    7555                 :            :         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_PARTITION_TAG_NAME, 1, MB_TYPE_INTEGER, partitionTag,
    7556         [ +  - ]:         19 :                                                    MB_TAG_SPARSE | MB_TAG_CREAT, &dum_id );
    7557         [ -  + ]:         19 :         if( MB_SUCCESS != result ) return 0;
    7558                 :            :     }
    7559                 :            : 
    7560                 :         19 :     return partitionTag;
    7561                 :            : }
    7562                 :            : 
    7563                 :            : //! return pcomm tag; impl is passed in because this is a static function
    7564                 :        779 : Tag ParallelComm::pcomm_tag( Interface* impl, bool create_if_missing )
    7565                 :            : {
    7566                 :        779 :     Tag this_tag = 0;
    7567                 :            :     ErrorCode result;
    7568         [ +  + ]:        779 :     if( create_if_missing )
    7569                 :            :     {
    7570                 :            :         result = impl->tag_get_handle( PARALLEL_COMM_TAG_NAME, MAX_SHARING_PROCS * sizeof( ParallelComm* ),
    7571         [ +  - ]:         87 :                                        MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE | MB_TAG_CREAT );
    7572                 :            :     }
    7573                 :            :     else
    7574                 :            :     {
    7575                 :            :         result = impl->tag_get_handle( PARALLEL_COMM_TAG_NAME, MAX_SHARING_PROCS * sizeof( ParallelComm* ),
    7576         [ +  - ]:        692 :                                        MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE );
    7577                 :            :     }
    7578                 :            : 
    7579         [ +  + ]:        779 :     if( MB_SUCCESS != result ) return 0;
    7580                 :            : 
    7581                 :        779 :     return this_tag;
    7582                 :            : }
    7583                 :            : 
    7584                 :            : //! get the indexed pcomm object from the interface
    7585                 :        281 : ParallelComm* ParallelComm::get_pcomm( Interface* impl, const int index )
    7586                 :            : {
    7587         [ +  - ]:        281 :     Tag pc_tag = pcomm_tag( impl, false );
    7588         [ +  + ]:        281 :     if( 0 == pc_tag ) return NULL;
    7589                 :            : 
    7590                 :        246 :     const EntityHandle root = 0;
    7591                 :            :     ParallelComm* pc_array[MAX_SHARING_PROCS];
    7592         [ +  - ]:        246 :     ErrorCode result = impl->tag_get_data( pc_tag, &root, 1, (void*)pc_array );
    7593         [ -  + ]:        246 :     if( MB_SUCCESS != result ) return NULL;
    7594                 :            : 
    7595                 :        281 :     return pc_array[index];
    7596                 :            : }
    7597                 :            : 
    7598                 :        371 : ErrorCode ParallelComm::get_all_pcomm( Interface* impl, std::vector< ParallelComm* >& list )
    7599                 :            : {
    7600         [ +  - ]:        371 :     Tag pc_tag = pcomm_tag( impl, false );
    7601         [ +  + ]:        371 :     if( 0 == pc_tag ) return MB_TAG_NOT_FOUND;
    7602                 :            : 
    7603                 :         44 :     const EntityHandle root = 0;
    7604                 :            :     ParallelComm* pc_array[MAX_SHARING_PROCS];
    7605         [ +  - ]:         44 :     ErrorCode rval = impl->tag_get_data( pc_tag, &root, 1, pc_array );
    7606         [ -  + ]:         44 :     if( MB_SUCCESS != rval ) return rval;
    7607                 :            : 
    7608         [ +  + ]:       2860 :     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
    7609                 :            :     {
    7610 [ +  + ][ +  - ]:       2816 :         if( pc_array[i] ) list.push_back( pc_array[i] );
    7611                 :            :     }
    7612                 :            : 
    7613                 :        371 :     return MB_SUCCESS;
    7614                 :            : }
    7615                 :            : 
    7616                 :            : //! get the pcomm object associated with a partitioning set, creating it if absent and a communicator is supplied
    7617                 :        265 : ParallelComm* ParallelComm::get_pcomm( Interface* impl, EntityHandle prtn, const MPI_Comm* comm )
    7618                 :            : {
    7619                 :            :     ErrorCode rval;
    7620                 :        265 :     ParallelComm* result = 0;
    7621                 :            : 
    7622                 :            :     Tag prtn_tag;
    7623                 :            :     rval =
    7624         [ +  - ]:        265 :         impl->tag_get_handle( PARTITIONING_PCOMM_TAG_NAME, 1, MB_TYPE_INTEGER, prtn_tag, MB_TAG_SPARSE | MB_TAG_CREAT );
    7625         [ -  + ]:        265 :     if( MB_SUCCESS != rval ) return 0;
    7626                 :            : 
    7627                 :            :     int pcomm_id;
    7628         [ +  - ]:        265 :     rval = impl->tag_get_data( prtn_tag, &prtn, 1, &pcomm_id );
    7629 [ +  + ][ +  - ]:        265 :     if( MB_SUCCESS == rval ) { result = get_pcomm( impl, pcomm_id ); }
    7630 [ +  - ][ +  - ]:         40 :     else if( MB_TAG_NOT_FOUND == rval && comm )
    7631                 :            :     {
    7632 [ +  - ][ +  - ]:         40 :         result = new ParallelComm( impl, *comm, &pcomm_id );
    7633         [ -  + ]:         40 :         if( !result ) return 0;
    7634         [ +  - ]:         40 :         result->set_partitioning( prtn );
    7635                 :            : 
    7636         [ +  - ]:         40 :         rval = impl->tag_set_data( prtn_tag, &prtn, 1, &pcomm_id );
    7637         [ -  + ]:         40 :         if( MB_SUCCESS != rval )
    7638                 :            :         {
    7639         [ #  # ]:          0 :             delete result;
    7640                 :          0 :             result = 0;
    7641                 :            :         }
    7642                 :            :     }
    7643                 :            : 
    7644                 :        265 :     return result;
    7645                 :            : }
    7646                 :            : 
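A sketch of the partitioning-set overload: it looks up the ParallelComm recorded on the set or, when none is found and a communicator is supplied, constructs one and attaches it via set_partitioning (prtn_set is a placeholder set handle):

    MPI_Comm comm = MPI_COMM_WORLD;
    moab::ParallelComm* pc = moab::ParallelComm::get_pcomm( &mb, prtn_set, &comm );
    if( !pc ) { /* lookup and creation both failed */ }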
    7647                 :         40 : ErrorCode ParallelComm::set_partitioning( EntityHandle set )
    7648                 :            : {
    7649                 :            :     ErrorCode rval;
    7650                 :            :     Tag prtn_tag;
    7651                 :            :     rval = mbImpl->tag_get_handle( PARTITIONING_PCOMM_TAG_NAME, 1, MB_TYPE_INTEGER, prtn_tag,
    7652         [ +  - ]:         40 :                                    MB_TAG_SPARSE | MB_TAG_CREAT );
    7653         [ -  + ]:         40 :     if( MB_SUCCESS != rval ) return rval;
    7654                 :            : 
    7655                 :            :     // Get my id
    7656                 :            :     ParallelComm* pcomm_arr[MAX_SHARING_PROCS];
    7657         [ +  - ]:         40 :     Tag pc_tag = pcomm_tag( mbImpl, false );
    7658         [ -  + ]:         40 :     if( 0 == pc_tag ) return MB_FAILURE;
    7659                 :         40 :     const EntityHandle root = 0;
    7660         [ +  - ]:         40 :     ErrorCode result        = mbImpl->tag_get_data( pc_tag, &root, 1, pcomm_arr );
    7661         [ -  + ]:         40 :     if( MB_SUCCESS != result ) return MB_FAILURE;
    7662         [ +  - ]:         40 :     int id = std::find( pcomm_arr, pcomm_arr + MAX_SHARING_PROCS, this ) - pcomm_arr;
    7663         [ -  + ]:         40 :     if( id == MAX_SHARING_PROCS ) return MB_FAILURE;
    7664                 :            : 
    7665                 :         40 :     EntityHandle old = partitioningSet;
    7666         [ -  + ]:         40 :     if( old )
    7667                 :            :     {
    7668         [ #  # ]:          0 :         rval = mbImpl->tag_delete_data( prtn_tag, &old, 1 );
    7669         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    7670                 :          0 :         partitioningSet = 0;
    7671                 :            :     }
    7672                 :            : 
    7673         [ -  + ]:         40 :     if( !set ) return MB_SUCCESS;
    7674                 :            : 
    7675         [ +  - ]:         40 :     Range contents;
    7676         [ -  + ]:         40 :     if( old )
    7677                 :            :     {
    7678         [ #  # ]:          0 :         rval = mbImpl->get_entities_by_handle( old, contents );
    7679         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    7680                 :            :     }
    7681                 :            :     else
    7682                 :            :     {
    7683 [ +  - ][ +  - ]:         40 :         contents = partition_sets();
    7684                 :            :     }
    7685                 :            : 
    7686         [ +  - ]:         40 :     rval = mbImpl->add_entities( set, contents );
    7687         [ -  + ]:         40 :     if( MB_SUCCESS != rval ) return rval;
    7688                 :            : 
    7689                 :            :     // Store pcomm id on new partition set
    7690         [ +  - ]:         40 :     rval = mbImpl->tag_set_data( prtn_tag, &set, 1, &id );
    7691         [ -  + ]:         40 :     if( MB_SUCCESS != rval ) return rval;
    7692                 :            : 
    7693                 :         40 :     partitioningSet = set;
    7694                 :         40 :     return MB_SUCCESS;
    7695                 :            : }
    7696                 :            : 
    7697                 :            : //! return all the entities in parts owned locally
    7698                 :          0 : ErrorCode ParallelComm::get_part_entities( Range& ents, int dim )
    7699                 :            : {
    7700                 :            :     ErrorCode result;
    7701                 :            : 
    7702 [ #  # ][ #  # ]:          0 :     for( Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit )
         [ #  # ][ #  # ]
                 [ #  # ]
    7703                 :            :     {
    7704         [ #  # ]:          0 :         Range tmp_ents;
    7705         [ #  # ]:          0 :         if( -1 == dim )
    7706 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_handle( *rit, tmp_ents, true );
    7707                 :            :         else
    7708 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_entities_by_dimension( *rit, dim, tmp_ents, true );
    7709                 :            : 
    7710         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return result;
    7711 [ #  # ][ #  # ]:          0 :         ents.merge( tmp_ents );
    7712                 :          0 :     }
    7713                 :            : 
    7714                 :          0 :     return MB_SUCCESS;
    7715                 :            : }
    7716                 :            : 
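A sketch of collecting the locally owned cells from the parts on this rank (dimension 3 here; passing -1 would return entities of every dimension; pc is a placeholder):

    moab::Range local_cells;
    moab::ErrorCode rval = pc->get_part_entities( local_cells, 3 );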
    7717                 :            : /** \brief Return the rank of the entity's owner and the entity's handle on the owning processor
    7718                 :            :  */
    7719                 :         26 : ErrorCode ParallelComm::get_owner_handle( EntityHandle entity, int& owner, EntityHandle& handle )
    7720                 :            : {
    7721                 :            :     unsigned char pstat;
    7722                 :            :     int sharing_procs[MAX_SHARING_PROCS];
    7723                 :            :     EntityHandle sharing_handles[MAX_SHARING_PROCS];
    7724                 :            : 
    7725 [ +  - ][ +  - ]:         26 :     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7726         [ +  - ]:         26 :     if( !( pstat & PSTATUS_NOT_OWNED ) )
    7727                 :            :     {
    7728 [ +  - ][ +  - ]:         26 :         owner  = proc_config().proc_rank();
    7729                 :         26 :         handle = entity;
    7730                 :            :     }
    7731         [ #  # ]:          0 :     else if( pstat & PSTATUS_MULTISHARED )
    7732                 :            :     {
    7733 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7734                 :          0 :         owner  = sharing_procs[0];
    7735 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, sharing_handles );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7736                 :          0 :         handle = sharing_handles[0];
    7737                 :            :     }
    7738         [ #  # ]:          0 :     else if( pstat & PSTATUS_SHARED )
    7739                 :            :     {
    7740 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7741                 :          0 :         owner  = sharing_procs[0];
    7742 [ #  # ][ #  # ]:          0 :         result = mbImpl->tag_get_data( sharedh_tag(), &entity, 1, sharing_handles );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7743                 :          0 :         handle = sharing_handles[0];
    7744                 :            :     }
    7745                 :            :     else
    7746                 :            :     {
    7747                 :          0 :         owner  = -1;
    7748                 :          0 :         handle = 0;
    7749                 :            :     }
    7750                 :            : 
    7751                 :         26 :     return MB_SUCCESS;
    7752                 :            : }
    7753                 :            : 
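A sketch of an ownership query using get_owner_handle; owner receives the owning rank and remote_h the entity's handle on that rank (ent is a placeholder handle):

    int owner = -1;
    moab::EntityHandle remote_h = 0;
    moab::ErrorCode rval = pc->get_owner_handle( ent, owner, remote_h );
    if( moab::MB_SUCCESS == rval && owner == (int)pc->proc_config().proc_rank() )
    {
        // this rank owns ent, and remote_h == ent
    }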
    7754                 :          1 : ErrorCode ParallelComm::get_global_part_count( int& count_out ) const
    7755                 :            : {
    7756                 :          1 :     count_out = globalPartCount;
    7757         [ -  + ]:          1 :     return count_out < 0 ? MB_FAILURE : MB_SUCCESS;
    7758                 :            : }
    7759                 :            : 
    7760                 :         20 : ErrorCode ParallelComm::get_part_owner( int part_id, int& owner ) const
    7761                 :            : {
    7762                 :            :     // FIXME: assumes only 1 local part
    7763                 :         20 :     owner = part_id;
    7764                 :         20 :     return MB_SUCCESS;
    7765                 :            : }
    7766                 :            : 
    7767                 :         44 : ErrorCode ParallelComm::get_part_id( EntityHandle /*part*/, int& id_out ) const
    7768                 :            : {
    7769                 :            :     // FIXME: assumes only 1 local part
    7770                 :         44 :     id_out = proc_config().proc_rank();
    7771                 :         44 :     return MB_SUCCESS;
    7772                 :            : }
    7773                 :            : 
    7774                 :          5 : ErrorCode ParallelComm::get_part_handle( int id, EntityHandle& handle_out ) const
    7775                 :            : {
    7776                 :            :     // FIXME: assumes only 1 local part
    7777         [ -  + ]:          5 :     if( (unsigned)id != proc_config().proc_rank() ) return MB_ENTITY_NOT_FOUND;
    7778                 :          5 :     handle_out = partition_sets().front();
    7779                 :          5 :     return MB_SUCCESS;
    7780                 :            : }
    7781                 :            : 
    7782                 :         19 : ErrorCode ParallelComm::create_part( EntityHandle& set_out )
    7783                 :            : {
    7784                 :            :     // Mark as invalid so we know that it needs to be updated
    7785                 :         19 :     globalPartCount = -1;
    7786                 :            : 
    7787                 :            :     // Create set representing part
    7788         [ +  - ]:         19 :     ErrorCode rval = mbImpl->create_meshset( MESHSET_SET, set_out );
    7789         [ -  + ]:         19 :     if( MB_SUCCESS != rval ) return rval;
    7790                 :            : 
    7791                 :            :     // Set tag on set
    7792 [ +  - ][ +  - ]:         19 :     int val = proc_config().proc_rank();
    7793 [ +  - ][ +  - ]:         19 :     rval    = mbImpl->tag_set_data( part_tag(), &set_out, 1, &val );
    7794                 :            : 
    7795         [ -  + ]:         19 :     if( MB_SUCCESS != rval )
    7796                 :            :     {
    7797         [ #  # ]:          0 :         mbImpl->delete_entities( &set_out, 1 );
    7798                 :          0 :         return rval;
    7799                 :            :     }
    7800                 :            : 
    7801 [ +  - ][ +  - ]:         19 :     if( get_partitioning() )
    7802                 :            :     {
    7803 [ +  - ][ +  - ]:         19 :         rval = mbImpl->add_entities( get_partitioning(), &set_out, 1 );
    7804         [ -  + ]:         19 :         if( MB_SUCCESS != rval )
    7805                 :            :         {
    7806         [ #  # ]:          0 :             mbImpl->delete_entities( &set_out, 1 );
    7807                 :          0 :             return rval;
    7808                 :            :         }
    7809                 :            :     }
    7810                 :            : 
    7811         [ +  - ]:         19 :     moab::Range& pSets = this->partition_sets();
    7812 [ +  - ][ +  - ]:         19 :     if( pSets.index( set_out ) < 0 ) { pSets.insert( set_out ); }
                 [ +  - ]
    7813                 :            : 
    7814                 :         19 :     return MB_SUCCESS;
    7815                 :            : }
    7816                 :            : 
    7817                 :          0 : ErrorCode ParallelComm::destroy_part( EntityHandle part_id )
    7818                 :            : {
    7819                 :            :     // Mark as invalid so we know that it needs to be updated
    7820                 :          0 :     globalPartCount = -1;
    7821                 :            : 
    7822                 :            :     ErrorCode rval;
    7823         [ #  # ]:          0 :     if( get_partitioning() )
    7824                 :            :     {
    7825                 :          0 :         rval = mbImpl->remove_entities( get_partitioning(), &part_id, 1 );
    7826         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    7827                 :            :     }
    7828                 :            : 
    7829                 :          0 :     moab::Range& pSets = this->partition_sets();
    7830         [ #  # ]:          0 :     if( pSets.index( part_id ) >= 0 ) { pSets.erase( part_id ); }
    7831                 :          0 :     return mbImpl->delete_entities( &part_id, 1 );
    7832                 :            : }
    7833                 :            : 
    7834                 :         19 : ErrorCode ParallelComm::collective_sync_partition()
    7835                 :            : {
    7836 [ +  - ][ +  - ]:         19 :     int count       = partition_sets().size();
    7837                 :         19 :     globalPartCount = 0;
    7838 [ +  - ][ +  - ]:         19 :     int err         = MPI_Allreduce( &count, &globalPartCount, 1, MPI_INT, MPI_SUM, proc_config().proc_comm() );
                 [ +  - ]
    7839         [ -  + ]:         19 :     return err ? MB_FAILURE : MB_SUCCESS;
    7840                 :            : }
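// A minimal usage sketch for the part routines above, assuming an already-initialized
// ParallelComm (passed here as "pc"); the function name and variables are illustrative and
// not part of this file. collective_sync_partition() is collective, so every rank in the
// communicator must call it.
static ErrorCode example_setup_local_part( ParallelComm& pc, EntityHandle& part_set, int& global_parts )
{
    ErrorCode rval = pc.create_part( part_set );  // new set tagged with this rank, added to the partitioning set
    if( MB_SUCCESS != rval ) return rval;
    rval = pc.collective_sync_partition();  // Allreduce of local part counts
    if( MB_SUCCESS != rval ) return rval;
    return pc.get_global_part_count( global_parts );  // meaningful only after the sync above
}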
    7841                 :            : 
    7842                 :          4 : ErrorCode ParallelComm::get_part_neighbor_ids( EntityHandle part, int neighbors_out[MAX_SHARING_PROCS],
    7843                 :            :                                                int& num_neighbors_out )
    7844                 :            : {
    7845                 :            :     ErrorCode rval;
    7846         [ +  - ]:          4 :     Range iface;
    7847         [ +  - ]:          4 :     rval = get_interface_sets( part, iface );
    7848         [ -  + ]:          4 :     if( MB_SUCCESS != rval ) return rval;
    7849                 :            : 
    7850                 :          4 :     num_neighbors_out = 0;
    7851                 :          4 :     int n, j = 0;
    7852                 :          4 :     int tmp[MAX_SHARING_PROCS] = { 0 }, curr[MAX_SHARING_PROCS] = { 0 };
    7853                 :          4 :     int* parts[2] = { neighbors_out, tmp };
    7854 [ +  - ][ #  # ]:          4 :     for( Range::iterator i = iface.begin(); i != iface.end(); ++i )
         [ +  - ][ +  - ]
                 [ -  + ]
    7855                 :            :     {
    7856                 :            :         unsigned char pstat;
    7857 [ #  # ][ #  # ]:          0 :         rval = get_sharing_data( *i, curr, NULL, pstat, n );
    7858         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    7859         [ #  # ]:          0 :         std::sort( curr, curr + n );
    7860         [ #  # ]:          0 :         assert( num_neighbors_out < MAX_SHARING_PROCS );
    7861         [ #  # ]:          0 :         int* k            = std::set_union( parts[j], parts[j] + num_neighbors_out, curr, curr + n, parts[1 - j] );
    7862                 :          0 :         j                 = 1 - j;
    7863                 :          0 :         num_neighbors_out = k - parts[j];
    7864                 :            :     }
    7865 [ -  + ][ #  # ]:          4 :     if( parts[j] != neighbors_out ) std::copy( parts[j], parts[j] + num_neighbors_out, neighbors_out );
    7866                 :            : 
    7867                 :            :     // Remove input part from list
    7868                 :            :     int id;
    7869         [ +  - ]:          4 :     rval = get_part_id( part, id );
    7870         [ +  - ]:          4 :     if( MB_SUCCESS == rval )
    7871         [ +  - ]:          4 :         num_neighbors_out = std::remove( neighbors_out, neighbors_out + num_neighbors_out, id ) - neighbors_out;
    7872                 :          4 :     return rval;
    7873                 :            : }
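// A minimal sketch of querying neighboring parts with the routine above, under the
// one-part-per-processor convention noted in the FIXMEs (part id == MPI rank). The
// function name is illustrative only.
static ErrorCode example_list_part_neighbors( ParallelComm& pc )
{
    EntityHandle part;
    ErrorCode rval = pc.get_part_handle( pc.proc_config().proc_rank(), part );
    if( MB_SUCCESS != rval ) return rval;

    int neighbors[MAX_SHARING_PROCS], num_neighbors = 0;
    rval = pc.get_part_neighbor_ids( part, neighbors, num_neighbors );
    if( MB_SUCCESS != rval ) return rval;

    for( int i = 0; i < num_neighbors; i++ )
        std::cout << "part " << pc.proc_config().proc_rank() << " neighbors part " << neighbors[i] << std::endl;
    return MB_SUCCESS;
}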
    7874                 :            : 
    7875                 :          6 : ErrorCode ParallelComm::get_interface_sets( EntityHandle, Range& iface_sets_out, int* adj_part_id )
    7876                 :            : {
    7877                 :            :     // FIXME : assumes one part per processor.
    7878                 :            :     // Need to store part iface sets as children to implement
    7879                 :            :     // this correctly.
    7880                 :          6 :     iface_sets_out = interface_sets();
    7881                 :            : 
    7882         [ -  + ]:          6 :     if( adj_part_id )
    7883                 :            :     {
    7884                 :            :         int part_ids[MAX_SHARING_PROCS], num_parts;
    7885         [ #  # ]:          0 :         Range::iterator i = iface_sets_out.begin();
    7886 [ #  # ][ #  # ]:          0 :         while( i != iface_sets_out.end() )
                 [ #  # ]
    7887                 :            :         {
    7888                 :            :             unsigned char pstat;
    7889 [ #  # ][ #  # ]:          0 :             ErrorCode rval = get_sharing_data( *i, part_ids, NULL, pstat, num_parts );
    7890         [ #  # ]:          0 :             if( MB_SUCCESS != rval ) return rval;
    7891                 :            : 
    7892 [ #  # ][ #  # ]:          0 :             if( std::find( part_ids, part_ids + num_parts, *adj_part_id ) - part_ids != num_parts )
    7893         [ #  # ]:          0 :                 ++i;
    7894                 :            :             else
    7895         [ #  # ]:          0 :                 i = iface_sets_out.erase( i );
    7896                 :            :         }
    7897                 :            :     }
    7898                 :            : 
    7899                 :          6 :     return MB_SUCCESS;
    7900                 :            : }
    7901                 :            : 
    7902                 :         62 : ErrorCode ParallelComm::get_owning_part( EntityHandle handle, int& owning_part_id, EntityHandle* remote_handle )
    7903                 :            : {
    7904                 :            :     // FIXME : assumes one part per proc, and therefore part_id == rank
    7905                 :            : 
    7906                 :            :     // If entity is not shared, then we're the owner.
    7907                 :            :     unsigned char pstat;
    7908 [ +  - ][ +  - ]:         62 :     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &handle, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7909         [ +  - ]:         62 :     if( !( pstat & PSTATUS_NOT_OWNED ) )
    7910                 :            :     {
    7911 [ +  - ][ +  - ]:         62 :         owning_part_id = proc_config().proc_rank();
    7912         [ +  + ]:         62 :         if( remote_handle ) *remote_handle = handle;
    7913                 :         62 :         return MB_SUCCESS;
    7914                 :            :     }
    7915                 :            : 
    7916                 :            :     // If entity is shared with one other proc, then
     7917                 :            :     // sharedp_tag will contain a non-negative value (the rank of the other sharing proc).
    7918 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data( sharedp_tag(), &handle, 1, &owning_part_id );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7919         [ #  # ]:          0 :     if( owning_part_id != -1 )
    7920                 :            :     {
    7921                 :            :         // Done?
    7922         [ #  # ]:          0 :         if( !remote_handle ) return MB_SUCCESS;
    7923                 :            : 
    7924                 :            :         // Get handles on remote processors (and this one)
    7925 [ #  # ][ #  # ]:          0 :         return mbImpl->tag_get_data( sharedh_tag(), &handle, 1, remote_handle );
    7926                 :            :     }
    7927                 :            : 
    7928                 :            :     // If here, then the entity is shared with at least two other processors.
    7929                 :            :     // Get the list from the sharedps_tag
    7930                 :          0 :     const void* part_id_list = 0;
    7931 [ #  # ][ #  # ]:          0 :     result                   = mbImpl->tag_get_by_ptr( sharedps_tag(), &handle, 1, &part_id_list );
    7932         [ #  # ]:          0 :     if( MB_SUCCESS != result ) return result;
    7933                 :          0 :     owning_part_id = ( (const int*)part_id_list )[0];
    7934                 :            : 
    7935                 :            :     // Done?
    7936         [ #  # ]:          0 :     if( !remote_handle ) return MB_SUCCESS;
    7937                 :            : 
    7938                 :            :     // Get remote handles
    7939                 :          0 :     const void* handle_list = 0;
    7940 [ #  # ][ #  # ]:          0 :     result                  = mbImpl->tag_get_by_ptr( sharedhs_tag(), &handle, 1, &handle_list );
    7941         [ #  # ]:          0 :     if( MB_SUCCESS != result ) return result;
    7942                 :            : 
    7943                 :          0 :     *remote_handle = ( (const EntityHandle*)handle_list )[0];
    7944                 :         62 :     return MB_SUCCESS;
    7945                 :            : }
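// A minimal sketch of an ownership query using the routine above; under the
// one-part-per-proc assumption the returned part id equals the owning MPI rank.
// The function name and "ent" are illustrative.
static ErrorCode example_print_owner( ParallelComm& pc, EntityHandle ent )
{
    int owner_part            = -1;
    EntityHandle owner_handle = 0;
    ErrorCode rval            = pc.get_owning_part( ent, owner_part, &owner_handle );
    if( MB_SUCCESS != rval ) return rval;
    std::cout << "entity " << ent << " is owned by part " << owner_part << " (remote handle " << owner_handle << ")"
              << std::endl;
    return MB_SUCCESS;
}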
    7946                 :            : 
    7947                 :         36 : ErrorCode ParallelComm::get_sharing_parts( EntityHandle entity, int part_ids_out[MAX_SHARING_PROCS],
    7948                 :            :                                            int& num_part_ids_out, EntityHandle remote_handles[MAX_SHARING_PROCS] )
    7949                 :            : {
    7950                 :            :     // FIXME : assumes one part per proc, and therefore part_id == rank
    7951                 :            : 
    7952                 :            :     // If entity is not shared, then we're the owner.
    7953                 :            :     unsigned char pstat;
    7954 [ +  - ][ +  - ]:         36 :     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
         [ -  + ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7955         [ +  - ]:         36 :     if( !( pstat & PSTATUS_SHARED ) )
    7956                 :            :     {
    7957 [ +  - ][ +  - ]:         36 :         part_ids_out[0] = proc_config().proc_rank();
    7958         [ +  + ]:         36 :         if( remote_handles ) remote_handles[0] = entity;
    7959                 :         36 :         num_part_ids_out = 1;
    7960                 :         36 :         return MB_SUCCESS;
    7961                 :            :     }
    7962                 :            : 
    7963                 :            :     // If entity is shared with one other proc, then
     7964                 :            :     // sharedp_tag will contain a non-negative value (the rank of the other sharing proc).
    7965 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, part_ids_out );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    7966         [ #  # ]:          0 :     if( part_ids_out[0] != -1 )
    7967                 :            :     {
    7968                 :          0 :         num_part_ids_out = 2;
    7969 [ #  # ][ #  # ]:          0 :         part_ids_out[1]  = proc_config().proc_rank();
    7970                 :            : 
    7971                 :            :         // Done?
    7972         [ #  # ]:          0 :         if( !remote_handles ) return MB_SUCCESS;
    7973                 :            : 
    7974                 :            :         // Get handles on remote processors (and this one)
    7975                 :          0 :         remote_handles[1] = entity;
    7976 [ #  # ][ #  # ]:          0 :         return mbImpl->tag_get_data( sharedh_tag(), &entity, 1, remote_handles );
    7977                 :            :     }
    7978                 :            : 
    7979                 :            :     // If here, then the entity is shared with at least two other processors.
    7980                 :            :     // Get the list from the sharedps_tag
    7981 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, part_ids_out );
    7982         [ #  # ]:          0 :     if( MB_SUCCESS != result ) return result;
     7983                 :            :     // Count number of valid (non-negative) entries in sharedps_tag
    7984 [ #  # ][ #  # ]:          0 :     for( num_part_ids_out = 0; num_part_ids_out < MAX_SHARING_PROCS && part_ids_out[num_part_ids_out] >= 0;
    7985                 :            :          num_part_ids_out++ )
    7986                 :            :         ;
    7987                 :            :         // part_ids_out[num_part_ids_out++] = proc_config().proc_rank();
    7988                 :            : #ifndef NDEBUG
    7989 [ #  # ][ #  # ]:          0 :     int my_idx = std::find( part_ids_out, part_ids_out + num_part_ids_out, proc_config().proc_rank() ) - part_ids_out;
                 [ #  # ]
    7990         [ #  # ]:          0 :     assert( my_idx < num_part_ids_out );
    7991                 :            : #endif
    7992                 :            : 
    7993                 :            :     // Done?
    7994         [ #  # ]:          0 :     if( !remote_handles ) return MB_SUCCESS;
    7995                 :            : 
    7996                 :            :     // Get remote handles
    7997 [ #  # ][ #  # ]:          0 :     result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, remote_handles );
    7998                 :            :     // remote_handles[num_part_ids_out - 1] = entity;
    7999         [ #  # ]:          0 :     assert( remote_handles[my_idx] == entity );
    8000                 :            : 
    8001                 :         36 :     return result;
    8002                 :            : }
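// A minimal sketch of enumerating every part sharing an entity with the routine above,
// which internally walks the pstatus / sharedp / sharedps tag tiers. Names are illustrative.
static ErrorCode example_list_sharers( ParallelComm& pc, EntityHandle ent )
{
    int parts[MAX_SHARING_PROCS], num_parts = 0;
    EntityHandle handles[MAX_SHARING_PROCS];
    ErrorCode rval = pc.get_sharing_parts( ent, parts, num_parts, handles );
    if( MB_SUCCESS != rval ) return rval;
    for( int i = 0; i < num_parts; i++ )
        std::cout << "  shared with part " << parts[i] << " as remote handle " << handles[i] << std::endl;
    return MB_SUCCESS;
}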
    8003                 :            : 
    8004                 :          3 : ErrorCode ParallelComm::pack_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data )
    8005                 :            : {
    8006                 :            :     // Build up send buffers
    8007                 :          3 :     ErrorCode rval = MB_SUCCESS;
    8008                 :            :     int ent_procs[MAX_SHARING_PROCS];
    8009                 :            :     EntityHandle handles[MAX_SHARING_PROCS];
    8010                 :            :     int num_sharing, tmp_int;
    8011                 :            :     SharedEntityData tmp;
    8012         [ +  - ]:          3 :     send_data.resize( buffProcs.size() );
    8013 [ #  # ][ +  - ]:          3 :     for( std::set< EntityHandle >::iterator i = sharedEnts.begin(); i != sharedEnts.end(); ++i )
                 [ -  + ]
    8014                 :            :     {
    8015         [ #  # ]:          0 :         tmp.remote = *i;  // Swap local/remote so they're correct on the remote proc.
    8016 [ #  # ][ #  # ]:          0 :         rval       = get_owner( *i, tmp_int );
    8017                 :          0 :         tmp.owner  = tmp_int;
    8018         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    8019                 :            : 
    8020                 :            :         unsigned char pstat;
    8021 [ #  # ][ #  # ]:          0 :         rval = get_sharing_data( *i, ent_procs, handles, pstat, num_sharing );
    8022         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
    8023         [ #  # ]:          0 :         for( int j = 0; j < num_sharing; j++ )
    8024                 :            :         {
    8025 [ #  # ][ #  # ]:          0 :             if( ent_procs[j] == (int)proc_config().proc_rank() ) continue;
                 [ #  # ]
    8026                 :          0 :             tmp.local = handles[j];
    8027         [ #  # ]:          0 :             int ind   = get_buffers( ent_procs[j] );
    8028         [ #  # ]:          0 :             assert( -1 != ind );
    8029 [ #  # ][ #  # ]:          0 :             if( (int)send_data.size() < ind + 1 ) send_data.resize( ind + 1 );
    8030 [ #  # ][ #  # ]:          0 :             send_data[ind].push_back( tmp );
    8031                 :            :         }
    8032                 :            :     }
    8033                 :            : 
    8034                 :          3 :     return MB_SUCCESS;
    8035                 :            : }
    8036                 :            : 
    8037                 :          3 : ErrorCode ParallelComm::exchange_all_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data,
    8038                 :            :                                                      std::vector< std::vector< SharedEntityData > >& result )
    8039                 :            : {
    8040                 :            :     int ierr;
    8041                 :          3 :     const int tag      = 0;
    8042         [ +  - ]:          3 :     const MPI_Comm cm  = procConfig.proc_comm();
    8043                 :          3 :     const int num_proc = buffProcs.size();
    8044         [ +  - ]:          3 :     const std::vector< int > procs( buffProcs.begin(), buffProcs.end() );
    8045         [ +  - ]:          6 :     std::vector< MPI_Request > recv_req( buffProcs.size(), MPI_REQUEST_NULL );
    8046         [ +  - ]:          6 :     std::vector< MPI_Request > send_req( buffProcs.size(), MPI_REQUEST_NULL );
    8047                 :            : 
    8048                 :            :     // Set up to receive sizes
    8049 [ +  - ][ +  - ]:          6 :     std::vector< int > sizes_send( num_proc ), sizes_recv( num_proc );
    8050         [ -  + ]:          3 :     for( int i = 0; i < num_proc; i++ )
    8051                 :            :     {
    8052 [ #  # ][ #  # ]:          0 :         ierr = MPI_Irecv( &sizes_recv[i], 1, MPI_INT, procs[i], tag, cm, &recv_req[i] );
         [ #  # ][ #  # ]
    8053         [ #  # ]:          0 :         if( ierr ) return MB_FILE_WRITE_ERROR;
    8054                 :            :     }
    8055                 :            : 
    8056                 :            :     // Send sizes
    8057         [ -  + ]:          3 :     assert( num_proc == (int)send_data.size() );
    8058                 :            : 
    8059         [ +  - ]:          3 :     result.resize( num_proc );
    8060         [ -  + ]:          3 :     for( int i = 0; i < num_proc; i++ )
    8061                 :            :     {
    8062 [ #  # ][ #  # ]:          0 :         sizes_send[i] = send_data[i].size();
    8063 [ #  # ][ #  # ]:          0 :         ierr          = MPI_Isend( &sizes_send[i], 1, MPI_INT, buffProcs[i], tag, cm, &send_req[i] );
         [ #  # ][ #  # ]
    8064         [ #  # ]:          0 :         if( ierr ) return MB_FILE_WRITE_ERROR;
    8065                 :            :     }
    8066                 :            : 
    8067                 :            :     // Receive sizes
    8068         [ +  - ]:          6 :     std::vector< MPI_Status > stat( num_proc );
    8069 [ +  - ][ +  - ]:          3 :     ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
                 [ +  - ]
    8070         [ -  + ]:          3 :     if( ierr ) return MB_FILE_WRITE_ERROR;
    8071                 :            : 
    8072                 :            :     // Wait until all sizes are sent (clean up pending req's)
    8073 [ +  - ][ +  - ]:          3 :     ierr = MPI_Waitall( num_proc, &send_req[0], &stat[0] );
                 [ +  - ]
    8074         [ -  + ]:          3 :     if( ierr ) return MB_FILE_WRITE_ERROR;
    8075                 :            : 
    8076                 :            :     // Set up to receive data
    8077         [ -  + ]:          3 :     for( int i = 0; i < num_proc; i++ )
    8078                 :            :     {
    8079 [ #  # ][ #  # ]:          0 :         result[i].resize( sizes_recv[i] );
                 [ #  # ]
    8080 [ #  # ][ #  # ]:          0 :         ierr = MPI_Irecv( (void*)( &( result[i][0] ) ), sizeof( SharedEntityData ) * sizes_recv[i], MPI_UNSIGNED_CHAR,
                 [ #  # ]
    8081 [ #  # ][ #  # ]:          0 :                           buffProcs[i], tag, cm, &recv_req[i] );
                 [ #  # ]
    8082         [ #  # ]:          0 :         if( ierr ) return MB_FILE_WRITE_ERROR;
    8083                 :            :     }
    8084                 :            : 
    8085                 :            :     // Send data
    8086         [ -  + ]:          3 :     for( int i = 0; i < num_proc; i++ )
    8087                 :            :     {
    8088 [ #  # ][ #  # ]:          0 :         ierr = MPI_Isend( (void*)( &( send_data[i][0] ) ), sizeof( SharedEntityData ) * sizes_send[i],
                 [ #  # ]
    8089 [ #  # ][ #  # ]:          0 :                           MPI_UNSIGNED_CHAR, buffProcs[i], tag, cm, &send_req[i] );
                 [ #  # ]
    8090         [ #  # ]:          0 :         if( ierr ) return MB_FILE_WRITE_ERROR;
    8091                 :            :     }
    8092                 :            : 
    8093                 :            :     // Receive data
    8094 [ +  - ][ +  - ]:          3 :     ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
                 [ +  - ]
    8095         [ -  + ]:          3 :     if( ierr ) return MB_FILE_WRITE_ERROR;
    8096                 :            : 
    8097                 :            :     // Wait until everything is sent to release send buffers
    8098 [ +  - ][ +  - ]:          3 :     ierr = MPI_Waitall( num_proc, &send_req[0], &stat[0] );
                 [ +  - ]
    8099         [ -  + ]:          3 :     if( ierr ) return MB_FILE_WRITE_ERROR;
    8100                 :            : 
    8101                 :          6 :     return MB_SUCCESS;
    8102                 :            : }
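// A minimal, self-contained sketch of the two-phase pattern used above: exchange message
// sizes first with nonblocking point-to-point calls, then exchange the variable-length
// payloads once receive buffers can be sized. "peers" and "payload_*" are illustrative
// names, not ParallelComm members; payload_in[i] is the message destined for peers[i].
static int example_two_phase_exchange( MPI_Comm comm, const std::vector< int >& peers,
                                       const std::vector< std::vector< char > >& payload_in,
                                       std::vector< std::vector< char > >& payload_out )
{
    const int tag = 0;
    const int n   = (int)peers.size();
    std::vector< MPI_Request > rreq( n, MPI_REQUEST_NULL ), sreq( n, MPI_REQUEST_NULL );
    std::vector< int > recv_sizes( n, 0 ), send_sizes( n, 0 );

    // Phase 1: exchange sizes
    for( int i = 0; i < n; i++ )
        MPI_Irecv( &recv_sizes[i], 1, MPI_INT, peers[i], tag, comm, &rreq[i] );
    for( int i = 0; i < n; i++ )
    {
        send_sizes[i] = (int)payload_in[i].size();
        MPI_Isend( &send_sizes[i], 1, MPI_INT, peers[i], tag, comm, &sreq[i] );
    }
    MPI_Waitall( n, n ? &rreq[0] : NULL, MPI_STATUSES_IGNORE );
    MPI_Waitall( n, n ? &sreq[0] : NULL, MPI_STATUSES_IGNORE );

    // Phase 2: exchange payloads, now that each receive buffer can be allocated
    payload_out.resize( n );
    for( int i = 0; i < n; i++ )
    {
        payload_out[i].resize( recv_sizes[i] );
        MPI_Irecv( recv_sizes[i] ? &payload_out[i][0] : NULL, recv_sizes[i], MPI_CHAR, peers[i], tag, comm, &rreq[i] );
    }
    for( int i = 0; i < n; i++ )
        MPI_Isend( send_sizes[i] ? (void*)&payload_in[i][0] : NULL, send_sizes[i], MPI_CHAR, peers[i], tag, comm,
                   &sreq[i] );
    MPI_Waitall( n, n ? &rreq[0] : NULL, MPI_STATUSES_IGNORE );
    MPI_Waitall( n, n ? &sreq[0] : NULL, MPI_STATUSES_IGNORE );
    return MPI_SUCCESS;
}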
    8103                 :            : 
    8104                 :          3 : ErrorCode ParallelComm::check_all_shared_handles( bool print_em )
    8105                 :            : {
    8106                 :            :     // Get all shared ent data from other procs
    8107 [ +  - ][ +  - ]:          6 :     std::vector< std::vector< SharedEntityData > > shents( buffProcs.size() ), send_data( buffProcs.size() );
    8108                 :            : 
    8109                 :            :     ErrorCode result;
    8110                 :          3 :     bool done = false;
    8111                 :            : 
    8112         [ +  + ]:          6 :     while( !done )
    8113                 :            :     {
    8114         [ +  - ]:          3 :         result = check_local_shared();
    8115         [ -  + ]:          3 :         if( MB_SUCCESS != result )
    8116                 :            :         {
    8117                 :          0 :             done = true;
    8118                 :          0 :             continue;
    8119                 :            :         }
    8120                 :            : 
    8121         [ +  - ]:          3 :         result = pack_shared_handles( send_data );
    8122         [ -  + ]:          3 :         if( MB_SUCCESS != result )
    8123                 :            :         {
    8124                 :          0 :             done = true;
    8125                 :          0 :             continue;
    8126                 :            :         }
    8127                 :            : 
    8128         [ +  - ]:          3 :         result = exchange_all_shared_handles( send_data, shents );
    8129         [ -  + ]:          3 :         if( MB_SUCCESS != result )
    8130                 :            :         {
    8131                 :          0 :             done = true;
    8132                 :          0 :             continue;
    8133                 :            :         }
    8134                 :            : 
    8135 [ -  + ][ #  # ]:          3 :         if( !shents.empty() ) result = check_my_shared_handles( shents );
    8136                 :          3 :         done = true;
    8137                 :            :     }
    8138                 :            : 
    8139 [ -  + ][ #  # ]:          3 :     if( MB_SUCCESS != result && print_em )
    8140                 :            :     {
    8141                 :            : #ifdef MOAB_HAVE_HDF5
    8142         [ #  # ]:          0 :         std::ostringstream ent_str;
    8143 [ #  # ][ #  # ]:          0 :         ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
         [ #  # ][ #  # ]
    8144 [ #  # ][ #  # ]:          0 :         mbImpl->write_mesh( ent_str.str().c_str() );
    8145                 :            : #endif
    8146                 :            :     }
    8147                 :            : 
    8148                 :          3 :     return result;
    8149                 :            : }
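// A minimal sketch of using the collective check above as a debugging aid after resolving
// shared entities; "pc" and the abort policy are illustrative. With print_em == true a
// failing rank also writes its local mesh (HDF5 builds only), as in the branch above.
static void example_verify_shared_handles( ParallelComm& pc )
{
    if( MB_SUCCESS != pc.check_all_shared_handles( true ) ) MPI_Abort( pc.proc_config().proc_comm(), 1 );
}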
    8150                 :            : 
    8151                 :          3 : ErrorCode ParallelComm::check_local_shared()
    8152                 :            : {
    8153                 :            :     // Do some checks on shared entities to make sure things look
    8154                 :            :     // consistent
    8155                 :            : 
    8156                 :            :     // Check that non-vertex shared entities are shared by same procs as all
    8157                 :            :     // their vertices
    8158                 :            :     // std::pair<Range::const_iterator,Range::const_iterator> vert_it =
    8159                 :            :     //    sharedEnts.equal_range(MBVERTEX);
    8160         [ +  - ]:          3 :     std::vector< EntityHandle > dum_connect;
    8161                 :            :     const EntityHandle* connect;
    8162                 :            :     int num_connect;
    8163                 :            :     int tmp_procs[MAX_SHARING_PROCS];
    8164                 :            :     EntityHandle tmp_hs[MAX_SHARING_PROCS];
    8165 [ +  - ][ +  - ]:          6 :     std::set< int > tmp_set, vset;
    8166                 :            :     int num_ps;
    8167                 :            :     ErrorCode result;
    8168                 :            :     unsigned char pstat;
    8169         [ +  - ]:          6 :     std::vector< EntityHandle > bad_ents;
    8170         [ +  - ]:          6 :     std::vector< std::string > errors;
    8171                 :            : 
    8172         [ +  - ]:          3 :     std::set< EntityHandle >::iterator vit;
    8173 [ #  # ][ +  - ]:          3 :     for( vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
                 [ -  + ]
    8174                 :            :     {
    8175                 :            :         // Get sharing procs for this ent
    8176 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( *vit, tmp_procs, tmp_hs, pstat, num_ps );
    8177         [ #  # ]:          0 :         if( MB_SUCCESS != result )
    8178                 :            :         {
    8179 [ #  # ][ #  # ]:          0 :             bad_ents.push_back( *vit );
    8180 [ #  # ][ #  # ]:          0 :             errors.push_back( std::string( "Failure getting sharing data." ) );
    8181                 :          0 :             continue;
    8182                 :            :         }
    8183                 :            : 
    8184                 :          0 :         bool bad = false;
    8185                 :            :         // Entity must be shared
    8186         [ #  # ]:          0 :         if( !( pstat & PSTATUS_SHARED ) )
    8187 [ #  # ][ #  # ]:          0 :             errors.push_back( std::string( "Entity should be shared but isn't." ) ), bad = true;
    8188                 :            : 
     8189                 :            :         // If entity is not owned, this proc must not be the first proc in the sharing list
    8190 [ #  # ][ #  # ]:          0 :         if( pstat & PSTATUS_NOT_OWNED && tmp_procs[0] == (int)procConfig.proc_rank() )
         [ #  # ][ #  # ]
    8191 [ #  # ][ #  # ]:          0 :             errors.push_back( std::string( "Entity not owned but is first proc." ) ), bad = true;
    8192                 :            : 
    8193                 :            :         // If entity is owned and multishared, this must be first proc
    8194 [ #  # ][ #  # ]:          0 :         if( !( pstat & PSTATUS_NOT_OWNED ) && pstat & PSTATUS_MULTISHARED &&
         [ #  # ][ #  # ]
    8195 [ #  # ][ #  # ]:          0 :             ( tmp_procs[0] != (int)procConfig.proc_rank() || tmp_hs[0] != *vit ) )
                 [ #  # ]
    8196 [ #  # ][ #  # ]:          0 :             errors.push_back( std::string( "Entity owned and multishared but not first proc or not first handle." ) ),
    8197                 :          0 :                 bad = true;
    8198                 :            : 
    8199         [ #  # ]:          0 :         if( bad )
    8200                 :            :         {
    8201 [ #  # ][ #  # ]:          0 :             bad_ents.push_back( *vit );
    8202                 :          0 :             continue;
    8203                 :            :         }
    8204                 :            : 
    8205 [ #  # ][ #  # ]:          0 :         EntityType type = mbImpl->type_from_handle( *vit );
    8206 [ #  # ][ #  # ]:          0 :         if( type == MBVERTEX || type == MBENTITYSET ) continue;
    8207                 :            : 
    8208                 :            :         // Copy element's procs to vset and save size
    8209                 :          0 :         int orig_ps = num_ps;
    8210                 :          0 :         vset.clear();
    8211 [ #  # ][ #  # ]:          0 :         std::copy( tmp_procs, tmp_procs + num_ps, std::inserter( vset, vset.begin() ) );
    8212                 :            : 
    8213                 :            :         // Get vertices for this ent and intersection of sharing procs
    8214 [ #  # ][ #  # ]:          0 :         result = mbImpl->get_connectivity( *vit, connect, num_connect, false, &dum_connect );
    8215         [ #  # ]:          0 :         if( MB_SUCCESS != result )
    8216                 :            :         {
    8217 [ #  # ][ #  # ]:          0 :             bad_ents.push_back( *vit );
    8218 [ #  # ][ #  # ]:          0 :             errors.push_back( std::string( "Failed to get connectivity." ) );
    8219                 :          0 :             continue;
    8220                 :            :         }
    8221                 :            : 
    8222         [ #  # ]:          0 :         for( int i = 0; i < num_connect; i++ )
    8223                 :            :         {
    8224         [ #  # ]:          0 :             result = get_sharing_data( connect[i], tmp_procs, NULL, pstat, num_ps );
    8225         [ #  # ]:          0 :             if( MB_SUCCESS != result )
    8226                 :            :             {
    8227 [ #  # ][ #  # ]:          0 :                 bad_ents.push_back( *vit );
    8228                 :          0 :                 continue;
    8229                 :            :             }
    8230         [ #  # ]:          0 :             if( !num_ps )
    8231                 :            :             {
    8232                 :          0 :                 vset.clear();
    8233                 :          0 :                 break;
    8234                 :            :             }
    8235         [ #  # ]:          0 :             std::sort( tmp_procs, tmp_procs + num_ps );
    8236                 :          0 :             tmp_set.clear();
    8237                 :            :             std::set_intersection( tmp_procs, tmp_procs + num_ps, vset.begin(), vset.end(),
    8238 [ #  # ][ #  # ]:          0 :                                    std::inserter( tmp_set, tmp_set.end() ) );
    8239         [ #  # ]:          0 :             vset.swap( tmp_set );
    8240         [ #  # ]:          0 :             if( vset.empty() ) break;
    8241                 :            :         }
    8242                 :            : 
    8243                 :            :         // Intersect them; should be the same size as orig_ps
    8244                 :          0 :         tmp_set.clear();
    8245                 :            :         std::set_intersection( tmp_procs, tmp_procs + num_ps, vset.begin(), vset.end(),
    8246 [ #  # ][ #  # ]:          0 :                                std::inserter( tmp_set, tmp_set.end() ) );
    8247         [ #  # ]:          0 :         if( orig_ps != (int)tmp_set.size() )
    8248                 :            :         {
    8249 [ #  # ][ #  # ]:          0 :             errors.push_back( std::string( "Vertex proc set not same size as entity proc set." ) );
    8250 [ #  # ][ #  # ]:          0 :             bad_ents.push_back( *vit );
    8251         [ #  # ]:          0 :             for( int i = 0; i < num_connect; i++ )
    8252                 :            :             {
    8253         [ #  # ]:          0 :                 bad_ents.push_back( connect[i] );
    8254 [ #  # ][ #  # ]:          0 :                 errors.push_back( std::string( "vertex in connect" ) );
    8255                 :            :             }
    8256                 :            :         }
    8257                 :            :     }
    8258                 :            : 
    8259         [ -  + ]:          3 :     if( !bad_ents.empty() )
    8260                 :            :     {
    8261 [ #  # ][ #  # ]:          0 :         std::cout << "Found bad entities in check_local_shared, proc rank " << procConfig.proc_rank() << ","
         [ #  # ][ #  # ]
    8262         [ #  # ]:          0 :                   << std::endl;
    8263                 :          0 :         std::vector< std::string >::iterator sit;
    8264                 :          0 :         std::vector< EntityHandle >::iterator rit;
    8265 [ #  # ][ #  # ]:          0 :         for( rit = bad_ents.begin(), sit = errors.begin(); rit != bad_ents.end(); ++rit, ++sit )
         [ #  # ][ #  # ]
    8266                 :            :         {
    8267 [ #  # ][ #  # ]:          0 :             list_entities( &( *rit ), 1 );
    8268 [ #  # ][ #  # ]:          0 :             std::cout << "Reason: " << *sit << std::endl;
         [ #  # ][ #  # ]
    8269                 :            :         }
    8270                 :          0 :         return MB_FAILURE;
    8271                 :            :     }
    8272                 :            : 
    8273                 :            :     // To do: check interface sets
    8274                 :            : 
    8275                 :          6 :     return MB_SUCCESS;
    8276                 :            : }
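// A minimal sketch of the consistency rule enforced above, stated on plain integer proc
// sets: an element's sharing procs must all survive intersection with the sharing procs of
// each of its vertices. The real routine obtains the sets from get_sharing_data(); the
// function name and parameters here are illustrative.
static bool example_procs_consistent( const std::set< int >& elem_procs,
                                      const std::vector< std::set< int > >& vertex_procs )
{
    std::set< int > common( elem_procs );
    for( size_t v = 0; v < vertex_procs.size(); v++ )
    {
        std::set< int > tmp;
        std::set_intersection( common.begin(), common.end(), vertex_procs[v].begin(), vertex_procs[v].end(),
                               std::inserter( tmp, tmp.end() ) );
        common.swap( tmp );
        if( common.empty() ) break;
    }
    // Consistent only if no element proc was lost along the way
    return common.size() == elem_procs.size();
}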
    8277                 :            : 
    8278                 :          0 : ErrorCode ParallelComm::check_all_shared_handles( ParallelComm** pcs, int num_pcs )
    8279                 :            : {
    8280 [ #  # ][ #  # ]:          0 :     std::vector< std::vector< std::vector< SharedEntityData > > > shents, send_data;
    8281                 :          0 :     ErrorCode result = MB_SUCCESS, tmp_result;
    8282                 :            : 
    8283                 :            :     // Get all shared ent data from each proc to all other procs
    8284         [ #  # ]:          0 :     send_data.resize( num_pcs );
    8285         [ #  # ]:          0 :     for( int p = 0; p < num_pcs; p++ )
    8286                 :            :     {
    8287 [ #  # ][ #  # ]:          0 :         tmp_result = pcs[p]->pack_shared_handles( send_data[p] );
    8288         [ #  # ]:          0 :         if( MB_SUCCESS != tmp_result ) result = tmp_result;
    8289                 :            :     }
    8290         [ #  # ]:          0 :     if( MB_SUCCESS != result ) return result;
    8291                 :            : 
    8292                 :            :     // Move the data sorted by sending proc to data sorted by receiving proc
    8293         [ #  # ]:          0 :     shents.resize( num_pcs );
    8294         [ #  # ]:          0 :     for( int p = 0; p < num_pcs; p++ )
    8295 [ #  # ][ #  # ]:          0 :         shents[p].resize( pcs[p]->buffProcs.size() );
    8296                 :            : 
    8297         [ #  # ]:          0 :     for( int p = 0; p < num_pcs; p++ )
    8298                 :            :     {
    8299         [ #  # ]:          0 :         for( unsigned int idx_p = 0; idx_p < pcs[p]->buffProcs.size(); idx_p++ )
    8300                 :            :         {
    8301                 :            :             // Move send_data[p][to_p] to shents[to_p][idx_p]
    8302         [ #  # ]:          0 :             int to_p      = pcs[p]->buffProcs[idx_p];
    8303         [ #  # ]:          0 :             int top_idx_p = pcs[to_p]->get_buffers( p );
    8304         [ #  # ]:          0 :             assert( -1 != top_idx_p );
    8305 [ #  # ][ #  # ]:          0 :             shents[to_p][top_idx_p] = send_data[p][idx_p];
         [ #  # ][ #  # ]
                 [ #  # ]
    8306                 :            :         }
    8307                 :            :     }
    8308                 :            : 
    8309         [ #  # ]:          0 :     for( int p = 0; p < num_pcs; p++ )
    8310                 :            :     {
    8311         [ #  # ]:          0 :         std::ostringstream ostr;
    8312 [ #  # ][ #  # ]:          0 :         ostr << "Processor " << p << " bad entities:";
                 [ #  # ]
    8313 [ #  # ][ #  # ]:          0 :         tmp_result = pcs[p]->check_my_shared_handles( shents[p], ostr.str().c_str() );
                 [ #  # ]
    8314         [ #  # ]:          0 :         if( MB_SUCCESS != tmp_result ) result = tmp_result;
    8315                 :          0 :     }
    8316                 :            : 
    8317                 :          0 :     return result;
    8318                 :            : }
    8319                 :            : 
    8320                 :          0 : ErrorCode ParallelComm::check_my_shared_handles( std::vector< std::vector< SharedEntityData > >& shents,
    8321                 :            :                                                  const char* prefix )
    8322                 :            : {
    8323                 :            :     // Now check against what I think data should be
    8324                 :            :     // Get all shared entities
    8325                 :            :     ErrorCode result;
    8326         [ #  # ]:          0 :     Range all_shared;
    8327 [ #  # ][ #  # ]:          0 :     std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( all_shared ) );
    8328         [ #  # ]:          0 :     std::vector< EntityHandle > dum_vec;
    8329 [ #  # ][ #  # ]:          0 :     all_shared.erase( all_shared.upper_bound( MBPOLYHEDRON ), all_shared.end() );
                 [ #  # ]
    8330                 :            : 
    8331 [ #  # ][ #  # ]:          0 :     Range bad_ents, local_shared;
    8332                 :          0 :     std::vector< SharedEntityData >::iterator vit;
    8333                 :            :     unsigned char tmp_pstat;
    8334         [ #  # ]:          0 :     for( unsigned int i = 0; i < shents.size(); i++ )
    8335                 :            :     {
    8336         [ #  # ]:          0 :         int other_proc = buffProcs[i];
    8337         [ #  # ]:          0 :         result         = get_shared_entities( other_proc, local_shared );
    8338         [ #  # ]:          0 :         if( MB_SUCCESS != result ) return result;
    8339 [ #  # ][ #  # ]:          0 :         for( vit = shents[i].begin(); vit != shents[i].end(); ++vit )
         [ #  # ][ #  # ]
                 [ #  # ]
    8340                 :            :         {
    8341 [ #  # ][ #  # ]:          0 :             EntityHandle localh = vit->local, remoteh = vit->remote, dumh;
    8342         [ #  # ]:          0 :             local_shared.erase( localh );
    8343         [ #  # ]:          0 :             result = get_remote_handles( true, &localh, &dumh, 1, other_proc, dum_vec );
    8344 [ #  # ][ #  # ]:          0 :             if( MB_SUCCESS != result || dumh != remoteh ) bad_ents.insert( localh );
                 [ #  # ]
    8345         [ #  # ]:          0 :             result = get_pstatus( localh, tmp_pstat );
    8346 [ #  # ][ #  # ]:          0 :             if( MB_SUCCESS != result || ( !( tmp_pstat & PSTATUS_NOT_OWNED ) && (unsigned)vit->owner != rank() ) ||
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8347 [ #  # ][ #  # ]:          0 :                 ( tmp_pstat & PSTATUS_NOT_OWNED && (unsigned)vit->owner == rank() ) )
                 [ #  # ]
    8348         [ #  # ]:          0 :                 bad_ents.insert( localh );
    8349                 :            :         }
    8350                 :            : 
    8351 [ #  # ][ #  # ]:          0 :         if( !local_shared.empty() ) bad_ents.merge( local_shared );
                 [ #  # ]
    8352                 :            :     }
    8353                 :            : 
    8354 [ #  # ][ #  # ]:          0 :     if( !bad_ents.empty() )
    8355                 :            :     {
    8356 [ #  # ][ #  # ]:          0 :         if( prefix ) std::cout << prefix << std::endl;
                 [ #  # ]
    8357         [ #  # ]:          0 :         list_entities( bad_ents );
    8358                 :          0 :         return MB_FAILURE;
    8359                 :            :     }
    8360                 :            :     else
    8361                 :          0 :         return MB_SUCCESS;
    8362                 :            : }
    8363                 :            : 
    8364                 :          0 : ErrorCode ParallelComm::get_shared_entities( int other_proc, Range& shared_ents, int dim, const bool iface,
    8365                 :            :                                              const bool owned_filter )
    8366                 :            : {
    8367                 :          0 :     shared_ents.clear();
    8368                 :          0 :     ErrorCode result = MB_SUCCESS;
    8369                 :            : 
    8370                 :            :     // Dimension
    8371         [ #  # ]:          0 :     if( -1 != dim )
    8372                 :            :     {
    8373                 :          0 :         DimensionPair dp = CN::TypeDimensionMap[dim];
    8374         [ #  # ]:          0 :         Range dum_range;
    8375 [ #  # ][ #  # ]:          0 :         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( dum_range ) );
    8376 [ #  # ][ #  # ]:          0 :         shared_ents.merge( dum_range.lower_bound( dp.first ), dum_range.upper_bound( dp.second ) );
                 [ #  # ]
    8377                 :            :     }
    8378                 :            :     else
    8379         [ #  # ]:          0 :         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( shared_ents ) );
    8380                 :            : 
    8381                 :            :     // Filter by iface
    8382         [ #  # ]:          0 :     if( iface )
    8383                 :            :     {
    8384 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( shared_ents, PSTATUS_INTERFACE, PSTATUS_AND );MB_CHK_SET_ERR( result, "Failed to filter by iface" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8385                 :            :     }
    8386                 :            : 
    8387                 :            :     // Filter by owned
    8388         [ #  # ]:          0 :     if( owned_filter )
    8389                 :            :     {
    8390 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );MB_CHK_SET_ERR( result, "Failed to filter by owned" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8391                 :            :     }
    8392                 :            : 
    8393                 :            :     // Filter by proc
    8394         [ #  # ]:          0 :     if( -1 != other_proc )
    8395                 :            :     {
    8396 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( shared_ents, PSTATUS_SHARED, PSTATUS_AND, other_proc );MB_CHK_SET_ERR( result, "Failed to filter by proc" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8397                 :            :     }
    8398                 :            : 
    8399                 :          0 :     return result;
    8400                 :            : }
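// A minimal sketch of two common queries built on the filter chain above; "nbr" is an
// illustrative neighbor rank and the function name is not part of this file.
static ErrorCode example_shared_queries( ParallelComm& pc, int nbr, Range& all_shared, Range& owned_iface_verts )
{
    // Everything this rank shares with any other rank, any dimension
    ErrorCode rval = pc.get_shared_entities( -1, all_shared, -1, false, false );
    if( MB_SUCCESS != rval ) return rval;
    // Vertices (dim 0) on the interface with rank "nbr" that this rank also owns
    return pc.get_shared_entities( nbr, owned_iface_verts, 0, true, true );
}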
    8401                 :            : 
    8402                 :          0 : ErrorCode ParallelComm::clean_shared_tags( std::vector< Range* >& exchange_ents )
    8403                 :            : {
    8404         [ #  # ]:          0 :     for( unsigned int i = 0; i < exchange_ents.size(); i++ )
    8405                 :            :     {
    8406         [ #  # ]:          0 :         Range* ents        = exchange_ents[i];
    8407         [ #  # ]:          0 :         int num_ents       = ents->size();
    8408         [ #  # ]:          0 :         Range::iterator it = ents->begin();
    8409                 :            : 
    8410         [ #  # ]:          0 :         for( int n = 0; n < num_ents; n++ )
    8411                 :            :         {
    8412                 :            :             int sharing_proc;
     8413 [ #  # ][ #  # ]:          0 :             ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), &( *it ), 1, &sharing_proc );
         [ #  # ][ #  # ]
    8414 [ #  # ][ #  # ]:          0 :             if( result != MB_TAG_NOT_FOUND && sharing_proc == -1 )
    8415                 :            :             {
    8416 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_delete_data( sharedp_tag(), &( *it ), 1 );MB_CHK_SET_ERR( result, "Failed to delete sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8417 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_delete_data( sharedh_tag(), &( *it ), 1 );MB_CHK_SET_ERR( result, "Failed to delete sharedh tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8418 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_delete_data( pstatus_tag(), &( *it ), 1 );MB_CHK_SET_ERR( result, "Failed to delete pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8419                 :            :             }
    8420         [ #  # ]:          0 :             ++it;
    8421                 :            :         }
    8422                 :            :     }
    8423                 :            : 
    8424                 :          0 :     return MB_SUCCESS;
    8425                 :            : }
    8426                 :            : 
    8427                 :          0 : void ParallelComm::set_debug_verbosity( int verb )
    8428                 :            : {
    8429                 :          0 :     myDebug->set_verbosity( verb );
    8430                 :          0 : }
    8431                 :            : 
    8432                 :          0 : int ParallelComm::get_debug_verbosity()
    8433                 :            : {
    8434                 :          0 :     return myDebug->get_verbosity();
    8435                 :            : }
    8436                 :            : 
    8437                 :          0 : ErrorCode ParallelComm::get_entityset_procs( EntityHandle set, std::vector< unsigned >& ranks ) const
    8438                 :            : {
    8439                 :          0 :     return sharedSetData->get_sharing_procs( set, ranks );
    8440                 :            : }
    8441                 :            : 
    8442                 :          0 : ErrorCode ParallelComm::get_entityset_owner( EntityHandle entity_set, unsigned& owner_rank,
    8443                 :            :                                              EntityHandle* remote_handle ) const
    8444                 :            : {
    8445         [ #  # ]:          0 :     if( remote_handle )
    8446                 :          0 :         return sharedSetData->get_owner( entity_set, owner_rank, *remote_handle );
    8447                 :            :     else
    8448                 :          0 :         return sharedSetData->get_owner( entity_set, owner_rank );
    8449                 :            : }
    8450                 :            : 
    8451                 :          0 : ErrorCode ParallelComm::get_entityset_local_handle( unsigned owning_rank, EntityHandle remote_handle,
    8452                 :            :                                                     EntityHandle& local_handle ) const
    8453                 :            : {
    8454                 :          0 :     return sharedSetData->get_local_handle( owning_rank, remote_handle, local_handle );
    8455                 :            : }
    8456                 :            : 
    8457                 :          0 : ErrorCode ParallelComm::get_shared_sets( Range& result ) const
    8458                 :            : {
    8459                 :          0 :     return sharedSetData->get_shared_sets( result );
    8460                 :            : }
    8461                 :            : 
    8462                 :          0 : ErrorCode ParallelComm::get_entityset_owners( std::vector< unsigned >& ranks ) const
    8463                 :            : {
    8464                 :          0 :     return sharedSetData->get_owning_procs( ranks );
    8465                 :            : }
    8466                 :            : 
    8467                 :          0 : ErrorCode ParallelComm::get_owned_sets( unsigned owning_rank, Range& sets_out ) const
    8468                 :            : {
    8469                 :          0 :     return sharedSetData->get_shared_sets( owning_rank, sets_out );
    8470                 :            : }
    8471                 :            : 
    8472                 :          0 : ErrorCode ParallelComm::gather_data( Range& gather_ents, Tag& tag_handle, Tag id_tag, EntityHandle gather_set,
    8473                 :            :                                      int root_proc_rank )
    8474                 :            : {
    8475 [ #  # ][ #  # ]:          0 :     int dim           = mbImpl->dimension_from_handle( *gather_ents.begin() );
                 [ #  # ]
    8476                 :          0 :     int bytes_per_tag = 0;
    8477         [ #  # ]:          0 :     ErrorCode rval    = mbImpl->tag_get_bytes( tag_handle, bytes_per_tag );
    8478         [ #  # ]:          0 :     if( rval != MB_SUCCESS ) return rval;
    8479                 :            : 
    8480         [ #  # ]:          0 :     int sz_buffer         = sizeof( int ) + gather_ents.size() * ( sizeof( int ) + bytes_per_tag );
    8481                 :          0 :     void* senddata        = malloc( sz_buffer );
    8482         [ #  # ]:          0 :     ( (int*)senddata )[0] = (int)gather_ents.size();
    8483                 :          0 :     int* ptr_int          = (int*)senddata + 1;
    8484         [ #  # ]:          0 :     rval                  = mbImpl->tag_get_data( id_tag, gather_ents, (void*)ptr_int );
    8485         [ #  # ]:          0 :     if( rval != MB_SUCCESS ) return rval;
    8486         [ #  # ]:          0 :     ptr_int = (int*)( senddata ) + 1 + gather_ents.size();
    8487         [ #  # ]:          0 :     rval    = mbImpl->tag_get_data( tag_handle, gather_ents, (void*)ptr_int );
    8488         [ #  # ]:          0 :     if( rval != MB_SUCCESS ) return rval;
    8489 [ #  # ][ #  # ]:          0 :     std::vector< int > displs( proc_config().proc_size(), 0 );
                 [ #  # ]
    8490 [ #  # ][ #  # ]:          0 :     MPI_Gather( &sz_buffer, 1, MPI_INT, &displs[0], 1, MPI_INT, root_proc_rank, comm() );
                 [ #  # ]
    8491 [ #  # ][ #  # ]:          0 :     std::vector< int > recvcnts( proc_config().proc_size(), 0 );
                 [ #  # ]
    8492         [ #  # ]:          0 :     std::copy( displs.begin(), displs.end(), recvcnts.begin() );
    8493         [ #  # ]:          0 :     std::partial_sum( displs.begin(), displs.end(), displs.begin() );
    8494         [ #  # ]:          0 :     std::vector< int >::iterator lastM1 = displs.end() - 1;
    8495         [ #  # ]:          0 :     std::copy_backward( displs.begin(), lastM1, displs.end() );
    8496                 :            :     // std::copy_backward(displs.begin(), --displs.end(), displs.end());
    8497         [ #  # ]:          0 :     displs[0] = 0;
    8498                 :            : 
    8499 [ #  # ][ #  # ]:          0 :     if( (int)rank() != root_proc_rank )
    8500 [ #  # ][ #  # ]:          0 :         MPI_Gatherv( senddata, sz_buffer, MPI_BYTE, NULL, NULL, NULL, MPI_BYTE, root_proc_rank, comm() );
    8501                 :            :     else
    8502                 :            :     {
    8503         [ #  # ]:          0 :         Range gents;
    8504         [ #  # ]:          0 :         mbImpl->get_entities_by_dimension( gather_set, dim, gents );
    8505 [ #  # ][ #  # ]:          0 :         int recvbuffsz = gents.size() * ( bytes_per_tag + sizeof( int ) ) + proc_config().proc_size() * sizeof( int );
                 [ #  # ]
    8506                 :          0 :         void* recvbuf  = malloc( recvbuffsz );
    8507 [ #  # ][ #  # ]:          0 :         MPI_Gatherv( senddata, sz_buffer, MPI_BYTE, recvbuf, &recvcnts[0], &displs[0], MPI_BYTE, root_proc_rank,
    8508 [ #  # ][ #  # ]:          0 :                      comm() );
    8509                 :            : 
    8510                 :          0 :         void* gvals = NULL;
    8511                 :            : 
    8512                 :            :         // Test whether gents has multiple sequences
    8513                 :          0 :         bool multiple_sequences = false;
    8514 [ #  # ][ #  # ]:          0 :         if( gents.psize() > 1 )
    8515                 :          0 :             multiple_sequences = true;
    8516                 :            :         else
    8517                 :            :         {
    8518                 :            :             int count;
    8519 [ #  # ][ #  # ]:          0 :             rval = mbImpl->tag_iterate( tag_handle, gents.begin(), gents.end(), count, gvals );
                 [ #  # ]
    8520         [ #  # ]:          0 :             assert( NULL != gvals );
    8521         [ #  # ]:          0 :             assert( count > 0 );
    8522 [ #  # ][ #  # ]:          0 :             if( (size_t)count != gents.size() )
    8523                 :            :             {
    8524                 :          0 :                 multiple_sequences = true;
    8525                 :          0 :                 gvals              = NULL;
    8526                 :            :             }
    8527                 :            :         }
    8528                 :            : 
    8529                 :            :         // If gents has multiple sequences, create a temp buffer for gathered values
    8530         [ #  # ]:          0 :         if( multiple_sequences )
    8531                 :            :         {
    8532         [ #  # ]:          0 :             gvals = malloc( gents.size() * bytes_per_tag );
    8533         [ #  # ]:          0 :             assert( NULL != gvals );
    8534                 :            :         }
    8535                 :            : 
    8536 [ #  # ][ #  # ]:          0 :         for( int i = 0; i != (int)size(); i++ )
    8537                 :            :         {
    8538         [ #  # ]:          0 :             int numents   = *(int*)( ( (char*)recvbuf ) + displs[i] );
    8539         [ #  # ]:          0 :             int* id_ptr   = (int*)( ( (char*)recvbuf ) + displs[i] + sizeof( int ) );
    8540                 :          0 :             char* val_ptr = (char*)( id_ptr + numents );
    8541         [ #  # ]:          0 :             for( int j = 0; j != numents; j++ )
    8542                 :            :             {
    8543                 :          0 :                 int idx = id_ptr[j];
    8544                 :          0 :                 memcpy( (char*)gvals + ( idx - 1 ) * bytes_per_tag, val_ptr + j * bytes_per_tag, bytes_per_tag );
    8545                 :            :             }
    8546                 :            :         }
    8547                 :            : 
    8548                 :            :         // Free the receive buffer
    8549                 :          0 :         free( recvbuf );
    8550                 :            : 
    8551                 :            :         // If gents has multiple sequences, copy tag data (stored in the temp buffer) to each
    8552                 :            :         // sequence separately
    8553         [ #  # ]:          0 :         if( multiple_sequences )
    8554                 :            :         {
    8555         [ #  # ]:          0 :             Range::iterator iter = gents.begin();
    8556                 :          0 :             size_t start_idx     = 0;
    8557 [ #  # ][ #  # ]:          0 :             while( iter != gents.end() )
                 [ #  # ]
    8558                 :            :             {
    8559                 :            :                 int count;
    8560                 :            :                 void* ptr;
    8561 [ #  # ][ #  # ]:          0 :                 rval = mbImpl->tag_iterate( tag_handle, iter, gents.end(), count, ptr );
    8562         [ #  # ]:          0 :                 assert( NULL != ptr );
    8563         [ #  # ]:          0 :                 assert( count > 0 );
    8564                 :          0 :                 memcpy( (char*)ptr, (char*)gvals + start_idx * bytes_per_tag, bytes_per_tag * count );
    8565                 :            : 
    8566         [ #  # ]:          0 :                 iter += count;
    8567                 :          0 :                 start_idx += count;
    8568                 :            :             }
    8569 [ #  # ][ #  # ]:          0 :             assert( start_idx == gents.size() );
    8570                 :            : 
    8571                 :            :             // Free the temp buffer
    8572                 :          0 :             free( gvals );
    8573                 :          0 :         }
    8574                 :            :     }
    8575                 :            : 
    8576                 :            :     // Free the send data
    8577                 :          0 :     free( senddata );
    8578                 :            : 
    8579                 :          0 :     return MB_SUCCESS;
    8580                 :            : }
    8581                 :            : 
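// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ParallelComm.cpp): the gather above follows
// the usual variable-length MPI_Gatherv pattern -- each rank reports its byte
// count with MPI_Gather, the root turns the counts into exclusive prefix-sum
// displacements, and one MPI_Gatherv collects the packed buffers. The names
// below (gather_bytes, mine, all_on_root) are assumptions for the sketch only.

#include <mpi.h>
#include <vector>

void gather_bytes( const std::vector< char >& mine, std::vector< char >& all_on_root, MPI_Comm comm )
{
    int rank, nprocs;
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &nprocs );

    int my_size = (int)mine.size();
    std::vector< int > recvcnts( nprocs, 0 );
    MPI_Gather( &my_size, 1, MPI_INT, &recvcnts[0], 1, MPI_INT, 0, comm );

    // Exclusive prefix sum: displs[p] is the byte offset of rank p's chunk on the root
    std::vector< int > displs( nprocs, 0 );
    for( int p = 1; p < nprocs; p++ )
        displs[p] = displs[p - 1] + recvcnts[p - 1];

    if( 0 == rank ) all_on_root.resize( displs[nprocs - 1] + recvcnts[nprocs - 1] );
    MPI_Gatherv( (void*)mine.data(), my_size, MPI_BYTE, rank ? NULL : all_on_root.data(), &recvcnts[0], &displs[0],
                 MPI_BYTE, 0, comm );
}
// ---------------------------------------------------------------------------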
    8582                 :            : /*
    8583                 :            :  * This call is collective, so we will use the message ids for tag communications;
    8584                 :            :  * they are similar, but simpler
    8585                 :            :  * Pack the number of edges, the remote edge handles, then for each edge, the number
    8586                 :            :  *    of intersection points, and then 3 doubles for each intersection point
    8587                 :            :  * On average, there is one intx point per edge, in some cases 2, in some cases 0
    8588                 :            :  *   so on average, the message size is num_edges * (sizeof(eh) + sizeof(int) + 1*3*sizeof(double))
    8589                 :            :  *          = num_edges * (8 + 4 + 24)
    8590                 :            :  */
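// Illustrative worked example for the estimate above (assuming an 8-byte
// EntityHandle, 4-byte int and 8-byte double, as in the 8 + 4 + 24 figure):
//   per edge, on average: 8 (remote handle) + 4 (intx point count) + 1 * 3 * 8 (one point) = 36 bytes
//   so a message covering 1000 owned shared edges is roughly 36 KB, plus a small fixed-size header.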
    8591                 :          0 : ErrorCode ParallelComm::settle_intersection_points( Range& edges, Range& shared_edges_owned,
    8592                 :            :                                                     std::vector< std::vector< EntityHandle >* >& extraNodesVec,
    8593                 :            :                                                     double tolerance )
    8594                 :            : {
    8595                 :            :     // The index of an edge in the edges Range gives the index into extraNodesVec.
    8596                 :            :     // The strategy here follows the exchange-tags strategy:
    8597                 :            :     ErrorCode result;
    8598                 :            :     int success;
    8599                 :            : 
    8600         [ #  # ]:          0 :     myDebug->tprintf( 1, "Entering settle_intersection_points\n" );
    8601                 :            : 
    8602                 :            :     // Get all procs interfacing to this proc
    8603         [ #  # ]:          0 :     std::set< unsigned int > exch_procs;
    8604         [ #  # ]:          0 :     result = get_comm_procs( exch_procs );
    8605                 :            : 
    8606                 :            :     // Post ghost irecv's for all interface procs
    8607                 :            :     // Index requests the same as buffer/sharing procs indices
    8608         [ #  # ]:          0 :     std::vector< MPI_Request > recv_intx_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    8609                 :          0 :     std::vector< unsigned int >::iterator sit;
    8610                 :            :     int ind;
    8611                 :            : 
    8612         [ #  # ]:          0 :     reset_all_buffers();
    8613                 :          0 :     int incoming = 0;
    8614                 :            : 
    8615 [ #  # ][ #  # ]:          0 :     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
                 [ #  # ]
    8616                 :            :     {
    8617                 :          0 :         incoming++;
    8618 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
                 [ #  # ]
    8619         [ #  # ]:          0 :                            MB_MESG_TAGS_SIZE, incoming );
    8620                 :            : 
    8621 [ #  # ][ #  # ]:          0 :         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
    8622 [ #  # ][ #  # ]:          0 :                              MB_MESG_TAGS_SIZE, procConfig.proc_comm(), &recv_intx_reqs[3 * ind] );
                 [ #  # ]
    8623 [ #  # ][ #  # ]:          0 :         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in settle intersection point" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8624                 :            :     }
    8625                 :            : 
    8626                 :            :     // Pack and send intersection points from this proc to others
    8627                 :            :     // Make sendReqs vector to simplify initialization
    8628         [ #  # ]:          0 :     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
    8629                 :            : 
    8630                 :            :     // Use the owned shared edges as the entities to send
    8631                 :          0 :     Range& entities = shared_edges_owned;
    8632                 :            : 
    8633                 :            :     int dum_ack_buff;
    8634                 :            : 
    8635 [ #  # ][ #  # ]:          0 :     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
                 [ #  # ]
    8636                 :            :     {
    8637         [ #  # ]:          0 :         Range edges_to_send = entities;
    8638                 :            : 
    8639                 :            :         // Get ents shared by proc *sit
    8640 [ #  # ][ #  # ]:          0 :         result = filter_pstatus( edges_to_send, PSTATUS_SHARED, PSTATUS_AND, *sit );MB_CHK_SET_ERR( result, "Failed pstatus AND check" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8641                 :            : 
    8642                 :            :         // Remove nonowned entities; not needed here, the edges are already owned by this proc
    8643                 :            : 
    8644                 :            :         // Pack the data
    8645                 :            :         // Reserve space on front for size and for initial buff size
    8646         [ #  # ]:          0 :         Buffer* buff = localOwnedBuffs[ind];
    8647         [ #  # ]:          0 :         buff->reset_ptr( sizeof( int ) );
    8648                 :            : 
    8649                 :            :         /*result = pack_intx_points(edges_to_send, edges, extraNodesVec,
    8650                 :            :             localOwnedBuffs[ind], *sit);*/
    8651                 :            : 
    8652                 :            :         // Count the data first, to see whether there is enough room
    8653                 :            :         // Send the remote handles
    8654 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > dum_remote_edges( edges_to_send.size() );
                 [ #  # ]
    8655                 :            :         /*
    8656                 :            :          *  get_remote_handles(const bool store_remote_handles,
    8657                 :            :                                    EntityHandle *from_vec,
    8658                 :            :                                    EntityHandle *to_vec_tmp,
    8659                 :            :                                    int num_ents, int to_proc,
    8660                 :            :                                    const std::vector<EntityHandle> &new_ents);
    8661                 :            :          */
    8662                 :            :         // We are sending the count, the number of edges, the remote edge handles, and then, for each edge:
    8663                 :            :         //          -- the number of intx points, then 3 doubles per intx point
    8664 [ #  # ][ #  # ]:          0 :         std::vector< EntityHandle > dum_vec;
    8665 [ #  # ][ #  # ]:          0 :         result = get_remote_handles( true, edges_to_send, &dum_remote_edges[0], *sit, dum_vec );MB_CHK_SET_ERR( result, "Failed to get remote handles" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8666                 :          0 :         int count = 4;  // Size of data
    8667         [ #  # ]:          0 :         count += sizeof( int ) * (int)edges_to_send.size();
    8668         [ #  # ]:          0 :         count += sizeof( EntityHandle ) * (int)edges_to_send.size();  // We will send the remote handles
    8669 [ #  # ][ #  # ]:          0 :         for( Range::iterator eit = edges_to_send.begin(); eit != edges_to_send.end(); ++eit )
         [ #  # ][ #  # ]
                 [ #  # ]
    8670                 :            :         {
    8671         [ #  # ]:          0 :             EntityHandle edge                       = *eit;
    8672 [ #  # ][ #  # ]:          0 :             unsigned int indx                       = edges.find( edge ) - edges.begin();
                 [ #  # ]
    8673         [ #  # ]:          0 :             std::vector< EntityHandle >& intx_nodes = *( extraNodesVec[indx] );
    8674                 :          0 :             count += (int)intx_nodes.size() * 3 * sizeof( double );  // 3 doubles for each intersection node
    8675                 :            :         }
    8676                 :            :         //
    8677         [ #  # ]:          0 :         buff->check_space( count );
    8678 [ #  # ][ #  # ]:          0 :         PACK_INT( buff->buff_ptr, edges_to_send.size() );
    8679 [ #  # ][ #  # ]:          0 :         PACK_EH( buff->buff_ptr, &dum_remote_edges[0], dum_remote_edges.size() );
    8680 [ #  # ][ #  # ]:          0 :         for( Range::iterator eit = edges_to_send.begin(); eit != edges_to_send.end(); ++eit )
         [ #  # ][ #  # ]
                 [ #  # ]
    8681                 :            :         {
    8682         [ #  # ]:          0 :             EntityHandle edge = *eit;
    8683                 :            :             // Pack the remote edge
    8684 [ #  # ][ #  # ]:          0 :             unsigned int indx                       = edges.find( edge ) - edges.begin();
                 [ #  # ]
    8685         [ #  # ]:          0 :             std::vector< EntityHandle >& intx_nodes = *( extraNodesVec[indx] );
    8686         [ #  # ]:          0 :             PACK_INT( buff->buff_ptr, intx_nodes.size() );
    8687                 :            : 
    8688 [ #  # ][ #  # ]:          0 :             result = mbImpl->get_coords( &intx_nodes[0], intx_nodes.size(), (double*)buff->buff_ptr );MB_CHK_SET_ERR( result, "Failed to get coords" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8689                 :          0 :             buff->buff_ptr += 3 * sizeof( double ) * intx_nodes.size();
    8690                 :            :         }
    8691                 :            : 
    8692                 :            :         // Done packing the intx points and remote edges
    8693         [ #  # ]:          0 :         buff->set_stored_size();
    8694                 :            : 
    8695                 :            :         // Now send it
    8696 [ #  # ][ #  # ]:          0 :         result = send_buffer( *sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3 * ind],
                 [ #  # ]
    8697 [ #  # ][ #  # ]:          0 :                               recv_intx_reqs[3 * ind + 2], &dum_ack_buff, incoming );MB_CHK_SET_ERR( result, "Failed to send buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8698                 :          0 :     }
    8699                 :            : 
    8700                 :            :     // Receive/unpack intx points
    8701         [ #  # ]:          0 :     while( incoming )
    8702                 :            :     {
    8703                 :            :         MPI_Status status;
    8704                 :            :         int index_in_recv_requests;
    8705 [ #  # ][ #  # ]:          0 :         PRINT_DEBUG_WAITANY( recv_intx_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
    8706 [ #  # ][ #  # ]:          0 :         success = MPI_Waitany( 3 * buffProcs.size(), &recv_intx_reqs[0], &index_in_recv_requests, &status );
    8707 [ #  # ][ #  # ]:          0 :         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in settle intersection points" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8708                 :            :         // The processor index in the list is the request index divided by 3
    8709                 :          0 :         ind = index_in_recv_requests / 3;
    8710                 :            : 
    8711         [ #  # ]:          0 :         PRINT_DEBUG_RECD( status );
    8712                 :            : 
    8713                 :            :         // OK, received something; decrement incoming counter
    8714                 :          0 :         incoming--;
    8715                 :            : 
    8716                 :          0 :         bool done = false;
    8717         [ #  # ]:          0 :         result    = recv_buffer( MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind],
    8718         [ #  # ]:          0 :                               recv_intx_reqs[3 * ind + 1],  // This is for receiving the second message
    8719         [ #  # ]:          0 :                               recv_intx_reqs[3 * ind + 2],  // This would be for ack, but it is not
    8720                 :            :                                                             // used; consider removing it
    8721         [ #  # ]:          0 :                               incoming, localOwnedBuffs[ind],
    8722         [ #  # ]:          0 :                               sendReqs[3 * ind + 1],  // Send request for sending the second message
    8723         [ #  # ]:          0 :                               sendReqs[3 * ind + 2],  // This is for sending the ack
    8724 [ #  # ][ #  # ]:          0 :                               done );MB_CHK_SET_ERR( result, "Failed to resize recv buffer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8725         [ #  # ]:          0 :         if( done )
    8726                 :            :         {
    8727         [ #  # ]:          0 :             Buffer* buff = remoteOwnedBuffs[ind];
    8728         [ #  # ]:          0 :             buff->reset_ptr( sizeof( int ) );
    8729                 :            :             /*result = unpack_tags(remoteOwnedBuffs[ind/2]->buff_ptr, dum_vec, true,
    8730                 :            :                 buffProcs[ind/2]);*/
    8731                 :            :             // Now unpack the edges and vertex info, and compare with the existing vertex positions
    8732                 :            : 
    8733                 :            :             int num_edges;
    8734                 :            : 
    8735         [ #  # ]:          0 :             UNPACK_INT( buff->buff_ptr, num_edges );
    8736         [ #  # ]:          0 :             std::vector< EntityHandle > rec_edges;
    8737         [ #  # ]:          0 :             rec_edges.resize( num_edges );
    8738 [ #  # ][ #  # ]:          0 :             UNPACK_EH( buff->buff_ptr, &rec_edges[0], num_edges );
    8739 [ #  # ][ #  # ]:          0 :             for( int i = 0; i < num_edges; i++ )
    8740                 :            :             {
    8741         [ #  # ]:          0 :                 EntityHandle edge                       = rec_edges[i];
    8742 [ #  # ][ #  # ]:          0 :                 unsigned int indx                       = edges.find( edge ) - edges.begin();
                 [ #  # ]
    8743         [ #  # ]:          0 :                 std::vector< EntityHandle >& intx_nodes = *( extraNodesVec[indx] );
    8744                 :            :                 // Now get the number of nodes on this (now local) edge
    8745                 :            :                 int nverts;
    8746         [ #  # ]:          0 :                 UNPACK_INT( buff->buff_ptr, nverts );
    8747         [ #  # ]:          0 :                 std::vector< double > pos_from_owner;
    8748         [ #  # ]:          0 :                 pos_from_owner.resize( 3 * nverts );
    8749 [ #  # ][ #  # ]:          0 :                 UNPACK_DBLS( buff->buff_ptr, &pos_from_owner[0], 3 * nverts );
    8750 [ #  # ][ #  # ]:          0 :                 std::vector< double > current_positions( 3 * intx_nodes.size() );
    8751 [ #  # ][ #  # ]:          0 :                 result = mbImpl->get_coords( &intx_nodes[0], intx_nodes.size(), &current_positions[0] );MB_CHK_SET_ERR( result, "Failed to get current positions" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8752                 :            :                 // Now, look at what we have in current pos, compare to pos from owner, and reset
    8753         [ #  # ]:          0 :                 for( int k = 0; k < (int)intx_nodes.size(); k++ )
    8754                 :            :                 {
    8755         [ #  # ]:          0 :                     double* pk = &current_positions[3 * k];
    8756                 :            :                     // Take the current pos k, and settle among the ones from owner:
    8757                 :          0 :                     bool found = false;
    8758 [ #  # ][ #  # ]:          0 :                     for( int j = 0; j < nverts && !found; j++ )
    8759                 :            :                     {
    8760         [ #  # ]:          0 :                         double* pj   = &pos_from_owner[3 * j];
    8761                 :          0 :                         double dist2 = ( pk[0] - pj[0] ) * ( pk[0] - pj[0] ) + ( pk[1] - pj[1] ) * ( pk[1] - pj[1] ) +
    8762                 :          0 :                                        ( pk[2] - pj[2] ) * ( pk[2] - pj[2] );
    8763         [ #  # ]:          0 :                         if( dist2 < tolerance )
    8764                 :            :                         {
    8765                 :          0 :                             pk[0] = pj[0];
    8766                 :          0 :                             pk[1] = pj[1];
    8767                 :          0 :                             pk[2] = pj[2];  // Correct it!
    8768                 :          0 :                             found = true;
    8769                 :          0 :                             break;
    8770                 :            :                         }
    8771                 :            :                     }
    8772         [ #  # ]:          0 :                     if( !found )
    8773                 :            :                     {
    8774                 :            : #ifndef NDEBUG
    8775 [ #  # ][ #  # ]:          0 :                         std::cout << " pk:" << pk[0] << " " << pk[1] << " " << pk[2] << " not found \n";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8776                 :            : #endif
    8777                 :          0 :                         result = MB_FAILURE;
    8778                 :            :                     }
    8779                 :            :                 }
    8780                 :            :                 // After we are done resetting, we can set the new positions of nodes:
    8781 [ #  # ][ #  # ]:          0 :                 result = mbImpl->set_coords( &intx_nodes[0], (int)intx_nodes.size(), &current_positions[0] );MB_CHK_SET_ERR( result, "Failed to set new current positions" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8782                 :          0 :             }
    8783                 :            :         }
    8784                 :            :     }
    8785                 :            : 
    8786                 :            :     // OK, now wait
    8787 [ #  # ][ #  # ]:          0 :     if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
         [ #  # ][ #  # ]
    8788                 :            :     else
    8789                 :            :     {
    8790                 :            :         MPI_Status status[3 * MAX_SHARING_PROCS];
    8791 [ #  # ][ #  # ]:          0 :         success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], status );
    8792                 :            :     }
    8793 [ #  # ][ #  # ]:          0 :     if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failure in waitall in settle intersection points" ); }
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8794                 :            : 
    8795         [ #  # ]:          0 :     myDebug->tprintf( 1, "Exiting settle_intersection_points" );
    8796                 :            : 
    8797                 :          0 :     return MB_SUCCESS;
    8798                 :            : }
    8799                 :            : 
    8800                 :          0 : ErrorCode ParallelComm::delete_entities( Range& to_delete )
    8801                 :            : {
    8802                 :            :     // Will not look at shared sets yet, but maybe we should
    8803                 :            :     // First, see if any of the entities to delete is shared; then inform the other processors
    8804                 :            :     // about their fate (to be deleted), using a crystal router transfer
    8805                 :          0 :     ErrorCode rval = MB_SUCCESS;
    8806                 :            :     unsigned char pstat;
    8807                 :            :     EntityHandle tmp_handles[MAX_SHARING_PROCS];
    8808                 :            :     int tmp_procs[MAX_SHARING_PROCS];
    8809                 :            :     unsigned int num_ps;
    8810         [ #  # ]:          0 :     TupleList ents_to_delete;
    8811 [ #  # ][ #  # ]:          0 :     ents_to_delete.initialize( 1, 0, 1, 0, to_delete.size() * ( MAX_SHARING_PROCS + 1 ) );  // A little bit of overkill
    8812         [ #  # ]:          0 :     ents_to_delete.enableWriteAccess();
    8813                 :          0 :     unsigned int i = 0;
    8814 [ #  # ][ #  # ]:          0 :     for( Range::iterator it = to_delete.begin(); it != to_delete.end(); ++it )
         [ #  # ][ #  # ]
                 [ #  # ]
    8815                 :            :     {
    8816         [ #  # ]:          0 :         EntityHandle eh = *it;  // Entity to be deleted
    8817                 :            : 
    8818         [ #  # ]:          0 :         rval = get_sharing_data( eh, tmp_procs, tmp_handles, pstat, num_ps );
    8819 [ #  # ][ #  # ]:          0 :         if( rval != MB_SUCCESS || num_ps == 0 ) continue;
    8820                 :            :         // Add to the tuple list the information to be sent (to the remote procs)
    8821         [ #  # ]:          0 :         for( unsigned int p = 0; p < num_ps; p++ )
    8822                 :            :         {
    8823                 :          0 :             ents_to_delete.vi_wr[i]  = tmp_procs[p];
    8824                 :          0 :             ents_to_delete.vul_wr[i] = (unsigned long)tmp_handles[p];
    8825                 :          0 :             i++;
    8826         [ #  # ]:          0 :             ents_to_delete.inc_n();
    8827                 :            :         }
    8828                 :            :     }
    8829                 :            : 
    8830         [ #  # ]:          0 :     gs_data::crystal_data* cd = this->procConfig.crystal_router();
    8831                 :            :     // All communication happens here; no other MPI calls
    8832                 :            :     // Also, this is a collective call
    8833 [ #  # ][ #  # ]:          0 :     rval = cd->gs_transfer( 1, ents_to_delete, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8834                 :            : 
    8835                 :            :     // Add to the range of ents to delete the new ones that were sent from other procs
    8836         [ #  # ]:          0 :     unsigned int received = ents_to_delete.get_n();
    8837         [ #  # ]:          0 :     for( i = 0; i < received; i++ )
    8838                 :            :     {
    8839                 :            :         // int from = ents_to_delete.vi_rd[i];
    8840                 :          0 :         unsigned long valrec = ents_to_delete.vul_rd[i];
    8841         [ #  # ]:          0 :         to_delete.insert( (EntityHandle)valrec );
    8842                 :            :     }
    8843 [ #  # ][ #  # ]:          0 :     rval = mbImpl->delete_entities( to_delete );MB_CHK_SET_ERR( rval, "Error in deleting actual entities" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8844                 :            : 
    8845         [ #  # ]:          0 :     std::set< EntityHandle > good_ents;
    8846 [ #  # ][ #  # ]:          0 :     for( std::set< EntityHandle >::iterator sst = sharedEnts.begin(); sst != sharedEnts.end(); sst++ )
                 [ #  # ]
    8847                 :            :     {
    8848         [ #  # ]:          0 :         EntityHandle eh = *sst;
    8849         [ #  # ]:          0 :         int index       = to_delete.index( eh );
    8850 [ #  # ][ #  # ]:          0 :         if( -1 == index ) good_ents.insert( eh );
    8851                 :            :     }
    8852         [ #  # ]:          0 :     sharedEnts = good_ents;
    8853                 :            : 
    8854                 :            :     // What about shared sets? Who is updating them?
    8855                 :          0 :     return MB_SUCCESS;
    8856                 :            : }
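// Hedged usage sketch for the collective deletion above (the ParallelComm
// construction shown is an assumption about typical MOAB usage, not taken from
// this file):
//
//   moab::Core mb;
//   moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );
//   moab::Range dead;  // locally selected entities, possibly shared with other ranks
//   // ... fill 'dead' ...
//   moab::ErrorCode rval = pcomm.delete_entities( dead );
//
// Every rank must make the call (even with an empty range) because the crystal
// router transfer inside it is collective; handles of remote copies of shared
// entities are added to the deletion range before the actual delete.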
    8857                 :            : 
    8858                 :          0 : void ParallelComm::print_pstatus( unsigned char pstat, std::string& ostr )
    8859                 :            : {
    8860         [ #  # ]:          0 :     std::ostringstream str;
    8861                 :          0 :     int num = 0;
    8862                 :            : #define ppstat( a, b )             \
    8863                 :            :     {                              \
    8864                 :            :         if( pstat & a )            \
    8865                 :            :         {                          \
    8866                 :            :             if( num ) str << ", "; \
    8867                 :            :             str << b;              \
    8868                 :            :             num++;                 \
    8869                 :            :         }                          \
    8870                 :            :     }
    8871                 :            : 
    8872 [ #  # ][ #  # ]:          0 :     ppstat( PSTATUS_NOT_OWNED, "NOT_OWNED" );
         [ #  # ][ #  # ]
    8873 [ #  # ][ #  # ]:          0 :     ppstat( PSTATUS_SHARED, "SHARED" );
         [ #  # ][ #  # ]
    8874 [ #  # ][ #  # ]:          0 :     ppstat( PSTATUS_MULTISHARED, "MULTISHARED" );
         [ #  # ][ #  # ]
    8875 [ #  # ][ #  # ]:          0 :     ppstat( PSTATUS_INTERFACE, "INTERFACE" );
         [ #  # ][ #  # ]
    8876 [ #  # ][ #  # ]:          0 :     ppstat( PSTATUS_GHOST, "GHOST" );
         [ #  # ][ #  # ]
    8877                 :            : 
    8878 [ #  # ][ #  # ]:          0 :     ostr = str.str();
    8879                 :          0 : }
    8880                 :            : 
    8881                 :          0 : void ParallelComm::print_pstatus( unsigned char pstat )
    8882                 :            : {
    8883         [ #  # ]:          0 :     std::string str;
    8884         [ #  # ]:          0 :     print_pstatus( pstat, str );
    8885 [ #  # ][ #  # ]:          0 :     std::cout << str.c_str() << std::endl;
    8886                 :          0 : }
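// Hedged usage sketch for the two print_pstatus overloads above (PSTATUS_* are
// the MOAB parallel status bits tested by the ppstat macro):
//
//   unsigned char pstat = PSTATUS_SHARED | PSTATUS_GHOST;
//   std::string str;
//   pcomm.print_pstatus( pstat, str );  // str == "SHARED, GHOST"
//   pcomm.print_pstatus( pstat );       // prints "SHARED, GHOST" to std::cout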
    8887                 :            : 
    8888                 :          0 : ErrorCode ParallelComm::correct_thin_ghost_layers()
    8889                 :            : {
    8890                 :            : 
    8891                 :            :     // Get all shared ent data from other procs
    8892 [ #  # ][ #  # ]:          0 :     std::vector< std::vector< SharedEntityData > > shents( buffProcs.size() ), send_data( buffProcs.size() );
    8893                 :            : 
    8894                 :            :     // This works only on the multi-shared tags sharedps_tag() and sharedhs_tag()
    8895                 :            : 
    8896                 :            :     /*
    8897                 :            :      *   domain0 | domain1 | domain2 | domain3
    8898                 :            :      *   vertices from domain 1 and 2 are visible from both 0 and 3, but
    8899                 :            :      *   domain 0 might not have info about multi-sharing from domain 3
    8900                 :            :      *   so we force the domain 0 vertices owned by 1 and 2 to carry information
    8901                 :            :      *   about the sharing with domain 3
    8902                 :            :      *
    8903                 :            :      *   SharedEntityData will have :
    8904                 :            :      *    struct SharedEntityData {
    8905                 :            :             EntityHandle local;  // this is same meaning, for the proc we sent to, it is local
    8906                 :            :             EntityHandle remote; // this will be the far away handle that will need to be added
    8907                 :            :             EntityID owner;      // this will be the remote proc
    8908                 :            :           };
    8909                 :            :           // so we need to add data like this:
    8910                 :            :            a multishared entity owned by proc x will have data like
    8911                 :            :              multishared procs:  proc x, a, b, c
    8912                 :            :              multishared handles:     h1, h2, h3, h4
    8913                 :            :              we will need to send data from proc x like this:
    8914                 :            :                to proc a we will send
    8915                 :            :                  (h2, h3, b), (h2, h4, c)
    8916                 :            :                to proc b we will send
    8917                 :            :                   (h3, h2, a), (h3, h4, c)
    8918                 :            :                to proc c we will send
    8919                 :            :                   (h4, h2, a), (h4, h3, b)
    8920                 :            :      *
    8921                 :            :      */
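    // A compact sketch of the rule above (illustrative; the loops that follow implement it):
    //   for each remote index j in [1, num_sharing)          (index 0 is this owner)
    //       for each remote index k in [1, num_sharing), k != j
    //           send to ent_procs[j] the triple ( handles[j], handles[k], ent_procs[k] )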
    8922                 :            : 
    8923                 :          0 :     ErrorCode result = MB_SUCCESS;
    8924                 :            :     int ent_procs[MAX_SHARING_PROCS];
    8925                 :            :     EntityHandle handles[MAX_SHARING_PROCS];
    8926                 :            :     int num_sharing;
    8927                 :            :     SharedEntityData tmp;
    8928                 :            : 
    8929 [ #  # ][ #  # ]:          0 :     for( std::set< EntityHandle >::iterator i = sharedEnts.begin(); i != sharedEnts.end(); ++i )
                 [ #  # ]
    8930                 :            :     {
    8931                 :            : 
    8932                 :            :         unsigned char pstat;
    8933 [ #  # ][ #  # ]:          0 :         result = get_sharing_data( *i, ent_procs, handles, pstat, num_sharing );MB_CHK_SET_ERR( result, "can't get sharing data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8934 [ #  # ][ #  # ]:          0 :         if( !( pstat & PSTATUS_MULTISHARED ) ||
    8935                 :          0 :             num_sharing <= 2 )  // if not multishared, skip, it should have no problems
    8936                 :          0 :             continue;
    8937                 :            :         // we should skip the ones that are not owned locally
    8938                 :            :         // the owned ones will have the most multi-shared info, because the info comes from other
    8939                 :            :         // remote processors
    8940         [ #  # ]:          0 :         if( pstat & PSTATUS_NOT_OWNED ) continue;
    8941         [ #  # ]:          0 :         for( int j = 1; j < num_sharing; j++ )
    8942                 :            :         {
    8943                 :            :             // we will send to proc
    8944                 :          0 :             int send_to_proc = ent_procs[j];  //
    8945                 :          0 :             tmp.local        = handles[j];
    8946         [ #  # ]:          0 :             int ind          = get_buffers( send_to_proc );
    8947         [ #  # ]:          0 :             assert( -1 != ind );  // THIS SHOULD NEVER HAPPEN
    8948         [ #  # ]:          0 :             for( int k = 1; k < num_sharing; k++ )
    8949                 :            :             {
    8950                 :            :                 // do not send to self proc
    8951         [ #  # ]:          0 :                 if( j == k ) continue;
    8952                 :          0 :                 tmp.remote = handles[k];  // this will be the handle of entity on proc
    8953                 :          0 :                 tmp.owner  = ent_procs[k];
    8954 [ #  # ][ #  # ]:          0 :                 send_data[ind].push_back( tmp );
    8955                 :            :             }
    8956                 :            :         }
    8957                 :            :     }
    8958                 :            : 
    8959 [ #  # ][ #  # ]:          0 :     result = exchange_all_shared_handles( send_data, shents );MB_CHK_ERR( result );
         [ #  # ][ #  # ]
    8960                 :            : 
    8961                 :            :     // Loop over all received shents and add the sharing information if it is missing locally
    8962         [ #  # ]:          0 :     for( size_t i = 0; i < shents.size(); i++ )
    8963                 :            :     {
    8964         [ #  # ]:          0 :         std::vector< SharedEntityData >& shEnts = shents[i];
    8965         [ #  # ]:          0 :         for( size_t j = 0; j < shEnts.size(); j++ )
    8966                 :            :         {
    8967         [ #  # ]:          0 :             tmp = shEnts[j];
    8968                 :            :             // Basically, check the sharing data for the tmp.local entity;
    8969                 :            :             // it should already contain tmp.owner and tmp.remote
    8970                 :          0 :             EntityHandle eh = tmp.local;
    8971                 :            :             unsigned char pstat;
    8972 [ #  # ][ #  # ]:          0 :             result = get_sharing_data( eh, ent_procs, handles, pstat, num_sharing );MB_CHK_SET_ERR( result, "can't get sharing data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    8973                 :            :             // See whether proc tmp.owner is in the list of ent_procs; if not, we have to extend
    8974                 :            :             // handles and ent_procs, and reset the sharing tags
    8975                 :            : 
    8976                 :          0 :             int proc_remote = tmp.owner;  //
    8977 [ #  # ][ #  # ]:          0 :             if( std::find( ent_procs, ent_procs + num_sharing, proc_remote ) == ent_procs + num_sharing )
    8978                 :            :             {
    8979                 :            :                 // So we did not find proc_remote in the sharing list
    8980                 :            : #ifndef NDEBUG
    8981 [ #  # ][ #  # ]:          0 :                 std::cout << "THIN GHOST: we did not find on proc " << rank() << " for shared ent " << eh
         [ #  # ][ #  # ]
                 [ #  # ]
    8982 [ #  # ][ #  # ]:          0 :                           << " the proc " << proc_remote << "\n";
                 [ #  # ]
    8983                 :            : #endif
    8984                 :            :                 // increase num_sharing, and set the multi-shared tags
    8985         [ #  # ]:          0 :                 if( num_sharing >= MAX_SHARING_PROCS ) return MB_FAILURE;
    8986                 :          0 :                 handles[num_sharing]       = tmp.remote;
    8987                 :          0 :                 handles[num_sharing + 1]   = 0;  // end of list
    8988                 :          0 :                 ent_procs[num_sharing]     = tmp.owner;
    8989                 :          0 :                 ent_procs[num_sharing + 1] = -1;  // this should be already set
    8990 [ #  # ][ #  # ]:          0 :                 result                     = mbImpl->tag_set_data( sharedps_tag(), &eh, 1, ent_procs );MB_CHK_SET_ERR( result, "Failed to set sharedps tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8991 [ #  # ][ #  # ]:          0 :                 result = mbImpl->tag_set_data( sharedhs_tag(), &eh, 1, handles );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    8992         [ #  # ]:          0 :                 if( 2 == num_sharing )  // it means the sharedp and sharedh tags were set to a
    8993                 :            :                                         // non-default value
    8994                 :            :                 {
    8995                 :            :                     // so entity eh was simply shared before; we need to set those dense tags back
    8996                 :            :                     // to their default
    8997                 :            :                     //  values
    8998                 :          0 :                     EntityHandle zero = 0;
    8999                 :          0 :                     int no_proc       = -1;
    9000 [ #  # ][ #  # ]:          0 :                     result            = mbImpl->tag_set_data( sharedp_tag(), &eh, 1, &no_proc );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    9001 [ #  # ][ #  # ]:          0 :                     result = mbImpl->tag_set_data( sharedh_tag(), &eh, 1, &zero );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    9002                 :            :                     // also, add multishared pstatus tag
    9003                 :            :                     // also add multishared status to pstatus
    9004                 :          0 :                     pstat  = pstat | PSTATUS_MULTISHARED;
    9005 [ #  # ][ #  # ]:          0 :                     result = mbImpl->tag_set_data( pstatus_tag(), &eh, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    9006                 :            :                 }
    9007                 :            :             }
    9008                 :            :         }
    9009                 :            :     }
    9010                 :          0 :     return MB_SUCCESS;
    9011                 :            : }
    9012 [ +  - ][ +  - ]:        228 : }  // namespace moab

Generated by: LCOV version 1.11