LCOV - code coverage report
Current view: top level - src/parallel - ParCommGraph.cpp (source / functions)
Test: coverage_sk.info
Date: 2020-12-16 07:07:30

                  Hit    Total   Coverage
Lines:              1      637      0.2 %
Functions:          2       23      8.7 %
Branches:           2     1762      0.1 %

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * ParCommGraph.cpp
       3                 :            :  *
       4                 :            :  */
       5                 :            : 
       6                 :            : #include "moab/ParCommGraph.hpp"
       7                 :            : // we need to recompute adjacencies for merging to work
       8                 :            : #include "moab/Core.hpp"
       9                 :            : #include "AEntityFactory.hpp"
      10                 :            : 
      11                 :            : #ifdef MOAB_HAVE_ZOLTAN
      12                 :            : #include "moab/ZoltanPartitioner.hpp"
      13                 :            : #endif
      14                 :            : 
      15                 :            : // #define VERBOSE
      16                 :            : // #define GRAPH_INFO
      17                 :            : 
      18                 :            : namespace moab
      19                 :            : {
      20                 :          0 : ParCommGraph::ParCommGraph( MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2 )
      21 [ #  # ][ #  # ]:          0 :     : comm( joincomm ), compid1( coid1 ), compid2( coid2 )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
      22                 :            : {
      23                 :            :     // find out the tasks from each group, in the joint communicator
      24         [ #  # ]:          0 :     find_group_ranks( group1, comm, senderTasks );
      25         [ #  # ]:          0 :     find_group_ranks( group2, comm, receiverTasks );
      26                 :            : 
      27                 :          0 :     rootSender = rootReceiver = false;
      28                 :          0 :     rankInGroup1 = rankInGroup2 = rankInJoin = -1;  // not initialized, or not part of the group
      29                 :            : 
      30         [ #  # ]:          0 :     int mpierr = MPI_Group_rank( group1, &rankInGroup1 );
      31 [ #  # ][ #  # ]:          0 :     if( MPI_SUCCESS != mpierr || rankInGroup1 == MPI_UNDEFINED ) rankInGroup1 = -1;
      32                 :            : 
      33         [ #  # ]:          0 :     mpierr = MPI_Group_rank( group2, &rankInGroup2 );
      34 [ #  # ][ #  # ]:          0 :     if( MPI_SUCCESS != mpierr || rankInGroup2 == MPI_UNDEFINED ) rankInGroup2 = -1;
      35                 :            : 
      36         [ #  # ]:          0 :     mpierr = MPI_Comm_rank( comm, &rankInJoin );
      37         [ #  # ]:          0 :     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
      38                 :          0 :         rankInJoin = -1;
      39                 :            : 
      40         [ #  # ]:          0 :     mpierr = MPI_Comm_size( comm, &joinSize );
      41         [ #  # ]:          0 :     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
      42                 :          0 :         joinSize = -1;
      43                 :            : 
      44         [ #  # ]:          0 :     if( 0 == rankInGroup1 ) rootSender = true;
      45         [ #  # ]:          0 :     if( 0 == rankInGroup2 ) rootReceiver = true;
      46                 :          0 :     graph_type = INITIAL_MIGRATE;  // 0
      47                 :          0 :     comm_graph = NULL;
      48                 :          0 :     context_id = -1;
      49                 :          0 :     cover_set  = 0;  // refers to nothing yet
      50                 :          0 : }
      51                 :            : 
       52                 :            : // the copy constructor copies only a few basic members; split ranges are not copied
      53 [ #  # ][ #  # ]:          0 : ParCommGraph::ParCommGraph( const ParCommGraph& src )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
      54                 :            : {
      55                 :          0 :     comm          = src.comm;
      56         [ #  # ]:          0 :     senderTasks   = src.senderTasks;    // these are the sender tasks in joint comm
      57         [ #  # ]:          0 :     receiverTasks = src.receiverTasks;  // these are all the receiver tasks in joint comm
      58                 :          0 :     rootSender    = src.rootSender;
      59                 :          0 :     rootReceiver  = src.rootReceiver;
      60                 :          0 :     rankInGroup1  = src.rankInGroup1;
      61                 :          0 :     rankInGroup2  = src.rankInGroup2;  // group 1 is sender, 2 is receiver
      62                 :          0 :     rankInJoin    = src.rankInJoin;
      63                 :          0 :     joinSize      = src.joinSize;
      64                 :          0 :     compid1       = src.compid1;
      65                 :          0 :     compid2       = src.compid2;
      66                 :          0 :     comm_graph    = NULL;
      67                 :          0 :     graph_type    = src.graph_type;
      68                 :          0 :     context_id    = src.context_id;
      69                 :          0 :     cover_set     = src.cover_set;
      70                 :          0 :     return;
      71                 :            : }
      72                 :            : 
      73                 :          0 : ParCommGraph::~ParCommGraph()
      74                 :            : {
      75                 :            :     // TODO Auto-generated destructor stub
      76         [ #  # ]:          0 : }
      77                 :            : 
       78                 :            : // utility to find the ranks of the processes of a group with respect to a joint comm
       79                 :            : // that is guaranteed to span the group;
       80                 :            : // it is used locally (in the constructor), but it can also serve as a general utility
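                          :            : // illustrative example: if the group's three processes are, in group order, ranks 4, 5
                          :            : // and 6 of joincomm, then after translation ranks is filled with {4, 5, 6}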
      81                 :          0 : void ParCommGraph::find_group_ranks( MPI_Group group, MPI_Comm joincomm, std::vector< int >& ranks )
      82                 :            : {
      83                 :            :     MPI_Group global_grp;
      84         [ #  # ]:          0 :     MPI_Comm_group( joincomm, &global_grp );
      85                 :            : 
      86                 :            :     int grp_size;
      87                 :            : 
      88         [ #  # ]:          0 :     MPI_Group_size( group, &grp_size );
      89         [ #  # ]:          0 :     std::vector< int > rks( grp_size );
      90         [ #  # ]:          0 :     ranks.resize( grp_size );
      91                 :            : 
      92         [ #  # ]:          0 :     for( int i = 0; i < grp_size; i++ )
      93         [ #  # ]:          0 :         rks[i] = i;
      94                 :            : 
      95 [ #  # ][ #  # ]:          0 :     MPI_Group_translate_ranks( group, grp_size, &rks[0], global_grp, &ranks[0] );
                 [ #  # ]
      96         [ #  # ]:          0 :     MPI_Group_free( &global_grp );
      97                 :          0 :     return;
      98                 :            : }
      99                 :            : 
     100                 :          0 : ErrorCode ParCommGraph::compute_trivial_partition( std::vector< int >& numElemsPerTaskInGroup1 )
     101                 :            : {
     102                 :            : 
     103                 :          0 :     recv_graph.clear();
     104                 :          0 :     recv_sizes.clear();
     105                 :          0 :     sender_graph.clear();
     106                 :          0 :     sender_sizes.clear();
     107                 :            : 
     108         [ #  # ]:          0 :     if( numElemsPerTaskInGroup1.size() != senderTasks.size() )
     109                 :          0 :         return MB_FAILURE;  // each sender has a number of elements that it owns
     110                 :            : 
     111                 :            :     // first find out total number of elements to be sent from all senders
     112                 :          0 :     int total_elems = 0;
     113         [ #  # ]:          0 :     std::vector< int > accum;
     114         [ #  # ]:          0 :     accum.push_back( 0 );
     115                 :            : 
     116                 :          0 :     int num_senders = (int)senderTasks.size();
     117                 :            : 
     118         [ #  # ]:          0 :     for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ )
     119                 :            :     {
     120         [ #  # ]:          0 :         total_elems += numElemsPerTaskInGroup1[k];
     121         [ #  # ]:          0 :         accum.push_back( total_elems );
     122                 :            :     }
     123                 :            : 
     124                 :          0 :     int num_recv = ( (int)receiverTasks.size() );
     125                 :            :     // in trivial partition, every receiver should get about total_elems/num_receivers elements
     126                 :          0 :     int num_per_receiver = (int)( total_elems / num_recv );
     127                 :          0 :     int leftover         = total_elems - num_per_receiver * num_recv;
     128                 :            : 
      129                 :            :     // so receiver k will receive the interval [ starts[k], starts[k+1] )
     130         [ #  # ]:          0 :     std::vector< int > starts;
     131         [ #  # ]:          0 :     starts.resize( num_recv + 1 );
     132         [ #  # ]:          0 :     starts[0] = 0;
     133         [ #  # ]:          0 :     for( int k = 0; k < num_recv; k++ )
     134                 :            :     {
     135 [ #  # ][ #  # ]:          0 :         starts[k + 1] = starts[k] + num_per_receiver;
     136 [ #  # ][ #  # ]:          0 :         if( k < leftover ) starts[k + 1]++;
     137                 :            :     }
     138                 :            : 
     139                 :            :     // each sender will send to a number of receivers, based on how the
      140                 :            :     // arrays starts[0:num_recv] and accum[0:num_senders] overlap
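                          :            :     // worked example (illustrative): 2 senders owning {5, 3} elements and 4 receivers
                          :            :     // give accum = {0, 5, 8} and starts = {0, 2, 4, 6, 8}; receivers 0 and 1 each get 2
                          :            :     // elements from sender 0, receiver 2 gets 1 from sender 0 and 1 from sender 1, and
                          :            :     // receiver 3 gets 2 from sender 1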
     141                 :          0 :     int lastUsedReceiverRank = 0;  // first receiver was not treated yet
     142         [ #  # ]:          0 :     for( int j = 0; j < num_senders; j++ )
     143                 :            :     {
     144                 :            :         // we could start the receiver loop with the latest receiver that received from previous
     145                 :            :         // sender
     146         [ #  # ]:          0 :         for( int k = lastUsedReceiverRank; k < num_recv; k++ )
     147                 :            :         {
     148                 :            :             // if overlap:
     149 [ #  # ][ #  # ]:          0 :             if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] )
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     150                 :            :             {
     151 [ #  # ][ #  # ]:          0 :                 recv_graph[receiverTasks[k]].push_back( senderTasks[j] );
         [ #  # ][ #  # ]
     152 [ #  # ][ #  # ]:          0 :                 sender_graph[senderTasks[j]].push_back( receiverTasks[k] );
         [ #  # ][ #  # ]
     153                 :            : 
     154                 :            :                 // we still need to decide what is the overlap
     155                 :          0 :                 int sizeOverlap = 1;  // at least 1, for sure
     156                 :            :                 // 1
     157 [ #  # ][ #  # ]:          0 :                 if( starts[k] >= accum[j] )  // one end is starts[k]
                 [ #  # ]
     158                 :            :                 {
     159 [ #  # ][ #  # ]:          0 :                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
                 [ #  # ]
     160 [ #  # ][ #  # ]:          0 :                         sizeOverlap = accum[j + 1] - starts[k];
     161                 :            :                     else  //
     162 [ #  # ][ #  # ]:          0 :                         sizeOverlap = starts[k + 1] - starts[k];
     163                 :            :                 }
     164                 :            :                 else  // one end is accum[j]
     165                 :            :                 {
     166 [ #  # ][ #  # ]:          0 :                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
                 [ #  # ]
     167 [ #  # ][ #  # ]:          0 :                         sizeOverlap = accum[j + 1] - accum[j];
     168                 :            :                     else
     169 [ #  # ][ #  # ]:          0 :                         sizeOverlap = starts[k + 1] - accum[j];
     170                 :            :                 }
     171 [ #  # ][ #  # ]:          0 :                 recv_sizes[receiverTasks[k]].push_back( sizeOverlap );  // basically, task k will receive from
                 [ #  # ]
     172                 :            :                                                                         //   sender j, sizeOverlap elems
     173 [ #  # ][ #  # ]:          0 :                 sender_sizes[senderTasks[j]].push_back( sizeOverlap );
                 [ #  # ]
     174 [ #  # ][ #  # ]:          0 :                 if( starts[k] > accum[j + 1] )
                 [ #  # ]
     175                 :            :                 {
      176                 :          0 :                     lastUsedReceiverRank = k - 1;  // so the next k loop starts a little higher; the
      177                 :            :                                                    // first few receivers (up to lastUsedReceiverRank)
      178                 :            :                                                    // are most likely fully served already
     179                 :          0 :                     break;  // break the k loop, we distributed all elements from sender j to some
     180                 :            :                             // receivers
     181                 :            :                 }
     182                 :            :             }
     183                 :            :         }
     184                 :            :     }
     185                 :            : 
     186                 :          0 :     return MB_SUCCESS;
     187                 :            : }
     188                 :            : 
     189                 :          0 : ErrorCode ParCommGraph::pack_receivers_graph( std::vector< int >& packed_recv_array )
     190                 :            : {
      191                 :            :     // it basically looks at local data to pack the communication graph; each receiver task will
      192                 :            :     // have to post receives for each sender task that will send data to it; the array will be
      193                 :            :     // communicated to the root receiver and eventually distributed to the receiver tasks
     194                 :            : 
     195                 :            :     /*
     196                 :            :      * packed_array will have receiver, number of senders, then senders, etc
     197                 :            :      */
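                          :            :     // illustrative example, assuming senders on joint ranks {0, 1} and receivers on
                          :            :     // {2, 3, 4, 5}: the array { 2 1 0   3 1 0   4 2 0 1   5 1 1 } means receiver 2 gets
                          :            :     // data from sender 0 only, receiver 4 from senders 0 and 1, and receiver 5 from
                          :            :     // sender 1 only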
     198         [ #  # ]:          0 :     if( recv_graph.size() < receiverTasks.size() )
     199                 :            :     {
     200                 :            :         // big problem, we have empty partitions in receive
     201                 :          0 :         std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
     202                 :            :     }
     203 [ #  # ][ #  # ]:          0 :     for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
                 [ #  # ]
     204                 :            :     {
     205         [ #  # ]:          0 :         int recv                    = it->first;
     206         [ #  # ]:          0 :         std::vector< int >& senders = it->second;
     207         [ #  # ]:          0 :         packed_recv_array.push_back( recv );
     208         [ #  # ]:          0 :         packed_recv_array.push_back( (int)senders.size() );
     209                 :            : 
     210         [ #  # ]:          0 :         for( int k = 0; k < (int)senders.size(); k++ )
     211 [ #  # ][ #  # ]:          0 :             packed_recv_array.push_back( senders[k] );
     212                 :            :     }
     213                 :            : 
     214                 :          0 :     return MB_SUCCESS;
     215                 :            : }
     216                 :            : 
     217                 :          0 : ErrorCode ParCommGraph::split_owned_range( int sender_rank, Range& owned )
     218                 :            : {
     219         [ #  # ]:          0 :     int senderTask                   = senderTasks[sender_rank];
     220         [ #  # ]:          0 :     std::vector< int >& distribution = sender_sizes[senderTask];
     221         [ #  # ]:          0 :     std::vector< int >& receivers    = sender_graph[senderTask];
     222         [ #  # ]:          0 :     if( distribution.size() != receivers.size() )  //
     223                 :          0 :         return MB_FAILURE;
     224                 :            : 
      225         [ #  # ]:          0 :     Range current = owned;  // get the full range first, then subtract each assigned piece
      226                 :            :     // to form the following ranges
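                          :            :     // e.g., distribution = {3, 2} assigns the first 3 entities of owned to receivers[0]
                          :            :     // and the remaining 2 to receivers[1]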
     227                 :            : 
     228         [ #  # ]:          0 :     Range rleftover = current;
     229         [ #  # ]:          0 :     for( size_t k = 0; k < receivers.size(); k++ )
     230                 :            :     {
     231         [ #  # ]:          0 :         Range newr;
     232 [ #  # ][ #  # ]:          0 :         newr.insert( current.begin(), current.begin() + distribution[k] );
         [ #  # ][ #  # ]
                 [ #  # ]
     233 [ #  # ][ #  # ]:          0 :         split_ranges[receivers[k]] = newr;
                 [ #  # ]
     234                 :            : 
     235 [ #  # ][ #  # ]:          0 :         rleftover = subtract( current, newr );
     236         [ #  # ]:          0 :         current   = rleftover;
     237                 :          0 :     }
     238                 :            : 
     239                 :          0 :     return MB_SUCCESS;
     240                 :            : }
     241                 :            : 
      242                 :            : // this overload uses the corresponding tasks and sizes stored on the graph
     243                 :          0 : ErrorCode ParCommGraph::split_owned_range( Range& owned )
     244                 :            : {
     245         [ #  # ]:          0 :     if( corr_tasks.size() != corr_sizes.size() )  //
     246                 :          0 :         return MB_FAILURE;
     247                 :            : 
      248         [ #  # ]:          0 :     Range current = owned;  // get the full range first, then subtract each assigned piece
      249                 :            :     // to form the following ranges
     250                 :            : 
     251         [ #  # ]:          0 :     Range rleftover = current;
     252         [ #  # ]:          0 :     for( size_t k = 0; k < corr_tasks.size(); k++ )
     253                 :            :     {
     254         [ #  # ]:          0 :         Range newr;
     255 [ #  # ][ #  # ]:          0 :         newr.insert( current.begin(), current.begin() + corr_sizes[k] );
         [ #  # ][ #  # ]
                 [ #  # ]
     256 [ #  # ][ #  # ]:          0 :         split_ranges[corr_tasks[k]] = newr;
                 [ #  # ]
     257                 :            : 
     258 [ #  # ][ #  # ]:          0 :         rleftover = subtract( current, newr );
     259         [ #  # ]:          0 :         current   = rleftover;
     260                 :          0 :     }
     261                 :            : 
     262                 :          0 :     return MB_SUCCESS;
     263                 :            : }
     264                 :            : 
     265                 :          0 : ErrorCode ParCommGraph::send_graph( MPI_Comm jcomm )
     266                 :            : {
     267         [ #  # ]:          0 :     if( is_root_sender() )
     268                 :            :     {
     269                 :            :         int ierr;
      270                 :            :         // we need to build a communication graph: each sender now knows to which receivers
      271                 :            :         // it will send data, and the receivers need to post receives for each sender that will
      272                 :            :         // send data to them; gather on rank 0 of the sender comm the global ranks of the senders
      273                 :            :         // and their receivers, to build the communication matrix (which receiver gets data from which sender)
     274                 :            : 
     275         [ #  # ]:          0 :         std::vector< int > packed_recv_array;
     276         [ #  # ]:          0 :         ErrorCode rval = pack_receivers_graph( packed_recv_array );
     277         [ #  # ]:          0 :         if( MB_SUCCESS != rval ) return rval;
     278                 :            : 
     279                 :          0 :         int size_pack_array = (int)packed_recv_array.size();
     280 [ #  # ][ #  # ]:          0 :         comm_graph          = new int[size_pack_array + 1];
     281                 :          0 :         comm_graph[0]       = size_pack_array;
     282         [ #  # ]:          0 :         for( int k = 0; k < size_pack_array; k++ )
     283         [ #  # ]:          0 :             comm_graph[k + 1] = packed_recv_array[k];
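                          :            :         // so comm_graph = { size_pack_array, recv_0, nsenders_0, senders..., recv_1, ... }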
      284                 :            :         // will add one request
      285                 :            :         /// tag 20 is used to send the packed array (tag 10 for the size is no longer needed)
     286         [ #  # ]:          0 :         sendReqs.resize( 1 );
     287                 :            :         // do not send the size in advance, because we use probe now
     288                 :            :         /*ierr = MPI_Isend(&comm_graph[0], 1, MPI_INT, receiver(0), 10, jcomm, &sendReqs[0]); // we
     289                 :            :         have to use global communicator if (ierr!=0) return MB_FAILURE;*/
     290                 :            :         ierr = MPI_Isend( &comm_graph[1], size_pack_array, MPI_INT, receiver( 0 ), 20, jcomm,
     291 [ #  # ][ #  # ]:          0 :                           &sendReqs[0] );  // we have to use global communicator
                 [ #  # ]
     292 [ #  # ][ #  # ]:          0 :         if( ierr != 0 ) return MB_FAILURE;
     293                 :            :     }
     294                 :          0 :     return MB_SUCCESS;
     295                 :            : }
     296                 :            : 
      297                 :            : // pco has access to MOAB too, via get_moab()
     298                 :            : // do we need to store "method" as a member variable ?
     299                 :          0 : ErrorCode ParCommGraph::send_mesh_parts( MPI_Comm jcomm, ParallelComm* pco, Range& owned )
     300                 :            : {
     301                 :            : 
     302                 :            :     ErrorCode rval;
     303         [ #  # ]:          0 :     if( split_ranges.empty() )  // in trivial partition
     304                 :            :     {
     305                 :          0 :         rval = split_owned_range( rankInGroup1, owned );
     306         [ #  # ]:          0 :         if( rval != MB_SUCCESS ) return rval;
     307                 :            :         // we know this on the sender side:
     308                 :          0 :         corr_tasks = sender_graph[senderTasks[rankInGroup1]];  // copy
     309                 :          0 :         corr_sizes = sender_sizes[senderTasks[rankInGroup1]];  // another copy
     310                 :            :     }
     311                 :            : 
     312                 :          0 :     int indexReq = 0;
     313                 :            :     int ierr;                             // MPI error
     314         [ #  # ]:          0 :     if( is_root_sender() ) indexReq = 1;  // for sendReqs
     315                 :          0 :     sendReqs.resize( indexReq + split_ranges.size() );
     316 [ #  # ][ #  # ]:          0 :     for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
                 [ #  # ]
     317                 :            :     {
     318         [ #  # ]:          0 :         int receiver_proc = it->first;
     319 [ #  # ][ #  # ]:          0 :         Range ents        = it->second;
     320                 :            : 
     321                 :            :         // add necessary vertices too
     322 [ #  # ][ #  # ]:          0 :         Range verts;
     323 [ #  # ][ #  # ]:          0 :         rval = pco->get_moab()->get_adjacencies( ents, 0, false, verts, Interface::UNION );
     324         [ #  # ]:          0 :         if( rval != MB_SUCCESS )
     325                 :            :         {
      326         [ #  # ]:          0 :             std::cout << " can't get adjacencies for entities to send\n";
     327                 :          0 :             return rval;
     328                 :            :         }
     329         [ #  # ]:          0 :         ents.merge( verts );
     330 [ #  # ][ #  # ]:          0 :         ParallelComm::Buffer* buffer = new ParallelComm::Buffer( ParallelComm::INITIAL_BUFF_SIZE );
     331         [ #  # ]:          0 :         buffer->reset_ptr( sizeof( int ) );
     332         [ #  # ]:          0 :         rval = pco->pack_buffer( ents, false, true, false, -1, buffer );
     333         [ #  # ]:          0 :         if( rval != MB_SUCCESS )
     334                 :            :         {
     335         [ #  # ]:          0 :             std::cout << " can't pack buffer for entities to send\n";
     336                 :          0 :             return rval;
     337                 :            :         }
     338         [ #  # ]:          0 :         int size_pack = buffer->get_current_size();
     339                 :            : 
     340                 :            :         // TODO there could be an issue with endian things; check !!!!!
     341                 :            :         // we are sending the size of the buffer first as an int!!!
     342                 :            :         /// not anymore !
     343                 :            :         /* ierr = MPI_Isend(buffer->mem_ptr, 1, MPI_INT, receiver_proc, 1, jcomm,
     344                 :            :          &sendReqs[indexReq]); // we have to use global communicator if (ierr!=0) return MB_FAILURE;
     345                 :            :          indexReq++;*/
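                          :            :         // the receiver now learns the buffer size via MPI_Probe + MPI_Get_count on tag 2
                          :            :         // (see receive_mesh below), so no separate size message is needed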
     346                 :            : 
     347                 :            :         ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_CHAR, receiver_proc, 2, jcomm,
     348 [ #  # ][ #  # ]:          0 :                           &sendReqs[indexReq] );  // we have to use global communicator
     349         [ #  # ]:          0 :         if( ierr != 0 ) return MB_FAILURE;
     350                 :          0 :         indexReq++;
     351 [ #  # ][ #  # ]:          0 :         localSendBuffs.push_back( buffer );
     352                 :          0 :     }
     353                 :          0 :     return MB_SUCCESS;
     354                 :            : }
     355                 :            : 
     356                 :            : // this is called on receiver side
     357                 :          0 : ErrorCode ParCommGraph::receive_comm_graph( MPI_Comm jcomm, ParallelComm* pco, std::vector< int >& pack_array )
     358                 :            : {
     359                 :            :     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
     360                 :            :     // knows what data to expect
     361         [ #  # ]:          0 :     MPI_Comm receive = pco->comm();
     362                 :            :     int size_pack_array, ierr;
     363                 :            :     MPI_Status status;
     364         [ #  # ]:          0 :     if( rootReceiver )
     365                 :            :     {
     366                 :            :         /*
     367                 :            :          * MPI_Probe(
     368                 :            :         int source,
     369                 :            :         int tag,
     370                 :            :         MPI_Comm comm,
     371                 :            :         MPI_Status* status)
     372                 :            :          *
     373                 :            :          */
     374 [ #  # ][ #  # ]:          0 :         ierr = MPI_Probe( sender( 0 ), 20, jcomm, &status );
     375         [ #  # ]:          0 :         if( 0 != ierr )
     376                 :            :         {
     377 [ #  # ][ #  # ]:          0 :             std::cout << " MPI_Probe failure: " << ierr << "\n";
                 [ #  # ]
     378                 :          0 :             return MB_FAILURE;
     379                 :            :         }
     380                 :            :         // get the count of data received from the MPI_Status structure
     381         [ #  # ]:          0 :         ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array );
     382         [ #  # ]:          0 :         if( 0 != ierr )
     383                 :            :         {
     384 [ #  # ][ #  # ]:          0 :             std::cout << " MPI_Get_count failure: " << ierr << "\n";
                 [ #  # ]
     385                 :          0 :             return MB_FAILURE;
     386                 :            :         }
     387                 :            : #ifdef VERBOSE
     388                 :            :         std::cout << " receive comm graph size: " << size_pack_array << "\n";
     389                 :            : #endif
     390         [ #  # ]:          0 :         pack_array.resize( size_pack_array );
     391 [ #  # ][ #  # ]:          0 :         ierr = MPI_Recv( &pack_array[0], size_pack_array, MPI_INT, sender( 0 ), 20, jcomm, &status );
                 [ #  # ]
     392         [ #  # ]:          0 :         if( 0 != ierr ) return MB_FAILURE;
     393                 :            : #ifdef VERBOSE
     394                 :            :         std::cout << " receive comm graph ";
     395                 :            :         for( int k = 0; k < (int)pack_array.size(); k++ )
     396                 :            :             std::cout << " " << pack_array[k];
     397                 :            :         std::cout << "\n";
     398                 :            : #endif
     399                 :            :     }
     400                 :            : 
     401                 :            :     // now broadcast this whole array to all receivers, so they know what to expect
     402         [ #  # ]:          0 :     ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive );
     403         [ #  # ]:          0 :     if( 0 != ierr ) return MB_FAILURE;
     404         [ #  # ]:          0 :     pack_array.resize( size_pack_array );
     405 [ #  # ][ #  # ]:          0 :     ierr = MPI_Bcast( &pack_array[0], size_pack_array, MPI_INT, 0, receive );
     406         [ #  # ]:          0 :     if( 0 != ierr ) return MB_FAILURE;
     407                 :          0 :     return MB_SUCCESS;
     408                 :            : }
     409                 :            : 
     410                 :          0 : ErrorCode ParCommGraph::receive_mesh( MPI_Comm jcomm, ParallelComm* pco, EntityHandle local_set,
     411                 :            :                                       std::vector< int >& senders_local )
     412                 :            : {
     413                 :            :     ErrorCode rval;
     414                 :            :     int ierr;
     415                 :            :     MPI_Status status;
     416                 :            :     // we also need to fill corresponding mesh info on the other side
     417         [ #  # ]:          0 :     corr_tasks = senders_local;
     418         [ #  # ]:          0 :     Range newEnts;
     419                 :            : 
     420                 :            :     Tag orgSendProcTag;  // this will be a tag set on the received mesh, with info about from what
     421                 :            :                          // task / PE the
     422                 :            :     // primary element came from, in the joint communicator ; this will be forwarded by coverage
     423                 :            :     // mesh
     424                 :          0 :     int defaultInt = -1;  // no processor, so it was not migrated from somewhere else
     425         [ #  # ]:          0 :     rval           = pco->get_moab()->tag_get_handle( "orig_sending_processor", 1, MB_TYPE_INTEGER, orgSendProcTag,
     426 [ #  # ][ #  # ]:          0 :                                             MB_TAG_DENSE | MB_TAG_CREAT, &defaultInt );MB_CHK_SET_ERR( rval, "can't create original sending processor tag" );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     427         [ #  # ]:          0 :     if( !senders_local.empty() )
     428                 :            :     {
     429         [ #  # ]:          0 :         for( size_t k = 0; k < senders_local.size(); k++ )
     430                 :            :         {
     431         [ #  # ]:          0 :             int sender1 = senders_local[k];
     432                 :            :             // first receive the size of the buffer using probe
     433                 :            :             /*
     434                 :            :                  * MPI_Probe(
     435                 :            :                 int source,
     436                 :            :                 int tag,
     437                 :            :                 MPI_Comm comm,
     438                 :            :                 MPI_Status* status)
     439                 :            :                  *
     440                 :            :                  */
     441         [ #  # ]:          0 :             ierr = MPI_Probe( sender1, 2, jcomm, &status );
     442         [ #  # ]:          0 :             if( 0 != ierr )
     443                 :            :             {
     444 [ #  # ][ #  # ]:          0 :                 std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n";
                 [ #  # ]
     445                 :          0 :                 return MB_FAILURE;
     446                 :            :             }
     447                 :            :             // get the count of data received from the MPI_Status structure
     448                 :            :             int size_pack;
     449         [ #  # ]:          0 :             ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack );
     450         [ #  # ]:          0 :             if( 0 != ierr )
     451                 :            :             {
     452 [ #  # ][ #  # ]:          0 :                 std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh  " << ierr << "\n";
                 [ #  # ]
     453                 :          0 :                 return MB_FAILURE;
     454                 :            :             }
     455                 :            : 
     456                 :            :             /* ierr = MPI_Recv (&size_pack, 1, MPI_INT, sender1, 1, jcomm, &status);
     457                 :            :              if (0!=ierr) return MB_FAILURE;*/
     458                 :            :             // now resize the buffer, then receive it
     459 [ #  # ][ #  # ]:          0 :             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_pack );
     460                 :            :             // buffer->reserve(size_pack);
     461                 :            : 
     462         [ #  # ]:          0 :             ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_CHAR, sender1, 2, jcomm, &status );
     463         [ #  # ]:          0 :             if( 0 != ierr )
     464                 :            :             {
     465 [ #  # ][ #  # ]:          0 :                 std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n";
                 [ #  # ]
     466                 :          0 :                 return MB_FAILURE;
     467                 :            :             }
     468                 :            :             // now unpack the buffer we just received
     469         [ #  # ]:          0 :             Range entities;
     470 [ #  # ][ #  # ]:          0 :             std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
         [ #  # ][ #  # ]
     471 [ #  # ][ #  # ]:          0 :             std::vector< std::vector< int > > L1p;
     472 [ #  # ][ #  # ]:          0 :             std::vector< EntityHandle > L2hloc, L2hrem;
         [ #  # ][ #  # ]
     473 [ #  # ][ #  # ]:          0 :             std::vector< unsigned int > L2p;
     474                 :            : 
     475         [ #  # ]:          0 :             buffer->reset_ptr( sizeof( int ) );
     476 [ #  # ][ #  # ]:          0 :             std::vector< EntityHandle > entities_vec( entities.size() );
                 [ #  # ]
     477 [ #  # ][ #  # ]:          0 :             std::copy( entities.begin(), entities.end(), entities_vec.begin() );
                 [ #  # ]
     478                 :            :             rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
     479         [ #  # ]:          0 :                                        entities_vec );
     480         [ #  # ]:          0 :             delete buffer;
     481         [ #  # ]:          0 :             if( MB_SUCCESS != rval ) return rval;
     482                 :            : 
     483 [ #  # ][ #  # ]:          0 :             std::copy( entities_vec.begin(), entities_vec.end(), range_inserter( entities ) );
     484                 :            :             // we have to add them to the local set
     485 [ #  # ][ #  # ]:          0 :             rval = pco->get_moab()->add_entities( local_set, entities );
     486         [ #  # ]:          0 :             if( MB_SUCCESS != rval ) return rval;
      487                 :            :             // corr_sizes holds the number of primary entities received from each sender
     488 [ #  # ][ #  # ]:          0 :             Range verts              = entities.subset_by_dimension( 0 );
     489         [ #  # ]:          0 :             Range local_primary_ents = subtract( entities, verts );
     490 [ #  # ][ #  # ]:          0 :             if( local_primary_ents.empty() )
     491                 :            :             {
     492                 :            :                 // it is possible that all ents sent were vertices (point cloud)
     493                 :            :                 // then consider primary entities the vertices
     494         [ #  # ]:          0 :                 local_primary_ents = verts;
     495                 :            :             }
     496                 :            :             else
     497                 :            :             {
     498                 :            :                 // set a tag with the original sender for the primary entity
     499                 :            :                 // will be used later for coverage mesh
     500 [ #  # ][ #  # ]:          0 :                 std::vector< int > orig_senders( local_primary_ents.size(), sender1 );
     501 [ #  # ][ #  # ]:          0 :                 rval = pco->get_moab()->tag_set_data( orgSendProcTag, local_primary_ents, &orig_senders[0] );
                 [ #  # ]
     502                 :            :             }
     503 [ #  # ][ #  # ]:          0 :             corr_sizes.push_back( (int)local_primary_ents.size() );
     504                 :            : 
     505         [ #  # ]:          0 :             newEnts.merge( entities );
     506                 :            :             // make these in split ranges
     507 [ #  # ][ #  # ]:          0 :             split_ranges[sender1] = local_primary_ents;
     508                 :            : 
     509                 :            : #ifdef VERBOSE
     510                 :            :             std::ostringstream partial_outFile;
     511                 :            : 
     512                 :            :             partial_outFile << "part_send_" << sender1 << "."
     513                 :            :                             << "recv" << rankInJoin << ".vtk";
     514                 :            : 
     515                 :            :             // the mesh contains ghosts too, but they are not part of mat/neumann set
     516                 :            :             // write in serial the file, to see what tags are missing
     517                 :            :             std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1
     518                 :            :                       << " entities: " << entities.size() << std::endl;
     519                 :            :             rval = pco->get_moab()->write_file( partial_outFile.str().c_str(), 0, 0, &local_set,
     520                 :            :                                                 1 );  // everything on local set received
     521                 :            :             if( MB_SUCCESS != rval ) return rval;
     522                 :            : #endif
     523                 :          0 :         }
     524                 :            :     }
     525                 :            :     // make sure adjacencies are updated on the new elements
     526                 :            : 
     527 [ #  # ][ #  # ]:          0 :     if( newEnts.empty() ) { std::cout << " WARNING: this task did not receive any entities \n"; }
                 [ #  # ]
     528                 :            :     // in order for the merging to work, we need to be sure that the adjacencies are updated
     529                 :            :     // (created)
     530         [ #  # ]:          0 :     Range local_verts        = newEnts.subset_by_type( MBVERTEX );
     531 [ #  # ][ #  # ]:          0 :     newEnts                  = subtract( newEnts, local_verts );
     532         [ #  # ]:          0 :     Core* mb                 = (Core*)pco->get_moab();
     533         [ #  # ]:          0 :     AEntityFactory* adj_fact = mb->a_entity_factory();
     534 [ #  # ][ #  # ]:          0 :     if( !adj_fact->vert_elem_adjacencies() )
     535         [ #  # ]:          0 :         adj_fact->create_vert_elem_adjacencies();
     536                 :            :     else
     537                 :            :     {
     538 [ #  # ][ #  # ]:          0 :         for( Range::iterator it = newEnts.begin(); it != newEnts.end(); it++ )
         [ #  # ][ #  # ]
                 [ #  # ]
     539                 :            :         {
     540         [ #  # ]:          0 :             EntityHandle eh          = *it;
     541                 :          0 :             const EntityHandle* conn = NULL;
     542                 :          0 :             int num_nodes            = 0;
     543         [ #  # ]:          0 :             rval                     = mb->get_connectivity( eh, conn, num_nodes );
     544         [ #  # ]:          0 :             if( MB_SUCCESS != rval ) return rval;
     545         [ #  # ]:          0 :             adj_fact->notify_create_entity( eh, conn, num_nodes );
     546                 :            :         }
     547                 :            :     }
     548                 :            : 
     549                 :          0 :     return MB_SUCCESS;
     550                 :            : }
     551                 :            : 
      552                 :            : // VSM: Why is the communicator never used? Remove the argument?
     553                 :          0 : ErrorCode ParCommGraph::release_send_buffers()
     554                 :            : {
     555                 :          0 :     int ierr, nsize = (int)sendReqs.size();
     556         [ #  # ]:          0 :     std::vector< MPI_Status > mult_status;
     557         [ #  # ]:          0 :     mult_status.resize( sendReqs.size() );
     558 [ #  # ][ #  # ]:          0 :     ierr = MPI_Waitall( nsize, &sendReqs[0], &mult_status[0] );
                 [ #  # ]
     559                 :            : 
     560         [ #  # ]:          0 :     if( ierr != 0 ) return MB_FAILURE;
     561                 :            :     // now we can free all buffers
     562         [ #  # ]:          0 :     delete[] comm_graph;
     563                 :          0 :     comm_graph = NULL;
     564                 :          0 :     std::vector< ParallelComm::Buffer* >::iterator vit;
     565 [ #  # ][ #  # ]:          0 :     for( vit = localSendBuffs.begin(); vit != localSendBuffs.end(); ++vit )
                 [ #  # ]
     566 [ #  # ][ #  # ]:          0 :         delete( *vit );
     567                 :          0 :     localSendBuffs.clear();
     568                 :          0 :     return MB_SUCCESS;
     569                 :            : }
     570                 :            : 
      571                 :            : // again, this will use the send buffers for nonblocking sends;
      572                 :            : // should the receives be non-blocking too?
     573                 :          0 : ErrorCode ParCommGraph::send_tag_values( MPI_Comm jcomm, ParallelComm* pco, Range& owned,
     574                 :            :                                          std::vector< Tag >& tag_handles )
     575                 :            : {
     576                 :            :     // basically, owned.size() needs to be equal to sum(corr_sizes)
     577                 :            :     // get info about the tag size, type, etc
     578                 :            :     int ierr;
     579         [ #  # ]:          0 :     Core* mb = (Core*)pco->get_moab();
     580                 :            :     // get info about the tag
     581                 :            :     //! Get the size of the specified tag in bytes
     582                 :          0 :     int total_bytes_per_entity = 0;  // we need to know, to allocate buffers
     583                 :            :     ErrorCode rval;
     584         [ #  # ]:          0 :     std::vector< int > vect_bytes_per_tag;
     585                 :            : #ifdef VERBOSE
     586                 :            :     std::vector< int > tag_sizes;
     587                 :            : #endif
     588         [ #  # ]:          0 :     for( size_t i = 0; i < tag_handles.size(); i++ )
     589                 :            :     {
     590                 :            :         int bytes_per_tag;
     591 [ #  # ][ #  # ]:          0 :         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     592                 :            :         int tag_size1;  // length
     593 [ #  # ][ #  # ]:          0 :         rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     594         [ #  # ]:          0 :         if( graph_type == DOF_BASED )
      595                 :          0 :             bytes_per_tag = bytes_per_tag / tag_size1;  // we know we send one value per tag, per ID sent;
      596                 :            :                                                         // could be 8 bytes for double, 4 for int, etc
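                          :            :             // e.g., a tag that stores 3 doubles per entity has bytes_per_tag = 24 and
                          :            :             // tag_size1 = 3, so 8 bytes (one double) are sent per DOF id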
     597                 :          0 :         total_bytes_per_entity += bytes_per_tag;
     598         [ #  # ]:          0 :         vect_bytes_per_tag.push_back( bytes_per_tag );
     599                 :            : #ifdef VERBOSE
     600                 :            :         int tag_size;
     601                 :            :         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
     602                 :            :         tag_sizes.push_back( tag_size );
     603                 :            : #endif
     604                 :            :     }
     605                 :            : 
     606                 :          0 :     int indexReq = 0;
     607         [ #  # ]:          0 :     if( graph_type == INITIAL_MIGRATE )  // original send
     608                 :            :     {
     609                 :            :         // use the buffers data structure to allocate memory for sending the tags
     610         [ #  # ]:          0 :         sendReqs.resize( split_ranges.size() );
     611                 :            : 
     612 [ #  # ][ #  # ]:          0 :         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
                 [ #  # ]
     613                 :            :         {
     614         [ #  # ]:          0 :             int receiver_proc = it->first;
     615 [ #  # ][ #  # ]:          0 :             Range ents        = it->second;  // primary entities, with the tag data
     616                 :          0 :             int size_buffer   = 4 + total_bytes_per_entity *
     617         [ #  # ]:          0 :                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
     618 [ #  # ][ #  # ]:          0 :             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
     619                 :            : 
     620         [ #  # ]:          0 :             buffer->reset_ptr( sizeof( int ) );
     621         [ #  # ]:          0 :             for( size_t i = 0; i < tag_handles.size(); i++ )
     622                 :            :             {
     623                 :            :                 // copy tag data to buffer->buff_ptr, and send the buffer (we could have used
     624                 :            :                 // regular char arrays)
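                          :            :                 // buffer layout: [ size_buffer as int ][ tag 0 values for all ents ]
                          :            :                 //                [ tag 1 values for all ents ] ...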
     625 [ #  # ][ #  # ]:          0 :                 rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
      626                 :            :                 // advance the buffer pointer
     627 [ #  # ][ #  # ]:          0 :                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
     628                 :            :             }
     629                 :          0 :             *( (int*)buffer->mem_ptr ) = size_buffer;
     630                 :            :             // int size_pack = buffer->get_current_size(); // debug check
     631                 :            :             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
     632 [ #  # ][ #  # ]:          0 :                               &sendReqs[indexReq] );  // we have to use global communicator
     633         [ #  # ]:          0 :             if( ierr != 0 ) return MB_FAILURE;
     634                 :          0 :             indexReq++;
     635 [ #  # ][ #  # ]:          0 :             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
     636                 :          0 :         }
     637                 :            :     }
     638         [ #  # ]:          0 :     else if( graph_type == COVERAGE )
     639                 :            :     {
     640                 :            :         // we know that we will need to send some tag data in a specific order
     641                 :            :         // first, get the ids of the local elements, from owned Range; arrange the buffer in order
     642                 :            :         // of increasing global id
     643                 :            :         Tag gidTag;
     644 [ #  # ][ #  # ]:          0 :         rval = mb->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
     645         [ #  # ]:          0 :         std::vector< int > gids;
     646 [ #  # ][ #  # ]:          0 :         gids.resize( owned.size() );
     647 [ #  # ][ #  # ]:          0 :         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     648 [ #  # ][ #  # ]:          0 :         std::map< int, EntityHandle > gidToHandle;
     649                 :          0 :         size_t i = 0;
     650 [ #  # ][ #  # ]:          0 :         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
         [ #  # ][ #  # ]
                 [ #  # ]
     651                 :            :         {
     652         [ #  # ]:          0 :             EntityHandle eh        = *it;
     653 [ #  # ][ #  # ]:          0 :             gidToHandle[gids[i++]] = eh;
     654                 :            :         }
     655                 :            :         // now, pack the data and send it
     656         [ #  # ]:          0 :         sendReqs.resize( involved_IDs_map.size() );
     657 [ #  # ][ #  # ]:          0 :         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
         [ #  # ][ #  # ]
     658                 :          0 :              mit != involved_IDs_map.end(); mit++ )
     659                 :            :         {
     660         [ #  # ]:          0 :             int receiver_proc        = mit->first;
     661         [ #  # ]:          0 :             std::vector< int >& eids = mit->second;
     662                 :          0 :             int size_buffer          = 4 + total_bytes_per_entity *
     663                 :          0 :                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
     664 [ #  # ][ #  # ]:          0 :             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
     665         [ #  # ]:          0 :             buffer->reset_ptr( sizeof( int ) );
     666                 :            : #ifdef VERBOSE
     667                 :            :             std::ofstream dbfile;
     668                 :            :             std::stringstream outf;
     669                 :            :             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
     670                 :            :             dbfile.open( outf.str().c_str() );
     671                 :            :             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
     672                 :            : #endif
      673                 :            :             // copy tag data to buffer->buff_ptr and send the buffer (we could have used regular
      674                 :            :             // char arrays); here we loop over the entity IDs in involved_IDs_map and, for each
      675                 :            :             // entity, pack the values of every tag in turn
     676                 :            : 
     677 [ #  # ][ #  # ]:          0 :             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
                 [ #  # ]
     678                 :            :             {
     679         [ #  # ]:          0 :                 int eID         = *it;
     680         [ #  # ]:          0 :                 EntityHandle eh = gidToHandle[eID];
     681         [ #  # ]:          0 :                 for( i = 0; i < tag_handles.size(); i++ )
     682                 :            :                 {
     683 [ #  # ][ #  # ]:          0 :                     rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     684                 :            : #ifdef VERBOSE
     685                 :            :                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
     686                 :            :                     double* vals = (double*)( buffer->buff_ptr );
     687                 :            :                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
     688                 :            :                     {
     689                 :            :                         dbfile << " " << *vals;
     690                 :            :                         vals++;
     691                 :            :                     }
     692                 :            :                     dbfile << "\n";
     693                 :            : #endif
     694         [ #  # ]:          0 :                     buffer->buff_ptr += vect_bytes_per_tag[i];
     695                 :            :                 }
     696                 :            :             }
     697                 :            : 
     698                 :            : #ifdef VERBOSE
     699                 :            :             dbfile.close();
     700                 :            : #endif
     701                 :          0 :             *( (int*)buffer->mem_ptr ) = size_buffer;
     702                 :            :             // int size_pack = buffer->get_current_size(); // debug check
     703                 :            :             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
     704 [ #  # ][ #  # ]:          0 :                               &sendReqs[indexReq] );  // we have to use global communicator
     705         [ #  # ]:          0 :             if( ierr != 0 ) return MB_FAILURE;
     706                 :          0 :             indexReq++;
     707         [ #  # ]:          0 :             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
     708                 :          0 :         }
     709                 :            :     }
     710         [ #  # ]:          0 :     else if( graph_type == DOF_BASED )
     711                 :            :     {
      712                 :            :         // fill up the buffer in the desired order, then send it
      713                 :            :         // get all the tags for all owned entities, and pack the buffers accordingly;
      714                 :            :         // we do not want to get the tags entity by entity, as that may be too expensive
     715         [ #  # ]:          0 :         std::vector< std::vector< double > > valuesTags;
     716         [ #  # ]:          0 :         valuesTags.resize( tag_handles.size() );
     717         [ #  # ]:          0 :         for( size_t i = 0; i < tag_handles.size(); i++ )
     718                 :            :         {
     719                 :            : 
     720                 :            :             int bytes_per_tag;
     721 [ #  # ][ #  # ]:          0 :             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     722 [ #  # ][ #  # ]:          0 :             valuesTags[i].resize( owned.size() * bytes_per_tag );
                 [ #  # ]
      723                 :            :             // fill the whole array; we will pick values from it below
     724 [ #  # ][ #  # ]:          0 :             rval = mb->tag_get_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     725                 :            :         }
     726                 :            :         // now, pack the data and send it
     727         [ #  # ]:          0 :         sendReqs.resize( involved_IDs_map.size() );
     728   [ #  #  #  # ]:          0 :         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
         [ #  # ][ #  # ]
     729                 :          0 :              mit != involved_IDs_map.end(); mit++ )
     730                 :            :         {
     731         [ #  # ]:          0 :             int receiver_proc                   = mit->first;
     732         [ #  # ]:          0 :             std::vector< int >& eids            = mit->second;
     733         [ #  # ]:          0 :             std::vector< int >& index_in_values = map_index[receiver_proc];
     734         [ #  # ]:          0 :             std::vector< int >& index_ptr       = map_ptr[receiver_proc];  // this is eids.size()+1;
     735                 :          0 :             int size_buffer                     = 4 + total_bytes_per_entity *
     736                 :          0 :                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
     737 [ #  # ][ #  # ]:          0 :             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
     738         [ #  # ]:          0 :             buffer->reset_ptr( sizeof( int ) );
     739                 :            : #ifdef VERBOSE
     740                 :            :             std::ofstream dbfile;
     741                 :            :             std::stringstream outf;
     742                 :            :             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
     743                 :            :             dbfile.open( outf.str().c_str() );
     744                 :            :             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
     745                 :            : #endif
     746                 :            :             // copy tag data to buffer->buff_ptr, and send the buffer
     747                 :            :             // pack data by tag, to be consistent with above
     748                 :          0 :             int j = 0;
     749 [ #  # ][ #  # ]:          0 :             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
                 [ #  # ]
     750                 :            :             {
     751 [ #  # ][ #  # ]:          0 :                 int index_in_v = index_in_values[index_ptr[j]];
     752         [ #  # ]:          0 :                 for( size_t i = 0; i < tag_handles.size(); i++ )
     753                 :            :                 {
     754                 :            :                     // right now, move just doubles; but it could be any type of tag
     755 [ #  # ][ #  # ]:          0 :                     *( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
     756                 :          0 :                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
     757                 :            :                 }
     758                 :            :             };
     759                 :          0 :             *( (int*)buffer->mem_ptr ) = size_buffer;
     760                 :            :             // int size_pack = buffer->get_current_size(); // debug check
     761                 :            :             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
     762 [ #  # ][ #  # ]:          0 :                               &sendReqs[indexReq] );  // we have to use global communicator
     763         [ #  # ]:          0 :             if( ierr != 0 ) return MB_FAILURE;
     764                 :          0 :             indexReq++;
     765         [ #  # ]:          0 :             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
     766                 :          0 :         }
     767                 :            :     }
     768                 :          0 :     return MB_SUCCESS;
     769                 :            : }
     770                 :            : 
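
// Editorial sketch (not part of ParCommGraph): the COVERAGE send path above packs, for
// every receiver, one buffer laid out as a 4-byte size header followed by the tag values
// of each requested entity, with the tags in order within each entity. The hypothetical
// helper below reproduces that layout for double-valued tags only, using std::vector
// storage; it assumes <vector> is available through the existing includes and mirrors
// the cast-based packing already used in this file.
static std::vector< char > pack_by_entity_then_tag( const std::vector< std::vector< double > >& tagValsPerEntity )
{
    // one double per (entity, tag) pair
    size_t nvals = 0;
    for( size_t e = 0; e < tagValsPerEntity.size(); e++ )
        nvals += tagValsPerEntity[e].size();
    int size_buffer = (int)( sizeof( int ) + nvals * sizeof( double ) );

    std::vector< char > buff( size_buffer );
    *( (int*)&buff[0] ) = size_buffer;              // size header, as written to buffer->mem_ptr above
    char* ptr           = &buff[0] + sizeof( int );
    for( size_t e = 0; e < tagValsPerEntity.size(); e++ )
        for( size_t t = 0; t < tagValsPerEntity[e].size(); t++ )
        {
            *( (double*)ptr ) = tagValsPerEntity[e][t];  // pack the value for this (entity, tag)
            ptr += sizeof( double );
        }
    return buff;
}
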
     771                 :          0 : ErrorCode ParCommGraph::receive_tag_values( MPI_Comm jcomm, ParallelComm* pco, Range& owned,
     772                 :            :                                             std::vector< Tag >& tag_handles )
     773                 :            : {
      774                 :            :     // unlike the send side, we will use blocking receives
     775                 :            :     int ierr;
     776                 :            :     MPI_Status status;
     777                 :            :     // basically, owned.size() needs to be equal to sum(corr_sizes)
     778                 :            :     // get info about the tag size, type, etc
     779         [ #  # ]:          0 :     Core* mb = (Core*)pco->get_moab();
      780                 :            :     // get info about each tag:
      781                 :            :     // the size of the specified tag, in bytes
     782                 :            :     ErrorCode rval;
     783                 :          0 :     int total_bytes_per_entity = 0;
     784         [ #  # ]:          0 :     std::vector< int > vect_bytes_per_tag;
     785                 :            : #ifdef VERBOSE
     786                 :            :     std::vector< int > tag_sizes;
     787                 :            : #endif
     788         [ #  # ]:          0 :     for( size_t i = 0; i < tag_handles.size(); i++ )
     789                 :            :     {
     790                 :            :         int bytes_per_tag;
     791 [ #  # ][ #  # ]:          0 :         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     792                 :          0 :         total_bytes_per_entity += bytes_per_tag;
     793         [ #  # ]:          0 :         vect_bytes_per_tag.push_back( bytes_per_tag );
     794                 :            : #ifdef VERBOSE
     795                 :            :         int tag_size;
     796                 :            :         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
     797                 :            :         tag_sizes.push_back( tag_size );
     798                 :            : #endif
     799                 :            :     }
     800                 :            : 
     801         [ #  # ]:          0 :     if( graph_type == INITIAL_MIGRATE )
     802                 :            :     {
     803                 :            :         // std::map<int, Range> split_ranges;
     804                 :            :         // rval = split_owned_range ( owned);MB_CHK_ERR ( rval );
     805                 :            : 
     806                 :            :         // use the buffers data structure to allocate memory for receiving the tags
     807 [ #  # ][ #  # ]:          0 :         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
                 [ #  # ]
     808                 :            :         {
     809         [ #  # ]:          0 :             int sender_proc = it->first;
     810 [ #  # ][ #  # ]:          0 :             Range ents      = it->second;  // primary entities, with the tag data, we will receive
     811                 :          0 :             int size_buffer = 4 + total_bytes_per_entity *
     812         [ #  # ]:          0 :                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
     813 [ #  # ][ #  # ]:          0 :             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
     814                 :            : 
     815         [ #  # ]:          0 :             buffer->reset_ptr( sizeof( int ) );
     816                 :            : 
     817                 :          0 :             *( (int*)buffer->mem_ptr ) = size_buffer;
     818                 :            :             // int size_pack = buffer->get_current_size(); // debug check
     819                 :            : 
     820         [ #  # ]:          0 :             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
     821         [ #  # ]:          0 :             if( ierr != 0 ) return MB_FAILURE;
      822                 :            :             // now set the tags:
      823                 :            :             // copy the received buffer data into each tag
     824                 :            : 
     825         [ #  # ]:          0 :             for( size_t i = 0; i < tag_handles.size(); i++ )
     826                 :            :             {
     827 [ #  # ][ #  # ]:          0 :                 rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );
     828 [ #  # ][ #  # ]:          0 :                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
     829                 :            :             }
     830         [ #  # ]:          0 :             delete buffer;  // no need for it afterwards
     831 [ #  # ][ #  # ]:          0 :             MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
     832                 :          0 :         }
     833                 :            :     }
     834         [ #  # ]:          0 :     else if( graph_type == COVERAGE )  // receive buffer, then extract tag data, in a loop
     835                 :            :     {
     836                 :            :         // we know that we will need to receive some tag data in a specific order (by ids stored)
     837                 :            :         // first, get the ids of the local elements, from owned Range; unpack the buffer in order
     838                 :            :         Tag gidTag;
     839 [ #  # ][ #  # ]:          0 :         rval = mb->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
     840         [ #  # ]:          0 :         std::vector< int > gids;
     841 [ #  # ][ #  # ]:          0 :         gids.resize( owned.size() );
     842 [ #  # ][ #  # ]:          0 :         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     843 [ #  # ][ #  # ]:          0 :         std::map< int, EntityHandle > gidToHandle;
     844                 :          0 :         size_t i = 0;
     845 [ #  # ][ #  # ]:          0 :         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
         [ #  # ][ #  # ]
                 [ #  # ]
     846                 :            :         {
     847         [ #  # ]:          0 :             EntityHandle eh        = *it;
     848 [ #  # ][ #  # ]:          0 :             gidToHandle[gids[i++]] = eh;
     849                 :            :         }
     850                 :            :         //
     851                 :            :         // now, unpack the data and set it to the tag
     852   [ #  #  #  # ]:          0 :         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
                 [ #  # ]
     853                 :          0 :              mit != involved_IDs_map.end(); mit++ )
     854                 :            :         {
     855         [ #  # ]:          0 :             int sender_proc          = mit->first;
     856         [ #  # ]:          0 :             std::vector< int >& eids = mit->second;
     857                 :          0 :             int size_buffer          = 4 + total_bytes_per_entity *
     858                 :          0 :                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
     859 [ #  # ][ #  # ]:          0 :             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
     860         [ #  # ]:          0 :             buffer->reset_ptr( sizeof( int ) );
      861                 :          0 :             *( (int*)buffer->mem_ptr ) = size_buffer;  // not really necessary; the receive overwrites this value anyway
     862                 :            : 
     863                 :            :             // receive the buffer
     864         [ #  # ]:          0 :             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
     865 [ #  # ][ #  # ]:          0 :             if( ierr != 0 ) return MB_FAILURE;
     866                 :            : // start copy
     867                 :            : #ifdef VERBOSE
     868                 :            :             std::ofstream dbfile;
     869                 :            :             std::stringstream outf;
     870                 :            :             outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt";
     871                 :            :             dbfile.open( outf.str().c_str() );
     872                 :            :             dbfile << "recvFrom_" << sender_proc << " on proc  " << rankInJoin << "\n";
     873                 :            : #endif
     874                 :            : 
      875                 :            :             // copy tag data from buffer->buff_ptr;
      876                 :            :             // the data is arranged by tag, and the tag loop is repeated for each entity
      877                 :            :             // maybe it should be arranged by entity instead of by tag (so the entity loop
      878                 :            :             // would be the outer one)
     879                 :            : 
     880 [ #  # ][ #  # ]:          0 :             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
                 [ #  # ]
     881                 :            :             {
     882         [ #  # ]:          0 :                 int eID                                     = *it;
     883         [ #  # ]:          0 :                 std::map< int, EntityHandle >::iterator mit = gidToHandle.find( eID );
     884 [ #  # ][ #  # ]:          0 :                 if( mit == gidToHandle.end() )
     885                 :            :                 {
     886 [ #  # ][ #  # ]:          0 :                     std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID
         [ #  # ][ #  # ]
     887         [ #  # ]:          0 :                               << "\n";
     888                 :          0 :                     return MB_FAILURE;
     889                 :            :                 }
     890         [ #  # ]:          0 :                 EntityHandle eh = mit->second;
     891         [ #  # ]:          0 :                 for( i = 0; i < tag_handles.size(); i++ )
     892                 :            :                 {
     893 [ #  # ][ #  # ]:          0 :                     rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     894                 :            : #ifdef VERBOSE
     895                 :            :                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
     896                 :            :                     double* vals = (double*)( buffer->buff_ptr );
     897                 :            :                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
     898                 :            :                     {
     899                 :            :                         dbfile << " " << *vals;
     900                 :            :                         vals++;
     901                 :            :                     }
     902                 :            :                     dbfile << "\n";
     903                 :            : #endif
     904         [ #  # ]:          0 :                     buffer->buff_ptr += vect_bytes_per_tag[i];
     905                 :            :                 }
     906                 :            :             }
     907                 :            : 
     908                 :            :             // delete receive buffer
     909         [ #  # ]:          0 :             delete buffer;
     910                 :            : #ifdef VERBOSE
     911                 :            :             dbfile.close();
     912                 :            : #endif
     913                 :          0 :         }
     914                 :            :     }
     915         [ #  # ]:          0 :     else if( graph_type == DOF_BASED )
     916                 :            :     {
      917                 :            :         // fill up the values for each tag, in the desired order, from the received buffer
      918                 :            :         //
      919                 :            :         // allocate storage for all tags over all owned entities, and unpack the buffers into it;
      920                 :            :         // we do not want to set the tags entity by entity, as that may be too expensive
     921         [ #  # ]:          0 :         std::vector< std::vector< double > > valuesTags;
     922         [ #  # ]:          0 :         valuesTags.resize( tag_handles.size() );
     923         [ #  # ]:          0 :         for( size_t i = 0; i < tag_handles.size(); i++ )
     924                 :            :         {
     925                 :            :             int bytes_per_tag;
     926 [ #  # ][ #  # ]:          0 :             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
     927 [ #  # ][ #  # ]:          0 :             valuesTags[i].resize( owned.size() * bytes_per_tag );
                 [ #  # ]
      928                 :            :             // this array will be filled below,
      929                 :            :             // using data from the received buffer
     930                 :            :             // rval = mb->tag_get_data(owned, (void*)( &(valuesTags[i][0])) );MB_CHK_ERR ( rval );
     931                 :            :         }
     932                 :            :         // now, unpack the data and set the tags
     933         [ #  # ]:          0 :         sendReqs.resize( involved_IDs_map.size() );
     934   [ #  #  #  # ]:          0 :         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
                 [ #  # ]
     935                 :          0 :              mit != involved_IDs_map.end(); mit++ )
     936                 :            :         {
     937         [ #  # ]:          0 :             int sender_proc                     = mit->first;
     938         [ #  # ]:          0 :             std::vector< int >& eids            = mit->second;
     939         [ #  # ]:          0 :             std::vector< int >& index_in_values = map_index[sender_proc];
     940         [ #  # ]:          0 :             std::vector< int >& index_ptr       = map_ptr[sender_proc];  // this is eids.size()+1;
     941                 :          0 :             int size_buffer                     = 4 + total_bytes_per_entity *
     942                 :          0 :                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
     943 [ #  # ][ #  # ]:          0 :             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
     944         [ #  # ]:          0 :             buffer->reset_ptr( sizeof( int ) );
     945                 :            : 
     946                 :            :             // receive the buffer
     947         [ #  # ]:          0 :             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
     948         [ #  # ]:          0 :             if( ierr != 0 ) return MB_FAILURE;
     949                 :            :             // use the values in buffer to populate valuesTag arrays, fill it up!
     950                 :          0 :             int j = 0;
     951 [ #  # ][ #  # ]:          0 :             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
                 [ #  # ]
     952                 :            :             {
     953         [ #  # ]:          0 :                 for( size_t i = 0; i < tag_handles.size(); i++ )
     954                 :            :                 {
     955                 :            :                     // right now, move just doubles; but it could be any type of tag
     956                 :          0 :                     double val = *( (double*)( buffer->buff_ptr ) );
     957                 :          0 :                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
     958 [ #  # ][ #  # ]:          0 :                     for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
                 [ #  # ]
     959 [ #  # ][ #  # ]:          0 :                         valuesTags[i][index_in_values[k]] = val;
                 [ #  # ]
     960                 :            :                 }
     961                 :            :             }
     962                 :            :             // we are done with the buffer in which we received tags, release / delete it
     963         [ #  # ]:          0 :             delete buffer;
     964                 :            :         }
     965                 :            :         // now we populated the values for all tags; set now the tags!
     966 [ #  # ][ #  # ]:          0 :         for( size_t i = 0; i < tag_handles.size(); i++ )
     967                 :            :         {
     968                 :            :             // we will fill this array, using data from received buffer
     969 [ #  # ][ #  # ]:          0 :             rval = mb->tag_set_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
     970                 :          0 :         }
     971                 :            :     }
     972                 :          0 :     return MB_SUCCESS;
     973                 :            : }
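
// Editorial sketch (not part of ParCommGraph): the DOF_BASED receive path above scatters
// every double read from the buffer to one or more positions of the local tag array
// through a CSR-like index: for the j-th received id, the target positions are
// index_in_values[ index_ptr[j] .. index_ptr[j+1] ). The hypothetical helper below shows
// that scatter for a single double-valued tag.
static void scatter_received_values( const std::vector< double >& received,      // one value per received id
                                     const std::vector< int >& index_ptr,        // size received.size() + 1
                                     const std::vector< int >& index_in_values,  // concatenated target positions
                                     std::vector< double >& localTagArray )
{
    for( size_t j = 0; j < received.size(); j++ )
        for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
            localTagArray[index_in_values[k]] = received[j];
}
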
      974                 :            : /*
      975                 :            :  * settle the send graph on the sender side, from the coverage IDs tuple list
      976                 :            :  */
     977                 :          0 : ErrorCode ParCommGraph::settle_send_graph( TupleList& TLcovIDs )
     978                 :            : {
     979                 :            :     // fill involved_IDs_map with data
     980                 :            :     // will have "receiving proc" and global id of element
     981                 :          0 :     int n      = TLcovIDs.get_n();
     982                 :          0 :     graph_type = COVERAGE;  // do not rely only on involved_IDs_map.size(); this can be 0 in some cases
     983         [ #  # ]:          0 :     for( int i = 0; i < n; i++ )
     984                 :            :     {
     985                 :          0 :         int to_proc      = TLcovIDs.vi_wr[2 * i];
     986                 :          0 :         int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
     987 [ #  # ][ #  # ]:          0 :         involved_IDs_map[to_proc].push_back( globalIdElem );
     988                 :            :     }
     989                 :            : #ifdef VERBOSE
     990                 :            :     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
     991                 :            :          mit++ )
     992                 :            :     {
     993                 :            :         std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
     994                 :            :         for( size_t i = 0; i < mit->second.size(); i++ )
     995                 :            :         {
     996                 :            :             std::cout << " " << mit->second[i];
     997                 :            :         }
     998                 :            :         std::cout << std::endl;
     999                 :            :     }
    1000                 :            : #endif
    1001                 :          0 :     return MB_SUCCESS;
    1002                 :            : }
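
// Editorial sketch (not part of ParCommGraph): settle_send_graph above expects TLcovIDs to
// store (receiving proc, global element id) pairs in its integer array, i.e. vi_wr[2*i] and
// vi_wr[2*i+1]. Using only the TupleList calls already seen in this file (initialize /
// enableWriteAccess / get_n / inc_n), a caller could assemble such a list as below; the
// ranks and ids used here are placeholders.
static void fill_coverage_ids_example( TupleList& TLcovIDs )
{
    const int examplePairs[2][2] = { { 1, 101 }, { 1, 102 } };  // (to_proc, globalIdElem)
    TLcovIDs.initialize( 2, 0, 0, 0, 2 );                       // 2 ints per tuple, room for 2 tuples
    TLcovIDs.enableWriteAccess();
    for( int i = 0; i < 2; i++ )
    {
        int n                     = TLcovIDs.get_n();
        TLcovIDs.vi_wr[2 * n]     = examplePairs[i][0];
        TLcovIDs.vi_wr[2 * n + 1] = examplePairs[i][1];
        TLcovIDs.inc_n();
    }
}
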
    1003                 :            : 
     1004                 :            : // this will set involved_IDs_map, which stores, for each sender task, all ids to be received from it
    1005                 :          0 : void ParCommGraph::SetReceivingAfterCoverage(
     1006                 :            :     std::map< int, std::set< int > >& idsFromProcs )  // makes sense only on receivers, currently called right after coverage
    1007                 :            : {
    1008 [ #  # ][ #  # ]:          0 :     for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
                 [ #  # ]
    1009                 :            :     {
    1010         [ #  # ]:          0 :         int fromProc            = mt->first;
    1011         [ #  # ]:          0 :         std::set< int >& setIds = mt->second;
    1012 [ #  # ][ #  # ]:          0 :         involved_IDs_map[fromProc].resize( setIds.size() );
    1013         [ #  # ]:          0 :         std::vector< int >& listIDs = involved_IDs_map[fromProc];
    1014                 :          0 :         size_t indx                 = 0;
    1015 [ #  # ][ #  # ]:          0 :         for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
                 [ #  # ]
    1016                 :            :         {
    1017         [ #  # ]:          0 :             int valueID     = *st;
    1018         [ #  # ]:          0 :             listIDs[indx++] = valueID;
    1019                 :            :         }
    1020                 :            :     }
    1021                 :          0 :     graph_type = COVERAGE;
    1022                 :          0 :     return;
    1023                 :            : }
    1024                 :            : //#define VERBOSE
    1025                 :          0 : void ParCommGraph::settle_comm_by_ids( int comp, TupleList& TLBackToComp, std::vector< int >& valuesComp )
    1026                 :            : {
    1027                 :            :     // settle comm graph on comp
    1028 [ #  # ][ #  # ]:          0 :     if( rootSender || rootReceiver ) std::cout << " settle comm graph by id on component " << comp << "\n";
         [ #  # ][ #  # ]
                 [ #  # ]
    1029         [ #  # ]:          0 :     int n = TLBackToComp.get_n();
    1030                 :            :     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
    1031                 :            :     // cases
    1032         [ #  # ]:          0 :     std::map< int, std::set< int > > uniqueIDs;
    1033                 :            : 
    1034         [ #  # ]:          0 :     for( int i = 0; i < n; i++ )
    1035                 :            :     {
    1036                 :          0 :         int to_proc  = TLBackToComp.vi_wr[3 * i + 2];
    1037                 :          0 :         int globalId = TLBackToComp.vi_wr[3 * i + 1];
    1038 [ #  # ][ #  # ]:          0 :         uniqueIDs[to_proc].insert( globalId );
    1039                 :            :     }
    1040                 :            : 
     1041                 :            :     // Vector of pairs storing each element of valuesComp
     1042                 :            :     // together with its original index
    1043         [ #  # ]:          0 :     std::vector< std::pair< int, int > > vp;
    1044         [ #  # ]:          0 :     vp.reserve( valuesComp.size() );
    1045                 :            : 
     1046                 :            :     // Insert each element into the pair vector
     1047                 :            :     // to keep track of its original index in valuesComp
    1048         [ #  # ]:          0 :     for( int i = 0; i < (int)valuesComp.size(); ++i )
    1049                 :            :     {
    1050 [ #  # ][ #  # ]:          0 :         vp.push_back( std::make_pair( valuesComp[i], i ) );
                 [ #  # ]
    1051                 :            :     }
    1052                 :            :     // Sorting pair vector
    1053         [ #  # ]:          0 :     sort( vp.begin(), vp.end() );
    1054                 :            : 
     1055                 :            :     // vp[i].first is the value, vp[i].second its original index in valuesComp
    1056                 :            : 
     1057                 :            :     // now count how many times each value appears in the sorted vector (i.e., in valuesComp)
    1058 [ #  # ][ #  # ]:          0 :     for( std::map< int, std::set< int > >::iterator it = uniqueIDs.begin(); it != uniqueIDs.end(); it++ )
                 [ #  # ]
    1059                 :            :     {
    1060         [ #  # ]:          0 :         int procId                  = it->first;
    1061         [ #  # ]:          0 :         std::set< int >& nums       = it->second;
    1062         [ #  # ]:          0 :         std::vector< int >& indx    = map_ptr[procId];
    1063         [ #  # ]:          0 :         std::vector< int >& indices = map_index[procId];
    1064         [ #  # ]:          0 :         indx.resize( nums.size() + 1 );
    1065                 :          0 :         int indexInVp = 0;
    1066                 :          0 :         int indexVal  = 0;
    1067         [ #  # ]:          0 :         indx[0]       = 0;  // start from 0
    1068 [ #  # ][ #  # ]:          0 :         for( std::set< int >::iterator sst = nums.begin(); sst != nums.end(); sst++, indexVal++ )
                 [ #  # ]
    1069                 :            :         {
    1070         [ #  # ]:          0 :             int val = *sst;
    1071 [ #  # ][ #  # ]:          0 :             involved_IDs_map[procId].push_back( val );
    1072 [ #  # ][ #  # ]:          0 :             indx[indexVal + 1] = indx[indexVal];
    1073 [ #  # ][ #  # ]:          0 :             while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )  // should be equal !
         [ #  # ][ #  # ]
    1074                 :            :             {
    1075 [ #  # ][ #  # ]:          0 :                 if( vp[indexInVp].first == val )
    1076                 :            :                 {
    1077         [ #  # ]:          0 :                     indx[indexVal + 1]++;
    1078 [ #  # ][ #  # ]:          0 :                     indices.push_back( vp[indexInVp].second );
    1079                 :            :                 }
    1080                 :          0 :                 indexInVp++;
    1081                 :            :             }
    1082                 :            :         }
    1083                 :            :     }
    1084                 :            : #ifdef VERBOSE
    1085                 :            :     std::stringstream f1;
    1086                 :            :     std::ofstream dbfile;
    1087                 :            :     f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
    1088                 :            :     dbfile.open( f1.str().c_str() );
    1089                 :            :     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
    1090                 :            :          mit++ )
    1091                 :            :     {
    1092                 :            :         int corrTask                = mit->first;
    1093                 :            :         std::vector< int >& corrIds = mit->second;
    1094                 :            :         std::vector< int >& indx    = map_ptr[corrTask];
    1095                 :            :         std::vector< int >& indices = map_index[corrTask];
    1096                 :            : 
    1097                 :            :         dbfile << " towards proc " << corrTask << " \n";
    1098                 :            :         for( int i = 0; i < (int)corrIds.size(); i++ )
    1099                 :            :         {
    1100                 :            :             dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ")  : ";
    1101                 :            :             for( int j = indx[i]; j < indx[i + 1]; j++ )
    1102                 :            :                 dbfile << indices[j] << " ";
    1103                 :            :             dbfile << "\n";
    1104                 :            :         }
    1105                 :            :         dbfile << " \n";
    1106                 :            :     }
    1107                 :            :     dbfile.close();
    1108                 :            : #endif
    1109                 :            : 
    1110                 :          0 :     graph_type = DOF_BASED;
     1111                 :            :     // now we need to fill in the back-and-forth index information needed to fill the arrays,
     1112                 :            :     // for example from the spectral side into involved_IDs_map, in case we want to send data
     1113                 :            :     // from spectral to phys
    1114                 :          0 : }
    1115                 :            : //#undef VERBOSE
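
// Editorial sketch (not part of ParCommGraph): settle_comm_by_ids above builds, for each
// neighbor proc, a CSR-style pair of arrays: indx[v+1] - indx[v] counts how many times the
// v-th unique id occurs in valuesComp, and indices stores the original positions of those
// occurrences. The hypothetical helper below performs the same counting for one set of
// wanted ids over a (value, original index) vector sorted by value, exactly like vp above.
static void build_csr_index( const std::set< int >& wantedIds,
                             const std::vector< std::pair< int, int > >& vp,  // sorted by value
                             std::vector< int >& indx, std::vector< int >& indices )
{
    indx.assign( 1, 0 );  // indx[0] = 0
    size_t pos = 0;
    for( std::set< int >::const_iterator it = wantedIds.begin(); it != wantedIds.end(); ++it )
    {
        int val = *it;
        indx.push_back( indx.back() );  // start the new count from the previous one
        while( pos < vp.size() && vp[pos].first <= val )
        {
            if( vp[pos].first == val )
            {
                indx.back()++;                        // one more occurrence of val
                indices.push_back( vp[pos].second );  // remember its original position
            }
            pos++;
        }
    }
}
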
    1116                 :            : // new partition calculation
    1117                 :          0 : ErrorCode ParCommGraph::compute_partition( ParallelComm* pco, Range& owned, int met )
    1118                 :            : {
     1119                 :            :     // we are on a sender task, and need to compute a new partition;
     1120                 :            :     // primary cells need to be distributed to the nb receiver tasks
     1121                 :            :     // first, we will use a graph partitioner, with zoltan;
     1122                 :            :     // the graph we need to build would normally require the first layer of ghosts;
     1123                 :            :     // we can avoid that: from each boundary edge/face we can find out the cell
     1124                 :            :     // on the other side, then form the global graph and call zoltan in parallel;
     1125                 :            :     // met 1 is a geometric partitioner, and met 2 is a graph partitioner;
     1126                 :            :     // for method 1 we do not need any ghost exchange
    1127                 :            : 
     1128                 :            :     // first, find the edges that are shared
    1129 [ #  # ][ #  # ]:          0 :     if( owned.empty() )
    1130                 :          0 :         return MB_SUCCESS;  // nothing to do? empty partition is not allowed, maybe we should return
    1131                 :            :                             // error?
    1132         [ #  # ]:          0 :     Core* mb = (Core*)pco->get_moab();
    1133                 :            : 
    1134                 :            :     double t1, t2, t3;
    1135         [ #  # ]:          0 :     t1               = MPI_Wtime();
    1136 [ #  # ][ #  # ]:          0 :     int primaryDim   = mb->dimension_from_handle( *owned.rbegin() );
                 [ #  # ]
    1137                 :          0 :     int interfaceDim = primaryDim - 1;  // should be 1 or 2
    1138         [ #  # ]:          0 :     Range sharedEdges;
    1139                 :            :     ErrorCode rval;
    1140                 :            : 
    1141         [ #  # ]:          0 :     std::vector< int > shprocs( MAX_SHARING_PROCS );
    1142         [ #  # ]:          0 :     std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );
    1143                 :            : 
    1144                 :            :     Tag gidTag;  //
    1145 [ #  # ][ #  # ]:          0 :     rval = mb->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
    1146                 :            :     int np;
    1147                 :            :     unsigned char pstatus;
    1148                 :            : 
    1149         [ #  # ]:          0 :     std::multimap< int, int > extraGraphEdges;
    1150                 :            :     // std::map<int, int> adjCellsId;
    1151         [ #  # ]:          0 :     std::map< int, int > extraCellsProc;
    1152                 :            :     // if method is 2, no need to do the exchange for adjacent cells across partition boundary
    1153                 :            :     // these maps above will be empty for method 2 (geometry)
    1154         [ #  # ]:          0 :     if( 1 == met )
    1155                 :            :     {
    1156                 :            :         rval = pco->get_shared_entities( /*int other_proc*/ -1, sharedEdges, interfaceDim,
    1157 [ #  # ][ #  # ]:          0 :                                          /*const bool iface*/ true );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
    1158                 :            : 
    1159                 :            : #ifdef VERBOSE
    1160                 :            :         std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size()
    1161                 :            :                   << "\n";
    1162                 :            : #endif
     1163                 :            :         // find the processors to which we need to send the ghost info about each edge;
     1164                 :            :         // first determine the local graph: which elements are adjacent to each cell in the owned range;
     1165                 :            :         // cells that share a partition interface edge are identified first, and form a map
    1166         [ #  # ]:          0 :         TupleList TLe;                                     // tuple list for cells
    1167 [ #  # ][ #  # ]:          0 :         TLe.initialize( 2, 0, 1, 0, sharedEdges.size() );  // send to, id of adj cell, remote edge
    1168         [ #  # ]:          0 :         TLe.enableWriteAccess();
    1169                 :            : 
    1170 [ #  # ][ #  # ]:          0 :         std::map< EntityHandle, int > edgeToCell;  // from local boundary edge to adjacent cell id
    1171                 :            :         // will be changed after
    1172 [ #  # ][ #  # ]:          0 :         for( Range::iterator eit = sharedEdges.begin(); eit != sharedEdges.end(); eit++ )
         [ #  # ][ #  # ]
                 [ #  # ]
    1173                 :            :         {
    1174         [ #  # ]:          0 :             EntityHandle edge = *eit;
    1175                 :            :             // get the adjacent cell
    1176         [ #  # ]:          0 :             Range adjEnts;
    1177 [ #  # ][ #  # ]:          0 :             rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
    1178 [ #  # ][ #  # ]:          0 :             if( adjEnts.size() > 0 )
    1179                 :            :             {
    1180         [ #  # ]:          0 :                 EntityHandle adjCell = adjEnts[0];
    1181                 :            :                 int gid;
    1182 [ #  # ][ #  # ]:          0 :                 rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
                 [ #  # ]
    1183 [ #  # ][ #  # ]:          0 :                 rval = pco->get_sharing_data( edge, &shprocs[0], &shhandles[0], pstatus, np );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1184         [ #  # ]:          0 :                 int n                = TLe.get_n();
    1185         [ #  # ]:          0 :                 TLe.vi_wr[2 * n]     = shprocs[0];
    1186                 :          0 :                 TLe.vi_wr[2 * n + 1] = gid;
    1187         [ #  # ]:          0 :                 TLe.vul_wr[n]        = shhandles[0];  // the remote edge corresponding to shared edge
    1188         [ #  # ]:          0 :                 edgeToCell[edge]     = gid;           // store the map between edge and local id of adj cell
    1189         [ #  # ]:          0 :                 TLe.inc_n();
    1190                 :            :             }
    1191                 :          0 :         }
    1192                 :            : 
    1193                 :            : #ifdef VERBOSE
    1194                 :            :         std::stringstream ff2;
    1195                 :            :         ff2 << "TLe_" << pco->rank() << ".txt";
    1196                 :            :         TLe.print_to_file( ff2.str().c_str() );
    1197                 :            : #endif
    1198                 :            :         // send the data to the other processors:
    1199 [ #  # ][ #  # ]:          0 :         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLe, 0 );
                 [ #  # ]
    1200                 :            :         // on receiver side, each local edge will have the remote cell adjacent to it!
    1201                 :            : 
    1202         [ #  # ]:          0 :         int ne = TLe.get_n();
    1203 [ #  # ][ #  # ]:          0 :         for( int i = 0; i < ne; i++ )
    1204                 :            :         {
    1205                 :          0 :             int sharedProc         = TLe.vi_rd[2 * i];       // this info is coming from here, originally
    1206                 :          0 :             int remoteCellID       = TLe.vi_rd[2 * i + 1];   // this is the id of the remote cell, on sharedProc
    1207                 :          0 :             EntityHandle localCell = TLe.vul_rd[i];          // this is now local edge/face on this proc
    1208         [ #  # ]:          0 :             int localCellId        = edgeToCell[localCell];  // this is the local cell  adjacent to edge/face
    1209                 :            :             // now, we will need to add to the graph the pair <localCellId, remoteCellID>
    1210         [ #  # ]:          0 :             std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID );
    1211         [ #  # ]:          0 :             extraGraphEdges.insert( extraAdj );
    1212                 :            :             // adjCellsId [edgeToCell[localCell]] = remoteCellID;
    1213         [ #  # ]:          0 :             extraCellsProc[remoteCellID] = sharedProc;
    1214                 :            : #ifdef VERBOSE
    1215                 :            :             std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n";
    1216                 :            : #endif
    1217                 :          0 :         }
    1218                 :            :     }
    1219         [ #  # ]:          0 :     t2 = MPI_Wtime();
    1220 [ #  # ][ #  # ]:          0 :     if( rootSender ) std::cout << " time preparing the input for Zoltan:" << t2 - t1 << " seconds. \n";
         [ #  # ][ #  # ]
     1221                 :            :         // we now have the adjacent cell ids; call zoltan for the parallel partition
    1222                 :            : #ifdef MOAB_HAVE_ZOLTAN
    1223                 :            :     ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mb );
    1224                 :            :     if( 1 <= met )  //  partition in zoltan, either graph or geometric partitioner
    1225                 :            :     {
    1226                 :            :         std::map< int, Range > distribution;  // how to distribute owned elements by processors in receiving groups
    1227                 :            :         // in how many tasks do we want to be distributed?
    1228                 :            :         int numNewPartitions = (int)receiverTasks.size();
    1229                 :            :         Range primaryCells   = owned.subset_by_dimension( primaryDim );
    1230                 :            :         rval = mbZTool->partition_owned_cells( primaryCells, pco, extraGraphEdges, extraCellsProc, numNewPartitions,
    1231                 :            :                                                distribution, met );MB_CHK_ERR( rval );
    1232                 :            :         for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ )
    1233                 :            :         {
    1234                 :            :             int part_index = mit->first;
    1235                 :            :             assert( part_index < numNewPartitions );
    1236                 :            :             split_ranges[receiverTasks[part_index]] = mit->second;
    1237                 :            :         }
    1238                 :            :     }
    1239                 :            :     // delete now the partitioner
    1240                 :            :     delete mbZTool;
    1241                 :            : #endif
    1242         [ #  # ]:          0 :     t3 = MPI_Wtime();
    1243 [ #  # ][ #  # ]:          0 :     if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n";
         [ #  # ][ #  # ]
    1244                 :          0 :     return MB_SUCCESS;
    1245                 :            : }
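
// Editorial sketch (not part of ParCommGraph): compute_partition above hands extraGraphEdges
// (local cell gid -> adjacent remote cell gid) and extraCellsProc (remote cell gid -> owning
// task) to the Zoltan-based partitioner. The hypothetical helper below shows how the
// off-processor neighbors of one local cell, and their owners, can be read back out of
// those two containers.
static void append_remote_neighbors( int localCellGid, const std::multimap< int, int >& extraGraphEdges,
                                     const std::map< int, int >& extraCellsProc, std::vector< int >& neighborGids,
                                     std::vector< int >& neighborProcs )
{
    std::pair< std::multimap< int, int >::const_iterator, std::multimap< int, int >::const_iterator > range =
        extraGraphEdges.equal_range( localCellGid );
    for( std::multimap< int, int >::const_iterator it = range.first; it != range.second; ++it )
    {
        int remoteGid = it->second;
        neighborGids.push_back( remoteGid );
        std::map< int, int >::const_iterator pit = extraCellsProc.find( remoteGid );
        neighborProcs.push_back( pit != extraCellsProc.end() ? pit->second : -1 );  // -1 if the owner is unknown
    }
}
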
    1246                 :            : // at this moment, each sender task has split_ranges formed;
    1247                 :            : // we need to aggregate that info and send it to receiver
    1248                 :          0 : ErrorCode ParCommGraph::send_graph_partition( ParallelComm* pco, MPI_Comm jcomm )
    1249                 :            : {
     1250                 :            :     // first, accumulate the info on the root of the sender group, using gather/gatherv:
     1251                 :            :     // accumulate the number of receivers from each sender, on the root sender
    1252                 :            :     int numberReceivers =
    1253                 :          0 :         (int)split_ranges.size();            // these are ranges of elements to be sent to each receiver, from this task
    1254                 :          0 :     int nSenders = (int)senderTasks.size();  //
    1255                 :            :     // this sender will have to send to this many receivers
    1256         [ #  # ]:          0 :     std::vector< int > displs( 1 );  // displacements for gatherv
    1257         [ #  # ]:          0 :     std::vector< int > counts( 1 );
    1258 [ #  # ][ #  # ]:          0 :     if( is_root_sender() )
    1259                 :            :     {
    1260         [ #  # ]:          0 :         displs.resize( nSenders + 1 );
    1261         [ #  # ]:          0 :         counts.resize( nSenders );
    1262                 :            :     }
    1263                 :            : 
    1264 [ #  # ][ #  # ]:          0 :     int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, pco->comm() );
                 [ #  # ]
    1265         [ #  # ]:          0 :     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
    1266                 :            :     // compute now displacements
    1267 [ #  # ][ #  # ]:          0 :     if( is_root_sender() )
    1268                 :            :     {
    1269         [ #  # ]:          0 :         displs[0] = 0;
    1270         [ #  # ]:          0 :         for( int k = 0; k < nSenders; k++ )
    1271                 :            :         {
    1272 [ #  # ][ #  # ]:          0 :             displs[k + 1] = displs[k] + counts[k];
                 [ #  # ]
    1273                 :            :         }
    1274                 :            :     }
    1275         [ #  # ]:          0 :     std::vector< int > buffer;
    1276 [ #  # ][ #  # ]:          0 :     if( is_root_sender() ) buffer.resize( displs[nSenders] );  // the last one will have the total count now
         [ #  # ][ #  # ]
    1277                 :            : 
    1278         [ #  # ]:          0 :     std::vector< int > recvs;
    1279 [ #  # ][ #  # ]:          0 :     for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
                 [ #  # ]
    1280                 :            :     {
    1281 [ #  # ][ #  # ]:          0 :         recvs.push_back( mit->first );
    1282                 :            :     }
    1283                 :            :     ierr =
    1284 [ #  # ][ #  # ]:          0 :         MPI_Gatherv( &recvs[0], numberReceivers, MPI_INT, &buffer[0], &counts[0], &displs[0], MPI_INT, 0, pco->comm() );
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1285         [ #  # ]:          0 :     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
    1286                 :            : 
     1287                 :            :     // now form the recv_graph map; it points from each receiver task to its list of sender tasks
     1288                 :            :     // this is the graph to be sent to the other side; built only on the root
    1289 [ #  # ][ #  # ]:          0 :     if( is_root_sender() )
    1290                 :            :     {
    1291                 :            : #ifdef GRAPH_INFO
    1292                 :            :         std::ofstream dbfileSender;
    1293                 :            :         std::stringstream outf;
    1294                 :            :         outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
    1295                 :            :         dbfileSender.open( outf.str().c_str() );
    1296                 :            :         dbfileSender << " number senders: " << nSenders << "\n";
    1297                 :            :         dbfileSender << " senderRank \treceivers \n";
    1298                 :            :         for( int k = 0; k < nSenders; k++ )
    1299                 :            :         {
    1300                 :            :             int indexInBuff = displs[k];
    1301                 :            :             int senderTask  = senderTasks[k];
    1302                 :            :             dbfileSender << senderTask << "\t\t";
    1303                 :            :             for( int j = 0; j < counts[k]; j++ )
    1304                 :            :             {
    1305                 :            :                 int recvTask = buffer[indexInBuff + j];
    1306                 :            :                 dbfileSender << recvTask << " ";
    1307                 :            :             }
    1308                 :            :             dbfileSender << "\n";
    1309                 :            :         }
    1310                 :            :         dbfileSender.close();
    1311                 :            : #endif
    1312         [ #  # ]:          0 :         for( int k = 0; k < nSenders; k++ )
    1313                 :            :         {
    1314         [ #  # ]:          0 :             int indexInBuff = displs[k];
    1315         [ #  # ]:          0 :             int senderTask  = senderTasks[k];
    1316 [ #  # ][ #  # ]:          0 :             for( int j = 0; j < counts[k]; j++ )
    1317                 :            :             {
    1318         [ #  # ]:          0 :                 int recvTask = buffer[indexInBuff + j];
    1319 [ #  # ][ #  # ]:          0 :                 recv_graph[recvTask].push_back( senderTask );  // this will be packed and sent to root receiver, with
    1320                 :            :                                                                // nonblocking send
    1321                 :            :             }
    1322                 :            :         }
    1323                 :            : #ifdef GRAPH_INFO
    1324                 :            :         std::ofstream dbfile;
    1325                 :            :         std::stringstream outf2;
    1326                 :            :         outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
    1327                 :            :         dbfile.open( outf2.str().c_str() );
    1328                 :            :         dbfile << " number receivers: " << recv_graph.size() << "\n";
    1329                 :            :         dbfile << " receiverRank \tsenders \n";
    1330                 :            :         for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
    1331                 :            :         {
    1332                 :            :             int recvTask                = mit->first;
    1333                 :            :             std::vector< int >& senders = mit->second;
    1334                 :            :             dbfile << recvTask << "\t\t";
    1335                 :            :             for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
    1336                 :            :                 dbfile << *vit << " ";
    1337                 :            :             dbfile << "\n";
    1338                 :            :         }
    1339                 :            :         dbfile.close();
    1340                 :            : #endif
    1341                 :            :         // this is the same as trivial partition
    1342 [ #  # ][ #  # ]:          0 :         ErrorCode rval = send_graph( jcomm );MB_CHK_ERR( rval );
         [ #  # ][ #  # ]
    1343                 :            :     }
    1344                 :            : 
    1345                 :          0 :     return MB_SUCCESS;
    1346                 :            : }
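// A minimal sketch of how the gathered (counts, displs, buffer) arrays turn into the
// receiver -> senders map built above; the helper and its toy data are hypothetical and
// only illustrate the loop over displs[k]/counts[k], they are not part of the API.
static std::map< int, std::vector< int > > example_recv_graph()
{
    // toy layout: sender 0 reports receivers {2, 3}, sender 1 reports receiver {3}
    const int nSendersToy = 2;
    const int countsToy[] = { 2, 1 };
    const int displsToy[] = { 0, 2 };
    const int bufferToy[] = { 2, 3, 3 };

    std::map< int, std::vector< int > > graph;  // receiver rank -> list of sender ranks
    for( int k = 0; k < nSendersToy; k++ )
        for( int j = 0; j < countsToy[k]; j++ )
            graph[bufferToy[displsToy[k] + j]].push_back( k );
    // (the real code stores senderTasks[k], the sender's rank in the joint communicator,
    //  instead of the group index k)
    // result here: graph[2] = {0}, graph[3] = {0, 1}
    return graph;
}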
     1347                 :            : // method to expose local graph info: for each sender or receiver task, the peer ranks and the
     1348                 :            : // sizes of the element lists to exchange, before or after intersection
    1349                 :          0 : ErrorCode ParCommGraph::dump_comm_information( std::string prefix, int is_send )
    1350                 :            : {
    1351                 :            :     //
    1352 [ #  # ][ #  # ]:          0 :     if( -1 != rankInGroup1 && 1 == is_send )  // it is a sender task
    1353                 :            :     {
    1354         [ #  # ]:          0 :         std::ofstream dbfile;
    1355 [ #  # ][ #  # ]:          0 :         std::stringstream outf;
    1356 [ #  # ][ #  # ]:          0 :         outf << prefix << "_sender_" << rankInGroup1 << "_joint" << rankInJoin << "_type_" << (int)graph_type << ".txt";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1357 [ #  # ][ #  # ]:          0 :         dbfile.open( outf.str().c_str() );
                 [ #  # ]
    1358                 :            : 
    1359         [ #  # ]:          0 :         if( graph_type == COVERAGE )
    1360                 :            :         {
    1361   [ #  #  #  # ]:          0 :             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
                 [ #  # ]
    1362                 :          0 :                  mit != involved_IDs_map.end(); mit++ )
    1363                 :            :             {
    1364         [ #  # ]:          0 :                 int receiver_proc        = mit->first;
    1365         [ #  # ]:          0 :                 std::vector< int >& eids = mit->second;
    1366 [ #  # ][ #  # ]:          0 :                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
         [ #  # ][ #  # ]
                 [ #  # ]
    1367                 :            :             }
    1368                 :            :         }
    1369         [ #  # ]:          0 :         else if( graph_type == INITIAL_MIGRATE )  // just after migration
    1370                 :            :         {
    1371 [ #  # ][ #  # ]:          0 :             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
                 [ #  # ]
    1372                 :            :             {
    1373         [ #  # ]:          0 :                 int receiver_proc = mit->first;
    1374         [ #  # ]:          0 :                 Range& eids       = mit->second;
    1375 [ #  # ][ #  # ]:          0 :                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1376                 :            :             }
    1377                 :            :         }
    1378         [ #  # ]:          0 :         else if( graph_type == DOF_BASED )  // just after migration, or from computeGraph
    1379                 :            :         {
    1380   [ #  #  #  # ]:          0 :             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
                 [ #  # ]
    1381                 :          0 :                  mit != involved_IDs_map.end(); mit++ )
    1382                 :            :             {
    1383         [ #  # ]:          0 :                 int receiver_proc = mit->first;
    1384 [ #  # ][ #  # ]:          0 :                 dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1385                 :            :             }
    1386                 :            :         }
    1387         [ #  # ]:          0 :         dbfile.close();
    1388                 :            :     }
    1389 [ #  # ][ #  # ]:          0 :     if( -1 != rankInGroup2 && 0 == is_send )  // it is a receiver task
    1390                 :            :     {
    1391         [ #  # ]:          0 :         std::ofstream dbfile;
    1392 [ #  # ][ #  # ]:          0 :         std::stringstream outf;
    1393 [ #  # ][ #  # ]:          0 :         outf << prefix << "_receiver_" << rankInGroup2 << "_joint" << rankInJoin << "_type_" << (int)graph_type
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
                 [ #  # ]
    1394         [ #  # ]:          0 :              << ".txt";
    1395 [ #  # ][ #  # ]:          0 :         dbfile.open( outf.str().c_str() );
                 [ #  # ]
    1396                 :            : 
    1397         [ #  # ]:          0 :         if( graph_type == COVERAGE )
    1398                 :            :         {
    1399   [ #  #  #  # ]:          0 :             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
                 [ #  # ]
    1400                 :          0 :                  mit != involved_IDs_map.end(); mit++ )
    1401                 :            :             {
    1402         [ #  # ]:          0 :                 int sender_proc          = mit->first;
    1403         [ #  # ]:          0 :                 std::vector< int >& eids = mit->second;
    1404 [ #  # ][ #  # ]:          0 :                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
         [ #  # ][ #  # ]
                 [ #  # ]
    1405                 :            :             }
    1406                 :            :         }
    1407         [ #  # ]:          0 :         else if( graph_type == INITIAL_MIGRATE )  // just after migration
    1408                 :            :         {
    1409 [ #  # ][ #  # ]:          0 :             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
                 [ #  # ]
    1410                 :            :             {
    1411         [ #  # ]:          0 :                 int sender_proc = mit->first;
    1412         [ #  # ]:          0 :                 Range& eids     = mit->second;
    1413 [ #  # ][ #  # ]:          0 :                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1414                 :            :             }
    1415                 :            :         }
    1416         [ #  # ]:          0 :         else if( graph_type == DOF_BASED )  // just after migration
    1417                 :            :         {
    1418   [ #  #  #  # ]:          0 :             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
                 [ #  # ]
    1419                 :          0 :                  mit != involved_IDs_map.end(); mit++ )
    1420                 :            :             {
    1421         [ #  # ]:          0 :                 int sender_proc = mit->first;
     1422 [ #  # ][ #  # ]:          0 :                 dbfile << "sender: " << sender_proc << " size:" << mit->second.size() << "\n";
         [ #  # ][ #  # ]
         [ #  # ][ #  # ]
    1423                 :            :             }
    1424                 :            :         }
    1425         [ #  # ]:          0 :         dbfile.close();
    1426                 :            :     }
    1427                 :          0 :     return MB_SUCCESS;
    1428                 :            : }
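// Usage sketch (hypothetical caller, not in this file): each task dumps its side of the
// graph, sender tasks with is_send == 1 and receiver tasks with is_send == 0, e.g.
//   cgraph->dump_comm_information( "after_intx", 1 );  // on tasks with rankInGroup1 != -1
//   cgraph->dump_comm_information( "after_intx", 0 );  // on tasks with rankInGroup2 != -1
// the prefix ("after_intx" here is arbitrary) only affects the per-task output file name.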
    1429 [ +  - ][ +  - ]:          8 : }  // namespace moab

Generated by: LCOV version 1.11