MOAB: Mesh Oriented datABase  (version 5.4.0)
ParCommGraph.cpp
Go to the documentation of this file.
00001 /*
00002  * ParCommGraph.cpp
00003  *
00004  */
00005 
00006 #include "moab/ParCommGraph.hpp"
00007 // we need to recompute adjacencies for merging to work
00008 #include "moab/Core.hpp"
00009 #include "AEntityFactory.hpp"
00010 
00011 #ifdef MOAB_HAVE_ZOLTAN
00012 #include "moab/ZoltanPartitioner.hpp"
00013 #endif
00014 
00015 // #define VERBOSE
00016 // #define GRAPH_INFO
00017 
00018 namespace moab
00019 {
00020 ParCommGraph::ParCommGraph( MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2 )
00021     : comm( joincomm ), compid1( coid1 ), compid2( coid2 )
00022 {
00023     // find out the tasks from each group, in the joint communicator
00024     find_group_ranks( group1, comm, senderTasks );
00025     find_group_ranks( group2, comm, receiverTasks );
00026 
00027     rootSender = rootReceiver = false;
00028     rankInGroup1 = rankInGroup2 = rankInJoin = -1;  // not initialized, or not part of the group
00029 
00030     int mpierr = MPI_Group_rank( group1, &rankInGroup1 );
00031     if( MPI_SUCCESS != mpierr || rankInGroup1 == MPI_UNDEFINED ) rankInGroup1 = -1;
00032 
00033     mpierr = MPI_Group_rank( group2, &rankInGroup2 );
00034     if( MPI_SUCCESS != mpierr || rankInGroup2 == MPI_UNDEFINED ) rankInGroup2 = -1;
00035 
00036     mpierr = MPI_Comm_rank( comm, &rankInJoin );
00037     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00038         rankInJoin = -1;
00039 
00040     mpierr = MPI_Comm_size( comm, &joinSize );
00041     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00042         joinSize = -1;
00043 
00044     if( 0 == rankInGroup1 ) rootSender = true;
00045     if( 0 == rankInGroup2 ) rootReceiver = true;
00046     graph_type = INITIAL_MIGRATE;  // 0
00047     comm_graph = NULL;
00048     context_id = -1;
00049     cover_set  = 0;  // refers to nothing yet
00050 }
00051 
00052 // copy constructor will copy only few basic things; split ranges will not be copied
00053 ParCommGraph::ParCommGraph( const ParCommGraph& src )
00054 {
00055     comm          = src.comm;
00056     senderTasks   = src.senderTasks;    // these are the sender tasks in joint comm
00057     receiverTasks = src.receiverTasks;  // these are all the receiver tasks in joint comm
00058     rootSender    = src.rootSender;
00059     rootReceiver  = src.rootReceiver;
00060     rankInGroup1  = src.rankInGroup1;
00061     rankInGroup2  = src.rankInGroup2;  // group 1 is sender, 2 is receiver
00062     rankInJoin    = src.rankInJoin;
00063     joinSize      = src.joinSize;
00064     compid1       = src.compid1;
00065     compid2       = src.compid2;
00066     comm_graph    = NULL;
00067     graph_type    = src.graph_type;
00068     context_id    = src.context_id;
00069     cover_set     = src.cover_set;
00070     return;
00071 }
00072 
00073 ParCommGraph::~ParCommGraph()
00074 {
00075     // TODO Auto-generated destructor stub
00076 }
00077 
00078 // utility to find out the ranks of the processes of a group, with respect to a joint comm,
00079 // which spans for sure the group
00080 // it is used locally (in the constructor), but it can be used as a utility
00081 void ParCommGraph::find_group_ranks( MPI_Group group, MPI_Comm joincomm, std::vector< int >& ranks )
00082 {
00083     MPI_Group global_grp;
00084     MPI_Comm_group( joincomm, &global_grp );
00085 
00086     int grp_size;
00087 
00088     MPI_Group_size( group, &grp_size );
00089     std::vector< int > rks( grp_size );
00090     ranks.resize( grp_size );
00091 
00092     for( int i = 0; i < grp_size; i++ )
00093         rks[i] = i;
00094 
00095     MPI_Group_translate_ranks( group, grp_size, &rks[0], global_grp, &ranks[0] );
00096     MPI_Group_free( &global_grp );
00097     return;
00098 }
00099 
00100 ErrorCode ParCommGraph::compute_trivial_partition( std::vector< int >& numElemsPerTaskInGroup1 )
00101 {
00102 
00103     recv_graph.clear();
00104     recv_sizes.clear();
00105     sender_graph.clear();
00106     sender_sizes.clear();
00107 
00108     if( numElemsPerTaskInGroup1.size() != senderTasks.size() )
00109         return MB_FAILURE;  // each sender has a number of elements that it owns
00110 
00111     // first find out total number of elements to be sent from all senders
00112     int total_elems = 0;
00113     std::vector< int > accum;
00114     accum.push_back( 0 );
00115 
00116     int num_senders = (int)senderTasks.size();
00117 
00118     for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ )
00119     {
00120         total_elems += numElemsPerTaskInGroup1[k];
00121         accum.push_back( total_elems );
00122     }
00123 
00124     int num_recv = ( (int)receiverTasks.size() );
00125     // in trivial partition, every receiver should get about total_elems/num_receivers elements
00126     int num_per_receiver = (int)( total_elems / num_recv );
00127     int leftover         = total_elems - num_per_receiver * num_recv;
00128 
00129     // so receiver k will receive  [starts[k], starts[k+1] ) interval
00130     std::vector< int > starts;
00131     starts.resize( num_recv + 1 );
00132     starts[0] = 0;
00133     for( int k = 0; k < num_recv; k++ )
00134     {
00135         starts[k + 1] = starts[k] + num_per_receiver;
00136         if( k < leftover ) starts[k + 1]++;
00137     }
00138 
00139     // each sender will send to a number of receivers, based on how the
00140     // arrays starts[0:num_recv] and accum[0:sendr] overlap
00141     int lastUsedReceiverRank = 0;  // first receiver was not treated yet
00142     for( int j = 0; j < num_senders; j++ )
00143     {
00144         // we could start the receiver loop with the latest receiver that received from previous
00145         // sender
00146         for( int k = lastUsedReceiverRank; k < num_recv; k++ )
00147         {
00148             // if overlap:
00149             if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] )
00150             {
00151                 recv_graph[receiverTasks[k]].push_back( senderTasks[j] );
00152                 sender_graph[senderTasks[j]].push_back( receiverTasks[k] );
00153 
00154                 // we still need to decide what is the overlap
00155                 int sizeOverlap = 1;  // at least 1, for sure
00156                 // 1
00157                 if( starts[k] >= accum[j] )  // one end is starts[k]
00158                 {
00159                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00160                         sizeOverlap = accum[j + 1] - starts[k];
00161                     else  //
00162                         sizeOverlap = starts[k + 1] - starts[k];
00163                 }
00164                 else  // one end is accum[j]
00165                 {
00166                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00167                         sizeOverlap = accum[j + 1] - accum[j];
00168                     else
00169                         sizeOverlap = starts[k + 1] - accum[j];
00170                 }
00171                 recv_sizes[receiverTasks[k]].push_back( sizeOverlap );  // basically, task k will receive from
00172                                                                         //   sender j, sizeOverlap elems
00173                 sender_sizes[senderTasks[j]].push_back( sizeOverlap );
00174                 if( starts[k] > accum[j + 1] )
00175                 {
00176                     lastUsedReceiverRank = k - 1;  // so next k loop will start a little higher, we
00177                                                    // probably finished with first few receivers (up
00178                                                    // to receiver lastUsedReceiverRank)
00179                     break;  // break the k loop, we distributed all elements from sender j to some
00180                             // receivers
00181                 }
00182             }
00183         }
00184     }
00185 
00186     return MB_SUCCESS;
00187 }
00188 
00189 ErrorCode ParCommGraph::pack_receivers_graph( std::vector< int >& packed_recv_array )
00190 {
00191     // it will basically look at local data, to pack communication graph, each receiver task will
00192     // have to post receives for each sender task that will send data to it; the array will be
00193     // communicated to root receiver, and eventually distributed to receiver tasks
00194 
00195     /*
00196      * packed_array will have receiver, number of senders, then senders, etc
00197      */
00198     if( recv_graph.size() < receiverTasks.size() )
00199     {
00200         // big problem, we have empty partitions in receive
00201         std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
00202     }
00203     for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
00204     {
00205         int recv                    = it->first;
00206         std::vector< int >& senders = it->second;
00207         packed_recv_array.push_back( recv );
00208         packed_recv_array.push_back( (int)senders.size() );
00209 
00210         for( int k = 0; k < (int)senders.size(); k++ )
00211             packed_recv_array.push_back( senders[k] );
00212     }
00213 
00214     return MB_SUCCESS;
00215 }
00216 
00217 ErrorCode ParCommGraph::split_owned_range( int sender_rank, Range& owned )
00218 {
00219     int senderTask                   = senderTasks[sender_rank];
00220     std::vector< int >& distribution = sender_sizes[senderTask];
00221     std::vector< int >& receivers    = sender_graph[senderTask];
00222     if( distribution.size() != receivers.size() )  //
00223         return MB_FAILURE;
00224 
00225     Range current = owned;  // get the full range first, then we will subtract stuff, for
00226     // the following ranges
00227 
00228     Range rleftover = current;
00229     for( size_t k = 0; k < receivers.size(); k++ )
00230     {
00231         Range newr;
00232         newr.insert( current.begin(), current.begin() + distribution[k] );
00233         split_ranges[receivers[k]] = newr;
00234 
00235         rleftover = subtract( current, newr );
00236         current   = rleftover;
00237     }
00238 
00239     return MB_SUCCESS;
00240 }
00241 
00242 // use for this the corresponding tasks and sizes
00243 ErrorCode ParCommGraph::split_owned_range( Range& owned )
00244 {
00245     if( corr_tasks.size() != corr_sizes.size() )  //
00246         return MB_FAILURE;
00247 
00248     Range current = owned;  // get the full range first, then we will subtract stuff, for
00249     // the following ranges
00250 
00251     Range rleftover = current;
00252     for( size_t k = 0; k < corr_tasks.size(); k++ )
00253     {
00254         Range newr;
00255         newr.insert( current.begin(), current.begin() + corr_sizes[k] );
00256         split_ranges[corr_tasks[k]] = newr;
00257 
00258         rleftover = subtract( current, newr );
00259         current   = rleftover;
00260     }
00261 
00262     return MB_SUCCESS;
00263 }
00264 
00265 ErrorCode ParCommGraph::send_graph( MPI_Comm jcomm )
00266 {
00267     if( is_root_sender() )
00268     {
00269         int ierr;
00270         // will need to build a communication graph, because each sender knows now to which receiver
00271         // to send data the receivers need to post receives for each sender that will send data to
00272         // them will need to gather on rank 0 on the sender comm, global ranks of sender with
00273         // receivers to send build communication matrix, each receiver will receive from what sender
00274 
00275         std::vector< int > packed_recv_array;
00276         ErrorCode rval = pack_receivers_graph( packed_recv_array );
00277         if( MB_SUCCESS != rval ) return rval;
00278 
00279         int size_pack_array = (int)packed_recv_array.size();
00280         comm_graph          = new int[size_pack_array + 1];
00281         comm_graph[0]       = size_pack_array;
00282         for( int k = 0; k < size_pack_array; k++ )
00283             comm_graph[k + 1] = packed_recv_array[k];
00284         // will add 2 requests
00285         /// use tag 10 to send size and tag 20 to send the packed array
00286         sendReqs.resize( 1 );
00287         // do not send the size in advance, because we use probe now
00288         /*ierr = MPI_Isend(&comm_graph[0], 1, MPI_INT, receiver(0), 10, jcomm, &sendReqs[0]); // we
00289         have to use global communicator if (ierr!=0) return MB_FAILURE;*/
00290         ierr = MPI_Isend( &comm_graph[1], size_pack_array, MPI_INT, receiver( 0 ), 20, jcomm,
00291                           &sendReqs[0] );  // we have to use global communicator
00292         if( ierr != 0 ) return MB_FAILURE;
00293     }
00294     return MB_SUCCESS;
00295 }
00296 
00297 // pco has MOAB too get_moab()
00298 // do we need to store "method" as a member variable ?
00299 ErrorCode ParCommGraph::send_mesh_parts( MPI_Comm jcomm, ParallelComm* pco, Range& owned )
00300 {
00301 
00302     ErrorCode rval;
00303     if( split_ranges.empty() )  // in trivial partition
00304     {
00305         rval = split_owned_range( rankInGroup1, owned );
00306         if( rval != MB_SUCCESS ) return rval;
00307         // we know this on the sender side:
00308         corr_tasks = sender_graph[senderTasks[rankInGroup1]];  // copy
00309         corr_sizes = sender_sizes[senderTasks[rankInGroup1]];  // another copy
00310     }
00311 
00312     int indexReq = 0;
00313     int ierr;                             // MPI error
00314     if( is_root_sender() ) indexReq = 1;  // for sendReqs
00315     sendReqs.resize( indexReq + split_ranges.size() );
00316     for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00317     {
00318         int receiver_proc = it->first;
00319         Range ents        = it->second;
00320 
00321         // add necessary vertices too
00322         Range verts;
00323         rval = pco->get_moab()->get_adjacencies( ents, 0, false, verts, Interface::UNION );
00324         if( rval != MB_SUCCESS )
00325         {
00326             std::cout << " can't get adjacencies. for entities to send\n";
00327             return rval;
00328         }
00329         ents.merge( verts );
00330         ParallelComm::Buffer* buffer = new ParallelComm::Buffer( ParallelComm::INITIAL_BUFF_SIZE );
00331         buffer->reset_ptr( sizeof( int ) );
00332         rval = pco->pack_buffer( ents, false, true, false, -1, buffer );
00333         if( rval != MB_SUCCESS )
00334         {
00335             std::cout << " can't pack buffer for entities to send\n";
00336             return rval;
00337         }
00338         int size_pack = buffer->get_current_size();
00339 
00340         // TODO there could be an issue with endian things; check !!!!!
00341         // we are sending the size of the buffer first as an int!!!
00342         /// not anymore !
00343         /* ierr = MPI_Isend(buffer->mem_ptr, 1, MPI_INT, receiver_proc, 1, jcomm,
00344          &sendReqs[indexReq]); // we have to use global communicator if (ierr!=0) return MB_FAILURE;
00345          indexReq++;*/
00346 
00347         ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_CHAR, receiver_proc, 2, jcomm,
00348                           &sendReqs[indexReq] );  // we have to use global communicator
00349         if( ierr != 0 ) return MB_FAILURE;
00350         indexReq++;
00351         localSendBuffs.push_back( buffer );
00352     }
00353     return MB_SUCCESS;
00354 }
00355 
00356 // this is called on receiver side
00357 ErrorCode ParCommGraph::receive_comm_graph( MPI_Comm jcomm, ParallelComm* pco, std::vector< int >& pack_array )
00358 {
00359     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
00360     // knows what data to expect
00361     MPI_Comm receive = pco->comm();
00362     int size_pack_array, ierr;
00363     MPI_Status status;
00364     if( rootReceiver )
00365     {
00366         /*
00367          * MPI_Probe(
00368         int source,
00369         int tag,
00370         MPI_Comm comm,
00371         MPI_Status* status)
00372          *
00373          */
00374         ierr = MPI_Probe( sender( 0 ), 20, jcomm, &status );
00375         if( 0 != ierr )
00376         {
00377             std::cout << " MPI_Probe failure: " << ierr << "\n";
00378             return MB_FAILURE;
00379         }
00380         // get the count of data received from the MPI_Status structure
00381         ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array );
00382         if( 0 != ierr )
00383         {
00384             std::cout << " MPI_Get_count failure: " << ierr << "\n";
00385             return MB_FAILURE;
00386         }
00387 #ifdef VERBOSE
00388         std::cout << " receive comm graph size: " << size_pack_array << "\n";
00389 #endif
00390         pack_array.resize( size_pack_array );
00391         ierr = MPI_Recv( &pack_array[0], size_pack_array, MPI_INT, sender( 0 ), 20, jcomm, &status );
00392         if( 0 != ierr ) return MB_FAILURE;
00393 #ifdef VERBOSE
00394         std::cout << " receive comm graph ";
00395         for( int k = 0; k < (int)pack_array.size(); k++ )
00396             std::cout << " " << pack_array[k];
00397         std::cout << "\n";
00398 #endif
00399     }
00400 
00401     // now broadcast this whole array to all receivers, so they know what to expect
00402     ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive );
00403     if( 0 != ierr ) return MB_FAILURE;
00404     pack_array.resize( size_pack_array );
00405     ierr = MPI_Bcast( &pack_array[0], size_pack_array, MPI_INT, 0, receive );
00406     if( 0 != ierr ) return MB_FAILURE;
00407     return MB_SUCCESS;
00408 }
00409 
00410 ErrorCode ParCommGraph::receive_mesh( MPI_Comm jcomm,
00411                                       ParallelComm* pco,
00412                                       EntityHandle local_set,
00413                                       std::vector< int >& senders_local )
00414 {
00415     ErrorCode rval;
00416     int ierr;
00417     MPI_Status status;
00418     // we also need to fill corresponding mesh info on the other side
00419     corr_tasks = senders_local;
00420     Range newEnts;
00421 
00422     Tag orgSendProcTag;  // this will be a tag set on the received mesh, with info about from what
00423                          // task / PE the
00424     // primary element came from, in the joint communicator ; this will be forwarded by coverage
00425     // mesh
00426     int defaultInt = -1;  // no processor, so it was not migrated from somewhere else
00427     rval           = pco->get_moab()->tag_get_handle( "orig_sending_processor", 1, MB_TYPE_INTEGER, orgSendProcTag,
00428                                                       MB_TAG_DENSE | MB_TAG_CREAT, &defaultInt );MB_CHK_SET_ERR( rval, "can't create original sending processor tag" );
00429     if( !senders_local.empty() )
00430     {
00431         for( size_t k = 0; k < senders_local.size(); k++ )
00432         {
00433             int sender1 = senders_local[k];
00434             // first receive the size of the buffer using probe
00435             /*
00436                  * MPI_Probe(
00437                 int source,
00438                 int tag,
00439                 MPI_Comm comm,
00440                 MPI_Status* status)
00441                  *
00442                  */
00443             ierr = MPI_Probe( sender1, 2, jcomm, &status );
00444             if( 0 != ierr )
00445             {
00446                 std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n";
00447                 return MB_FAILURE;
00448             }
00449             // get the count of data received from the MPI_Status structure
00450             int size_pack;
00451             ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack );
00452             if( 0 != ierr )
00453             {
00454                 std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh  " << ierr << "\n";
00455                 return MB_FAILURE;
00456             }
00457 
00458             /* ierr = MPI_Recv (&size_pack, 1, MPI_INT, sender1, 1, jcomm, &status);
00459              if (0!=ierr) return MB_FAILURE;*/
00460             // now resize the buffer, then receive it
00461             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_pack );
00462             // buffer->reserve(size_pack);
00463 
00464             ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_CHAR, sender1, 2, jcomm, &status );
00465             if( 0 != ierr )
00466             {
00467                 std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n";
00468                 return MB_FAILURE;
00469             }
00470             // now unpack the buffer we just received
00471             Range entities;
00472             std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
00473             std::vector< std::vector< int > > L1p;
00474             std::vector< EntityHandle > L2hloc, L2hrem;
00475             std::vector< unsigned int > L2p;
00476 
00477             buffer->reset_ptr( sizeof( int ) );
00478             std::vector< EntityHandle > entities_vec( entities.size() );
00479             std::copy( entities.begin(), entities.end(), entities_vec.begin() );
00480             rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
00481                                        entities_vec );
00482             delete buffer;
00483             if( MB_SUCCESS != rval ) return rval;
00484 
00485             std::copy( entities_vec.begin(), entities_vec.end(), range_inserter( entities ) );
00486             // we have to add them to the local set
00487             rval = pco->get_moab()->add_entities( local_set, entities );
00488             if( MB_SUCCESS != rval ) return rval;
00489             // corr_sizes is the size of primary entities received
00490             Range verts              = entities.subset_by_dimension( 0 );
00491             Range local_primary_ents = subtract( entities, verts );
00492             if( local_primary_ents.empty() )
00493             {
00494                 // it is possible that all ents sent were vertices (point cloud)
00495                 // then consider primary entities the vertices
00496                 local_primary_ents = verts;
00497             }
00498             else
00499             {
00500                 // set a tag with the original sender for the primary entity
00501                 // will be used later for coverage mesh
00502                 std::vector< int > orig_senders( local_primary_ents.size(), sender1 );
00503                 rval = pco->get_moab()->tag_set_data( orgSendProcTag, local_primary_ents, &orig_senders[0] );
00504             }
00505             corr_sizes.push_back( (int)local_primary_ents.size() );
00506 
00507             newEnts.merge( entities );
00508             // make these in split ranges
00509             split_ranges[sender1] = local_primary_ents;
00510 
00511 #ifdef VERBOSE
00512             std::ostringstream partial_outFile;
00513 
00514             partial_outFile << "part_send_" << sender1 << "."
00515                             << "recv" << rankInJoin << ".vtk";
00516 
00517             // the mesh contains ghosts too, but they are not part of mat/neumann set
00518             // write in serial the file, to see what tags are missing
00519             std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1
00520                       << " entities: " << entities.size() << std::endl;
00521             rval = pco->get_moab()->write_file( partial_outFile.str().c_str(), 0, 0, &local_set,
00522                                                 1 );  // everything on local set received
00523             if( MB_SUCCESS != rval ) return rval;
00524 #endif
00525         }
00526     }
00527     // make sure adjacencies are updated on the new elements
00528 
00529     if( newEnts.empty() )
00530     {
00531         std::cout << " WARNING: this task did not receive any entities \n";
00532     }
00533     // in order for the merging to work, we need to be sure that the adjacencies are updated
00534     // (created)
00535     Range local_verts        = newEnts.subset_by_type( MBVERTEX );
00536     newEnts                  = subtract( newEnts, local_verts );
00537     Core* mb                 = (Core*)pco->get_moab();
00538     AEntityFactory* adj_fact = mb->a_entity_factory();
00539     if( !adj_fact->vert_elem_adjacencies() )
00540         adj_fact->create_vert_elem_adjacencies();
00541     else
00542     {
00543         for( Range::iterator it = newEnts.begin(); it != newEnts.end(); it++ )
00544         {
00545             EntityHandle eh          = *it;
00546             const EntityHandle* conn = NULL;
00547             int num_nodes            = 0;
00548             rval                     = mb->get_connectivity( eh, conn, num_nodes );
00549             if( MB_SUCCESS != rval ) return rval;
00550             adj_fact->notify_create_entity( eh, conn, num_nodes );
00551         }
00552     }
00553 
00554     return MB_SUCCESS;
00555 }
00556 
00557 // VSM: Why is the communicator never used. Remove the argument ?
00558 ErrorCode ParCommGraph::release_send_buffers()
00559 {
00560     int ierr, nsize = (int)sendReqs.size();
00561     std::vector< MPI_Status > mult_status;
00562     mult_status.resize( sendReqs.size() );
00563     ierr = MPI_Waitall( nsize, &sendReqs[0], &mult_status[0] );
00564 
00565     if( ierr != 0 ) return MB_FAILURE;
00566     // now we can free all buffers
00567     delete[] comm_graph;
00568     comm_graph = NULL;
00569     std::vector< ParallelComm::Buffer* >::iterator vit;
00570     for( vit = localSendBuffs.begin(); vit != localSendBuffs.end(); ++vit )
00571         delete( *vit );
00572     localSendBuffs.clear();
00573     return MB_SUCCESS;
00574 }
00575 
00576 // again, will use the send buffers, for nonblocking sends;
00577 // should be the receives non-blocking too?
00578 ErrorCode ParCommGraph::send_tag_values( MPI_Comm jcomm,
00579                                          ParallelComm* pco,
00580                                          Range& owned,
00581                                          std::vector< Tag >& tag_handles )
00582 {
00583     // basically, owned.size() needs to be equal to sum(corr_sizes)
00584     // get info about the tag size, type, etc
00585     int ierr;
00586     Core* mb = (Core*)pco->get_moab();
00587     // get info about the tag
00588     //! Get the size of the specified tag in bytes
00589     int total_bytes_per_entity = 0;  // we need to know, to allocate buffers
00590     ErrorCode rval;
00591     std::vector< int > vect_bytes_per_tag;
00592 #ifdef VERBOSE
00593     std::vector< int > tag_sizes;
00594 #endif
00595     for( size_t i = 0; i < tag_handles.size(); i++ )
00596     {
00597         int bytes_per_tag;
00598         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00599         int tag_size1;  // length
00600         rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
00601         if( graph_type == DOF_BASED )
00602             bytes_per_tag = bytes_per_tag / tag_size1;  // we know we have one double per tag , per ID sent;
00603                                                         // could be 8 for double, 4 for int, etc
00604         total_bytes_per_entity += bytes_per_tag;
00605         vect_bytes_per_tag.push_back( bytes_per_tag );
00606 #ifdef VERBOSE
00607         int tag_size;
00608         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00609         tag_sizes.push_back( tag_size );
00610 #endif
00611     }
00612 
00613     int indexReq = 0;
00614     if( graph_type == INITIAL_MIGRATE )  // original send
00615     {
00616         // use the buffers data structure to allocate memory for sending the tags
00617         sendReqs.resize( split_ranges.size() );
00618 
00619         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00620         {
00621             int receiver_proc = it->first;
00622             Range ents        = it->second;  // primary entities, with the tag data
00623             int size_buffer   = 4 + total_bytes_per_entity *
00624                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00625             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00626 
00627             buffer->reset_ptr( sizeof( int ) );
00628             for( size_t i = 0; i < tag_handles.size(); i++ )
00629             {
00630                 // copy tag data to buffer->buff_ptr, and send the buffer (we could have used
00631                 // regular char arrays)
00632                 rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00633                 // advance the butter
00634                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00635             }
00636             *( (int*)buffer->mem_ptr ) = size_buffer;
00637             // int size_pack = buffer->get_current_size(); // debug check
00638             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00639                               &sendReqs[indexReq] );  // we have to use global communicator
00640             if( ierr != 0 ) return MB_FAILURE;
00641             indexReq++;
00642             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00643         }
00644     }
00645     else if( graph_type == COVERAGE )
00646     {
00647         // we know that we will need to send some tag data in a specific order
00648         // first, get the ids of the local elements, from owned Range; arrange the buffer in order
00649         // of increasing global id
00650         Tag gidTag = mb->globalId_tag();
00651         std::vector< int > gids;
00652         gids.resize( owned.size() );
00653         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00654         std::map< int, EntityHandle > gidToHandle;
00655         size_t i = 0;
00656         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00657         {
00658             EntityHandle eh        = *it;
00659             gidToHandle[gids[i++]] = eh;
00660         }
00661         // now, pack the data and send it
00662         sendReqs.resize( involved_IDs_map.size() );
00663         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00664              mit != involved_IDs_map.end(); mit++ )
00665         {
00666             int receiver_proc        = mit->first;
00667             std::vector< int >& eids = mit->second;
00668             int size_buffer          = 4 + total_bytes_per_entity *
00669                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00670             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00671             buffer->reset_ptr( sizeof( int ) );
00672 #ifdef VERBOSE
00673             std::ofstream dbfile;
00674             std::stringstream outf;
00675             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00676             dbfile.open( outf.str().c_str() );
00677             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00678 #endif
00679             // copy tag data to buffer->buff_ptr, and send the buffer (we could have used regular
00680             // char arrays) pack data by tag, to be consistent with above, even though we loop
00681             // through the entities for each tag
00682 
00683             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00684             {
00685                 int eID         = *it;
00686                 EntityHandle eh = gidToHandle[eID];
00687                 for( i = 0; i < tag_handles.size(); i++ )
00688                 {
00689                     rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );
00690                     if( rval != MB_SUCCESS )
00691                     {
00692                         delete buffer;  // free parallel comm buffer first
00693 
00694                         MB_SET_ERR( rval, "Tag get data failed" );
00695                     }
00696 #ifdef VERBOSE
00697                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00698                     double* vals = (double*)( buffer->buff_ptr );
00699                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00700                     {
00701                         dbfile << " " << *vals;
00702                         vals++;
00703                     }
00704                     dbfile << "\n";
00705 #endif
00706                     buffer->buff_ptr += vect_bytes_per_tag[i];
00707                 }
00708             }
00709 
00710 #ifdef VERBOSE
00711             dbfile.close();
00712 #endif
00713             *( (int*)buffer->mem_ptr ) = size_buffer;
00714             // int size_pack = buffer->get_current_size(); // debug check
00715             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00716                               &sendReqs[indexReq] );  // we have to use global communicator
00717             if( ierr != 0 ) return MB_FAILURE;
00718             indexReq++;
00719             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00720         }
00721     }
00722     else if( graph_type == DOF_BASED )
00723     {
00724         // need to fill up the buffer, in the order desired, send it
00725         // get all the tags, for all owned entities, and pack the buffers accordingly
00726         // we do not want to get the tags by entity, it may be too expensive
00727         std::vector< std::vector< double > > valuesTags;
00728         valuesTags.resize( tag_handles.size() );
00729         for( size_t i = 0; i < tag_handles.size(); i++ )
00730         {
00731 
00732             int bytes_per_tag;
00733             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00734             valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
00735             // fill the whole array, we will pick up from here
00736             rval = mb->tag_get_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
00737         }
00738         // now, pack the data and send it
00739         sendReqs.resize( involved_IDs_map.size() );
00740         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00741              mit != involved_IDs_map.end(); mit++ )
00742         {
00743             int receiver_proc                   = mit->first;
00744             std::vector< int >& eids            = mit->second;
00745             std::vector< int >& index_in_values = map_index[receiver_proc];
00746             std::vector< int >& index_ptr       = map_ptr[receiver_proc];  // this is eids.size()+1;
00747             int size_buffer                     = 4 + total_bytes_per_entity *
00748                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00749             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00750             buffer->reset_ptr( sizeof( int ) );
00751 #ifdef VERBOSE
00752             std::ofstream dbfile;
00753             std::stringstream outf;
00754             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00755             dbfile.open( outf.str().c_str() );
00756             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00757 #endif
00758             // copy tag data to buffer->buff_ptr, and send the buffer
00759             // pack data by tag, to be consistent with above
00760             int j = 0;
00761             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00762             {
00763                 int index_in_v = index_in_values[index_ptr[j]];
00764                 for( size_t i = 0; i < tag_handles.size(); i++ )
00765                 {
00766                     // right now, move just doubles; but it could be any type of tag
00767                     *( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
00768                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
00769                 }
00770             };
00771             *( (int*)buffer->mem_ptr ) = size_buffer;
00772             // int size_pack = buffer->get_current_size(); // debug check
00773             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00774                               &sendReqs[indexReq] );  // we have to use global communicator
00775             if( ierr != 0 ) return MB_FAILURE;
00776             indexReq++;
00777             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00778         }
00779     }
00780     return MB_SUCCESS;
00781 }
00782 
00783 ErrorCode ParCommGraph::receive_tag_values( MPI_Comm jcomm,
00784                                             ParallelComm* pco,
00785                                             Range& owned,
00786                                             std::vector< Tag >& tag_handles )
00787 {
00788     // opposite to sending, we will use blocking receives
00789     int ierr;
00790     MPI_Status status;
00791     // basically, owned.size() needs to be equal to sum(corr_sizes)
00792     // get info about the tag size, type, etc
00793     Core* mb = (Core*)pco->get_moab();
00794     // get info about the tag
00795     //! Get the size of the specified tag in bytes
00796     ErrorCode rval;
00797     int total_bytes_per_entity = 0;
00798     std::vector< int > vect_bytes_per_tag;
00799 #ifdef VERBOSE
00800     std::vector< int > tag_sizes;
00801 #endif
00802     for( size_t i = 0; i < tag_handles.size(); i++ )
00803     {
00804         int bytes_per_tag;
00805         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00806         total_bytes_per_entity += bytes_per_tag;
00807         vect_bytes_per_tag.push_back( bytes_per_tag );
00808 #ifdef VERBOSE
00809         int tag_size;
00810         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00811         tag_sizes.push_back( tag_size );
00812 #endif
00813     }
00814 
00815     if( graph_type == INITIAL_MIGRATE )
00816     {
00817         // std::map<int, Range> split_ranges;
00818         // rval = split_owned_range ( owned);MB_CHK_ERR ( rval );
00819 
00820         // use the buffers data structure to allocate memory for receiving the tags
00821         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00822         {
00823             int sender_proc = it->first;
00824             Range ents      = it->second;  // primary entities, with the tag data, we will receive
00825             int size_buffer = 4 + total_bytes_per_entity *
00826                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00827             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00828 
00829             buffer->reset_ptr( sizeof( int ) );
00830 
00831             *( (int*)buffer->mem_ptr ) = size_buffer;
00832             // int size_pack = buffer->get_current_size(); // debug check
00833 
00834             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00835             if( ierr != 0 ) return MB_FAILURE;
00836             // now set the tag
00837             // copy to tag
00838 
00839             for( size_t i = 0; i < tag_handles.size(); i++ )
00840             {
00841                 rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );
00842                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00843             }
00844             delete buffer;  // no need for it afterwards
00845             MB_CHK_ERR( rval );
00846         }
00847     }
00848     else if( graph_type == COVERAGE )  // receive buffer, then extract tag data, in a loop
00849     {
00850         // we know that we will need to receive some tag data in a specific order (by ids stored)
00851         // first, get the ids of the local elements, from owned Range; unpack the buffer in order
00852         Tag gidTag = mb->globalId_tag();
00853         std::vector< int > gids;
00854         gids.resize( owned.size() );
00855         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00856         std::map< int, EntityHandle > gidToHandle;
00857         size_t i = 0;
00858         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00859         {
00860             EntityHandle eh        = *it;
00861             gidToHandle[gids[i++]] = eh;
00862         }
00863         //
00864         // now, unpack the data and set it to the tag
00865         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00866              mit != involved_IDs_map.end(); mit++ )
00867         {
00868             int sender_proc          = mit->first;
00869             std::vector< int >& eids = mit->second;
00870             int size_buffer          = 4 + total_bytes_per_entity *
00871                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00872             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00873             buffer->reset_ptr( sizeof( int ) );
00874             *( (int*)buffer->mem_ptr ) = size_buffer;  // this is really not necessary, it should receive this too
00875 
00876             // receive the buffer
00877             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00878             if( ierr != 0 ) return MB_FAILURE;
00879 // start copy
00880 #ifdef VERBOSE
00881             std::ofstream dbfile;
00882             std::stringstream outf;
00883             outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt";
00884             dbfile.open( outf.str().c_str() );
00885             dbfile << "recvFrom_" << sender_proc << " on proc  " << rankInJoin << "\n";
00886 #endif
00887 
00888             // copy tag data from buffer->buff_ptr
00889             // data is arranged by tag , and repeat the loop for each entity ()
00890             // maybe it should be arranged by entity now, not by tag (so one loop for entities,
00891             // outside)
00892 
00893             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00894             {
00895                 int eID                                      = *it;
00896                 std::map< int, EntityHandle >::iterator mit2 = gidToHandle.find( eID );
00897                 if( mit2 == gidToHandle.end() )
00898                 {
00899                     std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID
00900                               << "\n";
00901                     return MB_FAILURE;
00902                 }
00903                 EntityHandle eh = mit2->second;
00904                 for( i = 0; i < tag_handles.size(); i++ )
00905                 {
00906                     rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00907 #ifdef VERBOSE
00908                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00909                     double* vals = (double*)( buffer->buff_ptr );
00910                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00911                     {
00912                         dbfile << " " << *vals;
00913                         vals++;
00914                     }
00915                     dbfile << "\n";
00916 #endif
00917                     buffer->buff_ptr += vect_bytes_per_tag[i];
00918                 }
00919             }
00920 
00921             // delete receive buffer
00922             delete buffer;
00923 #ifdef VERBOSE
00924             dbfile.close();
00925 #endif
00926         }
00927     }
00928     else if( graph_type == DOF_BASED )
00929     {
00930         // need to fill up the values for each tag, in the order desired, from the buffer received
00931         //
00932         // get all the tags, for all owned entities, and pack the buffers accordingly
00933         // we do not want to get the tags by entity, it may be too expensive
00934         std::vector< std::vector< double > > valuesTags;
00935         valuesTags.resize( tag_handles.size() );
00936         for( size_t i = 0; i < tag_handles.size(); i++ )
00937         {
00938             int bytes_per_tag;
00939             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00940             valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
00941             // fill the whole array, we will pick up from here
00942             // we will fill this array, using data from received buffer
00943             // rval = mb->tag_get_data(owned, (void*)( &(valuesTags[i][0])) );MB_CHK_ERR ( rval );
00944         }
00945         // now, unpack the data and set the tags
00946         sendReqs.resize( involved_IDs_map.size() );
00947         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00948              mit != involved_IDs_map.end(); mit++ )
00949         {
00950             int sender_proc                     = mit->first;
00951             std::vector< int >& eids            = mit->second;
00952             std::vector< int >& index_in_values = map_index[sender_proc];
00953             std::vector< int >& index_ptr       = map_ptr[sender_proc];  // this is eids.size()+1;
00954             int size_buffer                     = 4 + total_bytes_per_entity *
00955                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00956             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00957             buffer->reset_ptr( sizeof( int ) );
00958 
00959             // receive the buffer
00960             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00961             if( ierr != 0 ) return MB_FAILURE;
00962             // use the values in buffer to populate valuesTag arrays, fill it up!
00963             int j = 0;
00964             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00965             {
00966                 for( size_t i = 0; i < tag_handles.size(); i++ )
00967                 {
00968                     // right now, move just doubles; but it could be any type of tag
00969                     double val = *( (double*)( buffer->buff_ptr ) );
00970                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
00971                     for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
00972                         valuesTags[i][index_in_values[k]] = val;
00973                 }
00974             }
00975             // we are done with the buffer in which we received tags, release / delete it
00976             delete buffer;
00977         }
00978         // now we populated the values for all tags; set now the tags!
00979         for( size_t i = 0; i < tag_handles.size(); i++ )
00980         {
00981             // we will fill this array, using data from received buffer
00982             rval = mb->tag_set_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
00983         }
00984     }
00985     return MB_SUCCESS;
00986 }
/*
 * Fill the sender-side communication graph after coverage: for each receiving
 * task, record the global ids of the covering elements that must be sent to it.
 */
00990 ErrorCode ParCommGraph::settle_send_graph( TupleList& TLcovIDs )
00991 {
00992     // fill involved_IDs_map with data
00993     // will have "receiving proc" and global id of element
00994     int n      = TLcovIDs.get_n();
00995     graph_type = COVERAGE;  // do not rely only on involved_IDs_map.size(); this can be 0 in some cases
00996     for( int i = 0; i < n; i++ )
00997     {
00998         int to_proc      = TLcovIDs.vi_wr[2 * i];
00999         int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
01000         involved_IDs_map[to_proc].push_back( globalIdElem );
01001     }
01002 #ifdef VERBOSE
01003     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
01004          mit++ )
01005     {
01006         std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
01007         for( size_t i = 0; i < mit->second.size(); i++ )
01008         {
01009             std::cout << " " << mit->second[i];
01010         }
01011         std::cout << std::endl;
01012     }
01013 #endif
01014     return MB_SUCCESS;
01015 }
01016 
01017 // this will set involved_IDs_map will store all ids to be received from one sender task
01018 void ParCommGraph::SetReceivingAfterCoverage(
01019     std::map< int, std::set< int > >& idsFromProcs )  // will make sense only on receivers, right now after cov
01020 {
01021     for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
01022     {
01023         int fromProc            = mt->first;
01024         std::set< int >& setIds = mt->second;
01025         involved_IDs_map[fromProc].resize( setIds.size() );
01026         std::vector< int >& listIDs = involved_IDs_map[fromProc];
01027         size_t indx                 = 0;
01028         for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
01029         {
01030             int valueID     = *st;
01031             listIDs[indx++] = valueID;
01032         }
01033     }
01034     graph_type = COVERAGE;
01035     return;
01036 }
01037 //#define VERBOSE
// Settle the DOF-based communication graph for component comp.
// TLBackToComp holds tuples whose entries [3*i+1] = global id and [3*i+2] = task;
// valuesComp holds the tag (dof id) value of every local slot, in slot order.
// Builds, per correspondent task:
//   involved_IDs_map[proc] : the sorted unique global ids exchanged with proc
//   map_ptr[proc]          : CSR-style offsets (size = #ids + 1)
//   map_index[proc]        : local slot indices in valuesComp carrying each id,
//                            addressed via map_ptr (one id may occupy several slots)
void ParCommGraph::settle_comm_by_ids( int comp, TupleList& TLBackToComp, std::vector< int >& valuesComp )
{
    // settle comm graph on comp
    if( rootSender || rootReceiver ) std::cout << " settle comm graph by id on component " << comp << "\n";
    int n = TLBackToComp.get_n();
    // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
    // cases
    // per-task set of the global ids exchanged with that task (sorted, unique)
    std::map< int, std::set< int > > uniqueIDs;

    for( int i = 0; i < n; i++ )
    {
        int to_proc  = TLBackToComp.vi_wr[3 * i + 2];
        int globalId = TLBackToComp.vi_wr[3 * i + 1];
        uniqueIDs[to_proc].insert( globalId );
    }

    // Vector to store element
    // with respective present index
    std::vector< std::pair< int, int > > vp;
    vp.reserve( valuesComp.size() );

    // Inserting element in pair vector
    // to keep track of previous indexes in valuesComp
    for( int i = 0; i < (int)valuesComp.size(); ++i )
    {
        vp.push_back( std::make_pair( valuesComp[i], i ) );
    }
    // Sorting pair vector: after this, equal values (and their original slot
    // indices) are contiguous, so one forward sweep per task suffices below
    sort( vp.begin(), vp.end() );

    // vp[i].first, second

    // count now how many times some value appears in ordered (so in valuesComp)
    for( std::map< int, std::set< int > >::iterator it = uniqueIDs.begin(); it != uniqueIDs.end(); it++ )
    {
        int procId                  = it->first;
        std::set< int >& nums       = it->second;
        std::vector< int >& indx    = map_ptr[procId];
        std::vector< int >& indices = map_index[procId];
        indx.resize( nums.size() + 1 );
        int indexInVp = 0;  // sweep position in the sorted (value, slot) pairs
        int indexVal  = 0;  // position of the current id within this task's set
        indx[0]       = 0;  // start from 0
        // nums is iterated in ascending order, matching the sort order of vp,
        // so indexInVp only ever advances (a merge-style scan)
        for( std::set< int >::iterator sst = nums.begin(); sst != nums.end(); sst++, indexVal++ )
        {
            int val = *sst;
            involved_IDs_map[procId].push_back( val );
            indx[indexVal + 1] = indx[indexVal];
            while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )  // should be equal !
            {
                if( vp[indexInVp].first == val )
                {
                    // record one more local slot holding this global id
                    indx[indexVal + 1]++;
                    indices.push_back( vp[indexInVp].second );
                }
                indexInVp++;
            }
        }
    }
#ifdef VERBOSE
    std::stringstream f1;
    std::ofstream dbfile;
    f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
    dbfile.open( f1.str().c_str() );
    for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
         mit++ )
    {
        int corrTask                = mit->first;
        std::vector< int >& corrIds = mit->second;
        std::vector< int >& indx    = map_ptr[corrTask];
        std::vector< int >& indices = map_index[corrTask];

        dbfile << " towards proc " << corrTask << " \n";
        for( int i = 0; i < (int)corrIds.size(); i++ )
        {
            dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ")  : ";
            for( int j = indx[i]; j < indx[i + 1]; j++ )
                dbfile << indices[j] << " ";
            dbfile << "\n";
        }
        dbfile << " \n";
    }
    dbfile.close();
#endif

    graph_type = DOF_BASED;
    // now we need to fill back and forth information, needed to fill the arrays
    // for example, from spectral to involved_IDs_map, in case we want to send data from
    // spectral to phys
}
01128 //#undef VERBOSE
01129 // new partition calculation
// Compute a new partition of the owned primary cells across the receiver tasks,
// filling split_ranges.  met == 1 uses Zoltan's graph partitioner (and therefore
// first exchanges adjacency info across partition boundaries); met == 2 uses the
// geometric partitioner, which needs no ghost exchange.
// NOTE(review): without MOAB_HAVE_ZOLTAN, split_ranges is left empty and
// MB_SUCCESS is still returned — confirm callers handle that case.
ErrorCode ParCommGraph::compute_partition( ParallelComm* pco, Range& owned, int met )
{
    // we are on a task on sender, and need to compute a new partition;
    // primary cells need to be distributed to nb receivers tasks
    // first, we will use graph partitioner, with zoltan;
    // in the graph that we need to build, the first layer of ghosts is needed;
    // can we avoid that ? For example, we can find out from each boundary edge/face what is the
    // other cell (on the other side), then form the global graph, and call zoltan in parallel met 1
    // would be a geometric partitioner, and met 2 would be a graph partitioner for method 1 we do
    // not need any ghost exchange

    // find first edges that are shared
    if( owned.empty() )
        return MB_SUCCESS;  // nothing to do? empty partition is not allowed, maybe we should return
                            // error?
    Core* mb = (Core*)pco->get_moab();

    double t1, t2, t3;  // timings: input prep (t2-t1) and Zoltan itself (t3-t2)
    t1               = MPI_Wtime();
    int primaryDim   = mb->dimension_from_handle( *owned.rbegin() );
    int interfaceDim = primaryDim - 1;  // should be 1 or 2
    Range sharedEdges;
    ErrorCode rval;

    std::vector< int > shprocs( MAX_SHARING_PROCS );
    std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );

    Tag gidTag = mb->globalId_tag();
    int np;
    unsigned char pstatus;

    // graph edges (local cell gid -> remote cell gid) that cross the current
    // partition boundary, plus the owning task of each remote cell
    std::multimap< int, int > extraGraphEdges;
    // std::map<int, int> adjCellsId;
    std::map< int, int > extraCellsProc;
    // if method is 2, no need to do the exchange for adjacent cells across partition boundary
    // these maps above will be empty for method 2 (geometry)
    if( 1 == met )
    {
        rval = pco->get_shared_entities( /*int other_proc*/ -1, sharedEdges, interfaceDim,
                                         /*const bool iface*/ true );MB_CHK_ERR( rval );

#ifdef VERBOSE
        std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size()
                  << "\n";
#endif
        // find to what processors we need to send the ghost info about the edge
        // first determine the local graph; what elements are adjacent to each cell in owned range
        // cells that are sharing a partition interface edge, are identified first, and form a map
        TupleList TLe;                                     // tuple list for cells
        TLe.initialize( 2, 0, 1, 0, sharedEdges.size() );  // send to, id of adj cell, remote edge
        TLe.enableWriteAccess();

        std::map< EntityHandle, int > edgeToCell;  // from local boundary edge to adjacent cell id
        // will be changed after
        for( Range::iterator eit = sharedEdges.begin(); eit != sharedEdges.end(); eit++ )
        {
            EntityHandle edge = *eit;
            // get the adjacent cell
            Range adjEnts;
            rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval );
            if( adjEnts.size() > 0 )
            {
                // only the first adjacent cell is used; on a partition interface
                // there is exactly one local cell per shared edge/face
                EntityHandle adjCell = adjEnts[0];
                int gid;
                rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval );
                rval = pco->get_sharing_data( edge, &shprocs[0], &shhandles[0], pstatus, np );MB_CHK_ERR( rval );
                int n                = TLe.get_n();
                TLe.vi_wr[2 * n]     = shprocs[0];
                TLe.vi_wr[2 * n + 1] = gid;
                TLe.vul_wr[n]        = shhandles[0];  // the remote edge corresponding to shared edge
                edgeToCell[edge]     = gid;           // store the map between edge and local id of adj cell
                TLe.inc_n();
            }
        }

#ifdef VERBOSE
        std::stringstream ff2;
        ff2 << "TLe_" << pco->rank() << ".txt";
        TLe.print_to_file( ff2.str().c_str() );
#endif
        // send the data to the other processors:
        ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLe, 0 );
        // on receiver side, each local edge will have the remote cell adjacent to it!

        int ne = TLe.get_n();
        for( int i = 0; i < ne; i++ )
        {
            int sharedProc         = TLe.vi_rd[2 * i];       // this info is coming from here, originally
            int remoteCellID       = TLe.vi_rd[2 * i + 1];   // this is the id of the remote cell, on sharedProc
            EntityHandle localCell = TLe.vul_rd[i];          // this is now local edge/face on this proc
            int localCellId        = edgeToCell[localCell];  // this is the local cell  adjacent to edge/face
            // now, we will need to add to the graph the pair <localCellId, remoteCellID>
            std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID );
            extraGraphEdges.insert( extraAdj );
            // adjCellsId [edgeToCell[localCell]] = remoteCellID;
            extraCellsProc[remoteCellID] = sharedProc;
#ifdef VERBOSE
            std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n";
#endif
        }
    }
    t2 = MPI_Wtime();
    if( rootSender ) std::cout << " time preparing the input for Zoltan:" << t2 - t1 << " seconds. \n";
        // so adj cells ids; need to call zoltan for parallel partition
#ifdef MOAB_HAVE_ZOLTAN
    ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mb );
    if( 1 <= met )  //  partition in zoltan, either graph or geometric partitioner
    {
        std::map< int, Range > distribution;  // how to distribute owned elements by processors in receiving groups
        // in how many tasks do we want to be distributed?
        int numNewPartitions = (int)receiverTasks.size();
        Range primaryCells   = owned.subset_by_dimension( primaryDim );
        rval = mbZTool->partition_owned_cells( primaryCells, pco, extraGraphEdges, extraCellsProc, numNewPartitions,
                                               distribution, met );MB_CHK_ERR( rval );
        // map each Zoltan part index to the global rank of the receiver task
        for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ )
        {
            int part_index = mit->first;
            assert( part_index < numNewPartitions );
            split_ranges[receiverTasks[part_index]] = mit->second;
        }
    }
    // delete now the partitioner
    delete mbZTool;
#endif
    t3 = MPI_Wtime();
    if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n";
    return MB_SUCCESS;
}
01258 
01259 // after map read, we need to know what entities we need to send to receiver
01260 ErrorCode ParCommGraph::set_split_ranges( int comp,
01261                                           TupleList& TLBackToComp1,
01262                                           std::vector< int >& valuesComp1,
01263                                           int lenTag,
01264                                           Range& ents_of_interest,
01265                                           int /*type*/ )
01266 {
01267     // settle split_ranges // same role as partitioning
01268     if( rootSender ) std::cout << " find split_ranges on component " << comp << "  according to read map \n";
01269     // Vector to store element
01270     // with respective present index
01271     int n = TLBackToComp1.get_n();
01272     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
01273     // cases
01274     std::map< int, std::set< int > > uniqueIDs;
01275 
01276     for( int i = 0; i < n; i++ )
01277     {
01278         int to_proc  = TLBackToComp1.vi_wr[3 * i + 2];
01279         int globalId = TLBackToComp1.vi_wr[3 * i + 1];
01280         uniqueIDs[to_proc].insert( globalId );
01281     }
01282 
01283     for( int i = 0; i < (int)ents_of_interest.size(); i++ )
01284     {
01285         EntityHandle ent = ents_of_interest[i];
01286         for( int j = 0; j < lenTag; j++ )
01287         {
01288             int marker = valuesComp1[i * lenTag + j];
01289             for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
01290             {
01291                 int proc                = mit->first;
01292                 std::set< int >& setIds = mit->second;
01293                 if( setIds.find( marker ) != setIds.end() )
01294                 {
01295                     split_ranges[proc].insert( ent );
01296                 }
01297             }
01298         }
01299     }
01300 
01301     return MB_SUCCESS;
01302 }
01303 
01304 // new methods to migrate mesh after reading map
01305 ErrorCode ParCommGraph::form_tuples_to_migrate_mesh( Interface* mb,
01306                                                      TupleList& TLv,
01307                                                      TupleList& TLc,
01308                                                      int type,
01309                                                      int lenTagType1 )
01310 {
01311     // we will always need GlobalID tag
01312     Tag gidtag = mb->globalId_tag();
01313     // for Type1, we need GLOBAL_DOFS tag;
01314     Tag gds;
01315     ErrorCode rval;
01316     if( type == 1 )
01317     {
01318         rval = mb->tag_get_handle( "GLOBAL_DOFS", gds );
01319     }
01320     // find vertices to be sent, for each of the split_ranges procs
01321     std::map< int, Range > verts_to_proc;
01322     int numv = 0, numc = 0;
01323     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01324     {
01325         int to_proc = it->first;
01326         Range verts;
01327         if( type != 2 )
01328         {
01329             rval = mb->get_connectivity( it->second, verts );MB_CHK_ERR( rval );
01330             numc += (int)it->second.size();
01331         }
01332         else
01333             verts = it->second;
01334         verts_to_proc[to_proc] = verts;
01335         numv += (int)verts.size();
01336     }
01337     // first vertices:
01338     TLv.initialize( 2, 0, 0, 3, numv );  // to proc, GLOBAL ID, 3 real coordinates
01339     TLv.enableWriteAccess();
01340     // use the global id of vertices for connectivity
01341     for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ )
01342     {
01343         int to_proc  = it->first;
01344         Range& verts = it->second;
01345         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01346         {
01347             EntityHandle v   = *vit;
01348             int n            = TLv.get_n();  // current size of tuple list
01349             TLv.vi_wr[2 * n] = to_proc;      // send to processor
01350 
01351             rval = mb->tag_get_data( gidtag, &v, 1, &( TLv.vi_wr[2 * n + 1] ) );MB_CHK_ERR( rval );
01352             rval = mb->get_coords( &v, 1, &( TLv.vr_wr[3 * n] ) );MB_CHK_ERR( rval );
01353             TLv.inc_n();  // increment tuple list size
01354         }
01355     }
01356     if( type == 2 ) return MB_SUCCESS;  // no need to fill TLc
01357     // to proc, ID cell, gdstag, nbv, id conn,
01358     int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell
01359 
01360     std::vector< int > gdvals;
01361 
01362     TLc.initialize( size_tuple, 0, 0, 0, numc );  // to proc, GLOBAL ID, 3 real coordinates
01363     TLc.enableWriteAccess();
01364     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01365     {
01366         int to_proc  = it->first;
01367         Range& cells = it->second;
01368         for( Range::iterator cit = cells.begin(); cit != cells.end(); ++cit )
01369         {
01370             EntityHandle cell         = *cit;
01371             int n                     = TLc.get_n();  // current size of tuple list
01372             TLc.vi_wr[size_tuple * n] = to_proc;
01373             int current_index         = 2;
01374             rval                      = mb->tag_get_data( gidtag, &cell, 1, &( TLc.vi_wr[size_tuple * n + 1] ) );MB_CHK_ERR( rval );
01375             if( 1 == type )
01376             {
01377                 rval = mb->tag_get_data( gds, &cell, 1, &( TLc.vi_wr[size_tuple * n + current_index] ) );MB_CHK_ERR( rval );
01378                 current_index += lenTagType1;
01379             }
01380             // now get connectivity
01381             const EntityHandle* conn = NULL;
01382             int nnodes               = 0;
01383             rval                     = mb->get_connectivity( cell, conn, nnodes );MB_CHK_ERR( rval );
01384             // fill nnodes:
01385             TLc.vi_wr[size_tuple * n + current_index] = nnodes;
01386             rval = mb->tag_get_data( gidtag, conn, nnodes, &( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval );
01387             TLc.inc_n();  // increment tuple list size
01388         }
01389     }
01390     return MB_SUCCESS;
01391 }
/// Rebuild a local mesh from the tuple lists produced by form_tuples_to_migrate_mesh,
/// after crystal-router routing. Vertices are deduplicated by global id; cells are
/// deduplicated the same way (the same cell may arrive from several processes).
/// Also repopulates split_ranges (keyed here by the ORIGIN process of each entity).
/// \param mb              interface in which to create the entities
/// \param TLv             received vertex tuples (from_proc, gid, xyz)
/// \param TLc             received cell tuples (from_proc, gid, [GLOBAL_DOFS], nnodes, conn gids)
/// \param type            1 = spectral, 2 = point cloud, other (3) = plain cells
/// \param lenTagType1     length of GLOBAL_DOFS tag (used only for type 1)
/// \param fset            file set receiving the new entities
/// \param primary_ents    output: the created primary entities (vertices for type 2, cells otherwise)
/// \param values_entities output: per-entity marker values (GLOBAL_DOFS for type 1, global ids otherwise)
ErrorCode ParCommGraph::form_mesh_from_tuples( Interface* mb,
                                               TupleList& TLv,
                                               TupleList& TLc,
                                               int type,
                                               int lenTagType1,
                                               EntityHandle fset,
                                               Range& primary_ents,
                                               std::vector< int >& values_entities )
{
    // might need to fill also the split_range things
    // we will always need GlobalID tag
    Tag gidtag = mb->globalId_tag();
    // for Type1, we need GLOBAL_DOFS tag;
    Tag gds;
    ErrorCode rval;
    std::vector< int > def_val( lenTagType1, 0 );
    if( type == 1 )
    {
        // we may need to create this tag
        rval = mb->tag_get_handle( "GLOBAL_DOFS", lenTagType1, MB_TYPE_INTEGER, gds, MB_TAG_CREAT | MB_TAG_DENSE,
                                   &def_val[0] );MB_CHK_ERR( rval );
    }

    // map global id -> created vertex handle, used both for dedup and for resolving cell connectivity
    std::map< int, EntityHandle > vertexMap;  //
    Range verts;
    // always form vertices and add them to the fset;
    int n = TLv.get_n();
    EntityHandle vertex;
    for( int i = 0; i < n; i++ )
    {
        int gid = TLv.vi_rd[2 * i + 1];
        if( vertexMap.find( gid ) == vertexMap.end() )
        {
            // need to form this vertex
            rval = mb->create_vertex( &( TLv.vr_rd[3 * i] ), vertex );MB_CHK_ERR( rval );
            vertexMap[gid] = vertex;
            verts.insert( vertex );
            rval = mb->tag_set_data( gidtag, &vertex, 1, &gid );MB_CHK_ERR( rval );
            // if point cloud,
        }
        if( 2 == type )  // point cloud, add it to the split_ranges ?
        {
            // key is the origin process (tuple field 0)
            split_ranges[TLv.vi_rd[2 * i]].insert( vertexMap[gid] );
        }
        // todo : when to add the values_entities ?
    }
    rval = mb->add_entities( fset, verts );MB_CHK_ERR( rval );
    if( 2 == type )
    {
        // point cloud: vertices ARE the primary entities; nothing more to build
        primary_ents = verts;
        values_entities.resize( verts.size() );  // just get the ids of vertices
        rval = mb->tag_get_data( gidtag, verts, &values_entities[0] );MB_CHK_ERR( rval );
        return MB_SUCCESS;
    }

    n              = TLc.get_n();
    int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell

    EntityHandle new_element;
    Range cells;
    std::map< int, EntityHandle > cellMap;  // do not create one if it already exists, maybe from other processes
    for( int i = 0; i < n; i++ )
    {
        int from_proc  = TLc.vi_rd[size_tuple * i];
        int globalIdEl = TLc.vi_rd[size_tuple * i + 1];
        if( cellMap.find( globalIdEl ) == cellMap.end() )  // need to create the cell
        {
            // skip over the optional GLOBAL_DOFS block to reach the nnodes field
            int current_index = 2;
            if( 1 == type ) current_index += lenTagType1;
            int nnodes = TLc.vi_rd[size_tuple * i + current_index];
            std::vector< EntityHandle > conn;
            conn.resize( nnodes );
            for( int j = 0; j < nnodes; j++ )
            {
                // resolve connectivity global ids against the vertices created above
                conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]];
            }
            // pick a 2D element type from the vertex count: <4 tri, ==4 quad, >4 polygon
            // NOTE(review): assumes cells are 2D (surface mesh) — no volume types handled here
            EntityType entType = MBQUAD;
            if( nnodes > 4 ) entType = MBPOLYGON;
            if( nnodes < 4 ) entType = MBTRI;
            rval = mb->create_element( entType, &conn[0], nnodes, new_element );MB_CHK_SET_ERR( rval, "can't create new element " );
            cells.insert( new_element );
            cellMap[globalIdEl] = new_element;
            rval                = mb->tag_set_data( gidtag, &new_element, 1, &globalIdEl );MB_CHK_SET_ERR( rval, "can't set global id tag on cell " );
            if( 1 == type )
            {
                // set the gds tag (GLOBAL_DOFS values start at fixed offset 2 in the tuple)
                rval = mb->tag_set_data( gds, &new_element, 1, &( TLc.vi_rd[size_tuple * i + 2] ) );MB_CHK_SET_ERR( rval, "can't set gds tag on cell " );
            }
        }
        split_ranges[from_proc].insert( cellMap[globalIdEl] );
    }
    rval = mb->add_entities( fset, cells );MB_CHK_ERR( rval );
    primary_ents = cells;
    if( 1 == type )
    {
        values_entities.resize( lenTagType1 * primary_ents.size() );
        rval = mb->tag_get_data( gds, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
    }
    else  // type == 3
    {
        values_entities.resize( primary_ents.size() );  // just get the ids !
        rval = mb->tag_get_data( gidtag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
    }
    return MB_SUCCESS;
}
01498 
01499 // at this moment, each sender task has split_ranges formed;
01500 // we need to aggregate that info and send it to receiver
01501 ErrorCode ParCommGraph::send_graph_partition( ParallelComm* pco, MPI_Comm jcomm )
01502 {
01503     // first, accumulate the info to root of sender; use gatherv
01504     // first, accumulate number of receivers from each sender, to the root receiver
01505     int numberReceivers =
01506         (int)split_ranges.size();            // these are ranges of elements to be sent to each receiver, from this task
01507     int nSenders = (int)senderTasks.size();  //
01508     // this sender will have to send to this many receivers
01509     std::vector< int > displs( 1 );  // displacements for gatherv
01510     std::vector< int > counts( 1 );
01511     if( is_root_sender() )
01512     {
01513         displs.resize( nSenders + 1 );
01514         counts.resize( nSenders );
01515     }
01516 
01517     int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, pco->comm() );
01518     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01519     // compute now displacements
01520     if( is_root_sender() )
01521     {
01522         displs[0] = 0;
01523         for( int k = 0; k < nSenders; k++ )
01524         {
01525             displs[k + 1] = displs[k] + counts[k];
01526         }
01527     }
01528     std::vector< int > buffer;
01529     if( is_root_sender() ) buffer.resize( displs[nSenders] );  // the last one will have the total count now
01530 
01531     std::vector< int > recvs;
01532     for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01533     {
01534         recvs.push_back( mit->first );
01535     }
01536     ierr =
01537         MPI_Gatherv( &recvs[0], numberReceivers, MPI_INT, &buffer[0], &counts[0], &displs[0], MPI_INT, 0, pco->comm() );
01538     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01539 
01540     // now form recv_graph map; points from the
01541     // now form the graph to be sent to the other side; only on root
01542     if( is_root_sender() )
01543     {
01544 #ifdef GRAPH_INFO
01545         std::ofstream dbfileSender;
01546         std::stringstream outf;
01547         outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
01548         dbfileSender.open( outf.str().c_str() );
01549         dbfileSender << " number senders: " << nSenders << "\n";
01550         dbfileSender << " senderRank \treceivers \n";
01551         for( int k = 0; k < nSenders; k++ )
01552         {
01553             int indexInBuff = displs[k];
01554             int senderTask  = senderTasks[k];
01555             dbfileSender << senderTask << "\t\t";
01556             for( int j = 0; j < counts[k]; j++ )
01557             {
01558                 int recvTask = buffer[indexInBuff + j];
01559                 dbfileSender << recvTask << " ";
01560             }
01561             dbfileSender << "\n";
01562         }
01563         dbfileSender.close();
01564 #endif
01565         for( int k = 0; k < nSenders; k++ )
01566         {
01567             int indexInBuff = displs[k];
01568             int senderTask  = senderTasks[k];
01569             for( int j = 0; j < counts[k]; j++ )
01570             {
01571                 int recvTask = buffer[indexInBuff + j];
01572                 recv_graph[recvTask].push_back( senderTask );  // this will be packed and sent to root receiver, with
01573                                                                // nonblocking send
01574             }
01575         }
01576 #ifdef GRAPH_INFO
01577         std::ofstream dbfile;
01578         std::stringstream outf2;
01579         outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
01580         dbfile.open( outf2.str().c_str() );
01581         dbfile << " number receivers: " << recv_graph.size() << "\n";
01582         dbfile << " receiverRank \tsenders \n";
01583         for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
01584         {
01585             int recvTask                = mit->first;
01586             std::vector< int >& senders = mit->second;
01587             dbfile << recvTask << "\t\t";
01588             for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
01589                 dbfile << *vit << " ";
01590             dbfile << "\n";
01591         }
01592         dbfile.close();
01593 #endif
01594         // this is the same as trivial partition
01595         ErrorCode rval = send_graph( jcomm );MB_CHK_ERR( rval );
01596     }
01597 
01598     return MB_SUCCESS;
01599 }
01600 // method to expose local graph info: sender id, receiver id, sizes of elements to send, after or
01601 // before intersection
01602 ErrorCode ParCommGraph::dump_comm_information( std::string prefix, int is_send )
01603 {
01604     //
01605     if( -1 != rankInGroup1 && 1 == is_send )  // it is a sender task
01606     {
01607         std::ofstream dbfile;
01608         std::stringstream outf;
01609         outf << prefix << "_sender_" << rankInGroup1 << "_joint" << rankInJoin << "_type_" << (int)graph_type << ".txt";
01610         dbfile.open( outf.str().c_str() );
01611 
01612         if( graph_type == COVERAGE )
01613         {
01614             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01615                  mit != involved_IDs_map.end(); mit++ )
01616             {
01617                 int receiver_proc        = mit->first;
01618                 std::vector< int >& eids = mit->second;
01619                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01620             }
01621         }
01622         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01623         {
01624             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01625             {
01626                 int receiver_proc = mit->first;
01627                 Range& eids       = mit->second;
01628                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01629             }
01630         }
01631         else if( graph_type == DOF_BASED )  // just after migration, or from computeGraph
01632         {
01633             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01634                  mit != involved_IDs_map.end(); mit++ )
01635             {
01636                 int receiver_proc = mit->first;
01637                 dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n";
01638             }
01639         }
01640         dbfile.close();
01641     }
01642     if( -1 != rankInGroup2 && 0 == is_send )  // it is a receiver task
01643     {
01644         std::ofstream dbfile;
01645         std::stringstream outf;
01646         outf << prefix << "_receiver_" << rankInGroup2 << "_joint" << rankInJoin << "_type_" << (int)graph_type
01647              << ".txt";
01648         dbfile.open( outf.str().c_str() );
01649 
01650         if( graph_type == COVERAGE )
01651         {
01652             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01653                  mit != involved_IDs_map.end(); mit++ )
01654             {
01655                 int sender_proc          = mit->first;
01656                 std::vector< int >& eids = mit->second;
01657                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01658             }
01659         }
01660         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01661         {
01662             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01663             {
01664                 int sender_proc = mit->first;
01665                 Range& eids     = mit->second;
01666                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01667             }
01668         }
01669         else if( graph_type == DOF_BASED )  // just after migration
01670         {
01671             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01672                  mit != involved_IDs_map.end(); mit++ )
01673             {
01674                 int sender_proc = mit->first;
01675                 dbfile << "receiver: " << sender_proc << " size:" << mit->second.size() << "\n";
01676             }
01677         }
01678         dbfile.close();
01679     }
01680     return MB_SUCCESS;
01681 }
01682 }  // namespace moab
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines