MOAB: Mesh Oriented datABase  (version 5.3.1)
ParCommGraph.cpp
00001 /*
00002  * ParCommGraph.cpp
00003  *
00004  */
00005 
00006 #include "moab/ParCommGraph.hpp"
00007 // we need to recompute adjacencies for merging to work
00008 #include "moab/Core.hpp"
00009 #include "AEntityFactory.hpp"
00010 
00011 #ifdef MOAB_HAVE_ZOLTAN
00012 #include "moab/ZoltanPartitioner.hpp"
00013 #endif
00014 
00015 // #define VERBOSE
00016 // #define GRAPH_INFO
00017 
00018 namespace moab
00019 {
00020 ParCommGraph::ParCommGraph( MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2 )
00021     : comm( joincomm ), compid1( coid1 ), compid2( coid2 )
00022 {
00023     // find out the tasks from each group, in the joint communicator
00024     find_group_ranks( group1, comm, senderTasks );
00025     find_group_ranks( group2, comm, receiverTasks );
00026 
00027     rootSender = rootReceiver = false;
00028     rankInGroup1 = rankInGroup2 = rankInJoin = -1;  // not initialized, or not part of the group
00029 
00030     int mpierr = MPI_Group_rank( group1, &rankInGroup1 );
00031     if( MPI_SUCCESS != mpierr || rankInGroup1 == MPI_UNDEFINED ) rankInGroup1 = -1;
00032 
00033     mpierr = MPI_Group_rank( group2, &rankInGroup2 );
00034     if( MPI_SUCCESS != mpierr || rankInGroup2 == MPI_UNDEFINED ) rankInGroup2 = -1;
00035 
00036     mpierr = MPI_Comm_rank( comm, &rankInJoin );
00037     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00038         rankInJoin = -1;
00039 
00040     mpierr = MPI_Comm_size( comm, &joinSize );
00041     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00042         joinSize = -1;
00043 
00044     if( 0 == rankInGroup1 ) rootSender = true;
00045     if( 0 == rankInGroup2 ) rootReceiver = true;
00046     graph_type = INITIAL_MIGRATE;  // 0
00047     comm_graph = NULL;
00048     context_id = -1;
00049     cover_set  = 0;  // refers to nothing yet
00050 }
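As a usage note for the constructor above: the two MPI groups are typically carved out of the joint communicator before a ParCommGraph is built. The following is a minimal, hypothetical sketch (the 50/50 split of MPI_COMM_WORLD and the component ids 1 and 2 are illustrative only, not a MOAB convention), assuming at least two ranks.

#include <vector>
#include <mpi.h>
#include "moab/ParCommGraph.hpp"

// build a ParCommGraph between the first half (senders) and the second half (receivers)
// of MPI_COMM_WORLD; the split and the component ids are illustrative
void example_build_graph()
{
    MPI_Group world_grp, senders_grp, receivers_grp;
    MPI_Comm_group( MPI_COMM_WORLD, &world_grp );

    int size;
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    std::vector< int > first, second;
    for( int r = 0; r < size; r++ )
        ( r < size / 2 ? first : second ).push_back( r );

    MPI_Group_incl( world_grp, (int)first.size(), &first[0], &senders_grp );
    MPI_Group_incl( world_grp, (int)second.size(), &second[0], &receivers_grp );

    moab::ParCommGraph graph( MPI_COMM_WORLD, senders_grp, receivers_grp, 1, 2 );
    // ... use graph.compute_trivial_partition(...), send_graph(...), etc. ...

    MPI_Group_free( &senders_grp );
    MPI_Group_free( &receivers_grp );
    MPI_Group_free( &world_grp );
}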
00051 
00052 // the copy constructor copies only a few basic things; split ranges will not be copied
00053 ParCommGraph::ParCommGraph( const ParCommGraph& src )
00054 {
00055     comm          = src.comm;
00056     senderTasks   = src.senderTasks;    // these are the sender tasks in joint comm
00057     receiverTasks = src.receiverTasks;  // these are all the receiver tasks in joint comm
00058     rootSender    = src.rootSender;
00059     rootReceiver  = src.rootReceiver;
00060     rankInGroup1  = src.rankInGroup1;
00061     rankInGroup2  = src.rankInGroup2;  // group 1 is sender, 2 is receiver
00062     rankInJoin    = src.rankInJoin;
00063     joinSize      = src.joinSize;
00064     compid1       = src.compid1;
00065     compid2       = src.compid2;
00066     comm_graph    = NULL;
00067     graph_type    = src.graph_type;
00068     context_id    = src.context_id;
00069     cover_set     = src.cover_set;
00070     return;
00071 }
00072 
00073 ParCommGraph::~ParCommGraph()
00074 {
00075     // TODO Auto-generated destructor stub
00076 }
00077 
00078 // utility to find out the ranks of the processes of a group, with respect to a joint comm,
00079 // which is guaranteed to span the group;
00080 // it is used locally (in the constructor), but it can also serve as a general utility
00081 void ParCommGraph::find_group_ranks( MPI_Group group, MPI_Comm joincomm, std::vector< int >& ranks )
00082 {
00083     MPI_Group global_grp;
00084     MPI_Comm_group( joincomm, &global_grp );
00085 
00086     int grp_size;
00087 
00088     MPI_Group_size( group, &grp_size );
00089     std::vector< int > rks( grp_size );
00090     ranks.resize( grp_size );
00091 
00092     for( int i = 0; i < grp_size; i++ )
00093         rks[i] = i;
00094 
00095     MPI_Group_translate_ranks( group, grp_size, &rks[0], global_grp, &ranks[0] );
00096     MPI_Group_free( &global_grp );
00097     return;
00098 }
00099 
00100 ErrorCode ParCommGraph::compute_trivial_partition( std::vector< int >& numElemsPerTaskInGroup1 )
00101 {
00102 
00103     recv_graph.clear();
00104     recv_sizes.clear();
00105     sender_graph.clear();
00106     sender_sizes.clear();
00107 
00108     if( numElemsPerTaskInGroup1.size() != senderTasks.size() )
00109         return MB_FAILURE;  // each sender has a number of elements that it owns
00110 
00111     // first find out total number of elements to be sent from all senders
00112     int total_elems = 0;
00113     std::vector< int > accum;
00114     accum.push_back( 0 );
00115 
00116     int num_senders = (int)senderTasks.size();
00117 
00118     for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ )
00119     {
00120         total_elems += numElemsPerTaskInGroup1[k];
00121         accum.push_back( total_elems );
00122     }
00123 
00124     int num_recv = ( (int)receiverTasks.size() );
00125     // in trivial partition, every receiver should get about total_elems/num_receivers elements
00126     int num_per_receiver = (int)( total_elems / num_recv );
00127     int leftover         = total_elems - num_per_receiver * num_recv;
00128 
00129     // so receiver k will receive the interval [starts[k], starts[k+1])
00130     std::vector< int > starts;
00131     starts.resize( num_recv + 1 );
00132     starts[0] = 0;
00133     for( int k = 0; k < num_recv; k++ )
00134     {
00135         starts[k + 1] = starts[k] + num_per_receiver;
00136         if( k < leftover ) starts[k + 1]++;
00137     }
00138 
00139     // each sender will send to a number of receivers, based on how the
00140     // intervals starts[0:num_recv] and accum[0:num_senders] overlap
00141     int lastUsedReceiverRank = 0;  // first receiver was not treated yet
00142     for( int j = 0; j < num_senders; j++ )
00143     {
00144         // we could start the receiver loop with the latest receiver that received from previous
00145         // sender
00146         for( int k = lastUsedReceiverRank; k < num_recv; k++ )
00147         {
00148             // if overlap:
00149             if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] )
00150             {
00151                 recv_graph[receiverTasks[k]].push_back( senderTasks[j] );
00152                 sender_graph[senderTasks[j]].push_back( receiverTasks[k] );
00153 
00154                 // we still need to determine how large the overlap is
00155                 int sizeOverlap = 1;  // at least 1, for sure
00156                 // case 1: the overlap starts at starts[k]
00157                 if( starts[k] >= accum[j] )  // one end is starts[k]
00158                 {
00159                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00160                         sizeOverlap = accum[j + 1] - starts[k];
00161                     else  //
00162                         sizeOverlap = starts[k + 1] - starts[k];
00163                 }
00164                 else  // one end is accum[j]
00165                 {
00166                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00167                         sizeOverlap = accum[j + 1] - accum[j];
00168                     else
00169                         sizeOverlap = starts[k + 1] - accum[j];
00170                 }
00171                 recv_sizes[receiverTasks[k]].push_back( sizeOverlap );  // basically, task k will receive from
00172                                                                         //   sender j, sizeOverlap elems
00173                 sender_sizes[senderTasks[j]].push_back( sizeOverlap );
00174                 if( starts[k] > accum[j + 1] )
00175                 {
00176                     lastUsedReceiverRank = k - 1;  // so the next k loop will start a little higher; we have
00177                                                    // probably finished with the first few receivers (up
00178                                                    // to receiver lastUsedReceiverRank)
00179                     break;  // break the k loop, we distributed all elements from sender j to some
00180                             // receivers
00181                 }
00182             }
00183         }
00184     }
00185 
00186     return MB_SUCCESS;
00187 }
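To make the interval arithmetic above concrete, here is a small standalone sketch (not MOAB code; all values are made up) that reproduces the trivial partition for two senders owning {3, 5} elements and three receivers. The overlap of the sender slice [accum[j], accum[j+1]) with the receiver slice [starts[k], starts[k+1]) is exactly the sizeOverlap computed by the branches above, written here with min/max.

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    std::vector< int > numElemsPerSender = { 3, 5 };  // elements owned by each sender task
    int num_recv = 3;

    // accum[j] .. accum[j+1] is the global slice owned by sender j
    std::vector< int > accum( 1, 0 );
    int total_elems = 0;
    for( size_t j = 0; j < numElemsPerSender.size(); j++ )
    {
        total_elems += numElemsPerSender[j];
        accum.push_back( total_elems );
    }

    // receiver k gets the global slice [starts[k], starts[k+1])
    int num_per_receiver = total_elems / num_recv;
    int leftover         = total_elems - num_per_receiver * num_recv;
    std::vector< int > starts( num_recv + 1, 0 );
    for( int k = 0; k < num_recv; k++ )
        starts[k + 1] = starts[k] + num_per_receiver + ( k < leftover ? 1 : 0 );

    // the overlap of the two slices is the number of elements sender j sends to receiver k
    for( size_t j = 0; j < numElemsPerSender.size(); j++ )
        for( int k = 0; k < num_recv; k++ )
        {
            int overlap = std::min( accum[j + 1], starts[k + 1] ) - std::max( accum[j], starts[k] );
            if( overlap > 0 )
                std::cout << "sender " << j << " -> receiver " << k << " : " << overlap << " elements\n";
        }
    return 0;
}

With these inputs it reports 3 elements going from sender 0 to receiver 0, 3 from sender 1 to receiver 1, and 2 from sender 1 to receiver 2.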
00188 
00189 ErrorCode ParCommGraph::pack_receivers_graph( std::vector< int >& packed_recv_array )
00190 {
00191     // look at local data to pack the communication graph: each receiver task will
00192     // have to post receives for each sender task that will send data to it; the array is
00193     // communicated to the root receiver, and eventually distributed to the receiver tasks
00194 
00195     /*
00196      * packed_array will have receiver, number of senders, then senders, etc
00197      */
00198     if( recv_graph.size() < receiverTasks.size() )
00199     {
00200         // big problem, we have empty partitions in receive
00201         std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
00202     }
00203     for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
00204     {
00205         int recv                    = it->first;
00206         std::vector< int >& senders = it->second;
00207         packed_recv_array.push_back( recv );
00208         packed_recv_array.push_back( (int)senders.size() );
00209 
00210         for( int k = 0; k < (int)senders.size(); k++ )
00211             packed_recv_array.push_back( senders[k] );
00212     }
00213 
00214     return MB_SUCCESS;
00215 }
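A tiny standalone illustration of the packed layout described above (receiver, number of senders, then the sender list, repeated per receiver); the graph contents are hypothetical and this is not MOAB code.

#include <iostream>
#include <map>
#include <vector>

int main()
{
    // hypothetical graph: receiver task 4 gets data from senders 0 and 1, receiver 5 only from sender 2
    std::map< int, std::vector< int > > recv_graph;
    recv_graph[4] = { 0, 1 };
    recv_graph[5] = { 2 };

    std::vector< int > packed;
    for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); ++it )
    {
        packed.push_back( it->first );               // receiver task
        packed.push_back( (int)it->second.size() );  // how many senders send to it
        packed.insert( packed.end(), it->second.begin(), it->second.end() );
    }

    for( size_t i = 0; i < packed.size(); i++ )
        std::cout << packed[i] << " ";
    std::cout << "\n";  // prints: 4 2 0 1 5 1 2
    return 0;
}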
00216 
00217 ErrorCode ParCommGraph::split_owned_range( int sender_rank, Range& owned )
00218 {
00219     int senderTask                   = senderTasks[sender_rank];
00220     std::vector< int >& distribution = sender_sizes[senderTask];
00221     std::vector< int >& receivers    = sender_graph[senderTask];
00222     if( distribution.size() != receivers.size() )  //
00223         return MB_FAILURE;
00224 
00225     Range current = owned;  // start from the full range, then subtract the pieces
00226     // already assigned, to form the following ranges
00227 
00228     Range rleftover = current;
00229     for( size_t k = 0; k < receivers.size(); k++ )
00230     {
00231         Range newr;
00232         newr.insert( current.begin(), current.begin() + distribution[k] );
00233         split_ranges[receivers[k]] = newr;
00234 
00235         rleftover = subtract( current, newr );
00236         current   = rleftover;
00237     }
00238 
00239     return MB_SUCCESS;
00240 }
00241 
00242 // use the corresponding tasks and sizes (corr_tasks, corr_sizes) for this split
00243 ErrorCode ParCommGraph::split_owned_range( Range& owned )
00244 {
00245     if( corr_tasks.size() != corr_sizes.size() )  //
00246         return MB_FAILURE;
00247 
00248     Range current = owned;  // start from the full range, then subtract the pieces
00249     // already assigned, to form the following ranges
00250 
00251     Range rleftover = current;
00252     for( size_t k = 0; k < corr_tasks.size(); k++ )
00253     {
00254         Range newr;
00255         newr.insert( current.begin(), current.begin() + corr_sizes[k] );
00256         split_ranges[corr_tasks[k]] = newr;
00257 
00258         rleftover = subtract( current, newr );
00259         current   = rleftover;
00260     }
00261 
00262     return MB_SUCCESS;
00263 }
00264 
00265 ErrorCode ParCommGraph::send_graph( MPI_Comm jcomm )
00266 {
00267     if( is_root_sender() )
00268     {
00269         int ierr;
00270         // we need to build a communication graph, because each sender now knows to which receivers
00271         // it will send data, and the receivers need to post receives for each sender that will send
00272         // data to them; gather on rank 0 of the sender comm the global ranks of the senders together with
00273         // their receivers, to build the communication matrix (which receiver receives from which sender)
00274 
00275         std::vector< int > packed_recv_array;
00276         ErrorCode rval = pack_receivers_graph( packed_recv_array );
00277         if( MB_SUCCESS != rval ) return rval;
00278 
00279         int size_pack_array = (int)packed_recv_array.size();
00280         comm_graph          = new int[size_pack_array + 1];
00281         comm_graph[0]       = size_pack_array;
00282         for( int k = 0; k < size_pack_array; k++ )
00283             comm_graph[k + 1] = packed_recv_array[k];
00284         // only one send request is needed now
00285         /// tag 20 sends the packed array; the size is no longer sent in advance (the receiver probes for it)
00286         sendReqs.resize( 1 );
00287         // do not send the size in advance, because we use probe now
00288         /*ierr = MPI_Isend(&comm_graph[0], 1, MPI_INT, receiver(0), 10, jcomm, &sendReqs[0]); // we
00289         have to use global communicator if (ierr!=0) return MB_FAILURE;*/
00290         ierr = MPI_Isend( &comm_graph[1], size_pack_array, MPI_INT, receiver( 0 ), 20, jcomm,
00291                           &sendReqs[0] );  // we have to use global communicator
00292         if( ierr != 0 ) return MB_FAILURE;
00293     }
00294     return MB_SUCCESS;
00295 }
00296 
00297 // pco has access to MOAB too, via get_moab()
00298 // do we need to store "method" as a member variable ?
00299 ErrorCode ParCommGraph::send_mesh_parts( MPI_Comm jcomm, ParallelComm* pco, Range& owned )
00300 {
00301 
00302     ErrorCode rval;
00303     if( split_ranges.empty() )  // in trivial partition
00304     {
00305         rval = split_owned_range( rankInGroup1, owned );
00306         if( rval != MB_SUCCESS ) return rval;
00307         // we know this on the sender side:
00308         corr_tasks = sender_graph[senderTasks[rankInGroup1]];  // copy
00309         corr_sizes = sender_sizes[senderTasks[rankInGroup1]];  // another copy
00310     }
00311 
00312     int indexReq = 0;
00313     int ierr;                             // MPI error
00314     if( is_root_sender() ) indexReq = 1;  // for sendReqs
00315     sendReqs.resize( indexReq + split_ranges.size() );
00316     for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00317     {
00318         int receiver_proc = it->first;
00319         Range ents        = it->second;
00320 
00321         // add necessary vertices too
00322         Range verts;
00323         rval = pco->get_moab()->get_adjacencies( ents, 0, false, verts, Interface::UNION );
00324         if( rval != MB_SUCCESS )
00325         {
00326             std::cout << " can't get adjacencies for entities to send\n";
00327             return rval;
00328         }
00329         ents.merge( verts );
00330         ParallelComm::Buffer* buffer = new ParallelComm::Buffer( ParallelComm::INITIAL_BUFF_SIZE );
00331         buffer->reset_ptr( sizeof( int ) );
00332         rval = pco->pack_buffer( ents, false, true, false, -1, buffer );
00333         if( rval != MB_SUCCESS )
00334         {
00335             std::cout << " can't pack buffer for entities to send\n";
00336             return rval;
00337         }
00338         int size_pack = buffer->get_current_size();
00339 
00340         // TODO there could be an issue with endianness; check !!!!!
00341         // the size of the buffer used to be sent first as an int
00342         /// not anymore: the receiver probes for it
00343         /* ierr = MPI_Isend(buffer->mem_ptr, 1, MPI_INT, receiver_proc, 1, jcomm,
00344          &sendReqs[indexReq]); // we have to use global communicator if (ierr!=0) return MB_FAILURE;
00345          indexReq++;*/
00346 
00347         ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_CHAR, receiver_proc, 2, jcomm,
00348                           &sendReqs[indexReq] );  // we have to use global communicator
00349         if( ierr != 0 ) return MB_FAILURE;
00350         indexReq++;
00351         localSendBuffs.push_back( buffer );
00352     }
00353     return MB_SUCCESS;
00354 }
00355 
00356 // this is called on receiver side
00357 ErrorCode ParCommGraph::receive_comm_graph( MPI_Comm jcomm, ParallelComm* pco, std::vector< int >& pack_array )
00358 {
00359     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
00360     // knows what data to expect
00361     MPI_Comm receive = pco->comm();
00362     int size_pack_array, ierr;
00363     MPI_Status status;
00364     if( rootReceiver )
00365     {
00366         /*
00367          * MPI_Probe(
00368         int source,
00369         int tag,
00370         MPI_Comm comm,
00371         MPI_Status* status)
00372          *
00373          */
00374         ierr = MPI_Probe( sender( 0 ), 20, jcomm, &status );
00375         if( 0 != ierr )
00376         {
00377             std::cout << " MPI_Probe failure: " << ierr << "\n";
00378             return MB_FAILURE;
00379         }
00380         // get the count of data received from the MPI_Status structure
00381         ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array );
00382         if( 0 != ierr )
00383         {
00384             std::cout << " MPI_Get_count failure: " << ierr << "\n";
00385             return MB_FAILURE;
00386         }
00387 #ifdef VERBOSE
00388         std::cout << " receive comm graph size: " << size_pack_array << "\n";
00389 #endif
00390         pack_array.resize( size_pack_array );
00391         ierr = MPI_Recv( &pack_array[0], size_pack_array, MPI_INT, sender( 0 ), 20, jcomm, &status );
00392         if( 0 != ierr ) return MB_FAILURE;
00393 #ifdef VERBOSE
00394         std::cout << " receive comm graph ";
00395         for( int k = 0; k < (int)pack_array.size(); k++ )
00396             std::cout << " " << pack_array[k];
00397         std::cout << "\n";
00398 #endif
00399     }
00400 
00401     // now broadcast this whole array to all receivers, so they know what to expect
00402     ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive );
00403     if( 0 != ierr ) return MB_FAILURE;
00404     pack_array.resize( size_pack_array );
00405     ierr = MPI_Bcast( &pack_array[0], size_pack_array, MPI_INT, 0, receive );
00406     if( 0 != ierr ) return MB_FAILURE;
00407     return MB_SUCCESS;
00408 }
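The size-discovery idiom above (probe for the message, query its length, then receive) also appears in receive_mesh below. Stripped of MOAB specifics, it reduces to the following standalone sketch; the function name is illustrative.

#include <mpi.h>
#include <vector>

// receive an int array of unknown length: probe, query the count, resize, then receive
// (a minimal sketch of the MPI_Probe / MPI_Get_count / MPI_Recv pattern used above)
static int probe_and_receive_ints( MPI_Comm comm, int source, int tag, std::vector< int >& data )
{
    MPI_Status status;
    int ierr = MPI_Probe( source, tag, comm, &status );
    if( MPI_SUCCESS != ierr ) return ierr;

    int count = 0;
    ierr = MPI_Get_count( &status, MPI_INT, &count );  // length of the matched message, in ints
    if( MPI_SUCCESS != ierr ) return ierr;

    data.resize( count );
    return MPI_Recv( data.data(), count, MPI_INT, source, tag, comm, &status );
}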
00409 
00410 ErrorCode ParCommGraph::receive_mesh( MPI_Comm jcomm,
00411                                       ParallelComm* pco,
00412                                       EntityHandle local_set,
00413                                       std::vector< int >& senders_local )
00414 {
00415     ErrorCode rval;
00416     int ierr;
00417     MPI_Status status;
00418     // we also need to fill corresponding mesh info on the other side
00419     corr_tasks = senders_local;
00420     Range newEnts;
00421 
00422     Tag orgSendProcTag;  // a tag set on the received mesh, recording from what
00423                          // task / PE (in the joint communicator) the
00424     // primary element came; this information will be forwarded by the coverage
00425     // mesh
00426     int defaultInt = -1;  // no processor, so it was not migrated from somewhere else
00427     rval           = pco->get_moab()->tag_get_handle( "orig_sending_processor", 1, MB_TYPE_INTEGER, orgSendProcTag,
00428                                             MB_TAG_DENSE | MB_TAG_CREAT, &defaultInt );MB_CHK_SET_ERR( rval, "can't create original sending processor tag" );
00429     if( !senders_local.empty() )
00430     {
00431         for( size_t k = 0; k < senders_local.size(); k++ )
00432         {
00433             int sender1 = senders_local[k];
00434             // first receive the size of the buffer using probe
00435             /*
00436                  * MPI_Probe(
00437                 int source,
00438                 int tag,
00439                 MPI_Comm comm,
00440                 MPI_Status* status)
00441                  *
00442                  */
00443             ierr = MPI_Probe( sender1, 2, jcomm, &status );
00444             if( 0 != ierr )
00445             {
00446                 std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n";
00447                 return MB_FAILURE;
00448             }
00449             // get the count of data received from the MPI_Status structure
00450             int size_pack;
00451             ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack );
00452             if( 0 != ierr )
00453             {
00454                 std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh  " << ierr << "\n";
00455                 return MB_FAILURE;
00456             }
00457 
00458             /* ierr = MPI_Recv (&size_pack, 1, MPI_INT, sender1, 1, jcomm, &status);
00459              if (0!=ierr) return MB_FAILURE;*/
00460             // now resize the buffer, then receive it
00461             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_pack );
00462             // buffer->reserve(size_pack);
00463 
00464             ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_CHAR, sender1, 2, jcomm, &status );
00465             if( 0 != ierr )
00466             {
00467                 std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n";
00468                 return MB_FAILURE;
00469             }
00470             // now unpack the buffer we just received
00471             Range entities;
00472             std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
00473             std::vector< std::vector< int > > L1p;
00474             std::vector< EntityHandle > L2hloc, L2hrem;
00475             std::vector< unsigned int > L2p;
00476 
00477             buffer->reset_ptr( sizeof( int ) );
00478             std::vector< EntityHandle > entities_vec( entities.size() );
00479             std::copy( entities.begin(), entities.end(), entities_vec.begin() );
00480             rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
00481                                        entities_vec );
00482             delete buffer;
00483             if( MB_SUCCESS != rval ) return rval;
00484 
00485             std::copy( entities_vec.begin(), entities_vec.end(), range_inserter( entities ) );
00486             // we have to add them to the local set
00487             rval = pco->get_moab()->add_entities( local_set, entities );
00488             if( MB_SUCCESS != rval ) return rval;
00489             // corr_sizes stores the number of primary entities received from each sender
00490             Range verts              = entities.subset_by_dimension( 0 );
00491             Range local_primary_ents = subtract( entities, verts );
00492             if( local_primary_ents.empty() )
00493             {
00494                 // it is possible that all ents sent were vertices (point cloud)
00495                 // then consider primary entities the vertices
00496                 local_primary_ents = verts;
00497             }
00498             else
00499             {
00500                 // set a tag with the original sender for the primary entity
00501                 // will be used later for coverage mesh
00502                 std::vector< int > orig_senders( local_primary_ents.size(), sender1 );
00503                 rval = pco->get_moab()->tag_set_data( orgSendProcTag, local_primary_ents, &orig_senders[0] );
00504             }
00505             corr_sizes.push_back( (int)local_primary_ents.size() );
00506 
00507             newEnts.merge( entities );
00508             // make these in split ranges
00509             split_ranges[sender1] = local_primary_ents;
00510 
00511 #ifdef VERBOSE
00512             std::ostringstream partial_outFile;
00513 
00514             partial_outFile << "part_send_" << sender1 << "."
00515                             << "recv" << rankInJoin << ".vtk";
00516 
00517             // the mesh contains ghosts too, but they are not part of the mat/neumann sets
00518             // write the file in serial, to see what tags are missing
00519             std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1
00520                       << " entities: " << entities.size() << std::endl;
00521             rval = pco->get_moab()->write_file( partial_outFile.str().c_str(), 0, 0, &local_set,
00522                                                 1 );  // everything on local set received
00523             if( MB_SUCCESS != rval ) return rval;
00524 #endif
00525         }
00526     }
00527     // make sure adjacencies are updated on the new elements
00528 
00529     if( newEnts.empty() )
00530     {
00531         std::cout << " WARNING: this task did not receive any entities \n";
00532     }
00533     // in order for the merging to work, we need to be sure that the adjacencies are updated
00534     // (created)
00535     Range local_verts        = newEnts.subset_by_type( MBVERTEX );
00536     newEnts                  = subtract( newEnts, local_verts );
00537     Core* mb                 = (Core*)pco->get_moab();
00538     AEntityFactory* adj_fact = mb->a_entity_factory();
00539     if( !adj_fact->vert_elem_adjacencies() )
00540         adj_fact->create_vert_elem_adjacencies();
00541     else
00542     {
00543         for( Range::iterator it = newEnts.begin(); it != newEnts.end(); it++ )
00544         {
00545             EntityHandle eh          = *it;
00546             const EntityHandle* conn = NULL;
00547             int num_nodes            = 0;
00548             rval                     = mb->get_connectivity( eh, conn, num_nodes );
00549             if( MB_SUCCESS != rval ) return rval;
00550             adj_fact->notify_create_entity( eh, conn, num_nodes );
00551         }
00552     }
00553 
00554     return MB_SUCCESS;
00555 }
00556 
00557 // VSM: Why is the communicator never used? Remove the argument?
00558 ErrorCode ParCommGraph::release_send_buffers()
00559 {
00560     int ierr, nsize = (int)sendReqs.size();
00561     std::vector< MPI_Status > mult_status;
00562     mult_status.resize( sendReqs.size() );
00563     ierr = MPI_Waitall( nsize, &sendReqs[0], &mult_status[0] );
00564 
00565     if( ierr != 0 ) return MB_FAILURE;
00566     // now we can free all buffers
00567     delete[] comm_graph;
00568     comm_graph = NULL;
00569     std::vector< ParallelComm::Buffer* >::iterator vit;
00570     for( vit = localSendBuffs.begin(); vit != localSendBuffs.end(); ++vit )
00571         delete( *vit );
00572     localSendBuffs.clear();
00573     return MB_SUCCESS;
00574 }
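The reason the buffers are only freed here is the standard nonblocking-send rule: memory handed to MPI_Isend must remain valid until the corresponding request completes. A stripped-down sketch of that lifetime pattern (names are illustrative; MOAB's ParallelComm::Buffer is not used):

#include <mpi.h>
#include <vector>

// post one nonblocking send per destination, then wait for all of them before the buffers go away
static int send_all_then_wait( MPI_Comm comm, const std::vector< int >& dests,
                               std::vector< std::vector< char > >& bufs )
{
    std::vector< MPI_Request > reqs( dests.size() );
    for( size_t i = 0; i < dests.size(); i++ )
    {
        int ierr = MPI_Isend( bufs[i].data(), (int)bufs[i].size(), MPI_CHAR, dests[i], 2, comm, &reqs[i] );
        if( MPI_SUCCESS != ierr ) return ierr;
    }
    // bufs must stay alive until this completes; only afterwards is it safe to free them
    return MPI_Waitall( (int)reqs.size(), reqs.data(), MPI_STATUSES_IGNORE );
}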
00575 
00576 // again, the send buffers will be used, for nonblocking sends;
00577 // should the receives be non-blocking too?
00578 ErrorCode ParCommGraph::send_tag_values( MPI_Comm jcomm,
00579                                          ParallelComm* pco,
00580                                          Range& owned,
00581                                          std::vector< Tag >& tag_handles )
00582 {
00583     // basically, owned.size() needs to be equal to sum(corr_sizes)
00584     // get info about the tag size, type, etc
00585     int ierr;
00586     Core* mb = (Core*)pco->get_moab();
00587     // get info about the tag
00588     //! Get the size of the specified tag in bytes
00589     int total_bytes_per_entity = 0;  // we need to know, to allocate buffers
00590     ErrorCode rval;
00591     std::vector< int > vect_bytes_per_tag;
00592 #ifdef VERBOSE
00593     std::vector< int > tag_sizes;
00594 #endif
00595     for( size_t i = 0; i < tag_handles.size(); i++ )
00596     {
00597         int bytes_per_tag;
00598         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00599         int tag_size1;  // length
00600         rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
00601         if( graph_type == DOF_BASED )
00602             bytes_per_tag = bytes_per_tag / tag_size1;  // we know we have one double per tag, per ID sent;
00603                                                         // could be 8 for double, 4 for int, etc
00604         total_bytes_per_entity += bytes_per_tag;
00605         vect_bytes_per_tag.push_back( bytes_per_tag );
00606 #ifdef VERBOSE
00607         int tag_size;
00608         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00609         tag_sizes.push_back( tag_size );
00610 #endif
00611     }
00612 
00613     int indexReq = 0;
00614     if( graph_type == INITIAL_MIGRATE )  // original send
00615     {
00616         // use the buffers data structure to allocate memory for sending the tags
00617         sendReqs.resize( split_ranges.size() );
00618 
00619         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00620         {
00621             int receiver_proc = it->first;
00622             Range ents        = it->second;  // primary entities, with the tag data
00623             int size_buffer   = 4 + total_bytes_per_entity *
00624                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00625             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00626 
00627             buffer->reset_ptr( sizeof( int ) );
00628             for( size_t i = 0; i < tag_handles.size(); i++ )
00629             {
00630                 // copy tag data to buffer->buff_ptr, and send the buffer (we could have used
00631                 // regular char arrays)
00632                 rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00633                 // advance the buffer pointer
00634                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00635             }
00636             *( (int*)buffer->mem_ptr ) = size_buffer;
00637             // int size_pack = buffer->get_current_size(); // debug check
00638             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00639                               &sendReqs[indexReq] );  // we have to use global communicator
00640             if( ierr != 0 ) return MB_FAILURE;
00641             indexReq++;
00642             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00643         }
00644     }
00645     else if( graph_type == COVERAGE )
00646     {
00647         // we know that we will need to send some tag data in a specific order
00648         // first, get the ids of the local elements, from owned Range; arrange the buffer in order
00649         // of increasing global id
00650         Tag gidTag = mb->globalId_tag();
00651         std::vector< int > gids;
00652         gids.resize( owned.size() );
00653         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00654         std::map< int, EntityHandle > gidToHandle;
00655         size_t i = 0;
00656         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00657         {
00658             EntityHandle eh        = *it;
00659             gidToHandle[gids[i++]] = eh;
00660         }
00661         // now, pack the data and send it
00662         sendReqs.resize( involved_IDs_map.size() );
00663         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00664              mit != involved_IDs_map.end(); mit++ )
00665         {
00666             int receiver_proc        = mit->first;
00667             std::vector< int >& eids = mit->second;
00668             int size_buffer          = 4 + total_bytes_per_entity *
00669                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00670             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00671             buffer->reset_ptr( sizeof( int ) );
00672 #ifdef VERBOSE
00673             std::ofstream dbfile;
00674             std::stringstream outf;
00675             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00676             dbfile.open( outf.str().c_str() );
00677             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00678 #endif
00679             // copy tag data to buffer->buff_ptr, and send the buffer (we could have used regular
00680             // char arrays); pack data by tag, to be consistent with the above, even though we loop
00681             // through the entities for each tag
00682 
00683             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00684             {
00685                 int eID         = *it;
00686                 EntityHandle eh = gidToHandle[eID];
00687                 for( i = 0; i < tag_handles.size(); i++ )
00688                 {
00689                     rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00690 #ifdef VERBOSE
00691                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00692                     double* vals = (double*)( buffer->buff_ptr );
00693                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00694                     {
00695                         dbfile << " " << *vals;
00696                         vals++;
00697                     }
00698                     dbfile << "\n";
00699 #endif
00700                     buffer->buff_ptr += vect_bytes_per_tag[i];
00701                 }
00702             }
00703 
00704 #ifdef VERBOSE
00705             dbfile.close();
00706 #endif
00707             *( (int*)buffer->mem_ptr ) = size_buffer;
00708             // int size_pack = buffer->get_current_size(); // debug check
00709             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00710                               &sendReqs[indexReq] );  // we have to use global communicator
00711             if( ierr != 0 ) return MB_FAILURE;
00712             indexReq++;
00713             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00714         }
00715     }
00716     else if( graph_type == DOF_BASED )
00717     {
00718         // need to fill up the buffer, in the order desired, send it
00719         // get all the tags, for all owned entities, and pack the buffers accordingly
00720         // we do not want to get the tags by entity, it may be too expensive
00721         std::vector< std::vector< double > > valuesTags;
00722         valuesTags.resize( tag_handles.size() );
00723         for( size_t i = 0; i < tag_handles.size(); i++ )
00724         {
00725 
00726             int bytes_per_tag;
00727             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00728             valuesTags[i].resize( owned.size() * bytes_per_tag );
00729             // fill the whole array, we will pick up from here
00730             rval = mb->tag_get_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
00731         }
00732         // now, pack the data and send it
00733         sendReqs.resize( involved_IDs_map.size() );
00734         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00735              mit != involved_IDs_map.end(); mit++ )
00736         {
00737             int receiver_proc                   = mit->first;
00738             std::vector< int >& eids            = mit->second;
00739             std::vector< int >& index_in_values = map_index[receiver_proc];
00740             std::vector< int >& index_ptr       = map_ptr[receiver_proc];  // this is eids.size()+1;
00741             int size_buffer                     = 4 + total_bytes_per_entity *
00742                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00743             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00744             buffer->reset_ptr( sizeof( int ) );
00745 #ifdef VERBOSE
00746             std::ofstream dbfile;
00747             std::stringstream outf;
00748             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00749             dbfile.open( outf.str().c_str() );
00750             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00751 #endif
00752             // copy tag data to buffer->buff_ptr, and send the buffer
00753             // pack data by tag, to be consistent with above
00754             int j = 0;
00755             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00756             {
00757                 int index_in_v = index_in_values[index_ptr[j]];
00758                 for( size_t i = 0; i < tag_handles.size(); i++ )
00759                 {
00760                     // right now, move just doubles; but it could be any type of tag
00761                     *( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
00762                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
00763                 }
00764             }
00765             *( (int*)buffer->mem_ptr ) = size_buffer;
00766             // int size_pack = buffer->get_current_size(); // debug check
00767             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00768                               &sendReqs[indexReq] );  // we have to use global communicator
00769             if( ierr != 0 ) return MB_FAILURE;
00770             indexReq++;
00771             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00772         }
00773     }
00774     return MB_SUCCESS;
00775 }
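The buffers exchanged above all share the same simple layout: a leading int holding the total buffer size, followed by the tag values packed back to back. A standalone sketch of that packing, assuming double-valued tags and a 4-byte int (ParallelComm::Buffer is not used here):

#include <cstring>
#include <vector>

// pack a size header (int) followed by raw double tag values into a char buffer,
// mirroring the layout produced by send_tag_values (illustrative only)
static std::vector< char > pack_tag_buffer( const std::vector< double >& values )
{
    int size_buffer = (int)( sizeof( int ) + values.size() * sizeof( double ) );
    std::vector< char > buf( size_buffer );
    std::memcpy( &buf[0], &size_buffer, sizeof( int ) );  // header: total size in bytes
    if( !values.empty() )
        std::memcpy( &buf[0] + sizeof( int ), &values[0], values.size() * sizeof( double ) );
    return buf;
}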
00776 
00777 ErrorCode ParCommGraph::receive_tag_values( MPI_Comm jcomm,
00778                                             ParallelComm* pco,
00779                                             Range& owned,
00780                                             std::vector< Tag >& tag_handles )
00781 {
00782     // opposite to sending, we will use blocking receives
00783     int ierr;
00784     MPI_Status status;
00785     // basically, owned.size() needs to be equal to sum(corr_sizes)
00786     // get info about the tag size, type, etc
00787     Core* mb = (Core*)pco->get_moab();
00788     // get info about the tag
00789     //! Get the size of the specified tag in bytes
00790     ErrorCode rval;
00791     int total_bytes_per_entity = 0;
00792     std::vector< int > vect_bytes_per_tag;
00793 #ifdef VERBOSE
00794     std::vector< int > tag_sizes;
00795 #endif
00796     for( size_t i = 0; i < tag_handles.size(); i++ )
00797     {
00798         int bytes_per_tag;
00799         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00800         total_bytes_per_entity += bytes_per_tag;
00801         vect_bytes_per_tag.push_back( bytes_per_tag );
00802 #ifdef VERBOSE
00803         int tag_size;
00804         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00805         tag_sizes.push_back( tag_size );
00806 #endif
00807     }
00808 
00809     if( graph_type == INITIAL_MIGRATE )
00810     {
00811         // std::map<int, Range> split_ranges;
00812         // rval = split_owned_range ( owned);MB_CHK_ERR ( rval );
00813 
00814         // use the buffers data structure to allocate memory for receiving the tags
00815         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00816         {
00817             int sender_proc = it->first;
00818             Range ents      = it->second;  // primary entities, with the tag data, we will receive
00819             int size_buffer = 4 + total_bytes_per_entity *
00820                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00821             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00822 
00823             buffer->reset_ptr( sizeof( int ) );
00824 
00825             *( (int*)buffer->mem_ptr ) = size_buffer;
00826             // int size_pack = buffer->get_current_size(); // debug check
00827 
00828             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00829             if( ierr != 0 ) return MB_FAILURE;
00830             // now set the tag
00831             // copy to tag
00832 
00833             for( size_t i = 0; i < tag_handles.size(); i++ )
00834             {
00835                 rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );
00836                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00837             }
00838             delete buffer;  // no need for it afterwards
00839             MB_CHK_ERR( rval );
00840         }
00841     }
00842     else if( graph_type == COVERAGE )  // receive buffer, then extract tag data, in a loop
00843     {
00844         // we know that we will need to receive some tag data in a specific order (by ids stored)
00845         // first, get the ids of the local elements, from owned Range; unpack the buffer in order
00846         Tag gidTag = mb->globalId_tag();
00847         std::vector< int > gids;
00848         gids.resize( owned.size() );
00849         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00850         std::map< int, EntityHandle > gidToHandle;
00851         size_t i = 0;
00852         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00853         {
00854             EntityHandle eh        = *it;
00855             gidToHandle[gids[i++]] = eh;
00856         }
00857         //
00858         // now, unpack the data and set it to the tag
00859         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00860              mit != involved_IDs_map.end(); mit++ )
00861         {
00862             int sender_proc          = mit->first;
00863             std::vector< int >& eids = mit->second;
00864             int size_buffer          = 4 + total_bytes_per_entity *
00865                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00866             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00867             buffer->reset_ptr( sizeof( int ) );
00868             *( (int*)buffer->mem_ptr ) = size_buffer;  // this is really not necessary, it should receive this too
00869 
00870             // receive the buffer
00871             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00872             if( ierr != 0 ) return MB_FAILURE;
00873 // start copy
00874 #ifdef VERBOSE
00875             std::ofstream dbfile;
00876             std::stringstream outf;
00877             outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt";
00878             dbfile.open( outf.str().c_str() );
00879             dbfile << "recvFrom_" << sender_proc << " on proc  " << rankInJoin << "\n";
00880 #endif
00881 
00882             // copy tag data from buffer->buff_ptr
00883             // data is arranged by tag, and the loop is repeated for each entity;
00884             // maybe it should be arranged by entity instead, not by tag (so one outer loop
00885             // over entities)
00886 
00887             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00888             {
00889                 int eID                                     = *it;
00890                 std::map< int, EntityHandle >::iterator mit = gidToHandle.find( eID );
00891                 if( mit == gidToHandle.end() )
00892                 {
00893                     std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID
00894                               << "\n";
00895                     return MB_FAILURE;
00896                 }
00897                 EntityHandle eh = mit->second;
00898                 for( i = 0; i < tag_handles.size(); i++ )
00899                 {
00900                     rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00901 #ifdef VERBOSE
00902                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00903                     double* vals = (double*)( buffer->buff_ptr );
00904                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00905                     {
00906                         dbfile << " " << *vals;
00907                         vals++;
00908                     }
00909                     dbfile << "\n";
00910 #endif
00911                     buffer->buff_ptr += vect_bytes_per_tag[i];
00912                 }
00913             }
00914 
00915             // delete receive buffer
00916             delete buffer;
00917 #ifdef VERBOSE
00918             dbfile.close();
00919 #endif
00920         }
00921     }
00922     else if( graph_type == DOF_BASED )
00923     {
00924         // need to fill up the values for each tag, in the order desired, from the buffer received
00925         //
00926         // get all the tags, for all owned entities, and pack the buffers accordingly
00927         // we do not want to get the tags by entity, it may be too expensive
00928         std::vector< std::vector< double > > valuesTags;
00929         valuesTags.resize( tag_handles.size() );
00930         for( size_t i = 0; i < tag_handles.size(); i++ )
00931         {
00932             int bytes_per_tag;
00933             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00934             valuesTags[i].resize( owned.size() * bytes_per_tag );
00935             // fill the whole array, we will pick up from here
00936             // we will fill this array, using data from received buffer
00937             // rval = mb->tag_get_data(owned, (void*)( &(valuesTags[i][0])) );MB_CHK_ERR ( rval );
00938         }
00939         // now, unpack the data and set the tags
00940         sendReqs.resize( involved_IDs_map.size() );
00941         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00942              mit != involved_IDs_map.end(); mit++ )
00943         {
00944             int sender_proc                     = mit->first;
00945             std::vector< int >& eids            = mit->second;
00946             std::vector< int >& index_in_values = map_index[sender_proc];
00947             std::vector< int >& index_ptr       = map_ptr[sender_proc];  // this is eids.size()+1;
00948             int size_buffer                     = 4 + total_bytes_per_entity *
00949                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00950             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00951             buffer->reset_ptr( sizeof( int ) );
00952 
00953             // receive the buffer
00954             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00955             if( ierr != 0 ) return MB_FAILURE;
00956             // use the values in buffer to populate valuesTag arrays, fill it up!
00957             int j = 0;
00958             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00959             {
00960                 for( size_t i = 0; i < tag_handles.size(); i++ )
00961                 {
00962                     // right now, move just doubles; but it could be any type of tag
00963                     double val = *( (double*)( buffer->buff_ptr ) );
00964                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
00965                     for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
00966                         valuesTags[i][index_in_values[k]] = val;
00967                 }
00968             }
00969             // we are done with the buffer in which we received tags, release / delete it
00970             delete buffer;
00971         }
00972         // now we populated the values for all tags; set now the tags!
00973         for( size_t i = 0; i < tag_handles.size(); i++ )
00974         {
00975             // we will fill this array, using data from received buffer
00976             rval = mb->tag_set_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
00977         }
00978     }
00979     return MB_SUCCESS;
00980 }
00981 /*
00982  * settle the send graph on the sender side: record, for each receiving task, the global ids it will get
00983  */
00984 ErrorCode ParCommGraph::settle_send_graph( TupleList& TLcovIDs )
00985 {
00986     // fill involved_IDs_map with data
00987     // will have "receiving proc" and global id of element
00988     int n      = TLcovIDs.get_n();
00989     graph_type = COVERAGE;  // do not rely only on involved_IDs_map.size(); this can be 0 in some cases
00990     for( int i = 0; i < n; i++ )
00991     {
00992         int to_proc      = TLcovIDs.vi_wr[2 * i];
00993         int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
00994         involved_IDs_map[to_proc].push_back( globalIdElem );
00995     }
00996 #ifdef VERBOSE
00997     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
00998          mit++ )
00999     {
01000         std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
01001         for( size_t i = 0; i < mit->second.size(); i++ )
01002         {
01003             std::cout << " " << mit->second[i];
01004         }
01005         std::cout << std::endl;
01006     }
01007 #endif
01008     return MB_SUCCESS;
01009 }
01010 
01011 // this will set involved_IDs_map, which will store all the ids to be received from each sender task
01012 void ParCommGraph::SetReceivingAfterCoverage(
01013     std::map< int, std::set< int > >& idsFromProcs )  // makes sense only on receivers, right now after coverage
01014 {
01015     for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
01016     {
01017         int fromProc            = mt->first;
01018         std::set< int >& setIds = mt->second;
01019         involved_IDs_map[fromProc].resize( setIds.size() );
01020         std::vector< int >& listIDs = involved_IDs_map[fromProc];
01021         size_t indx                 = 0;
01022         for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
01023         {
01024             int valueID     = *st;
01025             listIDs[indx++] = valueID;
01026         }
01027     }
01028     graph_type = COVERAGE;
01029     return;
01030 }
01031 //#define VERBOSE
01032 void ParCommGraph::settle_comm_by_ids( int comp, TupleList& TLBackToComp, std::vector< int >& valuesComp )
01033 {
01034     // settle comm graph on comp
01035     if( rootSender || rootReceiver ) std::cout << " settle comm graph by id on component " << comp << "\n";
01036     int n = TLBackToComp.get_n();
01037     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
01038     // cases
01039     std::map< int, std::set< int > > uniqueIDs;
01040 
01041     for( int i = 0; i < n; i++ )
01042     {
01043         int to_proc  = TLBackToComp.vi_wr[3 * i + 2];
01044         int globalId = TLBackToComp.vi_wr[3 * i + 1];
01045         uniqueIDs[to_proc].insert( globalId );
01046     }
01047 
01048     // Vector to store element
01049     // with respective present index
01050     std::vector< std::pair< int, int > > vp;
01051     vp.reserve( valuesComp.size() );
01052 
01053     // Inserting element in pair vector
01054     // to keep track of previous indexes in valuesComp
01055     for( int i = 0; i < (int)valuesComp.size(); ++i )
01056     {
01057         vp.push_back( std::make_pair( valuesComp[i], i ) );
01058     }
01059     // Sorting pair vector
01060     sort( vp.begin(), vp.end() );
01061 
01062     // vp[i].first is the value, vp[i].second is its original index in valuesComp
01063 
01064     // count now how many times each value appears in the sorted order (so in valuesComp)
01065     for( std::map< int, std::set< int > >::iterator it = uniqueIDs.begin(); it != uniqueIDs.end(); it++ )
01066     {
01067         int procId                  = it->first;
01068         std::set< int >& nums       = it->second;
01069         std::vector< int >& indx    = map_ptr[procId];
01070         std::vector< int >& indices = map_index[procId];
01071         indx.resize( nums.size() + 1 );
01072         int indexInVp = 0;
01073         int indexVal  = 0;
01074         indx[0]       = 0;  // start from 0
01075         for( std::set< int >::iterator sst = nums.begin(); sst != nums.end(); sst++, indexVal++ )
01076         {
01077             int val = *sst;
01078             involved_IDs_map[procId].push_back( val );
01079             indx[indexVal + 1] = indx[indexVal];
01080             while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )  // should be equal !
01081             {
01082                 if( vp[indexInVp].first == val )
01083                 {
01084                     indx[indexVal + 1]++;
01085                     indices.push_back( vp[indexInVp].second );
01086                 }
01087                 indexInVp++;
01088             }
01089         }
01090     }
01091 #ifdef VERBOSE
01092     std::stringstream f1;
01093     std::ofstream dbfile;
01094     f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
01095     dbfile.open( f1.str().c_str() );
01096     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
01097          mit++ )
01098     {
01099         int corrTask                = mit->first;
01100         std::vector< int >& corrIds = mit->second;
01101         std::vector< int >& indx    = map_ptr[corrTask];
01102         std::vector< int >& indices = map_index[corrTask];
01103 
01104         dbfile << " towards proc " << corrTask << " \n";
01105         for( int i = 0; i < (int)corrIds.size(); i++ )
01106         {
01107             dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ")  : ";
01108             for( int j = indx[i]; j < indx[i + 1]; j++ )
01109                 dbfile << indices[j] << " ";
01110             dbfile << "\n";
01111         }
01112         dbfile << " \n";
01113     }
01114     dbfile.close();
01115 #endif
01116 
01117     graph_type = DOF_BASED;
01118     // now we need to fill back and forth information, needed to fill the arrays
01119     // for example, from spectral to involved_IDs_map, in case we want to send data from
01120     // spectral to phys
01121 }
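The map_ptr and map_index structures built above form a CSR-style lookup per peer task: for the j-th global ID exchanged with that task, map_ptr[j] .. map_ptr[j+1] delimits the slice of map_index holding the positions of that ID inside the local valuesComp array (an ID can occur more than once). A tiny standalone example with made-up values, built with a direct scan instead of the sort used above:

#include <iostream>
#include <vector>

int main()
{
    // hypothetical local DOF ids, in local storage order (note the repeated 12)
    std::vector< int > valuesComp = { 12, 7, 12, 9 };
    // ids exchanged with one (hypothetical) peer task, in increasing order
    std::vector< int > ids = { 7, 12 };

    // CSR-style lookup, playing the roles of map_ptr and map_index
    std::vector< int > index_ptr( 1, 0 ), index_in_values;
    for( size_t j = 0; j < ids.size(); j++ )
    {
        for( int pos = 0; pos < (int)valuesComp.size(); pos++ )
            if( valuesComp[pos] == ids[j] ) index_in_values.push_back( pos );
        index_ptr.push_back( (int)index_in_values.size() );
    }

    // prints "7 -> 1" and "12 -> 0 2": id 7 lives at position 1, id 12 at positions 0 and 2
    for( size_t j = 0; j < ids.size(); j++ )
    {
        std::cout << ids[j] << " ->";
        for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
            std::cout << " " << index_in_values[k];
        std::cout << "\n";
    }
    return 0;
}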
01122 //#undef VERBOSE
01123 // new partition calculation
01124 ErrorCode ParCommGraph::compute_partition( ParallelComm* pco, Range& owned, int met )
01125 {
01126     // we are on a sender task, and need to compute a new partition;
01127     // primary cells need to be distributed to the receiver tasks
01128     // first, we will use a graph partitioner, with Zoltan;
01129     // in the graph that we need to build, the first layer of ghosts is needed;
01130     // can we avoid that? For example, we can find out from each boundary edge/face what the
01131     // other cell (on the other side) is, then form the global graph, and call Zoltan in parallel;
01132     // met 1 would be a geometric partitioner, and met 2 would be a graph partitioner;
01133     // for method 1 we do not need any ghost exchange
01134 
01135     // find first edges that are shared
01136     if( owned.empty() )
01137         return MB_SUCCESS;  // nothing to do? empty partition is not allowed, maybe we should return
01138                             // error?
01139     Core* mb = (Core*)pco->get_moab();
01140 
01141     double t1, t2, t3;
01142     t1               = MPI_Wtime();
01143     int primaryDim   = mb->dimension_from_handle( *owned.rbegin() );
01144     int interfaceDim = primaryDim - 1;  // should be 1 or 2
01145     Range sharedEdges;
01146     ErrorCode rval;
01147 
01148     std::vector< int > shprocs( MAX_SHARING_PROCS );
01149     std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );
01150 
01151     Tag gidTag = mb->globalId_tag();
01152     int np;
01153     unsigned char pstatus;
01154 
01155     std::multimap< int, int > extraGraphEdges;
01156     // std::map<int, int> adjCellsId;
01157     std::map< int, int > extraCellsProc;
01158     // if method is 2, no need to do the exchange for adjacent cells across partition boundary
01159     // these maps above will be empty for method 2 (geometry)
01160     if( 1 == met )
01161     {
01162         rval = pco->get_shared_entities( /*int other_proc*/ -1, sharedEdges, interfaceDim,
01163                                          /*const bool iface*/ true );MB_CHK_ERR( rval );
01164 
01165 #ifdef VERBOSE
01166         std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size()
01167                   << "\n";
01168 #endif
01169         // find to what processors we need to send the ghost info about the edge
01170         // first determine the local graph: what elements are adjacent to each cell in the owned range;
01171         // cells that share a partition interface edge are identified first, and form a map
01172         TupleList TLe;                                     // tuple list for cells
01173         TLe.initialize( 2, 0, 1, 0, sharedEdges.size() );  // send to, id of adj cell, remote edge
01174         TLe.enableWriteAccess();
01175 
01176         std::map< EntityHandle, int > edgeToCell;  // from local boundary edge to the global id of the adjacent cell
01177         // used after the crystal-router exchange to pair the local cell with the remote one
01178         for( Range::iterator eit = sharedEdges.begin(); eit != sharedEdges.end(); eit++ )
01179         {
01180             EntityHandle edge = *eit;
01181             // get the adjacent cell
01182             Range adjEnts;
01183             rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval );
01184             if( adjEnts.size() > 0 )
01185             {
01186                 EntityHandle adjCell = adjEnts[0];
01187                 int gid;
01188                 rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval );
01189                 rval = pco->get_sharing_data( edge, &shprocs[0], &shhandles[0], pstatus, np );MB_CHK_ERR( rval );
01190                 int n                = TLe.get_n();
01191                 TLe.vi_wr[2 * n]     = shprocs[0];
01192                 TLe.vi_wr[2 * n + 1] = gid;
01193                 TLe.vul_wr[n]        = shhandles[0];  // the remote edge corresponding to shared edge
01194                 edgeToCell[edge]     = gid;           // store the map between edge and local id of adj cell
01195                 TLe.inc_n();
01196             }
01197         }
01198 
01199 #ifdef VERBOSE
01200         std::stringstream ff2;
01201         ff2 << "TLe_" << pco->rank() << ".txt";
01202         TLe.print_to_file( ff2.str().c_str() );
01203 #endif
01204         // send the data to the other processors:
01205         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLe, 0 );
01206         // on receiver side, each local edge will have the remote cell adjacent to it!
01207 
01208         int ne = TLe.get_n();
01209         for( int i = 0; i < ne; i++ )
01210         {
01211             int sharedProc         = TLe.vi_rd[2 * i];       // this info is coming from here, originally
01212             int remoteCellID       = TLe.vi_rd[2 * i + 1];   // this is the id of the remote cell, on sharedProc
01213             EntityHandle localCell = TLe.vul_rd[i];          // this is now local edge/face on this proc
01214             int localCellId        = edgeToCell[localCell];  // this is the local cell  adjacent to edge/face
01215             // now, we will need to add to the graph the pair <localCellId, remoteCellID>
01216             std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID );
01217             extraGraphEdges.insert( extraAdj );
01218             // adjCellsId [edgeToCell[localCell]] = remoteCellID;
01219             extraCellsProc[remoteCellID] = sharedProc;
01220 #ifdef VERBOSE
01221             std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n";
01222 #endif
01223         }
01224     }
01225     t2 = MPI_Wtime();
01226     if( rootSender ) std::cout << " time preparing the input for Zoltan: " << t2 - t1 << " seconds. \n";
01227     // the adjacency across the partition boundary is now known; call Zoltan for the parallel partition
01228 #ifdef MOAB_HAVE_ZOLTAN
01229     ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mb );
01230     if( 1 <= met )  //  partition in zoltan, either graph or geometric partitioner
01231     {
01232         std::map< int, Range > distribution;  // how to distribute owned elements by processors in receiving groups
01233         // in how many tasks do we want to be distributed?
01234         int numNewPartitions = (int)receiverTasks.size();
01235         Range primaryCells   = owned.subset_by_dimension( primaryDim );
01236         rval = mbZTool->partition_owned_cells( primaryCells, pco, extraGraphEdges, extraCellsProc, numNewPartitions,
01237                                                distribution, met );MB_CHK_ERR( rval );
01238         for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ )
01239         {
01240             int part_index = mit->first;
01241             assert( part_index < numNewPartitions );
01242             split_ranges[receiverTasks[part_index]] = mit->second;
01243         }
01244     }
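    // illustrative note (editorial): 'distribution' is indexed by the new partition number, while
    // split_ranges is indexed by the receiver's rank in the joint communicator; e.g. with
    // receiverTasks == { 4, 5 }, the entry distribution[1] becomes split_ranges[5]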
01245     // delete now the partitioner
01246     delete mbZTool;
01247 #endif
01248     t3 = MPI_Wtime();
01249     if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n";
01250     return MB_SUCCESS;
01251 }
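// usage sketch (editorial, not part of the library): on a sender task one would typically collect
// the owned primary cells and then request the new partition; 'fileSet' and 'cgraph' are assumed
// to be the application mesh set and an existing ParCommGraph instance, for a surface (2d) mesh:
//     Range owned;
//     ErrorCode rval = pco->get_moab()->get_entities_by_dimension( fileSet, 2, owned );MB_CHK_ERR( rval );
//     rval = cgraph->compute_partition( pco, owned, 1 );MB_CHK_ERR( rval );  // 1 = graph, 2 = geometric
// the resulting split_ranges are then communicated to the receivers with send_graph_partition below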
01252 
01253 // after map read, we need to know what entities we need to send to receiver
01254 ErrorCode ParCommGraph::set_split_ranges( int comp,
01255                                           TupleList& TLBackToComp1,
01256                                           std::vector< int >& valuesComp1,
01257                                           int lenTag,
01258                                           Range& ents_of_interest,
01259                                           int type )
01260 {
01261     // settle split_ranges; it plays the same role as a partitioning
01262     if( rootSender ) std::cout << " find split_ranges on component " << comp << " according to the read map \n";
01263     // group the global IDs received back in TLBackToComp1 by the processor that needs them,
01264     // then mark which local entities of interest carry those IDs
01265     int n = TLBackToComp1.get_n();
01266     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
01267     // cases
01268     std::map< int, std::set< int > > uniqueIDs;
01269 
01270     for( int i = 0; i < n; i++ )
01271     {
01272         int to_proc  = TLBackToComp1.vi_wr[3 * i + 2];
01273         int globalId = TLBackToComp1.vi_wr[3 * i + 1];
01274         uniqueIDs[to_proc].insert( globalId );
01275     }
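    // illustrative note (editorial): uniqueIDs groups, per destination task, the global IDs (markers)
    // that task needs; e.g. uniqueIDs[2] == { 14, 15, 22 } would mean task 2 needs the entities whose
    // tag values include markers 14, 15 and 22; the loop below scans valuesComp1 to find them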
01276 
01277     for( int i = 0; i < (int)ents_of_interest.size(); i++ )
01278     {
01279         EntityHandle ent = ents_of_interest[i];
01280         for( int j = 0; j < lenTag; j++ )
01281         {
01282             int marker = valuesComp1[i * lenTag + j];
01283             for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
01284             {
01285                 int proc                = mit->first;
01286                 std::set< int >& setIds = mit->second;
01287                 if( setIds.find( marker ) != setIds.end() )
01288                 {
01289                     split_ranges[proc].insert( ent );
01290                 }
01291             }
01292         }
01293     }
01294 
01295     return MB_SUCCESS;
01296 }
01297 
01298 // new methods to migrate mesh after reading map
01299 ErrorCode ParCommGraph::form_tuples_to_migrate_mesh( Interface* mb,
01300                                                      TupleList& TLv,
01301                                                      TupleList& TLc,
01302                                                      int type,
01303                                                      int lenTagType1 )
01304 {
01305     // we will always need GlobalID tag
01306     Tag gidtag = mb->globalId_tag();
01307     // for Type1, we need GLOBAL_DOFS tag;
01308     Tag gds;
01309     ErrorCode rval;
01310     if( type == 1 )
01311     {
01312         rval = mb->tag_get_handle( "GLOBAL_DOFS", gds );MB_CHK_ERR( rval );
01313     }
01314     // find vertices to be sent, for each of the split_ranges procs
01315     std::map< int, Range > verts_to_proc;
01316     int numv = 0, numc = 0;
01317     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01318     {
01319         int to_proc = it->first;
01320         Range verts;
01321         if( type != 2 )
01322         {
01323             rval = mb->get_connectivity( it->second, verts );MB_CHK_ERR( rval );
01324             numc += (int)it->second.size();
01325         }
01326         else
01327             verts = it->second;
01328         verts_to_proc[to_proc] = verts;
01329         numv += (int)verts.size();
01330     }
01331     // first vertices:
01332     TLv.initialize( 2, 0, 0, 3, numv );  // to proc, GLOBAL ID, 3 real coordinates
01333     TLv.enableWriteAccess();
01334     // use the global id of vertices for connectivity
01335     for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ )
01336     {
01337         int to_proc  = it->first;
01338         Range& verts = it->second;
01339         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01340         {
01341             EntityHandle v   = *vit;
01342             int n            = TLv.get_n();  // current size of tuple list
01343             TLv.vi_wr[2 * n] = to_proc;      // send to processor
01344 
01345             rval = mb->tag_get_data( gidtag, &v, 1, &( TLv.vi_wr[2 * n + 1] ) );MB_CHK_ERR( rval );
01346             rval = mb->get_coords( &v, 1, &( TLv.vr_wr[3 * n] ) );MB_CHK_ERR( rval );
01347             TLv.inc_n();  // increment tuple list size
01348         }
01349     }
01350     if( type == 2 ) return MB_SUCCESS;  // no need to fill TLc
01351     // to proc, ID cell, gdstag, nbv, id conn,
01352     int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell
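    // worked example (editorial): for type 1 with, say, lenTagType1 == 16 (4x4 GLOBAL_DOFS per
    // spectral element), size_tuple = 2 + 16 + 1 + 10 = 29 integers per cell; for a type without
    // GLOBAL_DOFS, size_tuple = 2 + 0 + 1 + 10 = 13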
01353 
01354     std::vector< int > gdvals;
01355 
01356     TLc.initialize( size_tuple, 0, 0, 0, numc );  // to proc, cell GLOBAL ID, gds tag values (type 1), nbv, vertex global IDs
01357     TLc.enableWriteAccess();
01358     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01359     {
01360         int to_proc  = it->first;
01361         Range& cells = it->second;
01362         for( Range::iterator cit = cells.begin(); cit != cells.end(); ++cit )
01363         {
01364             EntityHandle cell         = *cit;
01365             int n                     = TLc.get_n();  // current size of tuple list
01366             TLc.vi_wr[size_tuple * n] = to_proc;
01367             int current_index         = 2;
01368             rval                      = mb->tag_get_data( gidtag, &cell, 1, &( TLc.vi_wr[size_tuple * n + 1] ) );MB_CHK_ERR( rval );
01369             if( 1 == type )
01370             {
01371                 rval = mb->tag_get_data( gds, &cell, 1, &( TLc.vi_wr[size_tuple * n + current_index] ) );MB_CHK_ERR( rval );
01372                 current_index += lenTagType1;
01373             }
01374             // now get connectivity
01375             const EntityHandle* conn = NULL;
01376             int nnodes               = 0;
01377             rval                     = mb->get_connectivity( cell, conn, nnodes );MB_CHK_ERR( rval );
01378             // fill nnodes:
01379             TLc.vi_wr[size_tuple * n + current_index] = nnodes;
01380             rval = mb->tag_get_data( gidtag, conn, nnodes, &( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval );
01381             TLc.inc_n();  // increment tuple list size
01382         }
01383     }
01384     return MB_SUCCESS;
01385 }
01386 ErrorCode ParCommGraph::form_mesh_from_tuples( Interface* mb,
01387                                                TupleList& TLv,
01388                                                TupleList& TLc,
01389                                                int type,
01390                                                int lenTagType1,
01391                                                EntityHandle fset,
01392                                                Range& primary_ents,
01393                                                std::vector< int >& values_entities )
01394 {
01395     // might need to fill also the split_range things
01396     // we will always need GlobalID tag
01397     Tag gidtag = mb->globalId_tag();
01398     // for Type1, we need GLOBAL_DOFS tag;
01399     Tag gds;
01400     ErrorCode rval;
01401     std::vector< int > def_val( lenTagType1, 0 );
01402     if( type == 1 )
01403     {
01404         // we may need to create this tag
01405         rval = mb->tag_get_handle( "GLOBAL_DOFS", lenTagType1, MB_TYPE_INTEGER, gds, MB_TAG_CREAT | MB_TAG_DENSE,
01406                                    &def_val[0] );MB_CHK_ERR( rval );
01407     }
01408 
01409     std::map< int, EntityHandle > vertexMap;  // from vertex global ID to the newly created local vertex handle
01410     Range verts;
01411     // always form vertices and add them to the fset;
01412     int n = TLv.get_n();
01413     EntityHandle vertex;
01414     for( int i = 0; i < n; i++ )
01415     {
01416         int gid = TLv.vi_rd[2 * i + 1];
01417         if( vertexMap.find( gid ) == vertexMap.end() )
01418         {
01419             // need to form this vertex
01420             rval = mb->create_vertex( &( TLv.vr_rd[3 * i] ), vertex );MB_CHK_ERR( rval );
01421             vertexMap[gid] = vertex;
01422             verts.insert( vertex );
01423             rval = mb->tag_set_data( gidtag, &vertex, 1, &gid );MB_CHK_ERR( rval );
01424             // if point cloud,
01425         }
01426         if( 2 == type )  // point cloud, add it to the split_ranges ?
01427         {
01428             split_ranges[TLv.vi_rd[2 * i]].insert( vertexMap[gid] );
01429         }
01430         // todo : when to add the values_entities ?
01431     }
01432     rval = mb->add_entities( fset, verts );MB_CHK_ERR( rval );
01433     if( 2 == type )
01434     {
01435         primary_ents = verts;
01436         values_entities.resize( verts.size() );  // just get the ids of vertices
01437         rval = mb->tag_get_data( gidtag, verts, &values_entities[0] );MB_CHK_ERR( rval );
01438         return MB_SUCCESS;
01439     }
01440 
01441     n              = TLc.get_n();
01442     int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell
01443 
01444     EntityHandle new_element;
01445     Range cells;
01446     std::map< int, EntityHandle > cellMap;  // do not create a cell twice if it already exists; it may arrive in tuples from several processes
01447     for( int i = 0; i < n; i++ )
01448     {
01449         int from_proc  = TLc.vi_rd[size_tuple * i];
01450         int globalIdEl = TLc.vi_rd[size_tuple * i + 1];
01451         if( cellMap.find( globalIdEl ) == cellMap.end() )  // need to create the cell
01452         {
01453             int current_index = 2;
01454             if( 1 == type ) current_index += lenTagType1;
01455             int nnodes = TLc.vi_rd[size_tuple * i + current_index];
01456             std::vector< EntityHandle > conn;
01457             conn.resize( nnodes );
01458             for( int j = 0; j < nnodes; j++ )
01459             {
01460                 conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]];
01461             }
01462             // choose the element type based on the number of vertices
01463             EntityType entType = MBQUAD;
01464             if( nnodes > 4 ) entType = MBPOLYGON;
01465             if( nnodes < 4 ) entType = MBTRI;
01466             rval = mb->create_element( entType, &conn[0], nnodes, new_element );MB_CHK_SET_ERR( rval, "can't create new element " );
01467             cells.insert( new_element );
01468             cellMap[globalIdEl] = new_element;
01469             rval                = mb->tag_set_data( gidtag, &new_element, 1, &globalIdEl );MB_CHK_SET_ERR( rval, "can't set global id tag on cell " );
01470             if( 1 == type )
01471             {
01472                 // set the gds tag
01473                 rval = mb->tag_set_data( gds, &new_element, 1, &( TLc.vi_rd[size_tuple * i + 2] ) );MB_CHK_SET_ERR( rval, "can't set gds tag on cell " );
01474             }
01475         }
01476         split_ranges[from_proc].insert( cellMap[globalIdEl] );
01477     }
01478     rval = mb->add_entities( fset, cells );MB_CHK_ERR( rval );
01479     primary_ents = cells;
01480     if( 1 == type )
01481     {
01482         values_entities.resize( lenTagType1 * primary_ents.size() );
01483         rval = mb->tag_get_data( gds, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
01484     }
01485     else  // type == 3
01486     {
01487         values_entities.resize( primary_ents.size() );  // just get the ids !
01488         rval = mb->tag_get_data( gidtag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
01489     }
01490     return MB_SUCCESS;
01491 }
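// pairing sketch (editorial, not part of the library): form_tuples_to_migrate_mesh runs on the
// sender side and form_mesh_from_tuples on the receiver side, with a crystal-router transfer of
// the tuple lists over the joint communicator in between; assuming 'cgraph' is this ParCommGraph,
// 'pc_join' a ParallelComm on the joint communicator, and 'fset', 'prims', 'vals' caller data:
//     TupleList TLv, TLc;
//     rval = cgraph->form_tuples_to_migrate_mesh( mb, TLv, TLc, type, lenTagType1 );MB_CHK_ERR( rval );
//     ( pc_join->proc_config().crystal_router() )->gs_transfer( 1, TLv, 0 );
//     if( type != 2 ) ( pc_join->proc_config().crystal_router() )->gs_transfer( 1, TLc, 0 );
//     rval = cgraph->form_mesh_from_tuples( mb, TLv, TLc, type, lenTagType1, fset, prims, vals );MB_CHK_ERR( rval );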
01492 
01493 // at this moment, each sender task has split_ranges formed;
01494 // we need to aggregate that info and send it to receiver
01495 ErrorCode ParCommGraph::send_graph_partition( ParallelComm* pco, MPI_Comm jcomm )
01496 {
01497     // first, accumulate the info at the root of the sender group, using gather / gatherv;
01498     // start by gathering, at the root sender, the number of receivers each sender will send to
01499     int numberReceivers =
01500         (int)split_ranges.size();            // number of receivers this task will send element ranges to
01501     int nSenders = (int)senderTasks.size();  // number of tasks in the sender group
01502     // this sender will have to send its elements to numberReceivers receiver tasks
01503     std::vector< int > displs( 1 );  // displacements for gatherv
01504     std::vector< int > counts( 1 );
01505     if( is_root_sender() )
01506     {
01507         displs.resize( nSenders + 1 );
01508         counts.resize( nSenders );
01509     }
01510 
01511     int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, pco->comm() );
01512     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01513     // compute now displacements
01514     if( is_root_sender() )
01515     {
01516         displs[0] = 0;
01517         for( int k = 0; k < nSenders; k++ )
01518         {
01519             displs[k + 1] = displs[k] + counts[k];
01520         }
01521     }
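    // worked example (editorial): with three senders reporting counts == { 2, 1, 3 }, the root
    // sender computes displs == { 0, 2, 3, 6 }, so the gatherv buffer below holds 6 receiver ranks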
01522     std::vector< int > buffer;
01523     if( is_root_sender() ) buffer.resize( displs[nSenders] );  // the last one will have the total count now
01524 
01525     std::vector< int > recvs;
01526     for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01527     {
01528         recvs.push_back( mit->first );
01529     }
01530     ierr =
01531         MPI_Gatherv( &recvs[0], numberReceivers, MPI_INT, &buffer[0], &counts[0], &displs[0], MPI_INT, 0, pco->comm() );
01532     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01533 
01534     // now form the recv_graph map: for each receiver task, the list of sender tasks that will
01535     // send to it; this is the graph sent to the other side, and it is formed only on the root
01536     if( is_root_sender() )
01537     {
01538 #ifdef GRAPH_INFO
01539         std::ofstream dbfileSender;
01540         std::stringstream outf;
01541         outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
01542         dbfileSender.open( outf.str().c_str() );
01543         dbfileSender << " number senders: " << nSenders << "\n";
01544         dbfileSender << " senderRank \treceivers \n";
01545         for( int k = 0; k < nSenders; k++ )
01546         {
01547             int indexInBuff = displs[k];
01548             int senderTask  = senderTasks[k];
01549             dbfileSender << senderTask << "\t\t";
01550             for( int j = 0; j < counts[k]; j++ )
01551             {
01552                 int recvTask = buffer[indexInBuff + j];
01553                 dbfileSender << recvTask << " ";
01554             }
01555             dbfileSender << "\n";
01556         }
01557         dbfileSender.close();
01558 #endif
01559         for( int k = 0; k < nSenders; k++ )
01560         {
01561             int indexInBuff = displs[k];
01562             int senderTask  = senderTasks[k];
01563             for( int j = 0; j < counts[k]; j++ )
01564             {
01565                 int recvTask = buffer[indexInBuff + j];
01566                 recv_graph[recvTask].push_back( senderTask );  // this will be packed and sent to root receiver, with
01567                                                                // nonblocking send
01568             }
01569         }
01570 #ifdef GRAPH_INFO
01571         std::ofstream dbfile;
01572         std::stringstream outf2;
01573         outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
01574         dbfile.open( outf2.str().c_str() );
01575         dbfile << " number receivers: " << recv_graph.size() << "\n";
01576         dbfile << " receiverRank \tsenders \n";
01577         for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
01578         {
01579             int recvTask                = mit->first;
01580             std::vector< int >& senders = mit->second;
01581             dbfile << recvTask << "\t\t";
01582             for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
01583                 dbfile << *vit << " ";
01584             dbfile << "\n";
01585         }
01586         dbfile.close();
01587 #endif
01588         // from here on this is the same as the trivial partition: send the graph to the receiver root
01589         ErrorCode rval = send_graph( jcomm );MB_CHK_ERR( rval );
01590     }
01591 
01592     return MB_SUCCESS;
01593 }
01594 // method to expose local graph info: sender id, receiver id, sizes of elements to send, after or
01595 // before intersection
01596 ErrorCode ParCommGraph::dump_comm_information( std::string prefix, int is_send )
01597 {
01598     //
01599     if( -1 != rankInGroup1 && 1 == is_send )  // it is a sender task
01600     {
01601         std::ofstream dbfile;
01602         std::stringstream outf;
01603         outf << prefix << "_sender_" << rankInGroup1 << "_joint" << rankInJoin << "_type_" << (int)graph_type << ".txt";
01604         dbfile.open( outf.str().c_str() );
01605 
01606         if( graph_type == COVERAGE )
01607         {
01608             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01609                  mit != involved_IDs_map.end(); mit++ )
01610             {
01611                 int receiver_proc        = mit->first;
01612                 std::vector< int >& eids = mit->second;
01613                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01614             }
01615         }
01616         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01617         {
01618             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01619             {
01620                 int receiver_proc = mit->first;
01621                 Range& eids       = mit->second;
01622                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01623             }
01624         }
01625         else if( graph_type == DOF_BASED )  // just after migration, or from computeGraph
01626         {
01627             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01628                  mit != involved_IDs_map.end(); mit++ )
01629             {
01630                 int receiver_proc = mit->first;
01631                 dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n";
01632             }
01633         }
01634         dbfile.close();
01635     }
01636     if( -1 != rankInGroup2 && 0 == is_send )  // it is a receiver task
01637     {
01638         std::ofstream dbfile;
01639         std::stringstream outf;
01640         outf << prefix << "_receiver_" << rankInGroup2 << "_joint" << rankInJoin << "_type_" << (int)graph_type
01641              << ".txt";
01642         dbfile.open( outf.str().c_str() );
01643 
01644         if( graph_type == COVERAGE )
01645         {
01646             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01647                  mit != involved_IDs_map.end(); mit++ )
01648             {
01649                 int sender_proc          = mit->first;
01650                 std::vector< int >& eids = mit->second;
01651                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01652             }
01653         }
01654         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01655         {
01656             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01657             {
01658                 int sender_proc = mit->first;
01659                 Range& eids     = mit->second;
01660                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01661             }
01662         }
01663         else if( graph_type == DOF_BASED )  // just after migration
01664         {
01665             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01666                  mit != involved_IDs_map.end(); mit++ )
01667             {
01668                 int sender_proc = mit->first;
01669                 dbfile << "sender: " << sender_proc << " size:" << mit->second.size() << "\n";
01670             }
01671         }
01672         dbfile.close();
01673     }
01674     return MB_SUCCESS;
01675 }
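// file-name sketch (editorial): a sender task with rankInGroup1 == 2 and rankInJoin == 5, dumping
// right after migration (graph_type == INITIAL_MIGRATE, which casts to 0) with prefix "migrate",
// would write its information to "migrate_sender_2_joint5_type_0.txt"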
01676 }  // namespace moab