MOAB: Mesh Oriented datABase  (version 5.4.1)
ParCommGraph.cpp
00001 /*
00002  * ParCommGraph.cpp
00003  *
00004  */
00005 
00006 #include "moab/ParCommGraph.hpp"
00007 // we need to recompute adjacencies for merging to work
00008 #include "moab/Core.hpp"
00009 #include "AEntityFactory.hpp"
00010 
00011 #ifdef MOAB_HAVE_ZOLTAN
00012 #include "moab/ZoltanPartitioner.hpp"
00013 #endif
00014 
00015 // #define VERBOSE
00016 // #define GRAPH_INFO
00017 
00018 namespace moab
00019 {
00020 ParCommGraph::ParCommGraph( MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2 )
00021     : comm( joincomm ), compid1( coid1 ), compid2( coid2 )
00022 {
00023     // find out the tasks from each group, in the joint communicator
00024     find_group_ranks( group1, comm, senderTasks );
00025     find_group_ranks( group2, comm, receiverTasks );
00026 
00027     rootSender = rootReceiver = false;
00028     rankInGroup1 = rankInGroup2 = rankInJoin = -1;  // not initialized, or not part of the group
00029 
00030     int mpierr = MPI_Group_rank( group1, &rankInGroup1 );
00031     if( MPI_SUCCESS != mpierr || rankInGroup1 == MPI_UNDEFINED ) rankInGroup1 = -1;
00032 
00033     mpierr = MPI_Group_rank( group2, &rankInGroup2 );
00034     if( MPI_SUCCESS != mpierr || rankInGroup2 == MPI_UNDEFINED ) rankInGroup2 = -1;
00035 
00036     mpierr = MPI_Comm_rank( comm, &rankInJoin );
00037     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00038         rankInJoin = -1;
00039 
00040     mpierr = MPI_Comm_size( comm, &joinSize );
00041     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00042         joinSize = -1;
00043 
00044     if( 0 == rankInGroup1 ) rootSender = true;
00045     if( 0 == rankInGroup2 ) rootReceiver = true;
00046     graph_type = INITIAL_MIGRATE;  // 0
00047     comm_graph = NULL;
00048     context_id = -1;
00049     cover_set  = 0;  // refers to nothing yet
00050 }
00051 
00052 // the copy constructor copies only a few basic members; split ranges are not copied
00053 ParCommGraph::ParCommGraph( const ParCommGraph& src )
00054 {
00055     comm          = src.comm;
00056     senderTasks   = src.senderTasks;    // these are the sender tasks in joint comm
00057     receiverTasks = src.receiverTasks;  // these are all the receiver tasks in joint comm
00058     rootSender    = src.rootSender;
00059     rootReceiver  = src.rootReceiver;
00060     rankInGroup1  = src.rankInGroup1;
00061     rankInGroup2  = src.rankInGroup2;  // group 1 is sender, 2 is receiver
00062     rankInJoin    = src.rankInJoin;
00063     joinSize      = src.joinSize;
00064     compid1       = src.compid1;
00065     compid2       = src.compid2;
00066     comm_graph    = NULL;
00067     graph_type    = src.graph_type;
00068     context_id    = src.context_id;
00069     cover_set     = src.cover_set;
00070     return;
00071 }
00072 
00073 ParCommGraph::~ParCommGraph()
00074 {
00075     // TODO Auto-generated destructor stub
00076 }
00077 
00078 // utility to find the ranks of the processes of a group with respect to a joint communicator
00079 // that is guaranteed to span the group;
00080 // it is used locally (in the constructor), but it can also serve as a general utility
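// For example, if the joint communicator has 5 tasks, group1 spans joint ranks {0,1,2} and group2
// spans {3,4} (illustrative numbers only), then find_group_ranks( group1, joincomm, ranks ) fills
// ranks = {0,1,2}, while for group2 it fills ranks = {3,4}: group-local ranks translated into
// joint-communicator ranks.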
00081 void ParCommGraph::find_group_ranks( MPI_Group group, MPI_Comm joincomm, std::vector< int >& ranks )
00082 {
00083     MPI_Group global_grp;
00084     MPI_Comm_group( joincomm, &global_grp );
00085 
00086     int grp_size;
00087 
00088     MPI_Group_size( group, &grp_size );
00089     std::vector< int > rks( grp_size );
00090     ranks.resize( grp_size );
00091 
00092     for( int i = 0; i < grp_size; i++ )
00093         rks[i] = i;
00094 
00095     MPI_Group_translate_ranks( group, grp_size, &rks[0], global_grp, &ranks[0] );
00096     MPI_Group_free( &global_grp );
00097     return;
00098 }
00099 
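// Worked example (illustrative numbers): with 2 senders owning numElemsPerTaskInGroup1 = {5, 3}
// and 3 receivers, accum = {0, 5, 8}, total_elems = 8, num_per_receiver = 2, leftover = 2, so
// starts = {0, 3, 6, 8}.  Overlapping the two sets of intervals gives:
//   sender 0 -> receiver 0 : 3 elements,  sender 0 -> receiver 1 : 2 elements,
//   sender 1 -> receiver 1 : 1 element,   sender 1 -> receiver 2 : 2 elements,
// which is exactly what recv_graph / recv_sizes and sender_graph / sender_sizes record below.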
00100 ErrorCode ParCommGraph::compute_trivial_partition( std::vector< int >& numElemsPerTaskInGroup1 )
00101 {
00102 
00103     recv_graph.clear();
00104     recv_sizes.clear();
00105     sender_graph.clear();
00106     sender_sizes.clear();
00107 
00108     if( numElemsPerTaskInGroup1.size() != senderTasks.size() )
00109         return MB_FAILURE;  // each sender has a number of elements that it owns
00110 
00111     // first find out total number of elements to be sent from all senders
00112     int total_elems = 0;
00113     std::vector< int > accum;
00114     accum.push_back( 0 );
00115 
00116     int num_senders = (int)senderTasks.size();
00117 
00118     for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ )
00119     {
00120         total_elems += numElemsPerTaskInGroup1[k];
00121         accum.push_back( total_elems );
00122     }
00123 
00124     int num_recv = ( (int)receiverTasks.size() );
00125     // in trivial partition, every receiver should get about total_elems/num_receivers elements
00126     int num_per_receiver = (int)( total_elems / num_recv );
00127     int leftover         = total_elems - num_per_receiver * num_recv;
00128 
00129     // so receiver k will receive  [starts[k], starts[k+1] ) interval
00130     std::vector< int > starts;
00131     starts.resize( num_recv + 1 );
00132     starts[0] = 0;
00133     for( int k = 0; k < num_recv; k++ )
00134     {
00135         starts[k + 1] = starts[k] + num_per_receiver;
00136         if( k < leftover ) starts[k + 1]++;
00137     }
00138 
00139     // each sender will send to a number of receivers, based on how the
00140     // intervals defined by starts[0:num_recv] and accum[0:num_senders] overlap
00141     int lastUsedReceiverRank = 0;  // first receiver was not treated yet
00142     for( int j = 0; j < num_senders; j++ )
00143     {
00144         // we could start the receiver loop with the latest receiver that received from previous
00145         // sender
00146         for( int k = lastUsedReceiverRank; k < num_recv; k++ )
00147         {
00148             // if overlap:
00149             if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] )
00150             {
00151                 recv_graph[receiverTasks[k]].push_back( senderTasks[j] );
00152                 sender_graph[senderTasks[j]].push_back( receiverTasks[k] );
00153 
00154                 // we still need to decide what is the overlap
00155                 int sizeOverlap = 1;  // at least 1, for sure
00156                 // 1
00157                 if( starts[k] >= accum[j] )  // one end is starts[k]
00158                 {
00159                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00160                         sizeOverlap = accum[j + 1] - starts[k];
00161                     else  //
00162                         sizeOverlap = starts[k + 1] - starts[k];
00163                 }
00164                 else  // one end is accum[j]
00165                 {
00166                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00167                         sizeOverlap = accum[j + 1] - accum[j];
00168                     else
00169                         sizeOverlap = starts[k + 1] - accum[j];
00170                 }
00171                 recv_sizes[receiverTasks[k]].push_back( sizeOverlap );  // basically, task k will receive from
00172                                                                         //   sender j, sizeOverlap elems
00173                 sender_sizes[senderTasks[j]].push_back( sizeOverlap );
00174                 if( starts[k] > accum[j + 1] )
00175                 {
00176                     lastUsedReceiverRank = k - 1;  // so next k loop will start a little higher, we
00177                                                    // probably finished with first few receivers (up
00178                                                    // to receiver lastUsedReceiverRank)
00179                     break;  // break the k loop, we distributed all elements from sender j to some
00180                             // receivers
00181                 }
00182             }
00183         }
00184     }
00185 
00186     return MB_SUCCESS;
00187 }
00188 
00189 ErrorCode ParCommGraph::pack_receivers_graph( std::vector< int >& packed_recv_array )
00190 {
00191     // look at the local data and pack the communication graph; each receiver task will
00192     // have to post receives for each sender task that will send data to it; the packed array is
00193     // communicated to the root receiver and eventually distributed to all receiver tasks
00194 
00195     /*
00196      * packed_array will have receiver, number of senders, then senders, etc
00197      */
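    // For example (hypothetical ranks), if recv_graph = { 3 -> {0, 1}, 4 -> {2} }, the packed array
    // becomes { 3, 2, 0, 1,  4, 1, 2 }: receiver rank, number of senders, then the sender ranks.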
00198     if( recv_graph.size() < receiverTasks.size() )
00199     {
00200         // big problem, we have empty partitions in receive
00201         std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
00202     }
00203     for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
00204     {
00205         int recv                    = it->first;
00206         std::vector< int >& senders = it->second;
00207         packed_recv_array.push_back( recv );
00208         packed_recv_array.push_back( (int)senders.size() );
00209 
00210         for( int k = 0; k < (int)senders.size(); k++ )
00211             packed_recv_array.push_back( senders[k] );
00212     }
00213 
00214     return MB_SUCCESS;
00215 }
00216 
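// For example (illustrative sizes): if this sender must send to receivers {3, 4} with sizes {3, 2},
// split_owned_range assigns the first 3 handles of 'owned' to split_ranges[3] and the next 2 to
// split_ranges[4]; the pieces are carved off the front of 'owned' in receiver order.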
00217 ErrorCode ParCommGraph::split_owned_range( int sender_rank, Range& owned )
00218 {
00219     int senderTask                   = senderTasks[sender_rank];
00220     std::vector< int >& distribution = sender_sizes[senderTask];
00221     std::vector< int >& receivers    = sender_graph[senderTask];
00222     if( distribution.size() != receivers.size() )  //
00223         return MB_FAILURE;
00224 
00225     Range current = owned;  // start from the full range; the pieces already assigned are
00226     // subtracted as we build the following ranges
00227 
00228     Range rleftover = current;
00229     for( size_t k = 0; k < receivers.size(); k++ )
00230     {
00231         Range newr;
00232         newr.insert( current.begin(), current.begin() + distribution[k] );
00233         split_ranges[receivers[k]] = newr;
00234 
00235         rleftover = subtract( current, newr );
00236         current   = rleftover;
00237     }
00238 
00239     return MB_SUCCESS;
00240 }
00241 
00242 // use for this the corresponding tasks and sizes
00243 ErrorCode ParCommGraph::split_owned_range( Range& owned )
00244 {
00245     if( corr_tasks.size() != corr_sizes.size() )  //
00246         return MB_FAILURE;
00247 
00248     Range current = owned;  // start from the full range; the pieces already assigned are
00249     // subtracted as we build the following ranges
00250 
00251     Range rleftover = current;
00252     for( size_t k = 0; k < corr_tasks.size(); k++ )
00253     {
00254         Range newr;
00255         newr.insert( current.begin(), current.begin() + corr_sizes[k] );
00256         split_ranges[corr_tasks[k]] = newr;
00257 
00258         rleftover = subtract( current, newr );
00259         current   = rleftover;
00260     }
00261 
00262     return MB_SUCCESS;
00263 }
00264 
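// A note on the message tags used in this file (as coded below): the root sender ships the packed
// communication graph with tag 20, packed mesh buffers travel with tag 2, and tag (field) values
// travel with tag 222; for tags 20 and 2 the receiver discovers the buffer size with
// MPI_Probe / MPI_Get_count, while for tag 222 both sides compute the size from the known counts.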
00265 ErrorCode ParCommGraph::send_graph( MPI_Comm jcomm )
00266 {
00267     if( is_root_sender() )
00268     {
00269         int ierr;
00270         // we need to build a communication graph: each sender now knows to which receivers it
00271         // will send data, and the receivers need to post receives for each sender that will send
00272         // data to them; gather on rank 0 of the sender comm the global ranks of the senders with
00273         // their receivers, building the communication matrix (which receiver receives from which sender)
00274 
00275         std::vector< int > packed_recv_array;
00276         ErrorCode rval = pack_receivers_graph( packed_recv_array );
00277         if( MB_SUCCESS != rval ) return rval;
00278 
00279         int size_pack_array = (int)packed_recv_array.size();
00280         comm_graph          = new int[size_pack_array + 1];
00281         comm_graph[0]       = size_pack_array;
00282         for( int k = 0; k < size_pack_array; k++ )
00283             comm_graph[k + 1] = packed_recv_array[k];
00284         // only one request is needed now:
00285         /// tag 20 sends the packed array; the size is no longer sent with tag 10, since the receiver uses MPI_Probe
00286         sendReqs.resize( 1 );
00287         // do not send the size in advance, because we use probe now
00288         /* ierr = MPI_Isend( &comm_graph[0], 1, MPI_INT, receiver( 0 ), 10, jcomm, &sendReqs[0] );
00289            if( ierr != 0 ) return MB_FAILURE;  // we have to use the global communicator */
00290         ierr = MPI_Isend( &comm_graph[1], size_pack_array, MPI_INT, receiver( 0 ), 20, jcomm,
00291                           &sendReqs[0] );  // we have to use global communicator
00292         if( ierr != 0 ) return MB_FAILURE;
00293     }
00294     return MB_SUCCESS;
00295 }
00296 
00297 // pco gives access to the MOAB instance too, via get_moab()
00298 // do we need to store "method" as a member variable ?
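// Summary of send_mesh_parts (as coded below): for each receiver in split_ranges, collect the
// primary cells plus their adjacent vertices, pack them with ParallelComm::pack_buffer, and post a
// nonblocking MPI_Isend with tag 2 on the joint communicator; the buffers are kept in
// localSendBuffs until release_send_buffers() completes the requests.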
00299 ErrorCode ParCommGraph::send_mesh_parts( MPI_Comm jcomm, ParallelComm* pco, Range& owned )
00300 {
00301 
00302     ErrorCode rval;
00303     if( split_ranges.empty() )  // in trivial partition
00304     {
00305         rval = split_owned_range( rankInGroup1, owned );
00306         if( rval != MB_SUCCESS ) return rval;
00307         // we know this on the sender side:
00308         corr_tasks = sender_graph[senderTasks[rankInGroup1]];  // copy
00309         corr_sizes = sender_sizes[senderTasks[rankInGroup1]];  // another copy
00310     }
00311 
00312     int indexReq = 0;
00313     int ierr;                             // MPI error
00314     if( is_root_sender() ) indexReq = 1;  // for sendReqs
00315     sendReqs.resize( indexReq + split_ranges.size() );
00316     for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00317     {
00318         int receiver_proc = it->first;
00319         Range ents        = it->second;
00320 
00321         // add necessary vertices too
00322         Range verts;
00323         rval = pco->get_moab()->get_adjacencies( ents, 0, false, verts, Interface::UNION );
00324         if( rval != MB_SUCCESS )
00325         {
00326             std::cout << " can't get adjacencies for entities to send\n";
00327             return rval;
00328         }
00329         ents.merge( verts );
00330         ParallelComm::Buffer* buffer = new ParallelComm::Buffer( ParallelComm::INITIAL_BUFF_SIZE );
00331         buffer->reset_ptr( sizeof( int ) );
00332         rval = pco->pack_buffer( ents, false, true, false, -1, buffer );
00333         if( rval != MB_SUCCESS )
00334         {
00335             std::cout << " can't pack buffer for entities to send\n";
00336             return rval;
00337         }
00338         int size_pack = buffer->get_current_size();
00339 
00340         // TODO there could be an issue with endianness; check !!!!!
00341         // the size of the buffer is no longer sent first as a separate int;
00342         /// the receiver uses MPI_Probe / MPI_Get_count instead
00343         /* ierr = MPI_Isend( buffer->mem_ptr, 1, MPI_INT, receiver_proc, 1, jcomm, &sendReqs[indexReq] );
00344            if( ierr != 0 ) return MB_FAILURE;  // we have to use the global communicator
00345            indexReq++; */
00346 
00347         ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_CHAR, receiver_proc, 2, jcomm,
00348                           &sendReqs[indexReq] );  // we have to use global communicator
00349         if( ierr != 0 ) return MB_FAILURE;
00350         indexReq++;
00351         localSendBuffs.push_back( buffer );
00352     }
00353     return MB_SUCCESS;
00354 }
00355 
00356 // this is called on receiver side
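// Summary of the steps below: the root receiver probes for the tag-20 message from sender(0),
// sizes pack_array with MPI_Get_count, receives it, and then broadcasts both the size and the
// array over the receiver-side communicator so every receiver knows which senders to expect.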
00357 ErrorCode ParCommGraph::receive_comm_graph( MPI_Comm jcomm, ParallelComm* pco, std::vector< int >& pack_array )
00358 {
00359     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
00360     // knows what data to expect
00361     MPI_Comm receive = pco->comm();
00362     int size_pack_array, ierr;
00363     MPI_Status status;
00364     if( rootReceiver )
00365     {
00366         /*
00367          * MPI_Probe(
00368         int source,
00369         int tag,
00370         MPI_Comm comm,
00371         MPI_Status* status)
00372          *
00373          */
00374         ierr = MPI_Probe( sender( 0 ), 20, jcomm, &status );
00375         if( 0 != ierr )
00376         {
00377             std::cout << " MPI_Probe failure: " << ierr << "\n";
00378             return MB_FAILURE;
00379         }
00380         // get the count of data received from the MPI_Status structure
00381         ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array );
00382         if( 0 != ierr )
00383         {
00384             std::cout << " MPI_Get_count failure: " << ierr << "\n";
00385             return MB_FAILURE;
00386         }
00387 #ifdef VERBOSE
00388         std::cout << " receive comm graph size: " << size_pack_array << "\n";
00389 #endif
00390         pack_array.resize( size_pack_array );
00391         ierr = MPI_Recv( &pack_array[0], size_pack_array, MPI_INT, sender( 0 ), 20, jcomm, &status );
00392         if( 0 != ierr ) return MB_FAILURE;
00393 #ifdef VERBOSE
00394         std::cout << " receive comm graph ";
00395         for( int k = 0; k < (int)pack_array.size(); k++ )
00396             std::cout << " " << pack_array[k];
00397         std::cout << "\n";
00398 #endif
00399     }
00400 
00401     // now broadcast this whole array to all receivers, so they know what to expect
00402     ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive );
00403     if( 0 != ierr ) return MB_FAILURE;
00404     pack_array.resize( size_pack_array );
00405     ierr = MPI_Bcast( &pack_array[0], size_pack_array, MPI_INT, 0, receive );
00406     if( 0 != ierr ) return MB_FAILURE;
00407     return MB_SUCCESS;
00408 }
00409 
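// Summary of the loop below, for each sender in senders_local: probe the tag-2 message to get its
// size, receive and unpack the mesh buffer, add the entities to local_set, tag the primary
// entities with "orig_sending_processor" (the sender's rank in the joint communicator), and record
// them in split_ranges[sender] / corr_sizes for later tag exchanges.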
00410 ErrorCode ParCommGraph::receive_mesh( MPI_Comm jcomm,
00411                                       ParallelComm* pco,
00412                                       EntityHandle local_set,
00413                                       std::vector< int >& senders_local )
00414 {
00415     ErrorCode rval;
00416     int ierr;
00417     MPI_Status status;
00418     // we also need to fill corresponding mesh info on the other side
00419     corr_tasks = senders_local;
00420     Range newEnts;
00421 
00422     Tag orgSendProcTag;  // this tag is set on the received mesh, with info about from what
00423                          // task / PE (in the joint communicator) the
00424     // primary element came; this will be forwarded by the coverage
00425     // mesh
00426     int defaultInt = -1;  // no processor, so it was not migrated from somewhere else
00427     rval           = pco->get_moab()->tag_get_handle( "orig_sending_processor", 1, MB_TYPE_INTEGER, orgSendProcTag,
00428                                                       MB_TAG_DENSE | MB_TAG_CREAT, &defaultInt );MB_CHK_SET_ERR( rval, "can't create original sending processor tag" );
00429     if( !senders_local.empty() )
00430     {
00431         for( size_t k = 0; k < senders_local.size(); k++ )
00432         {
00433             int sender1 = senders_local[k];
00434             // first receive the size of the buffer using probe
00435             /*
00436                  * MPI_Probe(
00437                 int source,
00438                 int tag,
00439                 MPI_Comm comm,
00440                 MPI_Status* status)
00441                  *
00442                  */
00443             ierr = MPI_Probe( sender1, 2, jcomm, &status );
00444             if( 0 != ierr )
00445             {
00446                 std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n";
00447                 return MB_FAILURE;
00448             }
00449             // get the count of data received from the MPI_Status structure
00450             int size_pack;
00451             ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack );
00452             if( 0 != ierr )
00453             {
00454                 std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh  " << ierr << "\n";
00455                 return MB_FAILURE;
00456             }
00457 
00458             /* ierr = MPI_Recv (&size_pack, 1, MPI_INT, sender1, 1, jcomm, &status);
00459              if (0!=ierr) return MB_FAILURE;*/
00460             // now resize the buffer, then receive it
00461             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_pack );
00462             // buffer->reserve(size_pack);
00463 
00464             ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_CHAR, sender1, 2, jcomm, &status );
00465             if( 0 != ierr )
00466             {
00467                 std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n";
00468                 return MB_FAILURE;
00469             }
00470             // now unpack the buffer we just received
00471             Range entities;
00472             std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
00473             std::vector< std::vector< int > > L1p;
00474             std::vector< EntityHandle > L2hloc, L2hrem;
00475             std::vector< unsigned int > L2p;
00476 
00477             buffer->reset_ptr( sizeof( int ) );
00478             std::vector< EntityHandle > entities_vec( entities.size() );
00479             std::copy( entities.begin(), entities.end(), entities_vec.begin() );
00480             rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
00481                                        entities_vec );
00482             delete buffer;
00483             if( MB_SUCCESS != rval ) return rval;
00484 
00485             std::copy( entities_vec.begin(), entities_vec.end(), range_inserter( entities ) );
00486             // we have to add them to the local set
00487             rval = pco->get_moab()->add_entities( local_set, entities );
00488             if( MB_SUCCESS != rval ) return rval;
00489             // corr_sizes stores the number of primary entities received from each sender
00490             Range verts              = entities.subset_by_dimension( 0 );
00491             Range local_primary_ents = subtract( entities, verts );
00492             if( local_primary_ents.empty() )
00493             {
00494                 // it is possible that all ents sent were vertices (point cloud)
00495                 // then consider primary entities the vertices
00496                 local_primary_ents = verts;
00497             }
00498             else
00499             {
00500                 // set a tag with the original sender for the primary entity
00501                 // will be used later for coverage mesh
00502                 std::vector< int > orig_senders( local_primary_ents.size(), sender1 );
00503                 rval = pco->get_moab()->tag_set_data( orgSendProcTag, local_primary_ents, &orig_senders[0] );
00504             }
00505             corr_sizes.push_back( (int)local_primary_ents.size() );
00506 
00507             newEnts.merge( entities );
00508             // make these in split ranges
00509             split_ranges[sender1] = local_primary_ents;
00510 
00511 #ifdef VERBOSE
00512             std::ostringstream partial_outFile;
00513 
00514             partial_outFile << "part_send_" << sender1 << "."
00515                             << "recv" << rankInJoin << ".vtk";
00516 
00517             // the mesh contains ghosts too, but they are not part of mat/neumann set
00518             // write in serial the file, to see what tags are missing
00519             std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1
00520                       << " entities: " << entities.size() << std::endl;
00521             rval = pco->get_moab()->write_file( partial_outFile.str().c_str(), 0, 0, &local_set,
00522                                                 1 );  // everything on local set received
00523             if( MB_SUCCESS != rval ) return rval;
00524 #endif
00525         }
00526     }
00527     // make sure adjacencies are updated on the new elements
00528 
00529     if( newEnts.empty() )
00530     {
00531         std::cout << " WARNING: this task did not receive any entities \n";
00532     }
00533     // in order for the merging to work, we need to be sure that the adjacencies are updated
00534     // (created)
00535     Range local_verts        = newEnts.subset_by_type( MBVERTEX );
00536     newEnts                  = subtract( newEnts, local_verts );
00537     Core* mb                 = (Core*)pco->get_moab();
00538     AEntityFactory* adj_fact = mb->a_entity_factory();
00539     if( !adj_fact->vert_elem_adjacencies() )
00540         adj_fact->create_vert_elem_adjacencies();
00541     else
00542     {
00543         for( Range::iterator it = newEnts.begin(); it != newEnts.end(); ++it )
00544         {
00545             EntityHandle eh          = *it;
00546             const EntityHandle* conn = NULL;
00547             int num_nodes            = 0;
00548             rval                     = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
00549             adj_fact->notify_create_entity( eh, conn, num_nodes );
00550         }
00551     }
00552 
00553     return MB_SUCCESS;
00554 }
00555 
00556 // VSM: Why is the communicator never used? Remove the argument?
00557 ErrorCode ParCommGraph::release_send_buffers()
00558 {
00559     int ierr, nsize = (int)sendReqs.size();
00560     std::vector< MPI_Status > mult_status;
00561     mult_status.resize( sendReqs.size() );
00562     ierr = MPI_Waitall( nsize, &sendReqs[0], &mult_status[0] );
00563 
00564     if( ierr != 0 ) return MB_FAILURE;
00565     // now we can free all buffers
00566     delete[] comm_graph;
00567     comm_graph = NULL;
00568     std::vector< ParallelComm::Buffer* >::iterator vit;
00569     for( vit = localSendBuffs.begin(); vit != localSendBuffs.end(); ++vit )
00570         delete( *vit );
00571     localSendBuffs.clear();
00572     return MB_SUCCESS;
00573 }
00574 
00575 // again, this will use the send buffers, for nonblocking sends;
00576 // should the receives be non-blocking too?
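// Buffer layout used below (in all three graph_type branches): the first sizeof(int) bytes hold
// size_buffer, followed by raw tag values.  For INITIAL_MIGRATE the values are grouped tag by tag
// over the whole entity range; for COVERAGE and DOF_BASED they are written entity by entity (in
// the order of the ids stored in involved_IDs_map), with the tags of each entity packed consecutively.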
00577 ErrorCode ParCommGraph::send_tag_values( MPI_Comm jcomm,
00578                                          ParallelComm* pco,
00579                                          Range& owned,
00580                                          std::vector< Tag >& tag_handles )
00581 {
00582     // basically, owned.size() needs to be equal to sum(corr_sizes)
00583     // get info about the tag size, type, etc
00584     int ierr;
00585     Core* mb = (Core*)pco->get_moab();
00586     // get info about the tag
00587     //! Get the size of the specified tag in bytes
00588     int total_bytes_per_entity = 0;  // we need to know, to allocate buffers
00589     ErrorCode rval;
00590     std::vector< int > vect_bytes_per_tag;
00591 #ifdef VERBOSE
00592     std::vector< int > tag_sizes;
00593 #endif
00594     for( size_t i = 0; i < tag_handles.size(); i++ )
00595     {
00596         int bytes_per_tag;
00597         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00598         int tag_size1;  // length
00599         rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
00600         if( graph_type == DOF_BASED )
00601             bytes_per_tag = bytes_per_tag / tag_size1;  // we know we have one double per tag, per ID sent;
00602                                                         // could be 8 for double, 4 for int, etc
00603         total_bytes_per_entity += bytes_per_tag;
00604         vect_bytes_per_tag.push_back( bytes_per_tag );
00605 #ifdef VERBOSE
00606         int tag_size;
00607         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00608         tag_sizes.push_back( tag_size );
00609 #endif
00610     }
00611 
00612     int indexReq = 0;
00613     if( graph_type == INITIAL_MIGRATE )  // original send
00614     {
00615         // use the buffers data structure to allocate memory for sending the tags
00616         sendReqs.resize( split_ranges.size() );
00617 
00618         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00619         {
00620             int receiver_proc = it->first;
00621             Range ents        = it->second;  // primary entities, with the tag data
00622             int size_buffer   = 4 + total_bytes_per_entity *
00623                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00624             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00625 
00626             buffer->reset_ptr( sizeof( int ) );
00627             for( size_t i = 0; i < tag_handles.size(); i++ )
00628             {
00629                 // copy tag data to buffer->buff_ptr, and send the buffer (we could have used
00630                 // regular char arrays)
00631                 rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00632                 // advance the buffer pointer
00633                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00634             }
00635             *( (int*)buffer->mem_ptr ) = size_buffer;
00636             // int size_pack = buffer->get_current_size(); // debug check
00637             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00638                               &sendReqs[indexReq] );  // we have to use global communicator
00639             if( ierr != 0 ) return MB_FAILURE;
00640             indexReq++;
00641             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00642         }
00643     }
00644     else if( graph_type == COVERAGE )
00645     {
00646         // we know that we will need to send some tag data in a specific order
00647         // first, get the ids of the local elements, from owned Range; arrange the buffer in order
00648         // of increasing global id
00649         Tag gidTag = mb->globalId_tag();
00650         std::vector< int > gids;
00651         gids.resize( owned.size() );
00652         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00653         std::map< int, EntityHandle > gidToHandle;
00654         size_t i = 0;
00655         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00656         {
00657             EntityHandle eh        = *it;
00658             gidToHandle[gids[i++]] = eh;
00659         }
00660         // now, pack the data and send it
00661         sendReqs.resize( involved_IDs_map.size() );
00662         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00663              mit != involved_IDs_map.end(); mit++ )
00664         {
00665             int receiver_proc        = mit->first;
00666             std::vector< int >& eids = mit->second;
00667             int size_buffer          = 4 + total_bytes_per_entity *
00668                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00669             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00670             buffer->reset_ptr( sizeof( int ) );
00671 #ifdef VERBOSE
00672             std::ofstream dbfile;
00673             std::stringstream outf;
00674             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00675             dbfile.open( outf.str().c_str() );
00676             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00677 #endif
00678             // copy tag data to buffer->buff_ptr, and send the buffer (we could have used regular
00679             // char arrays); pack data by tag, to be consistent with the case above, even though here
00680             // we loop through the entities and, for each entity, through the tags
00681 
00682             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00683             {
00684                 int eID         = *it;
00685                 EntityHandle eh = gidToHandle[eID];
00686                 for( i = 0; i < tag_handles.size(); i++ )
00687                 {
00688                     rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );
00689                     if( rval != MB_SUCCESS )
00690                     {
00691                         delete buffer;  // free parallel comm buffer first
00692 
00693                         MB_SET_ERR( rval, "Tag get data failed" );
00694                     }
00695 #ifdef VERBOSE
00696                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00697                     double* vals = (double*)( buffer->buff_ptr );
00698                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00699                     {
00700                         dbfile << " " << *vals;
00701                         vals++;
00702                     }
00703                     dbfile << "\n";
00704 #endif
00705                     buffer->buff_ptr += vect_bytes_per_tag[i];
00706                 }
00707             }
00708 
00709 #ifdef VERBOSE
00710             dbfile.close();
00711 #endif
00712             *( (int*)buffer->mem_ptr ) = size_buffer;
00713             // int size_pack = buffer->get_current_size(); // debug check
00714             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00715                               &sendReqs[indexReq] );  // we have to use global communicator
00716             if( ierr != 0 ) return MB_FAILURE;
00717             indexReq++;
00718             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00719         }
00720     }
00721     else if( graph_type == DOF_BASED )
00722     {
00723         // need to fill up the buffer, in the order desired, send it
00724         // get all the tags, for all owned entities, and pack the buffers accordingly
00725         // we do not want to get the tags by entity, it may be too expensive
00726         std::vector< std::vector< double > > valuesTags;
00727         valuesTags.resize( tag_handles.size() );
00728         for( size_t i = 0; i < tag_handles.size(); i++ )
00729         {
00730 
00731             int bytes_per_tag;
00732             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00733             valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
00734             // fill the whole array, we will pick up from here
00735             rval = mb->tag_get_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
00736         }
00737         // now, pack the data and send it
00738         sendReqs.resize( involved_IDs_map.size() );
00739         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00740              mit != involved_IDs_map.end(); mit++ )
00741         {
00742             int receiver_proc                   = mit->first;
00743             std::vector< int >& eids            = mit->second;
00744             std::vector< int >& index_in_values = map_index[receiver_proc];
00745             std::vector< int >& index_ptr       = map_ptr[receiver_proc];  // this is eids.size()+1;
00746             int size_buffer                     = 4 + total_bytes_per_entity *
00747                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00748             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00749             buffer->reset_ptr( sizeof( int ) );
00750 #ifdef VERBOSE
00751             std::ofstream dbfile;
00752             std::stringstream outf;
00753             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00754             dbfile.open( outf.str().c_str() );
00755             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00756 #endif
00757             // copy tag data to buffer->buff_ptr, and send the buffer
00758             // pack data by tag, to be consistent with above
00759             int j = 0;
00760             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00761             {
00762                 int index_in_v = index_in_values[index_ptr[j]];
00763                 for( size_t i = 0; i < tag_handles.size(); i++ )
00764                 {
00765                     // right now, move just doubles; but it could be any type of tag
00766                     *( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
00767                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
00768                 }
00769             }
00770             *( (int*)buffer->mem_ptr ) = size_buffer;
00771             // int size_pack = buffer->get_current_size(); // debug check
00772             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00773                               &sendReqs[indexReq] );  // we have to use global communicator
00774             if( ierr != 0 ) return MB_FAILURE;
00775             indexReq++;
00776             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00777         }
00778     }
00779     return MB_SUCCESS;
00780 }
00781 
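// receive_tag_values mirrors send_tag_values with blocking MPI_Recv calls (tag 222) and the same
// buffer layout; for DOF_BASED, each received value is scattered to possibly several positions of
// the local tag array through map_ptr / map_index (one global DOF id may appear more than once
// locally).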
00782 ErrorCode ParCommGraph::receive_tag_values( MPI_Comm jcomm,
00783                                             ParallelComm* pco,
00784                                             Range& owned,
00785                                             std::vector< Tag >& tag_handles )
00786 {
00787     // opposite to sending, we will use blocking receives
00788     int ierr;
00789     MPI_Status status;
00790     // basically, owned.size() needs to be equal to sum(corr_sizes)
00791     // get info about the tag size, type, etc
00792     Core* mb = (Core*)pco->get_moab();
00793     // get info about the tag
00794     //! Get the size of the specified tag in bytes
00795     ErrorCode rval;
00796     int total_bytes_per_entity = 0;
00797     std::vector< int > vect_bytes_per_tag;
00798 #ifdef VERBOSE
00799     std::vector< int > tag_sizes;
00800 #endif
00801     for( size_t i = 0; i < tag_handles.size(); i++ )
00802     {
00803         int bytes_per_tag;
00804         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00805         total_bytes_per_entity += bytes_per_tag;
00806         vect_bytes_per_tag.push_back( bytes_per_tag );
00807 #ifdef VERBOSE
00808         int tag_size;
00809         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00810         tag_sizes.push_back( tag_size );
00811 #endif
00812     }
00813 
00814     if( graph_type == INITIAL_MIGRATE )
00815     {
00816         // std::map<int, Range> split_ranges;
00817         // rval = split_owned_range ( owned);MB_CHK_ERR ( rval );
00818 
00819         // use the buffers data structure to allocate memory for receiving the tags
00820         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00821         {
00822             int sender_proc = it->first;
00823             Range ents      = it->second;  // primary entities, with the tag data, we will receive
00824             int size_buffer = 4 + total_bytes_per_entity *
00825                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00826             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00827 
00828             buffer->reset_ptr( sizeof( int ) );
00829 
00830             *( (int*)buffer->mem_ptr ) = size_buffer;
00831             // int size_pack = buffer->get_current_size(); // debug check
00832 
00833             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00834             if( ierr != 0 ) return MB_FAILURE;
00835             // now set the tag
00836             // copy to tag
00837 
00838             for( size_t i = 0; i < tag_handles.size(); i++ )
00839             {
00840                 rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );
00841                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00842             }
00843             delete buffer;  // no need for it afterwards
00844             MB_CHK_ERR( rval );
00845         }
00846     }
00847     else if( graph_type == COVERAGE )  // receive buffer, then extract tag data, in a loop
00848     {
00849         // we know that we will need to receive some tag data in a specific order (by ids stored)
00850         // first, get the ids of the local elements, from owned Range; unpack the buffer in order
00851         Tag gidTag = mb->globalId_tag();
00852         std::vector< int > gids;
00853         gids.resize( owned.size() );
00854         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00855         std::map< int, EntityHandle > gidToHandle;
00856         size_t i = 0;
00857         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00858         {
00859             EntityHandle eh        = *it;
00860             gidToHandle[gids[i++]] = eh;
00861         }
00862         //
00863         // now, unpack the data and set it to the tag
00864         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00865              mit != involved_IDs_map.end(); mit++ )
00866         {
00867             int sender_proc          = mit->first;
00868             std::vector< int >& eids = mit->second;
00869             int size_buffer          = 4 + total_bytes_per_entity *
00870                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00871             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00872             buffer->reset_ptr( sizeof( int ) );
00873             *( (int*)buffer->mem_ptr ) = size_buffer;  // not strictly necessary; the received buffer overwrites this value anyway
00874 
00875             // receive the buffer
00876             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00877             if( ierr != 0 ) return MB_FAILURE;
00878 // start copy
00879 #ifdef VERBOSE
00880             std::ofstream dbfile;
00881             std::stringstream outf;
00882             outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt";
00883             dbfile.open( outf.str().c_str() );
00884             dbfile << "recvFrom_" << sender_proc << " on proc  " << rankInJoin << "\n";
00885 #endif
00886 
00887             // copy tag data from buffer->buff_ptr
00888             // data is arranged by tag, repeating the loop for each entity
00889             // maybe it should be arranged by entity now, not by tag (so one loop over entities,
00890             // outside)
00891 
00892             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00893             {
00894                 int eID                                      = *it;
00895                 std::map< int, EntityHandle >::iterator mit2 = gidToHandle.find( eID );
00896                 if( mit2 == gidToHandle.end() )
00897                 {
00898                     std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID
00899                               << "\n";
00900                     return MB_FAILURE;
00901                 }
00902                 EntityHandle eh = mit2->second;
00903                 for( i = 0; i < tag_handles.size(); i++ )
00904                 {
00905                     rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00906 #ifdef VERBOSE
00907                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00908                     double* vals = (double*)( buffer->buff_ptr );
00909                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00910                     {
00911                         dbfile << " " << *vals;
00912                         vals++;
00913                     }
00914                     dbfile << "\n";
00915 #endif
00916                     buffer->buff_ptr += vect_bytes_per_tag[i];
00917                 }
00918             }
00919 
00920             // delete receive buffer
00921             delete buffer;
00922 #ifdef VERBOSE
00923             dbfile.close();
00924 #endif
00925         }
00926     }
00927     else if( graph_type == DOF_BASED )
00928     {
00929         // need to fill up the values for each tag, in the order desired, from the buffer received
00930         //
00931         // get all the tags, for all owned entities, and pack the buffers accordingly
00932         // we do not want to get the tags by entity, it may be too expensive
00933         std::vector< std::vector< double > > valuesTags;
00934         valuesTags.resize( tag_handles.size() );
00935         for( size_t i = 0; i < tag_handles.size(); i++ )
00936         {
00937             int bytes_per_tag;
00938             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00939             valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
00940             // fill the whole array, we will pick up from here
00941             // we will fill this array, using data from received buffer
00942             // rval = mb->tag_get_data(owned, (void*)( &(valuesTags[i][0])) );MB_CHK_ERR ( rval );
00943         }
00944         // now, unpack the data and set the tags
00945         sendReqs.resize( involved_IDs_map.size() );
00946         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00947              mit != involved_IDs_map.end(); mit++ )
00948         {
00949             int sender_proc                     = mit->first;
00950             std::vector< int >& eids            = mit->second;
00951             std::vector< int >& index_in_values = map_index[sender_proc];
00952             std::vector< int >& index_ptr       = map_ptr[sender_proc];  // this is eids.size()+1;
00953             int size_buffer                     = 4 + total_bytes_per_entity *
00954                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00955             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00956             buffer->reset_ptr( sizeof( int ) );
00957 
00958             // receive the buffer
00959             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00960             if( ierr != 0 ) return MB_FAILURE;
00961             // use the values in buffer to populate valuesTag arrays, fill it up!
00962             int j = 0;
00963             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00964             {
00965                 for( size_t i = 0; i < tag_handles.size(); i++ )
00966                 {
00967                     // right now, move just doubles; but it could be any type of tag
00968                     double val = *( (double*)( buffer->buff_ptr ) );
00969                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
00970                     for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
00971                         valuesTags[i][index_in_values[k]] = val;
00972                 }
00973             }
00974             // we are done with the buffer in which we received tags, release / delete it
00975             delete buffer;
00976         }
00977         // now we populated the values for all tags; set now the tags!
00978         for( size_t i = 0; i < tag_handles.size(); i++ )
00979         {
00980             // we will fill this array, using data from received buffer
00981             rval = mb->tag_set_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
00982         }
00983     }
00984     return MB_SUCCESS;
00985 }
00986 /*
00987  * settle the send graph on the sender side: TLcovIDs holds (receiving proc, global element id) pairs; each id is appended to involved_IDs_map[to_proc]
00988  */
00989 ErrorCode ParCommGraph::settle_send_graph( TupleList& TLcovIDs )
00990 {
00991     // fill involved_IDs_map with data
00992     // will have "receiving proc" and global id of element
00993     int n      = TLcovIDs.get_n();
00994     graph_type = COVERAGE;  // do not rely only on involved_IDs_map.size(); this can be 0 in some cases
00995     for( int i = 0; i < n; i++ )
00996     {
00997         int to_proc      = TLcovIDs.vi_wr[2 * i];
00998         int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
00999         involved_IDs_map[to_proc].push_back( globalIdElem );
01000     }
01001 #ifdef VERBOSE
01002     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
01003          mit++ )
01004     {
01005         std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
01006         for( size_t i = 0; i < mit->second.size(); i++ )
01007         {
01008             std::cout << " " << mit->second[i];
01009         }
01010         std::cout << std::endl;
01011     }
01012 #endif
01013     return MB_SUCCESS;
01014 }
01015 
01016 // this fills involved_IDs_map, which will store all ids to be received from each sender task
01017 void ParCommGraph::SetReceivingAfterCoverage(
01018     std::map< int, std::set< int > >& idsFromProcs )  // will make sense only on receivers, right now after cov
01019 {
01020     for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
01021     {
01022         int fromProc            = mt->first;
01023         std::set< int >& setIds = mt->second;
01024         involved_IDs_map[fromProc].resize( setIds.size() );
01025         std::vector< int >& listIDs = involved_IDs_map[fromProc];
01026         size_t indx                 = 0;
01027         for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
01028         {
01029             int valueID     = *st;
01030             listIDs[indx++] = valueID;
01031         }
01032     }
01033     graph_type = COVERAGE;
01034     return;
01035 }
01036 //#define VERBOSE
01037 void ParCommGraph::settle_comm_by_ids( int comp, TupleList& TLBackToComp, std::vector< int >& valuesComp )
01038 {
01039     // settle comm graph on comp
01040     if( rootSender || rootReceiver ) std::cout << " settle comm graph by id on component " << comp << "\n";
01041     int n = TLBackToComp.get_n();
01042     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
01043     // cases
01044     std::map< int, std::set< int > > uniqueIDs;
01045 
01046     for( int i = 0; i < n; i++ )
01047     {
01048         int to_proc  = TLBackToComp.vi_wr[3 * i + 2];
01049         int globalId = TLBackToComp.vi_wr[3 * i + 1];
01050         uniqueIDs[to_proc].insert( globalId );
01051     }
01052 
01053     // vector to store each element of valuesComp
01054     // together with its original index
01055     std::vector< std::pair< int, int > > vp;
01056     vp.reserve( valuesComp.size() );
01057 
01058     // insert elements as (value, index) pairs
01059     // to keep track of their original indexes in valuesComp
01060     for( int i = 0; i < (int)valuesComp.size(); ++i )
01061     {
01062         vp.push_back( std::make_pair( valuesComp[i], i ) );
01063     }
01064     // Sorting pair vector
01065     sort( vp.begin(), vp.end() );
01066 
01067     // vp[i].first is the value, vp[i].second is its original index
01068 
01069     // now count how many times each value appears in the sorted array (that is, in valuesComp)
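    // Small illustration (made-up values): if valuesComp = {10, 12, 10, 15} and task P needs the ids
    // {10, 15}, then vp sorts to {(10,0), (10,2), (12,1), (15,3)} and the loop below produces
    // involved_IDs_map[P] = {10, 15}, map_ptr[P] = {0, 2, 3} and map_index[P] = {0, 2, 3}: id 10 lives
    // at positions 0 and 2 of valuesComp, id 15 at position 3 (a CSR-style index).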
01070     for( std::map< int, std::set< int > >::iterator it = uniqueIDs.begin(); it != uniqueIDs.end(); it++ )
01071     {
01072         int procId                  = it->first;
01073         std::set< int >& nums       = it->second;
01074         std::vector< int >& indx    = map_ptr[procId];
01075         std::vector< int >& indices = map_index[procId];
01076         indx.resize( nums.size() + 1 );
01077         int indexInVp = 0;
01078         int indexVal  = 0;
01079         indx[0]       = 0;  // start from 0
01080         for( std::set< int >::iterator sst = nums.begin(); sst != nums.end(); sst++, indexVal++ )
01081         {
01082             int val = *sst;
01083             involved_IDs_map[procId].push_back( val );
01084             indx[indexVal + 1] = indx[indexVal];
01085             while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )  // should be equal !
01086             {
01087                 if( vp[indexInVp].first == val )
01088                 {
01089                     indx[indexVal + 1]++;
01090                     indices.push_back( vp[indexInVp].second );
01091                 }
01092                 indexInVp++;
01093             }
01094         }
01095     }
01096 #ifdef VERBOSE
01097     std::stringstream f1;
01098     std::ofstream dbfile;
01099     f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
01100     dbfile.open( f1.str().c_str() );
01101     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
01102          mit++ )
01103     {
01104         int corrTask                = mit->first;
01105         std::vector< int >& corrIds = mit->second;
01106         std::vector< int >& indx    = map_ptr[corrTask];
01107         std::vector< int >& indices = map_index[corrTask];
01108 
01109         dbfile << " towards proc " << corrTask << " \n";
01110         for( int i = 0; i < (int)corrIds.size(); i++ )
01111         {
01112             dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ")  : ";
01113             for( int j = indx[i]; j < indx[i + 1]; j++ )
01114                 dbfile << indices[j] << " ";
01115             dbfile << "\n";
01116         }
01117         dbfile << " \n";
01118     }
01119     dbfile.close();
01120 #endif
01121 
01122     graph_type = DOF_BASED;
01123     // now we need to fill back and forth information, needed to fill the arrays
01124     // for example, from spectral to involved_IDs_map, in case we want to send data from
01125     // spectral to phys
01126 }
01127 //#undef VERBOSE
01128 // new partition calculation
01129 ErrorCode ParCommGraph::compute_partition( ParallelComm* pco, Range& owned, int met )
01130 {
01131     // we are on a sender task and need to compute a new partition;
01132     // the primary cells need to be distributed to the receiver tasks
01133     // first, we will use a graph partitioner, with Zoltan;
01134     // in the graph that we need to build, the first layer of ghosts is needed;
01135     // can we avoid that? For example, we can find out from each boundary edge/face what the
01136     // other cell (on the other side) is, then form the global graph and call Zoltan in parallel.
01137     // met 1 is the graph partitioner, which needs this extra adjacency exchange; met 2 is a
01138     // geometric partitioner, for which no ghost exchange is needed
01139 
01140     // find first edges that are shared
01141     if( owned.empty() )
01142         return MB_SUCCESS;  // nothing to do? empty partition is not allowed, maybe we should return
01143                             // error?
01144     Core* mb = (Core*)pco->get_moab();
01145 
01146     double t1, t2, t3;
01147     t1               = MPI_Wtime();
01148     int primaryDim   = mb->dimension_from_handle( *owned.rbegin() );
01149     int interfaceDim = primaryDim - 1;  // should be 1 or 2
01150     Range sharedEdges;
01151     ErrorCode rval;
01152 
01153     std::vector< int > shprocs( MAX_SHARING_PROCS );
01154     std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );
01155 
01156     Tag gidTag = mb->globalId_tag();
01157     int np;
01158     unsigned char pstatus;
01159 
01160     std::multimap< int, int > extraGraphEdges;
01161     // std::map<int, int> adjCellsId;
01162     std::map< int, int > extraCellsProc;
01163     // if method is 2, no need to do the exchange for adjacent cells across partition boundary
01164     // these maps above will be empty for method 2 (geometry)
01165     if( 1 == met )
01166     {
01167         rval = pco->get_shared_entities( /*int other_proc*/ -1, sharedEdges, interfaceDim,
01168                                          /*const bool iface*/ true );MB_CHK_ERR( rval );
01169 
01170 #ifdef VERBOSE
01171         std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size()
01172                   << "\n";
01173 #endif
01174         // find out to which processors we need to send the adjacency info about each shared edge
01175         // first determine the local graph: identify, for each shared partition-interface edge,
01176         // the cell in the owned range adjacent to it, and collect these pairs in a map
01177         TupleList TLe;                                     // tuple list for cells
01178         TLe.initialize( 2, 0, 1, 0, sharedEdges.size() );  // send to, id of adj cell, remote edge
01179         TLe.enableWriteAccess();
01180 
01181         std::map< EntityHandle, int > edgeToCell;  // from local boundary edge to global id of the adjacent cell
01182         // it is used again after the crystal-router transfer below, to look up received edge handles
01183         for( Range::iterator eit = sharedEdges.begin(); eit != sharedEdges.end(); eit++ )
01184         {
01185             EntityHandle edge = *eit;
01186             // get the adjacent cell
01187             Range adjEnts;
01188             rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval );
01189             if( adjEnts.size() > 0 )
01190             {
01191                 EntityHandle adjCell = adjEnts[0];
01192                 int gid;
01193                 rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval );
01194                 rval = pco->get_sharing_data( edge, &shprocs[0], &shhandles[0], pstatus, np );MB_CHK_ERR( rval );
01195                 int n                = TLe.get_n();
01196                 TLe.vi_wr[2 * n]     = shprocs[0];
01197                 TLe.vi_wr[2 * n + 1] = gid;
01198                 TLe.vul_wr[n]        = shhandles[0];  // the remote edge corresponding to shared edge
01199                 edgeToCell[edge]     = gid;           // store the map between edge and local id of adj cell
01200                 TLe.inc_n();
01201             }
01202         }
01203 
01204 #ifdef VERBOSE
01205         std::stringstream ff2;
01206         ff2 << "TLe_" << pco->rank() << ".txt";
01207         TLe.print_to_file( ff2.str().c_str() );
01208 #endif
01209         // send the data to the other processors:
01210         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLe, 0 );
01211         // on receiver side, each local edge will have the remote cell adjacent to it!
01212 
01213         int ne = TLe.get_n();
01214         for( int i = 0; i < ne; i++ )
01215         {
01216             int sharedProc         = TLe.vi_rd[2 * i];       // this info is coming from here, originally
01217             int remoteCellID       = TLe.vi_rd[2 * i + 1];   // this is the id of the remote cell, on sharedProc
01218             EntityHandle localCell = TLe.vul_rd[i];          // this is now local edge/face on this proc
01219             int localCellId        = edgeToCell[localCell];  // this is the local cell  adjacent to edge/face
01220             // now, we will need to add to the graph the pair <localCellId, remoteCellID>
01221             std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID );
01222             extraGraphEdges.insert( extraAdj );
01223             // adjCellsId [edgeToCell[localCell]] = remoteCellID;
01224             extraCellsProc[remoteCellID] = sharedProc;
01225 #ifdef VERBOSE
01226             std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n";
01227 #endif
01228         }
01229     }
01230     t2 = MPI_Wtime();
01231     if( rootSender ) std::cout << " time preparing the input for Zoltan: " << t2 - t1 << " seconds. \n";
01232     // the adjacent cell ids are now known; call Zoltan for the parallel partition
01233 #ifdef MOAB_HAVE_ZOLTAN
01234     ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mb, pco );
01235     if( 1 <= met )  //  partition in zoltan, either graph or geometric partitioner
01236     {
01237         std::map< int, Range > distribution;  // how to distribute owned elements by processors in receiving groups
01238         // in how many tasks do we want to be distributed?
01239         int numNewPartitions = (int)receiverTasks.size();
01240         Range primaryCells   = owned.subset_by_dimension( primaryDim );
01241         rval = mbZTool->partition_owned_cells( primaryCells, extraGraphEdges, extraCellsProc, numNewPartitions,
01242                                                distribution, met );MB_CHK_ERR( rval );
01243         for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ )
01244         {
01245             int part_index = mit->first;
01246             assert( part_index < numNewPartitions );
01247             split_ranges[receiverTasks[part_index]] = mit->second;
01248         }
01249     }
01250     // delete now the partitioner
01251     delete mbZTool;
01252 #endif
01253     t3 = MPI_Wtime();
01254     if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n";
01255     return MB_SUCCESS;
01256 }
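// Hypothetical usage sketch (the set name and driver are assumptions, not MOAB code): on each
// sender task compute_partition() is given the locally owned primary cells and a method flag;
// method 1 builds the cross-boundary adjacency graph for Zoltan, method 2 is purely geometric.
#ifdef PARCOMMGRAPH_EXAMPLES  // hypothetical guard; never defined in MOAB
static ErrorCode example_compute_partition( ParCommGraph& cgraph, ParallelComm* pco, EntityHandle fileSet )
{
    Range owned;
    // gather the primary entities (2D cells here, as an assumption); we assume the set holds
    // only entities owned by this task
    ErrorCode rval = pco->get_moab()->get_entities_by_dimension( fileSet, 2, owned );MB_CHK_ERR( rval );
    // method 1: graph partitioner (the shared-edge adjacency exchange is done internally)
    return cgraph.compute_partition( pco, owned, 1 );
}
#endif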
01257 
01258 // after map read, we need to know what entities we need to send to receiver
01259 ErrorCode ParCommGraph::set_split_ranges( int comp,
01260                                           TupleList& TLBackToComp1,
01261                                           std::vector< int >& valuesComp1,
01262                                           int lenTag,
01263                                           Range& ents_of_interest,
01264                                           int /*type*/ )
01265 {
01266     // settle split_ranges // same role as partitioning
01267     if( rootSender ) std::cout << " find split_ranges on component " << comp << " according to the read map \n";
01268     // group, per destination task, the global ids that appear in the tuple list,
01269     // then mark which owned entities carry those ids
01270     int n = TLBackToComp1.get_n();
01271     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
01272     // cases
01273     std::map< int, std::set< int > > uniqueIDs;
01274 
01275     for( int i = 0; i < n; i++ )
01276     {
01277         int to_proc  = TLBackToComp1.vi_wr[3 * i + 2];
01278         int globalId = TLBackToComp1.vi_wr[3 * i + 1];
01279         uniqueIDs[to_proc].insert( globalId );
01280     }
01281 
01282     for( int i = 0; i < (int)ents_of_interest.size(); i++ )
01283     {
01284         EntityHandle ent = ents_of_interest[i];
01285         for( int j = 0; j < lenTag; j++ )
01286         {
01287             int marker = valuesComp1[i * lenTag + j];
01288             for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
01289             {
01290                 int proc                = mit->first;
01291                 std::set< int >& setIds = mit->second;
01292                 if( setIds.find( marker ) != setIds.end() )
01293                 {
01294                     split_ranges[proc].insert( ent );
01295                 }
01296             }
01297         }
01298     }
01299 
01300     return MB_SUCCESS;
01301 }
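// Sketch of the tuple layout assumed by set_split_ranges() above (inferred from the indexing, so
// treat the meaning of entry 0 as an assumption): every tuple of TLBackToComp1 carries 3 integers,
// entry 1 being the element global id and entry 2 the destination task. A hypothetical packer:
#ifdef PARCOMMGRAPH_EXAMPLES  // hypothetical guard; never defined in MOAB
static void example_pack_back_tuple( TupleList& TLBackToComp1, int origProc, int globalId, int toProc )
{
    int n                          = TLBackToComp1.get_n();
    TLBackToComp1.vi_wr[3 * n]     = origProc;  // entry 0: assumed to be the originating task
    TLBackToComp1.vi_wr[3 * n + 1] = globalId;  // entry 1: element global id (read above)
    TLBackToComp1.vi_wr[3 * n + 2] = toProc;    // entry 2: destination task (read above)
    TLBackToComp1.inc_n();
}
#endif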
01302 
01303 // new methods to migrate mesh after reading map
01304 ErrorCode ParCommGraph::form_tuples_to_migrate_mesh( Interface* mb,
01305                                                      TupleList& TLv,
01306                                                      TupleList& TLc,
01307                                                      int type,
01308                                                      int lenTagType1 )
01309 {
01310     // we will always need GlobalID tag
01311     Tag gidtag = mb->globalId_tag();
01312     // for Type1, we need GLOBAL_DOFS tag;
01313     Tag gds;
01314     ErrorCode rval;
01315     if( type == 1 )
01316     {
01317         rval = mb->tag_get_handle( "GLOBAL_DOFS", gds );MB_CHK_ERR( rval );
01318     }
01319     // find vertices to be sent, for each of the split_ranges procs
01320     std::map< int, Range > verts_to_proc;
01321     int numv = 0, numc = 0;
01322     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01323     {
01324         int to_proc = it->first;
01325         Range verts;
01326         if( type != 2 )
01327         {
01328             rval = mb->get_connectivity( it->second, verts );MB_CHK_ERR( rval );
01329             numc += (int)it->second.size();
01330         }
01331         else
01332             verts = it->second;
01333         verts_to_proc[to_proc] = verts;
01334         numv += (int)verts.size();
01335     }
01336     // first vertices:
01337     TLv.initialize( 2, 0, 0, 3, numv );  // to proc, GLOBAL ID, 3 real coordinates
01338     TLv.enableWriteAccess();
01339     // use the global id of vertices for connectivity
01340     for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ )
01341     {
01342         int to_proc  = it->first;
01343         Range& verts = it->second;
01344         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01345         {
01346             EntityHandle v   = *vit;
01347             int n            = TLv.get_n();  // current size of tuple list
01348             TLv.vi_wr[2 * n] = to_proc;      // send to processor
01349 
01350             rval = mb->tag_get_data( gidtag, &v, 1, &( TLv.vi_wr[2 * n + 1] ) );MB_CHK_ERR( rval );
01351             rval = mb->get_coords( &v, 1, &( TLv.vr_wr[3 * n] ) );MB_CHK_ERR( rval );
01352             TLv.inc_n();  // increment tuple list size
01353         }
01354     }
01355     if( type == 2 ) return MB_SUCCESS;  // no need to fill TLc
01356     // to proc, ID cell, gdstag, nbv, id conn,
01357     int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell
01358 
01359     std::vector< int > gdvals;
01360 
01361     TLc.initialize( size_tuple, 0, 0, 0, numc );  // to proc, global cell id, [GLOBAL_DOFS], nb vertices, connectivity ids
01362     TLc.enableWriteAccess();
01363     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01364     {
01365         int to_proc  = it->first;
01366         Range& cells = it->second;
01367         for( Range::iterator cit = cells.begin(); cit != cells.end(); ++cit )
01368         {
01369             EntityHandle cell         = *cit;
01370             int n                     = TLc.get_n();  // current size of tuple list
01371             TLc.vi_wr[size_tuple * n] = to_proc;
01372             int current_index         = 2;
01373             rval                      = mb->tag_get_data( gidtag, &cell, 1, &( TLc.vi_wr[size_tuple * n + 1] ) );MB_CHK_ERR( rval );
01374             if( 1 == type )
01375             {
01376                 rval = mb->tag_get_data( gds, &cell, 1, &( TLc.vi_wr[size_tuple * n + current_index] ) );MB_CHK_ERR( rval );
01377                 current_index += lenTagType1;
01378             }
01379             // now get connectivity
01380             const EntityHandle* conn = NULL;
01381             int nnodes               = 0;
01382             rval                     = mb->get_connectivity( cell, conn, nnodes );MB_CHK_ERR( rval );
01383             // fill nnodes:
01384             TLc.vi_wr[size_tuple * n + current_index] = nnodes;
01385             rval = mb->tag_get_data( gidtag, conn, nnodes, &( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval );
01386             TLc.inc_n();  // increment tuple list size
01387         }
01388     }
01389     return MB_SUCCESS;
01390 }
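// Sketch of how a single cell tuple packed above would be decoded, mirroring the layout
// (to proc, global id, optional GLOBAL_DOFS values, nb vertices, vertex global ids). It
// duplicates logic that form_mesh_from_tuples() below performs; it is shown only for clarity.
#ifdef PARCOMMGRAPH_EXAMPLES  // hypothetical guard; never defined in MOAB
static void example_decode_cell_tuple( TupleList& TLc, int i, int type, int lenTagType1 )
{
    int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;
    int to_proc    = TLc.vi_rd[size_tuple * i];      // destination task
    int cell_gid   = TLc.vi_rd[size_tuple * i + 1];  // global id of the cell
    int offset     = 2 + ( ( type != 1 ) ? 0 : lenTagType1 );
    int nnodes     = TLc.vi_rd[size_tuple * i + offset];  // number of vertices of this cell
    for( int j = 0; j < nnodes; j++ )
    {
        int vertex_gid = TLc.vi_rd[size_tuple * i + offset + 1 + j];  // connectivity as vertex global ids
        (void)vertex_gid;
    }
    (void)to_proc;
    (void)cell_gid;
}
#endif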
01391 ErrorCode ParCommGraph::form_mesh_from_tuples( Interface* mb,
01392                                                TupleList& TLv,
01393                                                TupleList& TLc,
01394                                                int type,
01395                                                int lenTagType1,
01396                                                EntityHandle fset,
01397                                                Range& primary_ents,
01398                                                std::vector< int >& values_entities )
01399 {
01400     // we may also need to fill the split_ranges map here
01401     // we will always need GlobalID tag
01402     Tag gidtag = mb->globalId_tag();
01403     // for Type1, we need GLOBAL_DOFS tag;
01404     Tag gds;
01405     ErrorCode rval;
01406     std::vector< int > def_val( lenTagType1, 0 );
01407     if( type == 1 )
01408     {
01409         // we may need to create this tag
01410         rval = mb->tag_get_handle( "GLOBAL_DOFS", lenTagType1, MB_TYPE_INTEGER, gds, MB_TAG_CREAT | MB_TAG_DENSE,
01411                                    &def_val[0] );MB_CHK_ERR( rval );
01412     }
01413 
01414     std::map< int, EntityHandle > vertexMap;  // from global id to the newly created vertex handle
01415     Range verts;
01416     // always form vertices and add them to the fset;
01417     int n = TLv.get_n();
01418     EntityHandle vertex;
01419     for( int i = 0; i < n; i++ )
01420     {
01421         int gid = TLv.vi_rd[2 * i + 1];
01422         if( vertexMap.find( gid ) == vertexMap.end() )
01423         {
01424             // need to form this vertex
01425             rval = mb->create_vertex( &( TLv.vr_rd[3 * i] ), vertex );MB_CHK_ERR( rval );
01426             vertexMap[gid] = vertex;
01427             verts.insert( vertex );
01428             rval = mb->tag_set_data( gidtag, &vertex, 1, &gid );MB_CHK_ERR( rval );
01429             // if point cloud,
01430         }
01431         if( 2 == type )  // point cloud, add it to the split_ranges ?
01432         {
01433             split_ranges[TLv.vi_rd[2 * i]].insert( vertexMap[gid] );
01434         }
01435         // todo : when to add the values_entities ?
01436     }
01437     rval = mb->add_entities( fset, verts );MB_CHK_ERR( rval );
01438     if( 2 == type )
01439     {
01440         primary_ents = verts;
01441         values_entities.resize( verts.size() );  // just get the ids of vertices
01442         rval = mb->tag_get_data( gidtag, verts, &values_entities[0] );MB_CHK_ERR( rval );
01443         return MB_SUCCESS;
01444     }
01445 
01446     n              = TLc.get_n();
01447     int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell
01448 
01449     EntityHandle new_element;
01450     Range cells;
01451     std::map< int, EntityHandle > cellMap;  // do not create a cell if it already exists, maybe from another process
01452     for( int i = 0; i < n; i++ )
01453     {
01454         int from_proc  = TLc.vi_rd[size_tuple * i];
01455         int globalIdEl = TLc.vi_rd[size_tuple * i + 1];
01456         if( cellMap.find( globalIdEl ) == cellMap.end() )  // need to create the cell
01457         {
01458             int current_index = 2;
01459             if( 1 == type ) current_index += lenTagType1;
01460             int nnodes = TLc.vi_rd[size_tuple * i + current_index];
01461             std::vector< EntityHandle > conn;
01462             conn.resize( nnodes );
01463             for( int j = 0; j < nnodes; j++ )
01464             {
01465                 conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]];
01466             }
01467             // choose the element type from the number of vertices: triangle, quad, or polygon
01468             EntityType entType = MBQUAD;
01469             if( nnodes > 4 ) entType = MBPOLYGON;
01470             if( nnodes < 4 ) entType = MBTRI;
01471             rval = mb->create_element( entType, &conn[0], nnodes, new_element );MB_CHK_SET_ERR( rval, "can't create new element " );
01472             cells.insert( new_element );
01473             cellMap[globalIdEl] = new_element;
01474             rval                = mb->tag_set_data( gidtag, &new_element, 1, &globalIdEl );MB_CHK_SET_ERR( rval, "can't set global id tag on cell " );
01475             if( 1 == type )
01476             {
01477                 // set the gds tag
01478                 rval = mb->tag_set_data( gds, &new_element, 1, &( TLc.vi_rd[size_tuple * i + 2] ) );MB_CHK_SET_ERR( rval, "can't set gds tag on cell " );
01479             }
01480         }
01481         split_ranges[from_proc].insert( cellMap[globalIdEl] );
01482     }
01483     rval = mb->add_entities( fset, cells );MB_CHK_ERR( rval );
01484     primary_ents = cells;
01485     if( 1 == type )
01486     {
01487         values_entities.resize( lenTagType1 * primary_ents.size() );
01488         rval = mb->tag_get_data( gds, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
01489     }
01490     else  // type == 3
01491     {
01492         values_entities.resize( primary_ents.size() );  // just get the ids !
01493         rval = mb->tag_get_data( gidtag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
01494     }
01495     return MB_SUCCESS;
01496 }
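// Hypothetical round-trip sketch (an assumption about how the two routines above are paired; the
// actual driver lives outside this class): pack the local mesh into tuple lists, route them with
// the crystal router over the joint communicator, then rebuild the mesh on the receiving tasks.
#ifdef PARCOMMGRAPH_EXAMPLES  // hypothetical guard; never defined in MOAB
static ErrorCode example_migrate_after_map( ParCommGraph& cgraph,
                                            Interface* mb,
                                            ParallelComm* pco_joint,
                                            EntityHandle fset,
                                            int type,
                                            int lenTagType1 )
{
    TupleList TLv, TLc;
    ErrorCode rval = cgraph.form_tuples_to_migrate_mesh( mb, TLv, TLc, type, lenTagType1 );MB_CHK_ERR( rval );
    // route each tuple to the task stored in its first integer entry
    ( pco_joint->proc_config().crystal_router() )->gs_transfer( 1, TLv, 0 );
    if( type != 2 ) ( pco_joint->proc_config().crystal_router() )->gs_transfer( 1, TLc, 0 );
    // rebuild vertices / cells on the receiving side and collect the primary entities and their ids
    Range primary_ents;
    std::vector< int > values_entities;
    return cgraph.form_mesh_from_tuples( mb, TLv, TLc, type, lenTagType1, fset, primary_ents, values_entities );
}
#endif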
01497 
01498 // at this moment, each sender task has split_ranges formed;
01499 // we need to aggregate that info and send it to receiver
01500 ErrorCode ParCommGraph::send_graph_partition( ParallelComm* pco, MPI_Comm jcomm )
01501 {
01502     // first, accumulate the info at the root of the sender group, using gather / gatherv:
01503     // gather the number of receivers each sender task will send to, at the root sender
01504     int numberReceivers =
01505         (int)split_ranges.size();            // ranges of elements to be sent from this task, one per receiver;
01506     int nSenders = (int)senderTasks.size();  // total number of sender tasks in the group
01507     // this sender will have to send to numberReceivers receiver tasks
01508     std::vector< int > displs( 1 );  // displacements for gatherv
01509     std::vector< int > counts( 1 );
01510     if( is_root_sender() )
01511     {
01512         displs.resize( nSenders + 1 );
01513         counts.resize( nSenders );
01514     }
01515 
01516     int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, pco->comm() );
01517     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01518     // compute now displacements
01519     if( is_root_sender() )
01520     {
01521         displs[0] = 0;
01522         for( int k = 0; k < nSenders; k++ )
01523         {
01524             displs[k + 1] = displs[k] + counts[k];
01525         }
01526     }
01527     std::vector< int > buffer;
01528     if( is_root_sender() ) buffer.resize( displs[nSenders] );  // the last one will have the total count now
01529 
01530     std::vector< int > recvs;
01531     for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01532     {
01533         recvs.push_back( mit->first );
01534     }
01535     ierr =
01536         MPI_Gatherv( &recvs[0], numberReceivers, MPI_INT, &buffer[0], &counts[0], &displs[0], MPI_INT, 0, pco->comm() );
01537     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01538 
01539     // now form the recv_graph map: for each receiver rank, the list of sender ranks that send to it
01540     // this is the graph to be sent to the other side; it is built only on the root sender
01541     if( is_root_sender() )
01542     {
01543 #ifdef GRAPH_INFO
01544         std::ofstream dbfileSender;
01545         std::stringstream outf;
01546         outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
01547         dbfileSender.open( outf.str().c_str() );
01548         dbfileSender << " number senders: " << nSenders << "\n";
01549         dbfileSender << " senderRank \treceivers \n";
01550         for( int k = 0; k < nSenders; k++ )
01551         {
01552             int indexInBuff = displs[k];
01553             int senderTask  = senderTasks[k];
01554             dbfileSender << senderTask << "\t\t";
01555             for( int j = 0; j < counts[k]; j++ )
01556             {
01557                 int recvTask = buffer[indexInBuff + j];
01558                 dbfileSender << recvTask << " ";
01559             }
01560             dbfileSender << "\n";
01561         }
01562         dbfileSender.close();
01563 #endif
01564         for( int k = 0; k < nSenders; k++ )
01565         {
01566             int indexInBuff = displs[k];
01567             int senderTask  = senderTasks[k];
01568             for( int j = 0; j < counts[k]; j++ )
01569             {
01570                 int recvTask = buffer[indexInBuff + j];
01571                 recv_graph[recvTask].push_back( senderTask );  // this will be packed and sent to root receiver, with
01572                                                                // nonblocking send
01573             }
01574         }
01575 #ifdef GRAPH_INFO
01576         std::ofstream dbfile;
01577         std::stringstream outf2;
01578         outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
01579         dbfile.open( outf2.str().c_str() );
01580         dbfile << " number receivers: " << recv_graph.size() << "\n";
01581         dbfile << " receiverRank \tsenders \n";
01582         for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
01583         {
01584             int recvTask                = mit->first;
01585             std::vector< int >& senders = mit->second;
01586             dbfile << recvTask << "\t\t";
01587             for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
01588                 dbfile << *vit << " ";
01589             dbfile << "\n";
01590         }
01591         dbfile.close();
01592 #endif
01593         // this is the same as trivial partition
01594         ErrorCode rval = send_graph( jcomm );MB_CHK_ERR( rval );
01595     }
01596 
01597     return MB_SUCCESS;
01598 }
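// Minimal worked example of the gather bookkeeping used above (the values are made up): with
// three sender tasks reporting counts = {2, 1, 3} receivers each, the root sender builds
// displs = {0, 2, 3, 6}, so sender k's receiver ranks occupy buffer[ displs[k] .. displs[k+1] ).
#ifdef PARCOMMGRAPH_EXAMPLES  // hypothetical guard; never defined in MOAB
static std::vector< int > example_displacements( const std::vector< int >& counts )
{
    std::vector< int > displs( counts.size() + 1, 0 );
    for( size_t k = 0; k < counts.size(); k++ )
        displs[k + 1] = displs[k] + counts[k];  // prefix sum, same as in send_graph_partition()
    return displs;
}
#endif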
01599 // method to expose local graph info: sender id, receiver id, number of elements to send,
01600 // after or before intersection
01601 ErrorCode ParCommGraph::dump_comm_information( std::string prefix, int is_send )
01602 {
01603     //
01604     if( -1 != rankInGroup1 && 1 == is_send )  // it is a sender task
01605     {
01606         std::ofstream dbfile;
01607         std::stringstream outf;
01608         outf << prefix << "_sender_" << rankInGroup1 << "_joint" << rankInJoin << "_type_" << (int)graph_type << ".txt";
01609         dbfile.open( outf.str().c_str() );
01610 
01611         if( graph_type == COVERAGE )
01612         {
01613             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01614                  mit != involved_IDs_map.end(); mit++ )
01615             {
01616                 int receiver_proc        = mit->first;
01617                 std::vector< int >& eids = mit->second;
01618                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01619             }
01620         }
01621         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01622         {
01623             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01624             {
01625                 int receiver_proc = mit->first;
01626                 Range& eids       = mit->second;
01627                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01628             }
01629         }
01630         else if( graph_type == DOF_BASED )  // just after migration, or from computeGraph
01631         {
01632             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01633                  mit != involved_IDs_map.end(); mit++ )
01634             {
01635                 int receiver_proc = mit->first;
01636                 dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n";
01637             }
01638         }
01639         dbfile.close();
01640     }
01641     if( -1 != rankInGroup2 && 0 == is_send )  // it is a receiver task
01642     {
01643         std::ofstream dbfile;
01644         std::stringstream outf;
01645         outf << prefix << "_receiver_" << rankInGroup2 << "_joint" << rankInJoin << "_type_" << (int)graph_type
01646              << ".txt";
01647         dbfile.open( outf.str().c_str() );
01648 
01649         if( graph_type == COVERAGE )
01650         {
01651             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01652                  mit != involved_IDs_map.end(); mit++ )
01653             {
01654                 int sender_proc          = mit->first;
01655                 std::vector< int >& eids = mit->second;
01656                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01657             }
01658         }
01659         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01660         {
01661             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01662             {
01663                 int sender_proc = mit->first;
01664                 Range& eids     = mit->second;
01665                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01666             }
01667         }
01668         else if( graph_type == DOF_BASED )  // just after migration
01669         {
01670             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01671                  mit != involved_IDs_map.end(); mit++ )
01672             {
01673                 int sender_proc = mit->first;
01674                 dbfile << "sender: " << sender_proc << " size:" << mit->second.size() << "\n";
01675             }
01676         }
01677         dbfile.close();
01678     }
01679     return MB_SUCCESS;
01680 }
01681 }  // namespace moab