Mesh Oriented datABase  (version 5.4.1)
Array-based unstructured mesh datastructure
ParCommGraph.cpp
00001 /*
00002  * ParCommGraph.cpp
00003  *
00004  */
00005 
00006 #include "moab/ParCommGraph.hpp"
00007 // we need to recompute adjacencies for merging to work
00008 #include "moab/Core.hpp"
00009 #include "AEntityFactory.hpp"
00010 
00011 #ifdef MOAB_HAVE_ZOLTAN
00012 #include "moab/ZoltanPartitioner.hpp"
00013 #endif
00014 
00015 // #define VERBOSE
00016 // #define GRAPH_INFO
00017 
00018 namespace moab
00019 {
00020 ParCommGraph::ParCommGraph( MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2 )
00021     : comm( joincomm ), compid1( coid1 ), compid2( coid2 )
00022 {
00023     // find out the tasks from each group, in the joint communicator
00024     find_group_ranks( group1, comm, senderTasks );
00025     find_group_ranks( group2, comm, receiverTasks );
00026 
00027     rootSender = rootReceiver = false;
00028     rankInGroup1 = rankInGroup2 = rankInJoin = -1;  // not initialized, or not part of the group
00029 
00030     int mpierr = MPI_Group_rank( group1, &rankInGroup1 );
00031     if( MPI_SUCCESS != mpierr || rankInGroup1 == MPI_UNDEFINED ) rankInGroup1 = -1;
00032 
00033     mpierr = MPI_Group_rank( group2, &rankInGroup2 );
00034     if( MPI_SUCCESS != mpierr || rankInGroup2 == MPI_UNDEFINED ) rankInGroup2 = -1;
00035 
00036     mpierr = MPI_Comm_rank( comm, &rankInJoin );
00037     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00038         rankInJoin = -1;
00039 
00040     mpierr = MPI_Comm_size( comm, &joinSize );
00041     if( MPI_SUCCESS != mpierr )  // it should be a fatal error
00042         joinSize = -1;
00043 
00044     if( 0 == rankInGroup1 ) rootSender = true;
00045     if( 0 == rankInGroup2 ) rootReceiver = true;
00046     graph_type = INITIAL_MIGRATE;  // 0
00047     context_id = -1;
00048     cover_set  = 0;  // refers to nothing yet
00049 }
00050 
00051 // the copy constructor copies only a few basic members; split ranges will not be copied
00052 ParCommGraph::ParCommGraph( const ParCommGraph& src )
00053 {
00054     comm          = src.comm;
00055     senderTasks   = src.senderTasks;    // these are the sender tasks in joint comm
00056     receiverTasks = src.receiverTasks;  // these are all the receiver tasks in joint comm
00057     rootSender    = src.rootSender;
00058     rootReceiver  = src.rootReceiver;
00059     rankInGroup1  = src.rankInGroup1;
00060     rankInGroup2  = src.rankInGroup2;  // group 1 is sender, 2 is receiver
00061     rankInJoin    = src.rankInJoin;
00062     joinSize      = src.joinSize;
00063     compid1       = src.compid1;
00064     compid2       = src.compid2;
00065     graph_type    = src.graph_type;
00066     context_id    = src.context_id;
00067     cover_set     = src.cover_set;
00068     return;
00069 }
00070 
00071 ParCommGraph::~ParCommGraph()
00072 {
00073     // TODO Auto-generated destructor stub
00074 }
00075 
00076 // utility to find out the ranks of the processes of a group, with respect to a joint comm
00077 // that is guaranteed to span the group;
00078 // it is used locally (in the constructor), but it can also be used as a general utility
00079 void ParCommGraph::find_group_ranks( MPI_Group group, MPI_Comm joincomm, std::vector< int >& ranks )
00080 {
00081     MPI_Group global_grp;
00082     MPI_Comm_group( joincomm, &global_grp );
00083 
00084     int grp_size;
00085 
00086     MPI_Group_size( group, &grp_size );
00087     std::vector< int > rks( grp_size );
00088     ranks.resize( grp_size );
00089 
00090     for( int i = 0; i < grp_size; i++ )
00091         rks[i] = i;
00092 
00093     MPI_Group_translate_ranks( group, grp_size, &rks[0], global_grp, &ranks[0] );
00094     MPI_Group_free( &global_grp );
00095     return;
00096 }
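
// A minimal, self-contained sketch of the rank-translation pattern used above
// (hypothetical free function shown only for illustration; assumes <mpi.h> and <vector>):
/*
    std::vector< int > ranks_in_join( MPI_Group group, MPI_Comm joincomm )
    {
        MPI_Group join_grp;
        MPI_Comm_group( joincomm, &join_grp );
        int grp_size;
        MPI_Group_size( group, &grp_size );
        std::vector< int > local( grp_size ), global( grp_size );
        for( int i = 0; i < grp_size; i++ )
            local[i] = i;  // ranks 0..grp_size-1 within 'group'
        MPI_Group_translate_ranks( group, grp_size, &local[0], join_grp, &global[0] );
        MPI_Group_free( &join_grp );
        return global;  // global[i] is the joint-comm rank of group rank i
    }
*/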
00097 
00098 ErrorCode ParCommGraph::compute_trivial_partition( std::vector< int >& numElemsPerTaskInGroup1 )
00099 {
00100 
00101     recv_graph.clear();
00102     recv_sizes.clear();
00103     sender_graph.clear();
00104     sender_sizes.clear();
00105 
00106     if( numElemsPerTaskInGroup1.size() != senderTasks.size() )
00107         return MB_FAILURE;  // each sender has a number of elements that it owns
00108 
00109     // first find out total number of elements to be sent from all senders
00110     int total_elems = 0;
00111     std::vector< int > accum;
00112     accum.push_back( 0 );
00113 
00114     int num_senders = (int)senderTasks.size();
00115 
00116     for( size_t k = 0; k < numElemsPerTaskInGroup1.size(); k++ )
00117     {
00118         total_elems += numElemsPerTaskInGroup1[k];
00119         accum.push_back( total_elems );
00120     }
00121 
00122     int num_recv = ( (int)receiverTasks.size() );
00123     // in trivial partition, every receiver should get about total_elems/num_receivers elements
00124     int num_per_receiver = (int)( total_elems / num_recv );
00125     int leftover         = total_elems - num_per_receiver * num_recv;
00126 
00127     // so receiver k will receive the interval [ starts[k], starts[k+1] )
00128     std::vector< int > starts;
00129     starts.resize( num_recv + 1 );
00130     starts[0] = 0;
00131     for( int k = 0; k < num_recv; k++ )
00132     {
00133         starts[k + 1] = starts[k] + num_per_receiver;
00134         if( k < leftover ) starts[k + 1]++;
00135     }
00136 
00137     // each sender will send to a number of receivers, based on how the
00138     // arrays starts[0:num_recv] and accum[0:num_senders] overlap
00139     int lastUsedReceiverRank = 0;  // first receiver was not treated yet
00140     for( int j = 0; j < num_senders; j++ )
00141     {
00142         // we could start the receiver loop with the latest receiver that received from previous
00143         // sender
00144         for( int k = lastUsedReceiverRank; k < num_recv; k++ )
00145         {
00146             // if overlap:
00147             if( starts[k] < accum[j + 1] && starts[k + 1] > accum[j] )
00148             {
00149                 recv_graph[receiverTasks[k]].push_back( senderTasks[j] );
00150                 sender_graph[senderTasks[j]].push_back( receiverTasks[k] );
00151 
00152                 // we still need to decide what is the overlap
00153                 int sizeOverlap = 1;  // at least 1, for sure
00154                 // 1
00155                 if( starts[k] >= accum[j] )  // one end is starts[k]
00156                 {
00157                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00158                         sizeOverlap = accum[j + 1] - starts[k];
00159                     else  //
00160                         sizeOverlap = starts[k + 1] - starts[k];
00161                 }
00162                 else  // one end is accum[j]
00163                 {
00164                     if( starts[k + 1] >= accum[j + 1] )  // the other end is accum[j+1]
00165                         sizeOverlap = accum[j + 1] - accum[j];
00166                     else
00167                         sizeOverlap = starts[k + 1] - accum[j];
00168                 }
00169                 recv_sizes[receiverTasks[k]].push_back( sizeOverlap );  // basically, task k will receive from
00170                                                                         //   sender j, sizeOverlap elems
00171                 sender_sizes[senderTasks[j]].push_back( sizeOverlap );
00172                 if( starts[k] > accum[j + 1] )
00173                 {
00174                     lastUsedReceiverRank = k - 1;  // so next k loop will start a little higher, we
00175                                                    // probably finished with first few receivers (up
00176                                                    // to receiver lastUsedReceiverRank)
00177                     break;  // break the k loop, we distributed all elements from sender j to some
00178                             // receivers
00179                 }
00180             }
00181         }
00182     }
00183 
00184     return MB_SUCCESS;
00185 }
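
// Worked example of the trivial partition above (values are illustrative only):
// 2 senders owning { 4, 6 } elements and 3 receivers give
//   accum  = [ 0, 4, 10 ],  total_elems = 10,  num_per_receiver = 3,  leftover = 1,
//   starts = [ 0, 4, 7, 10 ]  (receiver 0 gets the extra element).
// Overlapping the intervals yields:
//   receiver 0 gets 4 elements from sender 0,
//   receiver 1 gets 3 elements from sender 1,
//   receiver 2 gets 3 elements from sender 1,
// which is what recv_graph / recv_sizes and sender_graph / sender_sizes record.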
00186 
00187 ErrorCode ParCommGraph::pack_receivers_graph( std::vector< int >& packed_recv_array )
00188 {
00189     // pack the communication graph using local data; each receiver task will have to post
00190     // receives for every sender task that sends data to it; the array will be
00191     // communicated to the root receiver, and eventually distributed to the receiver tasks
00192 
00193     /*
00194      * packed_recv_array layout: receiver rank, number of senders, then the sender ranks, etc
00195      */
00196     if( recv_graph.size() < receiverTasks.size() )
00197     {
00198         // big problem, we have empty partitions in receive
00199         std::cout << " WARNING: empty partitions, some receiver tasks will receive nothing.\n";
00200     }
00201     for( std::map< int, std::vector< int > >::iterator it = recv_graph.begin(); it != recv_graph.end(); it++ )
00202     {
00203         int recv                    = it->first;
00204         std::vector< int >& senders = it->second;
00205         packed_recv_array.push_back( recv );
00206         packed_recv_array.push_back( (int)senders.size() );
00207 
00208         for( int k = 0; k < (int)senders.size(); k++ )
00209             packed_recv_array.push_back( senders[k] );
00210     }
00211 
00212     return MB_SUCCESS;
00213 }
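
// Layout sketch of packed_recv_array (illustrative values): with
// recv_graph = { 4: [0, 1], 5: [2] } the packed array is [ 4, 2, 0, 1, 5, 1, 2 ].
// A hypothetical decoding loop for such an array:
/*
    std::map< int, std::vector< int > > graph;
    for( size_t i = 0; i < packed_recv_array.size(); )
    {
        int recv_rank = packed_recv_array[i++];
        int n_senders = packed_recv_array[i++];
        for( int k = 0; k < n_senders; k++ )
            graph[recv_rank].push_back( packed_recv_array[i++] );
    }
*/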
00214 
00215 ErrorCode ParCommGraph::split_owned_range( int sender_rank, Range& owned )
00216 {
00217     int senderTask                   = senderTasks[sender_rank];
00218     std::vector< int >& distribution = sender_sizes[senderTask];
00219     std::vector< int >& receivers    = sender_graph[senderTask];
00220     if( distribution.size() != receivers.size() )  //
00221         return MB_FAILURE;
00222 
00223     Range current = owned;  // get the full range first, then we will subtract stuff, for
00224     // the following ranges
00225 
00226     Range rleftover = current;
00227     for( size_t k = 0; k < receivers.size(); k++ )
00228     {
00229         Range newr;
00230         newr.insert( current.begin(), current.begin() + distribution[k] );
00231         split_ranges[receivers[k]] = newr;
00232 
00233         rleftover = subtract( current, newr );
00234         current   = rleftover;
00235     }
00236 
00237     return MB_SUCCESS;
00238 }
00239 
00240 // use for this the corresponding tasks and sizes
00241 ErrorCode ParCommGraph::split_owned_range( Range& owned )
00242 {
00243     if( corr_tasks.size() != corr_sizes.size() )  //
00244         return MB_FAILURE;
00245 
00246     Range current = owned;  // get the full range first, then we will subtract stuff, for
00247     // the following ranges
00248 
00249     Range rleftover = current;
00250     for( size_t k = 0; k < corr_tasks.size(); k++ )
00251     {
00252         Range newr;
00253         newr.insert( current.begin(), current.begin() + corr_sizes[k] );
00254         split_ranges[corr_tasks[k]] = newr;
00255 
00256         rleftover = subtract( current, newr );
00257         current   = rleftover;
00258     }
00259 
00260     return MB_SUCCESS;
00261 }
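
// Generic sketch of the range-splitting idiom used by both overloads above: consecutive
// counts carve pieces off the front of a moab::Range (helper name and signature are hypothetical):
/*
    std::map< int, Range > split_by_counts( const Range& owned,
                                            const std::vector< int >& tasks,
                                            const std::vector< int >& counts )
    {
        std::map< int, Range > out;
        Range current = owned;
        for( size_t k = 0; k < tasks.size(); k++ )
        {
            Range piece;
            piece.insert( current.begin(), current.begin() + counts[k] );
            out[tasks[k]] = piece;
            current       = subtract( current, piece );  // drop what was just assigned
        }
        return out;
    }
*/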
00262 
00263 ErrorCode ParCommGraph::send_graph( MPI_Comm jcomm, std::vector< char >& zoltanBuffer )
00264 {
00265     if( is_root_sender() )
00266     {
00267         int ierr;
00268         // we need to build a communication graph: each sender now knows to which receivers it
00269         // sends data, and the receivers need to post receives for each sender that sends to them;
00270         // gather on rank 0 of the sender comm the global ranks of the senders and their receivers,
00271         // i.e. the communication matrix telling each receiver from which senders it will receive
00272 
00273         std::vector< int > packed_recv_array;
00274         ErrorCode rval = pack_receivers_graph( packed_recv_array );
00275         if( MB_SUCCESS != rval ) return rval;
00276 
00277         int size_pack_array = (int)packed_recv_array.size() + 1;
00278         // the last integer signals whether the zoltan buffer has to be sent too,
00279         // in a separate message: 0 for no, 1 for yes
00280         comm_graph = packed_recv_array;
00281         comm_graph.resize( size_pack_array );  // bigger by 1, to store the semaphore about zBuff
00282         // will add 1 or 2 requests; first one with comm graph, second with zoltan buffer if needed
00283         if( zoltanBuffer.size() > 0 )
00284         {
00285             sendReqs.resize( 2 );
00286             comm_graph[size_pack_array - 1] = 1;  // send zoltanBuffer too
00287             zBuff = zoltanBuffer;  // do a deep copy here, because zoltanBuffer will go out of scope, and
00288             // we need to keep the sent buffer alive until the non-blocking send completes; thanks Karen Devine !
00289         }
00290         else
00291         {
00292             sendReqs.resize( 1 );
00293             comm_graph[size_pack_array - 1] = 0;  // do not send zoltan buffer
00294         }
00295         // we have to use the global (joint) communicator for these sends
00296         ierr = MPI_Isend( &comm_graph[0], size_pack_array, MPI_INT, receiver( 0 ), 20, jcomm,
00297                           &sendReqs[0] );  // we have to use global communicator
00298         if( ierr != 0 ) return MB_FAILURE;
00299         if( zBuff.size() > 0 )
00300         {
00301             ierr = MPI_Isend( &zBuff[0], (int)zBuff.size(), MPI_CHAR, receiver( 0 ), 30, jcomm,
00302                               &sendReqs[1] );  // we have to use global communicator
00303             if( ierr != 0 ) return MB_FAILURE;
00304         }
00305     }
00306     return MB_SUCCESS;
00307 }
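
// Buffer-lifetime note for the non-blocking sends above, as a sketch (names marked as
// hypothetical are not part of this file):
/*
    // WRONG: the payload dies before MPI may be done reading it
    MPI_Request req;
    int dest = receiver( 0 );  // as in send_graph above
    {
        std::vector< char > tmp = build_payload();  // hypothetical helper
        MPI_Isend( &tmp[0], (int)tmp.size(), MPI_CHAR, dest, 30, jcomm, &req );
    }   // tmp is destroyed here, while the send may still be in flight

    // RIGHT (the pattern used for comm_graph / zBuff above): copy the payload into a member
    // that outlives this call, and free it only after MPI_Wait / MPI_Waitall,
    // which is what release_send_buffers() does.
*/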
00308 
00309 // pco gives access to MOAB through get_moab()
00310 // do we need to store "method" as a member variable ?
00311 ErrorCode ParCommGraph::send_mesh_parts( MPI_Comm jcomm, ParallelComm* pco, Range& owned )
00312 {
00313 
00314     ErrorCode rval;
00315     if( split_ranges.empty() )  // in trivial partition
00316     {
00317         rval = split_owned_range( rankInGroup1, owned );
00318         if( rval != MB_SUCCESS ) return rval;
00319         // we know this on the sender side:
00320         corr_tasks = sender_graph[senderTasks[rankInGroup1]];  // copy
00321         corr_sizes = sender_sizes[senderTasks[rankInGroup1]];  // another copy
00322     }
00323 
00324     int indexReq = 0;
00325     int ierr;                                                // MPI error
00326     if( is_root_sender() ) indexReq = (int)sendReqs.size();  // we could have 1 or 2 MPI_Request's already
00327     sendReqs.resize( indexReq + split_ranges.size() );
00328     for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00329     {
00330         int receiver_proc = it->first;
00331         Range ents        = it->second;
00332 
00333         // add necessary vertices too
00334         Range verts;
00335         rval = pco->get_moab()->get_adjacencies( ents, 0, false, verts, Interface::UNION );
00336         if( rval != MB_SUCCESS )
00337         {
00338             std::cout << " can't get adjacencies for entities to send\n";
00339             return rval;
00340         }
00341         ents.merge( verts );
00342         ParallelComm::Buffer* buffer = new ParallelComm::Buffer( ParallelComm::INITIAL_BUFF_SIZE );
00343         buffer->reset_ptr( sizeof( int ) );
00344         rval = pco->pack_buffer( ents, false, true, false, -1, buffer );
00345         if( rval != MB_SUCCESS )
00346         {
00347             std::cout << " can't pack buffer for entities to send\n";
00348             return rval;
00349         }
00350         int size_pack = buffer->get_current_size();
00351 
00352         // TODO there could be an endianness issue here; check
00353         // the size of the buffer is no longer sent first as a separate int; the old code was:
00354         /* ierr = MPI_Isend(buffer->mem_ptr, 1, MPI_INT, receiver_proc, 1, jcomm,
00355          &sendReqs[indexReq]); // we have to use global communicator
00356          if (ierr!=0) return MB_FAILURE;
00357          indexReq++;*/
00358 
00359         ierr = MPI_Isend( buffer->mem_ptr, size_pack, MPI_CHAR, receiver_proc, 2, jcomm,
00360                           &sendReqs[indexReq] );  // we have to use global communicator
00361         if( ierr != 0 ) return MB_FAILURE;
00362         indexReq++;
00363         localSendBuffs.push_back(
00364             buffer );  // these buffers will be cleared only after all MPI_Request's have been waited upon
00365     }
00366     return MB_SUCCESS;
00367 }
00368 
00369 // this is called on receiver side
00370 ErrorCode ParCommGraph::receive_comm_graph( MPI_Comm jcomm,
00371                                             ParallelComm* pco,
00372                                             std::vector< int >& pack_array,
00373                                             std::vector< char >& zoltanBuffer )
00374 {
00375     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
00376     // knows what data to expect
00377     MPI_Comm receive = pco->comm();
00378     int size_pack_array, ierr;
00379     MPI_Status status;
00380     if( rootReceiver )
00381     {
00382         ierr = MPI_Probe( sender( 0 ), 20, jcomm, &status );
00383         if( 0 != ierr )
00384         {
00385             std::cout << " MPI_Probe failure: " << ierr << "\n";
00386             return MB_FAILURE;
00387         }
00388         // get the count of data received from the MPI_Status structure
00389         ierr = MPI_Get_count( &status, MPI_INT, &size_pack_array );
00390         if( 0 != ierr )
00391         {
00392             std::cout << " MPI_Get_count failure: " << ierr << "\n";
00393             return MB_FAILURE;
00394         }
00395 #ifdef VERBOSE
00396         std::cout << " receive comm graph size: " << size_pack_array << "\n";
00397 #endif
00398         pack_array.resize( size_pack_array );
00399         ierr = MPI_Recv( &pack_array[0], size_pack_array, MPI_INT, sender( 0 ), 20, jcomm, &status );
00400         if( 0 != ierr ) return MB_FAILURE;
00401 #ifdef VERBOSE
00402         std::cout << " receive comm graph ";
00403         for( int k = 0; k < (int)pack_array.size(); k++ )
00404             std::cout << " " << pack_array[k];
00405         std::cout << "\n";
00406 #endif
00407         // the last int tells if we have another receive to do or not (for  zoltan buffer)
00408         // look at the last value, to see if we receive zoltan buffer or not
00409         int semaphore = pack_array[size_pack_array - 1];
00410         if( 1 == semaphore )
00411         {
00412             // expect zoltanBuffer, the difference is the tag; we still need the joint comm
00413             // the buffer was sent with a nonblocking send, from the root sender
00414             ierr = MPI_Probe( sender( 0 ), 30, jcomm, &status );  // blocking call, to get size
00415             if( 0 != ierr )
00416             {
00417                 std::cout << " MPI_Probe failure: " << ierr << "\n";
00418                 return MB_FAILURE;
00419             }
00420             int incoming_msg_size;
00421             MPI_Get_count( &status, MPI_CHAR, &incoming_msg_size );
00422             zoltanBuffer.resize( incoming_msg_size );
00423             ierr = MPI_Recv( &zoltanBuffer[0], incoming_msg_size, MPI_CHAR, sender( 0 ), 30, jcomm, &status );
00424             if( 0 != ierr )
00425             {
00426                 std::cout << " MPI_Recv failure: " << ierr << "\n";
00427                 return MB_FAILURE;
00428             }
00429         }
00430     }
00431 
00432     // now broadcast this whole array to all receivers, so they know what to expect
00433     ierr = MPI_Bcast( &size_pack_array, 1, MPI_INT, 0, receive );
00434     if( 0 != ierr ) return MB_FAILURE;
00435     pack_array.resize( size_pack_array );
00436     ierr = MPI_Bcast( &pack_array[0], size_pack_array, MPI_INT, 0, receive );
00437     if( 0 != ierr ) return MB_FAILURE;
00438     return MB_SUCCESS;
00439 }
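
// The probe / count / receive pattern used above, as a stand-alone sketch
// (src, tag and comm are placeholders, not names from this file):
/*
    MPI_Status status;
    MPI_Probe( src, tag, comm, &status );              // block until a matching message arrives
    int count;
    MPI_Get_count( &status, MPI_INT, &count );         // message length, in MPI_INT elements
    std::vector< int > data( count );
    MPI_Recv( &data[0], count, MPI_INT, src, tag, comm, &status );
*/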
00440 
00441 ErrorCode ParCommGraph::receive_mesh( MPI_Comm jcomm,
00442                                       ParallelComm* pco,
00443                                       EntityHandle local_set,
00444                                       std::vector< int >& senders_local )
00445 {
00446     ErrorCode rval;
00447     int ierr;
00448     MPI_Status status;
00449     // we also need to fill corresponding mesh info on the other side
00450     corr_tasks = senders_local;
00451     Range newEnts;
00452 
00453     Tag orgSendProcTag;  // this will be a tag set on the received mesh, with info about the
00454                          // task / PE the primary element came from,
00455                          // in the joint communicator;
00456                          // this will be forwarded by the coverage mesh
00457     int defaultInt = -1;  // no processor, so it was not migrated from somewhere else
00458     rval           = pco->get_moab()->tag_get_handle( "orig_sending_processor", 1, MB_TYPE_INTEGER, orgSendProcTag,
00459                                             MB_TAG_DENSE | MB_TAG_CREAT, &defaultInt );MB_CHK_SET_ERR( rval, "can't create original sending processor tag" );
00460     if( !senders_local.empty() )
00461     {
00462         for( size_t k = 0; k < senders_local.size(); k++ )
00463         {
00464             int sender1 = senders_local[k];
00465             // first receive the size of the buffer using probe
00466             /*
00467                  * MPI_Probe(
00468                 int source,
00469                 int tag,
00470                 MPI_Comm comm,
00471                 MPI_Status* status)
00472                  *
00473                  */
00474             ierr = MPI_Probe( sender1, 2, jcomm, &status );
00475             if( 0 != ierr )
00476             {
00477                 std::cout << " MPI_Probe failure in ParCommGraph::receive_mesh " << ierr << "\n";
00478                 return MB_FAILURE;
00479             }
00480             // get the count of data received from the MPI_Status structure
00481             int size_pack;
00482             ierr = MPI_Get_count( &status, MPI_CHAR, &size_pack );
00483             if( 0 != ierr )
00484             {
00485                 std::cout << " MPI_Get_count failure in ParCommGraph::receive_mesh  " << ierr << "\n";
00486                 return MB_FAILURE;
00487             }
00488 
00489             /* ierr = MPI_Recv (&size_pack, 1, MPI_INT, sender1, 1, jcomm, &status);
00490              if (0!=ierr) return MB_FAILURE;*/
00491             // now resize the buffer, then receive it
00492             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_pack );
00493             // buffer->reserve(size_pack);
00494 
00495             ierr = MPI_Recv( buffer->mem_ptr, size_pack, MPI_CHAR, sender1, 2, jcomm, &status );
00496             if( 0 != ierr )
00497             {
00498                 std::cout << " MPI_Recv failure in ParCommGraph::receive_mesh " << ierr << "\n";
00499                 return MB_FAILURE;
00500             }
00501             // now unpack the buffer we just received
00502             Range entities;
00503             std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
00504             std::vector< std::vector< int > > L1p;
00505             std::vector< EntityHandle > L2hloc, L2hrem;
00506             std::vector< unsigned int > L2p;
00507 
00508             buffer->reset_ptr( sizeof( int ) );
00509             std::vector< EntityHandle > entities_vec( entities.size() );
00510             std::copy( entities.begin(), entities.end(), entities_vec.begin() );
00511             rval = pco->unpack_buffer( buffer->buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
00512                                        entities_vec );
00513             delete buffer;
00514             if( MB_SUCCESS != rval ) return rval;
00515 
00516             std::copy( entities_vec.begin(), entities_vec.end(), range_inserter( entities ) );
00517             // we have to add them to the local set
00518             rval = pco->get_moab()->add_entities( local_set, entities );
00519             if( MB_SUCCESS != rval ) return rval;
00520             // corr_sizes is the size of primary entities received
00521             Range verts              = entities.subset_by_dimension( 0 );
00522             Range local_primary_ents = subtract( entities, verts );
00523             if( local_primary_ents.empty() )
00524             {
00525                 // it is possible that all ents sent were vertices (point cloud)
00526                 // then consider primary entities the vertices
00527                 local_primary_ents = verts;
00528             }
00529             else
00530             {
00531                 // set a tag with the original sender for the primary entity
00532                 // will be used later for coverage mesh
00533                 std::vector< int > orig_senders( local_primary_ents.size(), sender1 );
00534                 rval = pco->get_moab()->tag_set_data( orgSendProcTag, local_primary_ents, &orig_senders[0] );
00535             }
00536             corr_sizes.push_back( (int)local_primary_ents.size() );
00537 
00538             newEnts.merge( entities );
00539             // make these in split ranges
00540             split_ranges[sender1] = local_primary_ents;
00541 
00542 #ifdef VERBOSE
00543             std::ostringstream partial_outFile;
00544 
00545             partial_outFile << "part_send_" << sender1 << "."
00546                             << "recv" << rankInJoin << ".vtk";
00547 
00548             // the mesh contains ghosts too, but they are not part of mat/neumann set
00549             // write in serial the file, to see what tags are missing
00550             std::cout << " writing from receiver " << rankInJoin << " from sender " << sender1
00551                       << " entities: " << entities.size() << std::endl;
00552             rval = pco->get_moab()->write_file( partial_outFile.str().c_str(), 0, 0, &local_set,
00553                                                 1 );  // everything on local set received
00554             if( MB_SUCCESS != rval ) return rval;
00555 #endif
00556         }
00557     }
00558     // make sure adjacencies are updated on the new elements
00559 
00560     if( newEnts.empty() )
00561     {
00562         std::cout << "task:" << rankInGroup2 << " in receiver did not receive any entities \n";
00563     }
00564     else
00565     {
00566         // in order for the merging to work, we need to be sure that the adjacencies are updated
00567         // (created)
00568         Range local_verts        = newEnts.subset_by_type( MBVERTEX );
00569         newEnts                  = subtract( newEnts, local_verts );
00570         Core* mb                 = (Core*)pco->get_moab();
00571         AEntityFactory* adj_fact = mb->a_entity_factory();
00572         if( !adj_fact->vert_elem_adjacencies() )
00573             adj_fact->create_vert_elem_adjacencies();
00574         else
00575         {
00576             for( Range::iterator it = newEnts.begin(); it != newEnts.end(); ++it )
00577             {
00578                 EntityHandle eh          = *it;
00579                 const EntityHandle* conn = NULL;
00580                 int num_nodes            = 0;
00581                 rval                     = mb->get_connectivity( eh, conn, num_nodes );
00582                 if( MB_SUCCESS != rval ) return rval;
00583                 adj_fact->notify_create_entity( eh, conn, num_nodes );
00584             }
00585         }
00586     }
00587     return MB_SUCCESS;
00588 }
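
// Recap sketch of the primary-entity selection performed above on each received buffer
// (assumes a populated moab::Range 'entities'):
/*
    Range verts   = entities.subset_by_dimension( 0 );
    Range primary = subtract( entities, verts );
    if( primary.empty() ) primary = verts;  // point cloud: the vertices are the primary entities
*/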
00589 
00590 // VSM: Why is the communicator never used? Remove the argument?
00591 ErrorCode ParCommGraph::release_send_buffers()
00592 {
00593     int ierr, nsize = (int)sendReqs.size();
00594     std::vector< MPI_Status > mult_status;
00595     mult_status.resize( sendReqs.size() );
00596     ierr = MPI_Waitall( nsize, &sendReqs[0], &mult_status[0] );
00597 
00598     if( ierr != 0 ) return MB_FAILURE;
00599     // now we can free all buffers
00600     comm_graph.clear();
00601     zBuff.clear();  // this is for zoltan buffer, it will be used only when sending the
00602                     // the RCB tree from component (sender) root to coupler (receiver) root
00603     std::vector< ParallelComm::Buffer* >::iterator vit;
00604     for( vit = localSendBuffs.begin(); vit != localSendBuffs.end(); ++vit )
00605         delete( *vit );
00606     localSendBuffs.clear();
00607     return MB_SUCCESS;
00608 }
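
// Completion sketch: every MPI_Isend posted earlier must be waited on before its buffer
// can be freed, which is what the MPI_Waitall above guarantees:
/*
    std::vector< MPI_Status > statuses( requests.size() );
    MPI_Waitall( (int)requests.size(), &requests[0], &statuses[0] );
    // only now is it safe to clear comm_graph, zBuff and delete the localSendBuffs entries
*/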
00609 
00610 // again, this will use the send buffers, for nonblocking sends;
00611 // should the receives be non-blocking too?
00612 ErrorCode ParCommGraph::send_tag_values( MPI_Comm jcomm,
00613                                          ParallelComm* pco,
00614                                          Range& owned,
00615                                          std::vector< Tag >& tag_handles )
00616 {
00617     // basically, owned.size() needs to be equal to sum(corr_sizes)
00618     // get info about the tag size, type, etc
00619     int ierr;
00620     Core* mb = (Core*)pco->get_moab();
00621     // get info about the tag
00622     //! Get the size of the specified tag in bytes
00623     int total_bytes_per_entity = 0;  // we need to know, to allocate buffers
00624     ErrorCode rval;
00625     std::vector< int > vect_bytes_per_tag;
00626 #ifdef VERBOSE
00627     std::vector< int > tag_sizes;
00628 #endif
00629     for( size_t i = 0; i < tag_handles.size(); i++ )
00630     {
00631         int bytes_per_tag;
00632         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00633         int tag_size1;  // length
00634         rval = mb->tag_get_length( tag_handles[i], tag_size1 );MB_CHK_ERR( rval );
00635         if( graph_type == DOF_BASED )
00636             bytes_per_tag = bytes_per_tag / tag_size1;  // we send one value per tag, per ID sent;
00637                                                         // its size could be 8 for double, 4 for int, etc
00638         total_bytes_per_entity += bytes_per_tag;
00639         vect_bytes_per_tag.push_back( bytes_per_tag );
00640 #ifdef VERBOSE
00641         int tag_size;
00642         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00643         tag_sizes.push_back( tag_size );
00644 #endif
00645     }
00646 
00647     int indexReq = 0;
00648     if( graph_type == INITIAL_MIGRATE )  // original send
00649     {
00650         // use the buffers data structure to allocate memory for sending the tags
00651         sendReqs.resize( split_ranges.size() );
00652 
00653         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00654         {
00655             int receiver_proc = it->first;
00656             Range ents        = it->second;  // primary entities, with the tag data
00657             int size_buffer   = 4 + total_bytes_per_entity *
00658                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00659             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00660 
00661             buffer->reset_ptr( sizeof( int ) );
00662             for( size_t i = 0; i < tag_handles.size(); i++ )
00663             {
00664                 // copy tag data to buffer->buff_ptr, and send the buffer (we could have used
00665                 // regular char arrays)
00666                 rval = mb->tag_get_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00667                 // advance the buffer pointer
00668                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00669             }
00670             *( (int*)buffer->mem_ptr ) = size_buffer;
00671             // int size_pack = buffer->get_current_size(); // debug check
00672             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00673                               &sendReqs[indexReq] );  // we have to use global communicator
00674             if( ierr != 0 ) return MB_FAILURE;
00675             indexReq++;
00676             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00677         }
00678     }
00679     else if( graph_type == COVERAGE )
00680     {
00681         // we know that we will need to send some tag data in a specific order
00682         // first, get the ids of the local elements, from owned Range; arrange the buffer in order
00683         // of increasing global id
00684         Tag gidTag = mb->globalId_tag();
00685         std::vector< int > gids;
00686         gids.resize( owned.size() );
00687         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00688         std::map< int, EntityHandle > gidToHandle;
00689         size_t i = 0;
00690         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00691         {
00692             EntityHandle eh        = *it;
00693             gidToHandle[gids[i++]] = eh;
00694         }
00695         // now, pack the data and send it
00696         sendReqs.resize( involved_IDs_map.size() );
00697         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00698              mit != involved_IDs_map.end(); mit++ )
00699         {
00700             int receiver_proc        = mit->first;
00701             std::vector< int >& eids = mit->second;
00702             int size_buffer          = 4 + total_bytes_per_entity *
00703                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00704             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00705             buffer->reset_ptr( sizeof( int ) );
00706 #ifdef VERBOSE
00707             std::ofstream dbfile;
00708             std::stringstream outf;
00709             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00710             dbfile.open( outf.str().c_str() );
00711             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00712 #endif
00713             // copy tag data to buffer->buff_ptr, and send the buffer (we could have used regular
00714             // char arrays) pack data by tag, to be consistent with above, even though we loop
00715             // through the entities for each tag
00716 
00717             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00718             {
00719                 int eID         = *it;
00720                 EntityHandle eh = gidToHandle[eID];
00721                 for( i = 0; i < tag_handles.size(); i++ )
00722                 {
00723                     rval = mb->tag_get_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );
00724                     if( rval != MB_SUCCESS )
00725                     {
00726                         delete buffer;  // free parallel comm buffer first
00727 
00728                         MB_SET_ERR( rval, "Tag get data failed" );
00729                     }
00730 #ifdef VERBOSE
00731                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00732                     double* vals = (double*)( buffer->buff_ptr );
00733                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00734                     {
00735                         dbfile << " " << *vals;
00736                         vals++;
00737                     }
00738                     dbfile << "\n";
00739 #endif
00740                     buffer->buff_ptr += vect_bytes_per_tag[i];
00741                 }
00742             }
00743 
00744 #ifdef VERBOSE
00745             dbfile.close();
00746 #endif
00747             *( (int*)buffer->mem_ptr ) = size_buffer;
00748             // int size_pack = buffer->get_current_size(); // debug check
00749             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00750                               &sendReqs[indexReq] );  // we have to use global communicator
00751             if( ierr != 0 ) return MB_FAILURE;
00752             indexReq++;
00753             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00754         }
00755     }
00756     else if( graph_type == DOF_BASED )
00757     {
00758         // need to fill up the buffer, in the order desired, send it
00759         // get all the tags, for all owned entities, and pack the buffers accordingly
00760         // we do not want to get the tags by entity, it may be too expensive
00761         std::vector< std::vector< double > > valuesTags;
00762         valuesTags.resize( tag_handles.size() );
00763         for( size_t i = 0; i < tag_handles.size(); i++ )
00764         {
00765 
00766             int bytes_per_tag;
00767             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00768             valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
00769             // fill the whole array, we will pick up from here
00770             rval = mb->tag_get_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
00771         }
00772         // now, pack the data and send it
00773         sendReqs.resize( involved_IDs_map.size() );
00774         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00775              mit != involved_IDs_map.end(); mit++ )
00776         {
00777             int receiver_proc                   = mit->first;
00778             std::vector< int >& eids            = mit->second;
00779             std::vector< int >& index_in_values = map_index[receiver_proc];
00780             std::vector< int >& index_ptr       = map_ptr[receiver_proc];  // this is eids.size()+1;
00781             int size_buffer                     = 4 + total_bytes_per_entity *
00782                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00783             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00784             buffer->reset_ptr( sizeof( int ) );
00785 #ifdef VERBOSE
00786             std::ofstream dbfile;
00787             std::stringstream outf;
00788             outf << "from_" << rankInJoin << "_send_to_" << receiver_proc << ".txt";
00789             dbfile.open( outf.str().c_str() );
00790             dbfile << "from " << rankInJoin << " send to " << receiver_proc << "\n";
00791 #endif
00792             // copy tag data to buffer->buff_ptr, and send the buffer
00793             // pack data by tag, to be consistent with above
00794             int j = 0;
00795             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00796             {
00797                 int index_in_v = index_in_values[index_ptr[j]];
00798                 for( size_t i = 0; i < tag_handles.size(); i++ )
00799                 {
00800                     // right now, move just doubles; but it could be any type of tag
00801                     *( (double*)( buffer->buff_ptr ) ) = valuesTags[i][index_in_v];
00802                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
00803                 }
00804             }
00805             *( (int*)buffer->mem_ptr ) = size_buffer;
00806             // int size_pack = buffer->get_current_size(); // debug check
00807             ierr = MPI_Isend( buffer->mem_ptr, size_buffer, MPI_CHAR, receiver_proc, 222, jcomm,
00808                               &sendReqs[indexReq] );  // we have to use global communicator
00809             if( ierr != 0 ) return MB_FAILURE;
00810             indexReq++;
00811             localSendBuffs.push_back( buffer );  // we will release them after nonblocking sends are completed
00812         }
00813     }
00814     return MB_SUCCESS;
00815 }
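
// Buffer-size arithmetic used in all three branches above, with illustrative numbers:
// two tags, a scalar double (8 bytes) and a 3-component double (24 bytes), give
// total_bytes_per_entity = 32; for 1000 entities the send buffer is
//   size_buffer = 4 + 32 * 1000 = 32004 bytes
// (the leading 4 bytes hold the buffer size itself, written at buffer->mem_ptr).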
00816 
00817 ErrorCode ParCommGraph::receive_tag_values( MPI_Comm jcomm,
00818                                             ParallelComm* pco,
00819                                             Range& owned,
00820                                             std::vector< Tag >& tag_handles )
00821 {
00822     // opposite to sending, we will use blocking receives
00823     int ierr;
00824     MPI_Status status;
00825     // basically, owned.size() needs to be equal to sum(corr_sizes)
00826     // get info about the tag size, type, etc
00827     Core* mb = (Core*)pco->get_moab();
00828     // get info about the tag
00829     //! Get the size of the specified tag in bytes
00830     ErrorCode rval;
00831     int total_bytes_per_entity = 0;
00832     std::vector< int > vect_bytes_per_tag;
00833 #ifdef VERBOSE
00834     std::vector< int > tag_sizes;
00835 #endif
00836     for( size_t i = 0; i < tag_handles.size(); i++ )
00837     {
00838         int bytes_per_tag;
00839         rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00840         total_bytes_per_entity += bytes_per_tag;
00841         vect_bytes_per_tag.push_back( bytes_per_tag );
00842 #ifdef VERBOSE
00843         int tag_size;
00844         rval = mb->tag_get_length( tag_handles[i], tag_size );MB_CHK_ERR( rval );
00845         tag_sizes.push_back( tag_size );
00846 #endif
00847     }
00848 
00849     if( graph_type == INITIAL_MIGRATE )
00850     {
00851         // std::map<int, Range> split_ranges;
00852         // rval = split_owned_range ( owned);MB_CHK_ERR ( rval );
00853 
00854         // use the buffers data structure to allocate memory for receiving the tags
00855         for( std::map< int, Range >::iterator it = split_ranges.begin(); it != split_ranges.end(); it++ )
00856         {
00857             int sender_proc = it->first;
00858             Range ents      = it->second;  // primary entities, with the tag data, we will receive
00859             int size_buffer = 4 + total_bytes_per_entity *
00860                                       (int)ents.size();  // hopefully, below 2B; if more, we have a big problem ...
00861             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00862 
00863             buffer->reset_ptr( sizeof( int ) );
00864 
00865             *( (int*)buffer->mem_ptr ) = size_buffer;
00866             // int size_pack = buffer->get_current_size(); // debug check
00867 
00868             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00869             if( ierr != 0 ) return MB_FAILURE;
00870             // now set the tag
00871             // copy to tag
00872 
00873             for( size_t i = 0; i < tag_handles.size(); i++ )
00874             {
00875                 rval = mb->tag_set_data( tag_handles[i], ents, (void*)( buffer->buff_ptr ) );
00876                 buffer->buff_ptr += vect_bytes_per_tag[i] * ents.size();
00877             }
00878             delete buffer;  // no need for it afterwards
00879             MB_CHK_ERR( rval );
00880         }
00881     }
00882     else if( graph_type == COVERAGE )  // receive buffer, then extract tag data, in a loop
00883     {
00884         // we know that we will need to receive some tag data in a specific order (by ids stored)
00885         // first, get the ids of the local elements, from owned Range; unpack the buffer in order
00886         Tag gidTag = mb->globalId_tag();
00887         std::vector< int > gids;
00888         gids.resize( owned.size() );
00889         rval = mb->tag_get_data( gidTag, owned, &gids[0] );MB_CHK_ERR( rval );
00890         std::map< int, EntityHandle > gidToHandle;
00891         size_t i = 0;
00892         for( Range::iterator it = owned.begin(); it != owned.end(); it++ )
00893         {
00894             EntityHandle eh        = *it;
00895             gidToHandle[gids[i++]] = eh;
00896         }
00897         //
00898         // now, unpack the data and set it to the tag
00899         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00900              mit != involved_IDs_map.end(); mit++ )
00901         {
00902             int sender_proc          = mit->first;
00903             std::vector< int >& eids = mit->second;
00904             int size_buffer          = 4 + total_bytes_per_entity *
00905                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00906             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00907             buffer->reset_ptr( sizeof( int ) );
00908             *( (int*)buffer->mem_ptr ) = size_buffer;  // this is really not necessary, it should receive this too
00909 
00910             // receive the buffer
00911             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00912             if( ierr != 0 ) return MB_FAILURE;
00913 // start copy
00914 #ifdef VERBOSE
00915             std::ofstream dbfile;
00916             std::stringstream outf;
00917             outf << "recvFrom_" << sender_proc << "_on_proc_" << rankInJoin << ".txt";
00918             dbfile.open( outf.str().c_str() );
00919             dbfile << "recvFrom_" << sender_proc << " on proc  " << rankInJoin << "\n";
00920 #endif
00921 
00922             // copy tag data from buffer->buff_ptr
00923             // data is arranged by tag, repeating the loop for each entity
00924             // maybe it should be arranged by entity instead, not by tag (so one loop over
00925             // entities, outside)
00926 
00927             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++ )
00928             {
00929                 int eID                                      = *it;
00930                 std::map< int, EntityHandle >::iterator mit2 = gidToHandle.find( eID );
00931                 if( mit2 == gidToHandle.end() )
00932                 {
00933                     std::cout << " on rank: " << rankInJoin << " cannot find entity handle with global ID " << eID
00934                               << "\n";
00935                     return MB_FAILURE;
00936                 }
00937                 EntityHandle eh = mit2->second;
00938                 for( i = 0; i < tag_handles.size(); i++ )
00939                 {
00940                     rval = mb->tag_set_data( tag_handles[i], &eh, 1, (void*)( buffer->buff_ptr ) );MB_CHK_ERR( rval );
00941 #ifdef VERBOSE
00942                     dbfile << "global ID " << eID << " local handle " << mb->id_from_handle( eh ) << " vals: ";
00943                     double* vals = (double*)( buffer->buff_ptr );
00944                     for( int kk = 0; kk < tag_sizes[i]; kk++ )
00945                     {
00946                         dbfile << " " << *vals;
00947                         vals++;
00948                     }
00949                     dbfile << "\n";
00950 #endif
00951                     buffer->buff_ptr += vect_bytes_per_tag[i];
00952                 }
00953             }
00954 
00955             // delete receive buffer
00956             delete buffer;
00957 #ifdef VERBOSE
00958             dbfile.close();
00959 #endif
00960         }
00961     }
00962     else if( graph_type == DOF_BASED )
00963     {
00964         // need to fill up the values for each tag, in the order desired, from the buffer received
00965         //
00966         // get all the tags, for all owned entities, and pack the buffers accordingly
00967         // we do not want to get the tags by entity, it may be too expensive
00968         std::vector< std::vector< double > > valuesTags;
00969         valuesTags.resize( tag_handles.size() );
00970         for( size_t i = 0; i < tag_handles.size(); i++ )
00971         {
00972             int bytes_per_tag;
00973             rval = mb->tag_get_bytes( tag_handles[i], bytes_per_tag );MB_CHK_ERR( rval );
00974             valuesTags[i].resize( owned.size() * bytes_per_tag / sizeof( double ) );
00975             // fill the whole array, we will pick up from here
00976             // we will fill this array, using data from received buffer
00977             // rval = mb->tag_get_data(owned, (void*)( &(valuesTags[i][0])) );MB_CHK_ERR ( rval );
00978         }
00979         // now, unpack the data and set the tags
00980         for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
00981              mit != involved_IDs_map.end(); mit++ )
00982         {
00983             int sender_proc                     = mit->first;
00984             std::vector< int >& eids            = mit->second;
00985             std::vector< int >& index_in_values = map_index[sender_proc];
00986             std::vector< int >& index_ptr       = map_ptr[sender_proc];  // this is eids.size()+1;
00987             int size_buffer                     = 4 + total_bytes_per_entity *
00988                                       (int)eids.size();  // hopefully, below 2B; if more, we have a big problem ...
00989             ParallelComm::Buffer* buffer = new ParallelComm::Buffer( size_buffer );
00990             buffer->reset_ptr( sizeof( int ) );
00991 
00992             // receive the buffer
00993             ierr = MPI_Recv( buffer->mem_ptr, size_buffer, MPI_CHAR, sender_proc, 222, jcomm, &status );
00994             if( ierr != 0 ) return MB_FAILURE;
00995             // use the values in buffer to populate valuesTag arrays, fill it up!
00996             int j = 0;
00997             for( std::vector< int >::iterator it = eids.begin(); it != eids.end(); it++, j++ )
00998             {
00999                 for( size_t i = 0; i < tag_handles.size(); i++ )
01000                 {
01001                     // right now, move just doubles; but it could be any type of tag
01002                     double val = *( (double*)( buffer->buff_ptr ) );
01003                     buffer->buff_ptr += 8;  // we know we are working with doubles only !!!
01004                     for( int k = index_ptr[j]; k < index_ptr[j + 1]; k++ )
01005                         valuesTags[i][index_in_values[k]] = val;
01006                 }
01007             }
01008             // we are done with the buffer in which we received tags, release / delete it
01009             delete buffer;
01010         }
01011         // now we populated the values for all tags; set now the tags!
01012         for( size_t i = 0; i < tag_handles.size(); i++ )
01013         {
01014             // we will fill this array, using data from received buffer
01015             rval = mb->tag_set_data( tag_handles[i], owned, (void*)( &( valuesTags[i][0] ) ) );MB_CHK_ERR( rval );
01016         }
01017     }
01018     return MB_SUCCESS;
01019 }
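
// Indexing sketch for the DOF_BASED branch above: map_ptr / map_index form a CSR-like
// structure. With illustrative values for one sender,
//   eids      = [ 10, 12 ]
//   map_ptr   = [ 0, 1, 3 ]
//   map_index = [ 5, 0, 2 ]
// the value received for global ID 10 is written to local position 5 of each tag array,
// and the value for global ID 12 is written to local positions 0 and 2.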
01020 /*
01021  * settle the send graph on the sender side: fill involved_IDs_map from the coverage IDs tuple list
01022  */
01023 ErrorCode ParCommGraph::settle_send_graph( TupleList& TLcovIDs )
01024 {
01025     // fill involved_IDs_map with data
01026     // will have "receiving proc" and global id of element
01027     int n      = TLcovIDs.get_n();
01028     graph_type = COVERAGE;  // do not rely only on involved_IDs_map.size(); this can be 0 in some cases
01029     for( int i = 0; i < n; i++ )
01030     {
01031         int to_proc      = TLcovIDs.vi_wr[2 * i];
01032         int globalIdElem = TLcovIDs.vi_wr[2 * i + 1];
01033         involved_IDs_map[to_proc].push_back( globalIdElem );
01034     }
01035 #ifdef VERBOSE
01036     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
01037          mit++ )
01038     {
01039         std::cout << " towards task " << mit->first << " send: " << mit->second.size() << " cells " << std::endl;
01040         for( size_t i = 0; i < mit->second.size(); i++ )
01041         {
01042             std::cout << " " << mit->second[i];
01043         }
01044         std::cout << std::endl;
01045     }
01046 #endif
01047     return MB_SUCCESS;
01048 }
01049 
01050 // this will set involved_IDs_map, which will store all the ids to be received from each sender task
01051 void ParCommGraph::SetReceivingAfterCoverage(
01052     std::map< int, std::set< int > >& idsFromProcs )  // will make sense only on receivers, right now after cov
01053 {
01054     for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
01055     {
01056         int fromProc            = mt->first;
01057         std::set< int >& setIds = mt->second;
01058         involved_IDs_map[fromProc].resize( setIds.size() );
01059         std::vector< int >& listIDs = involved_IDs_map[fromProc];
01060         size_t indx                 = 0;
01061         for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
01062         {
01063             int valueID     = *st;
01064             listIDs[indx++] = valueID;
01065         }
01066     }
01067     graph_type = COVERAGE;
01068     return;
01069 }
01070 //#define VERBOSE
01071 void ParCommGraph::settle_comm_by_ids( int comp, TupleList& TLBackToComp, std::vector< int >& valuesComp )
01072 {
01073     // settle comm graph on comp
01074     if( rootSender || rootReceiver ) std::cout << " settle comm graph by id on component " << comp << "\n";
01075     int n = TLBackToComp.get_n();
01076     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
01077     // cases
01078     std::map< int, std::set< int > > uniqueIDs;
01079 
01080     for( int i = 0; i < n; i++ )
01081     {
01082         int to_proc  = TLBackToComp.vi_wr[3 * i + 2];
01083         int globalId = TLBackToComp.vi_wr[3 * i + 1];
01084         uniqueIDs[to_proc].insert( globalId );
01085     }
01086 
01087     // Vector to store each element
01088     // together with its original index
01089     std::vector< std::pair< int, int > > vp;
01090     vp.reserve( valuesComp.size() );
01091 
01092     // Inserting element in pair vector
01093     // to keep track of previous indexes in valuesComp
01094     for( int i = 0; i < (int)valuesComp.size(); ++i )
01095     {
01096         vp.push_back( std::make_pair( valuesComp[i], i ) );
01097     }
01098     // Sorting pair vector
01099     sort( vp.begin(), vp.end() );
01100 
01101     // vp[i].first, second
01102 
01103     // now count how many times each value appears in the sorted order (i.e. in valuesComp)
01104     for( std::map< int, std::set< int > >::iterator it = uniqueIDs.begin(); it != uniqueIDs.end(); it++ )
01105     {
01106         int procId                  = it->first;
01107         std::set< int >& nums       = it->second;
01108         std::vector< int >& indx    = map_ptr[procId];
01109         std::vector< int >& indices = map_index[procId];
01110         indx.resize( nums.size() + 1 );
01111         int indexInVp = 0;
01112         int indexVal  = 0;
01113         indx[0]       = 0;  // start from 0
01114         for( std::set< int >::iterator sst = nums.begin(); sst != nums.end(); sst++, indexVal++ )
01115         {
01116             int val = *sst;
01117             involved_IDs_map[procId].push_back( val );
01118             indx[indexVal + 1] = indx[indexVal];
01119             while( ( indexInVp < (int)valuesComp.size() ) && ( vp[indexInVp].first <= val ) )  // should be equal !
01120             {
01121                 if( vp[indexInVp].first == val )
01122                 {
01123                     indx[indexVal + 1]++;
01124                     indices.push_back( vp[indexInVp].second );
01125                 }
01126                 indexInVp++;
01127             }
01128         }
01129     }
01130 #ifdef VERBOSE
01131     std::stringstream f1;
01132     std::ofstream dbfile;
01133     f1 << "Involve_" << comp << "_" << rankInJoin << ".txt";
01134     dbfile.open( f1.str().c_str() );
01135     for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin(); mit != involved_IDs_map.end();
01136          mit++ )
01137     {
01138         int corrTask                = mit->first;
01139         std::vector< int >& corrIds = mit->second;
01140         std::vector< int >& indx    = map_ptr[corrTask];
01141         std::vector< int >& indices = map_index[corrTask];
01142 
01143         dbfile << " towards proc " << corrTask << " \n";
01144         for( int i = 0; i < (int)corrIds.size(); i++ )
01145         {
01146             dbfile << corrIds[i] << " [" << indx[i] << "," << indx[i + 1] << ")  : ";
01147             for( int j = indx[i]; j < indx[i + 1]; j++ )
01148                 dbfile << indices[j] << " ";
01149             dbfile << "\n";
01150         }
01151         dbfile << " \n";
01152     }
01153     dbfile.close();
01154 #endif
01155 
01156     graph_type = DOF_BASED;
01157     // now we need to fill the back-and-forth information needed to pack the arrays;
01158     // for example, from spectral to involved_IDs_map, in case we want to send data from
01159     // spectral to phys
01160 }
01161 //#undef VERBOSE
01162 // new partition calculation
01163 ErrorCode ParCommGraph::compute_partition( ParallelComm* pco, Range& owned, int met, std::vector< char >& zoltanBuffer )
01164 {
01165     // we are on a sender task, and need to compute a new partition;
01166     // primary cells need to be distributed to the receiver tasks
01167     // first, we will use a graph partitioner, with zoltan;
01168     // in the graph that we need to build, the first layer of ghosts is needed;
01169     // can we avoid that? For example, we can find out from each boundary edge/face what the
01170     // cell on the other side is, then form the global graph, and call zoltan in parallel;
01171     // met 1 is the graph partitioner and met 2 the geometric partitioner; for method 2
01172     // (geometry) we do not need any ghost exchange
01173 
01174     // find first edges that are shared
01175     if( owned.empty() )
01176         return MB_SUCCESS;  // nothing to do; an empty partition is not allowed, so maybe we
01177                             // should return an error instead
01178     Core* mb = (Core*)pco->get_moab();
01179 
01180     double t1, t2, t3;
01181     t1               = MPI_Wtime();
01182     int primaryDim   = mb->dimension_from_handle( *owned.rbegin() );
01183     int interfaceDim = primaryDim - 1;  // should be 1 or 2
01184     Range sharedEdges;
01185     ErrorCode rval;
01186 
01187     std::vector< int > shprocs( MAX_SHARING_PROCS );
01188     std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );
01189 
01190     Tag gidTag = mb->globalId_tag();
01191     int np;
01192     unsigned char pstatus;
01193 
01194     std::multimap< int, int > extraGraphEdges;
01195     // std::map<int, int> adjCellsId;
01196     std::map< int, int > extraCellsProc;
01197     // if method is 2, no need to do the exchange for adjacent cells across partition boundary
01198     // these maps above will be empty for method 2 (geometry)
01199     if( 1 == met )
01200     {
01201         rval = pco->get_shared_entities( /*int other_proc*/ -1, sharedEdges, interfaceDim,
01202                                          /*const bool iface*/ true );MB_CHK_ERR( rval );
01203 
01204 #ifdef VERBOSE
01205         std::cout << " on sender task " << pco->rank() << " number of shared interface cells " << sharedEdges.size()
01206                   << "\n";
01207 #endif
01208         // find to what processors we need to send the ghost info about the edge
01209         // first determine the local graph; what elements are adjacent to each cell in owned range
01210         // cells that are sharing a partition interface edge, are identified first, and form a map
01211         TupleList TLe;                                     // tuple list for cells
01212         TLe.initialize( 2, 0, 1, 0, sharedEdges.size() );  // send to, id of adj cell, remote edge
01213         TLe.enableWriteAccess();
01214 
01215         std::map< EntityHandle, int > edgeToCell;  // from local boundary edge to adjacent cell id
01216         // will be changed after
01217         for( Range::iterator eit = sharedEdges.begin(); eit != sharedEdges.end(); eit++ )
01218         {
01219             EntityHandle edge = *eit;
01220             // get the adjacent cell
01221             Range adjEnts;
01222             rval = mb->get_adjacencies( &edge, 1, primaryDim, false, adjEnts );MB_CHK_ERR( rval );
01223             if( adjEnts.size() > 0 )
01224             {
01225                 EntityHandle adjCell = adjEnts[0];
01226                 int gid;
01227                 rval = mb->tag_get_data( gidTag, &adjCell, 1, &gid );MB_CHK_ERR( rval );
01228                 rval = pco->get_sharing_data( edge, &shprocs[0], &shhandles[0], pstatus, np );MB_CHK_ERR( rval );
01229                 int n                = TLe.get_n();
01230                 TLe.vi_wr[2 * n]     = shprocs[0];
01231                 TLe.vi_wr[2 * n + 1] = gid;
01232                 TLe.vul_wr[n]        = shhandles[0];  // the remote edge corresponding to shared edge
01233                 edgeToCell[edge]     = gid;           // store the map between edge and local id of adj cell
01234                 TLe.inc_n();
01235             }
01236         }
01237 
01238 #ifdef VERBOSE
01239         std::stringstream ff2;
01240         ff2 << "TLe_" << pco->rank() << ".txt";
01241         TLe.print_to_file( ff2.str().c_str() );
01242 #endif
01243         // send the data to the other processors:
01244         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLe, 0 );
01245         // on receiver side, each local edge will have the remote cell adjacent to it!
01246 
01247         int ne = TLe.get_n();
01248         for( int i = 0; i < ne; i++ )
01249         {
01250             int sharedProc         = TLe.vi_rd[2 * i];       // this info is coming from here, originally
01251             int remoteCellID       = TLe.vi_rd[2 * i + 1];   // this is the id of the remote cell, on sharedProc
01252             EntityHandle localCell = TLe.vul_rd[i];          // this is now local edge/face on this proc
01253             int localCellId        = edgeToCell[localCell];  // this is the local cell  adjacent to edge/face
01254             // now, we will need to add to the graph the pair <localCellId, remoteCellID>
01255             std::pair< int, int > extraAdj = std::make_pair( localCellId, remoteCellID );
01256             extraGraphEdges.insert( extraAdj );
01257             // adjCellsId [edgeToCell[localCell]] = remoteCellID;
01258             extraCellsProc[remoteCellID] = sharedProc;
01259 #ifdef VERBOSE
01260             std::cout << "local ID " << edgeToCell[localCell] << " remote cell ID: " << remoteCellID << "\n";
01261 #endif
01262         }
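        // For example (hypothetical ids): if the cell with global id 12 on this task shares a
        // partition edge with the cell with global id 40 owned by task 3, then after the
        // crystal-router exchange extraGraphEdges contains the pair (12, 40) and
        // extraCellsProc[40] == 3; these extra edges let zoltan see the global graph.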
01263     }
01264     t2 = MPI_Wtime();
01265     if( rootSender ) std::cout << " time preparing the input for Zoltan:" << t2 - t1 << " seconds. \n";
01266     // we now have the adjacent cell ids across the partition boundary; call zoltan for the parallel partition
01267 #ifdef MOAB_HAVE_ZOLTAN
01268     ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mb, pco );
01269     if( 1 <= met )  //  partition in zoltan, either graph or geometric partitioner
01270     {
01271         std::map< int, Range > distribution;  // how to distribute owned elements by processors in receiving groups
01272         // in how many tasks do we want to be distributed?
01273         int numNewPartitions = (int)receiverTasks.size();
01274         Range primaryCells   = owned.subset_by_dimension( primaryDim );
01275         rval = mbZTool->partition_owned_cells( primaryCells, extraGraphEdges, extraCellsProc, numNewPartitions,
01276                                                distribution, met, zoltanBuffer );MB_CHK_ERR( rval );
01277         for( std::map< int, Range >::iterator mit = distribution.begin(); mit != distribution.end(); mit++ )
01278         {
01279             int part_index = mit->first;
01280             assert( part_index < numNewPartitions );
01281             split_ranges[receiverTasks[part_index]] = mit->second;
01282         }
01283     }
01284     // delete now the partitioner
01285     delete mbZTool;
01286 #endif
01287     t3 = MPI_Wtime();
01288     if( rootSender ) std::cout << " time spent by Zoltan " << t3 - t2 << " seconds. \n";
01289     return MB_SUCCESS;
01290 }
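// A minimal usage sketch (hypothetical variable names, assuming pco, joinComm and the range of
// owned cells are already set up on the sender tasks): compute a new partition with the graph
// method (met == 1) and then ship the resulting graph to the receivers:
//
//     std::vector< char > zbuf;
//     ErrorCode rval = cgraph->compute_partition( pco, ownedCells, 1, zbuf );MB_CHK_ERR( rval );
//     rval           = cgraph->send_graph_partition( pco, joinComm, zbuf );MB_CHK_ERR( rval );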
01291 
01292 // after the map is read, we need to know what entities we need to send to the receiver
01293 ErrorCode ParCommGraph::set_split_ranges( int comp,
01294                                           TupleList& TLBackToComp1,
01295                                           std::vector< int >& valuesComp1,
01296                                           int lenTag,
01297                                           Range& ents_of_interest,
01298                                           int /*type*/ )
01299 {
01300     // settle split_ranges // same role as partitioning
01301     if( rootSender ) std::cout << " find split_ranges on component " << comp << "  according to read map \n";
01302     // collect, for each destination processor, the set of global IDs
01303     // that it should receive, based on the TLBackToComp1 tuples
01304     int n = TLBackToComp1.get_n();
01305     // third_method = true; // do not rely only on involved_IDs_map.size(); this can be 0 in some
01306     // cases
01307     std::map< int, std::set< int > > uniqueIDs;
01308 
01309     for( int i = 0; i < n; i++ )
01310     {
01311         int to_proc  = TLBackToComp1.vi_wr[3 * i + 2];
01312         int globalId = TLBackToComp1.vi_wr[3 * i + 1];
01313         uniqueIDs[to_proc].insert( globalId );
01314     }
01315 
01316     for( int i = 0; i < (int)ents_of_interest.size(); i++ )
01317     {
01318         EntityHandle ent = ents_of_interest[i];
01319         for( int j = 0; j < lenTag; j++ )
01320         {
01321             int marker = valuesComp1[i * lenTag + j];
01322             for( auto mit = uniqueIDs.begin(); mit != uniqueIDs.end(); mit++ )
01323             {
01324                 int proc                = mit->first;
01325                 std::set< int >& setIds = mit->second;
01326                 if( setIds.find( marker ) != setIds.end() )
01327                 {
01328                     split_ranges[proc].insert( ent );
01329                 }
01330             }
01331         }
01332     }
01333 
01334     return MB_SUCCESS;
01335 }
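// For example (hypothetical values): with lenTag == 4, if the markers of an entity are
// {101, 102, 103, 104} and uniqueIDs[5] contains 103, the entity is inserted into
// split_ranges[5]; an entity can end up in several split_ranges if its markers are
// requested by more than one receiver task.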
01336 
01337 // new methods to migrate mesh after reading map
01338 ErrorCode ParCommGraph::form_tuples_to_migrate_mesh( Interface* mb,
01339                                                      TupleList& TLv,
01340                                                      TupleList& TLc,
01341                                                      int type,
01342                                                      int lenTagType1 )
01343 {
01344     // we will always need GlobalID tag
01345     Tag gidtag = mb->globalId_tag();
01346     // for Type1, we need GLOBAL_DOFS tag;
01347     Tag gds;
01348     ErrorCode rval;
01349     if( type == 1 )
01350     {
01351         rval = mb->tag_get_handle( "GLOBAL_DOFS", gds );MB_CHK_ERR( rval );
01352     }
01353     // find vertices to be sent, for each of the split_ranges procs
01354     std::map< int, Range > verts_to_proc;
01355     int numv = 0, numc = 0;
01356     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01357     {
01358         int to_proc = it->first;
01359         Range verts;
01360         if( type != 2 )
01361         {
01362             rval = mb->get_connectivity( it->second, verts );MB_CHK_ERR( rval );
01363             numc += (int)it->second.size();
01364         }
01365         else
01366             verts = it->second;
01367         verts_to_proc[to_proc] = verts;
01368         numv += (int)verts.size();
01369     }
01370     // first vertices:
01371     TLv.initialize( 2, 0, 0, 3, numv );  // to proc, GLOBAL ID, 3 real coordinates
01372     TLv.enableWriteAccess();
01373     // use the global id of vertices for connectivity
01374     for( auto it = verts_to_proc.begin(); it != verts_to_proc.end(); it++ )
01375     {
01376         int to_proc  = it->first;
01377         Range& verts = it->second;
01378         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01379         {
01380             EntityHandle v   = *vit;
01381             int n            = TLv.get_n();  // current size of tuple list
01382             TLv.vi_wr[2 * n] = to_proc;      // send to processor
01383 
01384             rval = mb->tag_get_data( gidtag, &v, 1, &( TLv.vi_wr[2 * n + 1] ) );MB_CHK_ERR( rval );
01385             rval = mb->get_coords( &v, 1, &( TLv.vr_wr[3 * n] ) );MB_CHK_ERR( rval );
01386             TLv.inc_n();  // increment tuple list size
01387         }
01388     }
01389     if( type == 2 ) return MB_SUCCESS;  // no need to fill TLc
01390     // to proc, ID cell, gdstag, nbv, id conn,
01391     int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell
01392 
01393     std::vector< int > gdvals;
01394 
01395     TLc.initialize( size_tuple, 0, 0, 0, numc );  // to proc, cell GLOBAL ID, global dofs (type 1), nnodes, connectivity ids
01396     TLc.enableWriteAccess();
01397     for( auto it = split_ranges.begin(); it != split_ranges.end(); it++ )
01398     {
01399         int to_proc  = it->first;
01400         Range& cells = it->second;
01401         for( Range::iterator cit = cells.begin(); cit != cells.end(); ++cit )
01402         {
01403             EntityHandle cell         = *cit;
01404             int n                     = TLc.get_n();  // current size of tuple list
01405             TLc.vi_wr[size_tuple * n] = to_proc;
01406             int current_index         = 2;
01407             rval                      = mb->tag_get_data( gidtag, &cell, 1, &( TLc.vi_wr[size_tuple * n + 1] ) );MB_CHK_ERR( rval );
01408             if( 1 == type )
01409             {
01410                 rval = mb->tag_get_data( gds, &cell, 1, &( TLc.vi_wr[size_tuple * n + current_index] ) );MB_CHK_ERR( rval );
01411                 current_index += lenTagType1;
01412             }
01413             // now get connectivity
01414             const EntityHandle* conn = NULL;
01415             int nnodes               = 0;
01416             rval                     = mb->get_connectivity( cell, conn, nnodes );MB_CHK_ERR( rval );
01417             // fill nnodes:
01418             TLc.vi_wr[size_tuple * n + current_index] = nnodes;
01419             rval = mb->tag_get_data( gidtag, conn, nnodes, &( TLc.vi_wr[size_tuple * n + current_index + 1] ) );MB_CHK_ERR( rval );
01420             TLc.inc_n();  // increment tuple list size
01421         }
01422     }
01423     return MB_SUCCESS;
01424 }
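// Layout example for one TLc tuple, as packed above and unpacked by form_mesh_from_tuples
// (hypothetical type == 1 cell with lenTagType1 == 4 and quad connectivity), so
// size_tuple == 2 + 4 + 1 + 10 == 17:
//
//     [ to_proc, cell_gid, dof0, dof1, dof2, dof3, 4, v0_gid, v1_gid, v2_gid, v3_gid, <6 unused> ]
//
// index 0 is the destination task, index 1 the cell global id, then the GLOBAL_DOFS values,
// then the number of vertices, then up to 10 vertex global ids used to rebuild connectivity.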
01425 ErrorCode ParCommGraph::form_mesh_from_tuples( Interface* mb,
01426                                                TupleList& TLv,
01427                                                TupleList& TLc,
01428                                                int type,
01429                                                int lenTagType1,
01430                                                EntityHandle fset,
01431                                                Range& primary_ents,
01432                                                std::vector< int >& values_entities )
01433 {
01434     // we might also need to fill the split_ranges here
01435     // we will always need GlobalID tag
01436     Tag gidtag = mb->globalId_tag();
01437     // for Type1, we need GLOBAL_DOFS tag;
01438     Tag gds;
01439     ErrorCode rval;
01440     std::vector< int > def_val( lenTagType1, 0 );
01441     if( type == 1 )
01442     {
01443         // we may need to create this tag
01444         rval = mb->tag_get_handle( "GLOBAL_DOFS", lenTagType1, MB_TYPE_INTEGER, gds, MB_TAG_CREAT | MB_TAG_DENSE,
01445                                    &def_val[0] );MB_CHK_ERR( rval );
01446     }
01447 
01448     std::map< int, EntityHandle > vertexMap;  // from global id to the (new) local vertex handle
01449     Range verts;
01450     // always form vertices and add them to the fset;
01451     int n = TLv.get_n();
01452     EntityHandle vertex;
01453     for( int i = 0; i < n; i++ )
01454     {
01455         int gid = TLv.vi_rd[2 * i + 1];
01456         if( vertexMap.find( gid ) == vertexMap.end() )
01457         {
01458             // need to form this vertex
01459             rval = mb->create_vertex( &( TLv.vr_rd[3 * i] ), vertex );MB_CHK_ERR( rval );
01460             vertexMap[gid] = vertex;
01461             verts.insert( vertex );
01462             rval = mb->tag_set_data( gidtag, &vertex, 1, &gid );MB_CHK_ERR( rval );
01463             // if point cloud, the vertex itself is a primary entity (handled below)
01464         }
01465         if( 2 == type )  // point cloud, add it to the split_ranges ?
01466         {
01467             split_ranges[TLv.vi_rd[2 * i]].insert( vertexMap[gid] );
01468         }
01469         // todo : when to add the values_entities ?
01470     }
01471     rval = mb->add_entities( fset, verts );MB_CHK_ERR( rval );
01472     if( 2 == type )
01473     {
01474         primary_ents = verts;
01475         values_entities.resize( verts.size() );  // just get the ids of vertices
01476         rval = mb->tag_get_data( gidtag, verts, &values_entities[0] );MB_CHK_ERR( rval );
01477         return MB_SUCCESS;
01478     }
01479 
01480     n              = TLc.get_n();
01481     int size_tuple = 2 + ( ( type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell
01482 
01483     EntityHandle new_element;
01484     Range cells;
01485     std::map< int, EntityHandle > cellMap;  // do not create one if it already exists, maybe from other processes
01486     for( int i = 0; i < n; i++ )
01487     {
01488         int from_proc  = TLc.vi_rd[size_tuple * i];
01489         int globalIdEl = TLc.vi_rd[size_tuple * i + 1];
01490         if( cellMap.find( globalIdEl ) == cellMap.end() )  // need to create the cell
01491         {
01492             int current_index = 2;
01493             if( 1 == type ) current_index += lenTagType1;
01494             int nnodes = TLc.vi_rd[size_tuple * i + current_index];
01495             std::vector< EntityHandle > conn;
01496             conn.resize( nnodes );
01497             for( int j = 0; j < nnodes; j++ )
01498             {
01499                 conn[j] = vertexMap[TLc.vi_rd[size_tuple * i + current_index + j + 1]];
01500             }
01501             // pick the element type based on the number of vertices
01502             EntityType entType = MBQUAD;
01503             if( nnodes > 4 ) entType = MBPOLYGON;
01504             if( nnodes < 4 ) entType = MBTRI;
01505             rval = mb->create_element( entType, &conn[0], nnodes, new_element );MB_CHK_SET_ERR( rval, "can't create new element " );
01506             cells.insert( new_element );
01507             cellMap[globalIdEl] = new_element;
01508             rval                = mb->tag_set_data( gidtag, &new_element, 1, &globalIdEl );MB_CHK_SET_ERR( rval, "can't set global id tag on cell " );
01509             if( 1 == type )
01510             {
01511                 // set the gds tag
01512                 rval = mb->tag_set_data( gds, &new_element, 1, &( TLc.vi_rd[size_tuple * i + 2] ) );MB_CHK_SET_ERR( rval, "can't set gds tag on cell " );
01513             }
01514         }
01515         split_ranges[from_proc].insert( cellMap[globalIdEl] );
01516     }
01517     rval = mb->add_entities( fset, cells );MB_CHK_ERR( rval );
01518     primary_ents = cells;
01519     if( 1 == type )
01520     {
01521         values_entities.resize( lenTagType1 * primary_ents.size() );
01522         rval = mb->tag_get_data( gds, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
01523     }
01524     else  // type == 3
01525     {
01526         values_entities.resize( primary_ents.size() );  // just get the ids !
01527         rval = mb->tag_get_data( gidtag, primary_ents, &values_entities[0] );MB_CHK_ERR( rval );
01528     }
01529     return MB_SUCCESS;
01530 }
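// Note on the output (hypothetical sizes): for type == 1 with lenTagType1 == 4,
// values_entities.size() == 4 * primary_ents.size(), and the 4 GLOBAL_DOFS values of the
// k-th cell in primary_ents are stored at positions 4*k .. 4*k+3; for types 2 and 3 there is
// exactly one global id per primary entity.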
01531 
01532 // at this moment, each sender task has split_ranges formed;
01533 // we need to aggregate that info and send it to the root receiver
01534 ErrorCode ParCommGraph::send_graph_partition( ParallelComm* pco, MPI_Comm jcomm, std::vector< char >& zoltanBuffer )
01535 {
01536     // first, accumulate the info at the root of the sender group, using gatherv;
01537     // start by gathering the number of receivers from each sender task at the root sender
01538     int numberReceivers =
01539         (int)split_ranges.size();            // these are ranges of elements to be sent to each receiver, from this task
01540     int nSenders = (int)senderTasks.size();  // number of sender tasks
01541     // this sender will have to send to this many receivers
01542     std::vector< int > displs( 1 );  // displacements for gatherv
01543     std::vector< int > counts( 1 );
01544     if( is_root_sender() )
01545     {
01546         displs.resize( nSenders + 1 );
01547         counts.resize( nSenders );
01548     }
01549 
01550     int ierr = MPI_Gather( &numberReceivers, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, pco->comm() );
01551     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01552     // compute now displacements
01553     if( is_root_sender() )
01554     {
01555         displs[0] = 0;
01556         for( int k = 0; k < nSenders; k++ )
01557         {
01558             displs[k + 1] = displs[k] + counts[k];
01559         }
01560     }
01561     std::vector< int > buffer;
01562     if( is_root_sender() ) buffer.resize( displs[nSenders] );  // the last one will have the total count now
01563 
01564     std::vector< int > recvs;
01565     for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01566     {
01567         recvs.push_back( mit->first );
01568     }
01569     ierr =
01570         MPI_Gatherv( &recvs[0], numberReceivers, MPI_INT, &buffer[0], &counts[0], &displs[0], MPI_INT, 0, pco->comm() );
01571     if( ierr != MPI_SUCCESS ) return MB_FAILURE;
01572 
01573     // now form the recv_graph map: for each receiver task, the list of sender tasks that
01574     // will send to it; this graph is formed and sent to the other side only on the root sender
01575     if( is_root_sender() )
01576     {
01577 #ifdef GRAPH_INFO
01578         std::ofstream dbfileSender;
01579         std::stringstream outf;
01580         outf << "S_" << compid1 << "_R_" << compid2 << "_SenderGraph.txt";
01581         dbfileSender.open( outf.str().c_str() );
01582         dbfileSender << " number senders: " << nSenders << "\n";
01583         dbfileSender << " senderRank \treceivers \n";
01584         for( int k = 0; k < nSenders; k++ )
01585         {
01586             int indexInBuff = displs[k];
01587             int senderTask  = senderTasks[k];
01588             dbfileSender << senderTask << "\t\t";
01589             for( int j = 0; j < counts[k]; j++ )
01590             {
01591                 int recvTask = buffer[indexInBuff + j];
01592                 dbfileSender << recvTask << " ";
01593             }
01594             dbfileSender << "\n";
01595         }
01596         dbfileSender.close();
01597 #endif
01598         for( int k = 0; k < nSenders; k++ )
01599         {
01600             int indexInBuff = displs[k];
01601             int senderTask  = senderTasks[k];
01602             for( int j = 0; j < counts[k]; j++ )
01603             {
01604                 int recvTask = buffer[indexInBuff + j];
01605                 recv_graph[recvTask].push_back( senderTask );  // this will be packed and sent to root receiver, with
01606                                                                // nonblocking send
01607             }
01608         }
01609 #ifdef GRAPH_INFO
01610         std::ofstream dbfile;
01611         std::stringstream outf2;
01612         outf2 << "S_" << compid1 << "_R_" << compid2 << "_RecvGraph.txt";
01613         dbfile.open( outf2.str().c_str() );
01614         dbfile << " number receivers: " << recv_graph.size() << "\n";
01615         dbfile << " receiverRank \tsenders \n";
01616         for( std::map< int, std::vector< int > >::iterator mit = recv_graph.begin(); mit != recv_graph.end(); mit++ )
01617         {
01618             int recvTask                = mit->first;
01619             std::vector< int >& senders = mit->second;
01620             dbfile << recvTask << "\t\t";
01621             for( std::vector< int >::iterator vit = senders.begin(); vit != senders.end(); vit++ )
01622                 dbfile << *vit << " ";
01623             dbfile << "\n";
01624         }
01625         dbfile.close();
01626 #endif
01627         // this is the same as trivial partition
01628         ErrorCode rval = send_graph( jcomm, zoltanBuffer );MB_CHK_ERR( rval );
01629     }
01630 
01631     return MB_SUCCESS;
01632 }
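// Gather layout example (hypothetical counts, on the root sender): with 3 sender tasks
// reporting counts = {2, 1, 3}, the displacements become displs = {0, 2, 3, 6}, and the
// gathered buffer of size 6 holds the receiver ranks of sender 0 first, then those of
// sender 1, then those of sender 2; recv_graph is then inverted from it, mapping each
// receiver rank to the list of sender ranks that will send to it.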
01633 // method to expose local graph info: sender id, receiver id, sizes of elements to send, after or
01634 // before intersection
01635 ErrorCode ParCommGraph::dump_comm_information( const std::string & prefix, int is_send, int verbose )
01636 {
01637     //
01638     if( -1 != rankInGroup1 && 1 == is_send )  // it is a sender task
01639     {
01640         std::ofstream dbfile;
01641         std::stringstream outf;
01642         outf << prefix << "_sender_" << rankInGroup1 << "_joint" << rankInJoin << "_type_" << (int)graph_type << ".txt";
01643         dbfile.open( outf.str().c_str() );
01644 
01645         if( graph_type == COVERAGE )
01646         {
01647             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01648                  mit != involved_IDs_map.end(); mit++ )
01649             {
01650                 int receiver_proc        = mit->first;
01651                 std::vector< int >& eids = mit->second;
01652                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01653                 if( verbose >= 1 )
01654                 {
01655                     for( size_t i = 0; i < eids.size(); i++ )
01656                     {
01657                         dbfile << eids[i] << " ";
01658                         if( i % 20 == 19 ) dbfile << "\n";
01659                     }
01660                     dbfile << "\n";
01661                 }
01662             }
01663         }
01664         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01665         {
01666             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01667             {
01668                 int receiver_proc = mit->first;
01669                 Range& eids       = mit->second;
01670                 dbfile << "receiver: " << receiver_proc << " size:" << eids.size() << "\n";
01671             }
01672         }
01673         else if( graph_type == DOF_BASED )  // just after migration, or from computeGraph
01674         {
01675             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01676                  mit != involved_IDs_map.end(); mit++ )
01677             {
01678                 int receiver_proc = mit->first;
01679                 dbfile << "receiver: " << receiver_proc << " size:" << mit->second.size() << "\n";
01680             }
01681         }
01682         dbfile.close();
01683     }
01684     if( -1 != rankInGroup2 && 0 == is_send )  // it is a receiver task
01685     {
01686         std::ofstream dbfile;
01687         std::stringstream outf;
01688         outf << prefix << "_receiver_" << rankInGroup2 << "_joint" << rankInJoin << "_type_" << (int)graph_type
01689              << ".txt";
01690         dbfile.open( outf.str().c_str() );
01691 
01692         if( graph_type == COVERAGE )
01693         {
01694             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01695                  mit != involved_IDs_map.end(); mit++ )
01696             {
01697                 int sender_proc          = mit->first;
01698                 std::vector< int >& eids = mit->second;
01699                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01700                 if( verbose >= 1 )
01701                 {
01702                     for( size_t i = 0; i < eids.size(); i++ )
01703                     {
01704                         dbfile << eids[i] << " ";
01705                         if( i % 20 == 19 ) dbfile << "\n";
01706                     }
01707                     dbfile << "\n";
01708                 }
01709             }
01710         }
01711         else if( graph_type == INITIAL_MIGRATE )  // just after migration
01712         {
01713             for( std::map< int, Range >::iterator mit = split_ranges.begin(); mit != split_ranges.end(); mit++ )
01714             {
01715                 int sender_proc = mit->first;
01716                 Range& eids     = mit->second;
01717                 dbfile << "sender: " << sender_proc << " size:" << eids.size() << "\n";
01718             }
01719         }
01720         else if( graph_type == DOF_BASED )  // just after migration
01721         {
01722             for( std::map< int, std::vector< int > >::iterator mit = involved_IDs_map.begin();
01723                  mit != involved_IDs_map.end(); mit++ )
01724             {
01725                 int sender_proc = mit->first;
01726                 dbfile << "sender: " << sender_proc << " size:" << mit->second.size() << "\n";
01727             }
01728         }
01729         dbfile.close();
01730     }
01731     return MB_SUCCESS;
01732 }
01733 }  // namespace moab