MOAB: Mesh Oriented datABase  (version 5.2.1)
ParallelComm.hpp
00001 /**
00002  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00003  * storing and accessing finite element mesh data.
00004  *
00005  * Copyright 2004 Sandia Corporation.  Under the terms of Contract
00006  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00007  * retains certain rights in this software.
00008  *
00009  * This library is free software; you can redistribute it and/or
00010  * modify it under the terms of the GNU Lesser General Public
00011  * License as published by the Free Software Foundation; either
00012  * version 2.1 of the License, or (at your option) any later version.
00013  *
00014  */
00015 
00016 #ifndef MOAB_PARALLEL_COMM_HPP
00017 #define MOAB_PARALLEL_COMM_HPP
00018 
00019 #include "moab/Forward.hpp"
00020 #include "moab/Interface.hpp"
00021 #include "moab/Range.hpp"
00022 #include "moab/ProcConfig.hpp"
00023 #include <map>
00024 #include <set>
00025 #include <vector>
00026 #include <iostream>
00027 #include <fstream>
00028 #include <cassert>
00029 #include <cstdlib>
00030 #include <cmath>
00031 #include "moab/TupleList.hpp"
00032 
00033 namespace moab
00034 {
00035 
00036 class SequenceManager;
00037 class Error;
00038 template < typename KeyType, typename ValType, ValType NullVal >
00039 class RangeMap;
00040 typedef RangeMap< EntityHandle, EntityHandle, 0 > HandleMap;
00041 class ParallelMergeMesh;
00042 class DebugOutput;
00043 class SharedSetData;
00044 
00045 #define MAX_SHARING_PROCS 64
00046 
00047 /**
00048  * \brief Parallel communications in MOAB
00049  * \author Tim Tautges
00050  *
00051  *  This class implements methods to communicate mesh between processors
00052  *
00053  */
00054 class ParallelComm
00055 {
00056   public:
00057     friend class ParallelMergeMesh;
00058 
00059     // ==================================
00060     // \section CONSTRUCTORS/DESTRUCTORS/PCOMM MANAGEMENT
00061     // ==================================
00062 
00063     //! constructor
00064     ParallelComm( Interface* impl, MPI_Comm comm, int* pcomm_id_out = 0 );
00065 
00066     //! constructor taking packed buffer, for testing
00067     ParallelComm( Interface* impl, std::vector< unsigned char >& tmp_buff, MPI_Comm comm, int* pcomm_id_out = 0 );
00068 
00069     //! Get ID used to reference this PCOMM instance
00070     int get_id() const
00071     {
00072         return pcommID;
00073     }
00074 
00075     //! get the indexed pcomm object from the interface
00076     static ParallelComm* get_pcomm( Interface* impl, const int index );
00077 
00078     //! Get ParallelComm instance associated with partition handle
00079     //! Will create ParallelComm instance if a) one does not already
00080     //! exist and b) a valid value for MPI_Comm is passed.
00081     static ParallelComm* get_pcomm( Interface* impl, EntityHandle partitioning, const MPI_Comm* comm = 0 );
00082 
00083     static ErrorCode get_all_pcomm( Interface* impl, std::vector< ParallelComm* >& list );
00084 
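    /* Editor's example (a minimal sketch, not part of the original header):
     * creating a ParallelComm bound to MPI_COMM_WORLD and looking the same
     * instance up again by id.  Error handling is omitted for brevity.
     * \code
     * #include <cassert>
     * #include "moab/Core.hpp"
     * #include "moab/ParallelComm.hpp"
     *
     * int main( int argc, char** argv )
     * {
     *     MPI_Init( &argc, &argv );
     *     {
     *         moab::Core mb;  // concrete Interface implementation
     *         moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );
     *         // retrieve the same instance later via its id
     *         moab::ParallelComm* found = moab::ParallelComm::get_pcomm( &mb, pcomm.get_id() );
     *         assert( found == &pcomm );
     *     }
     *     MPI_Finalize();
     *     return 0;
     * }
     * \endcode
     */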
00085     //! destructor
00086     ~ParallelComm();
00087 
00088     static unsigned char PROC_SHARED, PROC_OWNER;
00089 
00090     // ==================================
00091     // \section GLOBAL IDS
00092     // ==================================
00093 
00094     //! assign a global id space, for largest-dimension or all entities (and
00095     //! in either case for vertices too)
00096     //!\param owned_only If true, do not get global IDs for non-owned entities
00097     //!                  from remote processors.
00098     ErrorCode assign_global_ids( EntityHandle this_set, const int dimension, const int start_id = 1,
00099                                  const bool largest_dim_only = true, const bool parallel = true,
00100                                  const bool owned_only = false );
00101 
00102     //! assign a global id space, for largest-dimension or all entities (and
00103     //! in either case for vertices too)
00104     ErrorCode assign_global_ids( Range entities[], const int dimension, const int start_id, const bool parallel,
00105                                  const bool owned_only );
00106 
00107     //! check for global ids; based only on tag handle being there or not;
00108     //! if it's not there, create them for the specified dimensions
00109     //!\param owned_only If true, do not get global IDs for non-owned entities
00110     //!                  from remote processors.
00111     ErrorCode check_global_ids( EntityHandle this_set, const int dimension, const int start_id = 1,
00112                                 const bool largest_dim_only = true, const bool parallel = true,
00113                                 const bool owned_only = false );
00114 
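    /* Editor's example (a hedged sketch, not part of the original header):
     * assign global ids for the 3-dimensional entities (and vertices) of the
     * root set, starting at 1, collectively across all ranks.
     * \code
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode number_mesh( moab::ParallelComm& pcomm )
     * {
     *     // this_set = 0 (root set), dimension = 3; remaining arguments keep
     *     // their defaults (start_id = 1, largest_dim_only = true,
     *     // parallel = true, owned_only = false)
     *     return pcomm.assign_global_ids( 0, 3 );
     * }
     * \endcode
     */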
00115     // ==================================
00116     // \section HIGH-LEVEL COMMUNICATION (send/recv/bcast/scatter ents, exchange tags)
00117     // ==================================
00118 
00119     /** \brief send entities to another processor, optionally waiting until it's done
00120      *
00121      * Send entities to another processor, with adjs, sets, and tags.
00122      * If store_remote_handles is true, this call receives back handles assigned to
00123      * entities sent to destination processor and stores them in sharedh_tag or
00124      * sharedhs_tag.
00125      * \param to_proc Destination processor
00126      * \param orig_ents Entities requested to send
00127      * \param adjs If true, send adjacencies for equiv entities (currently unsupported)
00128      * \param tags If true, send tag values for all tags assigned to entities
00129      * \param store_remote_handles If true, also recv message with handles on destination processor (currently unsupported)
00130      * \param final_ents Range containing all entities sent
00131      * \param incoming Keep track of the number of messages incoming to this processor (newly added)
00132      * \param wait_all If true, wait until all messages received/sent complete
00133      */
00134     ErrorCode send_entities( const int to_proc, Range& orig_ents, const bool adjs, const bool tags,
00135                              const bool store_remote_handles, const bool is_iface, Range& final_ents, int& incoming1,
00136                              int& incoming2,                                 // newly added
00137                              TupleList& entprocs,                            // newly added
00138                              std::vector< MPI_Request >& recv_remoteh_reqs,  // newly added
00139                              bool wait_all = true );
00140 
00141     ErrorCode send_entities( std::vector< unsigned int >& send_procs, std::vector< Range* >& send_ents, int& incoming1,
00142                              int& incoming2, const bool store_remote_handles );
00143 
00144     /** \brief Receive entities from another processor, optionally waiting until it's done
00145      *
00146      * Receive entities from another processor, with adjs, sets, and tags.
00147      * If store_remote_handles is true, this call sends back handles assigned to
00148      * the entities received.
00149      * \param from_proc Source processor
00150      * \param store_remote_handles If true, send message with new entity handles to source processor (currently unsupported)
00151      * \param final_ents Range containing all entities received
00152      * \param incoming Keep track of the number of messages incoming to this processor (newly added)
00153      * \param wait_all If true, wait until all messages received/sent complete
00154      */
00155     ErrorCode recv_entities( const int from_proc, const bool store_remote_handles, const bool is_iface,
00156                              Range& final_ents, int& incoming1, int& incoming2,
00157                              std::vector< std::vector< EntityHandle > >& L1hloc,
00158                              std::vector< std::vector< EntityHandle > >& L1hrem, std::vector< std::vector< int > >& L1p,
00159                              std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
00160                              std::vector< unsigned int >& L2p, std::vector< MPI_Request >& recv_remoteh_reqs,
00161                              bool wait_all = true );
00162 
00163     ErrorCode recv_entities( std::set< unsigned int >& recv_procs, int incoming1, int incoming2,
00164                              const bool store_remote_handles, const bool migrate = false );
00165 
00166     /** \brief Receive messages from another processor in a while loop
00167      *
00168      * Receive messages from another processor.
00169      * \param from_proc Source processor
00170      * \param store_remote_handles If true, send message with new entity handles to source processor (currently unsupported)
00171      * \param final_ents Range containing all entities received
00172      * \param incoming Keep track of the number of messages incoming to this processor (newly added)
00173      */
00174     ErrorCode recv_messages( const int from_proc, const bool store_remote_handles, const bool is_iface,
00175                              Range& final_ents, int& incoming1, int& incoming2,
00176                              std::vector< std::vector< EntityHandle > >& L1hloc,
00177                              std::vector< std::vector< EntityHandle > >& L1hrem, std::vector< std::vector< int > >& L1p,
00178                              std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
00179                              std::vector< unsigned int >& L2p, std::vector< MPI_Request >& recv_remoteh_reqs );
00180 
00181     ErrorCode recv_remote_handle_messages( const int from_proc, int& incoming2, std::vector< EntityHandle >& L2hloc,
00182                                            std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
00183                                            std::vector< MPI_Request >& recv_remoteh_reqs );
00184 
00185     /** \brief Exchange ghost cells with neighboring procs
00186      * Neighboring processors are those sharing an interface
00187      * with this processor.  All entities of dimension ghost_dim
00188      * within num_layers of interface, measured going through bridge_dim,
00189      * are exchanged.  See MeshTopoUtil::get_bridge_adjacencies for description
00190      * of bridge adjacencies.  If wait_all is false and store_remote_handles
00191      * is true, MPI_Request objects are available in the sendReqs[2*MAX_SHARING_PROCS]
00192      * member array, with inactive requests marked as MPI_REQUEST_NULL.  If
00193      * store_remote_handles or wait_all is false, this function returns after
00194      * all entities have been received and processed.
00195      * \param ghost_dim Dimension of ghost entities to be exchanged
00196      * \param bridge_dim Dimension of entities used to measure layers from interface
00197      * \param num_layers Number of layers of ghosts requested
00198      * \param addl_ents Dimension of additional adjacent entities to exchange with ghosts, 0 if none
00199      * \param store_remote_handles If true, send message with new entity handles to source processor
00200      * \param wait_all If true, function does not return until all send buffers
00201      *       are cleared.
00202      */
00203 
00204     ErrorCode exchange_ghost_cells( int ghost_dim, int bridge_dim, int num_layers, int addl_ents,
00205                                     bool store_remote_handles, bool wait_all = true, EntityHandle* file_set = NULL );
00206 
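    /* Editor's example (a hedged sketch, not part of the original header):
     * exchange one layer of 3D ghost elements, bridged through shared
     * vertices, after shared entities have been resolved.
     * \code
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode ghost_one_layer( moab::ParallelComm& pcomm )
     * {
     *     // ghost_dim = 3, bridge_dim = 0, num_layers = 1, addl_ents = 0,
     *     // store_remote_handles = true so ghosts stay linked to their owners
     *     return pcomm.exchange_ghost_cells( 3, 0, 1, 0, true );
     * }
     * \endcode
     */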
00207     /** \brief Static version of exchange_ghost_cells, exchanging info through
00208      * buffers rather than messages
00209      */
00210     static ErrorCode exchange_ghost_cells( ParallelComm** pc, unsigned int num_procs, int ghost_dim, int bridge_dim,
00211                                            int num_layers, int addl_ents, bool store_remote_handles,
00212                                            EntityHandle* file_sets = NULL );
00213 
00214     /** \brief Post "MPI_Irecv" before meshing
00215      * \param exchange_procs processor vector exchanged
00216      */
00217     ErrorCode post_irecv( std::vector< unsigned int >& exchange_procs );
00218 
00219     ErrorCode post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs );
00220 
00221     /** \brief Exchange owned mesh for input mesh entities and sets
00222      * This function should be called collectively over the communicator for this ParallelComm.
00223      * If this version is called, all shared exchanged entities should have a value for this
00224      * tag (or the tag should have a default value).
00225      * \param exchange_procs processor vector exchanged
00226      * \param exchange_ents exchanged entities for each processor
00227      * \param migrate Whether the ownership of the entities is changed or not
00228      */
00229     ErrorCode exchange_owned_meshs( std::vector< unsigned int >& exchange_procs, std::vector< Range* >& exchange_ents,
00230                                     std::vector< MPI_Request >& recv_ent_reqs,
00231                                     std::vector< MPI_Request >& recv_remoteh_reqs, bool store_remote_handles,
00232                                     bool wait_all = true, bool migrate = false, int dim = 0 );
00233 
00234     /** \brief Exchange owned mesh for input mesh entities and sets
00235      * This function is called twice by exchange_owned_meshs to exchange entities before sets
00236      * \param migrate Whether the ownership of the entities is changed or not
00237      */
00238     ErrorCode exchange_owned_mesh( std::vector< unsigned int >& exchange_procs, std::vector< Range* >& exchange_ents,
00239                                    std::vector< MPI_Request >& recv_ent_reqs,
00240                                    std::vector< MPI_Request >& recv_remoteh_reqs, const bool recv_posted,
00241                                    bool store_remote_handles, bool wait_all, bool migrate = false );
00242 
00243     /** \brief Exchange tags for all shared and ghosted entities
00244      * This function should be called collectively over the communicator for this ParallelComm.
00245      * If this version is called, all ghosted/shared entities should have a value for this
00246      * tag (or the tag should have a default value).  If the entities vector is empty, all shared
00247      * entities participate in the exchange.  If a proc has no owned entities this function must still be called since it is collective.
00248      * \param src_tags Vector of tag handles to be exchanged
00249      * \param dst_tags Tag handles to store the tags on the non-owning procs
00250      * \param entities Entities for which tags are exchanged
00251      */
00252     ErrorCode exchange_tags( const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags,
00253                              const Range& entities );
00254 
00255     /** \brief Exchange tags for all shared and ghosted entities
00256      * This function should be called collectively over the communicator for this ParallelComm.
00257      * If the entities vector is empty, all shared entities
00258      * participate in the exchange.  If a proc has no owned entities this function must still be called since it is collective.
00259      * \param tag_name Name of tag to be exchanged
00260      * \param entities Entities for which tags are exchanged
00261      */
00262     ErrorCode exchange_tags( const char* tag_name, const Range& entities );
00263 
00264     /** \brief Exchange tags for all shared and ghosted entities
00265      * This function should be called collectively over the communicator for this ParallelComm.
00266      * If the entities vector is empty, all shared entities
00267      * participate in the exchange.  If a proc has no owned entities this function must still be called since it is collective.
00268      * \param tagh Handle of tag to be exchanged
00269      * \param entities Entities for which tags are exchanged
00270      */
00271     ErrorCode exchange_tags( Tag tagh, const Range& entities );
00272 
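    /* Editor's example (a hedged sketch): push values of a tag named
     * "TEMPERATURE" (an illustrative name, not from the original source) from
     * owners to ghost/shared copies; an empty range selects all shared
     * entities.
     * \code
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode sync_temperature( moab::ParallelComm& pcomm )
     * {
     *     moab::Range all;  // empty: all shared entities participate
     *     return pcomm.exchange_tags( "TEMPERATURE", all );
     * }
     * \endcode
     */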
00273     /** \brief Perform data reduction operation for all shared and ghosted entities
00274      * This function should be called collectively over the communicator for this ParallelComm.
00275      * If this version is called, all ghosted/shared entities should have a value for this
00276      * tag (or the tag should have a default value).  Operation is any MPI_Op, with result stored
00277      * in destination tag.
00278      * \param src_tags Vector of tag handles to be reduced
00279      * \param dst_tags Vector of tag handles in which the answer will be stored
00280      * \param mpi_op Operation type
00281      * \param entities Entities on which reduction will be made; if empty, operates on all shared
00282      *                 entities
00283      */
00284     ErrorCode reduce_tags( const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags, const MPI_Op mpi_op,
00285                            const Range& entities );
00286 
00287     /** \brief Perform data reduction operation for all shared and ghosted entities
00288      * Same as std::vector variant except for one tag specified by name
00289      * \param tag_name Name of tag to be reduced
00290      * \param mpi_op Operation type
00291      * \param entities Entities on which reduction will be made; if empty, operates on all shared
00292      *                 entities
00293      */
00294     ErrorCode reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities );
00295 
00296     /** \brief Perform data reduction operation for all shared and ghosted entities
00297      * Same as std::vector variant except for one tag specified by handle
00298      * \param tag_handle Handle of tag to be reduced
00299      * \param mpi_op Operation type
00300      * \param entities Entities on which reduction will be made; if empty, operates on all shared
00301      *                 entities
00302      */
00303     ErrorCode reduce_tags( Tag tag_handle, const MPI_Op mpi_op, const Range& entities );
00304 
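    /* Editor's example (a hedged sketch): sum contributions for a tag across
     * sharing processors, leaving every copy holding the reduced value; the
     * tag handle "mass_tag" is an assumption for illustration.
     * \code
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode sum_shared_mass( moab::ParallelComm& pcomm, moab::Tag mass_tag )
     * {
     *     moab::Range all;  // empty: operate on all shared entities
     *     return pcomm.reduce_tags( mass_tag, MPI_SUM, all );
     * }
     * \endcode
     */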
00305     /** \brief Broadcast all entities resident on from_proc to other processors
00306      * This function assumes remote handles are *not* being stored, since (usually)
00307      * every processor will know about the whole mesh.
00308      * \param from_proc Processor having the mesh to be broadcast
00309      * \param entities On return, the entities sent or received in this call
00310      * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
00311      * \param tags If true, all non-default-valued tags are sent for sent entities
00312      */
00313     ErrorCode broadcast_entities( const int from_proc, Range& entities, const bool adjacencies = false,
00314                                   const bool tags = true );
00315 
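    /* Editor's example (a hedged sketch): rank 0 broadcasts its whole mesh so
     * every process ends up with a full copy; on non-root ranks "ents" starts
     * empty and is populated by the call.
     * \code
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode replicate_mesh( moab::ParallelComm& pcomm, moab::Range& ents )
     * {
     *     // ents: on rank 0, the entities to send; on other ranks, receives them
     *     return pcomm.broadcast_entities( 0, ents );
     * }
     * \endcode
     */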
00316     /** \brief Scatter entities on from_proc to other processors
00317      * This function assumes remote handles are *not* being stored, since (usually)
00318      * every processor will know about the whole mesh.
00319      * \param from_proc Processor having the mesh to be scattered
00320      * \param entities On return, the entities sent or received in this call
00321      * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
00322      * \param tags If true, all non-default-valued tags are sent for sent entities
00323      */
00324     ErrorCode scatter_entities( const int from_proc, std::vector< Range >& entities, const bool adjacencies = false,
00325                                 const bool tags = true );
00326 
00327     /////////////////////////////////////////////////////////////////////////////////
00328     // Send and Receive routines for a sequence of entities: use case UMR
00329     /////////////////////////////////////////////////////////////////////////////////
00330 
00331     /** \brief Send data to and receive data from a set of processors
00332      */
00333     ErrorCode send_recv_entities( std::vector< int >& send_procs, std::vector< std::vector< int > >& msgsizes,
00334                                   std::vector< std::vector< EntityHandle > >& senddata,
00335                                   std::vector< std::vector< EntityHandle > >& recvdata );
00336 
00337     ErrorCode update_remote_data( EntityHandle entity, std::vector< int >& procs,
00338                                   std::vector< EntityHandle >& handles );
00339 
00340     ErrorCode get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc );
00341 
00342     /////////////////////////////////////////////////////////////////////////////////
00343 
00344     // ==================================
00345     // \section INITIALIZATION OF PARALLEL DATA (resolve_shared_ents, etc.)
00346     // ==================================
00347 
00348     /** \brief Resolve shared entities between processors
00349      *
00350      * Resolve shared entities between processors for entities in proc_ents,
00351      * by comparing global id tag values on vertices on skin of elements in
00352      * proc_ents.  Shared entities are assigned a tag that's either
00353      * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
00354      * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
00355      * number of sharing processors.  Values in these tags denote the ranks
00356      * of sharing processors, and the list ends with the value -1.
00357      *
00358      * If shared_dim is input as -1 or not input, a value one less than the
00359      * maximum dimension of entities in proc_ents is used.
00360      *
00361      * \param proc_ents Entities for which to resolve shared entities
00362      * \param shared_dim Maximum dimension of shared entities to look for
00363      */
00364     ErrorCode resolve_shared_ents( EntityHandle this_set, Range& proc_ents, int resolve_dim = -1, int shared_dim = -1,
00365                                    Range* skin_ents = NULL, const Tag* id_tag = 0 );
00366 
00367     /** \brief Resolve shared entities between processors
00368      *
00369      * Same as resolve_shared_ents(Range&), except works for
00370      * all entities in instance of dimension dim.
00371      *
00372      * If shared_dim is input as -1 or not input, a value one less than the
00373      * maximum dimension of entities is used.
00374 
00375      * \param resolve_dim Dimension of entities in the partition
00376      * \param shared_dim Maximum dimension of shared entities to look for
00377      */
00378     ErrorCode resolve_shared_ents( EntityHandle this_set, int resolve_dim = 3, int shared_dim = -1,
00379                                    const Tag* id_tag = 0 );
00380 
00381     static ErrorCode resolve_shared_ents( ParallelComm** pc, const unsigned int np, EntityHandle this_set,
00382                                           const int to_dim );
00383 
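    /* Editor's example (a hedged sketch): after each rank reads its part of a
     * partitioned mesh, resolve which entities are shared across ranks; here
     * dimension-3 elements are resolved and lower-dimension entities may be
     * shared.
     * \code
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode resolve( moab::ParallelComm& pcomm )
     * {
     *     // this_set = 0 (root set), resolve_dim = 3, shared_dim = -1
     *     // (-1: one less than the maximum entity dimension)
     *     return pcomm.resolve_shared_ents( 0, 3, -1 );
     * }
     * \endcode
     */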
00384     /** Resolve shared sets.
00385      *
00386      * Generates a list of candidate sets from those (directly)
00387      * contained in the passed set and passes them to the other version
00388      * of \c resolve_shared_sets.
00389      *\param this_set  Set directly containing candidate sets (e.g. file set)
00390      *\param id_tag    Tag containing global IDs for entity sets.
00391      */
00392 
00393     ErrorCode resolve_shared_sets( EntityHandle this_set, const Tag* id_tag = 0 );
00394 
00395     /** Resolve shared sets.
00396      *
00397      * Use values of id_tag to match sets across processes and populate
00398      * sharing data for sets.
00399      *\param candidate_sets  Sets to consider as potentially shared.
00400      *\param id_tag    Tag containing global IDs for entity sets.
00401      */
00402     ErrorCode resolve_shared_sets( Range& candidate_sets, Tag id_tag );
00403 
00404     /** \brief Extend shared sets with ghost entities
00405      * After ghosting, ghost entities do not yet have information about
00406      * the material set, partition set, or Neumann or Dirichlet set they could
00407      * belong to.
00408      * This method assigns ghosted entities to those special entity sets.
00409      * In some cases we might even have to create those sets, if they do not exist yet on
00410      * the local processor.
00411      *
00412      * The special entity sets all have a unique identifier, in the form of an integer
00413      * tag on the set.
00414      * The shared sets data is not used, because we do not use the geometry sets, as they are
00415      * not uniquely identified.
00416      *
00417      *
00418      * \param file_set file set used per application
00419      *
00420      */
00421     ErrorCode augment_default_sets_with_ghosts( EntityHandle file_set );
00422     // ==================================
00423     // \section GET PARALLEL DATA (shared/owned/iface entities, etc.)
00424     // ==================================
00425 
00426     /** \brief Get parallel status of an entity
00427      * Returns the parallel status of an entity
00428      *
00429      * \param entity The entity being queried
00430      * \param pstatus_val Parallel status of the entity
00431      */
00432     ErrorCode get_pstatus( EntityHandle entity, unsigned char& pstatus_val );
00433 
00434     /** \brief Get entities with the given pstatus bit(s) set
00435      * Returns any entities whose pstatus tag value v satisfies (v & pstatus_val)
00436      *
00437      * \param dim Dimension of entities to be returned, or -1 if any
00438      * \param pstatus_val pstatus value of desired entities
00439      * \param pstatus_ents Entities returned from function
00440      */
00441     ErrorCode get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents );
00442 
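    /* Editor's example (a hedged sketch): collect all ghost entities of any
     * dimension using the pstatus filter bits; PSTATUS_GHOST comes from
     * MBParallelConventions.h.
     * \code
     * #include "moab/ParallelComm.hpp"
     * #include "MBParallelConventions.h"
     * moab::ErrorCode list_ghosts( moab::ParallelComm& pcomm, moab::Range& ghosts )
     * {
     *     // dim = -1: entities of any dimension
     *     return pcomm.get_pstatus_entities( -1, PSTATUS_GHOST, ghosts );
     * }
     * \endcode
     */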
00443     /** \brief Return the rank of the entity owner
00444      */
00445     ErrorCode get_owner( EntityHandle entity, int& owner );
00446 
00447     /** \brief Return the owner processor and handle of a given entity
00448      */
00449     ErrorCode get_owner_handle( EntityHandle entity, int& owner, EntityHandle& handle );
00450 
00451     /** \brief Get the shared processors/handles for an entity
00452      * Get the shared processors/handles for an entity.  Arrays must
00453      * be large enough to receive data for all sharing procs.  Does *not* include
00454      * this proc if only shared with one other proc.
00455      * \param entity Entity being queried
00456      * \param ps Pointer to sharing proc data
00457      * \param hs Pointer to shared proc handle data
00458      * \param pstat Reference to pstatus data returned from this function
00459      */
00460     ErrorCode get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs, unsigned char& pstat,
00461                                 unsigned int& num_ps );
00462 
00463     /** \brief Get the shared processors/handles for an entity
00464      * Same as other version but with int num_ps
00465      * \param entity Entity being queried
00466      * \param ps Pointer to sharing proc data
00467      * \param hs Pointer to shared proc handle data
00468      * \param pstat Reference to pstatus data returned from this function
00469      */
00470     ErrorCode get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs, unsigned char& pstat,
00471                                 int& num_ps );
00472 
00473     /** \brief Get the intersection or union of all sharing processors
00474      * Get the intersection or union of all sharing processors.  Processor set
00475      * is cleared as part of this function.
00476      * \param entities Entity list ptr
00477      * \param num_entities Number of entities
00478      * \param procs Processors returned
00479      * \param op Either Interface::UNION or Interface::INTERSECT
00480      */
00481     ErrorCode get_sharing_data( const EntityHandle* entities, int num_entities, std::set< int >& procs,
00482                                 int op = Interface::INTERSECT );
00483 
00484     /** \brief Get the intersection or union of all sharing processors
00485      * Same as previous variant but with range as input
00486      */
00487     ErrorCode get_sharing_data( const Range& entities, std::set< int >& procs, int op = Interface::INTERSECT );
00488 
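    /* Editor's example (a hedged sketch): find the set of processors that
     * share every entity in a range (their intersection).
     * \code
     * #include <set>
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode common_sharers( moab::ParallelComm& pcomm, const moab::Range& ents,
     *                                 std::set< int >& procs )
     * {
     *     return pcomm.get_sharing_data( ents, procs, moab::Interface::INTERSECT );
     * }
     * \endcode
     */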
00489     /** \brief Get shared entities of specified dimension
00490      * If other_proc is -1, any shared entities are returned.  If dim is -1,
00491      * entities of all dimensions on interface are returned.
00492      * \param other_proc Rank of processor for which interface entities are requested
00493      * \param shared_ents Entities returned from function
00494      * \param dim Dimension of interface entities requested
00495      * \param iface If true, return only entities on the interface
00496      * \param owned_filter If true, return only owned shared entities
00497      */
00498     ErrorCode get_shared_entities( int other_proc, Range& shared_ents, int dim = -1, const bool iface = false,
00499                                    const bool owned_filter = false );
00500     /*
00501     //! return partition sets; if tag_name is input, gets sets with
00502     //! that tag name, otherwise uses PARALLEL_PARTITION tag
00503     ErrorCode get_partition_sets(EntityHandle this_set,
00504     Range &part_sets,
00505     const char *tag_name = NULL);
00506     */
00507     //! get processors with which this processor shares an interface
00508     ErrorCode get_interface_procs( std::set< unsigned int >& iface_procs, const bool get_buffs = false );
00509 
00510     //! get processors with which this processor communicates
00511     ErrorCode get_comm_procs( std::set< unsigned int >& procs );
00512 
00513     // ==================================
00514     // \section SHARED SETS
00515     // ==================================
00516 
00517     //! Get array of process IDs sharing a set.  Returns zero
00518     //! and passes back NULL if set is not shared.
00519     ErrorCode get_entityset_procs( EntityHandle entity_set, std::vector< unsigned >& ranks ) const;
00520 
00521     //! Get rank of the owner of a shared set.
00522     //! Returns this proc if set is not shared.
00523     //! Optionally returns handle on owning process for shared set.
00524     ErrorCode get_entityset_owner( EntityHandle entity_set, unsigned& owner_rank,
00525                                    EntityHandle* remote_handle = 0 ) const;
00526 
00527     //! Given set owner and handle on owner, find local set handle
00528     ErrorCode get_entityset_local_handle( unsigned owning_rank, EntityHandle remote_handle,
00529                                           EntityHandle& local_handle ) const;
00530 
00531     //! Get all shared sets
00532     ErrorCode get_shared_sets( Range& result ) const;
00533 
00534     //! Get ranks of all processes that own at least one set that is
00535     //! shared with this process.  Will include the rank of this process
00536     //! if this process owns any shared set.
00537     ErrorCode get_entityset_owners( std::vector< unsigned >& ranks ) const;
00538 
00539     //! Get shared sets owned by process with specified rank.
00540     ErrorCode get_owned_sets( unsigned owning_rank, Range& sets_out ) const;
00541 
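    /* Editor's example (a hedged sketch): report the owner of each shared set
     * known to this ParallelComm instance.
     * \code
     * #include <iostream>
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode print_set_owners( moab::ParallelComm& pcomm )
     * {
     *     moab::Range sets;
     *     moab::ErrorCode rval = pcomm.get_shared_sets( sets );
     *     if( moab::MB_SUCCESS != rval ) return rval;
     *     for( moab::Range::iterator it = sets.begin(); it != sets.end(); ++it )
     *     {
     *         unsigned owner;
     *         rval = pcomm.get_entityset_owner( *it, owner );
     *         if( moab::MB_SUCCESS != rval ) return rval;
     *         std::cout << "set " << *it << " owned by rank " << owner << "\n";
     *     }
     *     return moab::MB_SUCCESS;
     * }
     * \endcode
     */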
00542     // ==================================
00543     // \section LOW-LEVEL DATA (tags, sets on interface/partition, etc.)
00544     // ==================================
00545 
00546     //! Get proc config for this communication object
00547     const ProcConfig& proc_config() const
00548     {
00549         return procConfig;
00550     }
00551 
00552     //! Get proc config for this communication object
00553     ProcConfig& proc_config()
00554     {
00555         return procConfig;
00556     }
00557 
00558     unsigned rank() const
00559     {
00560         return proc_config().proc_rank();
00561     }
00562     unsigned size() const
00563     {
00564         return proc_config().proc_size();
00565     }
00566     MPI_Comm comm() const
00567     {
00568         return proc_config().proc_comm();
00569     }
00570 
00571     //! return the tags used to indicate shared procs and handles
00572     ErrorCode get_shared_proc_tags( Tag& sharedp_tag, Tag& sharedps_tag, Tag& sharedh_tag, Tag& sharedhs_tag,
00573                                     Tag& pstatus_tag );
00574 
00575     //! return partition, interface set ranges
00576     Range& partition_sets()
00577     {
00578         return partitionSets;
00579     }
00580     const Range& partition_sets() const
00581     {
00582         return partitionSets;
00583     }
00584     Range& interface_sets()
00585     {
00586         return interfaceSets;
00587     }
00588     const Range& interface_sets() const
00589     {
00590         return interfaceSets;
00591     }
00592 
00593     //! return sharedp tag
00594     Tag sharedp_tag();
00595 
00596     //! return sharedps tag
00597     Tag sharedps_tag();
00598 
00599     //! return sharedh tag
00600     Tag sharedh_tag();
00601 
00602     //! return sharedhs tag
00603     Tag sharedhs_tag();
00604 
00605     //! return pstatus tag
00606     Tag pstatus_tag();
00607 
00608     //! return pcomm tag; static because might not have a pcomm before going
00609     //! to look for one on the interface
00610     static Tag pcomm_tag( Interface* impl, bool create_if_missing = true );
00611 
00612     //! return partitions set tag
00613     Tag partition_tag();
00614     Tag part_tag()
00615     {
00616         return partition_tag();
00617     }
00618 
00619     // ==================================
00620     // \section DEBUGGING AIDS
00621     // ==================================
00622 
00623     //! print contents of pstatus value in human-readable form
00624     void print_pstatus( unsigned char pstat, std::string& ostr );
00625 
00626     //! print contents of pstatus value in human-readable form to std::cout
00627     void print_pstatus( unsigned char pstat );
00628 
00629     // ==================================
00630     // \section IMESHP-RELATED FUNCTIONS
00631     // ==================================
00632 
00633     //! return all the entities in parts owned locally
00634     ErrorCode get_part_entities( Range& ents, int dim = -1 );
00635 
00636     EntityHandle get_partitioning() const
00637     {
00638         return partitioningSet;
00639     }
00640     ErrorCode set_partitioning( EntityHandle h );
00641     ErrorCode get_global_part_count( int& count_out ) const;
00642     ErrorCode get_part_owner( int part_id, int& owner_out ) const;
00643     ErrorCode get_part_id( EntityHandle part, int& id_out ) const;
00644     ErrorCode get_part_handle( int id, EntityHandle& handle_out ) const;
00645     ErrorCode create_part( EntityHandle& part_out );
00646     ErrorCode destroy_part( EntityHandle part );
00647     ErrorCode collective_sync_partition();
00648     ErrorCode get_part_neighbor_ids( EntityHandle part, int neighbors_out[MAX_SHARING_PROCS], int& num_neighbors_out );
00649     ErrorCode get_interface_sets( EntityHandle part, Range& iface_sets_out, int* adj_part_id = 0 );
00650     ErrorCode get_owning_part( EntityHandle entity, int& owning_part_id_out, EntityHandle* owning_handle = 0 );
00651     ErrorCode get_sharing_parts( EntityHandle entity, int part_ids_out[MAX_SHARING_PROCS], int& num_part_ids_out,
00652                                  EntityHandle remote_handles[MAX_SHARING_PROCS] = 0 );
00653 
00654     /** Filter the entities by pstatus tag.
00655      * op is one of PSTATUS_AND, PSTATUS_OR, or PSTATUS_NOT; an entity is output if:
00656      * AND: all bits set in pstatus_val are also set on entity
00657      * OR: any bits set in pstatus_val also set on entity
00658      * NOT: any bits set in pstatus_val are not set on entity
00659      *
00660      * Results returned in input list, unless returned_ents is passed in non-null,
00661      * in which case results are returned in returned_ents.
00662      *
00663      * If ents is passed in empty, filter is done on shared entities in this
00664      * pcomm instance, i.e. contents of sharedEnts.
00665      *
00666      *\param ents       Input entities to filter
00667      *\param pstatus_val pstatus value to which entities are compared
00668      *\param op Bitwise operation performed between pstatus values
00669      *\param to_proc If non-negative and PSTATUS_SHARED is set on pstatus_val,
00670      *               only entities shared with to_proc are returned
00671      *\param returned_ents If non-null, results of filter are put in the
00672      *       pointed-to range
00673      */
00674     ErrorCode filter_pstatus( Range& ents, const unsigned char pstatus_val, const unsigned char op, int to_proc = -1,
00675                               Range* returned_ents = NULL );
00676 
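    /* Editor's example (a hedged sketch): reduce a range of shared entities to
     * just the ones this rank owns; the PSTATUS_* constants come from
     * MBParallelConventions.h.
     * \code
     * #include "moab/ParallelComm.hpp"
     * #include "MBParallelConventions.h"
     * moab::ErrorCode keep_owned( moab::ParallelComm& pcomm, moab::Range& ents )
     * {
     *     // keep entities on which the NOT_OWNED bit is NOT set
     *     return pcomm.filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );
     * }
     * \endcode
     */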
00677     /** \brief Get entities on interfaces shared with another proc
00678      *
00679      * \param other_proc Other proc sharing the interface
00680      * \param dim Dimension of entities to return, -1 if all dims
00681      * \param iface_ents Returned entities
00682      */
00683     ErrorCode get_iface_entities( int other_proc, int dim, Range& iface_ents );
00684 
00685     Interface* get_moab() const
00686     {
00687         return mbImpl;
00688     }
00689 
00690     ErrorCode clean_shared_tags( std::vector< Range* >& exchange_ents );
00691 
00692     class Buffer
00693     {
00694       public:
00695         unsigned char* mem_ptr;
00696         unsigned char* buff_ptr;
00697         unsigned int alloc_size;
00698 
00699         Buffer( unsigned int sz = 0 );
00700         Buffer( const Buffer& );
00701         ~Buffer();
00702         void reset_buffer( size_t buff_pos = 0 )
00703         {
00704             reset_ptr( buff_pos );
00705             reserve( INITIAL_BUFF_SIZE );
00706         }
00707         void reset_ptr( size_t buff_pos = 0 )
00708         {
00709             assert( ( !mem_ptr && !buff_pos ) || ( alloc_size >= buff_pos ) );
00710             buff_ptr = mem_ptr + buff_pos;
00711         }
00712         inline void reserve( unsigned int new_size );
00713         void set_stored_size()
00714         {
00715             *( (int*)mem_ptr ) = (int)( buff_ptr - mem_ptr );
00716         }
00717         int get_stored_size()
00718         {
00719             return *( (int*)mem_ptr );
00720         }
00721         int get_current_size()
00722         {
00723             return (int)( buff_ptr - mem_ptr );
00724         }
00725 
00726         void check_space( unsigned int addl_space );
00727     };
00728 
00729     //! public 'cuz we want to unit test these externally
00730     ErrorCode pack_buffer( Range& orig_ents, const bool adjacencies, const bool tags, const bool store_remote_handles,
00731                            const int to_proc, Buffer* buff, TupleList* entprocs = NULL, Range* allsent = NULL );
00732 
00733     ErrorCode unpack_buffer( unsigned char* buff_ptr, const bool store_remote_handles, const int from_proc,
00734                              const int ind, std::vector< std::vector< EntityHandle > >& L1hloc,
00735                              std::vector< std::vector< EntityHandle > >& L1hrem, std::vector< std::vector< int > >& L1p,
00736                              std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
00737                              std::vector< unsigned int >& L2p, std::vector< EntityHandle >& new_ents,
00738                              const bool created_iface = false );
00739 
00740     ErrorCode pack_entities( Range& entities, Buffer* buff, const bool store_remote_handles, const int to_proc,
00741                              const bool is_iface, TupleList* entprocs = NULL, Range* allsent = NULL );
00742 
00743     //! unpack entities in buff_ptr
00744     ErrorCode unpack_entities( unsigned char*& buff_ptr, const bool store_remote_handles, const int from_ind,
00745                                const bool is_iface, std::vector< std::vector< EntityHandle > >& L1hloc,
00746                                std::vector< std::vector< EntityHandle > >& L1hrem,
00747                                std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
00748                                std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
00749                                std::vector< EntityHandle >& new_ents, const bool created_iface = false );
00750 
00751     //! Call exchange_all_shared_handles, then compare the results with tag data
00752     //! on local shared entities.
00753     ErrorCode check_all_shared_handles( bool print_em = false );
00754 
00755     static ErrorCode check_all_shared_handles( ParallelComm** pcs, int num_pcs );
00756 
00757     struct SharedEntityData
00758     {
00759         EntityHandle local;
00760         EntityHandle remote;
00761         EntityID owner;
00762     };
00763 
00764     ErrorCode pack_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data );
00765 
00766     // check consistency of sharedEnts against their tags and their
00767     // vertices' tags
00768     ErrorCode check_local_shared();
00769 
00770     // check contents of communicated shared entity data against tags
00771     ErrorCode check_my_shared_handles( std::vector< std::vector< SharedEntityData > >& shents,
00772                                        const char* prefix = NULL );
00773 
00774     //! set rank for this pcomm; USED FOR TESTING ONLY!
00775     void set_rank( unsigned int r );
00776 
00777     //! set size for this pcomm; USED FOR TESTING ONLY!
00778     void set_size( unsigned int r );
00779 
00780     //! get (and possibly allocate) buffers for messages to/from to_proc; returns
00781     //! index of to_proc in buffProcs vector; if is_new is non-NULL, sets to
00782     //! whether new buffer was allocated
00783     //! PUBLIC ONLY FOR TESTING!
00784     int get_buffers( int to_proc, bool* is_new = NULL );
00785 
00786     //! get buff processor vector
00787     const std::vector< unsigned int >& buff_procs() const;
00788 
00789     /** \brief Unpack message with remote handles
00790      * PUBLIC ONLY FOR TESTING!
00791      */
00792     ErrorCode unpack_remote_handles( unsigned int from_proc, unsigned char*& buff_ptr,
00793                                      std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
00794                                      std::vector< unsigned int >& L2p );
00795 
00796     /** \brief Pack message with remote handles
00797      * PUBLIC ONLY FOR TESTING!
00798      */
00799     ErrorCode pack_remote_handles( std::vector< EntityHandle >& L1hloc, std::vector< EntityHandle >& L1hrem,
00800                                    std::vector< int >& procs, unsigned int to_proc, Buffer* buff );
00801 
00802     // each entry in proc_nvecs contains a set of procs and the entities *possibly*
00803     // on the interface between those procs; this function makes sets for each,
00804     // and tags the set with the procs sharing it; interface sets are optionally
00805     // returned; NOTE: a subsequent step is used to verify entities on the interface
00806     // and remove them if they're not shared
00807     ErrorCode create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );
00808 
00809     // do the same but working straight from sharedEnts
00810     ErrorCode create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim );
00811 
00812     ErrorCode tag_shared_verts( TupleList& shared_ents,
00813                                 std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
00814                                 Range& proc_verts, unsigned int i_extra = 1 );
00815 
00816     ErrorCode list_entities( const EntityHandle* ents, int num_ents );
00817 
00818     ErrorCode list_entities( const Range& ents );
00819 
00820     void set_send_request( int n_request );  // set send request array
00821 
00822     void set_recv_request( int n_request );  // set recv request array
00823 
00824     //! reset message buffers to their initial state
00825     // changed to public function (HJK)
00826     void reset_all_buffers();
00827 
00828     static const unsigned int INITIAL_BUFF_SIZE;
00829 
00830     //! set the verbosity level of output from this pcomm
00831     void set_debug_verbosity( int verb );
00832 
00833     //! get the verbosity level of output from this pcomm
00834     int get_debug_verbosity();
00835 
00836     /** \brief Gather tag value from entities down to a specified root proc
00837      * This function gathers data from a domain-decomposed mesh onto a global mesh
00838      * represented on the root processor.  On the root, this gather mesh is distinct from
00839      * the root's domain-decomposed subdomain.  Entities are matched by global id, or by
00840      * another tag if its handle is input.  The dimension of all entities in gather_ents should
00841      * be the same, since this is the dimension of entities in gather_set that are queried for
00842      * matching global id tags.
00843      * \param gather_ents (Local) entities from which to gather data
00844      * \param tag_handle Tag whose values are being gathered
00845      * \param id_tag Tag to use for matching entities (global id used by default)
00846      * \param gather_set On root, set containing global mesh onto which to put data
00847      * \param root_proc_rank Rank of the specified root processor (default rank is 0)
00848      */
00849     ErrorCode gather_data( Range& gather_ents, Tag& tag_handle, Tag id_tag = 0, EntityHandle gather_set = 0,
00850                            int root_proc_rank = 0 );
00851 
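    /* Editor's example (a hedged sketch): gather a tag from a distributed
     * range of elements onto rank 0; with id_tag = 0 the matching between
     * local and gather-set entities falls back to the global id tag.
     * \code
     * #include "moab/ParallelComm.hpp"
     * moab::ErrorCode gather_to_root( moab::ParallelComm& pcomm, moab::Range& elems,
     *                                 moab::Tag data_tag, moab::EntityHandle gather_set )
     * {
     *     return pcomm.gather_data( elems, data_tag, 0, gather_set, 0 );
     * }
     * \endcode
     */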
00852     /** \brief Communicate positions of extra points on the boundary
00853      * This function is called after the intersection of 2 meshes, to settle the
00854      * positions of the intersection points on the boundary (interface).
00855      * The initial mesh distributed on each processor is decomposed after
00856      * intersection with another mesh, such that new points are created on the
00857      * boundary; these points should match at the interface.
00858      * We perform an extra caution step, to ensure the robustness of the
00859      * intersection algorithm; only the extra nodes on shared edges
00860      * actually need to be communicated, but we pass the whole
00861      * extraNodesVec structure by reference, so we do
00862      * not need to construct another data structure.
00863      * The node positions on owned edges will be communicated to other
00864      * processors.
00865      *
00866      * \param edges total range of entities
00867      * \param shared_edges_owned edges for which to communicate data
00868      * \param extraNodesVec handles of intersection vertices on all edges
00869      */
00870     ErrorCode settle_intersection_points( Range& edges, Range& shared_edges_owned,
00871                                           std::vector< std::vector< EntityHandle >* >& extraNodesVec,
00872                                           double tolerance );
00873 
00874     /** \brief Delete entities from the MOAB database
00875      * Will check the shared ents array, and clean it if necessary.
00876      *
00877      */
00878     ErrorCode delete_entities( Range& to_delete );
00879 
00880     /**
00881      * \brief Correct multi-sharing info for thin layers
00882      *
00883      * Used when there are at least 3 processes and thin ghost layers.
00884      * Right now it is public, to allow users to call it directly;
00885      * eventually, it should become private and be called automatically.
00886      */
00887 
00888     ErrorCode correct_thin_ghost_layers();
00889 
00890   private:
00891     ErrorCode reduce_void( int tag_data_type, const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
00892 
00893     template < class T >
00894     ErrorCode reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
00895 
00896     void print_debug_isend( int from, int to, unsigned char* buff, int tag, int size );
00897 
00898     void print_debug_irecv( int to, int from, unsigned char* buff, int size, int tag, int incoming );
00899 
00900     void print_debug_recd( MPI_Status status );
00901 
00902     void print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc );
00903 
00904     // common initialization code, called from various constructors
00905     void initialize();
00906 
00907     ErrorCode set_sharing_data( EntityHandle ent, unsigned char pstatus, int old_nump, int new_nump, int* ps,
00908                                 EntityHandle* hs );
00909 
00910     ErrorCode check_clean_iface( Range& allsent );
00911 
00912     void define_mpe();
00913 
00914     ErrorCode get_sent_ents( const bool is_iface, const int bridge_dim, const int ghost_dim, const int num_layers,
00915                              const int addl_ents, Range* sent_ents, Range& allsent, TupleList& entprocs );
00916 
00917     /** \brief Set pstatus values on entities
00918      *
00919      * \param pstatus_ents Entities to be set
00920      * \param pstatus_val Pstatus value to be set
00921      * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
00922      *        (and created if they don't exist)
00923      * \param verts_too If true, vertices also set
00924      * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
00925      *        existing value is over-written
00926      */
00927     ErrorCode set_pstatus_entities( Range& pstatus_ents, unsigned char pstatus_val, bool lower_dim_ents = false,
00928                                     bool verts_too = true, int operation = Interface::UNION );
00929 
00930     /** \brief Set pstatus values on entities (vector-based function)
00931      *
00932      * \param pstatus_ents Entities to be set
00933      * \param pstatus_val Pstatus value to be set
00934      * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
00935      *        (and created if they don't exist)
00936      * \param verts_too If true, vertices also set
00937      * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
00938      *        existing value is over-written
00939      */
00940     ErrorCode set_pstatus_entities( EntityHandle* pstatus_ents, int num_ents, unsigned char pstatus_val,
00941                                     bool lower_dim_ents = false, bool verts_too = true,
00942                                     int operation = Interface::UNION );
00943 
00944     //! estimate size required to pack entities
00945     int estimate_ents_buffer_size( Range& entities, const bool store_remote_handles );
00946 
00947     //! estimate size required to pack sets
00948     int estimate_sets_buffer_size( Range& entities, const bool store_remote_handles );
00949 
00950     //! send the indicated buffer, possibly sending size first
00951     ErrorCode send_buffer( const unsigned int to_proc, Buffer* send_buff, const int msg_tag, MPI_Request& send_req,
00952                            MPI_Request& ack_recv_req, int* ack_buff, int& this_incoming, int next_mesg_tag = -1,
00953                            Buffer* next_recv_buff = NULL, MPI_Request* next_recv_req = NULL,
00954                            int* next_incoming = NULL );
00955 
00956     //! process incoming message; if longer than the initial size, post
00957     //! recv for next part then send ack; if ack, send second part; else
00958     //! indicate that we're done and buffer is ready for processing
00959     ErrorCode recv_buffer( int mesg_tag_expected, const MPI_Status& mpi_status, Buffer* recv_buff,
00960                            MPI_Request& recv_2nd_req, MPI_Request& ack_req, int& this_incoming, Buffer* send_buff,
00961                            MPI_Request& send_req, MPI_Request& sent_ack_req, bool& done, Buffer* next_buff = NULL,
00962                            int next_tag = -1, MPI_Request* next_req = NULL, int* next_incoming = NULL );
00963 
00964     //! pack a range of entities with equal # verts per entity, along with
00965     //! the range on the sending proc
00966     ErrorCode pack_entity_seq( const int nodes_per_entity, const bool store_remote_handles, const int to_proc,
00967                                Range& these_ents, std::vector< EntityHandle >& entities, Buffer* buff );
00968 
00969     ErrorCode print_buffer( unsigned char* buff_ptr, int mesg_type, int from_proc, bool sent );
00970 
00971     //! for all the entities in the received buffer; for each, save
00972     //! entities in this instance which match connectivity, or zero if none found
00973     ErrorCode unpack_iface_entities( unsigned char*& buff_ptr, const int from_proc, const int ind,
00974                                      std::vector< EntityHandle >& recd_ents );
00975 
00976     ErrorCode pack_sets( Range& entities, Buffer* buff, const bool store_handles, const int to_proc );
00977 
00978     ErrorCode unpack_sets( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities, const bool store_handles,
00979                            const int to_proc );
00980 
00981     ErrorCode pack_adjacencies( Range& entities, Range::const_iterator& start_rit, Range& whole_range,
00982                                 unsigned char*& buff_ptr, int& count, const bool just_count, const bool store_handles,
00983                                 const int to_proc );
00984 
00985     ErrorCode unpack_adjacencies( unsigned char*& buff_ptr, Range& entities, const bool store_handles,
00986                                   const int from_proc );
00987 
00988     /** \brief Unpack message with remote handles (const pointer to buffer)
00989      */
00990     ErrorCode unpack_remote_handles( unsigned int from_proc, const unsigned char* buff_ptr,
00991                                      std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
00992                                      std::vector< unsigned int >& L2p );
00993 
00994     //! given connectivity and type, find an existing entity, if there is one
00995     ErrorCode find_existing_entity( const bool is_iface, const int owner_p, const EntityHandle owner_h,
00996                                     const int num_ents, const EntityHandle* connect, const int num_connect,
00997                                     const EntityType this_type, std::vector< EntityHandle >& L2hloc,
00998                                     std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
00999                                     EntityHandle& new_h );
01000 
01001     ErrorCode build_sharedhps_list( const EntityHandle entity, const unsigned char pstatus, const int sharedp,
01002                                     const std::set< unsigned int >& procs, unsigned int& num_ents, int* tmp_procs,
01003                                     EntityHandle* tmp_handles );
01004 
01005     /**\brief Get list of tags for which to exchange data
01006      *
01007      * Get tags and entities for which to exchange tag data.  This function
01008      * was originally part of 'pack_tags' requested with the
01009      * 'all_possible_tags' parameter.
01010      *
01011      *\param all_entities  Input.  The set of entities for which data is to
01012      *                      be communicated.
01013      *\param all_tags      Output.  Populated with the handles of tags to be
01014      *                      sent.
01015      *\param tag_ranges    Output.  For each corresponding tag in all_tags, the
01016      *                      subset of 'all_entities' for which a tag value has
01017      *                      been set.
01018      */
01019     ErrorCode get_tag_send_list( const Range& all_entities, std::vector< Tag >& all_tags,
01020                                  std::vector< Range >& tag_ranges );
01021 
01022     /**\brief Serialize entity tag data
01023      *
01024      * This function operates in two passes.  The first phase,
01025      * specified by 'just_count == true' calculates the necessary
01026      * buffer size for the serialized data.  The second phase
01027      * writes the actual binary serialized representation of the
01028      * data to the passed buffer.
01029      *
01030      *\note First two arguments are not used.  (Legacy interface?)
01031      *
01032      *\param entities      NOT USED
01033      *\param start_rit     NOT USED
01034      *\param whole_range   Should be the union of the sets of entities for
01035      *                     which tag values are to be serialized.  Also
01036      *                     specifies ordering for indexes for tag values and
01037      *                     serves as the superset from which to compose entity
01038      *                     lists from individual tags if just_count and
01039      *                     all_possible_tags are both true.
01040      *\param buff_ptr      Buffer into which to write binary serialized data
01041      *\param count         Output:  The size of the serialized data is added
01042      *                     to this parameter.  NOTE: Callers should initialize
01043      *                     it to zero before calling.
01044      *\param just_count    If true, just calculate the buffer size required to
01045      *                     hold the serialized data.  Will also append to
01046      *                     'all_tags' and 'tag_ranges' if all_possible_tags
01047      *                     == true.
01048      *\param store_handles The data for each tag is preceded by a list of
01049      *                     EntityHandles designating the entity each of
01050      *                     the subsequent tag values corresponds to.  This value
01051      *                     may be one of:
01052      *                     1) If store_handles == false:
01053      *                        An invalid handle composed of {MBMAXTYPE,idx}, where
01054      *                        idx is the position of the entity in "whole_range".
01055      *                     2) If store_handles == true and a valid remote
01056      *                        handle exists, the remote handle.
01057      *                     3) If store_handles == true and no valid remote
01058      *                        handle is defined for the entity, the same as 1).
01059      *\param to_proc       If 'store_handles' is true, the processor rank for
01060      *                     which to store the corresponding remote entity
01061      *                     handles.
01062      *\param all_tags      List of tags to write
01063      *\param tag_ranges    List of entities for which to serialize tag data,
01064      *                     one for each corresponding tag handle in 'all_tags'.
01065      */
01066     ErrorCode pack_tags( Range& entities, const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags,
01067                          const std::vector< Range >& tag_ranges, Buffer* buff, const bool store_handles,
01068                          const int to_proc );
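
    /* Illustrative sketch (editorial addition, not part of the original
     * header): the size-then-write pattern the tag-packing helpers above
     * support.  These members are internal to ParallelComm, so this is
     * schematic; the names 'pcomm', 'ents', and 'to_proc' are assumptions
     * for the example.
     *
     *   std::vector< Tag > tags;
     *   std::vector< Range > tag_ranges;
     *   pcomm->get_tag_send_list( ents, tags, tag_ranges );
     *   int total = 0;
     *   for( size_t i = 0; i < tags.size(); ++i )
     *   {
     *       int sz = 0;
     *       pcomm->packed_tag_size( tags[i], tag_ranges[i], sz );
     *       total += sz;
     *   }
     *   Buffer buff( total );  // reserve the full size up front
     *   pcomm->pack_tags( ents, tags, tags, tag_ranges, &buff, true, to_proc );
     */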
01069 
01070     /**\brief Calculate buffer size required to pack tag data
01071      *\param source_tag The tag for which data will be serialized
01072      *\param entities    The entities for which tag values will be serialized
01073      *\param count_out  Output: The required buffer size, in bytes.
01074      */
01075     ErrorCode packed_tag_size( Tag source_tag, const Range& entities, int& count_out );
01076 
01077     /**\brief Serialize tag data
01078      *\param source_tag    The tag for which data will be serialized
01079      *\param destination_tag Tag in which to store unpacked tag data.  Typically
01080      *                     the same as source_tag.
01081      *\param entities       The entities for which tag values will be serialized
01082      *\param whole_range   Calculate entity indices as location in this range
01083      *\param buff          Buffer into which to serialize the data; its
01084      *                     write position ends up just past the serialized
01085      *                     data.  (Older versions took a raw 'buff_ptr' and
01086      *                     returned the byte count in 'count_out'.)
01087      *\param store_remote_handles The data for each tag is preceded by a list of
01088      *                     EntityHandles designating the entity each of
01089      *                     the subsequent tag values corresponds to.  This value
01090      *                     may be one of:
01091      *                     1) If store_remote_handles == false:
01092      *                        An invalid handle composed of {MBMAXTYPE,idx}, where
01093      *                        idx is the position of the entity in "whole_range".
01094      *                     2) If store_remote_handles == true and a valid remote
01095      *                        handle exists, the remote handle.
01096      *                     3) If store_remote_handles == true and no valid remote
01097      *                        handle is defined for the entity, the same as 1).
01098      *\param to_proc       If 'store_remote_handles' is true, the processor
01099      *                     rank for which to store the corresponding remote
01100      *                     entity handles.
01101      */
01102     ErrorCode pack_tag( Tag source_tag, Tag destination_tag, const Range& entities,
01103                         const std::vector< EntityHandle >& whole_range, Buffer* buff, const bool store_remote_handles,
01104                         const int to_proc );
01105 
01106     ErrorCode unpack_tags( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities, const bool store_handles,
01107                            const int to_proc, const MPI_Op* const mpi_op = NULL );
01108 
01109     ErrorCode tag_shared_verts( TupleList& shared_verts, Range* skin_ents,
01110                                 std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
01111                                 Range& proc_verts );
01112 
01113     ErrorCode get_proc_nvecs( int resolve_dim, int shared_dim, Range* skin_ents,
01114                               std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );
01115 
01116     //! after shared entities are verified, parent/child links between interface sets can be established
01117     ErrorCode create_iface_pc_links();
01118 
01119     //! pack a range map with keys in this_range and values a contiguous series
01120     //! of handles starting at actual_start
01121     ErrorCode pack_range_map( Range& this_range, EntityHandle actual_start, HandleMap& handle_map );
01122 
01123     //! returns true if the set is an interface shared with to_proc
01124     bool is_iface_proc( EntityHandle this_set, int to_proc );
01125 
01126     //! for any remote handles set to zero, remove the corresponding sent_ents
01127     //! entries from the iface_sets for from_proc
01128     ErrorCode update_iface_sets( Range& sent_ents, std::vector< EntityHandle >& remote_handles, int from_proc );
01129 
01130     //! for specified bridge/ghost dimension, to_proc, and number
01131     //! of layers, get the entities to be ghosted, and info on additional procs
01132     //! needing to communicate with to_proc
01133     ErrorCode get_ghosted_entities( int bridge_dim, int ghost_dim, int to_proc, int num_layers, int addl_ents,
01134                                     Range& ghosted_ents );
01135 
01136     //! add vertices adjacent to entities in this list
01137     ErrorCode add_verts( Range& sent_ents );
01138 
01139     //! Every processor sends shared entity handle data to every other processor
01140     //! that it shares entities with.  The passed-back data is everything received,
01141     //! indexed by the position of the source processor in buffProcs. This function
01142     //! is intended to be used for debugging.
01143     ErrorCode exchange_all_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data,
01144                                            std::vector< std::vector< SharedEntityData > >& result );
01145 
01146     //! replace handles in from_vec with corresponding handles on
01147     //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
01148     //! if no remote handle and new_ents is non-null, substitute
01149     //! instead CREATE_HANDLE(MBMAXTYPE, index) where index is handle's
01150     //! position in new_ents
01151     ErrorCode get_remote_handles( const bool store_remote_handles, EntityHandle* from_vec, EntityHandle* to_vec_tmp,
01152                                   int num_ents, int to_proc, const std::vector< EntityHandle >& new_ents );
01153 
01154     //! same as other version, except from_range and to_range should be
01155     //! different here
01156     ErrorCode get_remote_handles( const bool store_remote_handles, const Range& from_range, Range& to_range,
01157                                   int to_proc, const std::vector< EntityHandle >& new_ents );
01158 
01159     //! same as other version, except packs range into vector
01160     ErrorCode get_remote_handles( const bool store_remote_handles, const Range& from_range, EntityHandle* to_vec,
01161                                   int to_proc, const std::vector< EntityHandle >& new_ents );
01162 
01163     //! goes through from_vec, and for any with type MBMAXTYPE, replaces with
01164     //! new_ents value at index corresponding to id of entity in from_vec
01165     ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents );
01166 
01167     //! same as above except puts results in range
01168     ErrorCode get_local_handles( const Range& remote_handles, Range& local_handles,
01169                                  const std::vector< EntityHandle >& new_ents );
01170 
01171     //! same as above except gets new_ents from vector
01172     ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const std::vector< EntityHandle >& new_ents );
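
    /* Illustrative sketch (editorial addition): the placeholder scheme that
     * the get_local_handles variants above resolve.  A handle whose type is
     * MBMAXTYPE encodes an index into new_ents rather than a real entity;
     * the substitution is roughly:
     *
     *   for( int i = 0; i < num_ents; ++i )
     *       if( MBMAXTYPE == TYPE_FROM_HANDLE( from_vec[i] ) )
     *           from_vec[i] = new_ents[ID_FROM_HANDLE( from_vec[i] )];
     */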
01173 
01174     ErrorCode update_remote_data( Range& local_range, Range& remote_range, int other_proc,
01175                                   const unsigned char add_pstat );
01176 
01177     ErrorCode update_remote_data( const EntityHandle new_h, const int* ps, const EntityHandle* hs, const int num_ps,
01178                                   const unsigned char add_pstat );
01179 
01180     ErrorCode update_remote_data_old( const EntityHandle new_h, const int* ps, const EntityHandle* hs, const int num_ps,
01181                                       const unsigned char add_pstat );
01182 
01183     /** \brief Set pstatus tag interface bit on entities in sets passed in
01184      */
01185     ErrorCode tag_iface_entities();
01186 
01187     //! add a pc to the iface instance tag PARALLEL_COMM
01188     int add_pcomm( ParallelComm* pc );
01189 
01190     //! remove a pc from the iface instance tag PARALLEL_COMM
01191     void remove_pcomm( ParallelComm* pc );
01192 
01193     //! check entities to make sure there are no zero-valued remote handles
01194     //! where they shouldn't be
01195     ErrorCode check_sent_ents( Range& allsent );
01196 
01197     //! assign entities to the input processor part
01198     ErrorCode assign_entities_part( std::vector< EntityHandle >& entities, const int proc );
01199 
01200     //! remove entities from the input processor part
01201     ErrorCode remove_entities_part( Range& entities, const int proc );
01202 
01203     //! MB interface associated with this ParallelComm instance
01204     Interface* mbImpl;
01205 
01206     //! Proc config object, keeps info on parallel stuff
01207     ProcConfig procConfig;
01208 
01209     //! Sequence manager, to get more efficient access to entities
01210     SequenceManager* sequenceManager;
01211 
01212     //! Error handler
01213     Error* errorHandler;
01214 
01215     //! proc-specific data buffers, indexed in parallel with buffProcs
01216     std::vector< Buffer* > localOwnedBuffs, remoteOwnedBuffs;
01217 
01218     //! reset message buffers to their initial state
01219     // void reset_all_buffers();  // declaration commented out; see inline definition below
01220 
01221     //! delete all buffers, freeing up any memory held by them
01222     void delete_all_buffers();
01223 
01224     //! request objects, may be used if store_remote_handles is used
01225     std::vector< MPI_Request > sendReqs;
01226 
01227     //! receive request objects
01228     std::vector< MPI_Request > recvReqs, recvRemotehReqs;
01229 
01230     //! processor rank for each buffer index
01231     std::vector< unsigned int > buffProcs;
01232 
01233     //! the partition and interface sets for this communication instance
01234     Range partitionSets, interfaceSets;
01235 
01236     //! all local entities shared with others, whether ghost or ghosted
01237     std::set< EntityHandle > sharedEnts;
01238 
01239     //! tags used to save sharing procs and handles
01240     Tag sharedpTag, sharedpsTag, sharedhTag, sharedhsTag, pstatusTag, ifaceSetsTag, partitionTag;
01241 
01242     int globalPartCount;  //!< Cache of global part count
01243 
01244     EntityHandle partitioningSet;  //!< entity set containing all parts
01245 
01246     std::ofstream myFile;
01247 
01248     int pcommID;
01249 
01250     int ackbuff;
01251 
01252     //! used to set verbosity level and to report output
01253     DebugOutput* myDebug;
01254 
01255     //! Data about shared sets
01256     SharedSetData* sharedSetData;
01257 };
01258 
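//! make a deep copy of other_buff, preserving the offset of buff_ptr within the buffer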
01259 inline ParallelComm::Buffer::Buffer( const Buffer& other_buff )
01260 {
01261     alloc_size = other_buff.alloc_size;
01262     mem_ptr    = (unsigned char*)malloc( alloc_size );
01263     memcpy( mem_ptr, other_buff.mem_ptr, alloc_size );
01264     buff_ptr = mem_ptr + ( other_buff.buff_ptr - other_buff.mem_ptr );
01265 }
01266 
01267 inline ParallelComm::Buffer::Buffer( unsigned int new_size ) : mem_ptr( NULL ), buff_ptr( NULL ), alloc_size( 0 )
01268 {
01269     if( new_size ) this->reserve( new_size );
01270 }
01271 
01272 inline ParallelComm::Buffer::~Buffer()
01273 {
01274     if( mem_ptr )
01275     {
01276         free( mem_ptr );
01277         mem_ptr = NULL;
01278     }
01279 }
01280 
01281 #define DEBUG_BUFFER 0
01282 
01283 inline void ParallelComm::Buffer::reserve( unsigned int new_size )
01284 {
01285 
01286 #if DEBUG_BUFFER
01287     int tmp_pos = 0;
01288     if( mem_ptr ) { tmp_pos = buff_ptr - mem_ptr; }
01289     buff_ptr = (unsigned char*)malloc( new_size );
01290     assert( 0 <= tmp_pos && tmp_pos <= (int)alloc_size );
01291     if( tmp_pos ) memcpy( buff_ptr, mem_ptr, tmp_pos );
01292     if( mem_ptr ) free( mem_ptr );
01293     mem_ptr    = buff_ptr;
01294     alloc_size = new_size;
01295     buff_ptr   = mem_ptr + tmp_pos;
01296 #else
01297     if( mem_ptr && alloc_size < new_size )
01298     {
01299         size_t tmp_pos = buff_ptr - mem_ptr;  // mem_ptr is non-null in this branch
01300         mem_ptr        = (unsigned char*)realloc( mem_ptr, new_size );
01301         alloc_size     = new_size;
01302         buff_ptr       = mem_ptr + tmp_pos;
01303     }
01304     else if( !mem_ptr )
01305     {
01306         mem_ptr    = (unsigned char*)malloc( new_size );
01307         alloc_size = new_size;
01308         buff_ptr   = mem_ptr;
01309     }
01310 #endif
01311 }
01312 
01313 inline void ParallelComm::Buffer::check_space( unsigned int addl_space )
01314 {
01315     assert( buff_ptr >= mem_ptr && buff_ptr <= mem_ptr + alloc_size );
01316     unsigned int new_size = buff_ptr - mem_ptr + addl_space;
01317     if( new_size > alloc_size ) reserve( 3 * new_size / 2 );
01318 }
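
/* Illustrative sketch (editorial addition): the append idiom check_space()
 * supports.  reserve() grows the allocation by ~1.5x and preserves the
 * current offset, so callers can pack piecewise.  'pack_int' is a
 * hypothetical helper, not part of this class:
 *
 *   void pack_int( ParallelComm::Buffer& buff, int value )
 *   {
 *       buff.check_space( sizeof( int ) );
 *       memcpy( buff.buff_ptr, &value, sizeof( int ) );
 *       buff.buff_ptr += sizeof( int );
 *   }
 */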
01319 
01320 inline void ParallelComm::reset_all_buffers()
01321 {
01322     std::vector< Buffer* >::iterator vit;
01323     for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
01324         ( *vit )->reset_buffer();
01325     for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
01326         ( *vit )->reset_buffer();
01327 }
01328 
01329 inline void ParallelComm::delete_all_buffers()
01330 {
01331     std::vector< Buffer* >::iterator vit;
01332     for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
01333         delete( *vit );
01334     localOwnedBuffs.clear();
01335 
01336     for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
01337         delete( *vit );
01338     remoteOwnedBuffs.clear();
01339 }
01340 
01341 inline const std::vector< unsigned int >& ParallelComm::buff_procs() const
01342 {
01343     return buffProcs;
01344 }
01345 
01346 inline ErrorCode ParallelComm::get_shared_proc_tags( Tag& sharedp, Tag& sharedps, Tag& sharedh, Tag& sharedhs,
01347                                                      Tag& pstatus )
01348 {
01349     sharedp  = sharedp_tag();
01350     sharedps = sharedps_tag();
01351     sharedh  = sharedh_tag();
01352     sharedhs = sharedhs_tag();
01353     pstatus  = pstatus_tag();
01354 
01355     return MB_SUCCESS;
01356 }
01357 
01358 inline ErrorCode ParallelComm::exchange_tags( const char* tag_name, const Range& entities )
01359 {
01360     // get the tag handle
01361     std::vector< Tag > tags( 1 );
01362     ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
01363     if( MB_SUCCESS != result )
01364         return result;
01365     else if( !tags[0] )
01366         return MB_TAG_NOT_FOUND;
01367 
01368     return exchange_tags( tags, tags, entities );
01369 }
01370 
01371 inline ErrorCode ParallelComm::exchange_tags( Tag tagh, const Range& entities )
01372 {
01373     // get the tag handle
01374     std::vector< Tag > tags;
01375     tags.push_back( tagh );
01376 
01377     return exchange_tags( tags, tags, entities );
01378 }
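
/* Illustrative sketch (editorial addition): exchanging a tag on all shared
 * entities.  'pcomm' and the tag name "FIELD" are assumptions; the tag must
 * already be defined on this processor.
 *
 *   Range shared;
 *   ErrorCode rval = pcomm->get_shared_entities( -1, shared );
 *   if( MB_SUCCESS == rval ) rval = pcomm->exchange_tags( "FIELD", shared );
 */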
01379 
01380 inline ErrorCode ParallelComm::reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities )
01381 {
01382     // get the tag handle
01383     std::vector< Tag > tags( 1 );
01384     ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
01385     if( MB_SUCCESS != result )
01386         return result;
01387     else if( !tags[0] )
01388         return MB_TAG_NOT_FOUND;
01389 
01390     return reduce_tags( tags, tags, mpi_op, entities );
01391 }
01392 
01393 inline ErrorCode ParallelComm::reduce_tags( Tag tagh, const MPI_Op mpi_op, const Range& entities )
01394 {
01395     // get the tag handle
01396     std::vector< Tag > tags;
01397     tags.push_back( tagh );
01398 
01399     return reduce_tags( tags, tags, mpi_op, entities );
01400 }
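
/* Illustrative sketch (editorial addition): reducing a tag across sharing
 * processors with MPI_SUM.  'pcomm', 'th', and 'shared_ents' are assumptions
 * for the example.
 *
 *   ErrorCode rval = pcomm->reduce_tags( th, MPI_SUM, shared_ents );
 */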
01401 
01402 inline ErrorCode ParallelComm::get_comm_procs( std::set< unsigned int >& procs )
01403 {
01404     ErrorCode result = get_interface_procs( procs );
01405     if( MB_SUCCESS != result ) return result;
01406 
01407     std::copy( buffProcs.begin(), buffProcs.end(), std::inserter( procs, procs.begin() ) );
01408 
01409     return MB_SUCCESS;
01410 }
01411 
01412 inline ErrorCode ParallelComm::get_owner( EntityHandle entity, int& owner )
01413 {
01414     EntityHandle tmp_handle;
01415     return get_owner_handle( entity, owner, tmp_handle );
01416 }
01417 
01418 /**\brief Unpack message with remote handles (const pointer to buffer)
01419  */
01420 inline ErrorCode ParallelComm::unpack_remote_handles( unsigned int from_proc, const unsigned char* buff_ptr,
01421                                                       std::vector< EntityHandle >& L2hloc,
01422                                                       std::vector< EntityHandle >& L2hrem,
01423                                                       std::vector< unsigned int >& L2p )
01424 {
01425     // cast away const-ness, we won't be passing back a modified ptr
01426     unsigned char* tmp_buff = const_cast< unsigned char* >( buff_ptr );
01427     return unpack_remote_handles( from_proc, tmp_buff, L2hloc, L2hrem, L2p );
01428 }
01429 
01430 inline void ParallelComm::set_rank( unsigned int r )
01431 {
01432     procConfig.proc_rank( r );
01433     if( procConfig.proc_size() <= r ) procConfig.proc_size( r + 1 );  // rank must be < size
01434 }
01435 
01436 inline void ParallelComm::set_size( unsigned int s )
01437 {
01438     procConfig.proc_size( s );
01439 }
01440 
01441 inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle* entities, int num_entities, std::set< int >& procs,
01442                                                  int op )
01443 {
01444     Range dum_range;
01445     // std::copy accepts const input iterators, so no const_cast is needed
01446     std::copy( entities, entities + num_entities, range_inserter( dum_range ) );
01448     return get_sharing_data( dum_range, procs, op );
01449 }
01450 
01451 inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs,
01452                                                  unsigned char& pstat, int& num_ps )
01453 {
01454     unsigned int dum_ps;
01455     ErrorCode result = get_sharing_data( entity, ps, hs, pstat, dum_ps );
01456     if( MB_SUCCESS == result ) num_ps = dum_ps;
01457     return result;
01458 }
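
/* Illustrative sketch (editorial addition): querying the sharing data for a
 * single entity.  'pcomm' and 'ent' are assumptions for the example.
 *
 *   int ps[MAX_SHARING_PROCS], num_ps = 0;
 *   EntityHandle hs[MAX_SHARING_PROCS];
 *   unsigned char pstat;
 *   ErrorCode rval = pcomm->get_sharing_data( ent, ps, hs, pstat, num_ps );
 *   // on success, ps[0..num_ps-1] holds the sharing ranks and hs the
 *   // corresponding remote handles
 */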
01459 
01460 inline void ParallelComm::set_send_request( int n_request )
01461 {
01462     sendReqs.resize( n_request, MPI_REQUEST_NULL );
01463 }
01464 
01465 inline void ParallelComm::set_recv_request( int n_request )
01466 {
01467     recvReqs.resize( n_request, MPI_REQUEST_NULL );
01468 }
01469 }  // namespace moab
01470 
01471 #endif