MOAB: Mesh Oriented datABase  (version 5.4.1)
ParallelComm.hpp
/**
 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
 * storing and accessing finite element mesh data.
 *
 * Copyright 2004 Sandia Corporation.  Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
 * retains certain rights in this software.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 */

#ifndef MOAB_PARALLEL_COMM_HPP
#define MOAB_PARALLEL_COMM_HPP

#include "moab/Forward.hpp"
#include "moab/Interface.hpp"
#include "moab/Range.hpp"
#include "moab/ProcConfig.hpp"
#include <map>
#include <set>
#include <vector>
#include <iostream>
#include <fstream>
#include <cassert>
#include <cstdlib>
#include <cmath>
#include "moab/TupleList.hpp"

namespace moab
{

class SequenceManager;
class Error;
template < typename KeyType, typename ValType, ValType NullVal >
class RangeMap;
typedef RangeMap< EntityHandle, EntityHandle, 0 > HandleMap;
class ParallelMergeMesh;
class DebugOutput;
class SharedSetData;

#define MAX_SHARING_PROCS 64

/**
 * \brief Parallel communications in MOAB
 * \author Tim Tautges
 *
 *  This class implements methods to communicate mesh between processors
 *
 */
class ParallelComm
{
  public:
    friend class ParallelMergeMesh;

    // ==================================
    // \section CONSTRUCTORS/DESTRUCTORS/PCOMM MANAGEMENT
    // ==================================

    //! constructor
    ParallelComm( Interface* impl, MPI_Comm comm, int* pcomm_id_out = 0 );

    //! constructor taking packed buffer, for testing
    ParallelComm( Interface* impl, std::vector< unsigned char >& tmp_buff, MPI_Comm comm, int* pcomm_id_out = 0 );

    //! Get ID used to reference this PCOMM instance
    int get_id() const
    {
        return pcommID;
    }

    //! get the indexed pcomm object from the interface
    static ParallelComm* get_pcomm( Interface* impl, const int index );

    //! Get ParallelComm instance associated with partition handle
    //! Will create ParallelComm instance if a) one does not already
    //! exist and b) a valid value for MPI_Comm is passed.
    static ParallelComm* get_pcomm( Interface* impl, EntityHandle partitioning, const MPI_Comm* comm = 0 );

    static ErrorCode get_all_pcomm( Interface* impl, std::vector< ParallelComm* >& list );

    //! destructor
    ~ParallelComm();
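
    // Usage sketch (illustrative only, not part of the API): create a ParallelComm
    // for an Interface 'mb', or look up one created earlier (e.g. by a parallel read).
    // Assumes MPI has been initialized.
    //
    //   ParallelComm pcomm( mb, MPI_COMM_WORLD );             // new instance
    //   ParallelComm* pc = ParallelComm::get_pcomm( mb, 0 );  // instance with id 0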

    static unsigned char PROC_SHARED, PROC_OWNER;

    // ==================================
    // \section GLOBAL IDS
    // ==================================

    //! assign a global id space, for largest-dimension or all entities (and
    //! in either case for vertices too)
    //!\param owned_only If true, do not get global IDs for non-owned entities
    //!                  from remote processors.
    ErrorCode assign_global_ids( EntityHandle this_set,
                                 const int dimension,
                                 const int start_id          = 1,
                                 const bool largest_dim_only = true,
                                 const bool parallel         = true,
                                 const bool owned_only       = false );
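
    // Usage sketch (illustrative only): assign global ids to the 3-dimensional
    // entities (and their vertices) in the whole mesh, starting at id 1.
    // Assumes 'pcomm' is a constructed ParallelComm.
    //
    //   ErrorCode rval = pcomm.assign_global_ids( 0 /*root set = whole mesh*/, 3 );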

    //! assign a global id space, for largest-dimension or all entities (and
    //! in either case for vertices too)
    ErrorCode assign_global_ids( Range entities[],
                                 const int dimension,
                                 const int start_id,
                                 const bool parallel,
                                 const bool owned_only );

    //! check for global ids; based only on tag handle being there or not;
    //! if it's not there, create them for the specified dimensions
    //!\param owned_only If true, do not get global IDs for non-owned entities
    //!                  from remote processors.
    ErrorCode check_global_ids( EntityHandle this_set,
                                const int dimension,
                                const int start_id          = 1,
                                const bool largest_dim_only = true,
                                const bool parallel         = true,
                                const bool owned_only       = false );

    // ==================================
    // \section HIGH-LEVEL COMMUNICATION (send/recv/bcast/scatter ents, exchange tags)
    // ==================================

    /** \brief Send entities to another processor, optionally waiting until it's done
     *
     * Send entities to another processor, with adjacencies, sets, and tags.
     * If store_remote_handles is true, this call receives back handles assigned to
     * entities sent to the destination processor and stores them in sharedh_tag or
     * sharedhs_tag.
     * \param to_proc Destination processor
     * \param orig_ents Entities requested to send
     * \param adjs If true, send adjacencies for equivalent entities (currently unsupported)
     * \param tags If true, send tag values for all tags assigned to entities
     * \param store_remote_handles If true, also receive a message with handles on the
     *        destination processor (currently unsupported)
     * \param final_ents Range containing all entities sent
     * \param incoming1 Counter of incoming entity messages expected by this processor (newly added)
     * \param incoming2 Counter of incoming remote-handle messages expected by this processor (newly added)
     * \param wait_all If true, wait until all messages received/sent complete
     */
    ErrorCode send_entities( const int to_proc,
                             Range& orig_ents,
                             const bool adjs,
                             const bool tags,
                             const bool store_remote_handles,
                             const bool is_iface,
                             Range& final_ents,
                             int& incoming1,
                             int& incoming2,                                 // newly added
                             TupleList& entprocs,                            // newly added
                             std::vector< MPI_Request >& recv_remoteh_reqs,  // newly added
                             bool wait_all = true );

    ErrorCode send_entities( std::vector< unsigned int >& send_procs,
                             std::vector< Range* >& send_ents,
                             int& incoming1,
                             int& incoming2,
                             const bool store_remote_handles );

    /** \brief Receive entities from another processor, optionally waiting until it's done
     *
     * Receive entities from another processor, with adjacencies, sets, and tags.
     * If store_remote_handles is true, this call sends back handles assigned to
     * the entities received.
     * \param from_proc Source processor
     * \param store_remote_handles If true, send a message with new entity handles to the
     *        source processor (currently unsupported)
     * \param final_ents Range containing all entities received
     * \param incoming1 Counter of incoming entity messages expected by this processor (newly added)
     * \param incoming2 Counter of incoming remote-handle messages expected by this processor (newly added)
     * \param wait_all If true, wait until all messages received/sent complete
     */
    ErrorCode recv_entities( const int from_proc,
                             const bool store_remote_handles,
                             const bool is_iface,
                             Range& final_ents,
                             int& incoming1,
                             int& incoming2,
                             std::vector< std::vector< EntityHandle > >& L1hloc,
                             std::vector< std::vector< EntityHandle > >& L1hrem,
                             std::vector< std::vector< int > >& L1p,
                             std::vector< EntityHandle >& L2hloc,
                             std::vector< EntityHandle >& L2hrem,
                             std::vector< unsigned int >& L2p,
                             std::vector< MPI_Request >& recv_remoteh_reqs,
                             bool wait_all = true );

    ErrorCode recv_entities( std::set< unsigned int >& recv_procs,
                             int incoming1,
                             int incoming2,
                             const bool store_remote_handles,
                             const bool migrate = false );

    /** \brief Receive messages from another processor in a while loop
     *
     * Receive messages from another processor.
     * \param from_proc Source processor
     * \param store_remote_handles If true, send a message with new entity handles to the
     *        source processor (currently unsupported)
     * \param final_ents Range containing all entities received
     * \param incoming1 Counter of incoming entity messages expected by this processor (newly added)
     * \param incoming2 Counter of incoming remote-handle messages expected by this processor (newly added)
     */
    ErrorCode recv_messages( const int from_proc,
                             const bool store_remote_handles,
                             const bool is_iface,
                             Range& final_ents,
                             int& incoming1,
                             int& incoming2,
                             std::vector< std::vector< EntityHandle > >& L1hloc,
                             std::vector< std::vector< EntityHandle > >& L1hrem,
                             std::vector< std::vector< int > >& L1p,
                             std::vector< EntityHandle >& L2hloc,
                             std::vector< EntityHandle >& L2hrem,
                             std::vector< unsigned int >& L2p,
                             std::vector< MPI_Request >& recv_remoteh_reqs );

    ErrorCode recv_remote_handle_messages( const int from_proc,
                                           int& incoming2,
                                           std::vector< EntityHandle >& L2hloc,
                                           std::vector< EntityHandle >& L2hrem,
                                           std::vector< unsigned int >& L2p,
                                           std::vector< MPI_Request >& recv_remoteh_reqs );

    /** \brief Exchange ghost cells with neighboring procs
     * Neighboring processors are those sharing an interface
     * with this processor.  All entities of dimension ghost_dim
     * within num_layers of interface, measured going through bridge_dim,
     * are exchanged.  See MeshTopoUtil::get_bridge_adjacencies for description
     * of bridge adjacencies.  If wait_all is false and store_remote_handles
     * is true, MPI_Request objects are available in the sendReqs[2*MAX_SHARING_PROCS]
     * member array, with inactive requests marked as MPI_REQUEST_NULL.  If
     * store_remote_handles or wait_all is false, this function returns after
     * all entities have been received and processed.
     * \param ghost_dim Dimension of ghost entities to be exchanged
     * \param bridge_dim Dimension of entities used to measure layers from interface
     * \param num_layers Number of layers of ghosts requested
     * \param addl_ents Dimension of additional adjacent entities to exchange with ghosts, 0 if none
     * \param store_remote_handles If true, send message with new entity handles to source processor
     * \param wait_all If true, function does not return until all send buffers
     *       are cleared.
     */
    ErrorCode exchange_ghost_cells( int ghost_dim,
                                    int bridge_dim,
                                    int num_layers,
                                    int addl_ents,
                                    bool store_remote_handles,
                                    bool wait_all          = true,
                                    EntityHandle* file_set = NULL );
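
    // Usage sketch (illustrative only): after shared entities have been resolved,
    // exchange one layer of 3D ghost elements, bridging through vertices.
    //
    //   ErrorCode rval = pcomm.exchange_ghost_cells( 3 /*ghost_dim*/, 0 /*bridge_dim*/,
    //                                                1 /*num_layers*/, 0 /*addl_ents*/,
    //                                                true /*store_remote_handles*/ );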

    /** \brief Static version of exchange_ghost_cells, exchanging info through
     * buffers rather than messages
     */
    static ErrorCode exchange_ghost_cells( ParallelComm** pc,
                                           unsigned int num_procs,
                                           int ghost_dim,
                                           int bridge_dim,
                                           int num_layers,
                                           int addl_ents,
                                           bool store_remote_handles,
                                           EntityHandle* file_sets = NULL );

    /** \brief Post "MPI_Irecv" before meshing
     * \param exchange_procs processor vector exchanged
     */
    ErrorCode post_irecv( std::vector< unsigned int >& exchange_procs );

    ErrorCode post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs );

    /** \brief Exchange owned mesh for input mesh entities and sets
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all shared exchanged entities should have a value for this
     * tag (or the tag should have a default value).
     * \param exchange_procs processor vector exchanged
     * \param exchange_ents exchanged entities for each processor
     * \param migrate If true, the ownership of the exchanged entities is changed
     */
    ErrorCode exchange_owned_meshs( std::vector< unsigned int >& exchange_procs,
                                    std::vector< Range* >& exchange_ents,
                                    std::vector< MPI_Request >& recv_ent_reqs,
                                    std::vector< MPI_Request >& recv_remoteh_reqs,
                                    bool store_remote_handles,
                                    bool wait_all = true,
                                    bool migrate  = false,
                                    int dim       = 0 );

    /** \brief Exchange owned mesh for input mesh entities and sets
     * This function is called twice by exchange_owned_meshs to exchange entities before sets
     * \param migrate If true, the ownership of the exchanged entities is changed
     */
    ErrorCode exchange_owned_mesh( std::vector< unsigned int >& exchange_procs,
                                   std::vector< Range* >& exchange_ents,
                                   std::vector< MPI_Request >& recv_ent_reqs,
                                   std::vector< MPI_Request >& recv_remoteh_reqs,
                                   const bool recv_posted,
                                   bool store_remote_handles,
                                   bool wait_all,
                                   bool migrate = false );

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all ghosted/shared entities should have a value for this
     * tag (or the tag should have a default value).  If the entities vector is empty, all shared
     * entities participate in the exchange.  If a proc has no owned entities this function must
     * still be called since it is collective.
     * \param src_tags Vector of tag handles to be exchanged
     * \param dst_tags Tag handles to store the tags on the non-owning procs
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( const std::vector< Tag >& src_tags,
                             const std::vector< Tag >& dst_tags,
                             const Range& entities );

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If the entities vector is empty, all shared entities
     * participate in the exchange.  If a proc has no owned entities this function must still be
     * called since it is collective.
     * \param tag_name Name of tag to be exchanged
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( const char* tag_name, const Range& entities );

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If the entities vector is empty, all shared entities
     * participate in the exchange.  If a proc has no owned entities this function must still be
     * called since it is collective.
     * \param tagh Handle of tag to be exchanged
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( Tag tagh, const Range& entities );
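
    // Usage sketch (illustrative only): push owned values of a tag to the copies on
    // ghost/shared entities.  'density_tag' is a hypothetical tag handle; an empty
    // range means all shared entities participate.
    //
    //   Range all_shared;  // left empty: operate on all shared entities
    //   ErrorCode rval = pcomm.exchange_tags( density_tag, all_shared );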

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all ghosted/shared entities should have a value for this
     * tag (or the tag should have a default value).  Operation is any MPI_Op, with result stored
     * in destination tag.
     * \param src_tags Vector of tag handles to be reduced
     * \param dst_tags Vector of tag handles in which the answer will be stored
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *                 entities
     */
    ErrorCode reduce_tags( const std::vector< Tag >& src_tags,
                           const std::vector< Tag >& dst_tags,
                           const MPI_Op mpi_op,
                           const Range& entities );

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * Same as std::vector variant except for one tag specified by name
     * \param tag_name Name of tag to be reduced
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *                 entities
     */
    ErrorCode reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities );

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * Same as std::vector variant except for one tag specified by handle
     * \param tag_handle Handle of tag to be reduced
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *                 entities
     */
    ErrorCode reduce_tags( Tag tag_handle, const MPI_Op mpi_op, const Range& entities );
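
    // Usage sketch (illustrative only): sum contributions to a tag across all
    // sharing processors, e.g. for an accumulation tag on shared vertices.
    // 'flux_tag' is a hypothetical tag handle.
    //
    //   Range all_shared;  // left empty: reduce over all shared entities
    //   ErrorCode rval = pcomm.reduce_tags( flux_tag, MPI_SUM, all_shared );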

    /** \brief Broadcast all entities resident on from_proc to other processors
     * This function assumes remote handles are *not* being stored, since (usually)
     * every processor will know about the whole mesh.
     * \param from_proc Processor having the mesh to be broadcast
     * \param entities On return, the entities sent or received in this call
     * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
     * \param tags If true, all non-default-valued tags are sent for sent entities
     */
    ErrorCode broadcast_entities( const int from_proc,
                                  Range& entities,
                                  const bool adjacencies = false,
                                  const bool tags        = true );
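
    // Usage sketch (illustrative only): replicate a mesh read on rank 0 onto all
    // other ranks.  On rank 0, 'ents' holds the entities to send; on other ranks
    // it is empty on input and filled on return.
    //
    //   Range ents;  // populated on rank 0 only
    //   ErrorCode rval = pcomm.broadcast_entities( 0, ents );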

    /** \brief Scatter entities on from_proc to other processors
     * This function assumes remote handles are *not* being stored, since (usually)
     * every processor will know about the whole mesh.
     * \param from_proc Processor having the mesh to be scattered
     * \param entities On return, the entities sent or received in this call
     * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
     * \param tags If true, all non-default-valued tags are sent for sent entities
     */
    ErrorCode scatter_entities( const int from_proc,
                                std::vector< Range >& entities,
                                const bool adjacencies = false,
                                const bool tags        = true );

    /////////////////////////////////////////////////////////////////////////////////
    // Send and Receive routines for a sequence of entities: use case UMR
    /////////////////////////////////////////////////////////////////////////////////

    /** \brief Sends and receives data from a set of processors
     */
    ErrorCode send_recv_entities( std::vector< int >& send_procs,
                                  std::vector< std::vector< int > >& msgsizes,
                                  std::vector< std::vector< EntityHandle > >& senddata,
                                  std::vector< std::vector< EntityHandle > >& recvdata );

    ErrorCode update_remote_data( EntityHandle entity,
                                  std::vector< int >& procs,
                                  std::vector< EntityHandle >& handles );

    ErrorCode get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc );

    /////////////////////////////////////////////////////////////////////////////////

    // ==================================
    // \section INITIALIZATION OF PARALLEL DATA (resolve_shared_ents, etc.)
    // ==================================

    /** \brief Resolve shared entities between processors
     *
     * Resolve shared entities between processors for entities in proc_ents,
     * by comparing global id tag values on vertices on skin of elements in
     * proc_ents.  Shared entities are assigned a tag that's either
     * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
     * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
     * number of sharing processors.  Values in these tags denote the ranks
     * of sharing processors, and the list ends with the value -1.
     *
     * If shared_dim is input as -1 or not input, a value one less than the
     * maximum dimension of entities in proc_ents is used.
     *
     * \param proc_ents Entities for which to resolve shared entities
     * \param shared_dim Maximum dimension of shared entities to look for
     */
    ErrorCode resolve_shared_ents( EntityHandle this_set,
                                   Range& proc_ents,
                                   int resolve_dim   = -1,
                                   int shared_dim    = -1,
                                   Range* skin_ents  = NULL,
                                   const Tag* id_tag = 0 );

    /** \brief Resolve shared entities between processors
     *
     * Same as resolve_shared_ents(Range&), except works for
     * all entities in instance of dimension dim.
     *
     * If shared_dim is input as -1 or not input, a value one less than the
     * maximum dimension of entities is used.
     *
     * \param dim Dimension of entities in the partition
     * \param shared_dim Maximum dimension of shared entities to look for
     */
    ErrorCode resolve_shared_ents( EntityHandle this_set,
                                   int resolve_dim   = 3,
                                   int shared_dim    = -1,
                                   const Tag* id_tag = 0 );
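
    // Usage sketch (illustrative only): after loading a partitioned mesh, resolve
    // which entities are shared across processors, resolving 3D elements and all
    // lower-dimensional shared entities.
    //
    //   ErrorCode rval = pcomm.resolve_shared_ents( 0 /*whole mesh*/, 3 /*resolve_dim*/, -1 /*shared_dim*/ );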

    static ErrorCode resolve_shared_ents( ParallelComm** pc,
                                          const unsigned int np,
                                          EntityHandle this_set,
                                          const int to_dim );

    /** Resolve shared sets.
     *
     * Generates a list of candidate sets from those (directly)
     * contained in the passed set and passes them to the other version
     * of \c resolve_shared_sets.
     *\param this_set  Set directly containing candidate sets (e.g. file set)
     *\param id_tag    Tag containing global IDs for entity sets.
     */
    ErrorCode resolve_shared_sets( EntityHandle this_set, const Tag* id_tag = 0 );

    /** Resolve shared sets.
     *
     * Use values of id_tag to match sets across processes and populate
     * sharing data for sets.
     *\param candidate_sets  Sets to consider as potentially shared.
     *\param id_tag    Tag containing global IDs for entity sets.
     */
    ErrorCode resolve_shared_sets( Range& candidate_sets, Tag id_tag );

    /** extend shared sets with ghost entities
     * After ghosting, ghost entities do not yet have information about
     * the material set, partition set, Neumann or Dirichlet set they could
     * belong to.
     * This method assigns ghosted entities to those special entity sets.
     * In some cases we might even have to create those sets, if they do not exist
     * yet on the local processor.
     *
     * The special entity sets all have a unique identifier, in the form of an
     * integer tag on the set.
     * The shared sets data is not used, because we do not use the geometry sets,
     * as they are not uniquely identified.
     *
     * \param file_set : file set used per application
     *
     */
    ErrorCode augment_default_sets_with_ghosts( EntityHandle file_set );
    // ==================================
    // \section GET PARALLEL DATA (shared/owned/iface entities, etc.)
    // ==================================

    /** \brief Get parallel status of an entity
     * Returns the parallel status of an entity
     *
     * \param entity The entity being queried
     * \param pstatus_val Parallel status of the entity
     */
    ErrorCode get_pstatus( EntityHandle entity, unsigned char& pstatus_val );

    /** \brief Get entities with the given pstatus bit(s) set
     * Returns any entities whose pstatus tag value v satisfies (v & pstatus_val)
     *
     * \param dim Dimension of entities to be returned, or -1 if any
     * \param pstatus_val pstatus value of desired entities
     * \param pstatus_ents Entities returned from function
     */
    ErrorCode get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents );

    /** \brief Return the rank of the entity owner
     */
    ErrorCode get_owner( EntityHandle entity, int& owner );

    /** \brief Return the owner processor and handle of a given entity
     */
    ErrorCode get_owner_handle( EntityHandle entity, int& owner, EntityHandle& handle );
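
    // Usage sketch (illustrative only): query which rank owns a shared entity
    // 'ent' and what its handle is on that rank.
    //
    //   int owner_rank;
    //   EntityHandle remote_h;
    //   ErrorCode rval = pcomm.get_owner_handle( ent, owner_rank, remote_h );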

    /** \brief Get the shared processors/handles for an entity
     * Get the shared processors/handles for an entity.  Arrays must
     * be large enough to receive data for all sharing procs.  Does *not* include
     * this proc if only shared with one other proc.
     * \param entity Entity being queried
     * \param ps Pointer to sharing proc data
     * \param hs Pointer to shared proc handle data
     * \param pstat Reference to pstatus data returned from this function
     */
    ErrorCode get_sharing_data( const EntityHandle entity,
                                int* ps,
                                EntityHandle* hs,
                                unsigned char& pstat,
                                unsigned int& num_ps );

    /** \brief Get the shared processors/handles for an entity
     * Same as other version but with int num_ps
     * \param entity Entity being queried
     * \param ps Pointer to sharing proc data
     * \param hs Pointer to shared proc handle data
     * \param pstat Reference to pstatus data returned from this function
     */
    ErrorCode get_sharing_data( const EntityHandle entity,
                                int* ps,
                                EntityHandle* hs,
                                unsigned char& pstat,
                                int& num_ps );

    /** \brief Get the intersection or union of all sharing processors
     * Get the intersection or union of all sharing processors.  Processor set
     * is cleared as part of this function.
     * \param entities Entity list ptr
     * \param num_entities Number of entities
     * \param procs Processors returned
     * \param op Either Interface::UNION or Interface::INTERSECT
     */
    ErrorCode get_sharing_data( const EntityHandle* entities,
                                int num_entities,
                                std::set< int >& procs,
                                int op = Interface::INTERSECT );

    /** \brief Get the intersection or union of all sharing processors
     * Same as previous variant but with range as input
     */
    ErrorCode get_sharing_data( const Range& entities, std::set< int >& procs, int op = Interface::INTERSECT );
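
    // Usage sketch (illustrative only): find every processor that shares at least
    // one entity from a range 'shared_ents' (union over entities).
    //
    //   std::set< int > procs;
    //   ErrorCode rval = pcomm.get_sharing_data( shared_ents, procs, Interface::UNION );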

    /** \brief Get shared entities of specified dimension
     * If other_proc is -1, any shared entities are returned.  If dim is -1,
     * entities of all dimensions on interface are returned.
     * \param other_proc Rank of processor for which interface entities are requested
     * \param shared_ents Entities returned from function
     * \param dim Dimension of interface entities requested
     * \param iface If true, return only entities on the interface
     * \param owned_filter If true, return only owned shared entities
     */
    ErrorCode get_shared_entities( int other_proc,
                                   Range& shared_ents,
                                   int dim                 = -1,
                                   const bool iface        = false,
                                   const bool owned_filter = false );
    /*
    //! return partition sets; if tag_name is input, gets sets with
    //! that tag name, otherwise uses PARALLEL_PARTITION tag
    ErrorCode get_partition_sets(EntityHandle this_set,
                                 Range &part_sets,
                                 const char *tag_name = NULL);
    */
    //! get processors with which this processor shares an interface
    ErrorCode get_interface_procs( std::set< unsigned int >& iface_procs, const bool get_buffs = false );

    //! get processors with which this processor communicates
    ErrorCode get_comm_procs( std::set< unsigned int >& procs );

    // ==================================
    // \section SHARED SETS
    // ==================================

    //! Get array of process IDs sharing a set.  Returns MB_SUCCESS
    //! and passes back an empty list if the set is not shared.
    ErrorCode get_entityset_procs( EntityHandle entity_set, std::vector< unsigned >& ranks ) const;

    //! Get rank of the owner of a shared set.
    //! Returns this proc if set is not shared.
    //! Optionally returns handle on owning process for shared set.
    ErrorCode get_entityset_owner( EntityHandle entity_set,
                                   unsigned& owner_rank,
                                   EntityHandle* remote_handle = 0 ) const;

    //! Given set owner and handle on owner, find local set handle
    ErrorCode get_entityset_local_handle( unsigned owning_rank,
                                          EntityHandle remote_handle,
                                          EntityHandle& local_handle ) const;

    //! Get all shared sets
    ErrorCode get_shared_sets( Range& result ) const;

    //! Get ranks of all processes that own at least one set that is
    //! shared with this process.  Will include the rank of this process
    //! if this process owns any shared set.
    ErrorCode get_entityset_owners( std::vector< unsigned >& ranks ) const;

    //! Get shared sets owned by process with specified rank.
    ErrorCode get_owned_sets( unsigned owning_rank, Range& sets_out ) const;

    // ==================================
    // \section LOW-LEVEL DATA (tags, sets on interface/partition, etc.)
    // ==================================

    //! Get proc config for this communication object
    const ProcConfig& proc_config() const
    {
        return procConfig;
    }

    //! Get proc config for this communication object
    ProcConfig& proc_config()
    {
        return procConfig;
    }

    unsigned rank() const
    {
        return proc_config().proc_rank();
    }
    unsigned size() const
    {
        return proc_config().proc_size();
    }
    MPI_Comm comm() const
    {
        return proc_config().proc_comm();
    }

    //! return the tags used to indicate shared procs and handles
    ErrorCode get_shared_proc_tags( Tag& sharedp_tag,
                                    Tag& sharedps_tag,
                                    Tag& sharedh_tag,
                                    Tag& sharedhs_tag,
                                    Tag& pstatus_tag );

    //! return partition, interface set ranges
    Range& partition_sets()
    {
        return partitionSets;
    }
    const Range& partition_sets() const
    {
        return partitionSets;
    }
    Range& interface_sets()
    {
        return interfaceSets;
    }
    const Range& interface_sets() const
    {
        return interfaceSets;
    }

    //! return sharedp tag
    Tag sharedp_tag();

    //! return sharedps tag
    Tag sharedps_tag();

    //! return sharedh tag
    Tag sharedh_tag();

    //! return sharedhs tag
    Tag sharedhs_tag();

    //! return pstatus tag
    Tag pstatus_tag();

    //! return pcomm tag; static because might not have a pcomm before going
    //! to look for one on the interface
    static Tag pcomm_tag( Interface* impl, bool create_if_missing = true );

    //! return partition set tag
    Tag partition_tag();
    Tag part_tag()
    {
        return partition_tag();
    }

    // ==================================
    // \section DEBUGGING AIDS
    // ==================================

    //! print contents of pstatus value in human-readable form
    void print_pstatus( unsigned char pstat, std::string& ostr );

    //! print contents of pstatus value in human-readable form to std::cout
    void print_pstatus( unsigned char pstat );

    // ==================================
    // \section IMESHP-RELATED FUNCTIONS
    // ==================================

    //! return all the entities in parts owned locally
    ErrorCode get_part_entities( Range& ents, int dim = -1 );

    EntityHandle get_partitioning() const
    {
        return partitioningSet;
    }
    ErrorCode set_partitioning( EntityHandle h );
    ErrorCode get_global_part_count( int& count_out ) const;
    ErrorCode get_part_owner( int part_id, int& owner_out ) const;
    ErrorCode get_part_id( EntityHandle part, int& id_out ) const;
    ErrorCode get_part_handle( int id, EntityHandle& handle_out ) const;
    ErrorCode create_part( EntityHandle& part_out );
    ErrorCode destroy_part( EntityHandle part );
    ErrorCode collective_sync_partition();
    ErrorCode get_part_neighbor_ids( EntityHandle part, int neighbors_out[MAX_SHARING_PROCS], int& num_neighbors_out );
    ErrorCode get_interface_sets( EntityHandle part, Range& iface_sets_out, int* adj_part_id = 0 );
    ErrorCode get_owning_part( EntityHandle entity, int& owning_part_id_out, EntityHandle* owning_handle = 0 );
    ErrorCode get_sharing_parts( EntityHandle entity,
                                 int part_ids_out[MAX_SHARING_PROCS],
                                 int& num_part_ids_out,
                                 EntityHandle remote_handles[MAX_SHARING_PROCS] = 0 );

    /** Filter the entities by pstatus tag.
     * op is one of PSTATUS_AND, PSTATUS_OR, or PSTATUS_NOT; an entity is output if:
     * AND: all bits set in pstatus_val are also set on entity
     * OR: any bits set in pstatus_val are also set on entity
     * NOT: any bits set in pstatus_val are not set on entity
     *
     * Results are returned in the input list, unless returned_ents is passed in
     * non-null, in which case results are returned in returned_ents.
     *
     * If ents is passed in empty, the filter is done on shared entities in this
     * pcomm instance, i.e. contents of sharedEnts.
     *
     *\param ents       Input entities to filter
     *\param pstatus_val pstatus value to which entities are compared
     *\param op Bitwise operation performed between pstatus values
     *\param to_proc If non-negative and PSTATUS_SHARED is set on pstatus_val,
     *               only entities shared with to_proc are returned
     *\param returned_ents If non-null, results of filter are put in the
     *       pointed-to range
     */
    ErrorCode filter_pstatus( Range& ents,
                              const unsigned char pstatus_val,
                              const unsigned char op,
                              int to_proc          = -1,
                              Range* returned_ents = NULL );
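
    // Usage sketch (illustrative only): reduce a range to the entities owned by
    // this processor by filtering out anything with PSTATUS_NOT_OWNED set
    // (PSTATUS_* bit values are defined in MBParallelConventions.h).
    //
    //   Range owned = my_ents;
    //   ErrorCode rval = pcomm.filter_pstatus( owned, PSTATUS_NOT_OWNED, PSTATUS_NOT );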

    /** \brief Get entities on interfaces shared with another proc
     *
     * \param other_proc Other proc sharing the interface
     * \param dim Dimension of entities to return, -1 if all dims
     * \param iface_ents Returned entities
     */
    ErrorCode get_iface_entities( int other_proc, int dim, Range& iface_ents );

    Interface* get_moab() const
    {
        return mbImpl;
    }

    ErrorCode clean_shared_tags( std::vector< Range* >& exchange_ents );

    class Buffer
    {
      public:
        unsigned char* mem_ptr;
        unsigned char* buff_ptr;
        unsigned int alloc_size;

        Buffer( unsigned int sz = 0 );
        Buffer( const Buffer& );
        ~Buffer();
        void reset_buffer( size_t buff_pos = 0 )
        {
            reset_ptr( buff_pos );
            reserve( INITIAL_BUFF_SIZE );
        }
        void reset_ptr( size_t buff_pos = 0 )
        {
            assert( ( !mem_ptr && !buff_pos ) || ( alloc_size >= buff_pos ) );
            buff_ptr = mem_ptr + buff_pos;
        }
        inline void reserve( unsigned int new_size );
        void set_stored_size()
        {
            *( (int*)mem_ptr ) = (int)( buff_ptr - mem_ptr );
        }
        int get_stored_size()
        {
            return *( (int*)mem_ptr );
        }
        int get_current_size()
        {
            return (int)( buff_ptr - mem_ptr );
        }

        void check_space( unsigned int addl_space );
    };

    //! public 'cuz we want to unit test these externally
    ErrorCode pack_buffer( Range& orig_ents,
                           const bool adjacencies,
                           const bool tags,
                           const bool store_remote_handles,
                           const int to_proc,
                           Buffer* buff,
                           TupleList* entprocs = NULL,
                           Range* allsent      = NULL );

    ErrorCode unpack_buffer( unsigned char* buff_ptr,
                             const bool store_remote_handles,
                             const int from_proc,
                             const int ind,
                             std::vector< std::vector< EntityHandle > >& L1hloc,
                             std::vector< std::vector< EntityHandle > >& L1hrem,
                             std::vector< std::vector< int > >& L1p,
                             std::vector< EntityHandle >& L2hloc,
                             std::vector< EntityHandle >& L2hrem,
                             std::vector< unsigned int >& L2p,
                             std::vector< EntityHandle >& new_ents,
                             const bool created_iface = false );

    ErrorCode pack_entities( Range& entities,
                             Buffer* buff,
                             const bool store_remote_handles,
                             const int to_proc,
                             const bool is_iface,
                             TupleList* entprocs = NULL,
                             Range* allsent      = NULL );

    //! unpack entities in buff_ptr
    ErrorCode unpack_entities( unsigned char*& buff_ptr,
                               const bool store_remote_handles,
                               const int from_ind,
                               const bool is_iface,
                               std::vector< std::vector< EntityHandle > >& L1hloc,
                               std::vector< std::vector< EntityHandle > >& L1hrem,
                               std::vector< std::vector< int > >& L1p,
                               std::vector< EntityHandle >& L2hloc,
                               std::vector< EntityHandle >& L2hrem,
                               std::vector< unsigned int >& L2p,
                               std::vector< EntityHandle >& new_ents,
                               const bool created_iface = false );

    //! Call exchange_all_shared_handles, then compare the results with tag data
    //! on local shared entities.
    ErrorCode check_all_shared_handles( bool print_em = false );

    static ErrorCode check_all_shared_handles( ParallelComm** pcs, int num_pcs );

    struct SharedEntityData
    {
        EntityHandle local;
        EntityHandle remote;
        EntityID owner;
    };

    ErrorCode pack_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data );

    // check consistency of sharedEnts against their tags and their
    // vertices' tags
    ErrorCode check_local_shared();

    // check contents of communicated shared entity data against tags
    ErrorCode check_my_shared_handles( std::vector< std::vector< SharedEntityData > >& shents,
                                       const char* prefix = NULL );

    //! set rank for this pcomm; USED FOR TESTING ONLY!
    void set_rank( unsigned int r );

    //! set size for this pcomm; USED FOR TESTING ONLY!
    void set_size( unsigned int r );

    //! get (and possibly allocate) buffers for messages to/from to_proc; returns
    //! index of to_proc in buffProcs vector; if is_new is non-NULL, sets to
    //! whether new buffer was allocated
    //! PUBLIC ONLY FOR TESTING!
    int get_buffers( int to_proc, bool* is_new = NULL );

    //! get buff processor vector
    const std::vector< unsigned int >& buff_procs() const;

    /* \brief Unpack message with remote handles
     * PUBLIC ONLY FOR TESTING!
     */
    ErrorCode unpack_remote_handles( unsigned int from_proc,
                                     unsigned char*& buff_ptr,
                                     std::vector< EntityHandle >& L2hloc,
                                     std::vector< EntityHandle >& L2hrem,
                                     std::vector< unsigned int >& L2p );

    /* \brief Pack message with remote handles
     * PUBLIC ONLY FOR TESTING!
     */
    ErrorCode pack_remote_handles( std::vector< EntityHandle >& L1hloc,
                                   std::vector< EntityHandle >& L1hrem,
                                   std::vector< int >& procs,
                                   unsigned int to_proc,
                                   Buffer* buff );

    // each entry in proc_nvecs contains a set of procs and the entities *possibly*
    // on the interface between those procs; this function makes sets for each,
    // and tags the set with the procs sharing it; interface sets are optionally
    // returned; NOTE: a subsequent step is used to verify entities on the interface
    // and remove them if they're not shared
    ErrorCode create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );

    // do the same but working straight from sharedEnts
    ErrorCode create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim );

    ErrorCode tag_shared_verts( TupleList& shared_ents,
                                std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
                                Range& proc_verts,
                                unsigned int i_extra = 1 );

    ErrorCode list_entities( const EntityHandle* ents, int num_ents );

    ErrorCode list_entities( const Range& ents );

    void set_send_request( int n_request );  // set send request array

    void set_recv_request( int n_request );  // set recv request array

    //! reset message buffers to their initial state
    // changed to public function (HJK)
    void reset_all_buffers();

    static const unsigned int INITIAL_BUFF_SIZE;

    //! set the verbosity level of output from this pcomm
    void set_debug_verbosity( int verb );

    //! get the verbosity level of output from this pcomm
    int get_debug_verbosity();

    /* \brief Gather tag value from entities down to a specified root proc
     * This function gathers data from a domain-decomposed mesh onto a global mesh
     * represented on the root processor.  On the root, this gather mesh is distinct from
     * the root's domain-decomposed subdomain.  Entities are matched by global id, or by
     * another tag if its handle is input.  The dimension of all entities in gather_ents should
     * be the same, since this is the dimension of entities in gather_set that are queried for
     * matching global id tags.
     * \param gather_ents (Local) entities from which to gather data
     * \param tag_handle Tag whose values are being gathered
     * \param id_tag Tag to use for matching entities (global id used by default)
     * \param gather_set On root, set containing global mesh onto which to put data
     * \param root_proc_rank Rank of the specified root processor (default rank is 0)
     */
    ErrorCode gather_data( Range& gather_ents,
                           Tag& tag_handle,
                           Tag id_tag              = 0,
                           EntityHandle gather_set = 0,
                           int root_proc_rank      = 0 );
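
    // Usage sketch (illustrative only): gather a tag from the distributed mesh onto
    // a pre-built global mesh on rank 0.  'temp_tag' is a hypothetical tag handle
    // and 'global_set' a set on the root containing the gather mesh; entities are
    // matched by the default global id tag.
    //
    //   ErrorCode rval = pcomm.gather_data( my_owned_ents, temp_tag, 0, global_set, 0 );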

    /* \brief communicate positions of extra points on the boundary
     * This function is called after the intersection of 2 meshes, to settle the
     * positions of the intersection points on the boundary (interface).
     * The initial mesh distributed on each processor is decomposed after
     * intersection with another mesh, such that new points are created on the
     * boundary; these points should match at the interface.
     * We perform an extra step, as a precaution, to ensure the robustness of the
     * intersection algorithm.  Only the extra nodes on shared edges actually need
     * to be communicated, but we pass the whole extraNodesVec structure by
     * reference, so we do not need to construct another data structure.
     * The node positions on owned edges are communicated to the other processors.
     *
     * \param edges total range of entities
     * \param shared_edges_owned edges for which to communicate data
     * \param extraNodesVec handles of intersection vertices on all edges
     * \param tolerance tolerance used when matching point positions
     */
    ErrorCode settle_intersection_points( Range& edges,
                                          Range& shared_edges_owned,
                                          std::vector< std::vector< EntityHandle >* >& extraNodesVec,
                                          double tolerance );

    /* \brief delete entities from the moab database
     * Will check the shared ents array, and clean it if necessary.
     */
    ErrorCode delete_entities( Range& to_delete );

    /*
     * \brief correct multi-sharing info for thin layers
     *
     * Used with at least 3 processes, when there are thin ghost layers.
     * Right now it is public, to allow users to call it directly;
     * eventually it should become private and be called automatically.
     */
    ErrorCode correct_thin_ghost_layers();

01017   private:
01018     ErrorCode reduce_void( int tag_data_type, const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
01019 
01020     template < class T >
01021     ErrorCode reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
01022 
01023     void print_debug_isend( int from, int to, unsigned char* buff, int tag, int size );
01024 
01025     void print_debug_irecv( int to, int from, unsigned char* buff, int size, int tag, int incoming );
01026 
01027     void print_debug_recd( MPI_Status status );
01028 
01029     void print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc );
01030 
01031     // common initialization code, called from various constructors
01032     void initialize();
01033 
01034     ErrorCode set_sharing_data( EntityHandle ent,
01035                                 unsigned char pstatus,
01036                                 int old_nump,
01037                                 int new_nump,
01038                                 int* ps,
01039                                 EntityHandle* hs );
01040 
01041     ErrorCode check_clean_iface( Range& allsent );
01042 
01043     void define_mpe();
01044 
01045     ErrorCode get_sent_ents( const bool is_iface,
01046                              const int bridge_dim,
01047                              const int ghost_dim,
01048                              const int num_layers,
01049                              const int addl_ents,
01050                              Range* sent_ents,
01051                              Range& allsent,
01052                              TupleList& entprocs );
01053 
01054     /** \brief Set pstatus values on entities
01055      *
01056      * \param pstatus_ents Entities to be set
01057      * \param pstatus_val Pstatus value to be set
01058      * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
01059      *        (and created if they don't exist)
01060      * \param verts_too If true, vertices also set
01061      * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
01062      *        existing value is over-written
01063      */
01064     ErrorCode set_pstatus_entities( Range& pstatus_ents,
01065                                     unsigned char pstatus_val,
01066                                     bool lower_dim_ents = false,
01067                                     bool verts_too      = true,
01068                                     int operation       = Interface::UNION );
01069 
01070     /** \brief Set pstatus values on entities (vector-based function)
01071      *
01072      * \param pstatus_ents Entities to be set
01073      * \param pstatus_val Pstatus value to be set
01074      * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
01075      *        (and created if they don't exist)
01076      * \param verts_too If true, vertices also set
01077      * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
01078      *        existing value is over-written
01079      */
01080     ErrorCode set_pstatus_entities( EntityHandle* pstatus_ents,
01081                                     int num_ents,
01082                                     unsigned char pstatus_val,
01083                                     bool lower_dim_ents = false,
01084                                     bool verts_too      = true,
01085                                     int operation       = Interface::UNION );
01086 
01087     //! estimate size required to pack entities
01088     int estimate_ents_buffer_size( Range& entities, const bool store_remote_handles );
01089 
01090     //! estimate size required to pack sets
01091     int estimate_sets_buffer_size( Range& entities, const bool store_remote_handles );
01092 
01093     //! send the indicated buffer, possibly sending size first
01094     ErrorCode send_buffer( const unsigned int to_proc,
01095                            Buffer* send_buff,
01096                            const int msg_tag,
01097                            MPI_Request& send_req,
01098                            MPI_Request& ack_recv_req,
01099                            int* ack_buff,
01100                            int& this_incoming,
01101                            int next_mesg_tag          = -1,
01102                            Buffer* next_recv_buff     = NULL,
01103                            MPI_Request* next_recv_req = NULL,
01104                            int* next_incoming         = NULL );
01105 
01106     //! process an incoming message; if it is longer than the initial size, post a
01107     //! recv for the next part and send an ack; if it is an ack, send the second part;
01108     //! otherwise indicate that we're done and the buffer is ready for processing
01109     ErrorCode recv_buffer( int mesg_tag_expected,
01110                            const MPI_Status& mpi_status,
01111                            Buffer* recv_buff,
01112                            MPI_Request& recv_2nd_req,
01113                            MPI_Request& ack_req,
01114                            int& this_incoming,
01115                            Buffer* send_buff,
01116                            MPI_Request& send_req,
01117                            MPI_Request& sent_ack_req,
01118                            bool& done,
01119                            Buffer* next_buff     = NULL,
01120                            int next_tag          = -1,
01121                            MPI_Request* next_req = NULL,
01122                            int* next_incoming    = NULL );
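
    //! Taken together, send_buffer and recv_buffer implement a two-part
    //! handshake for messages larger than the initial buffer size.  A sketch
    //! of the message flow implied by the comments above (not additional API):
    //!
    //!   sender                           receiver
    //!   ------                           --------
    //!   isend part 1 (includes size) --> irecv part 1; if size fits, done;
    //!                                    otherwise post irecv for part 2 and
    //!   irecv ack                    <-- isend ack
    //!   isend part 2                 --> part 2 arrives; buffer complete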
01123 
01124     //! pack a range of entities with equal # verts per entity, along with
01125     //! the range on the sending proc
01126     ErrorCode pack_entity_seq( const int nodes_per_entity,
01127                                const bool store_remote_handles,
01128                                const int to_proc,
01129                                Range& these_ents,
01130                                std::vector< EntityHandle >& entities,
01131                                Buffer* buff );
01132 
01133     ErrorCode print_buffer( unsigned char* buff_ptr, int mesg_type, int from_proc, bool sent );
01134 
01135     //! for each entity in the received buffer, save the entity in this
01136     //! instance which matches its connectivity, or zero if none is found
01137     ErrorCode unpack_iface_entities( unsigned char*& buff_ptr,
01138                                      const int from_proc,
01139                                      const int ind,
01140                                      std::vector< EntityHandle >& recd_ents );
01141 
01142     ErrorCode pack_sets( Range& entities, Buffer* buff, const bool store_handles, const int to_proc );
01143 
01144     ErrorCode unpack_sets( unsigned char*& buff_ptr,
01145                            std::vector< EntityHandle >& entities,
01146                            const bool store_handles,
01147                            const int to_proc );
01148 
01149     ErrorCode pack_adjacencies( Range& entities,
01150                                 Range::const_iterator& start_rit,
01151                                 Range& whole_range,
01152                                 unsigned char*& buff_ptr,
01153                                 int& count,
01154                                 const bool just_count,
01155                                 const bool store_handles,
01156                                 const int to_proc );
01157 
01158     ErrorCode unpack_adjacencies( unsigned char*& buff_ptr,
01159                                   Range& entities,
01160                                   const bool store_handles,
01161                                   const int from_proc );
01162 
01163     /** \brief Unpack message with remote handles (const pointer to buffer)
01164      */
01165     ErrorCode unpack_remote_handles( unsigned int from_proc,
01166                                      const unsigned char* buff_ptr,
01167                                      std::vector< EntityHandle >& L2hloc,
01168                                      std::vector< EntityHandle >& L2hrem,
01169                                      std::vector< unsigned int >& L2p );
01170 
01171     //! given connectivity and type, find an existing entity, if there is one
01172     ErrorCode find_existing_entity( const bool is_iface,
01173                                     const int owner_p,
01174                                     const EntityHandle owner_h,
01175                                     const int num_ents,
01176                                     const EntityHandle* connect,
01177                                     const int num_connect,
01178                                     const EntityType this_type,
01179                                     std::vector< EntityHandle >& L2hloc,
01180                                     std::vector< EntityHandle >& L2hrem,
01181                                     std::vector< unsigned int >& L2p,
01182                                     EntityHandle& new_h );
01183 
01184     ErrorCode build_sharedhps_list( const EntityHandle entity,
01185                                     const unsigned char pstatus,
01186                                     const int sharedp,
01187                                     const std::set< unsigned int >& procs,
01188                                     unsigned int& num_ents,
01189                                     int* tmp_procs,
01190                                     EntityHandle* tmp_handles );
01191 
01192     /**\brief Get list of tags for which to exchange data
01193      *
01194      * Get tags and entities for which to exchange tag data.  This function
01195      * was originally the part of 'pack_tags' invoked when its
01196      * 'all_possible_tags' parameter was set.
01197      *
01198      *\param all_entities  Input.  The set of entities for which data is to
01199      *                      be communicated.
01200      *\param all_tags      Output.  Populated with the handles of tags to be
01201      *                      sent.
01202      *\param tag_ranges    Output.  For each corresponding tag in all_tags, the
01203      *                      subset of 'all_entities' for which a tag value has
01204      *                      been set.
01205      */
01206     ErrorCode get_tag_send_list( const Range& all_entities,
01207                                  std::vector< Tag >& all_tags,
01208                                  std::vector< Range >& tag_ranges );
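
    //! Typical pairing of get_tag_send_list with pack_tags (declared below),
    //! as a sketch (illustrative; assumes 'sent_ents', 'buff',
    //! 'store_remote_handles' and 'to_proc' are set up by the caller):
    //! \code
    //!   std::vector< Tag > all_tags;
    //!   std::vector< Range > tag_ranges;
    //!   ErrorCode rval = get_tag_send_list( sent_ents, all_tags, tag_ranges );
    //!   if( MB_SUCCESS == rval )
    //!       rval = pack_tags( sent_ents, all_tags, all_tags, tag_ranges, buff,
    //!                         store_remote_handles, to_proc );
    //! \endcode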
01209 
01210     /**\brief Serialize entity tag data
01211      *
01212      * This function serializes the data for each tag in 'src_tags' over
01213      * the corresponding entity range in 'tag_ranges', appending the
01214      * binary representation of the values to 'buff'.  The buffer is
01215      * grown as necessary to hold the serialized data.
01216      *
01217      *\param entities      Should be the union of the sets of entities for
01218      *                     which tag values are to be serialized.  Also
01219      *                     specifies the ordering for entity indices when
01220      *                     remote handles are not stored (see
01221      *                     'store_handles' below).
01222      *\param src_tags      Tags whose values are to be serialized.
01223      *\param dst_tags      Tag handles under which the receiving processor
01224      *                     should store the unpacked values, one for each
01225      *                     entry in 'src_tags'.  Typically the same handles
01226      *                     as 'src_tags'.
01227      *\param tag_ranges    For each corresponding tag in 'src_tags', the
01228      *                     subset of 'entities' for which a tag value has
01229      *                     been set.
01230      *\param buff          Buffer into which to write binary serialized data.
01231      *\param store_handles The data for each tag is preceded by a list of
01232      *                     EntityHandles designating the entity each of
01233      *                     the subsequent tag values corresponds to.  Each
01234      *                     handle may be one of:
01235      *                     1) If store_handles == false:
01236      *                        an invalid handle composed of {MBMAXTYPE,idx},
01237      *                        where idx is the position of the entity in
01238      *                        'entities'.
01239      *                     2) If store_handles == true and a valid remote
01240      *                        handle exists, the remote handle.
01241      *                     3) If store_handles == true and no valid remote
01242      *                        handle is defined for the entity, the same
01243      *                        as 1).
01244      *\param to_proc       If 'store_handles' is true, the processor rank
01245      *                     for which to store the corresponding remote
01246      *                     entity handles.
01247      */
01254     ErrorCode pack_tags( Range& entities,
01255                          const std::vector< Tag >& src_tags,
01256                          const std::vector< Tag >& dst_tags,
01257                          const std::vector< Range >& tag_ranges,
01258                          Buffer* buff,
01259                          const bool store_handles,
01260                          const int to_proc );
01261 
01262     /**\brief Calculate buffer size required to pack tag data
01263      *\param source_tag The tag for which data will be serialized
01264      *\param entities    The entities for which tag values will be serialized
01265      *\param count_out  Output: The required buffer size, in bytes.
01266      */
01267     ErrorCode packed_tag_size( Tag source_tag, const Range& entities, int& count_out );
01268 
01269     /**\brief Serialize tag data
01270      *\param source_tag    The tag for which data will be serialized
01271      *\param destination_tag Tag in which to store unpacked tag data.  Typically
01272      *                     the same as source_tag.
01273      *\param entities      The entities for which tag values will be serialized
01274      *\param whole_range   Calculate entity indices as location in this range
01275      *\param buff          Buffer into which to write the binary serialized
01276      *                     data; grown as necessary to hold it.
01277      *\param store_remote_handles The data for each tag is preceded by a list
01278      *                     of EntityHandles designating the entity each of
01279      *                     the subsequent tag values corresponds to.  Each
01280      *                     handle may be one of:
01281      *                     1) If store_remote_handles == false:
01282      *                        an invalid handle composed of {MBMAXTYPE,idx},
01283      *                        where idx is the position of the entity in
01284      *                        "whole_range".
01285      *                     2) If store_remote_handles == true and a valid
01286      *                        remote handle exists, the remote handle.
01287      *                     3) If store_remote_handles == true and no valid
01288      *                        remote handle is defined for the entity, the
01289      *                        same as 1).
01290      *\param to_proc       If 'store_remote_handles' is true, the processor
01291      *                     rank for which to store the corresponding remote
01292      *                     entity handles.
01293      */
01294     ErrorCode pack_tag( Tag source_tag,
01295                         Tag destination_tag,
01296                         const Range& entities,
01297                         const std::vector< EntityHandle >& whole_range,
01298                         Buffer* buff,
01299                         const bool store_remote_handles,
01300                         const int to_proc );
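
    //! The {MBMAXTYPE,idx} placeholder described above can be built with the
    //! CREATE_HANDLE helper from Internals.hpp; a minimal sketch (the variable
    //! names here are illustrative only):
    //! \code
    //!   int err;
    //!   // encode position 'idx' in the entity list as an invalid handle, to
    //!   // be translated back into a real handle on the receiving side
    //!   EntityHandle placeholder = CREATE_HANDLE( MBMAXTYPE, idx, err );
    //! \endcode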
01301 
01302     ErrorCode unpack_tags( unsigned char*& buff_ptr,
01303                            std::vector< EntityHandle >& entities,
01304                            const bool store_handles,
01305                            const int to_proc,
01306                            const MPI_Op* const mpi_op = NULL );
01307 
01308     ErrorCode tag_shared_verts( TupleList& shared_verts,
01309                                 Range* skin_ents,
01310                                 std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
01311                                 Range& proc_verts );
01312 
01313     ErrorCode get_proc_nvecs( int resolve_dim,
01314                               int shared_dim,
01315                               Range* skin_ents,
01316                               std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );
01317 
01318     // after shared entities have been verified, parent/child links between sets can be established
01319     ErrorCode create_iface_pc_links();
01320 
01321     //! pack a range map with keys in this_range and values a contiguous series
01322     //! of handles starting at actual_start
01323     ErrorCode pack_range_map( Range& this_range, EntityHandle actual_start, HandleMap& handle_map );
01324 
01325     //! returns true if the set is an interface shared with to_proc
01326     bool is_iface_proc( EntityHandle this_set, int to_proc );
01327 
01328     //! for any remote_handles set to zero, remove corresponding sent_ents from
01329     //! iface_sets corresponding to from_proc
01330     ErrorCode update_iface_sets( Range& sent_ents, std::vector< EntityHandle >& remote_handles, int from_proc );
01331 
01332     //! for specified bridge/ghost dimension, to_proc, and number
01333     //! of layers, get the entities to be ghosted, and info on additional procs
01334     //! needing to communicate with to_proc
01335     ErrorCode get_ghosted_entities( int bridge_dim,
01336                                     int ghost_dim,
01337                                     int to_proc,
01338                                     int num_layers,
01339                                     int addl_ents,
01340                                     Range& ghosted_ents );
01341 
01342     //! add vertices adjacent to entities in this list
01343     ErrorCode add_verts( Range& sent_ents );
01344 
01345     //! Every processor sends shared entity handle data to every other processor
01346     //! that it shares entities with.  Passed back map is all received data,
01347     //! indexed by processor ID. This function is intended to be used for
01348     //! debugging.
01349     ErrorCode exchange_all_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data,
01350                                            std::vector< std::vector< SharedEntityData > >& result );
01351 
01352     //! replace handles in from_vec with corresponding handles on
01353     //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
01354     //! if no remote handle and new_ents is non-null, substitute
01355     //! instead CREATE_HANDLE(MBMAXTYPE, index) where index is handle's
01356     //! position in new_ents
01357     ErrorCode get_remote_handles( const bool store_remote_handles,
01358                                   EntityHandle* from_vec,
01359                                   EntityHandle* to_vec_tmp,
01360                                   int num_ents,
01361                                   int to_proc,
01362                                   const std::vector< EntityHandle >& new_ents );
01363 
01364     //! same as other version, except from_range and to_range should be
01365     //! different here
01366     ErrorCode get_remote_handles( const bool store_remote_handles,
01367                                   const Range& from_range,
01368                                   Range& to_range,
01369                                   int to_proc,
01370                                   const std::vector< EntityHandle >& new_ents );
01371 
01372     //! same as other version, except packs range into vector
01373     ErrorCode get_remote_handles( const bool store_remote_handles,
01374                                   const Range& from_range,
01375                                   EntityHandle* to_vec,
01376                                   int to_proc,
01377                                   const std::vector< EntityHandle >& new_ents );
01378 
01379     //! goes through from_vec, and for any with type MBMAXTYPE, replaces with
01380     //! new_ents value at index corresponding to id of entity in from_vec
01381     ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents );
01382 
01383     //! same as above except puts results in range
01384     ErrorCode get_local_handles( const Range& remote_handles,
01385                                  Range& local_handles,
01386                                  const std::vector< EntityHandle >& new_ents );
01387 
01388     //! same as above except gets new_ents from vector
01389     ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const std::vector< EntityHandle >& new_ents );
01390 
01391     ErrorCode update_remote_data( Range& local_range,
01392                                   Range& remote_range,
01393                                   int other_proc,
01394                                   const unsigned char add_pstat );
01395 
01396     ErrorCode update_remote_data( const EntityHandle new_h,
01397                                   const int* ps,
01398                                   const EntityHandle* hs,
01399                                   const int num_ps,
01400                                   const unsigned char add_pstat );
01401 
01402     ErrorCode update_remote_data_old( const EntityHandle new_h,
01403                                       const int* ps,
01404                                       const EntityHandle* hs,
01405                                       const int num_ps,
01406                                       const unsigned char add_pstat );
01407 
01408     /** \brief Set pstatus tag interface bit on entities in the interface sets
01409      */
01410     ErrorCode tag_iface_entities();
01411 
01412     //! add a pc to the iface instance tag PARALLEL_COMM
01413     int add_pcomm( ParallelComm* pc );
01414 
01415     //! remove a pc from the iface instance tag PARALLEL_COMM
01416     void remove_pcomm( ParallelComm* pc );
01417 
01418     //! check entities to make sure there are no zero-valued remote handles
01419     //! where they shouldn't be
01420     ErrorCode check_sent_ents( Range& allsent );
01421 
01422     //! assign entities to the input processor part
01423     ErrorCode assign_entities_part( std::vector< EntityHandle >& entities, const int proc );
01424 
01425     //! remove entities from the input processor part
01426     ErrorCode remove_entities_part( Range& entities, const int proc );
01427 
01428     //! MB interface associated with this writer
01429     Interface* mbImpl;
01430 
01431     //! Proc config object, keeps info on parallel stuff
01432     ProcConfig procConfig;
01433 
01434     //! Sequence manager, to get more efficient access to entities
01435     SequenceManager* sequenceManager;
01436 
01437     //! Error handler
01438     Error* errorHandler;
01439 
01440     //! more data buffers, proc-specific
01441     std::vector< Buffer* > localOwnedBuffs, remoteOwnedBuffs;
01442 
01443     //! reset message buffers to their initial state
01444     // void reset_all_buffers();
01445 
01446     //! delete all buffers, freeing up any memory held by them
01447     void delete_all_buffers();
01448 
01449     //! send request objects; may be used if store_remote_handles is enabled
01450     std::vector< MPI_Request > sendReqs;
01451 
01452     //! receive request objects
01453     std::vector< MPI_Request > recvReqs, recvRemotehReqs;
01454 
01455     //! processor rank for each buffer index
01456     std::vector< unsigned int > buffProcs;
01457 
01458     //! the partition and interface sets for this communication instance
01459     Range partitionSets, interfaceSets;
01460 
01461     //! all local entities shared with other processors, whether they are ghosts here or ghosted elsewhere
01462     std::set< EntityHandle > sharedEnts;
01463 
01464     //! tags used to save sharing procs and handles
01465     Tag sharedpTag, sharedpsTag, sharedhTag, sharedhsTag, pstatusTag, ifaceSetsTag, partitionTag;
01466 
01467     int globalPartCount;  //!< Cache of global part count
01468 
01469     EntityHandle partitioningSet;  //!< entity set containing all parts
01470 
01471     std::ofstream myFile;
01472 
01473     int pcommID;
01474 
01475     int ackbuff;
01476 
01477     //! used to set verbosity level and to report output
01478     DebugOutput* myDebug;
01479 
01480     //! Data about shared sets
01481     SharedSetData* sharedSetData;
01482 };
01483 
01484 inline ParallelComm::Buffer::Buffer( const Buffer& other_buff )
01485 {
01486     alloc_size = other_buff.alloc_size;
01487     mem_ptr    = (unsigned char*)malloc( alloc_size );
01488     memcpy( mem_ptr, other_buff.mem_ptr, alloc_size );
01489     buff_ptr = mem_ptr + ( other_buff.buff_ptr - other_buff.mem_ptr );
01490 }
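
// Note: the copy constructor above makes a deep copy of the buffer contents
// and preserves the current pack/unpack position, i.e. buff_ptr points at the
// same offset within the new allocation as in other_buff.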
01491 
01492 inline ParallelComm::Buffer::Buffer( unsigned int new_size ) : mem_ptr( NULL ), buff_ptr( NULL ), alloc_size( 0 )
01493 {
01494     if( new_size ) this->reserve( new_size );
01495 }
01496 
01497 inline ParallelComm::Buffer::~Buffer()
01498 {
01499     if( mem_ptr )
01500     {
01501         free( mem_ptr );
01502         mem_ptr = NULL;
01503     }
01504 }
01505 
01506 #define DEBUG_BUFFER 0
01507 
01508 inline void ParallelComm::Buffer::reserve( unsigned int new_size )
01509 {
01510 
01511 #if DEBUG_BUFFER  // '#if' rather than '#ifdef', so the 0/1 value defined above actually selects a branch
01512     int tmp_pos = 0;
01513     if( mem_ptr )
01514     {
01515         tmp_pos = buff_ptr - mem_ptr;
01516     }
01517     buff_ptr = (unsigned char*)malloc( new_size );
01518     assert( 0 <= tmp_pos && tmp_pos <= (int)alloc_size );
01519     if( tmp_pos ) memcpy( buff_ptr, mem_ptr, tmp_pos );
01520     if( mem_ptr ) free( mem_ptr );
01521     mem_ptr    = buff_ptr;
01522     alloc_size = new_size;
01523     buff_ptr   = mem_ptr + tmp_pos;
01524 #else
01525     if( mem_ptr && alloc_size < new_size )
01526     {
01527         size_t tmp_pos = buff_ptr - mem_ptr;  // mem_ptr is known non-null in this branch
01528         mem_ptr        = (unsigned char*)realloc( mem_ptr, new_size );
01529         alloc_size     = new_size;
01530         buff_ptr       = mem_ptr + tmp_pos;
01531     }
01532     else if( !mem_ptr )
01533     {
01534         mem_ptr    = (unsigned char*)malloc( new_size );
01535         alloc_size = new_size;
01536         buff_ptr   = mem_ptr;
01537     }
01538 #endif
01539 }
01540 
01541 inline void ParallelComm::Buffer::check_space( unsigned int addl_space )
01542 {
01543     assert( buff_ptr >= mem_ptr && buff_ptr <= mem_ptr + alloc_size );
01544     unsigned int new_size = buff_ptr - mem_ptr + addl_space;
01545     if( new_size > alloc_size ) reserve( 3 * new_size / 2 );
01546 }
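
// check_space() guarantees room for at least 'addl_space' more bytes past
// buff_ptr, growing the allocation to 3/2 of the required size so repeated
// small packs have amortized cost.  A minimal usage sketch (illustrative;
// 'value' is assumed to be an int the caller wants packed):
//
//   buff->check_space( sizeof( int ) );
//   memcpy( buff->buff_ptr, &value, sizeof( int ) );
//   buff->buff_ptr += sizeof( int );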
01547 
01548 inline void ParallelComm::reset_all_buffers()
01549 {
01550     std::vector< Buffer* >::iterator vit;
01551     for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
01552         ( *vit )->reset_buffer();
01553     for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
01554         ( *vit )->reset_buffer();
01555 }
01556 
01557 inline void ParallelComm::delete_all_buffers()
01558 {
01559     std::vector< Buffer* >::iterator vit;
01560     for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
01561         delete( *vit );
01562     localOwnedBuffs.clear();
01563 
01564     for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
01565         delete( *vit );
01566     remoteOwnedBuffs.clear();
01567 }
01568 
01569 inline const std::vector< unsigned int >& ParallelComm::buff_procs() const
01570 {
01571     return buffProcs;
01572 }
01573 
01574 inline ErrorCode ParallelComm::get_shared_proc_tags( Tag& sharedp,
01575                                                      Tag& sharedps,
01576                                                      Tag& sharedh,
01577                                                      Tag& sharedhs,
01578                                                      Tag& pstatus )
01579 {
01580     sharedp  = sharedp_tag();
01581     sharedps = sharedps_tag();
01582     sharedh  = sharedh_tag();
01583     sharedhs = sharedhs_tag();
01584     pstatus  = pstatus_tag();
01585 
01586     return MB_SUCCESS;
01587 }
01588 
01589 inline ErrorCode ParallelComm::exchange_tags( const char* tag_name, const Range& entities )
01590 {
01591     // get the tag handle
01592     std::vector< Tag > tags( 1 );
01593     ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
01594     if( MB_SUCCESS != result )
01595         return result;
01596     else if( !tags[0] )
01597         return MB_TAG_NOT_FOUND;
01598 
01599     return exchange_tags( tags, tags, entities );
01600 }
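
// Illustrative use of the name-based convenience overload above (assumes
// 'pcomm' is an initialized ParallelComm* and 'shared_ents' is a Range of
// shared entities; "GLOBAL_ID" is just an example of an existing tag):
//
//   ErrorCode rval = pcomm->exchange_tags( "GLOBAL_ID", shared_ents );
//   if( MB_SUCCESS != rval ) return rval;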
01601 
01602 inline ErrorCode ParallelComm::exchange_tags( Tag tagh, const Range& entities )
01603 {
01604     // get the tag handle
01605     std::vector< Tag > tags;
01606     tags.push_back( tagh );
01607 
01608     return exchange_tags( tags, tags, entities );
01609 }
01610 
01611 inline ErrorCode ParallelComm::reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities )
01612 {
01613     // get the tag handle
01614     std::vector< Tag > tags( 1 );
01615     ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
01616     if( MB_SUCCESS != result )
01617         return result;
01618     else if( !tags[0] )
01619         return MB_TAG_NOT_FOUND;
01620 
01621     return reduce_tags( tags, tags, mpi_op, entities );
01622 }
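
// Illustrative use of reduce_tags (same assumptions as the exchange_tags
// example above; "ERROR_ESTIMATE" is a hypothetical tag name).  With MPI_MAX,
// each owned entity's tag value is combined with the copies received from
// sharing processors, keeping the maximum:
//
//   ErrorCode rval = pcomm->reduce_tags( "ERROR_ESTIMATE", MPI_MAX, shared_ents );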
01623 
01624 inline ErrorCode ParallelComm::reduce_tags( Tag tagh, const MPI_Op mpi_op, const Range& entities )
01625 {
01626     // get the tag handle
01627     std::vector< Tag > tags;
01628     tags.push_back( tagh );
01629 
01630     return reduce_tags( tags, tags, mpi_op, entities );
01631 }
01632 
01633 inline ErrorCode ParallelComm::get_comm_procs( std::set< unsigned int >& procs )
01634 {
01635     ErrorCode result = get_interface_procs( procs );
01636     if( MB_SUCCESS != result ) return result;
01637 
01638     std::copy( buffProcs.begin(), buffProcs.end(), std::inserter( procs, procs.begin() ) );
01639 
01640     return MB_SUCCESS;
01641 }
01642 
01643 inline ErrorCode ParallelComm::get_owner( EntityHandle entity, int& owner )
01644 {
01645     EntityHandle tmp_handle;
01646     return get_owner_handle( entity, owner, tmp_handle );
01647 }
01648 
01649 /** \brief Unpack message with remote handles (const pointer to buffer)
01650  */
01651 inline ErrorCode ParallelComm::unpack_remote_handles( unsigned int from_proc,
01652                                                       const unsigned char* buff_ptr,
01653                                                       std::vector< EntityHandle >& L2hloc,
01654                                                       std::vector< EntityHandle >& L2hrem,
01655                                                       std::vector< unsigned int >& L2p )
01656 {
01657     // cast away const-ness, we won't be passing back a modified ptr
01658     unsigned char* tmp_buff = const_cast< unsigned char* >( buff_ptr );
01659     return unpack_remote_handles( from_proc, tmp_buff, L2hloc, L2hrem, L2p );
01660 }
01661 
01662 inline void ParallelComm::set_rank( unsigned int r )
01663 {
01664     procConfig.proc_rank( r );
01665     if( procConfig.proc_size() <= r ) procConfig.proc_size( r + 1 );  // size must be at least rank+1
01666 }
01667 
01668 inline void ParallelComm::set_size( unsigned int s )
01669 {
01670     procConfig.proc_size( s );
01671 }
01672 
01673 inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle* entities,
01674                                                  int num_entities,
01675                                                  std::set< int >& procs,
01676                                                  int op )
01677 {
01678     Range dum_range;
01679     // cast away constness because the entities are passed as const
01680     EntityHandle* ents_cast = const_cast< EntityHandle* >( entities );
01681     std::copy( ents_cast, ents_cast + num_entities, range_inserter( dum_range ) );
01682     return get_sharing_data( dum_range, procs, op );
01683 }
01684 
01685 inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity,
01686                                                  int* ps,
01687                                                  EntityHandle* hs,
01688                                                  unsigned char& pstat,
01689                                                  int& num_ps )
01690 {
01691     unsigned int dum_ps;
01692     ErrorCode result = get_sharing_data( entity, ps, hs, pstat, dum_ps );
01693     if( MB_SUCCESS == result ) num_ps = dum_ps;
01694     return result;
01695 }
01696 
01697 inline void ParallelComm::set_send_request( int n_request )
01698 {
01699     sendReqs.resize( n_request, MPI_REQUEST_NULL );
01700 }
01701 
01702 inline void ParallelComm::set_recv_request( int n_request )
01703 {
01704     recvReqs.resize( n_request, MPI_REQUEST_NULL );
01705 }
01706 }  // namespace moab
01707 
01708 #endif