/**
 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
 * storing and accessing finite element mesh data.
 *
 * Copyright 2004 Sandia Corporation. Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
 * retains certain rights in this software.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 */

#ifndef MOAB_PARALLEL_COMM_HPP
#define MOAB_PARALLEL_COMM_HPP

#include "moab/Forward.hpp"
#include "moab/Interface.hpp"
#include "moab/Range.hpp"
#include "moab/ProcConfig.hpp"
#include <map>
#include <set>
#include <vector>
#include <iostream>
#include <fstream>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include "moab/TupleList.hpp"

namespace moab
{

class SequenceManager;
class Error;
template < typename KeyType, typename ValType, ValType NullVal >
class RangeMap;
typedef RangeMap< EntityHandle, EntityHandle, 0 > HandleMap;
class ParallelMergeMesh;
class DebugOutput;
class SharedSetData;

#define MAX_SHARING_PROCS 64

/**
 * \brief Parallel communications in MOAB
 * \author Tim Tautges
 *
 * This class implements methods to communicate mesh between processors.
 *
 */
class ParallelComm
{
  public:
    friend class ParallelMergeMesh;

    // ==================================
    // \section CONSTRUCTORS/DESTRUCTORS/PCOMM MANAGEMENT
    // ==================================

    //! constructor
    ParallelComm( Interface* impl, MPI_Comm comm, int* pcomm_id_out = 0 );

    //! constructor taking packed buffer, for testing
    ParallelComm( Interface* impl, std::vector< unsigned char >& tmp_buff, MPI_Comm comm, int* pcomm_id_out = 0 );

    //! Get ID used to reference this PCOMM instance
    int get_id() const
    {
        return pcommID;
    }

    //! get the indexed pcomm object from the interface
    static ParallelComm* get_pcomm( Interface* impl, const int index );

    //! Get ParallelComm instance associated with a partition handle.
    //! Will create a ParallelComm instance if a) one does not already
    //! exist and b) a valid value for MPI_Comm is passed.
    static ParallelComm* get_pcomm( Interface* impl, EntityHandle partitioning, const MPI_Comm* comm = 0 );

    static ErrorCode get_all_pcomm( Interface* impl, std::vector< ParallelComm* >& list );
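
    /* Usage sketch (illustrative only, not part of this interface): construct
     * a ParallelComm on a communicator, then recover the same instance later
     * through its ID. Assumes an initialized MPI environment and a moab::Core
     * instance "mb" (from moab/Core.hpp).
     *
     *   int pcomm_id;
     *   moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD, &pcomm_id );
     *   moab::ParallelComm* pc = moab::ParallelComm::get_pcomm( &mb, pcomm_id );
     *   assert( pc == &pcomm && pc->get_id() == pcomm_id );
     */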

    //! destructor
    ~ParallelComm();

    static unsigned char PROC_SHARED, PROC_OWNER;

    // ==================================
    // \section GLOBAL IDS
    // ==================================

    //! assign a global id space, for largest-dimension or all entities (and
    //! in either case for vertices too)
    //! \param owned_only If true, do not get global IDs for non-owned entities
    //!        from remote processors.
    ErrorCode assign_global_ids( EntityHandle this_set, const int dimension, const int start_id = 1,
                                 const bool largest_dim_only = true, const bool parallel = true,
                                 const bool owned_only = false );

    //! assign a global id space, for largest-dimension or all entities (and
    //! in either case for vertices too)
    ErrorCode assign_global_ids( Range entities[], const int dimension, const int start_id, const bool parallel,
                                 const bool owned_only );

    //! check for global ids, based only on whether the tag handle exists;
    //! if it does not, create global ids for the specified dimensions
    //! \param owned_only If true, do not get global IDs for non-owned entities
    //!        from remote processors.
    ErrorCode check_global_ids( EntityHandle this_set, const int dimension, const int start_id = 1,
                                const bool largest_dim_only = true, const bool parallel = true,
                                const bool owned_only = false );
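
    /* Sketch: assign 1-based global IDs to the largest-dimension entities
     * (and their vertices) contained in a set; "file_set" and the abbreviated
     * error handling are assumptions of this example.
     *
     *   moab::ErrorCode rval = pcomm.assign_global_ids( file_set, 3 );
     *   if( moab::MB_SUCCESS != rval ) return rval;  // propagate the error
     */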

    // ==================================
    // \section HIGH-LEVEL COMMUNICATION (send/recv/bcast/scatter ents, exchange tags)
    // ==================================

    /** \brief Send entities to another processor, optionally waiting until it's done
     *
     * Send entities to another processor, with adjs, sets, and tags.
     * If store_remote_handles is true, this call receives back handles assigned to
     * entities sent to the destination processor and stores them in sharedh_tag or
     * sharedhs_tag.
     * \param to_proc Destination processor
     * \param orig_ents Entities requested to send
     * \param adjs If true, send adjacencies for equiv entities (currently unsupported)
     * \param tags If true, send tag values for all tags assigned to entities
     * \param store_remote_handles If true, also recv message with handles on destination processor
     *        (currently unsupported)
     * \param final_ents Range containing all entities sent
     * \param incoming Keeps track of whether any messages are coming to this processor (newly added)
     * \param wait_all If true, wait until all messages received/sent complete
     */
    ErrorCode send_entities( const int to_proc, Range& orig_ents, const bool adjs, const bool tags,
                             const bool store_remote_handles, const bool is_iface, Range& final_ents, int& incoming1,
                             int& incoming2,                                 // newly added
                             TupleList& entprocs,                            // newly added
                             std::vector< MPI_Request >& recv_remoteh_reqs,  // newly added
                             bool wait_all = true );

    ErrorCode send_entities( std::vector< unsigned int >& send_procs, std::vector< Range* >& send_ents, int& incoming1,
                             int& incoming2, const bool store_remote_handles );

    /** \brief Receive entities from another processor, optionally waiting until it's done
     *
     * Receive entities from another processor, with adjs, sets, and tags.
     * If store_remote_handles is true, this call sends back handles assigned to
     * the entities received.
     * \param from_proc Source processor
     * \param store_remote_handles If true, send message with new entity handles to source processor
     *        (currently unsupported)
     * \param final_ents Range containing all entities received
     * \param incoming Keeps track of whether any messages are coming to this processor (newly added)
     * \param wait_all If true, wait until all messages received/sent complete
     */
    ErrorCode recv_entities( const int from_proc, const bool store_remote_handles, const bool is_iface,
                             Range& final_ents, int& incoming1, int& incoming2,
                             std::vector< std::vector< EntityHandle > >& L1hloc,
                             std::vector< std::vector< EntityHandle > >& L1hrem, std::vector< std::vector< int > >& L1p,
                             std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
                             std::vector< unsigned int >& L2p, std::vector< MPI_Request >& recv_remoteh_reqs,
                             bool wait_all = true );

    ErrorCode recv_entities( std::set< unsigned int >& recv_procs, int incoming1, int incoming2,
                             const bool store_remote_handles, const bool migrate = false );

    /** \brief Receive messages from another processor in a while loop
     *
     * Receive messages from another processor.
     * \param from_proc Source processor
     * \param store_remote_handles If true, send message with new entity handles to source processor
     *        (currently unsupported)
     * \param final_ents Range containing all entities received
     * \param incoming Keeps track of whether any messages are coming to this processor (newly added)
     */
    ErrorCode recv_messages( const int from_proc, const bool store_remote_handles, const bool is_iface,
                             Range& final_ents, int& incoming1, int& incoming2,
                             std::vector< std::vector< EntityHandle > >& L1hloc,
                             std::vector< std::vector< EntityHandle > >& L1hrem, std::vector< std::vector< int > >& L1p,
                             std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
                             std::vector< unsigned int >& L2p, std::vector< MPI_Request >& recv_remoteh_reqs );

    ErrorCode recv_remote_handle_messages( const int from_proc, int& incoming2, std::vector< EntityHandle >& L2hloc,
                                           std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
                                           std::vector< MPI_Request >& recv_remoteh_reqs );

    /** \brief Exchange ghost cells with neighboring procs
     * Neighboring processors are those sharing an interface
     * with this processor. All entities of dimension ghost_dim
     * within num_layers of the interface, measured going through bridge_dim,
     * are exchanged. See MeshTopoUtil::get_bridge_adjacencies for a description
     * of bridge adjacencies. If wait_all is false and store_remote_handles
     * is true, MPI_Request objects are available in the sendReqs[2*MAX_SHARING_PROCS]
     * member array, with inactive requests marked as MPI_REQUEST_NULL. If
     * store_remote_handles or wait_all is false, this function returns after
     * all entities have been received and processed.
     * \param ghost_dim Dimension of ghost entities to be exchanged
     * \param bridge_dim Dimension of entities used to measure layers from interface
     * \param num_layers Number of layers of ghosts requested
     * \param addl_ents Dimension of additional adjacent entities to exchange with ghosts, 0 if none
     * \param store_remote_handles If true, send message with new entity handles to source processor
     * \param wait_all If true, function does not return until all send buffers
     *        are cleared.
     */
    ErrorCode exchange_ghost_cells( int ghost_dim, int bridge_dim, int num_layers, int addl_ents,
                                    bool store_remote_handles, bool wait_all = true, EntityHandle* file_set = NULL );
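
    /* Sketch: after shared entities have been resolved (see
     * resolve_shared_ents below), exchange one layer of 3D ghost elements
     * bridged through vertices. Illustrative only; error handling omitted.
     *
     *   moab::ErrorCode rval =
     *       pcomm.exchange_ghost_cells( 3,      // ghost_dim: regions
     *                                   0,      // bridge_dim: vertices
     *                                   1,      // num_layers
     *                                   0,      // addl_ents: none
     *                                   true );  // store_remote_handles
     */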

    /** \brief Static version of exchange_ghost_cells, exchanging info through
     * buffers rather than messages
     */
    static ErrorCode exchange_ghost_cells( ParallelComm** pc, unsigned int num_procs, int ghost_dim, int bridge_dim,
                                           int num_layers, int addl_ents, bool store_remote_handles,
                                           EntityHandle* file_sets = NULL );

    /** \brief Post "MPI_Irecv" before meshing
     * \param exchange_procs Processor vector exchanged
     */
    ErrorCode post_irecv( std::vector< unsigned int >& exchange_procs );

    ErrorCode post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs );

    /** \brief Exchange owned mesh for input mesh entities and sets
     * This function should be called collectively over the communicator for this ParallelComm.
     * \param exchange_procs Processor vector exchanged
     * \param exchange_ents Exchanged entities for each processor
     * \param migrate If true, the ownership of the exchanged entities is changed
     */
    ErrorCode exchange_owned_meshs( std::vector< unsigned int >& exchange_procs, std::vector< Range* >& exchange_ents,
                                    std::vector< MPI_Request >& recv_ent_reqs,
                                    std::vector< MPI_Request >& recv_remoteh_reqs, bool store_remote_handles,
                                    bool wait_all = true, bool migrate = false, int dim = 0 );

    /** \brief Exchange owned mesh for input mesh entities and sets
     * This function is called twice by exchange_owned_meshs, to exchange entities before sets.
     * \param migrate If true, the ownership of the exchanged entities is changed
     */
    ErrorCode exchange_owned_mesh( std::vector< unsigned int >& exchange_procs, std::vector< Range* >& exchange_ents,
                                   std::vector< MPI_Request >& recv_ent_reqs,
                                   std::vector< MPI_Request >& recv_remoteh_reqs, const bool recv_posted,
                                   bool store_remote_handles, bool wait_all, bool migrate = false );

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all ghosted/shared entities should have a value for this
     * tag (or the tag should have a default value). If the entities vector is empty, all shared
     * entities participate in the exchange. If a proc has no owned entities this function must
     * still be called since it is collective.
     * \param src_tags Vector of tag handles to be exchanged
     * \param dst_tags Tag handles to store the tags on the non-owning procs
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags,
                             const Range& entities );

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If the entities vector is empty, all shared entities participate in the exchange.
     * If a proc has no owned entities this function must still be called since it is collective.
     * \param tag_name Name of tag to be exchanged
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( const char* tag_name, const Range& entities );

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If the entities vector is empty, all shared entities participate in the exchange.
     * If a proc has no owned entities this function must still be called since it is collective.
     * \param tagh Handle of tag to be exchanged
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( Tag tagh, const Range& entities );
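
    /* Sketch: push the values of a dense double tag from owners to ghost and
     * shared copies. The tag name "density", and the "mb"/"pcomm" variables
     * from the earlier sketches, are assumptions of this example.
     *
     *   moab::Tag density;
     *   mb.tag_get_handle( "density", 1, moab::MB_TYPE_DOUBLE, density );
     *   moab::Range ents;  // empty: operate on all shared entities
     *   moab::ErrorCode rval = pcomm.exchange_tags( density, ents );
     */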

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all ghosted/shared entities should have a value for this
     * tag (or the tag should have a default value). Operation is any MPI_Op, with the result
     * stored in the destination tag.
     * \param src_tags Vector of tag handles to be reduced
     * \param dst_tags Vector of tag handles in which the answer will be stored
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *        entities
     */
    ErrorCode reduce_tags( const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags, const MPI_Op mpi_op,
                           const Range& entities );

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * Same as std::vector variant except for one tag specified by name
     * \param tag_name Name of tag to be reduced
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *        entities
     */
    ErrorCode reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities );

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * Same as std::vector variant except for one tag specified by handle
     * \param tag_handle Handle of the tag to be reduced
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *        entities
     */
    ErrorCode reduce_tags( Tag tag_handle, const MPI_Op mpi_op, const Range& entities );
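
    /* Sketch: sum per-copy contributions of a tag across all sharing procs,
     * reducing in place (source tag == destination tag). The tag name
     * "partial_sum" is an assumption of this example.
     *
     *   moab::Tag psum;
     *   mb.tag_get_handle( "partial_sum", 1, moab::MB_TYPE_DOUBLE, psum );
     *   moab::Range ents;  // empty: all shared entities participate
     *   moab::ErrorCode rval = pcomm.reduce_tags( psum, MPI_SUM, ents );
     */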

    /** \brief Broadcast all entities resident on from_proc to other processors
     * This function assumes remote handles are *not* being stored, since (usually)
     * every processor will know about the whole mesh.
     * \param from_proc Processor having the mesh to be broadcast
     * \param entities On return, the entities sent or received in this call
     * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
     * \param tags If true, all non-default-valued tags are sent for sent entities
     */
    ErrorCode broadcast_entities( const int from_proc, Range& entities, const bool adjacencies = false,
                                  const bool tags = true );

    /** \brief Scatter entities on from_proc to other processors
     * This function assumes remote handles are *not* being stored, since (usually)
     * every processor will know about the whole mesh.
     * \param from_proc Processor having the mesh to be scattered
     * \param entities On return, the entities sent or received in this call
     * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
     * \param tags If true, all non-default-valued tags are sent for sent entities
     */
    ErrorCode scatter_entities( const int from_proc, std::vector< Range >& entities, const bool adjacencies = false,
                                const bool tags = true );
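
    /* Sketch: rank 0 holds a serially read mesh and broadcasts it to all
     * other ranks; every rank calls collectively with the same arguments.
     * Illustrative only.
     *
     *   moab::Range ents;
     *   if( 0 == pcomm.rank() ) mb.get_entities_by_handle( 0, ents );
     *   moab::ErrorCode rval = pcomm.broadcast_entities( 0, ents );
     */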

    /////////////////////////////////////////////////////////////////////////////////
    // Send and Receive routines for a sequence of entities: use case UMR
    /////////////////////////////////////////////////////////////////////////////////

    /** \brief Send and receive data from a set of processors
     */
    ErrorCode send_recv_entities( std::vector< int >& send_procs, std::vector< std::vector< int > >& msgsizes,
                                  std::vector< std::vector< EntityHandle > >& senddata,
                                  std::vector< std::vector< EntityHandle > >& recvdata );

    ErrorCode update_remote_data( EntityHandle entity, std::vector< int >& procs,
                                  std::vector< EntityHandle >& handles );

    ErrorCode get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc );

    /////////////////////////////////////////////////////////////////////////////////

    // ==================================
    // \section INITIALIZATION OF PARALLEL DATA (resolve_shared_ents, etc.)
    // ==================================

    /** \brief Resolve shared entities between processors
     *
     * Resolve shared entities between processors for entities in proc_ents,
     * by comparing global id tag values on vertices on the skin of elements in
     * proc_ents. Shared entities are assigned a tag that's either
     * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
     * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
     * number of sharing processors. Values in these tags denote the ranks
     * of sharing processors, and the list ends with the value -1.
     *
     * If shared_dim is input as -1 or not input, a value one less than the
     * maximum dimension of entities in proc_ents is used.
     *
     * \param proc_ents Entities for which to resolve shared entities
     * \param shared_dim Maximum dimension of shared entities to look for
     */
    ErrorCode resolve_shared_ents( EntityHandle this_set, Range& proc_ents, int resolve_dim = -1, int shared_dim = -1,
                                   Range* skin_ents = NULL, const Tag* id_tag = 0 );

    /** \brief Resolve shared entities between processors
     *
     * Same as resolve_shared_ents(Range&), except works for
     * all entities in the instance of dimension dim.
     *
     * If shared_dim is input as -1 or not input, a value one less than the
     * maximum dimension of entities is used.
     *
     * \param dim Dimension of entities in the partition
     * \param shared_dim Maximum dimension of shared entities to look for
     */
    ErrorCode resolve_shared_ents( EntityHandle this_set, int resolve_dim = 3, int shared_dim = -1,
                                   const Tag* id_tag = 0 );
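
    /* Sketch: the typical call after a parallel read, resolving sharing for
     * the 3D elements in "file_set" down to shared vertices; "file_set" is an
     * assumption carried over from the earlier sketches.
     *
     *   moab::ErrorCode rval = pcomm.resolve_shared_ents( file_set,
     *                                                     3,     // resolve_dim
     *                                                     -1 );  // shared_dim: deduce
     */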

    static ErrorCode resolve_shared_ents( ParallelComm** pc, const unsigned int np, EntityHandle this_set,
                                          const int to_dim );

    /** Resolve shared sets.
     *
     * Generates a list of candidate sets from those (directly)
     * contained in the passed set and passes them to the other version
     * of \c resolve_shared_sets.
     * \param this_set Set directly containing candidate sets (e.g. file set)
     * \param id_tag Tag containing global IDs for entity sets.
     */
    ErrorCode resolve_shared_sets( EntityHandle this_set, const Tag* id_tag = 0 );

    /** Resolve shared sets.
     *
     * Use values of id_tag to match sets across processes and populate
     * sharing data for sets.
     * \param candidate_sets Sets to consider as potentially shared.
     * \param id_tag Tag containing global IDs for entity sets.
     */
    ErrorCode resolve_shared_sets( Range& candidate_sets, Tag id_tag );

    /** \brief Extend shared sets with ghost entities
     * After ghosting, ghost entities do not yet have information about
     * the material set, partition set, Neumann or Dirichlet set they could
     * belong to.
     * This method will assign ghosted entities to those special entity sets.
     * In some cases we might even have to create those sets, if they do not
     * exist yet on the local processor.
     *
     * The special entity sets all have a unique identifier, in the form of an
     * integer tag on the set.
     * The shared sets data is not used, because we do not use the geometry
     * sets, as they are not uniquely identified.
     *
     * \param file_set : file set used per application
     */
    ErrorCode augment_default_sets_with_ghosts( EntityHandle file_set );

    // ==================================
    // \section GET PARALLEL DATA (shared/owned/iface entities, etc.)
    // ==================================

    /** \brief Get parallel status of an entity
     * Returns the parallel status of an entity
     *
     * \param entity The entity being queried
     * \param pstatus_val Parallel status of the entity
     */
    ErrorCode get_pstatus( EntityHandle entity, unsigned char& pstatus_val );

    /** \brief Get entities with the given pstatus bit(s) set
     * Returns any entities whose pstatus tag value v satisfies (v & pstatus_val)
     *
     * \param dim Dimension of entities to be returned, or -1 if any
     * \param pstatus_val pstatus value of desired entities
     * \param pstatus_ents Entities returned from function
     */
    ErrorCode get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents );

    /** \brief Return the rank of the entity owner
     */
    ErrorCode get_owner( EntityHandle entity, int& owner );

    /** \brief Return the owner processor and handle of a given entity
     */
    ErrorCode get_owner_handle( EntityHandle entity, int& owner, EntityHandle& handle );

    /** \brief Get the shared processors/handles for an entity
     * Get the shared processors/handles for an entity. Arrays must
     * be large enough to receive data for all sharing procs. Does *not* include
     * this proc if only shared with one other proc.
     * \param entity Entity being queried
     * \param ps Pointer to sharing proc data
     * \param hs Pointer to shared proc handle data
     * \param pstat Reference to pstatus data returned from this function
     */
    ErrorCode get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs, unsigned char& pstat,
                                unsigned int& num_ps );

    /** \brief Get the shared processors/handles for an entity
     * Same as the other version but with int num_ps
     * \param entity Entity being queried
     * \param ps Pointer to sharing proc data
     * \param hs Pointer to shared proc handle data
     * \param pstat Reference to pstatus data returned from this function
     */
    ErrorCode get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs, unsigned char& pstat,
                                int& num_ps );

    /** \brief Get the intersection or union of all sharing processors
     * Get the intersection or union of all sharing processors. The processor
     * set is cleared as part of this function.
     * \param entities Entity list ptr
     * \param num_entities Number of entities
     * \param procs Processors returned
     * \param op Either Interface::UNION or Interface::INTERSECT
     */
    ErrorCode get_sharing_data( const EntityHandle* entities, int num_entities, std::set< int >& procs,
                                int op = Interface::INTERSECT );

    /** \brief Get the intersection or union of all sharing processors
     * Same as previous variant but with a range as input
     */
    ErrorCode get_sharing_data( const Range& entities, std::set< int >& procs, int op = Interface::INTERSECT );

    /** \brief Get shared entities of specified dimension
     * If other_proc is -1, any shared entities are returned. If dim is -1,
     * entities of all dimensions on the interface are returned.
     * \param other_proc Rank of processor for which interface entities are requested
     * \param shared_ents Entities returned from function
     * \param dim Dimension of interface entities requested
     * \param iface If true, return only entities on the interface
     * \param owned_filter If true, return only owned shared entities
     */
    ErrorCode get_shared_entities( int other_proc, Range& shared_ents, int dim = -1, const bool iface = false,
                                   const bool owned_filter = false );
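
    /* Sketch: list all owned entities shared with any processor, then query
     * the owner of one of them (which should be this rank). Illustrative only.
     *
     *   moab::Range shared;
     *   moab::ErrorCode rval = pcomm.get_shared_entities( -1, shared, -1,
     *                                                     false,   // iface
     *                                                     true );  // owned_filter
     *   int owner;
     *   if( !shared.empty() ) rval = pcomm.get_owner( shared.front(), owner );
     */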
    /*
    //! return partition sets; if tag_name is input, gets sets with
    //! that tag name, otherwise uses PARALLEL_PARTITION tag
    ErrorCode get_partition_sets(EntityHandle this_set,
                                 Range &part_sets,
                                 const char *tag_name = NULL);
    */
    //! get processors with which this processor shares an interface
    ErrorCode get_interface_procs( std::set< unsigned int >& iface_procs, const bool get_buffs = false );

    //! get processors with which this processor communicates
    ErrorCode get_comm_procs( std::set< unsigned int >& procs );

    // ==================================
    // \section SHARED SETS
    // ==================================

    //! Get array of process IDs sharing a set. Returns zero
    //! and passes back NULL if set is not shared.
    ErrorCode get_entityset_procs( EntityHandle entity_set, std::vector< unsigned >& ranks ) const;

    //! Get rank of the owner of a shared set.
    //! Returns this proc if set is not shared.
    //! Optionally returns handle on owning process for shared set.
    ErrorCode get_entityset_owner( EntityHandle entity_set, unsigned& owner_rank,
                                   EntityHandle* remote_handle = 0 ) const;

    //! Given set owner and handle on owner, find local set handle
    ErrorCode get_entityset_local_handle( unsigned owning_rank, EntityHandle remote_handle,
                                          EntityHandle& local_handle ) const;

    //! Get all shared sets
    ErrorCode get_shared_sets( Range& result ) const;

    //! Get ranks of all processes that own at least one set that is
    //! shared with this process. Will include the rank of this process
    //! if this process owns any shared set.
    ErrorCode get_entityset_owners( std::vector< unsigned >& ranks ) const;

    //! Get shared sets owned by process with specified rank.
    ErrorCode get_owned_sets( unsigned owning_rank, Range& sets_out ) const;
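
    /* Sketch: print the owning rank of every shared set; a set owned by this
     * process reports this process's own rank. Illustrative only.
     *
     *   moab::Range sets;
     *   pcomm.get_shared_sets( sets );
     *   for( moab::Range::iterator it = sets.begin(); it != sets.end(); ++it )
     *   {
     *       unsigned owner;
     *       pcomm.get_entityset_owner( *it, owner );
     *       std::cout << "set " << *it << " owned by rank " << owner << std::endl;
     *   }
     */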

    // ==================================
    // \section LOW-LEVEL DATA (tags, sets on interface/partition, etc.)
    // ==================================

    //! Get proc config for this communication object
    const ProcConfig& proc_config() const
    {
        return procConfig;
    }

    //! Get proc config for this communication object
    ProcConfig& proc_config()
    {
        return procConfig;
    }

    unsigned rank() const
    {
        return proc_config().proc_rank();
    }
    unsigned size() const
    {
        return proc_config().proc_size();
    }
    MPI_Comm comm() const
    {
        return proc_config().proc_comm();
    }

    //! return the tags used to indicate shared procs and handles
    ErrorCode get_shared_proc_tags( Tag& sharedp_tag, Tag& sharedps_tag, Tag& sharedh_tag, Tag& sharedhs_tag,
                                    Tag& pstatus_tag );

    //! return partition, interface set ranges
    Range& partition_sets()
    {
        return partitionSets;
    }
    const Range& partition_sets() const
    {
        return partitionSets;
    }
    Range& interface_sets()
    {
        return interfaceSets;
    }
    const Range& interface_sets() const
    {
        return interfaceSets;
    }

    //! return sharedp tag
    Tag sharedp_tag();

    //! return sharedps tag
    Tag sharedps_tag();

    //! return sharedh tag
    Tag sharedh_tag();

    //! return sharedhs tag
    Tag sharedhs_tag();

    //! return pstatus tag
    Tag pstatus_tag();

    //! return pcomm tag; static because we might not have a pcomm before going
    //! to look for one on the interface
    static Tag pcomm_tag( Interface* impl, bool create_if_missing = true );

    //! return partitions set tag
    Tag partition_tag();
    Tag part_tag()
    {
        return partition_tag();
    }

    // ==================================
    // \section DEBUGGING AIDS
    // ==================================

    //! print contents of pstatus value in human-readable form
    void print_pstatus( unsigned char pstat, std::string& ostr );

    //! print contents of pstatus value in human-readable form to std::cout
    void print_pstatus( unsigned char pstat );

    // ==================================
    // \section IMESHP-RELATED FUNCTIONS
    // ==================================

    //! return all the entities in parts owned locally
    ErrorCode get_part_entities( Range& ents, int dim = -1 );

    EntityHandle get_partitioning() const
    {
        return partitioningSet;
    }
    ErrorCode set_partitioning( EntityHandle h );
    ErrorCode get_global_part_count( int& count_out ) const;
    ErrorCode get_part_owner( int part_id, int& owner_out ) const;
    ErrorCode get_part_id( EntityHandle part, int& id_out ) const;
    ErrorCode get_part_handle( int id, EntityHandle& handle_out ) const;
    ErrorCode create_part( EntityHandle& part_out );
    ErrorCode destroy_part( EntityHandle part );
    ErrorCode collective_sync_partition();
    ErrorCode get_part_neighbor_ids( EntityHandle part, int neighbors_out[MAX_SHARING_PROCS], int& num_neighbors_out );
    ErrorCode get_interface_sets( EntityHandle part, Range& iface_sets_out, int* adj_part_id = 0 );
    ErrorCode get_owning_part( EntityHandle entity, int& owning_part_id_out, EntityHandle* owning_handle = 0 );
    ErrorCode get_sharing_parts( EntityHandle entity, int part_ids_out[MAX_SHARING_PROCS], int& num_part_ids_out,
                                 EntityHandle remote_handles[MAX_SHARING_PROCS] = 0 );

    /** Filter the entities by pstatus tag.
     * op is one of PSTATUS_AND, PSTATUS_OR, PSTATUS_NOT; an entity is output if:
     * AND: all bits set in pstatus_val are also set on the entity
     * OR: any bits set in pstatus_val are also set on the entity
     * NOT: any bits set in pstatus_val are not set on the entity
     *
     * Results are returned in the input list, unless returned_ents is passed
     * in non-null, in which case results are returned in returned_ents.
     *
     * If ents is passed in empty, the filter is done on shared entities in
     * this pcomm instance, i.e. the contents of sharedEnts.
     *
     * \param ents Input entities to filter
     * \param pstatus_val pstatus value to which entities are compared
     * \param op Bitwise operation performed between pstatus values
     * \param to_proc If non-negative and PSTATUS_SHARED is set on pstatus_val,
     *        only entities shared with to_proc are returned
     * \param returned_ents If non-null, results of the filter are put in the
     *        pointed-to range
     */
    ErrorCode filter_pstatus( Range& ents, const unsigned char pstatus_val, const unsigned char op, int to_proc = -1,
                              Range* returned_ents = NULL );
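
    /* Sketch: reduce a range to the entities this rank owns by filtering out
     * anything with the "not owned" bit set. The PSTATUS_* bit constants are
     * assumed to come from MBParallelConventions.h; "all_ents" is an
     * assumption of this example.
     *
     *   moab::Range owned = all_ents;
     *   moab::ErrorCode rval =
     *       pcomm.filter_pstatus( owned, PSTATUS_NOT_OWNED, PSTATUS_NOT );
     */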

    /** \brief Get entities on interfaces shared with another proc
     *
     * \param other_proc Other proc sharing the interface
     * \param dim Dimension of entities to return, -1 if all dims
     * \param iface_ents Returned entities
     */
    ErrorCode get_iface_entities( int other_proc, int dim, Range& iface_ents );

    Interface* get_moab() const
    {
        return mbImpl;
    }

    ErrorCode clean_shared_tags( std::vector< Range* >& exchange_ents );

    class Buffer
    {
      public:
        unsigned char* mem_ptr;
        unsigned char* buff_ptr;
        unsigned int alloc_size;

        Buffer( unsigned int sz = 0 );
        Buffer( const Buffer& );
        ~Buffer();
        void reset_buffer( size_t buff_pos = 0 )
        {
            reset_ptr( buff_pos );
            reserve( INITIAL_BUFF_SIZE );
        }
        void reset_ptr( size_t buff_pos = 0 )
        {
            assert( ( !mem_ptr && !buff_pos ) || ( alloc_size >= buff_pos ) );
            buff_ptr = mem_ptr + buff_pos;
        }
        inline void reserve( unsigned int new_size );
        void set_stored_size()
        {
            *( (int*)mem_ptr ) = (int)( buff_ptr - mem_ptr );
        }
        int get_stored_size()
        {
            return *( (int*)mem_ptr );
        }
        int get_current_size()
        {
            return (int)( buff_ptr - mem_ptr );
        }

        void check_space( unsigned int addl_space );
    };

    //! public 'cuz we want to unit test these externally
    ErrorCode pack_buffer( Range& orig_ents, const bool adjacencies, const bool tags, const bool store_remote_handles,
                           const int to_proc, Buffer* buff, TupleList* entprocs = NULL, Range* allsent = NULL );

    ErrorCode unpack_buffer( unsigned char* buff_ptr, const bool store_remote_handles, const int from_proc,
                             const int ind, std::vector< std::vector< EntityHandle > >& L1hloc,
                             std::vector< std::vector< EntityHandle > >& L1hrem, std::vector< std::vector< int > >& L1p,
                             std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
                             std::vector< unsigned int >& L2p, std::vector< EntityHandle >& new_ents,
                             const bool created_iface = false );

    ErrorCode pack_entities( Range& entities, Buffer* buff, const bool store_remote_handles, const int to_proc,
                             const bool is_iface, TupleList* entprocs = NULL, Range* allsent = NULL );

    //! unpack entities in buff_ptr
    ErrorCode unpack_entities( unsigned char*& buff_ptr, const bool store_remote_handles, const int from_ind,
                               const bool is_iface, std::vector< std::vector< EntityHandle > >& L1hloc,
                               std::vector< std::vector< EntityHandle > >& L1hrem,
                               std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
                               std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
                               std::vector< EntityHandle >& new_ents, const bool created_iface = false );

    //! Call exchange_all_shared_handles, then compare the results with tag data
    //! on local shared entities.
    ErrorCode check_all_shared_handles( bool print_em = false );

    static ErrorCode check_all_shared_handles( ParallelComm** pcs, int num_pcs );

    struct SharedEntityData
    {
        EntityHandle local;
        EntityHandle remote;
        EntityID owner;
    };

    ErrorCode pack_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data );

    // check consistency of sharedEnts against their tags and their
    // vertices' tags
    ErrorCode check_local_shared();

    // check contents of communicated shared entity data against tags
    ErrorCode check_my_shared_handles( std::vector< std::vector< SharedEntityData > >& shents,
                                       const char* prefix = NULL );

    //! set rank for this pcomm; USED FOR TESTING ONLY!
    void set_rank( unsigned int r );

    //! set size for this pcomm; USED FOR TESTING ONLY!
    void set_size( unsigned int r );

    //! get (and possibly allocate) buffers for messages to/from to_proc; returns
    //! index of to_proc in buffProcs vector; if is_new is non-NULL, sets it to
    //! whether a new buffer was allocated
    //! PUBLIC ONLY FOR TESTING!
    int get_buffers( int to_proc, bool* is_new = NULL );

    //! get buff processor vector
    const std::vector< unsigned int >& buff_procs() const;

    /* \brief Unpack message with remote handles
     * PUBLIC ONLY FOR TESTING!
     */
    ErrorCode unpack_remote_handles( unsigned int from_proc, unsigned char*& buff_ptr,
                                     std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
                                     std::vector< unsigned int >& L2p );

    /* \brief Pack message with remote handles
     * PUBLIC ONLY FOR TESTING!
     */
    ErrorCode pack_remote_handles( std::vector< EntityHandle >& L1hloc, std::vector< EntityHandle >& L1hrem,
                                   std::vector< int >& procs, unsigned int to_proc, Buffer* buff );

    // each entry in proc_nvecs contains a set of procs and the entities *possibly*
    // on the interface between those procs; this function makes sets for each,
    // and tags the set with the procs sharing it; interface sets are optionally
    // returned; NOTE: a subsequent step is used to verify entities on the interface
    // and remove them if they're not shared
    ErrorCode create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );

    // do the same but working straight from sharedEnts
    ErrorCode create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim );

    ErrorCode tag_shared_verts( TupleList& shared_ents,
                                std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
                                Range& proc_verts, unsigned int i_extra = 1 );

    ErrorCode list_entities( const EntityHandle* ents, int num_ents );

    ErrorCode list_entities( const Range& ents );

    void set_send_request( int n_request );  // set send request array

    void set_recv_request( int n_request );  // set recv request array

    //! reset message buffers to their initial state
    // changed to public function (HJK)
    void reset_all_buffers();

    static const unsigned int INITIAL_BUFF_SIZE;

    //! set the verbosity level of output from this pcomm
    void set_debug_verbosity( int verb );

    //! get the verbosity level of output from this pcomm
    int get_debug_verbosity();

    /* \brief Gather tag value from entities down to a specified root proc
     * This function gathers data from a domain-decomposed mesh onto a global mesh
     * represented on the root processor. On the root, this gather mesh is distinct from
     * the root's domain-decomposed subdomain. Entities are matched by global id, or by
     * another tag if its handle is input. The dimension of all entities in gather_ents should
     * be the same, since this is the dimension of entities in gather_set that are queried for
     * matching global id tags.
     * \param gather_ents (Local) entities from which to gather data
     * \param tag_handle Tag whose values are being gathered
     * \param id_tag Tag to use for matching entities (global id used by default)
     * \param gather_set On root, set containing global mesh onto which to put data
     * \param root_proc_rank Rank of the specified root processor (default rank is 0)
     */
    ErrorCode gather_data( Range& gather_ents, Tag& tag_handle, Tag id_tag = 0, EntityHandle gather_set = 0,
                           int root_proc_rank = 0 );
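
    /* Sketch: gather a tag defined on locally owned elements onto a global
     * mesh held in a gather set on rank 0, matching entities by global id.
     * "owned_elems", "data_tag", and "gather_set" are assumptions of this
     * example, filled in by the caller.
     *
     *   moab::ErrorCode rval = pcomm.gather_data( owned_elems, data_tag,
     *                                             0,           // id_tag: global id
     *                                             gather_set,
     *                                             0 );         // root_proc_rank
     */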
851 : :
852 : : /* \brief communicate extra points positions on boundary
853 : : * This function is called after intersection of 2 meshes, to settle the
854 : : * position of the intersection points on the boundary (interface)
855 : : * The initial mesh distributed on each processor is decomposed after
856 : : * intersection with another mesh, such as that new points are created on the
857 : : * boundary. these points should better match at the interface !
858 : : * we perform an extra caution step, to ensure the robustness of the
859 : : * intersection algorithm; only shared edges extra nodes
860 : : * will be actually needed to be communicated, but we just pass by reference
861 : : * the whole extraNodesVec structure, we do
862 : : * not need to construct another data structure
863 : : * The node positions on edges that are owned will be communicated to other
864 : : * processors
865 : : *
866 : : * \param edges total range of entities
867 : : * \param shared_edges_owned edges for which to communicate data
868 : : * \param extraNodesVec handles of intersection vertices on all edges;
869 : : */
870 : : ErrorCode settle_intersection_points( Range& edges, Range& shared_edges_owned,
871 : : std::vector< std::vector< EntityHandle >* >& extraNodesVec,
872 : : double tolerance );
873 : :
874 : : /* \brief delete entities from moab database
875 : : * will check the shared ents array, and clean it if necessary
876 : : *
877 : : */
878 : : ErrorCode delete_entities( Range& to_delete );
879 : :
880 : : /*
881 : : * \brief correct multi-sharing info for thin layers
882 : : *
883 : : * will be used for at least 3 processes, when there are thin ghost layers
884 : : * right now it is public, for allowing users to call it directly
885 : : * eventually, it should become private, and be called automatically
886 : : */
887 : :
888 : : ErrorCode correct_thin_ghost_layers();
889 : :
890 : : private:
891 : : ErrorCode reduce_void( int tag_data_type, const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
892 : :
893 : : template < class T >
894 : : ErrorCode reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals );
895 : :
896 : : void print_debug_isend( int from, int to, unsigned char* buff, int tag, int size );
897 : :
898 : : void print_debug_irecv( int to, int from, unsigned char* buff, int size, int tag, int incoming );
899 : :
900 : : void print_debug_recd( MPI_Status status );
901 : :
902 : : void print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc );
903 : :
904 : : // common initialization code, called from various constructors
905 : : void initialize();
906 : :
907 : : ErrorCode set_sharing_data( EntityHandle ent, unsigned char pstatus, int old_nump, int new_nump, int* ps,
908 : : EntityHandle* hs );
909 : :
910 : : ErrorCode check_clean_iface( Range& allsent );
911 : :
912 : : void define_mpe();
913 : :
914 : : ErrorCode get_sent_ents( const bool is_iface, const int bridge_dim, const int ghost_dim, const int num_layers,
915 : : const int addl_ents, Range* sent_ents, Range& allsent, TupleList& entprocs );
916 : :
917 : : /** \brief Set pstatus values on entities
918 : : *
919 : : * \param pstatus_ents Entities to be set
920 : : * \param pstatus_val Pstatus value to be set
921 : : * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
922 : : * (and created if they don't exist)
923 : : * \param verts_too If true, vertices also set
924 : : * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
925 : : * existing value is over-written
926 : : */
927 : : ErrorCode set_pstatus_entities( Range& pstatus_ents, unsigned char pstatus_val, bool lower_dim_ents = false,
928 : : bool verts_too = true, int operation = Interface::UNION );
929 : :
930 : : /** \brief Set pstatus values on entities (vector-based function)
931 : : *
932 : : * \param pstatus_ents Entities to be set
933 : : * \param pstatus_val Pstatus value to be set
934 : : * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
935 : : * (and created if they don't exist)
936 : : * \param verts_too If true, vertices also set
937 : : * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
938 : : * existing value is over-written
939 : : */
940 : : ErrorCode set_pstatus_entities( EntityHandle* pstatus_ents, int num_ents, unsigned char pstatus_val,
941 : : bool lower_dim_ents = false, bool verts_too = true,
942 : : int operation = Interface::UNION );
943 : :
944 : : //! estimate size required to pack entities
945 : : int estimate_ents_buffer_size( Range& entities, const bool store_remote_handles );
946 : :
947 : : //! estimate size required to pack sets
948 : : int estimate_sets_buffer_size( Range& entities, const bool store_remote_handles );
949 : :
950 : : //! send the indicated buffer, possibly sending size first
951 : : ErrorCode send_buffer( const unsigned int to_proc, Buffer* send_buff, const int msg_tag, MPI_Request& send_req,
952 : : MPI_Request& ack_recv_req, int* ack_buff, int& this_incoming, int next_mesg_tag = -1,
953 : : Buffer* next_recv_buff = NULL, MPI_Request* next_recv_req = NULL,
954 : : int* next_incoming = NULL );
955 : :
956 : : //! process incoming message; if longer than the initial size, post
957 : : //! recv for next part then send ack; if ack, send second part; else
958 : : //! indicate that we're done and buffer is ready for processing
959 : : ErrorCode recv_buffer( int mesg_tag_expected, const MPI_Status& mpi_status, Buffer* recv_buff,
960 : : MPI_Request& recv_2nd_req, MPI_Request& ack_req, int& this_incoming, Buffer* send_buff,
961 : : MPI_Request& send_req, MPI_Request& sent_ack_req, bool& done, Buffer* next_buff = NULL,
962 : : int next_tag = -1, MPI_Request* next_req = NULL, int* next_incoming = NULL );
963 : :
964 : : //! pack a range of entities with equal # verts per entity, along with
965 : : //! the range on the sending proc
966 : : ErrorCode pack_entity_seq( const int nodes_per_entity, const bool store_remote_handles, const int to_proc,
967 : : Range& these_ents, std::vector< EntityHandle >& entities, Buffer* buff );
968 : :
969 : : ErrorCode print_buffer( unsigned char* buff_ptr, int mesg_type, int from_proc, bool sent );
970 : :
971 : : //! for all the entities in the received buffer; for each, save
972 : : //! entities in this instance which match connectivity, or zero if none found
973 : : ErrorCode unpack_iface_entities( unsigned char*& buff_ptr, const int from_proc, const int ind,
974 : : std::vector< EntityHandle >& recd_ents );
975 : :
976 : : ErrorCode pack_sets( Range& entities, Buffer* buff, const bool store_handles, const int to_proc );
977 : :
978 : : ErrorCode unpack_sets( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities, const bool store_handles,
979 : : const int to_proc );
980 : :
981 : : ErrorCode pack_adjacencies( Range& entities, Range::const_iterator& start_rit, Range& whole_range,
982 : : unsigned char*& buff_ptr, int& count, const bool just_count, const bool store_handles,
983 : : const int to_proc );
984 : :
985 : : ErrorCode unpack_adjacencies( unsigned char*& buff_ptr, Range& entities, const bool store_handles,
986 : : const int from_proc );
987 : :
988 : : /* \brief Unpack message with remote handles (const pointer to buffer)
989 : : */
990 : : ErrorCode unpack_remote_handles( unsigned int from_proc, const unsigned char* buff_ptr,
991 : : std::vector< EntityHandle >& L2hloc, std::vector< EntityHandle >& L2hrem,
992 : : std::vector< unsigned int >& L2p );
993 : :
994 : : //! given connectivity and type, find an existing entity, if there is one
995 : : ErrorCode find_existing_entity( const bool is_iface, const int owner_p, const EntityHandle owner_h,
996 : : const int num_ents, const EntityHandle* connect, const int num_connect,
997 : : const EntityType this_type, std::vector< EntityHandle >& L2hloc,
998 : : std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
999 : : EntityHandle& new_h );
1000 : :
1001 : : ErrorCode build_sharedhps_list( const EntityHandle entity, const unsigned char pstatus, const int sharedp,
1002 : : const std::set< unsigned int >& procs, unsigned int& num_ents, int* tmp_procs,
1003 : : EntityHandle* tmp_handles );
1004 : :
1005 : : /**\brief Get list of tags for which to exchange data
1006 : : *
1007 : : * Get tags and entities for which to exchange tag data. This function
1008 : : * was originally part of 'pack_tags' requested with the
1009 : : * 'all_possible_tags' parameter.
1010 : : *
1011 : : *\param all_entities Input. The set of entities for which data is to
1012 : : * be communicated.
1013 : : *\param all_tags Output. Populated with the handles of tags to be
1014 : : * sent.
1015 : : *\param tag_ranges Output. For each corresponding tag in all_tags, the
1016 : : * subset of 'all_entities' for which a tag value has
1017 : : * been set.
1018 : : */
1019 : : ErrorCode get_tag_send_list( const Range& all_entities, std::vector< Tag >& all_tags,
1020 : : std::vector< Range >& tag_ranges );
1021 : :
1022 : : /**\brief Serialize entity tag data
1023 : : *
1024 : : * This function operates in two passes. The first phase,
1025 : : * specified by 'just_count == true' calculates the necessary
1026 : : * buffer size for the serialized data. The second phase
1027 : : * writes the actual binary serialized representation of the
1028 : : * data to the passed buffer.
1029 : : *
1030 : : *\NOTE First two arguments are not used. (Legacy interface?)
1031 : : *
1032 : : *\param entities NOT USED
1033 : : *\param start_rit NOT USED
1034 : : *\param whole_range Should be the union of the sets of entities for
1035 : : * which tag values are to be serialized. Also
1036 : : * specifies ordering for indexes for tag values and
1037 : : * serves as the superset from which to compose entity
1038 : : * lists from individual tags if just_count and
1039 : : * all_possible_tags are both true.
1040 : : *\param buff_ptr Buffer into which to write binary serialized data
1041 : : *\param count Output: The size of the serialized data is added
1042 : : * to this parameter. NOTE: Should probably initialize
1043 : : * to zero before calling.
1044 : : *\param just_count If true, just calculate the buffer size required to
1045 : : * hold the serialized data. Will also append to
1046 : : * 'all_tags' and 'tag_ranges' if all_possible_tags
1047 : : * == true.
1048 : : *\param store_handles The data for each tag is preceded by a list of
1049 : : * EntityHandles designating the entity each of
1050 : : * the subsequent tag values corresponds to. This value
1051 : : * may be one of:
1052 : : * 1) If store_handles == false:
1053 : : * An invalid handle composed of {MBMAXTYPE,idx}, where
1054 : : * idx is the position of the entity in "whole_range".
1055 : : * 2) If store_hanldes == true and a valid remote
1056 : : * handle exists, the remote handle.
1057 : : * 3) If store_hanldes == true and no valid remote
1058 : : * handle is defined for the entity, the same as 1).
1059 : : *\param to_proc If 'store_handles' is true, the processor rank for
1060 : : * which to store the corresponding remote entity
1061 : : * handles.
1062 : : *\param all_tags List of tags to write
1063 : : *\param tag_ranges List of entities to serialize tag data, one
1064 : : * for each corresponding tag handle in 'all_tags.
1065 : : */
1066 : : ErrorCode pack_tags( Range& entities, const std::vector< Tag >& src_tags, const std::vector< Tag >& dst_tags,
1067 : : const std::vector< Range >& tag_ranges, Buffer* buff, const bool store_handles,
1068 : : const int to_proc );
1069 : :
1070 : : /**\brief Calculate buffer size required to pack tag data
1071 : : *\param source_tag The tag for which data will be serialized
1072 : : *\param entities The entities for which tag values will be serialized
1073 : : *\param count_out Output: The required buffer size, in bytes.
1074 : : */
1075 : : ErrorCode packed_tag_size( Tag source_tag, const Range& entities, int& count_out );
1076 : :
1077 : : /**\brief Serialize tag data
1078 : : *\param source_tag The tag for which data will be serialized
1079 : : *\param destination_tag Tag in which to store unpacked tag data. Typically
1080 : : * the same as source_tag.
1081 : : *\param entities The entities for which tag values will be serialized
1082 : : *\param whole_range Calculate entity indices as location in this range
1083 : : *\param buff_ptr Input/Output: As input, pointer to the start of the
1084 : : * buffer in which to serialize data. As output, the
1085 : : * position just passed the serialized data.
1086 : : *\param count_out Output: The required buffer size, in bytes.
1087 : : *\param store_handles The data for each tag is preceded by a list of
1088 : : * EntityHandles designating the entity each of
1089 : : * the subsequent tag values corresponds to. This value
1090 : : * may be one of:
1091 : : * 1) If store_handles == false:
1092 : : * An invalid handle composed of {MBMAXTYPE,idx}, where
1093 : : * idx is the position of the entity in "whole_range".
1094 : : * 2) If store_hanldes == true and a valid remote
1095 : : * handle exists, the remote handle.
1096 : : * 3) If store_hanldes == true and no valid remote
1097 : : * handle is defined for the entity, the same as 1).
1098 : : *\param to_proc If 'store_handles' is true, the processor rank for
1099 : : * which to store the corresponding remote entity
1100 : : * handles.
1101 : : */
1102 : : ErrorCode pack_tag( Tag source_tag, Tag destination_tag, const Range& entities,
1103 : : const std::vector< EntityHandle >& whole_range, Buffer* buff, const bool store_remote_handles,
1104 : : const int to_proc );
1105 : :
1106 : : ErrorCode unpack_tags( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities, const bool store_handles,
1107 : : const int to_proc, const MPI_Op* const mpi_op = NULL );
1108 : :
1109 : : ErrorCode tag_shared_verts( TupleList& shared_verts, Range* skin_ents,
1110 : : std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
1111 : : Range& proc_verts );
1112 : :
1113 : : ErrorCode get_proc_nvecs( int resolve_dim, int shared_dim, Range* skin_ents,
1114 : : std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs );
1115 : :
1116 : : // after verifying shared entities, now parent/child links between sets can be established
1117 : : ErrorCode create_iface_pc_links();
1118 : :
1119 : : //! pack a range map with keys in this_range and values a contiguous series
1120 : : //! of handles starting at actual_start
1121 : : ErrorCode pack_range_map( Range& this_range, EntityHandle actual_start, HandleMap& handle_map );
1122 : :
1123 : : //! returns true if the set is an interface shared with to_proc
1124 : : bool is_iface_proc( EntityHandle this_set, int to_proc );
1125 : :
1126 : : //! for any remote_handles set to zero, remove corresponding sent_ents from
1127 : : //! iface_sets corresponding to from_proc
1128 : : ErrorCode update_iface_sets( Range& sent_ents, std::vector< EntityHandle >& remote_handles, int from_proc );
1129 : :
1130 : : //! for specified bridge/ghost dimension, to_proc, and number
1131 : : //! of layers, get the entities to be ghosted, and info on additional procs
1132 : : //! needing to communicate with to_proc
1133 : : ErrorCode get_ghosted_entities( int bridge_dim, int ghost_dim, int to_proc, int num_layers, int addl_ents,
1134 : : Range& ghosted_ents );
1135 : :
1136 : : //! add vertices adjacent to entities in this list
1137 : : ErrorCode add_verts( Range& sent_ents );
1138 : :
1139 : : //! Every processor sends shared entity handle data to every other processor
1140 : : //! that it shares entities with. The passed-back 'result' holds all
1141 : : //! received data, one vector per sharing processor (indexed as in
1142 : : //! buffProcs). This function is intended for debugging.
1143 : : ErrorCode exchange_all_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data,
1144 : : std::vector< std::vector< SharedEntityData > >& result );
1145 : :
1146 : : //! replace handles in from_vec with corresponding handles on
1147 : : //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
1148 : : //! if no remote handle and new_ents is non-null, substitute
1149 : : //! instead CREATE_HANDLE(MBMAXTYPE, index) where index is handle's
1150 : : //! position in new_ents
1151 : : ErrorCode get_remote_handles( const bool store_remote_handles, EntityHandle* from_vec, EntityHandle* to_vec_tmp,
1152 : : int num_ents, int to_proc, const std::vector< EntityHandle >& new_ents );
1153 : :
1154 : : //! same as other version, except from_range and to_range should be
1155 : : //! different here
1156 : : ErrorCode get_remote_handles( const bool store_remote_handles, const Range& from_range, Range& to_range,
1157 : : int to_proc, const std::vector< EntityHandle >& new_ents );
1158 : :
1159 : : //! same as other version, except packs range into vector
1160 : : ErrorCode get_remote_handles( const bool store_remote_handles, const Range& from_range, EntityHandle* to_vec,
1161 : : int to_proc, const std::vector< EntityHandle >& new_ents );
1162 : :
1163 : : //! goes through from_vec, and for any with type MBMAXTYPE, replaces with
1164 : : //! new_ents value at index corresponding to id of entity in from_vec
1165 : : ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents );
1166 : :
1167 : : //! same as above except puts results in range
1168 : : ErrorCode get_local_handles( const Range& remote_handles, Range& local_handles,
1169 : : const std::vector< EntityHandle >& new_ents );
1170 : :
1171 : : //! same as above except gets new_ents from vector
1172 : : ErrorCode get_local_handles( EntityHandle* from_vec, int num_ents, const std::vector< EntityHandle >& new_ents );
1173 : :
1174 : : ErrorCode update_remote_data( Range& local_range, Range& remote_range, int other_proc,
1175 : : const unsigned char add_pstat );
1176 : :
1177 : : ErrorCode update_remote_data( const EntityHandle new_h, const int* ps, const EntityHandle* hs, const int num_ps,
1178 : : const unsigned char add_pstat );
1179 : :
1180 : : ErrorCode update_remote_data_old( const EntityHandle new_h, const int* ps, const EntityHandle* hs, const int num_ps,
1181 : : const unsigned char add_pstat );
1182 : :
1183 : : /** \brief Set pstatus tag interface bit on entities in sets passed in
1184 : : */
1185 : : ErrorCode tag_iface_entities();
1186 : :
1187 : : //! add a pc to the iface instance tag PARALLEL_COMM
1188 : : int add_pcomm( ParallelComm* pc );
1189 : :
1190 : : //! remove a pc from the iface instance tag PARALLEL_COMM
1191 : : void remove_pcomm( ParallelComm* pc );
1192 : :
1193 : : //! check entities to make sure there are no zero-valued remote handles
1194 : : //! where they shouldn't be
1195 : : ErrorCode check_sent_ents( Range& allsent );
1196 : :
1197 : : //! assign entities to the input processor part
1198 : : ErrorCode assign_entities_part( std::vector< EntityHandle >& entities, const int proc );
1199 : :
1200 : : //! remove entities from the input processor part
1201 : : ErrorCode remove_entities_part( Range& entities, const int proc );
1202 : :
1203 : : //! MB interface associated with this ParallelComm instance
1204 : : Interface* mbImpl;
1205 : :
1206 : : //! Proc config object, keeps info on parallel stuff
1207 : : ProcConfig procConfig;
1208 : :
1209 : : //! Sequence manager, to get more efficient access to entities
1210 : : SequenceManager* sequenceManager;
1211 : :
1212 : : //! Error handler
1213 : : Error* errorHandler;
1214 : :
1215 : : //! more data buffers, proc-specific
1216 : : std::vector< Buffer* > localOwnedBuffs, remoteOwnedBuffs;
1217 : :
1218 : : //! reset message buffers to their initial state
1219 : : // void reset_all_buffers();
1220 : :
1221 : : //! delete all buffers, freeing up any memory held by them
1222 : : void delete_all_buffers();
1223 : :
1224 : : //! send request objects; used if store_remote_handles is enabled
1225 : : std::vector< MPI_Request > sendReqs;
1226 : :
1227 : : //! receive request objects
1228 : : std::vector< MPI_Request > recvReqs, recvRemotehReqs;
1229 : :
1230 : : //! processor rank for each buffer index
1231 : : std::vector< unsigned int > buffProcs;
1232 : :
1233 : : //! the partition and interface sets for this communication instance
1234 : : Range partitionSets, interfaceSets;
1235 : :
1236 : : //! all local entities shared with others, whether ghost or ghosted
1237 : : std::set< EntityHandle > sharedEnts;
1238 : :
1239 : : //! tags used to save sharing procs and handles
1240 : : Tag sharedpTag, sharedpsTag, sharedhTag, sharedhsTag, pstatusTag, ifaceSetsTag, partitionTag;
1241 : :
1242 : : int globalPartCount; //!< Cache of global part count
1243 : :
1244 : : EntityHandle partitioningSet; //!< entity set containing all parts
1245 : :
1246 : : std::ofstream myFile;
1247 : :
1248 : : int pcommID; //!< ID of this instance, as returned by get_id()
1249 : :
1250 : : int ackbuff; //!< scratch int used to acknowledge received buffers (assumed purpose)
1251 : :
1252 : : //! used to set verbosity level and to report output
1253 : : DebugOutput* myDebug;
1254 : :
1255 : : //! Data about shared sets
1256 : : SharedSetData* sharedSetData;
1257 : : };
1258 : :
1259 : 0 : inline ParallelComm::Buffer::Buffer( const Buffer& other_buff )
1260 : : {
1261 : 0 : alloc_size = other_buff.alloc_size;
1262 : 0 : mem_ptr = (unsigned char*)malloc( alloc_size );
1263 : 0 : memcpy( mem_ptr, other_buff.mem_ptr, alloc_size );
1264 : 0 : buff_ptr = mem_ptr + ( other_buff.buff_ptr - other_buff.mem_ptr );
1265 : 0 : }
1266 : :
1267 : 0 : inline ParallelComm::Buffer::Buffer( unsigned int new_size ) : mem_ptr( NULL ), buff_ptr( NULL ), alloc_size( 0 )
1268 : : {
1269 [ # # ]: 0 : if( new_size ) this->reserve( new_size );
1270 : 0 : }
1271 : :
1272 : 0 : inline ParallelComm::Buffer::~Buffer()
1273 : : {
1274 [ # # ]: 0 : if( mem_ptr )
1275 : : {
1276 : 0 : free( mem_ptr );
1277 : 0 : mem_ptr = NULL;
1278 : : }
1279 : 0 : }
1280 : :
1281 : : #define DEBUG_BUFFER 0
1282 : :
1283 : 0 : inline void ParallelComm::Buffer::reserve( unsigned int new_size )
1284 : : {
1285 : :
1286 : : #if DEBUG_BUFFER
1287 : 0 : int tmp_pos = 0;
1288 [ # # ]: 0 : if( mem_ptr ) { tmp_pos = buff_ptr - mem_ptr; }
1289 : 0 : buff_ptr = (unsigned char*)malloc( new_size );
1290 [ # # ][ # # ]: 0 : assert( 0 <= tmp_pos && tmp_pos <= (int)alloc_size );
1291 [ # # ]: 0 : if( tmp_pos ) memcpy( buff_ptr, mem_ptr, tmp_pos );
1292 [ # # ]: 0 : if( mem_ptr ) free( mem_ptr );
1293 : 0 : mem_ptr = buff_ptr;
1294 : 0 : alloc_size = new_size;
1295 : 0 : buff_ptr = mem_ptr + tmp_pos;
1296 : : #else
1297 : : if( mem_ptr && alloc_size < new_size )
1298 : : {
1299 : : size_t tmp_pos = buff_ptr - mem_ptr; // mem_ptr is non-null in this branch
1300 : : mem_ptr = (unsigned char*)realloc( mem_ptr, new_size );
1301 : : alloc_size = new_size;
1302 : : buff_ptr = mem_ptr + tmp_pos;
1303 : : }
1304 : : else if( !mem_ptr )
1305 : : {
1306 : : mem_ptr = (unsigned char*)malloc( new_size );
1307 : : alloc_size = new_size;
1308 : : buff_ptr = mem_ptr;
1309 : : }
1310 : : #endif
1311 : 0 : }
1312 : :
1313 : 0 : inline void ParallelComm::Buffer::check_space( unsigned int addl_space )
1314 : : {
1315 [ # # ][ # # ]: 0 : assert( buff_ptr >= mem_ptr && buff_ptr <= mem_ptr + alloc_size );
1316 : 0 : unsigned int new_size = buff_ptr - mem_ptr + addl_space;
1317 [ # # ]: 0 : if( new_size > alloc_size ) reserve( 3 * new_size / 2 );
1318 : 0 : }
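 : :
 : : /* Note: check_space() grows the allocation to 1.5x the total needed
 : :  * ( reserve( 3 * new_size / 2 ) ), so repeated small packs reallocate
 : :  * geometrically rather than once per call. A hedged sketch of the
 : :  * packing pattern this supports, with 'buff' a Buffer* and 'value'
 : :  * hypothetical:
 : :  *
 : :  *   buff->check_space( sizeof( int ) );           // ensure room at cursor
 : :  *   memcpy( buff->buff_ptr, &value, sizeof( int ) );
 : :  *   buff->buff_ptr += sizeof( int );              // advance past packed data
 : :  */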
1319 : :
1320 : 9 : inline void ParallelComm::reset_all_buffers()
1321 : : {
1322 : 9 : std::vector< Buffer* >::iterator vit;
1323 [ # # ][ + - ]: 9 : for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
[ - + ]
1324 [ # # ][ # # ]: 0 : ( *vit )->reset_buffer();
1325 [ # # ][ + - ]: 9 : for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
[ - + ]
1326 [ # # ][ # # ]: 0 : ( *vit )->reset_buffer();
1327 : 9 : }
1328 : :
1329 : 42 : inline void ParallelComm::delete_all_buffers()
1330 : : {
1331 : 42 : std::vector< Buffer* >::iterator vit;
1332 [ # # ][ + - ]: 42 : for( vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit )
[ - + ]
1333 [ # # ][ # # ]: 0 : delete( *vit );
1334 : 42 : localOwnedBuffs.clear();
1335 : :
1336 [ # # ][ + - ]: 42 : for( vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit )
[ - + ]
1337 [ # # ][ # # ]: 0 : delete( *vit );
1338 : 42 : remoteOwnedBuffs.clear();
1339 : 42 : }
1340 : :
1341 : : inline const std::vector< unsigned int >& ParallelComm::buff_procs() const
1342 : : {
1343 : : return buffProcs;
1344 : : }
1345 : :
1346 : 0 : inline ErrorCode ParallelComm::get_shared_proc_tags( Tag& sharedp, Tag& sharedps, Tag& sharedh, Tag& sharedhs,
1347 : : Tag& pstatus )
1348 : : {
1349 : 0 : sharedp = sharedp_tag();
1350 : 0 : sharedps = sharedps_tag();
1351 : 0 : sharedh = sharedh_tag();
1352 : 0 : sharedhs = sharedhs_tag();
1353 : 0 : pstatus = pstatus_tag();
1354 : :
1355 : 0 : return MB_SUCCESS;
1356 : : }
1357 : :
1358 : 0 : inline ErrorCode ParallelComm::exchange_tags( const char* tag_name, const Range& entities )
1359 : : {
1360 : : // get the tag handle
1361 [ # # ]: 0 : std::vector< Tag > tags( 1 );
1362 [ # # ][ # # ]: 0 : ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
1363 [ # # ]: 0 : if( MB_SUCCESS != result )
1364 : 0 : return result;
1365 [ # # ][ # # ]: 0 : else if( !tags[0] )
1366 : 0 : return MB_TAG_NOT_FOUND;
1367 : :
1368 [ # # ]: 0 : return exchange_tags( tags, tags, entities );
1369 : : }
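 : :
 : : /* Usage sketch (hedged; assumes an Interface* 'mb', a tag named
 : :  * "TEMPERATURE" already defined on the shared entities, and elides
 : :  * error handling):
 : :  *
 : :  *   ParallelComm* pc = ParallelComm::get_pcomm( mb, 0 );
 : :  *   Range shared;
 : :  *   pc->get_shared_entities( -1, shared );
 : :  *   pc->exchange_tags( "TEMPERATURE", shared );
 : :  */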
1370 : :
1371 : 0 : inline ErrorCode ParallelComm::exchange_tags( Tag tagh, const Range& entities )
1372 : : {
1373 : : // get the tag handle
1374 [ # # ]: 0 : std::vector< Tag > tags;
1375 [ # # ]: 0 : tags.push_back( tagh );
1376 : :
1377 [ # # ]: 0 : return exchange_tags( tags, tags, entities );
1378 : : }
1379 : :
1380 : : inline ErrorCode ParallelComm::reduce_tags( const char* tag_name, const MPI_Op mpi_op, const Range& entities )
1381 : : {
1382 : : // get the tag handle
1383 : : std::vector< Tag > tags( 1 );
1384 : : ErrorCode result = mbImpl->tag_get_handle( tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY );
1385 : : if( MB_SUCCESS != result )
1386 : : return result;
1387 : : else if( !tags[0] )
1388 : : return MB_TAG_NOT_FOUND;
1389 : :
1390 : : return reduce_tags( tags, tags, mpi_op, entities );
1391 : : }
1392 : :
1393 : 0 : inline ErrorCode ParallelComm::reduce_tags( Tag tagh, const MPI_Op mpi_op, const Range& entities )
1394 : : {
1395 : : // get the tag handle
1396 [ # # ]: 0 : std::vector< Tag > tags;
1397 [ # # ]: 0 : tags.push_back( tagh );
1398 : :
1399 [ # # ]: 0 : return reduce_tags( tags, tags, mpi_op, entities );
1400 : : }
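 : :
 : : /* Usage sketch (hedged): sum a hypothetical integer tag 'weight_tag'
 : :  * across all sharing processors, so every copy of a shared entity ends
 : :  * up with the same reduced value:
 : :  *
 : :  *   pc->reduce_tags( weight_tag, MPI_SUM, shared );
 : :  */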
1401 : :
1402 : 4 : inline ErrorCode ParallelComm::get_comm_procs( std::set< unsigned int >& procs )
1403 : : {
1404 : 4 : ErrorCode result = get_interface_procs( procs );
1405 [ - + ]: 4 : if( MB_SUCCESS != result ) return result;
1406 : :
1407 : 4 : std::copy( buffProcs.begin(), buffProcs.end(), std::inserter( procs, procs.begin() ) );
1408 : :
1409 : 4 : return MB_SUCCESS;
1410 : : }
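 : :
 : : /* Usage sketch: collect the ranks this instance communicates with,
 : :  * i.e. the interface processors plus the ranks in buffProcs:
 : :  *
 : :  *   std::set< unsigned int > procs;
 : :  *   ErrorCode rval = pc->get_comm_procs( procs );
 : :  */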
1411 : :
1412 : 26 : inline ErrorCode ParallelComm::get_owner( EntityHandle entity, int& owner )
1413 : : {
1414 : : EntityHandle tmp_handle;
1415 [ + - ]: 26 : return get_owner_handle( entity, owner, tmp_handle );
1416 : : }
1417 : :
1418 : : /** \brief Unpack message with remote handles (const pointer to buffer)
1419 : : */
1420 : : inline ErrorCode ParallelComm::unpack_remote_handles( unsigned int from_proc, const unsigned char* buff_ptr,
1421 : : std::vector< EntityHandle >& L2hloc,
1422 : : std::vector< EntityHandle >& L2hrem,
1423 : : std::vector< unsigned int >& L2p )
1424 : : {
1425 : : // cast away const-ness; no modified pointer is passed back to the caller
1426 : : unsigned char* tmp_buff = const_cast< unsigned char* >( buff_ptr );
1427 : : return unpack_remote_handles( from_proc, tmp_buff, L2hloc, L2hrem, L2p );
1428 : : }
1429 : :
1430 : : inline void ParallelComm::set_rank( unsigned int r )
1431 : : {
1432 : : procConfig.proc_rank( r );
1433 : : if( procConfig.proc_size() <= r ) procConfig.proc_size( r + 1 ); // keep size > rank
1434 : : }
1435 : :
1436 : : inline void ParallelComm::set_size( unsigned int s )
1437 : : {
1438 : : procConfig.proc_size( s );
1439 : : }
1440 : :
1441 : 0 : inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle* entities, int num_entities, std::set< int >& procs,
1442 : : int op )
1443 : : {
1444 [ # # ]: 0 : Range dum_range;
1445 : : // cast away const-ness; the input array is only read while copying into the range
1446 : 0 : EntityHandle* ents_cast = const_cast< EntityHandle* >( entities );
1447 [ # # ][ # # ]: 0 : std::copy( ents_cast, ents_cast + num_entities, range_inserter( dum_range ) );
1448 [ # # ]: 0 : return get_sharing_data( dum_range, procs, op );
1449 : : }
1450 : :
1451 : 0 : inline ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs,
1452 : : unsigned char& pstat, int& num_ps )
1453 : : {
1454 : : unsigned int dum_ps;
1455 [ # # ]: 0 : ErrorCode result = get_sharing_data( entity, ps, hs, pstat, dum_ps );
1456 [ # # ]: 0 : if( MB_SUCCESS == result ) num_ps = dum_ps;
1457 : 0 : return result;
1458 : : }
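 : :
 : : /* Usage sketch: query sharing data for a single entity 'ent'
 : :  * (hypothetical); the arrays must be sized for the maximum sharing
 : :  * count, MAX_SHARING_PROCS:
 : :  *
 : :  *   int ps[MAX_SHARING_PROCS], num_ps;
 : :  *   EntityHandle hs[MAX_SHARING_PROCS];
 : :  *   unsigned char pstat;
 : :  *   ErrorCode rval = pc->get_sharing_data( ent, ps, hs, pstat, num_ps );
 : :  */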
1459 : :
1460 : : inline void ParallelComm::set_send_request( int n_request )
1461 : : {
1462 : : sendReqs.resize( n_request, MPI_REQUEST_NULL );
1463 : : }
1464 : :
1465 : : inline void ParallelComm::set_recv_request( int n_request )
1466 : : {
1467 : : recvReqs.resize( n_request, MPI_REQUEST_NULL );
1468 : : }
1469 : : } // namespace moab
1470 : :
1471 : : #endif