/** \file iMOAB.cpp
 */

#include "moab/MOABConfig.h"
#include "moab/Core.hpp"

#ifdef MOAB_HAVE_MPI
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "moab/ParCommGraph.hpp"
#include "moab/ParallelMergeMesh.hpp"
#include "moab/IntxMesh/IntxUtils.hpp"
#endif
#include "DebugOutput.hpp"
#include "moab/iMOAB.h"

/* this is needed because of direct access to hdf5/mhdf */
#ifdef MOAB_HAVE_HDF5
#include "mhdf.h"
#include <H5Tpublic.h>
#endif

#include "moab/CartVect.hpp"
#include "MBTagConventions.hpp"
#include "moab/MeshTopoUtil.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/MergeMesh.hpp"

#ifdef MOAB_HAVE_TEMPESTREMAP
#include "STLStringHelper.h"
#include "moab/IntxMesh/IntxUtils.hpp"

#include "moab/Remapping/TempestRemapper.hpp"
#include "moab/Remapping/TempestOnlineMap.hpp"
#endif

// C++ includes
#include <cassert>
#include <sstream>
#include <iostream>

using namespace moab;

// #define VERBOSE

// global variables; should they be organized in a structure, for easier reference?
// or how do we keep them global?

#ifdef __cplusplus
extern "C" {
#endif

#define CHKERRVAL( ierr )                        \
    {                                            \
        if( moab::MB_SUCCESS != ierr ) return 1; \
    }
#define CHKIERRVAL( ierr )        \
    {                             \
        if( 0 != ierr ) return 1; \
    }

#ifdef MOAB_HAVE_TEMPESTREMAP
struct TempestMapAppData
{
    moab::TempestRemapper* remapper;
    std::map< std::string, moab::TempestOnlineMap* > weightMaps;
    iMOAB_AppID pid_src;
    iMOAB_AppID pid_dest;
};
#endif

struct appData
{
    EntityHandle file_set;
    int global_id;  // external component id, unique for application
    std::string name;
    Range all_verts;
    Range local_verts;  // can include vertices that are shared but not owned, at the interface;
                        // these vertices would be all_verts if no ghosting was required
    Range ghost_vertices;  // locally ghosted from other processors
    Range primary_elems;
    Range owned_elems;
    Range ghost_elems;
    int dimension;             // 2 or 3, dimension of primary elements (redundant?)
    long num_global_elements;  // union of all elements in primary_elems; either from hdf5
                               // reading or from reduce
    long num_global_vertices;  // union of all nodes, after sharing is resolved; it could be
                               // determined from hdf5 reading
    Range mat_sets;
    std::map< int, int > matIndex;  // map from global block id to index in mat_sets
    Range neu_sets;
    Range diri_sets;
    std::map< std::string, Tag > tagMap;
    std::vector< Tag > tagList;
    bool point_cloud;

#ifdef MOAB_HAVE_MPI
    // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
    // application
    std::map< int, ParCommGraph* > pgraph;  // map from context () to the ParCommGraph*
#endif

#ifdef MOAB_HAVE_TEMPESTREMAP
    TempestMapAppData tempestData;
#endif
};

struct GlobalContext
{
    // are there reasons to have multiple moab inits? Is ref count needed?
    Interface* MBI;
    // we should also have the default tags stored, initialized
    Tag material_tag, neumann_tag, dirichlet_tag,
        globalID_tag;  // material, neumann, dirichlet, globalID
    int refCountMB;
    int iArgc;
    iMOAB_String* iArgv;
    int unused_pid;

    std::map< std::string, int > appIdMap;  // from app string (uppercase) to app id
    std::map< int, int > appIdCompMap;      // from component id to app id

#ifdef MOAB_HAVE_MPI
    std::vector< ParallelComm* > pcomms;  // created in order of applications, one moab::ParallelComm for each
#endif

    std::vector< appData > appDatas;  // the same order as pcomms
    int globalrank, worldprocs;
    bool MPI_initialized;

    GlobalContext()
    {
        MBI        = 0;
        refCountMB = 0;
        unused_pid = 0;
    }
};

static struct GlobalContext context;

ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
{
    context.iArgc = argc;
    context.iArgv = argv;  // shallow copy

    if( 0 == context.refCountMB )
    {
        context.MBI = new( std::nothrow ) moab::Core;
        // retrieve the default tags
        const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
                                                     DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
        // blocks, visible surfaceBC (neumann), vertexBC (dirichlet), global id, parallel partition
        Tag gtags[4];

        for( int i = 0; i < 4; i++ )
        {

            ErrorCode rval =
                context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );CHKERRVAL( rval );
        }

        context.material_tag  = gtags[0];
        context.neumann_tag   = gtags[1];
        context.dirichlet_tag = gtags[2];
        context.globalID_tag  = gtags[3];
    }

    context.MPI_initialized = false;
#ifdef MOAB_HAVE_MPI
    int flagInit;
    MPI_Initialized( &flagInit );

    if( flagInit && !context.MPI_initialized )
    {
        MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
        MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
        context.MPI_initialized = true;
    }
#endif

    context.refCountMB++;
    return 0;
}

ErrCode iMOAB_InitializeFortran()
{
    return iMOAB_Initialize( 0, 0 );
}

ErrCode iMOAB_Finalize()
{
    context.refCountMB--;

    if( 0 == context.refCountMB ) { delete context.MBI; }

    return MB_SUCCESS;
}
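
// Usage sketch (hypothetical driver, not part of the library): initialization
// is reference counted, so paired Initialize/Finalize calls are safe even when
// several components share the same process.
#if 0
int main( int argc, char** argv )
{
    iMOAB_Initialize( argc, (iMOAB_String*)argv );  // refCountMB becomes 1
    iMOAB_Initialize( argc, (iMOAB_String*)argv );  // refCountMB becomes 2; the MOAB core is reused

    // ... register applications, load meshes, exchange tags ...

    iMOAB_Finalize();  // refCountMB back to 1; MOAB core still alive
    iMOAB_Finalize();  // refCountMB reaches 0; the moab::Core instance is deleted
    return 0;
}
#endif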

ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
#ifdef MOAB_HAVE_MPI
                                   MPI_Comm* comm,
#endif
                                   int* compid, iMOAB_AppID pid )
{
    // will create a parallel comm for this application too, so there will be a
    // mapping from *pid to file set and to parallel comm instances
    std::string name( app_name );

    if( context.appIdMap.find( name ) != context.appIdMap.end() )
    {
        std::cout << " application " << name << " already registered \n";
        return 1;
    }

    *pid                   = context.unused_pid++;
    context.appIdMap[name] = *pid;
    int rankHere           = 0;
#ifdef MOAB_HAVE_MPI
    MPI_Comm_rank( *comm, &rankHere );
#endif
    if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
    if( *compid <= 0 )
    {
        std::cout << " convention for external application is to have its id positive \n";
        return 1;
    }

    if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
    {
        std::cout << " external application with comp id " << *compid << " is already registered\n";
        return 1;
    }

    context.appIdCompMap[*compid] = *pid;

    // now create ParallelComm and a file set for this application
#ifdef MOAB_HAVE_MPI
    if( comm )
    {
        ParallelComm* pco = new ParallelComm( context.MBI, *comm );

#ifndef NDEBUG
        int index = pco->get_id();  // it could be useful to get app id from pcomm instance ...
        assert( index == *pid );
        // here, we assert that the pid is the same as the id of the ParallelComm instance
        // useful for writing in parallel
#endif
        context.pcomms.push_back( pco );
    }
    else
    {
        context.pcomms.push_back( 0 );
    }
#endif

    // create now the file set that will be used for loading the model in
    EntityHandle file_set;
    ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );CHKERRVAL( rval );

    appData app_data;
    app_data.file_set  = file_set;
    app_data.global_id = *compid;  // will be used mostly for par comm graph
    app_data.name      = name;     // save the name of application

#ifdef MOAB_HAVE_TEMPESTREMAP
    app_data.tempestData.remapper = NULL;  // Only allocate as needed
#endif

    app_data.point_cloud = false;

    context.appDatas.push_back(
        app_data );  // appDatas[*pid].file_set will be the file set of interest
    return 0;
}
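
// Usage sketch (hypothetical values): each component registers once with a
// unique name and a positive external component id; iMOAB hands back the
// application id used by all subsequent calls.
#if 0
MPI_Comm comm  = MPI_COMM_WORLD;
int compid     = 8;  // made-up external component id; must be positive and unique
int appID;
iMOAB_AppID pid = &appID;
char appname[]  = "ATMOSPHERE";
int rc = iMOAB_RegisterApplication( appname, &comm, &compid, pid );
if( rc ) { /* name or component id already registered, or compid <= 0 */ }
#endif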

ErrCode iMOAB_RegisterFortranApplication( const iMOAB_String app_name,
#ifdef MOAB_HAVE_MPI
                                          int* comm,
#endif
                                          int* compid, iMOAB_AppID pid, int app_name_length )
{
    std::string name( app_name );

    if( (int)strlen( app_name ) > app_name_length )
    {
        std::cout << " length of string issue \n";
        return 1;
    }

#ifdef MOAB_HAVE_MPI
    MPI_Comm ccomm;
    if( comm )
    {
        // convert from Fortran communicator to a C communicator
        // see transfer of handles
        // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
        ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
    }
#endif

    // now call C style registration function:
    return iMOAB_RegisterApplication( app_name,
#ifdef MOAB_HAVE_MPI
                                      &ccomm,
#endif
                                      compid, pid );
}
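
// Note on the Fortran path: MPI_Comm_f2c turns the Fortran integer handle into
// an MPI_Comm. A minimal sketch of the same round trip (the Fortran handle
// value here is fabricated from the C side just for illustration):
#if 0
MPI_Fint fcomm = MPI_Comm_c2f( MPI_COMM_WORLD );  // what Fortran would pass in
MPI_Comm ccomm = MPI_Comm_f2c( fcomm );           // usable from C/C++ again
#endif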

ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
{
    // the file set, parallel comm are all in vectors indexed by *pid
    // assume we did not delete anything yet
    // *pid will not be reused if we register another application
    appData& data = context.appDatas[*pid];
    int rankHere  = 0;
#ifdef MOAB_HAVE_MPI
    ParallelComm* pco = context.pcomms[*pid];
    rankHere          = pco->rank();
#endif
    if( !rankHere )
        std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
                  << " is de-registered now \n";

    EntityHandle fileSet = data.file_set;
    // get all entities part of the file set
    Range fileents;
    ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );CHKERRVAL( rval );

    fileents.insert( fileSet );

    rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );CHKERRVAL( rval );  // append all mesh sets

#ifdef MOAB_HAVE_TEMPESTREMAP
    if( data.tempestData.remapper ) delete data.tempestData.remapper;
    if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
#endif

#ifdef MOAB_HAVE_MPI

    // we could get the pco also with
    // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);

    std::map< int, ParCommGraph* >& pargs = data.pgraph;

    // free the parallel comm graphs associated with this app
    for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
    {
        ParCommGraph* pgr = mt->second;
        delete pgr;
        pgr = NULL;
    }
    if( pco ) delete pco;
#endif

    // delete first all except vertices
    Range vertices = fileents.subset_by_type( MBVERTEX );
    Range noverts  = subtract( fileents, vertices );

    rval = context.MBI->delete_entities( noverts );CHKERRVAL( rval );
    // now retrieve connected elements that still exist (maybe in other sets, pids?)
    Range adj_ents_left;
    rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );CHKERRVAL( rval );
    rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );CHKERRVAL( rval );
    rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );CHKERRVAL( rval );

    if( !adj_ents_left.empty() )
    {
        Range conn_verts;
        rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );CHKERRVAL( rval );
        vertices = subtract( vertices, conn_verts );
    }

    rval = context.MBI->delete_entities( vertices );CHKERRVAL( rval );

    std::map< std::string, int >::iterator mit;

    for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
    {
        int pidx = mit->second;

        if( *pid == pidx ) { break; }
    }

    context.appIdMap.erase( mit );
    std::map< int, int >::iterator mit1;

    for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
    {
        int pidx = mit1->second;

        if( *pid == pidx ) { break; }
    }

    context.appIdCompMap.erase( mit1 );

    context.unused_pid--;  // we have to go backwards always TODO
    context.appDatas.pop_back();
#ifdef MOAB_HAVE_MPI
    context.pcomms.pop_back();
#endif
    return 0;
}
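
// Caveat (follows from the pop_back/unused_pid-- logic above): application ids
// are recycled strictly LIFO, so applications must be deregistered in reverse
// order of registration. A hypothetical sketch:
#if 0
int idA, idB, compA = 1, compB = 2;
MPI_Comm comm = MPI_COMM_WORLD;
char nameA[] = "A", nameB[] = "B";
iMOAB_RegisterApplication( nameA, &comm, &compA, &idA );  // idA == 0
iMOAB_RegisterApplication( nameB, &comm, &compB, &idB );  // idB == 1
iMOAB_DeregisterApplication( &idB );  // must come first
iMOAB_DeregisterApplication( &idA );  // then the older application
#endif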

ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename, int* num_global_vertices, int* num_global_elements,
                              int* num_dimension, int* num_parts, int filename_length )
{
    assert( filename_length != 0 );
#ifdef MOAB_HAVE_HDF5
    std::string filen( filename );

    if( filename_length < (int)strlen( filename ) ) { filen = filen.substr( 0, filename_length ); }

    *num_global_vertices = 0;
    int edges            = 0;
    int faces            = 0;
    int regions          = 0;
    *num_global_elements = 0;
    *num_dimension       = 0;
    *num_parts           = 0;

    mhdf_FileHandle file;
    mhdf_Status status;
    unsigned long max_id;
    struct mhdf_FileDesc* data;

    file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );

    if( mhdf_isError( &status ) )
    {
        fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
        return 1;
    }

    data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
                                1 );  // will use extra set info; will get parallel partition tag info too!

    if( mhdf_isError( &status ) )
    {
        fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
        return 1;
    }

    *num_dimension       = data->nodes.vals_per_ent;
    *num_global_vertices = (int)data->nodes.count;

    for( int i = 0; i < data->num_elem_desc; i++ )
    {
        struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
        struct mhdf_EntDesc* ent_d    = &( el_desc->desc );

        if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) ) { edges += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) ) { faces += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) ) { faces += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) ) { faces += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) ) { regions += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) ) { regions += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) ) { regions += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) ) { regions += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) ) { regions += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) ) { regions += ent_d->count; }

        if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) ) { regions += ent_d->count; }
    }

    *num_parts = data->numEntSets[0];

    // is this required?
    if( edges > 0 )
    {
        *num_dimension       = 1;  // I don't think it will ever return 1
        *num_global_elements = edges;
    }

    if( faces > 0 )
    {
        *num_dimension       = 2;
        *num_global_elements = faces;
    }

    if( regions > 0 )
    {
        *num_dimension       = 3;
        *num_global_elements = regions;
    }

    mhdf_closeFile( file, &status );

    free( data );

#else
    std::cout << filename
              << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
                 "formats other than a h5m file.\n";
    *num_global_vertices = *num_global_elements = *num_dimension = *num_parts = 0;
#endif

    return 0;
}
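
// Usage sketch (hypothetical mesh file): query the global counts before
// deciding on a partitioning strategy, without actually loading the mesh.
#if 0
int nverts, nelems, ndim, nparts;
char meshfile[] = "wholeATM_T.h5m";  // made-up file name
iMOAB_ReadHeaderInfo( meshfile, &nverts, &nelems, &ndim, &nparts, strlen( meshfile ) );
// nparts holds the number of parallel partitions stored in the h5m file
#endif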

ErrCode iMOAB_LoadMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String read_options,
                        int* num_ghost_layers, int filename_length, int read_options_length )
{
    assert( filename_length != 0 );

    if( (int)strlen( filename ) > filename_length )
    {
        std::cout << " filename length issue\n";
        return 1;
    }

    if( (int)strlen( read_options ) > read_options_length )
    {
        std::cout << " read options length issue\n";
        return 1;
    }

    // make sure we use the file set and pcomm associated with the *pid
    std::ostringstream newopts;
    newopts << read_options;

#ifdef MOAB_HAVE_MPI

    if( context.MPI_initialized )
    {
        if( context.worldprocs > 1 )
        {
            std::string opts( read_options );
            std::string pcid( "PARALLEL_COMM=" );
            std::size_t found = opts.find( pcid );

            if( found != std::string::npos )
            {
                std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
                return 1;
            }

            // in serial, apply PARALLEL_COMM option only for h5m files; it does not work for .g
            // files (used in test_remapping)
            std::string filen( filename );
            std::string::size_type idx = filen.rfind( '.' );

            if( idx != std::string::npos )
            {
                std::string extension = filen.substr( idx + 1 );
                if( extension == std::string( "h5m" ) ) newopts << ";;PARALLEL_COMM=" << *pid;
            }

            if( *num_ghost_layers >= 1 )
            {
                // if we want ghosts, we also want additional entities, hence the trailing ".3"
                // (the additional entities can be edges and faces that are part of the neumann sets)
                std::string pcid2( "PARALLEL_GHOSTS=" );
                std::size_t found2 = opts.find( pcid2 );

                if( found2 != std::string::npos )
                {
                    std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
                                 "number of layers \n";
                }
                else
                {
                    // dimension of primary entities is 3 here, but it could be 2 for climate
                    // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
                    // example: ";PARALLEL_GHOSTS=2.0.1"
                    newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
                }
            }
        }
    }

#endif

    // Now let us actually load the MOAB file with the appropriate read options
    ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );CHKERRVAL( rval );

#ifdef VERBOSE

    // some debugging stuff
    std::ostringstream outfile;
#ifdef MOAB_HAVE_MPI
    int rank   = context.pcomms[*pid]->rank();
    int nprocs = context.pcomms[*pid]->size();
    outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
#else
    outfile << "TaskMesh_n1.0.h5m";
#endif
    // the mesh contains ghosts too, but they are not part of mat/neumann set
    // write in serial the file, to see what tags are missing
    rval = context.MBI->write_file( outfile.str().c_str() );CHKERRVAL( rval );  // everything on current task, written in serial

#endif
    int rc = iMOAB_UpdateMeshInfo( pid );
    return rc;
}
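
// Usage sketch (hypothetical file and options): in parallel runs the
// PARALLEL_COMM option is appended automatically from *pid, so callers pass
// only the partitioning options, plus the ghost-layer count as an argument.
#if 0
char meshfile[] = "wholeATM_T.h5m";  // made-up file name
char readopts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
int nghlay      = 1;  // one layer of ghost elements
iMOAB_LoadMesh( pid, meshfile, readopts, &nghlay, strlen( meshfile ), strlen( readopts ) );
#endif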

ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, iMOAB_String filename, iMOAB_String write_options, int filename_length,
                         int write_options_length )
{
    // maybe do some processing of strings and lengths
    if( (int)strlen( filename ) > filename_length )
    {
        std::cout << " file name length issue\n";
        return 1;
    }

    if( (int)strlen( write_options ) > write_options_length )
    {
        std::cout << " write options issue\n";
        return 1;
    }

    std::ostringstream newopts;
#ifdef MOAB_HAVE_MPI
    std::string write_opts( write_options );
    std::string pcid( "PARALLEL_COMM=" );
    std::size_t found = write_opts.find( pcid );

    if( found != std::string::npos )
    {
        std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
        return 1;
    }

    // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
    std::string pw( "PARALLEL=WRITE_PART" );
    found = write_opts.find( pw );

    if( found != std::string::npos ) { newopts << "PARALLEL_COMM=" << *pid << ";"; }

#endif

    newopts << write_options;

    // Now let us actually write the file to disk with appropriate options
    ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &context.appDatas[*pid].file_set, 1 );CHKERRVAL( rval );

    return 0;
}
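
// Usage sketch (hypothetical names): a parallel write only needs
// "PARALLEL=WRITE_PART"; the matching PARALLEL_COMM is prepended automatically.
#if 0
char outfile[]   = "out.h5m";
char writeopts[] = "PARALLEL=WRITE_PART";
iMOAB_WriteMesh( pid, outfile, writeopts, strlen( outfile ), strlen( writeopts ) );
#endif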

ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
{
    // this will include ghost elements info
    //
    appData& data        = context.appDatas[*pid];
    EntityHandle fileSet = data.file_set;
    // first clear all data ranges; this can be called after ghosting
    data.all_verts.clear();
    data.primary_elems.clear();
    data.local_verts.clear();
    data.ghost_vertices.clear();
    data.owned_elems.clear();
    data.ghost_elems.clear();
    data.mat_sets.clear();
    data.neu_sets.clear();
    data.diri_sets.clear();

    // Let us get all the vertex entities
    ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );CHKERRVAL( rval );  // recursive

    // Let us check first entities of dimension = 3
    data.dimension = 3;
    rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );CHKERRVAL( rval );  // recursive

    if( data.primary_elems.empty() )
    {
        // No 3-D elements. Let us check entities of dimension = 2
        data.dimension = 2;
        rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );CHKERRVAL( rval );  // recursive

        if( data.primary_elems.empty() )
        {
            // No 3-D/2-D elements. Let us check entities of dimension = 1
            data.dimension = 1;
            rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );CHKERRVAL( rval );  // recursive

            if( data.primary_elems.empty() )
            {
                // no elements of dimension 1 or 2 or 3; it could happen for point clouds
                data.dimension = 0;
            }
        }
    }

    // check if the current mesh is just a point cloud
    data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );

#ifdef MOAB_HAVE_MPI

    if( context.MPI_initialized )
    {
        ParallelComm* pco = context.pcomms[*pid];

        // filter ghost vertices, from local
        rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );CHKERRVAL( rval );

        // Store handles for all ghosted entities
        data.ghost_vertices = subtract( data.all_verts, data.local_verts );

        // filter ghost elements, from local
        rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );CHKERRVAL( rval );

        data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
    }
    else
    {
        data.local_verts = data.all_verts;
        data.owned_elems = data.primary_elems;
    }

#else

    data.local_verts = data.all_verts;
    data.owned_elems = data.primary_elems;

#endif

    // Get the references for some standard internal tags such as material blocks, BCs, etc
    rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
                                                      data.mat_sets, Interface::UNION );CHKERRVAL( rval );

    rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
                                                      data.neu_sets, Interface::UNION );CHKERRVAL( rval );

    rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
                                                      data.diri_sets, Interface::UNION );CHKERRVAL( rval );

    return 0;
}
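
// Note on the ghost/owned split above: filter_pstatus with PSTATUS_NOT keeps
// entities that do NOT have the ghost bit set, so local_verts/owned_elems are
// the non-ghost entities and the ghost ranges are the set differences. A
// minimal sketch of the same idiom on an arbitrary range:
#if 0
Range all, notGhost;
ParallelComm* pco = context.pcomms[*pid];
pco->filter_pstatus( all, PSTATUS_GHOST, PSTATUS_NOT, -1, &notGhost );
Range ghosts = subtract( all, notGhost );
#endif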

ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid, int* num_visible_vertices, int* num_visible_elements,
                           int* num_visible_blocks, int* num_visible_surfaceBC, int* num_visible_vertexBC )
{
    ErrorCode rval;
    appData& data        = context.appDatas[*pid];
    EntityHandle fileSet = data.file_set;

    // this will include ghost elements
    // first clear all data ranges; this can be called after ghosting
    if( num_visible_elements )
    {
        num_visible_elements[2] = (int)data.primary_elems.size();
        // separate ghost and local/owned primary elements
        num_visible_elements[0] = (int)data.owned_elems.size();
        num_visible_elements[1] = (int)data.ghost_elems.size();
    }
    if( num_visible_vertices )
    {
        num_visible_vertices[2] = (int)data.all_verts.size();
        num_visible_vertices[1] = (int)data.ghost_vertices.size();
        num_visible_vertices[0] =
            num_visible_vertices[2] - num_visible_vertices[1];  // local are those that are not ghosts
    }

    if( num_visible_blocks )
    {
        rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
                                                          data.mat_sets, Interface::UNION );CHKERRVAL( rval );

        num_visible_blocks[2] = data.mat_sets.size();
        num_visible_blocks[0] = num_visible_blocks[2];
        num_visible_blocks[1] = 0;
    }

    if( num_visible_surfaceBC )
    {
        rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
                                                          data.neu_sets, Interface::UNION );CHKERRVAL( rval );

        num_visible_surfaceBC[2] = 0;
        // count how many faces are in each neu set, and how many regions are
        // adjacent to them;
        int numNeuSets = (int)data.neu_sets.size();

        for( int i = 0; i < numNeuSets; i++ )
        {
            Range subents;
            EntityHandle nset = data.neu_sets[i];
            rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );CHKERRVAL( rval );

            for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
            {
                EntityHandle subent = *it;
                Range adjPrimaryEnts;
                rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );CHKERRVAL( rval );

                num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
            }
        }

        num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
        num_visible_surfaceBC[1] = 0;
    }

    if( num_visible_vertexBC )
    {
        rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
                                                          data.diri_sets, Interface::UNION );CHKERRVAL( rval );

        num_visible_vertexBC[2] = 0;
        int numDiriSets         = (int)data.diri_sets.size();

        for( int i = 0; i < numDiriSets; i++ )
        {
            Range verts;
            EntityHandle diset = data.diri_sets[i];
            rval = context.MBI->get_entities_by_dimension( diset, 0, verts );CHKERRVAL( rval );

            num_visible_vertexBC[2] += (int)verts.size();
        }

        num_visible_vertexBC[0] = num_visible_vertexBC[2];
        num_visible_vertexBC[1] = 0;
    }

    return 0;
}
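
// Usage sketch: every counter argument is a 3-entry array filled as
// [0] = owned/local, [1] = ghost, [2] = total visible on this task.
#if 0
int nverts[3], nelems[3], nblocks[3], nsbc[3], nvbc[3];
iMOAB_GetMeshInfo( pid, nverts, nelems, nblocks, nsbc, nvbc );
int totalVisibleElements = nelems[2];  // owned (nelems[0]) + ghost (nelems[1])
#endif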

ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
{
    Range& verts = context.appDatas[*pid].all_verts;

    if( (int)verts.size() != *vertices_length ) { return 1; }  // problem with array length

    // global id tag is context.globalID_tag
    ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );CHKERRVAL( rval );

    return 0;
}

ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
{
    Range& verts = context.appDatas[*pid].all_verts;
    int i        = 0;
#ifdef MOAB_HAVE_MPI
    ParallelComm* pco = context.pcomms[*pid];

    for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
    {
        ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );CHKERRVAL( rval );
    }

    if( i != *vertices_length ) { return 1; }  // warning array allocation problem

#else

    /* everything owned by proc 0 */
    if( (int)verts.size() != *vertices_length ) { return 1; }  // warning array allocation problem

    for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
    {
        visible_global_rank_ID[i] = 0;
    }  // all vertices are owned by processor 0, as this is a serial run

#endif
    return 0;
}

ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
{
    Range& verts = context.appDatas[*pid].all_verts;

    // interleaved coordinates, so that means deep copy anyway
    if( *coords_length != 3 * (int)verts.size() ) { return 1; }

    ErrorCode rval = context.MBI->get_coords( verts, coordinates );CHKERRVAL( rval );

    return 0;
}
879 : 0 : ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
880 : : {
881 : : // local id blocks? they are counted from 0 to number of visible blocks ...
882 : : // will actually return material set tag value for global
883 : 0 : Range& matSets = context.appDatas[*pid].mat_sets;
884 : :
885 [ # # ]: 0 : if( *block_length != (int)matSets.size() ) { return 1; }
886 : :
887 : : // return material set tag gtags[0 is material set tag
888 [ # # ]: 0 : ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );CHKERRVAL( rval );
889 : :
890 : : // populate map with index
891 : 0 : std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
892 [ # # ]: 0 : for( unsigned i = 0; i < matSets.size(); i++ )
893 : : {
894 : 0 : matIdx[global_block_IDs[i]] = i;
895 : : }
896 : :
897 : 0 : return 0;
898 : : }
899 : :
900 : 0 : ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* vertices_per_element,
901 : : int* num_elements_in_block )
902 : : {
903 [ # # ]: 0 : std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
904 [ # # ]: 0 : std::map< int, int >::iterator it = matMap.find( *global_block_ID );
905 : :
906 [ # # ][ # # ]: 0 : if( it == matMap.end() ) { return 1; } // error in finding block with id
907 : :
908 [ # # ]: 0 : int blockIndex = matMap[*global_block_ID];
909 [ # # ][ # # ]: 0 : EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
910 [ # # ]: 0 : Range blo_elems;
911 [ # # ]: 0 : ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
912 : :
913 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval || blo_elems.empty() ) { return 1; }
[ # # ][ # # ]
914 : :
915 [ # # ][ # # ]: 0 : EntityType type = context.MBI->type_from_handle( blo_elems[0] );
916 : :
917 [ # # ][ # # ]: 0 : if( !blo_elems.all_of_type( type ) ) { return 1; } // not all of same type
918 : :
919 : 0 : const EntityHandle* conn = NULL;
920 : 0 : int num_verts = 0;
921 [ # # ][ # # ]: 0 : rval = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );CHKERRVAL( rval );
[ # # ]
922 : :
923 : 0 : *vertices_per_element = num_verts;
924 [ # # ]: 0 : *num_elements_in_block = (int)blo_elems.size();
925 : :
926 : 0 : return 0;
927 : : }

ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid, int* num_visible_elements, iMOAB_GlobalID* element_global_IDs,
                                      int* ranks, iMOAB_GlobalID* block_IDs )
{
    appData& data = context.appDatas[*pid];
#ifdef MOAB_HAVE_MPI
    ParallelComm* pco = context.pcomms[*pid];
#endif

    ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );CHKERRVAL( rval );

    int i = 0;

    for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
    {
#ifdef MOAB_HAVE_MPI
        rval = pco->get_owner( *eit, ranks[i] );CHKERRVAL( rval );

#else
        /* everything owned by task 0 */
        ranks[i] = 0;
#endif
    }

    for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
    {
        EntityHandle matMeshSet = *mit;
        Range elems;
        rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );

        int valMatTag;
        rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );CHKERRVAL( rval );

        for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
        {
            EntityHandle eh = *eit;
            int index       = data.primary_elems.index( eh );

            if( -1 == index ) { return 1; }

            if( -1 >= *num_visible_elements ) { return 1; }

            block_IDs[index] = valMatTag;
        }
    }

    return 0;
}

ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* connectivity_length,
                                             int* element_connectivity )
{
    appData& data                     = context.appDatas[*pid];
    std::map< int, int >& matMap      = data.matIndex;
    std::map< int, int >::iterator it = matMap.find( *global_block_ID );

    if( it == matMap.end() ) { return 1; }  // error in finding block with id

    int blockIndex          = matMap[*global_block_ID];
    EntityHandle matMeshSet = data.mat_sets[blockIndex];
    std::vector< EntityHandle > elems;

    ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );

    if( elems.empty() ) { return 1; }

    std::vector< EntityHandle > vconnect;
    rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );CHKERRVAL( rval );

    if( *connectivity_length != (int)vconnect.size() ) { return 1; }  // mismatched sizes

    for( int i = 0; i < *connectivity_length; i++ )
    {
        int inx = data.all_verts.index( vconnect[i] );

        if( -1 == inx ) { return 1; }  // error, vertex not in local range

        element_connectivity[i] = inx;
    }

    return 0;
}

ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid, iMOAB_LocalID* elem_index, int* connectivity_length,
                                      int* element_connectivity )
{
    appData& data = context.appDatas[*pid];
    assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );

    int num_nodes;
    const EntityHandle* conn;

    EntityHandle eh = data.primary_elems[*elem_index];

    ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );CHKERRVAL( rval );

    if( *connectivity_length < num_nodes ) { return 1; }  // wrong number of vertices

    for( int i = 0; i < num_nodes; i++ )
    {
        int index = data.all_verts.index( conn[i] );

        if( -1 == index ) { return 1; }

        element_connectivity[i] = index;
    }

    *connectivity_length = num_nodes;

    return 0;
}

ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* num_elements_in_block,
                                   int* element_ownership )
{
    std::map< int, int >& matMap = context.appDatas[*pid].matIndex;

    std::map< int, int >::iterator it = matMap.find( *global_block_ID );

    if( it == matMap.end() ) { return 1; }  // error in finding block with id

    int blockIndex          = matMap[*global_block_ID];
    EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
    Range elems;

    ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );

    if( elems.empty() ) { return 1; }

    if( *num_elements_in_block != (int)elems.size() ) { return 1; }  // bad memory allocation

    int i = 0;
#ifdef MOAB_HAVE_MPI
    ParallelComm* pco = context.pcomms[*pid];
#endif

    for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
    {
#ifdef MOAB_HAVE_MPI
        rval = pco->get_owner( *vit, element_ownership[i] );CHKERRVAL( rval );
#else
        element_ownership[i] = 0; /* owned by 0 */
#endif
    }

    return 0;
}

ErrCode iMOAB_GetElementID( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* num_elements_in_block,
                            iMOAB_GlobalID* global_element_ID, iMOAB_LocalID* local_element_ID )
{
    appData& data                = context.appDatas[*pid];
    std::map< int, int >& matMap = data.matIndex;

    std::map< int, int >::iterator it = matMap.find( *global_block_ID );

    if( it == matMap.end() ) { return 1; }  // error in finding block with id

    int blockIndex          = matMap[*global_block_ID];
    EntityHandle matMeshSet = data.mat_sets[blockIndex];
    Range elems;
    ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );

    if( elems.empty() ) { return 1; }

    if( *num_elements_in_block != (int)elems.size() ) { return 1; }  // bad memory allocation

    rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );CHKERRVAL( rval );

    // check that elems are among primary_elems in data
    for( int i = 0; i < *num_elements_in_block; i++ )
    {
        local_element_ID[i] = data.primary_elems.index( elems[i] );

        if( -1 == local_element_ID[i] ) { return 1; }  // error, not in local primary elements
    }

    return 0;
}

ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid, int* surface_BC_length, iMOAB_LocalID* local_element_ID,
                                     int* reference_surface_ID, int* boundary_condition_value )
{
    // we have to fill bc data for neumann sets
    ErrorCode rval;

    // it was counted above, in GetMeshInfo
    appData& data  = context.appDatas[*pid];
    int numNeuSets = (int)data.neu_sets.size();

    int index = 0;  // index [0, surface_BC_length) for the arrays returned

    for( int i = 0; i < numNeuSets; i++ )
    {
        Range subents;
        EntityHandle nset = data.neu_sets[i];
        rval = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );CHKERRVAL( rval );

        int neuVal;
        rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );CHKERRVAL( rval );

        for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
        {
            EntityHandle subent = *it;
            Range adjPrimaryEnts;
            rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );CHKERRVAL( rval );

            // get global id of the primary ents, and side number of the quad/subentity
            // this is moab ordering
            for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
            {
                EntityHandle primaryEnt = *pit;
                // get global id
                /*int globalID;
                rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
                if (MB_SUCCESS!=rval)
                    return 1;
                global_element_ID[index] = globalID;*/

                // get local element id
                local_element_ID[index] = data.primary_elems.index( primaryEnt );

                if( -1 == local_element_ID[index] ) { return 1; }  // did not find the element locally

                int side_number, sense, offset;
                rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );CHKERRVAL( rval );

                reference_surface_ID[index]     = side_number + 1;  // moab is from 0 to 5, it needs 1 to 6
                boundary_condition_value[index] = neuVal;
                index++;
            }
        }
    }

    if( index != *surface_BC_length ) { return 1; }  // error in array allocations

    return 0;
}

ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid, int* vertex_BC_length, iMOAB_LocalID* local_vertex_ID,
                                    int* boundary_condition_value )
{
    ErrorCode rval;

    // it was counted above, in GetMeshInfo
    appData& data   = context.appDatas[*pid];
    int numDiriSets = (int)data.diri_sets.size();
    int index       = 0;  // index [0, *vertex_BC_length) for the arrays returned

    for( int i = 0; i < numDiriSets; i++ )
    {
        Range verts;
        EntityHandle diset = data.diri_sets[i];
        rval = context.MBI->get_entities_by_dimension( diset, 0, verts );CHKERRVAL( rval );

        int diriVal;
        rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );CHKERRVAL( rval );

        for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
        {
            EntityHandle vt = *vit;
            /*int vgid;
            rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
            if (MB_SUCCESS!=rval)
                return 1;
            global_vertext_ID[index] = vgid;*/
            local_vertex_ID[index] = data.all_verts.index( vt );

            if( -1 == local_vertex_ID[index] ) { return 1; }  // vertex was not found

            boundary_condition_value[index] = diriVal;
            index++;
        }
    }

    if( *vertex_BC_length != index ) { return 1; }  // array allocation issue

    return 0;
}

ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* tag_type,
                                int* components_per_entity, int* tag_index, int tag_storage_name_length )
{
    // see if the tag already exists, and if yes, check the type and length
    if( *tag_type < 0 || *tag_type > 5 ) { return 1; }  // we have 6 types of tags supported so far

    DataType tagDataType;
    TagType tagType;
    void* defaultVal        = NULL;
    int* defInt             = new int[*components_per_entity];
    double* defDouble       = new double[*components_per_entity];
    EntityHandle* defHandle = new EntityHandle[*components_per_entity];

    for( int i = 0; i < *components_per_entity; i++ )
    {
        defInt[i]    = 0;
        defDouble[i] = -1e+10;
        defHandle[i] = (EntityHandle)0;
    }

    switch( *tag_type )
    {
        case 0:
            tagDataType = MB_TYPE_INTEGER;
            tagType     = MB_TAG_DENSE;
            defaultVal  = defInt;
            break;

        case 1:
            tagDataType = MB_TYPE_DOUBLE;
            tagType     = MB_TAG_DENSE;
            defaultVal  = defDouble;
            break;

        case 2:
            tagDataType = MB_TYPE_HANDLE;
            tagType     = MB_TAG_DENSE;
            defaultVal  = defHandle;
            break;

        case 3:
            tagDataType = MB_TYPE_INTEGER;
            tagType     = MB_TAG_SPARSE;
            defaultVal  = defInt;
            break;

        case 4:
            tagDataType = MB_TYPE_DOUBLE;
            tagType     = MB_TAG_SPARSE;
            defaultVal  = defDouble;
            break;

        case 5:
            tagDataType = MB_TYPE_HANDLE;
            tagType     = MB_TAG_SPARSE;
            defaultVal  = defHandle;
            break;

        default: {
            delete[] defInt;
            delete[] defDouble;
            delete[] defHandle;
            return 1;
        }  // error
    }

    std::string tag_name( tag_storage_name );

    if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
    { tag_name = tag_name.substr( 0, tag_storage_name_length ); }

    Tag tagHandle;
    ErrorCode rval = context.MBI->tag_get_handle( tag_name.c_str(), *components_per_entity, tagDataType, tagHandle,
                                                  tagType, defaultVal );

    if( MB_TAG_NOT_FOUND == rval )
    {
        rval = context.MBI->tag_get_handle( tag_name.c_str(), *components_per_entity, tagDataType, tagHandle,
                                            tagType | MB_TAG_CREAT, defaultVal );
    }

    // we don't need the default values anymore; avoid leaks
    delete[] defInt;
    delete[] defDouble;
    delete[] defHandle;

    appData& data = context.appDatas[*pid];

    if( MB_ALREADY_ALLOCATED == rval )
    {
        std::map< std::string, Tag >& mTags        = data.tagMap;
        std::map< std::string, Tag >::iterator mit = mTags.find( tag_name );

        if( mit == mTags.end() )
        {
            // add it to the map
            mTags[tag_name] = tagHandle;
            // push it to the list of tags, too
            *tag_index = (int)data.tagList.size();
            data.tagList.push_back( tagHandle );
        }

        return 0;  // OK, we found the tag, and it is stored in the tag map
    }
    else if( MB_SUCCESS == rval )
    {
        data.tagMap[tag_name] = tagHandle;
        *tag_index            = (int)data.tagList.size();
        data.tagList.push_back( tagHandle );
        return 0;
    }
    else
        return 1;  // some error, maybe the tag was not created
}
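
// Usage sketch: *tag_type encodes (data type, storage) as
// 0/1/2 = dense int/double/handle and 3/4/5 = sparse int/double/handle.
// A hypothetical example defining a dense, 1-component double tag:
#if 0
char tagname[] = "a2oTbot";  // made-up tag name
int tagType = 1, numComponents = 1, tagIndex;
iMOAB_DefineTagStorage( pid, tagname, &tagType, &numComponents, &tagIndex, strlen( tagname ) );
#endif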

ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
                                int* ent_type, int* tag_storage_data, int tag_storage_name_length )
{
    std::string tag_name( tag_storage_name );

    if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
    { tag_name = tag_name.substr( 0, tag_storage_name_length ); }

    appData& data = context.appDatas[*pid];

    if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; }  // tag not defined

    Tag tag = data.tagMap[tag_name];

    int tagLength  = 0;
    ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );

    DataType dtype;
    rval = context.MBI->tag_get_data_type( tag, dtype );

    if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER ) { return 1; }

    // set it on a subset of entities, based on type and length
    Range* ents_to_set;

    if( *ent_type == 0 )  // vertices
    { ents_to_set = &data.all_verts; }
    else  // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
    {
        ents_to_set = &data.primary_elems;
    }

    int nents_to_be_set = *num_tag_storage_length / tagLength;

    if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
    { return 1; }  // too many entities to be set, or too few

    // restrict the range; everything is contiguous; or not?

    // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
    // 1 ) );
    rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );CHKERRVAL( rval );

    return 0;  // no error
}

ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
                                int* ent_type, int* tag_storage_data, int tag_storage_name_length )
{
    ErrorCode rval;
    std::string tag_name( tag_storage_name );

    if( tag_storage_name_length < (int)tag_name.length() ) { tag_name = tag_name.substr( 0, tag_storage_name_length ); }

    appData& data = context.appDatas[*pid];

    if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; }  // tag not defined

    Tag tag = data.tagMap[tag_name];

    int tagLength = 0;
    rval = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );

    DataType dtype;
    rval = context.MBI->tag_get_data_type( tag, dtype );CHKERRVAL( rval );

    if( dtype != MB_TYPE_INTEGER ) { return 1; }

    // get it from a subset of entities, based on type and length
    Range* ents_to_get;

    if( *ent_type == 0 )  // vertices
    { ents_to_get = &data.all_verts; }
    else  // if (*ent_type == 1)
    {
        ents_to_get = &data.primary_elems;
    }

    int nents_to_get = *num_tag_storage_length / tagLength;

    if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
    { return 1; }  // too many entities to get, or too few

    // restrict the range; everything is contiguous; or not?
    // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
    // ) );

    rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );CHKERRVAL( rval );

    return 0;  // no error
}
1415 : 0 : ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
1416 : : int* ent_type, double* tag_storage_data, int tag_storage_name_length )
1417 : : {
1418 : : ErrorCode rval;
1419 : : // exactly the same code as for int tag :) maybe should check the type of tag too
1420 [ # # ]: 0 : std::string tag_name( tag_storage_name );
1421 : :
1422 [ # # ][ # # ]: 0 : if( tag_storage_name_length < (int)tag_name.length() ) { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
[ # # ]
1423 : :
1424 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1425 : :
1426 [ # # ][ # # ]: 0 : if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; } // tag not defined
[ # # ]
1427 : :
1428 [ # # ]: 0 : Tag tag = data.tagMap[tag_name];
1429 : :
1430 : 0 : int tagLength = 0;
1431 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );
1432 : :
1433 : : DataType dtype;
1434 [ # # ]: 0 : rval = context.MBI->tag_get_data_type( tag, dtype );
1435 : :
1436 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval || dtype != MB_TYPE_DOUBLE ) { return 1; }
1437 : :
1438 : : // set it on a subset of entities, based on type and length
1439 : 0 : Range* ents_to_set = NULL;
1440 : :
1441 [ # # ]: 0 : if( *ent_type == 0 ) // vertices
1442 : 0 : { ents_to_set = &data.all_verts; }
1443 [ # # ]: 0 : else if( *ent_type == 1 )
1444 : : {
1445 : 0 : ents_to_set = &data.primary_elems;
1446 : : }
1447 : :
1448 : 0 : int nents_to_be_set = *num_tag_storage_length / tagLength;
1449 : :
1450 [ # # ][ # # ]: 0 : if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 ) { return 1; } // too many entities to be set, or too few
[ # # ][ # # ]
1451 : :
1452 : : // restrict the range; everything is contiguous; or not?
1453 : : // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
1454 : : // 1 ) );
1455 : :
1456 [ # # ][ # # ]: 0 : rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );CHKERRVAL( rval );
1457 : :
1458 : 0 : return 0; // no error
1459 : : }
1460 : :
1461 : 0 : ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
1462 : : int* ent_type, double* tag_storage_data, int tag_storage_name_length )
1463 : : {
1464 : : ErrorCode rval;
1465 : : // exactly the same code, except tag type check
1466 [ # # ]: 0 : std::string tag_name( tag_storage_name );
1467 : :
1468 [ # # ][ # # ]: 0 : if( tag_storage_name_length < (int)tag_name.length() ) { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
[ # # ]
1469 : :
1470 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1471 : :
1472 [ # # ][ # # ]: 0 : if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; } // tag not defined
[ # # ]
1473 : :
1474 [ # # ]: 0 : Tag tag = data.tagMap[tag_name];
1475 : :
1476 : 0 : int tagLength = 0;
1477 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );
1478 : :
1479 : : DataType dtype;
1480 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data_type( tag, dtype );CHKERRVAL( rval );
1481 : :
1482 [ # # ]: 0 : if( dtype != MB_TYPE_DOUBLE ) { return 1; }
1483 : :
1484 : : // set it on a subset of entities, based on type and length
1485 : 0 : Range* ents_to_get = NULL;
1486 : :
1487 [ # # ]: 0 : if( *ent_type == 0 ) // vertices
1488 : 0 : { ents_to_get = &data.all_verts; }
1489 [ # # ]: 0 : else if( *ent_type == 1 )
1490 : : {
1491 : 0 : ents_to_get = &data.primary_elems;
1492 : : }
1493 : :
1494 : 0 : int nents_to_get = *num_tag_storage_length / tagLength;
1495 : :
1496 [ # # ][ # # ]: 0 : if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 ) { return 1; } // too many entities to get, or too few
[ # # ][ # # ]
1497 : :
1498 : : // restrict the range; everything is contiguous; or not?
1499 : :
1500 : : // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
1501 : : // ) );
1502 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );CHKERRVAL( rval );
1503 : :
1504 : 0 : return 0; // no error
1505 : : }
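// Companion sketch for the double variants (illustrative). Note that
// num_tag_storage_length counts individual values, and the routines divide it
// by the tag length to obtain the entity count; appID, nElems and the tag
// name are hypothetical.
#if 0
int entType = 1;                           // operate on primary elements
int nvals   = nElems;                      // length-1 tag: one double per element
std::vector< double > field( nvals, 0.0 );
ErrCode rc = iMOAB_SetDoubleTagStorage( &appID, "density", &nvals, &entType,
                                        &field[0], (int)strlen( "density" ) );
#endif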
1506 : :
1507 : 0 : ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
1508 : : {
1509 : : #ifdef MOAB_HAVE_MPI
1510 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1511 [ # # ]: 0 : Range ent_exchange;
1512 [ # # ]: 0 : std::vector< Tag > tags;
1513 : :
1514 [ # # ]: 0 : for( int i = 0; i < *num_tag; i++ )
1515 : : {
1516 [ # # ][ # # ]: 0 : if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() ) { return 1; } // error in tag index
[ # # ]
1517 : :
1518 [ # # ][ # # ]: 0 : tags.push_back( data.tagList[tag_indices[i]] );
1519 : : }
1520 : :
1521 [ # # ][ # # ]: 0 : if( *ent_type == 0 ) { ent_exchange = data.all_verts; }
1522 [ # # ]: 0 : else if( *ent_type == 1 )
1523 : : {
1524 [ # # ]: 0 : ent_exchange = data.primary_elems;
1525 : : }
1526 : : else
1527 : : {
1528 : 0 : return 1;
1529 : : } // unexpected type
1530 : :
1531 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
1532 : :
1533 [ # # ][ # # ]: 0 : ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );CHKERRVAL( rval );
1534 : :
1535 : : #else
1536 : : /* do nothing if serial */
1537 : : // just silence the warning
1538 : : // do not call sync tags in serial!
1539 : : int k = *pid + *num_tag + *tag_indices + *ent_type;
1540 : : k++;
1541 : : #endif
1542 : :
1543 : 0 : return 0;
1544 : : }
1545 : :
1546 : 0 : ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
1547 : : {
1548 : : #ifdef MOAB_HAVE_MPI
1549 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1550 [ # # ]: 0 : Range ent_exchange;
1551 : :
1552 [ # # ][ # # ]: 0 : if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() ) { return 1; } // error in tag index
[ # # ]
1553 : :
1554 [ # # ]: 0 : Tag tagh = data.tagList[*tag_index];
1555 : :
1556 [ # # ][ # # ]: 0 : if( *ent_type == 0 ) { ent_exchange = data.all_verts; }
1557 [ # # ]: 0 : else if( *ent_type == 1 )
1558 : : {
1559 [ # # ]: 0 : ent_exchange = data.primary_elems;
1560 : : }
1561 : : else
1562 : : {
1563 : 0 : return 1;
1564 : : } // unexpected type
1565 : :
1566 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
1567 : : // we could do different MPI_Op; do not bother now, we will call from fortran
1568 [ # # ][ # # ]: 0 : ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );CHKERRVAL( rval );
1569 : :
1570 : : #else
1571 : : /* do nothing if serial */
1572 : : // just silence the warning
1573 : : // do not call sync tags in serial!
1574 : : int k = *pid + *tag_index + *ent_type;
1575 : : k++; // dummy use, to avoid unused-variable warnings
1576 : : #endif
1577 : 0 : return 0;
1578 : : }
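// Sketch of how the two collective tag operations above are typically driven
// (illustrative; meaningful only in MPI runs, and tagIndex values come from
// earlier tag definitions; appID is hypothetical):
#if 0
int numTags = 1, tagIndex = 0, entType = 0;
iMOAB_SynchronizeTags( &appID, &numTags, &tagIndex, &entType ); // exchange on shared ents
iMOAB_ReduceTagsMax( &appID, &tagIndex, &entType );             // MPI_MAX over shared ents
#endif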
1579 : :
1580 : 0 : ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid, iMOAB_LocalID* local_index, int* num_adjacent_elements,
1581 : : iMOAB_LocalID* adjacent_element_IDs )
1582 : : {
1583 : : ErrorCode rval;
1584 : :
1585 : : // one neighbor for each subentity of dimension-1
1586 [ # # ]: 0 : MeshTopoUtil mtu( context.MBI );
1587 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1588 [ # # ]: 0 : EntityHandle eh = data.primary_elems[*local_index];
1589 [ # # ]: 0 : Range adjs;
1590 [ # # ][ # # ]: 0 : rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );CHKERRVAL( rval );
1591 : :
1592 [ # # ][ # # ]: 0 : if( *num_adjacent_elements < (int)adjs.size() ) { return 1; } // not dimensioned correctly
1593 : :
1594 [ # # ]: 0 : *num_adjacent_elements = (int)adjs.size();
1595 : :
1596 [ # # ]: 0 : for( int i = 0; i < *num_adjacent_elements; i++ )
1597 : : {
1598 [ # # ][ # # ]: 0 : adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
1599 : : }
1600 : :
1601 : 0 : return 0;
1602 : : }
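// Illustrative adjacency query against the routine above: the caller sizes the
// output array and passes the capacity; on return num_adjacent_elements holds
// the actual neighbor count. appID and the bound of 6 are hypothetical.
#if 0
iMOAB_LocalID elem = 0;
int numAdj = 6; // caller-supplied upper bound on dimension-1 bridge neighbors
iMOAB_LocalID neighbors[6];
ErrCode rc = iMOAB_GetNeighborElements( &appID, &elem, &numAdj, neighbors );
#endif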
1603 : :
1604 : : #if 0
1605 : :
1606 : : ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
1607 : : {
1608 : : return 0;
1609 : : }
1610 : :
1611 : : #endif
1612 : :
1613 : 0 : ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
1614 : : {
1615 : : ErrorCode rval;
1616 : 0 : appData& data = context.appDatas[*pid];
1617 : :
1618 [ # # ]: 0 : if( !data.local_verts.empty() ) // we should have no vertices in the app
1619 : 0 : { return 1; }
1620 : :
1621 : 0 : int nverts = *coords_len / *dim;
1622 : :
1623 [ # # ]: 0 : rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );CHKERRVAL( rval );
1624 : :
1625 [ # # ]: 0 : rval = context.MBI->add_entities( data.file_set, data.local_verts );CHKERRVAL( rval );
1626 : :
1627 : : // also add the vertices to the all_verts range
1628 : 0 : data.all_verts.merge( data.local_verts );
1629 : 0 : return 0;
1630 : : }
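// Minimal sketch: create two 3D vertices in an app that has none yet
// (illustrative; appID is hypothetical). coords_len counts doubles, so the
// routine computes nverts = coords_len / dim.
#if 0
double coords[6] = { 0., 0., 0., 1., 0., 0. };
int coordsLen = 6, dim = 3;
ErrCode rc = iMOAB_CreateVertices( &appID, &coordsLen, &dim, coords );
#endif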
1631 : :
1632 : 0 : ErrCode iMOAB_CreateElements( iMOAB_AppID pid, int* num_elem, int* type, int* num_nodes_per_element, int* connectivity,
1633 : : int* block_ID )
1634 : : {
1635 : : // Create elements
1636 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1637 : :
1638 : : ReadUtilIface* read_iface;
1639 [ # # ][ # # ]: 0 : ErrorCode rval = context.MBI->query_interface( read_iface );CHKERRVAL( rval );
1640 : :
1641 : 0 : EntityType mbtype = ( EntityType )( *type );
1642 : : EntityHandle actual_start_handle;
1643 : 0 : EntityHandle* array = NULL;
1644 [ # # ][ # # ]: 0 : rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );CHKERRVAL( rval );
1645 : :
1646 : : // fill up with actual connectivity from input; assume the vertices are in order, and start
1647 : : // vertex is the first in the current data vertex range
1648 [ # # ]: 0 : EntityHandle firstVertex = data.local_verts[0];
1649 : :
1650 [ # # ]: 0 : for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
1651 : : {
1652 : 0 : array[j] = connectivity[j] + firstVertex - 1;
1653 : : } // assumes connectivity uses a 1-based array (from Fortran, mostly)
1654 : :
1655 [ # # ]: 0 : Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
1656 : :
1657 [ # # ][ # # ]: 0 : rval = context.MBI->add_entities( data.file_set, new_elems );CHKERRVAL( rval );
1658 : :
1659 [ # # ]: 0 : data.primary_elems.merge( new_elems );
1660 : :
1661 : : // add to adjacency
1662 [ # # ][ # # ]: 0 : rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );CHKERRVAL( rval );
1663 : : // organize all new elements in a block with the given block ID; if the block set does
1664 : : // not exist, create a new mesh set
1665 [ # # ]: 0 : Range sets;
1666 : 0 : int set_no = *block_ID;
1667 : 0 : const void* setno_ptr = &set_no;
1668 : : rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
1669 [ # # ]: 0 : sets );
1670 : : EntityHandle block_set;
1671 : :
1672 [ # # ][ # # ]: 0 : if( MB_FAILURE == rval || sets.empty() )
[ # # ][ # # ]
1673 : : {
1674 : : // create a new set, with this block ID
1675 [ # # ][ # # ]: 0 : rval = context.MBI->create_meshset( MESHSET_SET, block_set );CHKERRVAL( rval );
1676 : :
1677 [ # # ][ # # ]: 0 : rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );CHKERRVAL( rval );
1678 : :
1679 : : // add the material set to file set
1680 [ # # ][ # # ]: 0 : rval = context.MBI->add_entities( data.file_set, &block_set, 1 );CHKERRVAL( rval );
1681 : : }
1682 : : else
1683 : : {
1684 [ # # ]: 0 : block_set = sets[0];
1685 : : } // first set is the one we want
1686 : :
1687 : : /// add the new ents to the block set
1688 [ # # ][ # # ]: 0 : rval = context.MBI->add_entities( block_set, new_elems );CHKERRVAL( rval );
1689 : :
1690 : 0 : return 0;
1691 : : }
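// Continuation of the vertex sketch above: build one quad on four previously
// created vertices (illustrative). Connectivity is 1-based, per the Fortran
// convention noted in the loop above, and type casts to a moab::EntityType.
#if 0
int numElem = 1, type = (int)moab::MBQUAD, nodesPerElem = 4, blockID = 100;
int conn[4] = { 1, 2, 3, 4 }; // 1-based indices into the app's vertices
ErrCode rc = iMOAB_CreateElements( &appID, &numElem, &type, &nodesPerElem,
                                   conn, &blockID );
#endif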
1692 : :
1693 : 0 : ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
1694 : : {
1695 : 0 : appData& data = context.appDatas[*pid];
1696 : 0 : data.num_global_vertices = *num_global_verts;
1697 : 0 : data.num_global_elements = *num_global_elems;
1698 : 0 : return 0;
1699 : : }
1700 : :
1701 : 0 : ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
1702 : : {
1703 : 0 : appData& data = context.appDatas[*pid];
1704 [ # # ]: 0 : if( NULL != num_global_verts ) { *num_global_verts = data.num_global_vertices; }
1705 [ # # ]: 0 : if( NULL != num_global_elems ) { *num_global_elems = data.num_global_elements; }
1706 : :
1707 : 0 : return 0;
1708 : : }
1709 : :
1710 : 0 : static void split_tag_names( std::string input_names, std::string& separator,
1711 : : std::vector< std::string >& list_tag_names )
1712 : : {
1713 : 0 : size_t pos = 0;
1714 [ # # ]: 0 : std::string token;
1715 [ # # ]: 0 : while( ( pos = input_names.find( separator ) ) != std::string::npos )
1716 : : {
1717 [ # # ][ # # ]: 0 : token = input_names.substr( 0, pos );
1718 [ # # ]: 0 : list_tag_names.push_back( token );
1719 : : // std::cout << token << std::endl;
1720 [ # # ]: 0 : input_names.erase( 0, pos + separator.length() );
1721 : : }
1722 [ # # ]: 0 : if( !input_names.empty() )
1723 : : {
1724 : : // if anything is left over, or the string did not end with the delimiter
1725 [ # # ]: 0 : list_tag_names.push_back( input_names );
1726 : : }
1727 : 0 : return;
1728 : : }
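// Behavior sketch for the helper above: "a;b;c" with separator ";" yields
// {"a","b","c"}; a trailing separator is optional, because any leftover text
// is pushed as a final token. The tag names here are hypothetical.
#if 0
std::vector< std::string > names;
std::string sep( ";" );
split_tag_names( "Temperature;Salinity;u", sep, names ); // names.size() == 3
#endif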
1729 : :
1730 : : #ifdef MOAB_HAVE_MPI
1731 : :
1732 : : // this makes sense only for parallel runs
1733 : 0 : ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
1734 : : {
1735 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1736 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
1737 : 0 : EntityHandle cset = data.file_set;
1738 : 0 : int dum_id = 0;
1739 : : ErrorCode rval;
1740 [ # # ][ # # ]: 0 : if( data.primary_elems.empty() )
1741 : : {
1742 : : // skip the actual resolve; assume the vertices are already distributed,
1743 : : // so there is no need to share them
1744 : : }
1745 : : else
1746 : : {
1747 : : // create an integer tag for resolving; maybe it can become a long tag in the future
1748 : : // (more than 2B vertices)
1749 : :
1750 : : Tag stag;
1751 : : rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
1752 [ # # ][ # # ]: 0 : &dum_id );CHKERRVAL( rval );
1753 : :
1754 [ # # ][ # # ]: 0 : if( *num_verts > (int)data.local_verts.size() ) { return 1; } // more markers passed than local vertices
1755 : :
1756 [ # # ][ # # ]: 0 : rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );CHKERRVAL( rval ); // assumes integer tag
1757 : :
1758 [ # # ][ # # ]: 0 : rval = pco->resolve_shared_ents( cset, -1, -1, &stag );CHKERRVAL( rval );
1759 : :
1760 [ # # ][ # # ]: 0 : rval = context.MBI->tag_delete( stag );CHKERRVAL( rval );
1761 : : }
1762 : : // provide partition tag equal to rank
1763 : : Tag part_tag;
1764 : 0 : dum_id = -1;
1765 : : rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
1766 [ # # ]: 0 : MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
1767 : :
1768 [ # # ][ # # ]: 0 : if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
[ # # ]
1769 : : {
1770 [ # # ]: 0 : std::cout << " can't get par part tag.\n";
1771 : 0 : return 1;
1772 : : }
1773 : :
1774 [ # # ]: 0 : int rank = pco->rank();
1775 [ # # ][ # # ]: 0 : rval = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );CHKERRVAL( rval );
1776 : :
1777 : 0 : return 0;
1778 : : }
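// Illustrative driver for the resolve above: marker carries one global id per
// local vertex, used to match shared vertices across ranks; appID and
// globalIds are hypothetical.
#if 0
int nverts = (int)globalIds.size(); // one integer marker per local vertex
ErrCode rc = iMOAB_ResolveSharedEntities( &appID, &nverts, &globalIds[0] );
#endif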
1779 : :
1780 : : // this assumes that this was not called before
1781 : 0 : ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
1782 : : {
1783 : : ErrorCode rval;
1784 : :
1785 : : // verify that a valid number of ghost layers was specified; if not, exit quickly
1786 [ # # ]: 0 : if( *num_ghost_layers <= 0 ) { return 0; } // nothing to do
1787 : :
1788 : 0 : appData& data = context.appDatas[*pid];
1789 : 0 : ParallelComm* pco = context.pcomms[*pid];
1790 : :
1791 : 0 : int addl_ents = 0; // maybe we should be passing this too; most of the time we do not need additional ents
1792 : : // collective call
1793 : : rval =
1794 [ # # ]: 0 : pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, *num_ghost_layers, addl_ents, true, true, &data.file_set );CHKERRVAL( rval );
1795 : :
1796 : : // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the
1797 : : // file set
1798 : 0 : int rc = iMOAB_UpdateMeshInfo( pid );
1799 : 0 : return rc;
1800 : : }
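// Typical ghosting request after the resolve (illustrative): one layer of 3D
// ghost cells across shared faces (bridge dimension 2); appID is hypothetical.
#if 0
int ghostDim = 3, numLayers = 1, bridgeDim = 2;
ErrCode rc = iMOAB_DetermineGhostEntities( &appID, &ghostDim, &numLayers, &bridgeDim );
#endif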
1801 : :
1802 : 0 : ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* global, MPI_Group* receivingGroup, int* rcompid, int* method )
1803 : : {
1804 : 0 : int ierr = 0;
1805 : 0 : ErrorCode rval = MB_SUCCESS;
1806 : : // appData & data = context.appDatas[*pid];
1807 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
1808 : :
1809 [ # # ]: 0 : MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab;
1810 : : // no need to pass it along
1811 : : // first see which processors are in each group; also get the sender group, from the sender
1812 : : // communicator
1813 : : MPI_Group senderGroup;
1814 [ # # ]: 0 : ierr = MPI_Comm_group( sender, &senderGroup );
1815 [ # # ]: 0 : if( ierr != 0 ) return 1;
1816 : :
1817 : : // instantiate the par comm graph
1818 : : // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
1819 : : // int coid2)
1820 : : ParCommGraph* cgraph =
1821 [ # # ][ # # ]: 0 : new ParCommGraph( *global, senderGroup, *receivingGroup, context.appDatas[*pid].global_id, *rcompid );
[ # # ]
1822 : : // we should search if we have another pcomm with the same comp ids in the list already
1823 : : // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
1824 [ # # ][ # # ]: 0 : context.appDatas[*pid].pgraph[-1] = cgraph;
1825 : :
1826 : 0 : int sender_rank = -1;
1827 [ # # ]: 0 : MPI_Comm_rank( sender, &sender_rank );
1828 : :
1829 : : // decide how to distribute elements to each processor
1830 : : // now, get the entities on local processor, and pack them into a buffer for various processors
1831 : : // we will do trivial partition: first get the total number of elements from "sender"
1832 [ # # ]: 0 : std::vector< int > number_elems_per_part;
1833 : : // how to distribute local elements to receiving tasks?
1834 : : // trivial partition: first compute the total number of elements that need to be sent
1835 [ # # ][ # # ]: 0 : Range owned = context.appDatas[*pid].owned_elems;
1836 [ # # ][ # # ]: 0 : if( owned.size() == 0 )
1837 : : {
1838 : : // then it must be vertices that we want to send
1839 [ # # ][ # # ]: 0 : owned = context.appDatas[*pid].local_verts;
1840 : : // we should have some vertices here
1841 : : }
1842 : :
1843 [ # # ]: 0 : if( *method == 0 ) // trivial partitioning, old method
1844 : : {
1845 [ # # ]: 0 : int local_owned_elem = (int)owned.size();
1846 [ # # ]: 0 : int size = pco->size();
1847 [ # # ]: 0 : int rank = pco->rank();
1848 [ # # ]: 0 : number_elems_per_part.resize( size ); //
1849 [ # # ]: 0 : number_elems_per_part[rank] = local_owned_elem;
1850 : : #if( MPI_VERSION >= 2 )
1851 : : // Use "in place" option
1852 [ # # ][ # # ]: 0 : ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INTEGER, sender );
1853 : : #else
1854 : : {
1855 : : std::vector< int > all_tmp( size );
1856 : : ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INTEGER, &all_tmp[0], 1, MPI_INTEGER, sender );
1857 : : number_elems_per_part = all_tmp;
1858 : : }
1859 : : #endif
1860 : :
1861 [ # # ]: 0 : if( ierr != 0 ) { return 1; }
1862 : :
1863 : : // every sender computes the trivial partition; it is cheap, and we need to send it anyway
1864 : : // to each receiver
1865 [ # # ][ # # ]: 0 : rval = cgraph->compute_trivial_partition( number_elems_per_part );CHKERRVAL( rval );
1866 : :
1867 [ # # ][ # # ]: 0 : rval = cgraph->send_graph( *global );CHKERRVAL( rval );
1868 : : }
1869 : : else // *method != 0, so it is either graph or geometric, parallel
1870 : : {
1871 : : // owned are the primary elements on this app
1872 [ # # ][ # # ]: 0 : rval = cgraph->compute_partition( pco, owned, *method );CHKERRVAL( rval );
1873 : :
1874 : : // basically, send the graph to the receiver side, with unblocking send
1875 [ # # ][ # # ]: 0 : rval = cgraph->send_graph_partition( pco, *global );CHKERRVAL( rval );
1876 : : }
1877 : : // pco is needed to pack, not for communication
1878 [ # # ][ # # ]: 0 : rval = cgraph->send_mesh_parts( *global, pco, owned );CHKERRVAL( rval );
1879 : :
1880 : : // mark for deletion
1881 [ # # ]: 0 : MPI_Group_free( &senderGroup );
1882 : 0 : return 0;
1883 : : }
1884 : :
1885 : 0 : ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* global, MPI_Group* sendingGroup, int* scompid )
1886 : : {
1887 [ # # ]: 0 : appData& data = context.appDatas[*pid];
1888 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
1889 [ # # ]: 0 : MPI_Comm receive = pco->comm();
1890 : 0 : EntityHandle local_set = data.file_set;
1891 : : ErrorCode rval;
1892 : :
1893 : : // first see which processors are in each group; get the receiver group from the receive
1894 : : // communicator
1895 : : MPI_Group receiverGroup;
1896 [ # # ]: 0 : int ierr = MPI_Comm_group( receive, &receiverGroup );
1897 [ # # ]: 0 : CHKIERRVAL( ierr );
1898 : :
1899 : : // instantiate the par comm graph
1900 : : ParCommGraph* cgraph =
1901 [ # # ][ # # ]: 0 : new ParCommGraph( *global, *sendingGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
[ # # ]
1902 : : // TODO we should search if we have another pcomm with the same comp ids in the list already
1903 : : // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
1904 [ # # ][ # # ]: 0 : context.appDatas[*pid].pgraph[-1] = cgraph;
1905 : :
1906 : 0 : int receiver_rank = -1;
1907 [ # # ]: 0 : MPI_Comm_rank( receive, &receiver_rank );
1908 : :
1909 : : // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
1910 : : // knows what data to expect
1911 [ # # ]: 0 : std::vector< int > pack_array;
1912 [ # # ][ # # ]: 0 : rval = cgraph->receive_comm_graph( *global, pco, pack_array );CHKERRVAL( rval );
1913 : :
1914 : : // senders in the joint comm for the current receiver
1915 [ # # ]: 0 : int current_receiver = cgraph->receiver( receiver_rank );
1916 : :
1917 [ # # ]: 0 : std::vector< int > senders_local;
1918 : 0 : size_t n = 0;
1919 : :
1920 [ # # ]: 0 : while( n < pack_array.size() )
1921 : : {
1922 [ # # ][ # # ]: 0 : if( current_receiver == pack_array[n] )
1923 : : {
1924 [ # # ][ # # ]: 0 : for( int j = 0; j < pack_array[n + 1]; j++ )
1925 : : {
1926 [ # # ][ # # ]: 0 : senders_local.push_back( pack_array[n + 2 + j] );
1927 : : }
1928 : :
1929 : 0 : break;
1930 : : }
1931 : :
1932 [ # # ]: 0 : n = n + 2 + pack_array[n + 1];
1933 : : }
1934 : :
1935 : : #ifdef VERBOSE
1936 : : std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
1937 : : << senders_local.size() << " tasks: ";
1938 : :
1939 : : for( int k = 0; k < (int)senders_local.size(); k++ )
1940 : : {
1941 : : std::cout << " " << senders_local[k];
1942 : : }
1943 : :
1944 : : std::cout << "\n";
1945 : : #endif
1946 : :
1947 [ # # ]: 0 : if( senders_local.empty() )
1948 [ # # ][ # # ]: 0 : { std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n"; }
[ # # ]
1949 [ # # ][ # # ]: 0 : rval = cgraph->receive_mesh( *global, pco, local_set, senders_local );CHKERRVAL( rval );
1950 : :
1951 : : // after we are done, we could merge vertices that come from different senders, but
1952 : : // have the same global id
1953 : : Tag idtag;
1954 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );CHKERRVAL( rval );
1955 : :
1956 : : // data.point_cloud = false;
1957 [ # # ]: 0 : Range local_ents;
1958 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_handle( local_set, local_ents );CHKERRVAL( rval );
1959 : :
1960 : : // do not do merge if point cloud
1961 [ # # ][ # # ]: 0 : if( !local_ents.all_of_type( MBVERTEX ) )
1962 : : {
1963 [ # # ]: 0 : if( (int)senders_local.size() >= 2 ) // need to remove duplicate vertices
1964 : : // that might come from different senders
1965 : : {
1966 : :
1967 [ # # ]: 0 : Range local_verts = local_ents.subset_by_type( MBVERTEX );
1968 [ # # ][ # # ]: 0 : Range local_elems = subtract( local_ents, local_verts );
1969 : :
1970 : : // remove from local set the vertices
1971 [ # # ][ # # ]: 0 : rval = context.MBI->remove_entities( local_set, local_verts );CHKERRVAL( rval );
1972 : :
1973 : : #ifdef VERBOSE
1974 : : std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
1975 : : #endif
1976 [ # # ][ # # ]: 0 : MergeMesh mm( context.MBI );
1977 : :
1978 [ # # ][ # # ]: 0 : rval = mm.merge_using_integer_tag( local_verts, idtag );CHKERRVAL( rval );
1979 : :
1980 [ # # ][ # # ]: 0 : Range new_verts; // local elems are local entities without vertices
1981 [ # # ][ # # ]: 0 : rval = context.MBI->get_connectivity( local_elems, new_verts );CHKERRVAL( rval );
1982 : :
1983 : : #ifdef VERBOSE
1984 : : std::cout << "after merging: new verts: " << new_verts.size() << "\n";
1985 : : #endif
1986 [ # # ][ # # ]: 0 : rval = context.MBI->add_entities( local_set, new_verts );CHKERRVAL( rval );
[ # # ]
1987 : : }
1988 : : }
1989 : : else
1990 : 0 : data.point_cloud = true;
1991 : :
1992 [ # # ]: 0 : if( !data.point_cloud )
1993 : : {
1994 : : // still need to resolve shared entities (in this case, vertices )
1995 [ # # ][ # # ]: 0 : rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );CHKERRVAL( rval );
1996 : : }
1997 : : else
1998 : : {
1999 : : // if partition tag exists, set it to current rank; just to make it visible in VisIt
2000 : : Tag densePartTag;
2001 [ # # ]: 0 : rval = context.MBI->tag_get_handle( "partition", densePartTag );
2002 [ # # ][ # # ]: 0 : if( NULL != densePartTag && MB_SUCCESS == rval )
2003 : : {
2004 [ # # ]: 0 : Range local_verts;
2005 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );CHKERRVAL( rval );
2006 [ # # ][ # # ]: 0 : std::vector< int > vals;
2007 [ # # ]: 0 : int rank = pco->rank();
2008 [ # # ][ # # ]: 0 : vals.resize( local_verts.size(), rank );
2009 [ # # ][ # # ]: 0 : rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );CHKERRVAL( rval );
[ # # ][ # # ]
2010 : : }
2011 : : }
2012 : : // set the parallel partition tag
2013 : : Tag part_tag;
2014 : 0 : int dum_id = -1;
2015 : : rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2016 [ # # ]: 0 : MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2017 : :
2018 [ # # ][ # # ]: 0 : if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
[ # # ]
2019 : : {
2020 [ # # ]: 0 : std::cout << " can't get par part tag.\n";
2021 : 0 : return 1;
2022 : : }
2023 : :
2024 [ # # ]: 0 : int rank = pco->rank();
2025 [ # # ][ # # ]: 0 : rval = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );CHKERRVAL( rval );
2026 : :
2027 : : // populate the mesh with current data info
2028 [ # # ]: 0 : ierr = iMOAB_UpdateMeshInfo( pid );
2029 [ # # ]: 0 : CHKIERRVAL( ierr );
2030 : :
2031 : : // mark for deletion
2032 [ # # ]: 0 : MPI_Group_free( &receiverGroup );
2033 : :
2034 : 0 : return 0;
2035 : : }
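// The two migration entry points above are used in matched pairs across the
// joint communicator (illustrative sketch; the comm/group objects and ids are
// assumed to be set up by the caller, and method 0 selects the trivial
// partition):
#if 0
int method = 0;
if( is_sender )
    iMOAB_SendMesh( &srcAppID, &jointComm, &recvGroup, &recvCompID, &method );
else
    iMOAB_ReceiveMesh( &dstAppID, &jointComm, &sendGroup, &sendCompID );
#endif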
2036 : :
2037 : 0 : ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id,
2038 : : int tag_storage_name_length )
2039 : : {
2040 : :
2041 [ # # ]: 0 : appData& data = context.appDatas[*pid];
2042 [ # # ]: 0 : std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2043 [ # # ][ # # ]: 0 : if( mt == data.pgraph.end() ) { return 1; }
2044 [ # # ]: 0 : ParCommGraph* cgraph = mt->second;
2045 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
2046 [ # # ]: 0 : Range owned = data.owned_elems;
2047 : : ErrorCode rval;
2048 : : EntityHandle cover_set;
2049 : :
2050 [ # # ][ # # ]: 0 : if( data.point_cloud ) { owned = data.local_verts; }
2051 : : // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2052 : : // for after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the
2053 : : // cover set from the intx remapper
2054 : : #ifdef MOAB_HAVE_TEMPESTREMAP
2055 : : if( data.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2056 : : {
2057 : : cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2058 : : rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
2059 : : // should still have only quads ?
2060 : : }
2061 : : #else
2062 : : // now, in case we are sending from intx between ocn and atm, we involve coverage set
2063 : : // how do I know if this receiver already participated in an intersection driven by coupler?
2064 : : // also, what if this was the "source" mesh in intx?
2065 : : // in that case, the elements might have been instantiated in the coverage set locally; the
2066 : : // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
2067 [ # # ]: 0 : cover_set = cgraph->get_cover_set(); // this will be non null only for intx app ?
2068 : :
2069 [ # # ]: 0 : if( 0 != cover_set )
2070 : : {
2071 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
2072 : : }
2073 : : #endif
2074 : :
2075 [ # # ]: 0 : std::string tag_name( tag_storage_name );
2076 : :
2077 [ # # ]: 0 : if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
2078 [ # # ][ # # ]: 0 : { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
2079 : : // basically, we assume the tags are already defined,
2080 : : // and we can get each tag just by its name
2081 : : // we assume that there are separators ";" between the tag names
2082 [ # # ]: 0 : std::vector< std::string > tagNames;
2083 [ # # ]: 0 : std::vector< Tag > tagHandles;
2084 [ # # ]: 0 : std::string separator( ";" );
2085 [ # # ][ # # ]: 0 : split_tag_names( tag_name, separator, tagNames );
2086 [ # # ]: 0 : for( size_t i = 0; i < tagNames.size(); i++ )
2087 : : {
2088 : : Tag tagHandle;
2089 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2090 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
2091 [ # # ]: 0 : tagHandles.push_back( tagHandle );
2092 : : }
2093 : :
2094 : : // pco is needed to pack, and for moab instance, not for communication!
2095 : : // still use nonblocking communication, over the joint comm
2096 [ # # ][ # # ]: 0 : rval = cgraph->send_tag_values( *join, pco, owned, tagHandles );CHKERRVAL( rval );
2097 : : // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2098 : :
2099 : 0 : return 0;
2100 : : }
2101 : :
2102 : 0 : ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id,
2103 : : int tag_storage_name_length )
2104 : : {
2105 [ # # ]: 0 : appData& data = context.appDatas[*pid];
2106 : :
2107 [ # # ]: 0 : std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2108 [ # # ][ # # ]: 0 : if( mt == data.pgraph.end() ) { return 1; }
2109 [ # # ]: 0 : ParCommGraph* cgraph = mt->second;
2110 : :
2111 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
2112 [ # # ]: 0 : Range owned = data.owned_elems;
2113 : :
2114 : : // how do I know if this receiver already participated in an intersection driven by coupler?
2115 : : // also, what if this was the "source" mesh in intx?
2116 : : // in that case, the elements might have been instantiated in the coverage set locally; the
2117 : : // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
2118 [ # # ]: 0 : EntityHandle cover_set = cgraph->get_cover_set();
2119 : : ErrorCode rval;
2120 [ # # ]: 0 : if( 0 != cover_set )
2121 : : {
2122 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
2123 : : }
2124 [ # # ][ # # ]: 0 : if( data.point_cloud ) { owned = data.local_verts; }
2125 : : // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
2126 : : // for after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the
2127 : : // cover set from the intx remapper
2128 : : #ifdef MOAB_HAVE_TEMPESTREMAP
2129 : : if( data.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2130 : : {
2131 : : cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2132 : : rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
2133 : : // should still have only quads ?
2134 : : }
2135 : : #endif
2136 : : /*
2137 : : * data_intx.remapper exists only on the intersection application;
2138 : : * how do we get it from here? ( we know the pid that receives, and the commgraph used by the
2139 : : * migrate mesh )
2140 : : */
2141 : :
2142 [ # # ]: 0 : std::string tag_name( tag_storage_name );
2143 : :
2144 [ # # ]: 0 : if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
2145 [ # # ][ # # ]: 0 : { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
2146 : :
2147 : : // we assume that there are separators ";" between the tag names
2148 [ # # ]: 0 : std::vector< std::string > tagNames;
2149 [ # # ]: 0 : std::vector< Tag > tagHandles;
2150 [ # # ]: 0 : std::string separator( ";" );
2151 [ # # ][ # # ]: 0 : split_tag_names( tag_name, separator, tagNames );
2152 [ # # ]: 0 : for( size_t i = 0; i < tagNames.size(); i++ )
2153 : : {
2154 : : Tag tagHandle;
2155 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
2156 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
2157 [ # # ]: 0 : tagHandles.push_back( tagHandle );
2158 : : }
2159 : :
2160 : : #ifdef VERBOSE
2161 : : std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
2162 : : << " and file set = " << ( data.file_set ) << "\n";
2163 : : #endif
2164 : : // pco is needed to pack, and for moab instance, not for communication!
2165 : : // still use nonblocking communication
2166 [ # # ][ # # ]: 0 : rval = cgraph->receive_tag_values( *join, pco, owned, tagHandles );CHKERRVAL( rval );
2167 : :
2168 : : #ifdef VERBOSE
2169 : : std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
2170 : : #endif
2171 : :
2172 : 0 : return 0;
2173 : : }
2174 : :
2175 : 0 : ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
2176 : : {
2177 : : // need first to find the pgraph that holds the information we need
2178 : : // this will be called on sender side only
2179 [ # # ]: 0 : appData& data = context.appDatas[*pid];
2180 [ # # ]: 0 : std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
2181 [ # # ][ # # ]: 0 : if( mt == data.pgraph.end() ) return 1; // error
2182 : :
2183 [ # # ][ # # ]: 0 : mt->second->release_send_buffers();
2184 : 0 : return 0;
2185 : : }
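// Tag migration mirrors mesh migration (illustrative sketch): tag names are
// concatenated with ";" and parsed by split_tag_names above; context_id -1
// selects the default graph created by SendMesh/ReceiveMesh, and the sender
// releases its nonblocking buffers afterwards. All names are hypothetical.
#if 0
int contextID = -1;
if( is_sender )
{
    iMOAB_SendElementTag( &srcAppID, "Temperature;Salinity", &jointComm,
                          &contextID, (int)strlen( "Temperature;Salinity" ) );
    iMOAB_FreeSenderBuffers( &srcAppID, &contextID );
}
else
    iMOAB_ReceiveElementTag( &dstAppID, "Temperature;Salinity", &jointComm,
                             &contextID, (int)strlen( "Temperature;Salinity" ) );
#endif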
2186 : :
2187 : : /**
2188 : : \brief compute a comm graph between 2 moab apps, based on ID matching
2189 : : <B>Operations:</B> Collective
2190 : : */
2191 : : //#define VERBOSE
2192 : 0 : ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1, iMOAB_AppID pid2, MPI_Comm* join, MPI_Group* group1,
2193 : : MPI_Group* group2, int* type1, int* type2, int* comp1, int* comp2 )
2194 : : {
2195 : 0 : ErrorCode rval = MB_SUCCESS;
2196 : 0 : int localRank = 0, numProcs = 1;
2197 [ # # ]: 0 : MPI_Comm_rank( *join, &localRank );
2198 [ # # ]: 0 : MPI_Comm_size( *join, &numProcs );
2199 : : // instantiate the par comm graph
2200 : : // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
2201 : : // int coid2)
2202 : 0 : ParCommGraph* cgraph = NULL;
2203 [ # # ][ # # ]: 0 : if( *pid1 >= 0 ) cgraph = new ParCommGraph( *join, *group1, *group2, *comp1, *comp2 );
[ # # ]
2204 : 0 : ParCommGraph* cgraph_rev = NULL;
2205 [ # # ][ # # ]: 0 : if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( *join, *group2, *group1, *comp2, *comp1 );
[ # # ]
2206 : : // we should search if we have another pcomm with the same comp ids in the list already
2207 : : // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
2208 [ # # ][ # # ]: 0 : if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph; // the context will be the other comp
[ # # ]
2209 [ # # ][ # # ]: 0 : if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev; // from 2 to 1
[ # # ]
2210 : : // each model has a list of global ids that will need to be sent by gs to rendezvous the other
2211 : : // model on the joint comm
2212 [ # # ]: 0 : TupleList TLcomp1;
2213 [ # # ]: 0 : TLcomp1.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2214 [ # # ]: 0 : TupleList TLcomp2;
2215 [ # # ]: 0 : TLcomp2.initialize( 2, 0, 0, 0, 0 ); // to proc, marker
2216 : : // will push_back a new tuple, if needed
2217 : :
2218 [ # # ]: 0 : TLcomp1.enableWriteAccess();
2219 : :
2220 : : // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
2221 : : Tag tagType1;
2222 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", tagType1 );CHKERRVAL( rval );
2223 : : // find the values on first cell
2224 : 0 : int lenTagType1 = 1;
2225 [ # # ]: 0 : if( tagType1 )
2226 : : {
2227 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_length( tagType1, lenTagType1 );CHKERRVAL( rval ); // usually it is 16
2228 : : }
2229 : : Tag tagType2;
2230 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_handle( "GLOBAL_ID", tagType2 );CHKERRVAL( rval );
2231 : :
2232 [ # # ]: 0 : std::vector< int > valuesComp1;
2233 : : // populate first tuple
2234 [ # # ]: 0 : if( *pid1 >= 0 )
2235 : : {
2236 [ # # ]: 0 : appData& data1 = context.appDatas[*pid1];
2237 : 0 : EntityHandle fset1 = data1.file_set;
2238 : : // in case of tempest remap, get the coverage set
2239 : : #ifdef MOAB_HAVE_TEMPESTREMAP
2240 : : if( data1.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2241 : : {
2242 : : fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2243 : : // should still have only quads ?
2244 : : }
2245 : : #endif
2246 [ # # ]: 0 : Range ents_of_interest;
2247 [ # # ]: 0 : if( *type1 == 1 )
2248 : : {
2249 [ # # ]: 0 : assert( tagType1 );
2250 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );CHKERRVAL( rval );
2251 [ # # ][ # # ]: 0 : valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
2252 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data( tagType1, ents_of_interest, &valuesComp1[0] );CHKERRVAL( rval );
[ # # ]
2253 : : }
2254 [ # # ]: 0 : else if( *type1 == 2 )
2255 : : {
2256 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );CHKERRVAL( rval );
2257 [ # # ][ # # ]: 0 : valuesComp1.resize( ents_of_interest.size() );
2258 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );CHKERRVAL( rval ); // just global ids
[ # # ]
2259 : : }
2260 [ # # ]: 0 : else if( *type1 == 3 ) // for FV meshes, just get the global id of cell
2261 : : {
2262 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );CHKERRVAL( rval );
2263 [ # # ][ # # ]: 0 : valuesComp1.resize( ents_of_interest.size() );
2264 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );CHKERRVAL( rval ); // just global ids
[ # # ]
2265 : : }
2266 : : else
2267 : : {
2268 : 0 : CHKERRVAL( MB_FAILURE ); // we know only type 1 or 2 or 3
2269 : : }
2270 : : // now fill the tuple list with info and markers
2271 : : // because we will send only the ids, order and compress the list
2272 [ # # ][ # # ]: 0 : std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
2273 [ # # ]: 0 : TLcomp1.resize( uniq.size() );
2274 [ # # ][ # # ]: 0 : for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
[ # # ]
2275 : : {
2276 : : // to proc, marker, element local index, index in el
2277 [ # # ]: 0 : int marker = *sit;
2278 : 0 : int to_proc = marker % numProcs;
2279 [ # # ]: 0 : int n = TLcomp1.get_n();
2280 : 0 : TLcomp1.vi_wr[2 * n] = to_proc; // send to processor
2281 : 0 : TLcomp1.vi_wr[2 * n + 1] = marker;
2282 [ # # ]: 0 : TLcomp1.inc_n();
2283 : 0 : }
2284 : : }
2285 : :
2286 [ # # ]: 0 : ProcConfig pc( *join ); // proc config does the crystal router
2287 : : pc.crystal_router()->gs_transfer( 1, TLcomp1,
2288 [ # # ][ # # ]: 0 : 0 ); // communication towards joint tasks, with markers
2289 : : // sort by value (key 1)
2290 : : #ifdef VERBOSE
2291 : : std::stringstream ff1;
2292 : : ff1 << "TLcomp1_" << localRank << ".txt";
2293 : : TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2294 : : #endif
2295 [ # # ]: 0 : moab::TupleList::buffer sort_buffer;
2296 [ # # ][ # # ]: 0 : sort_buffer.buffer_init( TLcomp1.get_n() );
2297 [ # # ]: 0 : TLcomp1.sort( 1, &sort_buffer );
2298 [ # # ]: 0 : sort_buffer.reset();
2299 : : #ifdef VERBOSE
2300 : : // after sorting
2301 : : TLcomp1.print_to_file( ff1.str().c_str() ); // it will append!
2302 : : #endif
2303 : : // do the same, for the other component, number2, with type2
2304 : : // start copy
2305 [ # # ]: 0 : TLcomp2.enableWriteAccess();
2306 : : // populate second tuple
2307 [ # # ]: 0 : std::vector< int > valuesComp2;
2308 [ # # ]: 0 : if( *pid2 >= 0 )
2309 : : {
2310 [ # # ]: 0 : appData& data2 = context.appDatas[*pid2];
2311 : 0 : EntityHandle fset2 = data2.file_set;
2312 : : // in case of tempest remap, get the coverage set
2313 : : #ifdef MOAB_HAVE_TEMPESTREMAP
2314 : : if( data2.tempestData.remapper != NULL ) // this is the case this is part of intx;;
2315 : : {
2316 : : fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2317 : : // should still have only quads ?
2318 : : }
2319 : : #endif
2320 : :
2321 [ # # ]: 0 : Range ents_of_interest;
2322 [ # # ]: 0 : if( *type2 == 1 )
2323 : : {
2324 [ # # ]: 0 : assert( tagType1 );
2325 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );CHKERRVAL( rval );
2326 [ # # ][ # # ]: 0 : valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
2327 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data( tagType1, ents_of_interest, &valuesComp2[0] );CHKERRVAL( rval );
[ # # ]
2328 : : }
2329 [ # # ]: 0 : else if( *type2 == 2 )
2330 : : {
2331 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );CHKERRVAL( rval );
2332 [ # # ][ # # ]: 0 : valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2333 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );CHKERRVAL( rval ); // just global ids
[ # # ]
2334 : : }
2335 [ # # ]: 0 : else if( *type2 == 3 )
2336 : : {
2337 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );CHKERRVAL( rval );
2338 [ # # ][ # # ]: 0 : valuesComp2.resize( ents_of_interest.size() ); // stride is 1 here
2339 [ # # ][ # # ]: 0 : rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );CHKERRVAL( rval ); // just global ids
[ # # ]
2340 : : }
2341 : : else
2342 : : {
2343 : 0 : CHKERRVAL( MB_FAILURE ); // we know only type 1 or 2
2344 : : }
2345 : : // now fill the tuple list with info and markers
2346 [ # # ][ # # ]: 0 : std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
2347 [ # # ]: 0 : TLcomp2.resize( uniq.size() );
2348 [ # # ][ # # ]: 0 : for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
[ # # ]
2349 : : {
2350 : : // to proc, marker, element local index, index in el
2351 [ # # ]: 0 : int marker = *sit;
2352 : 0 : int to_proc = marker % numProcs;
2353 [ # # ]: 0 : int n = TLcomp2.get_n();
2354 : 0 : TLcomp2.vi_wr[2 * n] = to_proc; // send to processor
2355 : 0 : TLcomp2.vi_wr[2 * n + 1] = marker;
2356 [ # # ]: 0 : TLcomp2.inc_n();
2357 : 0 : }
2358 : : }
2359 : : pc.crystal_router()->gs_transfer( 1, TLcomp2,
2360 [ # # ][ # # ]: 0 : 0 ); // communication towards joint tasks, with markers
2361 : : // sort by value (key 1)
2362 : : #ifdef VERBOSE
2363 : : std::stringstream ff2;
2364 : : ff2 << "TLcomp2_" << localRank << ".txt";
2365 : : TLcomp2.print_to_file( ff2.str().c_str() );
2366 : : #endif
2367 [ # # ][ # # ]: 0 : sort_buffer.buffer_reserve( TLcomp2.get_n() );
2368 [ # # ]: 0 : TLcomp2.sort( 1, &sort_buffer );
2369 [ # # ]: 0 : sort_buffer.reset();
2370 : : // end copy
2371 : : #ifdef VERBOSE
2372 : : TLcomp2.print_to_file( ff2.str().c_str() );
2373 : : #endif
2374 : : // need to send back the info, from the rendezvous point, for each of the values
2375 : : /* so go over each value, on the local process in the joint communicator;
2376 : :
2377 : : we now have to send back the info needed for communication;
2378 : : loop in sync over both TLcomp1 and TLcomp2, on the local process;
2379 : : so, build new tuple lists for the synchronous communication, and
2380 : : populate them at the same time, based on the marker, which is indexed
2381 : : */
2382 : :
2383 [ # # ]: 0 : TupleList TLBackToComp1;
2384 [ # # ]: 0 : TLBackToComp1.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc on comp2,
2385 [ # # ]: 0 : TLBackToComp1.enableWriteAccess();
2386 : :
2387 [ # # ]: 0 : TupleList TLBackToComp2;
2388 [ # # ]: 0 : TLBackToComp2.initialize( 3, 0, 0, 0, 0 ); // to proc, marker, from proc,
2389 [ # # ]: 0 : TLBackToComp2.enableWriteAccess();
2390 : :
2391 [ # # ]: 0 : int n1 = TLcomp1.get_n();
2392 [ # # ]: 0 : int n2 = TLcomp2.get_n();
2393 : :
2394 : 0 : int indexInTLComp1 = 0;
2395 : 0 : int indexInTLComp2 = 0; // advance both, according to the marker
2396 [ # # ][ # # ]: 0 : if( n1 > 0 && n2 > 0 )
2397 : : {
2398 : :
2399 [ # # ][ # # ]: 0 : while( indexInTLComp1 < n1 && indexInTLComp2 < n2 ) // if any is over, we are done
2400 : : {
2401 : 0 : int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
2402 : 0 : int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
2403 [ # # ]: 0 : if( currentValue1 < currentValue2 )
2404 : : {
2405 : : // we have a big problem; basically, we are saying that
2406 : : // dof currentValue is on one model and not on the other
2407 : : // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
2408 : 0 : indexInTLComp1++;
2409 : 0 : continue;
2410 : : }
2411 [ # # ]: 0 : if( currentValue1 > currentValue2 )
2412 : : {
2413 : : // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
2414 : 0 : indexInTLComp2++;
2415 : 0 : continue;
2416 : : }
2417 : 0 : int size1 = 1;
2418 : 0 : int size2 = 1;
2419 [ # # ][ # # ]: 0 : while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
2420 : 0 : size1++;
2421 [ # # ][ # # ]: 0 : while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
2422 : 0 : size2++;
2423 : : // must be found in both lists, find the start and end indices
2424 [ # # ]: 0 : for( int i1 = 0; i1 < size1; i1++ )
2425 : : {
2426 [ # # ]: 0 : for( int i2 = 0; i2 < size2; i2++ )
2427 : : {
2428 : : // send the info back to components
2429 [ # # ]: 0 : int n = TLBackToComp1.get_n();
2430 [ # # ]: 0 : TLBackToComp1.reserve();
2431 : 0 : TLBackToComp1.vi_wr[3 * n] =
2432 : 0 : TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // send back to the proc marker
2433 : : // came from, info from comp2
2434 : 0 : TLBackToComp1.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
2435 : 0 : TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // from proc on comp2
2436 [ # # ]: 0 : n = TLBackToComp2.get_n();
2437 [ # # ]: 0 : TLBackToComp2.reserve();
2438 : 0 : TLBackToComp2.vi_wr[3 * n] =
2439 : 0 : TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )]; // send back info to original
2440 : 0 : TLBackToComp2.vi_wr[3 * n + 1] = currentValue1; // initial value (resend?)
2441 : 0 : TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )]; // from proc on comp1
2442 : : // what if there are repeated markers in TLcomp2? increase just index2
2443 : : }
2444 : : }
2445 : 0 : indexInTLComp1 += size1;
2446 : 0 : indexInTLComp2 += size2;
2447 : : }
2448 : : }
2449 [ # # ][ # # ]: 0 : pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 ); // communication towards original tasks, with info about
2450 [ # # ][ # # ]: 0 : pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
2451 : :
2452 [ # # ]: 0 : if( *pid1 >= 0 )
2453 : : {
2454 : : // we are on original comp 1 tasks
2455 : : // before ordering
2456 : : // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
2457 : : // processors it communicates with
2458 : : #ifdef VERBOSE
2459 : : std::stringstream f1;
2460 : : f1 << "TLBack1_" << localRank << ".txt";
2461 : : TLBackToComp1.print_to_file( f1.str().c_str() );
2462 : : #endif
2463 [ # # ][ # # ]: 0 : sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
2464 [ # # ]: 0 : TLBackToComp1.sort( 1, &sort_buffer );
2465 [ # # ]: 0 : sort_buffer.reset();
2466 : : #ifdef VERBOSE
2467 : : TLBackToComp1.print_to_file( f1.str().c_str() );
2468 : : #endif
2469 : : // so we are now on pid1; we now know where each marker has to go
2470 : : // add a new method to ParCommGraph, to set up the involved_IDs_map
2471 [ # # ]: 0 : cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
2472 : : }
2473 [ # # ]: 0 : if( *pid2 >= 0 )
2474 : : {
2475 : : // we are on original comp 2 tasks
2476 : : // before ordering
2477 : : // now for each value in TLBackToComp2.vi_rd[3*i+1], on the current proc, we know the
2478 : : // processors it communicates with
2479 : : #ifdef VERBOSE
2480 : : std::stringstream f2;
2481 : : f2 << "TLBack2_" << localRank << ".txt";
2482 : : TLBackToComp2.print_to_file( f2.str().c_str() );
2483 : : #endif
2484 [ # # ][ # # ]: 0 : sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
2485 [ # # ]: 0 : TLBackToComp2.sort( 2, &sort_buffer );
2486 [ # # ]: 0 : sort_buffer.reset();
2487 : : #ifdef VERBOSE
2488 : : TLBackToComp2.print_to_file( f2.str().c_str() );
2489 : : #endif
2490 [ # # ]: 0 : cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
2491 : : //
2492 : : }
2493 : 0 : return 0;
2494 : : }
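// Collective usage sketch for the ID-matching graph above (illustrative):
// every task in the joint comm participates, and a pid is negative on tasks
// where the corresponding app is not defined. Per the branches above, type 1
// matches GLOBAL_DOFS on quads, type 2 matches GLOBAL_ID on vertices (point
// cloud), and type 3 matches GLOBAL_ID on 2D cells (FV). Names are
// hypothetical.
#if 0
int typePhys = 2, typeOcn = 3, compPhys = 5, compOcn = 17;
iMOAB_ComputeCommGraph( &physAppID, &ocnAppID, &jointComm, &physGroup, &ocnGroup,
                        &typePhys, &typeOcn, &compPhys, &compOcn );
#endif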
2495 : : //#undef VERBOSE
2496 : :
2497 : 0 : ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
2498 : : {
2499 [ # # ]: 0 : appData& data = context.appDatas[*pid];
2500 [ # # ]: 0 : ParallelComm* pco = context.pcomms[*pid];
2501 : : // collapse vertices and transform cells into triangles/quads/polys
2502 : : // tags we care about: area, frac, global id
2503 [ # # ]: 0 : std::vector< Tag > tagsList;
2504 : : Tag tag;
2505 [ # # ]: 0 : ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
2506 [ # # ][ # # ]: 0 : if( !tag || rval != MB_SUCCESS ) return 1; // fatal error, abort
2507 [ # # ]: 0 : tagsList.push_back( tag );
2508 [ # # ]: 0 : rval = context.MBI->tag_get_handle( "area", tag );
2509 [ # # ][ # # ]: 0 : if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
[ # # ]
2510 [ # # ]: 0 : rval = context.MBI->tag_get_handle( "frac", tag );
2511 [ # # ][ # # ]: 0 : if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
[ # # ]
2512 : 0 : double tol = 1.0e-9;
2513 [ # # ][ # # ]: 0 : rval = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );CHKERRVAL( rval );
2514 : :
2515 : : // clean material sets of cells that were deleted
2516 : : rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
2517 [ # # ][ # # ]: 0 : data.mat_sets, Interface::UNION );CHKERRVAL( rval );
2518 : :
2519 [ # # ][ # # ]: 0 : if( !data.mat_sets.empty() )
2520 : : {
2521 [ # # ]: 0 : EntityHandle matSet = data.mat_sets[0];
2522 [ # # ]: 0 : Range elems;
2523 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );CHKERRVAL( rval );
2524 [ # # ][ # # ]: 0 : rval = context.MBI->remove_entities( matSet, elems );CHKERRVAL( rval );
2525 : : // put back only cells from data.file_set
2526 [ # # ]: 0 : elems.clear();
2527 [ # # ][ # # ]: 0 : rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );CHKERRVAL( rval );
2528 [ # # ][ # # ]: 0 : rval = context.MBI->add_entities( matSet, elems );CHKERRVAL( rval );
[ # # ]
2529 : : }
2530 [ # # ]: 0 : int ierr = iMOAB_UpdateMeshInfo( pid );
2531 [ # # ]: 0 : if( ierr > 0 ) return ierr;
2532 [ # # ]: 0 : ParallelMergeMesh pmm( pco, tol );
2533 : : rval = pmm.merge( data.file_set,
2534 : : /* do not do local merge*/ false,
2535 [ # # ][ # # ]: 0 : /* 2d cells*/ 2 );CHKERRVAL( rval );
2536 : :
2537 : : // assign global ids only for vertices, cells have them fine
2538 [ # # ][ # # ]: 0 : rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );CHKERRVAL( rval );
2539 : :
2540 : : // set the partition tag on the file set
2541 : : Tag part_tag;
2542 : 0 : int dum_id = -1;
2543 : : rval = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
2544 [ # # ]: 0 : MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
2545 : :
2546 [ # # ][ # # ]: 0 : if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
[ # # ]
2547 : : {
2548 [ # # ]: 0 : std::cout << " can't get par part tag.\n";
2549 : 0 : return 1;
2550 : : }
2551 : :
2552 [ # # ]: 0 : int rank = pco->rank();
2553 [ # # ][ # # ]: 0 : rval = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );CHKERRVAL( rval );
2554 : :
2555 : 0 : return 0;
2556 : : }
2557 : :
2558 : : #ifdef MOAB_HAVE_TEMPESTREMAP
2559 : :
2560 : : // this call must be collective on the joint communicator
2561 : : // intersection tasks on the coupler will need to send to the component tasks the list of
2562 : : // element ids that are relevant: those that intersected some of the target elements in the
2563 : : // intersection (the target elements themselves are not needed here)
2565 : : ErrCode iMOAB_CoverageGraph( MPI_Comm* join, iMOAB_AppID pid_src, iMOAB_AppID pid_migr, iMOAB_AppID pid_intx,
2566 : : int* context_id )
2567 : : {
2568 : : // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
2569 : : // exchange
2570 : : ErrorCode rval;
2571 : : std::vector< int > srcSenders;
2572 : : std::vector< int > receivers;
2573 : : ParCommGraph* sendGraph = NULL;
2574 : : int ierr;
2575 : : int default_context_id = -1;
2576 : :
2577 : : // First, find the original graph between PE sets
2578 : : // And based on this one, we will build the newly modified one for coverage
2579 : : if( *pid_src >= 0 )
2580 : : {
2581 : : sendGraph = context.appDatas[*pid_src].pgraph[default_context_id]; // maybe check if it does not exist
2582 : :
2583 : : // report the sender and receiver tasks in the joint comm
2584 : : srcSenders = sendGraph->senders();
2585 : : receivers = sendGraph->receivers();
2586 : : #ifdef VERBOSE
2587 : : std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
2588 : : #endif
2589 : : }
2590 : :
2591 : : ParCommGraph* recvGraph = NULL; // will be non null on receiver tasks (intx tasks)
2592 : : if( *pid_migr >= 0 )
2593 : : {
2594 : : // find the original one
2595 : : recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
2596 : : // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
2597 : : srcSenders = recvGraph->senders();
2598 : : receivers = recvGraph->receivers();
2599 : : #ifdef VERBOSE
2600 : : std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
2601 : : #endif
2602 : : }
2603 : :
2604 : : // loop over pid_intx elements, to see which original processors in the joint comm have sent
2605 : : // the coverage mesh; if we are on intx tasks, send coverage info towards the original
2606 : : // component tasks, about the needed cells
2607 : : TupleList TLcovIDs;
2608 : : TLcovIDs.initialize( 2, 0, 0, 0, 0 ); // to proc, GLOBAL ID; estimate about 100 IDs to be sent
2609 : : // will push_back a new tuple, if needed
2610 : : TLcovIDs.enableWriteAccess();
2611 : : // the crystal router will send ID cell to the original source, on the component task
2612 : : // if we are on intx tasks, loop over all intx elements and
2613 : :
2614 : : int currentRankInJointComm = -1;
2615 : : ierr = MPI_Comm_rank( *join, ¤tRankInJointComm );
2616 : : CHKIERRVAL( ierr );
2617 : :
2618 : : // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
2619 : : // need to send information towards component tasks
2620 : : if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
2621 : : receivers.end() ) // we are on receivers tasks, we can request intx info
2622 : : {
2623 : : // find the pcomm for the intx pid
2624 : : if( *pid_intx >= (int)context.appDatas.size() ) return 1;
2625 : :
2626 : : appData& dataIntx = context.appDatas[*pid_intx];
2627 : : Tag parentTag, orgSendProcTag;
2628 : :
2629 : : rval = context.MBI->tag_get_handle( "SourceParent", parentTag );CHKERRVAL( rval ); // global id of the blue, source element
2630 : : if( !parentTag ) return 1; // fatal error, abort
2631 : :
2632 : : rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );CHKERRVAL( rval );
2633 : : if( !orgSendProcTag ) return 1; // fatal error, abort
2634 : :
2635 : : // find the file set, red parents for intx cells, and put them in tuples
2636 : : EntityHandle intxSet = dataIntx.file_set;
2637 : : Range cells;
2638 : : // get all entities from the set, and look at their RedParent
2639 : : rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );CHKERRVAL( rval );
2640 : :
2641 : : std::map< int, std::set< int > > idsFromProcs; // send that info back to enhance parCommGraph cache
2642 : : for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
2643 : : {
2644 : : EntityHandle intx_cell = *it;
2645 : : int gidCell, origProc; // look at receivers
2646 : :
2647 : : rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );CHKERRVAL( rval );
2648 : : rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );CHKERRVAL( rval );
2649 : : // we have augmented the overlap set with ghost cells; in that case, the
2650 : : // orig_sending_processor tag is not set, so it will be -1
2651 : : if( origProc < 0 ) continue;
2652 : :
2653 : : std::set< int >& setInts = idsFromProcs[origProc];
2654 : : setInts.insert( gidCell );
2655 : : }
2656 : :
2657 : : // if we have no intx cells, it means we are on a point cloud; as a quick fix, just use
2658 : : // all cells from the coverage set
2659 : : if( cells.empty() )
2660 : : {
2661 : : // get coverage set
2662 : : assert( *pid_intx >= 0 );
2663 : : appData& dataIntx = context.appDatas[*pid_intx];
2664 : : EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2665 : :
2666 : : // get all cells from coverage set
2667 : : Tag gidTag;
2668 : : rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );CHKERRVAL( rval );
2669 : : rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );CHKERRVAL( rval );
2670 : : // look at their orig_sending_processor
2671 : : for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
2672 : : {
2673 : : EntityHandle covCell = *it;
2674 : : int gidCell, origProc; // look at the original sending processor
2675 : :
2676 : : rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );CHKERRVAL( rval );
2677 : : rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );CHKERRVAL( rval );
2678 : : // we have augmented the overlap set with ghost cells; in that case, the
2679 : : // orig_sending_processor tag is not set, so it will be -1
2680 : : if( origProc < 0 ) // it should not be negative here, but guard against it anyway
2681 : : continue;
2682 : : std::set< int >& setInts = idsFromProcs[origProc];
2683 : : setInts.insert( gidCell );
2684 : : }
2685 : : }
2686 : :
2687 : : #ifdef VERBOSE
2688 : : std::ofstream dbfile;
2689 : : std::stringstream outf;
2690 : : outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
2691 : : dbfile.open( outf.str().c_str() );
2692 : : dbfile << "Writing this to a file.\n";
2693 : :
2694 : : dbfile << " map size:" << idsFromProcs.size()
2695 : : << std::endl; // on the receiver side, this shows how much data to receive
2696 : : // from the sender (how many ids, and how much tag data later); we need it to size up
2697 : : // the receiver buffer. Then arrange the ids in tuples, using map iterators to send them.
2698 : : for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
2699 : : {
2700 : : std::set< int >& setIds = mt->second;
2701 : : dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
2702 : : int counter = 0;
2703 : : for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
2704 : : {
2705 : : int valueID = *st;
2706 : : dbfile << " " << valueID;
2707 : : counter++;
2708 : : if( counter % 10 == 0 ) dbfile << "\n";
2709 : : }
2710 : : dbfile << "\n";
2711 : : }
2712 : : dbfile.close();
2713 : : #endif
2714 : : if( NULL != recvGraph )
2715 : : {
2716 : : ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph ); // just copy
2717 : : recvGraph1->set_context_id( *context_id );
2718 : : recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
2719 : : // this par comm graph will need to use the coverage set
2720 : : // so we are for sure on intx pes (the receiver is the coupler mesh)
2721 : : assert( *pid_intx >= 0 );
2722 : : appData& dataIntx = context.appDatas[*pid_intx];
2723 : : EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
2724 : : recvGraph1->set_cover_set( cover_set );
2725 : : context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1;
2726 : : }
2727 : : for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
2728 : : {
2729 : : int procToSendTo = mit->first;
2730 : : std::set< int >& idSet = mit->second;
2731 : : for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
2732 : : {
2733 : : int n = TLcovIDs.get_n();
2734 : : TLcovIDs.reserve();
2735 : : TLcovIDs.vi_wr[2 * n] = procToSendTo; // send to processor
2736 : : TLcovIDs.vi_wr[2 * n + 1] = *sit; // global id of the cell needed from that processor
2737 : : }
2738 : : }
2739 : : }
2740 : :
2741 : : ProcConfig pc( *join ); // proc config does the crystal router
2742 : : pc.crystal_router()->gs_transfer( 1, TLcovIDs,
2743 : : 0 ); // communication towards component tasks, with what ids are needed
2744 : : // for each task from receiver
2745 : :
2746 : : // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
2747 : : if( NULL != sendGraph )
2748 : : {
2749 : : // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
2750 : : // receiver task
2751 : : ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph ); // just copy
2752 : : sendGraph1->set_context_id( *context_id );
2753 : : context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
2754 : : rval = sendGraph1->settle_send_graph( TLcovIDs );CHKERRVAL( rval );
2755 : : }
2756 : : return 0; // success
2757 : : }
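 : :
 : : // Illustrative usage (a minimal sketch, not part of the instrumented source): a coupler
 : : // driver would call iMOAB_CoverageGraph collectively on the joint communicator, after the
 : : // default migration graph exists; joint, ctx and the pid* handles below are hypothetical
 : : // values obtained earlier from MPI setup and iMOAB_RegisterApplication.
 : : //
 : : //   MPI_Comm joint = MPI_COMM_WORLD;             // joint communicator of both PE sets
 : : //   int pidSrc = 0, pidMigr = 1, pidIntx = 2;    // hypothetical registered app ids
 : : //   int ctx = 9;                                 // coupling context id
 : : //   int ierr = iMOAB_CoverageGraph( &joint, &pidSrc, &pidMigr, &pidIntx, &ctx );
 : : //   if( 0 != ierr ) { /* the coverage graph could not be settled */ }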
2758 : :
2759 : : ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix,
2760 : : int length_prefix )
2761 : : {
2762 : : ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
2763 : : std::string prefix_str( prefix );
2764 : : if( length_prefix < (int)strlen( prefix ) ) { prefix_str = prefix_str.substr( 0, length_prefix ); }
2765 : : if( NULL != cgraph )
2766 : : cgraph->dump_comm_information( prefix_str, *is_sender );
2767 : : else
2768 : : {
2769 : : std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
2770 : : << " context: " << *context_id << "\n";
2771 : : }
2772 : : return 0;
2773 : : }
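 : :
 : : // Illustrative usage (sketch): dump the communication graph stored for a given context;
 : : // the prefix length is passed explicitly, as for all iMOAB strings. pidSrc and ctx are
 : : // the hypothetical values from the sketch above.
 : : //
 : : //   int is_sender = 1; // we are on the sender side of the graph
 : : //   ierr = iMOAB_DumpCommGraph( &pidSrc, &ctx, &is_sender, "covGraph", 8 );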
2774 : :
2775 : : #endif // #ifdef MOAB_HAVE_TEMPESTREMAP
2776 : :
2777 : : #endif // #ifdef MOAB_HAVE_MPI
2778 : :
2779 : :
2780 : : #ifdef MOAB_HAVE_TEMPESTREMAP
2781 : :
2782 : : #ifdef MOAB_HAVE_NETCDF
2783 : :
2784 : : ErrCode iMOAB_LoadMappingWeightsFromFile(
2785 : : iMOAB_AppID pid_intersection, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
2786 : : const iMOAB_String remap_weights_filename, int* owned_dof_ids, int* owned_dof_ids_length, int* row_major_ownership,
2787 : : int solution_weights_identifier_length, int remap_weights_filename_length )
2788 : : {
2789 : : assert( remap_weights_filename_length > 0 && solution_weights_identifier_length > 0 );
2790 : : // assert( row_major_ownership != NULL );
2791 : :
2792 : : ErrorCode rval;
2793 : : bool row_based_partition = ( row_major_ownership != NULL && *row_major_ownership > 0 ? true : false );
2794 : :
2795 : : // Get the source and target data and pcomm objects
2796 : : appData& data_intx = context.appDatas[*pid_intersection];
2797 : : TempestMapAppData& tdata = data_intx.tempestData;
2798 : :
2799 : : // Get the handle to the remapper object
2800 : : if( tdata.remapper == NULL )
2801 : : {
2802 : : // Now allocate and initialize the remapper object
2803 : : #ifdef MOAB_HAVE_MPI
2804 : : ParallelComm* pco = context.pcomms[*pid_intersection];
2805 : : tdata.remapper = new moab::TempestRemapper( context.MBI, pco );
2806 : : #else
2807 : : tdata.remapper = new moab::TempestRemapper( context.MBI );
2808 : : #endif
2809 : : tdata.remapper->meshValidate = true;
2810 : : tdata.remapper->constructEdgeMap = true;
2811 : :
2812 : : // Do not create new filesets; Use the sets from our respective applications
2813 : : tdata.remapper->initialize( false );
2814 : : // tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
2815 : : // tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
2816 : : tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
2817 : : }
2818 : :
2819 : : // Setup loading of weights onto TempestOnlineMap
2820 : : // Set the context for the remapping weights computation
2821 : : tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
2822 : :
2823 : : // Now allocate and initialize the remapper object
2824 : : moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
2825 : : assert( weightMap != NULL );
2826 : :
2827 : : // Check whether both the DoF ownership array and its length are NULL; in that case we will
2828 : : // assume a trivial partition for the map. If both are non-NULL, ensure that the length of the array is greater than 0.
2829 : : assert( ( owned_dof_ids == NULL && owned_dof_ids_length == NULL ) ||
2830 : : ( owned_dof_ids != NULL && owned_dof_ids_length != NULL && *owned_dof_ids_length > 0 ) );
2831 : :
2832 : : // Invoke the actual call to read in the parallel map
2833 : : if( owned_dof_ids == NULL && owned_dof_ids_length == NULL )
2834 : : {
2835 : : std::vector< int > tmp_owned_ids;
2836 : : rval = weightMap->ReadParallelMap( remap_weights_filename,
2837 : : tmp_owned_ids,
2838 : : row_based_partition );CHKERRVAL( rval );
2839 : : }
2840 : : else
2841 : : {
2842 : : rval = weightMap->ReadParallelMap( remap_weights_filename,
2843 : : std::vector< int >( owned_dof_ids, owned_dof_ids + *owned_dof_ids_length ),
2844 : : row_based_partition );CHKERRVAL( rval );
2845 : : }
2846 : :
2847 : : return 0;
2848 : : }
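 : :
 : : // Illustrative usage (sketch): passing NULL for the ownership array and its length selects
 : : // the trivial-partition branch above; the filename and identifier are hypothetical, and
 : : // their lengths are passed explicitly.
 : : //
 : : //   ierr = iMOAB_LoadMappingWeightsFromFile( &pidIntx, "scalar", "map.nc",
 : : //                                            NULL, NULL, NULL, 6, 6 );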
2849 : :
2850 : : ErrCode iMOAB_WriteMappingWeightsToFile(
2851 : : iMOAB_AppID pid_intersection, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
2852 : : const iMOAB_String remap_weights_filename, int solution_weights_identifier_length, int remap_weights_filename_length )
2853 : : {
2854 : : assert( remap_weights_filename_length > 0 && solution_weights_identifier_length > 0 );
2855 : :
2856 : : ErrorCode rval;
2857 : :
2858 : : // Get the source and target data and pcomm objects
2859 : : appData& data_intx = context.appDatas[*pid_intersection];
2860 : : TempestMapAppData& tdata = data_intx.tempestData;
2861 : :
2862 : : // Get the handle to the remapper object
2863 : : assert( tdata.remapper != NULL );
2864 : :
2865 : : // Now get online weights object and ensure it is valid
2866 : : moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
2867 : : assert( weightMap != NULL );
2868 : :
2869 : : std::string filename = std::string( remap_weights_filename );
2870 : : size_t lastindex = filename.find_last_of( "." );
2871 : : std::string extension = filename.substr( lastindex + 1, filename.size() );
2872 : :
2873 : : if (extension == "nc")
2874 : : {
2875 : : // Invoke the actual call to write the parallel map to disk
2876 : : rval = weightMap->WriteParallelWeightsToFile( filename );CHKERRVAL( rval );
2877 : : }
2878 : : else
2879 : : {
2880 : : /* Write to the parallel H5M format */
2881 : : rval = weightMap->WriteParallelMap( filename );CHKERRVAL( rval );
2882 : : }
2883 : :
2884 : : return 0;
2885 : : }
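 : :
 : : // Illustrative usage (sketch): the file extension selects the format; a ".nc" name goes
 : : // through WriteParallelWeightsToFile, anything else through the parallel H5M writer.
 : : // Filenames are hypothetical; lengths are passed explicitly.
 : : //
 : : //   ierr = iMOAB_WriteMappingWeightsToFile( &pidIntx, "scalar", "outmap.nc", 6, 9 );
 : : //   ierr = iMOAB_WriteMappingWeightsToFile( &pidIntx, "scalar", "outmap.h5m", 6, 10 );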
2886 : :
2887 : : #endif // #ifdef MOAB_HAVE_NETCDF
2888 : :
2889 : : #define USE_API
2890 : : static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
2891 : : {
2892 : : ErrorCode rval;
2893 : : moab::CartVect pos;
2894 : :
2895 : : Range& verts = context.appDatas[*pid].all_verts;
2896 : : moab::EntityHandle firstVertex = ( verts[0] );
2897 : :
2898 : : // coordinate data
2899 : : rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );CHKERRVAL( rval );
2900 : :
2901 : : // compute the distance from origin
2902 : : // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
2903 : : *radius = pos.length();
2904 : : return 0;
2905 : : }
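 : :
 : : // A possible implementation of the TODO above (sketch, assuming that comparing every
 : : // vertex radius against the first one with a relative tolerance is an acceptable test):
 : : //
 : : //   double r0 = 0.0;
 : : //   const double tol = 1e-10;
 : : //   for( unsigned i = 0; i < verts.size(); i++ )
 : : //   {
 : : //       moab::EntityHandle vh = verts[i];
 : : //       rval = context.MBI->get_coords( &vh, 1, (double*)&( pos[0] ) );CHKERRVAL( rval );
 : : //       if( 0 == i ) r0 = pos.length();
 : : //       else if( fabs( pos.length() - r0 ) > tol * r0 ) return 1; // not a spherical mesh
 : : //   }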
2906 : :
2907 : : ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
2908 : : {
2909 : : ErrorCode rval;
2910 : : ErrCode ierr;
2911 : : bool validate = true;
2912 : :
2913 : : double radius_source = 1.0;
2914 : : double radius_target = 1.0;
2915 : : const double epsrel = 1e-15;
2916 : : const double boxeps = 1.e-6;
2917 : :
2918 : : // Get the source and target data and pcomm objects
2919 : : appData& data_src = context.appDatas[*pid_src];
2920 : : appData& data_tgt = context.appDatas[*pid_tgt];
2921 : : appData& data_intx = context.appDatas[*pid_intx];
2922 : : #ifdef MOAB_HAVE_MPI
2923 : : ParallelComm* pco_intx = context.pcomms[*pid_intx];
2924 : : #endif
2925 : :
2926 : : // Mesh intersection has already been computed; Return early.
2927 : : TempestMapAppData& tdata = data_intx.tempestData;
2928 : : if( tdata.remapper != NULL ) return 0;
2929 : :
2930 : : bool is_parallel = false, is_root = true;
2931 : : int rank = 0;
2932 : : #ifdef MOAB_HAVE_MPI
2933 : : if( pco_intx )
2934 : : {
2935 : : rank = pco_intx->rank();
2936 : : is_parallel = ( pco_intx->size() > 1 );
2937 : : is_root = ( rank == 0 );
2938 : : rval = pco_intx->check_all_shared_handles();CHKERRVAL( rval );
2939 : : }
2940 : : #endif
2941 : :
2942 : : moab::DebugOutput outputFormatter( std::cout, rank, 0 );
2943 : : outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
2944 : :
2945 : : ierr = iMOAB_UpdateMeshInfo( pid_src );
2946 : : CHKIERRVAL( ierr );
2947 : : ierr = iMOAB_UpdateMeshInfo( pid_tgt );
2948 : : CHKIERRVAL( ierr );
2949 : :
2950 : : // Rescale the radius of both to compute the intersection
2951 : : ComputeSphereRadius( pid_src, &radius_source );
2952 : : ComputeSphereRadius( pid_tgt, &radius_target );
2953 : : #ifdef VERBOSE
2954 : : if( is_root )
2955 : : outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
2956 : : radius_target );
2957 : : #endif
2958 : :
2959 : : /* Let us make sure that the radii of the source and target meshes match. If not, rescale
2960 : : * now and unscale later. */
2961 : : bool radii_scaled = false;
2962 : : double defaultradius = 1.0;
2963 : : if( fabs( radius_source - radius_target ) > 1e-10 )
2964 : : { /* the radii are different */
2965 : : radii_scaled = true;
2966 : : rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );CHKERRVAL( rval );
2967 : : rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );CHKERRVAL( rval );
2968 : : }
2969 : :
2970 : : // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
2971 : : #ifdef MOAB_HAVE_TEMPESTREMAP
2972 : : IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
2973 : : #else
2974 : : IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
2975 : : #endif
2976 : :
2977 : : // print verbosely about the problem setting
2978 : : bool use_kdtree_search = false;
2979 : : double srctgt_areas[2], srctgt_areas_glb[2];
2980 : : {
2981 : : moab::Range rintxverts, rintxelems;
2982 : : rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );CHKERRVAL( rval );
2983 : : rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );CHKERRVAL( rval );
2984 : : rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );CHKERRVAL( rval );
2985 : : rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );CHKERRVAL( rval );
2986 : : srctgt_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
2987 : : #ifdef VERBOSE
2988 : : if( is_root )
2989 : : outputFormatter.printf( 0, "The red set contains %d vertices and %d elements \n", rintxverts.size(),
2990 : : rintxelems.size() );
2991 : : #endif
2992 : :
2993 : : moab::Range bintxverts, bintxelems;
2994 : : rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );CHKERRVAL( rval );
2995 : : rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );CHKERRVAL( rval );
2996 : : rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );CHKERRVAL( rval );
2997 : : rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );CHKERRVAL( rval );
2998 : : srctgt_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
2999 : : #ifdef VERBOSE
3000 : : if( is_root )
3001 : : outputFormatter.printf( 0, "The blue set contains %d vertices and %d elements \n", bintxverts.size(),
3002 : : bintxelems.size() );
3003 : : #endif
3004 : : #ifdef MOAB_HAVE_MPI
3005 : : MPI_Allreduce( &srctgt_areas[0], &srctgt_areas_glb[0], 2, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
3006 : : #else
3007 : : srctgt_areas_glb[0] = srctgt_areas[0];
3008 : : srctgt_areas_glb[1] = srctgt_areas[1];
3009 : : #endif
3010 : : use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
3011 : : }
3012 : :
3013 : : data_intx.dimension = data_tgt.dimension;
3014 : : // set the context for the source and destination applications
3015 : : tdata.pid_src = pid_src;
3016 : : tdata.pid_dest = pid_tgt;
3017 : :
3018 : : // Now allocate and initialize the remapper object
3019 : : #ifdef MOAB_HAVE_MPI
3020 : : tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
3021 : : #else
3022 : : tdata.remapper = new moab::TempestRemapper( context.MBI );
3023 : : #endif
3024 : : tdata.remapper->meshValidate = true;
3025 : : tdata.remapper->constructEdgeMap = true;
3026 : :
3027 : : // Do not create new filesets; Use the sets from our respective applications
3028 : : tdata.remapper->initialize( false );
3029 : : tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
3030 : : tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
3031 : : tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
3032 : :
3033 : : rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );CHKERRVAL( rval );
3034 : : rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );CHKERRVAL( rval );
3035 : :
3036 : : // First, compute the covering source set.
3037 : : rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );CHKERRVAL( rval );
3038 : :
3039 : : // Next, compute intersections with MOAB.
3040 : : rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );CHKERRVAL( rval );
3041 : :
3042 : : // Mapping computation done
3043 : : if( validate )
3044 : : {
3045 : : double local_area,
3046 : : global_areas[3]; // global source area, target area, and overlap area
3047 : : local_area = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
3048 : :
3049 : : global_areas[0] = srctgt_areas_glb[0];
3050 : : global_areas[1] = srctgt_areas_glb[1];
3051 : :
3052 : : #ifdef MOAB_HAVE_MPI
3053 : : if( is_parallel ) { MPI_Reduce( &local_area, &global_areas[2], 1, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() ); }
3054 : : else
3055 : : {
3056 : : global_areas[2] = local_area;
3057 : : }
3058 : : #else
3059 : : global_areas[2] = local_area;
3060 : : #endif
3061 : : if( is_root )
3062 : : {
3063 : : outputFormatter.printf( 0,
3064 : : "initial area: source mesh = %12.14f, target mesh = %12.14f, "
3065 : : "overlap mesh = %12.14f\n",
3066 : : global_areas[0], global_areas[1], global_areas[2] );
3067 : : outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
3068 : : fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
3069 : : fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
3070 : : }
3071 : : }
3072 : :
3073 : : return 0;
3074 : : }
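 : :
 : : // Illustrative driver sequence (sketch): the source and target apps must already hold
 : : // their meshes before this collective call; pidSrc, pidTgt and pidIntx are hypothetical
 : : // app ids, with pidIntx registered on the coupler PEs to receive the overlap mesh.
 : : //
 : : //   ierr = iMOAB_ComputeMeshIntersectionOnSphere( &pidSrc, &pidTgt, &pidIntx );
 : : //   if( 0 != ierr ) { /* intersection failed */ }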
3075 : :
3076 : : ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
3077 : : {
3078 : : ErrorCode rval;
3079 : : ErrCode ierr;
3080 : :
3081 : : double radius_source = 1.0;
3082 : : double radius_target = 1.0;
3083 : : const double epsrel = 1e-15;
3084 : : const double boxeps = 1.e-8;
3085 : :
3086 : : // Get the source and target data and pcomm objects
3087 : : appData& data_src = context.appDatas[*pid_src];
3088 : : appData& data_tgt = context.appDatas[*pid_tgt];
3089 : : appData& data_intx = context.appDatas[*pid_intx];
3090 : : #ifdef MOAB_HAVE_MPI
3091 : : ParallelComm* pco_intx = context.pcomms[*pid_intx];
3092 : : #endif
3093 : :
3094 : : // Mesh intersection has already been computed; Return early.
3095 : : TempestMapAppData& tdata = data_intx.tempestData;
3096 : : if( tdata.remapper != NULL ) return 0;
3097 : :
3098 : : #ifdef MOAB_HAVE_MPI
3099 : : if( pco_intx )
3100 : : {
3101 : : rval = pco_intx->check_all_shared_handles();CHKERRVAL( rval );
3102 : : }
3103 : : #endif
3104 : :
3105 : : ierr = iMOAB_UpdateMeshInfo( pid_src );CHKIERRVAL( ierr );
3106 : : ierr = iMOAB_UpdateMeshInfo( pid_tgt );CHKIERRVAL( ierr );
3107 : :
3108 : : // Rescale the radius of both to compute the intersection
3109 : : ComputeSphereRadius( pid_src, &radius_source );
3110 : : ComputeSphereRadius( pid_tgt, &radius_target );
3111 : :
3112 : : IntxAreaUtils areaAdaptor;
3113 : : // print verbosely about the problem setting
3114 : : {
3115 : : moab::Range rintxverts, rintxelems;
3116 : : rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );CHKERRVAL( rval );
3117 : : rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );CHKERRVAL( rval );
3118 : : rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );CHKERRVAL( rval );
3119 : : rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );CHKERRVAL( rval );
3120 : : #ifdef VERBOSE
3121 : : std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
3122 : : << " elements \n";
3123 : : #endif
3124 : :
3125 : : moab::Range bintxverts, bintxelems;
3126 : : rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );CHKERRVAL( rval );
3127 : : rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );CHKERRVAL( rval );
3128 : : rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );CHKERRVAL( rval );
3129 : : rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );CHKERRVAL( rval );
3130 : : #ifdef VERBOSE
3131 : : std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
3132 : : << " elements \n";
3133 : : #endif
3134 : : }
3135 : :
3136 : : data_intx.dimension = data_tgt.dimension;
3137 : : // set the context for the source and destination applications
3139 : : tdata.pid_src = pid_src;
3140 : : tdata.pid_dest = pid_tgt;
3141 : : data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
3142 : : assert( data_intx.point_cloud == true );
3143 : :
3144 : : // Now allocate and initialize the remapper object
3145 : : #ifdef MOAB_HAVE_MPI
3146 : : tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
3147 : : #else
3148 : : tdata.remapper = new moab::TempestRemapper( context.MBI );
3149 : : #endif
3150 : : tdata.remapper->meshValidate = true;
3151 : : tdata.remapper->constructEdgeMap = true;
3152 : :
3153 : : // Do not create new filesets; Use the sets from our respective applications
3154 : : tdata.remapper->initialize( false );
3155 : : tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh ) = data_src.file_set;
3156 : : tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh ) = data_tgt.file_set;
3157 : : tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
3158 : :
3159 : : /* Let us make sure that the radii of the source and target meshes match. If not, rescale
3160 : : * now and unscale later. */
3161 : : bool radii_scaled = false;
3162 : : if( fabs( radius_source - radius_target ) > 1e-10 )
3163 : : { /* the radii are different */
3164 : : radii_scaled = true;
3165 : : rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );CHKERRVAL( rval );
3166 : : rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );CHKERRVAL( rval );
3167 : : }
3168 : :
3169 : : rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );CHKERRVAL( rval );
3170 : : rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );CHKERRVAL( rval );
3171 : :
3172 : : // First, compute the covering source set.
3173 : : rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );CHKERRVAL( rval );
3174 : :
3175 : : #ifdef MOAB_HAVE_MPI
3176 : : /* VSM: This context should be set on the data_src but it would overwrite the source
3177 : : covering set context in case it is coupled to another APP as well.
3178 : : This needs a redesign. */
3179 : : // data_intx.covering_set = tdata.remapper->GetCoveringSet();
3180 : : // data_src.covering_set = tdata.remapper->GetCoveringSet();
3181 : : #endif
3182 : :
3183 : : // Now compute the global-local DoF maps needed by the remapper
3184 : : rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
3185 : :
3186 : : return 0;
3187 : : }
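 : :
 : : // Illustrative usage (sketch): this variant is only meaningful when at least one of the
 : : // two apps is a point cloud (enforced by the assertion above); the call otherwise mirrors
 : : // the cell-cell intersection entry point.
 : : //
 : : //   ierr = iMOAB_ComputePointDoFIntersection( &pidSrc, &pidTgt, &pidIntx );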
3188 : :
3189 : : ErrCode iMOAB_ComputeScalarProjectionWeights(
3190 : : iMOAB_AppID pid_intx, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3191 : : const iMOAB_String disc_method_source, int* disc_order_source, const iMOAB_String disc_method_target,
3192 : : int* disc_order_target, int* fMonotoneTypeID, int* fVolumetric, int* fNoConservation, int* fValidate,
3193 : : const iMOAB_String source_solution_tag_dof_name, const iMOAB_String target_solution_tag_dof_name,
3194 : : int solution_weights_identifier_length, int disc_method_source_length, int disc_method_target_length,
3195 : : int source_solution_tag_dof_name_length, int target_solution_tag_dof_name_length )
3196 : : {
3197 : : moab::ErrorCode rval;
3198 : :
3199 : : assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
3200 : : assert( disc_method_source_length > 0 && disc_method_target_length > 0 );
3201 : : assert( solution_weights_identifier_length > 0 && source_solution_tag_dof_name_length > 0 &&
3202 : : target_solution_tag_dof_name_length > 0 );
3203 : :
3204 : : // Get the source and target data and pcomm objects
3205 : : appData& data_intx = context.appDatas[*pid_intx];
3206 : : TempestMapAppData& tdata = data_intx.tempestData;
3207 : :
3208 : : // Setup computation of weights
3209 : : // Set the context for the remapping weights computation
3210 : : tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
3211 : :
3212 : : // Call to generate the remap weights with the tempest meshes
3213 : : moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3214 : : assert( weightMap != NULL );
3215 : :
3216 : : // Now let us compute the local-global mapping and store it in the context
3217 : : // We need this mapping when computing matvec products and to do reductions in parallel
3218 : : // Additionally, the call below will also compute weights with TempestRemap
3219 : : rval = weightMap->GenerateRemappingWeights(
3220 : : std::string( disc_method_source ),
3221 : : std::string( disc_method_target ), // std::string strInputType, std::string strOutputType,
3222 : : ( *disc_order_source ), ( *disc_order_target ), // const int nPin, const int nPout,
3223 : : true,
3224 : : ( fMonotoneTypeID ? *fMonotoneTypeID : 0 ), // bool fBubble=false, int fMonotoneTypeID=0,
3225 : : ( fVolumetric ? *fVolumetric > 0 : false ), // bool fVolumetric=false,
3226 : : ( fNoConservation ? *fNoConservation > 0 : false ), // bool fNoConservation=false,
3227 : : ( fValidate ? *fValidate : false ), // bool fNoCheck=false,
3228 : : source_solution_tag_dof_name, target_solution_tag_dof_name,
3229 : : "", //"", // std::string strVariables="", std::string strOutputMap="",
3230 : : "", "", // std::string strInputData="", std::string strOutputData="",
3231 : : "", false, // std::string strNColName="", bool fOutputDouble=false,
3232 : : "", false, 0.0, // std::string strPreserveVariables="", bool fPreserveAll=false, double
3233 : : // dFillValueOverride=0.0,
3234 : : false, false // bool fInputConcave = false, bool fOutputConcave = false
3235 : : );CHKERRVAL( rval );
3236 : :
3237 : : return 0;
3238 : : }
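 : :
 : : // Illustrative usage (sketch): first-order conservative FV->FV weights, with validation
 : : // enabled; the discretization name "fv", the DoF tag name GLOBAL_ID and the flag values
 : : // are assumptions chosen for the example, and all string lengths are passed explicitly.
 : : //
 : : //   int order = 1, monotone = 0, volumetric = 0, noConserve = 0, validate = 1;
 : : //   ierr = iMOAB_ComputeScalarProjectionWeights( &pidIntx, "scalar", "fv", &order,
 : : //                                                "fv", &order, &monotone, &volumetric,
 : : //                                                &noConserve, &validate,
 : : //                                                "GLOBAL_ID", "GLOBAL_ID",
 : : //                                                6, 2, 2, 9, 9 );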
3239 : :
3240 : : ErrCode iMOAB_ApplyScalarProjectionWeights(
3241 : : iMOAB_AppID pid_intersection, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
3242 : : const iMOAB_String source_solution_tag_name, const iMOAB_String target_solution_tag_name,
3243 : : int solution_weights_identifier_length, int source_solution_tag_name_length, int target_solution_tag_name_length )
3244 : : {
3245 : : moab::ErrorCode rval;
3246 : :
3247 : : assert( solution_weights_identifier_length > 0 && source_solution_tag_name_length > 0 &&
3248 : : target_solution_tag_name_length > 0 );
3249 : :
3250 : : // Get the source and target data and pcomm objects
3251 : : appData& data_intx = context.appDatas[*pid_intersection];
3252 : : TempestMapAppData& tdata = data_intx.tempestData;
3253 : :
3254 : : // Now allocate and initialize the remapper object
3255 : : moab::TempestRemapper* remapper = tdata.remapper;
3256 : : moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
3257 : :
3258 : : // we assume that there are separators ";" between the tag names
3259 : : std::vector< std::string > srcNames;
3260 : : std::vector< std::string > tgtNames;
3261 : : std::vector< Tag > srcTagHandles;
3262 : : std::vector< Tag > tgtTagHandles;
3263 : : std::string separator( ";" );
3264 : : std::string src_name( source_solution_tag_name );
3265 : : std::string tgt_name( target_solution_tag_name );
3266 : : split_tag_names( src_name, separator, srcNames );
3267 : : split_tag_names( tgt_name, separator, tgtNames );
3268 : : if( srcNames.size() != tgtNames.size() )
3269 : : {
3270 : : std::cout << " error in parsing source and target tag names. \n";
3271 : : return 1;
3272 : : }
3273 : :
3274 : : for( size_t i = 0; i < srcNames.size(); i++ )
3275 : : {
3276 : : Tag tagHandle;
3277 : : rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
3278 : : if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
3279 : : srcTagHandles.push_back( tagHandle );
3280 : : rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
3281 : : if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
3282 : : tgtTagHandles.push_back( tagHandle );
3283 : : }
3284 : :
3285 : : std::vector< double > solSTagVals;
3286 : : std::vector< double > solTTagVals;
3287 : :
3288 : : moab::Range sents, tents;
3289 : : if( data_intx.point_cloud )
3290 : : {
3291 : : appData& data_src = context.appDatas[*tdata.pid_src];
3292 : : appData& data_tgt = context.appDatas[*tdata.pid_dest];
3293 : : if( data_src.point_cloud )
3294 : : {
3295 : : moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
3296 : : solSTagVals.resize( covSrcEnts.size(), -1.0 );
3297 : : sents = covSrcEnts;
3298 : : }
3299 : : else
3300 : : {
3301 : : moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
3302 : : solSTagVals.resize( covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() *
3303 : : weightMap->GetSourceNDofsPerElement(),
3304 : : -1.0 );
3305 : : sents = covSrcEnts;
3306 : : }
3307 : : if( data_tgt.point_cloud )
3308 : : {
3309 : : moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
3310 : : solTTagVals.resize( tgtEnts.size(), -1.0 );
3311 : : tents = tgtEnts;
3312 : : }
3313 : : else
3314 : : {
3315 : : moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
3316 : : solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
3317 : : weightMap->GetDestinationNDofsPerElement(),
3318 : : -1.0 );
3319 : : tents = tgtEnts;
3320 : : }
3321 : : }
3322 : : else
3323 : : {
3324 : : moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
3325 : : moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
3326 : : solSTagVals.resize(
3327 : : covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
3328 : : solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
3329 : : weightMap->GetDestinationNDofsPerElement(),
3330 : : -1.0 );
3331 : :
3332 : : sents = covSrcEnts;
3333 : : tents = tgtEnts;
3334 : : }
3335 : :
3336 : : for( size_t i = 0; i < srcTagHandles.size(); i++ )
3337 : : {
3338 : : // The tag data is np*np*n_el_src
3339 : : Tag ssolnTag = srcTagHandles[i];
3340 : : Tag tsolnTag = tgtTagHandles[i];
3341 : : rval = context.MBI->tag_get_data( ssolnTag, sents, &solSTagVals[0] );CHKERRVAL( rval );
3342 : :
3343 : : // Compute the application of weights on the source solution data and store it in the
3344 : : // destination solution vector data. Optionally, the transpose application of the weight
3345 : : // matrix can also be performed; set the 3rd argument to true if this is needed.
3346 : : rval = weightMap->ApplyWeights( solSTagVals, solTTagVals, false );CHKERRVAL( rval );
3347 : :
3348 : : // The tag data is np*np*n_el_dest
3349 : : rval = context.MBI->tag_set_data( tsolnTag, tents, &solTTagVals[0] );CHKERRVAL( rval );
3350 : : }
3351 : :
3352 : : // #define VERBOSE
3353 : : #ifdef VERBOSE
3354 : : ParallelComm* pco_intx = context.pcomms[*pid_intersection];
3355 : :
3356 : : int ivar = 0;
3357 : : {
3358 : : Tag ssolnTag = srcTagHandles[ivar];
3359 : : std::stringstream sstr;
3360 : : sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
3361 : : std::ofstream output_file( sstr.str().c_str() );
3362 : : for( unsigned i = 0; i < sents.size(); ++i )
3363 : : {
3364 : : EntityHandle elem = sents[i];
3365 : : std::vector< double > locsolSTagVals( 16 );
3366 : : rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );CHKERRVAL( rval );
3367 : : output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
3368 : : for( unsigned j = 0; j < 16; ++j )
3369 : : output_file << locsolSTagVals[j] << " ";
3370 : : }
3371 : : output_file.flush(); // required here
3372 : : output_file.close();
3373 : : }
3374 : : {
3375 : : std::stringstream sstr;
3376 : : sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
3377 : : EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
3378 : : context.appDatas[*tdata.pid_dest].file_set };
3379 : : rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
3380 : : }
3381 : : {
3382 : : std::stringstream sstr;
3383 : : sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
3384 : : // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
3385 : : EntityHandle covering_set = remapper->GetCoveringSet();
3386 : : EntityHandle sets[2] = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
3387 : : rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
3388 : : }
3389 : : {
3390 : : std::stringstream sstr;
3391 : : sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
3392 : : // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
3393 : : EntityHandle covering_set = remapper->GetCoveringSet();
3394 : : rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
3395 : : }
3396 : : {
3397 : : std::stringstream sstr;
3398 : : sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
3399 : : // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
3400 : : EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
3401 : : rval = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
3402 : : }
3403 : : {
3404 : : std::stringstream sstr;
3405 : : sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
3406 : : std::ofstream output_file( sstr.str().c_str() );
3407 : : for( unsigned i = 0; i < solSTagVals.size(); ++i )
3408 : : output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
3409 : : << solSTagVals[i] << "\n";
3410 : : output_file.flush(); // required here
3411 : : output_file.close();
3412 : : }
3413 : : #endif
3414 : : // #undef VERBOSE
3415 : :
3416 : : return 0;
3417 : : }
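 : :
 : : // Illustrative usage (sketch): several fields can be projected in one call by separating
 : : // tag names with ";", pairing source to target by position; the tag names below are
 : : // hypothetical, and the explicit lengths count the full ";"-joined strings.
 : : //
 : : //   ierr = iMOAB_ApplyScalarProjectionWeights( &pidIntx, "scalar",
 : : //                                              "a2oTbot;a2oUbot",
 : : //                                              "a2oTbot_proj;a2oUbot_proj",
 : : //                                              6, 15, 25 );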
3418 : :
3419 : : #endif // MOAB_HAVE_TEMPESTREMAP
3420 : :
3421 : : #ifdef __cplusplus
3422 [ + - ][ + - ]: 8 : }
3423 : : #endif
|