/**
 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
 * storing and accessing finite element mesh data.
 *
 * Copyright 2004 Sandia Corporation. Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
 * retains certain rights in this software.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 */

//-------------------------------------------------------------------------
// Filename      : ReadHDF5.cpp
//
// Purpose       : HDF5 Reader
//
// Creator       : Jason Kraftcheck
//
// Creation Date : 04/18/04
//-------------------------------------------------------------------------

#include <assert.h>
#include "moab/MOABConfig.h"
/* Include our MPI header before any HDF5 because otherwise
   it will get included indirectly by HDF5 */
#ifdef MOAB_HAVE_MPI
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#endif
#include <H5Tpublic.h>
#include <H5Ppublic.h>
#include <H5Epublic.h>
#include "moab/Interface.hpp"
#include "Internals.hpp"
#include "MBTagConventions.hpp"
#include "ReadHDF5.hpp"
#include "moab/CN.hpp"
#include "moab/FileOptions.hpp"
#include "moab/CpuTimer.hpp"
#ifdef MOAB_HAVE_HDF5_PARALLEL
#include <H5FDmpi.h>
#include <H5FDmpio.h>
#endif
//#include "WriteHDF5.hpp"

#include <stdlib.h>
#include <string.h>
#include <limits>
#include <functional>
#include <iostream>

#include "IODebugTrack.hpp"
#include "ReadHDF5Dataset.hpp"
#include "ReadHDF5VarLen.hpp"
#include "moab_mpe.h"

namespace moab
{

/* If true, coordinates are read in blocked format (all X values before
 * Y values before Z values). If false, all coordinates for a
 * given vertex are read at the same time.
 */
const bool DEFAULT_BLOCKED_COORDINATE_IO = false;

/* If true, the file is opened first by the root node only to read the
 * summary, the file is then closed, and the summary is broadcast to all
 * nodes, after which all nodes open the file in parallel to read data.
 * If false, the file is opened once in parallel and all nodes read the
 * summary data.
 */
const bool DEFAULT_BCAST_SUMMARY = true;

/* If true and all processors are to read the same block of data,
 * read it on one and broadcast to the others rather than using
 * collective I/O.
 */
const bool DEFAULT_BCAST_DUPLICATE_READS = true;

#define READ_HDF5_BUFFER_SIZE ( 128 * 1024 * 1024 )
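
// A minimal usage sketch (hypothetical mesh file name) of overriding the
// defaults above per read: each toggle is parsed in set_up_read() below via
// FileOptions::get_toggle_option(), with options separated by semicolons.
//
//   moab::Core mb;
//   mb.load_file( "mesh.h5m", 0,
//                 "BLOCKED_COORDINATE_IO=yes;BCAST_SUMMARY=no;BCAST_DUPLICATE_READS=yes" );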

#define assert_range( PTR, CNT )            \
    assert( ( PTR ) >= (void*)dataBuffer ); \
    assert( ( ( PTR ) + ( CNT ) ) <= (void*)( dataBuffer + bufferSize ) );

// Call \c error function during HDF5 library errors to make
// it easier to trap such errors in the debugger. This function
// gets registered with the HDF5 library as a callback. It
// works the same as the default (H5Eprint), except that it
// also calls the \c error function as a no-op.
#if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
static herr_t handle_hdf5_error( hid_t stack, void* data )
{
    ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
    herr_t result = 0;
    if( h->func ) result = ( *h->func )( stack, h->data );
    MB_CHK_ERR_CONT( MB_FAILURE );
    return result;
}
#else
static herr_t handle_hdf5_error( void* data )
{
    ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
    herr_t result = 0;
    if( h->func ) result = ( *h->func )( h->data );
    MB_CHK_ERR_CONT( MB_FAILURE );
    return result;
}
#endif

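// For reference, set_up_read() below registers this callback with
//   H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
// so an HDF5 error first invokes any previously installed handler (h->func)
// and then hits MB_CHK_ERR_CONT( MB_FAILURE ), which gives the debugger a
// convenient line to break on.
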
static void copy_sorted_file_ids( const EntityHandle* sorted_ids, long num_ids, Range& results )
{
    Range::iterator hint = results.begin();
    long i = 0;
    while( i < num_ids )
    {
        EntityHandle start = sorted_ids[i];
        for( ++i; i < num_ids && sorted_ids[i] == 1 + sorted_ids[i - 1]; ++i )
            ;
        hint = results.insert( hint, start, sorted_ids[i - 1] );
    }
}

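// Worked example: for sorted_ids = { 3, 4, 5, 9, 10 }, the inner for-loop
// above advances past each run of consecutive IDs, so copy_sorted_file_ids()
// performs only two Range insertions: [3,5] and [9,10].
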
static void intersect( const mhdf_EntDesc& group, const Range& range, Range& result )
{
    Range::const_iterator s, e;
    s = Range::lower_bound( range.begin(), range.end(), group.start_id );
    e = Range::lower_bound( s, range.end(), group.start_id + group.count );
    result.merge( s, e );
}

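// Worked example: if group.start_id = 100 and group.count = 50, intersect()
// merges into 'result' only the IDs in 'range' that fall within the file's
// half-open block [100, 150), i.e. the entities of that group actually
// present in the file.
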
#define debug_barrier() debug_barrier_line( __LINE__ )
void ReadHDF5::debug_barrier_line( int lineno )
{
#ifdef MOAB_HAVE_MPI
    if( mpiComm )
    {
        const unsigned threshold   = 2;
        static unsigned long count = 0;
        if( dbgOut.get_verbosity() >= threshold )
        {
            dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
            MPI_Barrier( *mpiComm );
        }
    }
#else
    if( lineno ) {}
#endif
}

class CheckOpenReadHDF5Handles
{
    int fileline;
    mhdf_FileHandle handle;
    int enter_count;

  public:
    CheckOpenReadHDF5Handles( mhdf_FileHandle file, int line )
        : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
    {
    }
    ~CheckOpenReadHDF5Handles()
    {
        int new_count = mhdf_countOpenHandles( handle );
        if( new_count != enter_count )
        {
            std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
                      << "Open at entrance: " << enter_count << std::endl
                      << "Open at exit:     " << new_count << std::endl;
        }
    }
};

#ifdef NDEBUG
#define CHECK_OPEN_HANDLES
#else
#define CHECK_OPEN_HANDLES CheckOpenReadHDF5Handles check_open_handles_( filePtr, __LINE__ )
#endif

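// Usage sketch: placing CHECK_OPEN_HANDLES at the top of a function (as the
// methods below do) creates a RAII scope guard that counts open mhdf/HDF5
// handles on entry and re-counts them in its destructor on every exit path,
// printing a leak report if the counts differ. In NDEBUG builds the macro
// compiles away entirely.
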
ReaderIface* ReadHDF5::factory( Interface* iface )
{
    return new ReadHDF5( iface );
}

ReadHDF5::ReadHDF5( Interface* iface )
    : bufferSize( READ_HDF5_BUFFER_SIZE ), dataBuffer( NULL ), iFace( iface ), filePtr( 0 ), fileInfo( NULL ),
      readUtil( NULL ), handleType( 0 ), indepIO( H5P_DEFAULT ), collIO( H5P_DEFAULT ), myPcomm( NULL ),
      debugTrack( false ), dbgOut( stderr ), nativeParallel( false ), mpiComm( NULL ),
      blockedCoordinateIO( DEFAULT_BLOCKED_COORDINATE_IO ), bcastSummary( DEFAULT_BCAST_SUMMARY ),
      bcastDuplicateReads( DEFAULT_BCAST_DUPLICATE_READS ), setMeta( 0 ), timer( NULL ), cputime( false )
{
}

ErrorCode ReadHDF5::init()
{
    ErrorCode rval;

    if( readUtil ) return MB_SUCCESS;

    indepIO = collIO = H5P_DEFAULT;
    // WriteHDF5::register_known_tag_types(iFace);

    handleType = H5Tcopy( H5T_NATIVE_ULONG );
    if( handleType < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

    if( H5Tset_size( handleType, sizeof( EntityHandle ) ) < 0 )
    {
        H5Tclose( handleType );
        MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    }

    rval = iFace->query_interface( readUtil );
    if( MB_SUCCESS != rval )
    {
        H5Tclose( handleType );
        MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    idMap.clear();
    fileInfo   = 0;
    debugTrack = false;
    myPcomm    = 0;

    return MB_SUCCESS;
}

ReadHDF5::~ReadHDF5()
{
    if( !readUtil )  // init() failed.
        return;

    delete[] setMeta;
    setMeta = 0;
    iFace->release_interface( readUtil );
    H5Tclose( handleType );
}

ErrorCode ReadHDF5::set_up_read( const char* filename, const FileOptions& opts )
{
    ErrorCode rval;
    mhdf_Status status;
    indepIO = collIO = H5P_DEFAULT;
    mpiComm = 0;

    if( MB_SUCCESS != init() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
#else
    herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
#endif
    if( err < 0 )
    {
        errorHandler.func = 0;
        errorHandler.data = 0;
    }
    else
    {
#if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
        err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
#else
        err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
#endif
        if( err < 0 )
        {
            errorHandler.func = 0;
            errorHandler.data = 0;
        }
    }

    // Set up debug output
    int tmpval;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) )
    {
        dbgOut.set_verbosity( tmpval );
        dbgOut.set_prefix( "H5M " );
    }
    dbgOut.limit_output_to_first_N_procs( 32 );

    // Enable some extra checks for reads. Note: amongst other things this
    // will print errors if the entire file is not read, so if doing a
    // partial read that is not a parallel read, this should be disabled.
    debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );

    opts.get_toggle_option( "BLOCKED_COORDINATE_IO", DEFAULT_BLOCKED_COORDINATE_IO, blockedCoordinateIO );
    opts.get_toggle_option( "BCAST_SUMMARY", DEFAULT_BCAST_SUMMARY, bcastSummary );
    opts.get_toggle_option( "BCAST_DUPLICATE_READS", DEFAULT_BCAST_DUPLICATE_READS, bcastDuplicateReads );

    // Handle parallel options
    bool use_mpio  = ( MB_SUCCESS == opts.get_null_option( "USE_MPIO" ) );
    rval           = opts.match_option( "PARALLEL", "READ_PART" );
    bool parallel  = ( rval != MB_ENTITY_NOT_FOUND );
    nativeParallel = ( rval == MB_SUCCESS );
    if( use_mpio && !parallel )
    { MB_SET_ERR( MB_NOT_IMPLEMENTED, "'USE_MPIO' option specified w/out 'PARALLEL' option" ); }

    // This option is intended for testing purposes only, and thus
    // is not documented anywhere. Decreasing the buffer size can
    // expose bugs that would otherwise only be seen when reading
    // very large files.
    rval = opts.get_int_option( "BUFFER_SIZE", bufferSize );
    if( MB_SUCCESS != rval ) { bufferSize = READ_HDF5_BUFFER_SIZE; }
    else if( bufferSize < (int)std::max( sizeof( EntityHandle ), sizeof( void* ) ) )
    {
        MB_CHK_ERR( MB_INVALID_SIZE );
    }

    dataBuffer = (char*)malloc( bufferSize );
    if( !dataBuffer ) MB_CHK_ERR( MB_MEMORY_ALLOCATION_FAILED );

    if( use_mpio || nativeParallel )
    {

#ifndef MOAB_HAVE_HDF5_PARALLEL
        free( dataBuffer );
        dataBuffer = NULL;
        MB_SET_ERR( MB_NOT_IMPLEMENTED, "MOAB not configured with parallel HDF5 support" );
#else
        MPI_Info info = MPI_INFO_NULL;
        std::string cb_size;
        rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
        if( MB_SUCCESS == rval )
        {
            MPI_Info_create( &info );
            MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
        }

        int pcomm_no = 0;
        rval = opts.get_int_option( "PARALLEL_COMM", pcomm_no );
        if( rval == MB_TYPE_OUT_OF_RANGE ) { MB_SET_ERR( rval, "Invalid value for PARALLEL_COMM option" ); }
        myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
        if( 0 == myPcomm ) { myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD ); }
        const int rank = myPcomm->proc_config().proc_rank();
        dbgOut.set_rank( rank );
        dbgOut.limit_output_to_first_N_procs( 32 );
        mpiComm = new MPI_Comm( myPcomm->proc_config().proc_comm() );

#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
        dbgOut.print( 1, "H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS is not defined\n" );
#endif

        // Open the file in serial on root to read summary
        dbgOut.tprint( 1, "Getting file summary\n" );
        fileInfo = 0;

        hid_t file_prop;
        if( bcastSummary )
        {
            unsigned long size = 0;
            if( rank == 0 )
            {
                file_prop = H5Pcreate( H5P_FILE_ACCESS );
                err       = H5Pset_fapl_mpio( file_prop, MPI_COMM_SELF, MPI_INFO_NULL );
                assert( file_prop >= 0 );
                assert( err >= 0 );
                filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
                H5Pclose( file_prop );

                if( filePtr )
                {
                    fileInfo = mhdf_getFileSummary( filePtr, handleType, &status,
                                                    0 );  // no extra set info
                    if( !is_error( status ) )
                    {
                        size             = fileInfo->total_size;
                        fileInfo->offset = (unsigned char*)fileInfo;
                    }
                }
                mhdf_closeFile( filePtr, &status );
                if( fileInfo && mhdf_isError( &status ) )
                {
                    free( fileInfo );
                    fileInfo = NULL;
                }
            }

            dbgOut.tprint( 1, "Communicating file summary\n" );
            int mpi_err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, myPcomm->proc_config().proc_comm() );
            if( mpi_err || !size ) return MB_FAILURE;

            if( rank != 0 ) fileInfo = reinterpret_cast< mhdf_FileDesc* >( malloc( size ) );

            MPI_Bcast( fileInfo, size, MPI_BYTE, 0, myPcomm->proc_config().proc_comm() );

            if( rank != 0 ) mhdf_fixFileDesc( fileInfo, reinterpret_cast< mhdf_FileDesc* >( fileInfo->offset ) );
        }

        file_prop = H5Pcreate( H5P_FILE_ACCESS );
        err       = H5Pset_fapl_mpio( file_prop, myPcomm->proc_config().proc_comm(), info );
        assert( file_prop >= 0 );
        assert( err >= 0 );

        collIO = H5Pcreate( H5P_DATASET_XFER );
        assert( collIO > 0 );
        err = H5Pset_dxpl_mpio( collIO, H5FD_MPIO_COLLECTIVE );
        assert( err >= 0 );
        indepIO = nativeParallel ? H5P_DEFAULT : collIO;

        // Re-open file in parallel
        dbgOut.tprintf( 1, "Opening \"%s\" for parallel IO\n", filename );
        filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );

        H5Pclose( file_prop );
        if( !filePtr )
        {
            free( dataBuffer );
            dataBuffer = NULL;
            H5Pclose( indepIO );
            if( collIO != indepIO ) H5Pclose( collIO );
            collIO = indepIO = H5P_DEFAULT;
            MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
        }

        if( !bcastSummary )
        {
            fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
            if( is_error( status ) )
            {
                free( dataBuffer );
                dataBuffer = NULL;
                mhdf_closeFile( filePtr, &status );
                MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
            }
        }
#endif  // MOAB_HAVE_HDF5_PARALLEL
    }
    else
    {
        // Open the file
        filePtr = mhdf_openFile( filename, 0, NULL, handleType, &status );
        if( !filePtr )
        {
            free( dataBuffer );
            dataBuffer = NULL;
            MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
        }

        // Get file info
        fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
        if( is_error( status ) )
        {
            free( dataBuffer );
            dataBuffer = NULL;
            mhdf_closeFile( filePtr, &status );
            MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        }
    }

    ReadHDF5Dataset::default_hyperslab_selection_limit();
    int hslimit;
    rval = opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit );
    if( MB_SUCCESS == rval && hslimit > 0 )
        ReadHDF5Dataset::set_hyperslab_selection_limit( hslimit );
    else
        ReadHDF5Dataset::default_hyperslab_selection_limit();
    if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
        ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
    {
        ReadHDF5Dataset::append_hyperslabs();
        if( MB_SUCCESS != opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit ) )
            ReadHDF5Dataset::set_hyperslab_selection_limit( std::numeric_limits< int >::max() );
        dbgOut.print( 1, "Using H5S_APPEND for hyperslab selection\n" );
    }

    return MB_SUCCESS;
}

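// A minimal sketch (hypothetical file name) of a read that exercises the
// parallel branch above on each rank; it requires an HDF5-parallel build:
//
//   moab::Core mb;
//   mb.load_file( "mesh.h5m", 0, "PARALLEL=READ_PART;PARALLEL_COMM=0" );
//
// "PARALLEL=READ_PART" sets nativeParallel, and PARALLEL_COMM selects the
// ParallelComm index passed to ParallelComm::get_pcomm() above.
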
ErrorCode ReadHDF5::clean_up_read( const FileOptions& )
{
    HDF5ErrorHandler handler;
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
#else
    herr_t err = H5Eget_auto( &handler.func, &handler.data );
#endif
    if( err >= 0 && handler.func == &handle_hdf5_error )
    {
        assert( handler.data == &errorHandler );
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
        H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
#else
        H5Eset_auto( errorHandler.func, errorHandler.data );
#endif
    }

    free( dataBuffer );
    dataBuffer = NULL;
    free( fileInfo );
    fileInfo = NULL;
    delete mpiComm;
    mpiComm = 0;

    if( indepIO != H5P_DEFAULT ) H5Pclose( indepIO );
    if( collIO != indepIO ) H5Pclose( collIO );
    collIO = indepIO = H5P_DEFAULT;

    delete[] setMeta;
    setMeta = 0;

    mhdf_Status status;
    mhdf_closeFile( filePtr, &status );
    filePtr = 0;
    return is_error( status ) ? MB_FAILURE : MB_SUCCESS;
}

ErrorCode ReadHDF5::load_file( const char* filename, const EntityHandle* file_set, const FileOptions& opts,
                               const ReaderIface::SubsetList* subset_list, const Tag* file_id_tag )
{
    ErrorCode rval;

    rval = set_up_read( filename, opts );
    if( MB_SUCCESS != rval )
    {
        clean_up_read( opts );
        return rval;
    }

    // See if we need to report times
    rval = opts.get_null_option( "CPUTIME" );
    if( MB_SUCCESS == rval )
    {
        cputime = true;
        timer   = new CpuTimer;
        for( int i = 0; i < NUM_TIMES; i++ )
            _times[i] = 0;
    }

    // We read the entire set description table regardless of partial
    // or complete reads or serial vs parallel reads
    rval = read_all_set_meta();

    if( cputime ) _times[SET_META_TIME] = timer->time_elapsed();
    if( subset_list && MB_SUCCESS == rval )
        rval = load_file_partial( subset_list->tag_list, subset_list->tag_list_length, subset_list->num_parts,
                                  subset_list->part_number, opts );
    else
        rval = load_file_impl( opts );

    if( MB_SUCCESS == rval && file_id_tag )
    {
        dbgOut.tprint( 1, "Storing file IDs in tag\n" );
        rval = store_file_ids( *file_id_tag );
    }
    ErrorCode rval3 = opts.get_null_option( "STORE_SETS_FILEIDS" );
    if( MB_SUCCESS == rval3 )
    {
        rval = store_sets_file_ids();
        if( MB_SUCCESS != rval ) return rval;
    }

    if( cputime ) _times[STORE_FILE_IDS_TIME] = timer->time_elapsed();

    if( MB_SUCCESS == rval && 0 != file_set )
    {
        dbgOut.tprint( 1, "Reading QA records\n" );
        rval = read_qa( *file_set );
    }

    if( cputime ) _times[READ_QA_TIME] = timer->time_elapsed();
    dbgOut.tprint( 1, "Cleaning up\n" );
    ErrorCode rval2 = clean_up_read( opts );
    if( rval == MB_SUCCESS && rval2 != MB_SUCCESS ) rval = rval2;

    if( MB_SUCCESS == rval )
        dbgOut.tprint( 1, "Read finished.\n" );
    else
    {
        std::string msg;
        iFace->get_last_error( msg );
        dbgOut.tprintf( 1, "READ FAILED (ERROR CODE %s): %s\n", ErrorCodeStr[rval], msg.c_str() );
    }

    if( cputime )
    {
        _times[TOTAL_TIME] = timer->time_since_birth();
        print_times();
        delete timer;
    }
    if( H5P_DEFAULT != collIO ) H5Pclose( collIO );
    if( H5P_DEFAULT != indepIO ) H5Pclose( indepIO );
    collIO = indepIO = H5P_DEFAULT;

    return rval;
}

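// Example: passing the bare "CPUTIME" option, e.g.
//   mb.load_file( "mesh.h5m", 0, "CPUTIME" );   // file name hypothetical
// makes load_file() above allocate the CpuTimer, record per-phase times in
// _times[], and print the breakdown via print_times() when the read ends.
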
ErrorCode ReadHDF5::load_file_impl( const FileOptions& )
{
    ErrorCode rval;
    mhdf_Status status;
    int i;

    CHECK_OPEN_HANDLES;

    dbgOut.tprint( 1, "Reading all nodes...\n" );
    Range ids;
    if( fileInfo->nodes.count )
    {
        ids.insert( fileInfo->nodes.start_id, fileInfo->nodes.start_id + fileInfo->nodes.count - 1 );
        rval = read_nodes( ids );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Reading all element connectivity...\n" );
    std::vector< int > polyhedra;  // Need to do these last so that faces are loaded
    for( i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        if( CN::EntityTypeFromName( fileInfo->elems[i].type ) == MBPOLYHEDRON )
        {
            polyhedra.push_back( i );
            continue;
        }

        rval = read_elems( i );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }
    for( std::vector< int >::iterator it = polyhedra.begin(); it != polyhedra.end(); ++it )
    {
        rval = read_elems( *it );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Reading all sets...\n" );
    ids.clear();
    if( fileInfo->sets.count )
    {
        ids.insert( fileInfo->sets.start_id, fileInfo->sets.start_id + fileInfo->sets.count - 1 );
        rval = read_sets( ids );
        if( rval != MB_SUCCESS ) { MB_SET_ERR( rval, "ReadHDF5 Failure" ); }
    }

    dbgOut.tprint( 1, "Reading all adjacencies...\n" );
    for( i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        if( !fileInfo->elems[i].have_adj ) continue;

        long table_len;
        hid_t table = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &table_len, &status );
        if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

        rval = read_adjacencies( table, table_len );
        mhdf_closeData( filePtr, table, &status );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
        if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Reading all tags...\n" );
    for( i = 0; i < fileInfo->num_tag_desc; ++i )
    {
        rval = read_tag( i );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Core read finished. Cleaning up...\n" );
    return MB_SUCCESS;
}

ErrorCode ReadHDF5::find_int_tag( const char* name, int& index )
{
    for( index = 0; index < fileInfo->num_tag_desc; ++index )
        if( !strcmp( name, fileInfo->tags[index].name ) ) break;

    if( index == fileInfo->num_tag_desc )
    { MB_SET_ERR( MB_TAG_NOT_FOUND, "File does not contain subset tag '" << name << "'" ); }

    if( fileInfo->tags[index].type != mhdf_INTEGER || fileInfo->tags[index].size != 1 )
    { MB_SET_ERR( MB_TAG_NOT_FOUND, "Tag '" << name << "' does not contain a single integer value" ); }

    return MB_SUCCESS;
}

ErrorCode ReadHDF5::get_subset_ids( const ReaderIface::IDTag* subset_list, int subset_list_length, Range& file_ids )
{
    ErrorCode rval;

    for( int i = 0; i < subset_list_length; ++i )
    {
        int tag_index;
        rval = find_int_tag( subset_list[i].tag_name, tag_index );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

        Range tmp_file_ids;
        if( !subset_list[i].num_tag_values ) { rval = get_tagged_entities( tag_index, tmp_file_ids ); }
        else
        {
            std::vector< int > ids( subset_list[i].tag_values,
                                    subset_list[i].tag_values + subset_list[i].num_tag_values );
            std::sort( ids.begin(), ids.end() );
            rval = search_tag_values( tag_index, ids, tmp_file_ids );
            if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
        }

        if( tmp_file_ids.empty() ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );

        if( i == 0 )
            file_ids.swap( tmp_file_ids );
        else
            file_ids = intersect( tmp_file_ids, file_ids );
    }

    return MB_SUCCESS;
}

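// Worked example: a subset list with two tags intersects their matches, so
//   { { "MATERIAL_SET", {1,2} }, { "GEOM_DIMENSION", {3} } }
// yields only the file IDs tagged MATERIAL_SET=1 or 2 that are also tagged
// GEOM_DIMENSION=3. (Tag names are illustrative; any single-value integer
// tag present in the file works.)
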
ErrorCode ReadHDF5::get_partition( Range& tmp_file_ids, int num_parts, int part_number )
{
    CHECK_OPEN_HANDLES;

    // Check that the tag identified only sets
    if( (unsigned long)fileInfo->sets.start_id > tmp_file_ids.front() )
    {
        dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
        tmp_file_ids.erase( tmp_file_ids.begin(), tmp_file_ids.lower_bound( (EntityHandle)fileInfo->sets.start_id ) );
    }
    unsigned long set_end = (unsigned long)fileInfo->sets.start_id + fileInfo->sets.count;
    if( tmp_file_ids.back() >= set_end )
    {
        dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
        tmp_file_ids.erase( tmp_file_ids.upper_bound( (EntityHandle)set_end ), tmp_file_ids.end() );
    }

    Range::iterator s   = tmp_file_ids.begin();
    size_t num_per_proc = tmp_file_ids.size() / num_parts;
    size_t num_extra    = tmp_file_ids.size() % num_parts;
    Range::iterator e;
    if( part_number < (long)num_extra )
    {
        s += ( num_per_proc + 1 ) * part_number;
        e = s;
        e += ( num_per_proc + 1 );
    }
    else
    {
        s += num_per_proc * part_number + num_extra;
        e = s;
        e += num_per_proc;
    }
    tmp_file_ids.erase( e, tmp_file_ids.end() );
    tmp_file_ids.erase( tmp_file_ids.begin(), s );

    return MB_SUCCESS;
}

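// Worked example of the split above: 10 partition sets among num_parts = 4
// gives num_per_proc = 2 and num_extra = 2, so part_number 0 and 1 each keep
// 3 sets while part_number 2 and 3 each keep 2, covering all 10 exactly once.
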
ErrorCode ReadHDF5::load_file_partial( const ReaderIface::IDTag* subset_list, int subset_list_length, int num_parts,
                                       int part_number, const FileOptions& opts )
{
    mhdf_Status status;

    static MPEState mpe_event( "ReadHDF5", "yellow" );

    mpe_event.start( "gather parts" );

    CHECK_OPEN_HANDLES;

    for( int i = 0; i < subset_list_length; ++i )
    {
        dbgOut.printf( 2, "Select by \"%s\" with num_tag_values = %d\n", subset_list[i].tag_name,
                       subset_list[i].num_tag_values );
        if( subset_list[i].num_tag_values )
        {
            assert( 0 != subset_list[i].tag_values );
            dbgOut.printf( 2, "  \"%s\" values = { %d", subset_list[i].tag_name, subset_list[i].tag_values[0] );
            for( int j = 1; j < subset_list[i].num_tag_values; ++j )
                dbgOut.printf( 2, ", %d", subset_list[i].tag_values[j] );
            dbgOut.printf( 2, " }\n" );
        }
    }
    if( num_parts ) dbgOut.printf( 2, "Partition with num_parts = %d and part_number = %d\n", num_parts, part_number );

    dbgOut.tprint( 1, "RETRIEVING TAGGED ENTITIES\n" );

    Range file_ids;
    ErrorCode rval = get_subset_ids( subset_list, subset_list_length, file_ids );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[SUBSET_IDS_TIME] = timer->time_elapsed();

    if( num_parts )
    {
        /*if (num_parts>(int)file_ids.size())
        {
          MB_SET_ERR(MB_FAILURE, "Only " << file_ids.size() << " parts to distribute to " <<
              num_parts << " processes.");
        }*/
        rval = get_partition( file_ids, num_parts, part_number );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[GET_PARTITION_TIME] = timer->time_elapsed();

    dbgOut.print_ints( 4, "Set file IDs for partial read: ", file_ids );
    mpe_event.end();
    mpe_event.start( "gather related sets" );
    dbgOut.tprint( 1, "GATHERING ADDITIONAL ENTITIES\n" );

    enum RecursiveSetMode
    {
        RSM_NONE,
        RSM_SETS,
        RSM_CONTENTS
    };
    const char* const set_opts[] = { "NONE", "SETS", "CONTENTS", NULL };
    int child_mode;
    rval = opts.match_option( "CHILDREN", set_opts, child_mode );
    if( MB_ENTITY_NOT_FOUND == rval )
        child_mode = RSM_CONTENTS;
    else if( MB_SUCCESS != rval )
    {
        MB_SET_ERR( rval, "Invalid value for 'CHILDREN' option" );
    }
    int content_mode;
    rval = opts.match_option( "SETS", set_opts, content_mode );
    if( MB_ENTITY_NOT_FOUND == rval )
        content_mode = RSM_CONTENTS;
    else if( MB_SUCCESS != rval )
    {
        MB_SET_ERR( rval, "Invalid value for 'SETS' option" );
    }

    // If we want the contents of contained/child sets,
    // search for them now (before gathering the non-set contents
    // of the sets.)
    Range sets;
    intersect( fileInfo->sets, file_ids, sets );
    if( content_mode == RSM_CONTENTS || child_mode == RSM_CONTENTS )
    {
        dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
        rval = read_set_ids_recursive( sets, content_mode == RSM_CONTENTS, child_mode == RSM_CONTENTS );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[GET_SET_IDS_TIME] = timer->time_elapsed();
    debug_barrier();

    // Get elements and vertices contained in sets
    dbgOut.tprint( 1, "  doing get_set_contents\n" );
    rval = get_set_contents( sets, file_ids );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[GET_SET_CONTENTS_TIME] = timer->time_elapsed();

    dbgOut.print_ints( 5, "File IDs for partial read: ", file_ids );
    debug_barrier();
    mpe_event.end();
    dbgOut.tprint( 1, "GATHERING NODE IDS\n" );

    // Figure out the maximum dimension of entity to be read
    int max_dim = 0;
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if( type <= MBVERTEX || type >= MBENTITYSET )
        {
            assert( false );  // For debug code die for unknown element types
            continue;         // For release code, skip unknown element types
        }
        int dim = CN::Dimension( type );
        if( dim > max_dim )
        {
            Range subset;
            intersect( fileInfo->elems[i].desc, file_ids, subset );
            if( !subset.empty() ) max_dim = dim;
        }
    }
#ifdef MOAB_HAVE_MPI
    if( nativeParallel )
    {
        int send = max_dim;
        MPI_Allreduce( &send, &max_dim, 1, MPI_INT, MPI_MAX, *mpiComm );
    }
#endif

    // If input contained any polyhedra, then we need to get the faces
    // of the polyhedra before the next loop because we need to
    // read said faces in that loop.
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if( type != MBPOLYHEDRON ) continue;

        debug_barrier();
        dbgOut.print( 2, "  Getting polyhedra faces\n" );
        mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );

        Range polyhedra;
        intersect( fileInfo->elems[i].desc, file_ids, polyhedra );
        rval = read_elems( i, polyhedra, &file_ids );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[GET_POLYHEDRA_TIME] = timer->time_elapsed();
    // Get node file ids for all elements
    Range nodes;
    intersect( fileInfo->nodes, file_ids, nodes );
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if( type <= MBVERTEX || type >= MBENTITYSET )
        {
            assert( false );  // For debug code die for unknown element types
            continue;         // For release code, skip unknown element types
        }
        if( MBPOLYHEDRON == type ) continue;

        debug_barrier();
        dbgOut.printf( 2, "  Getting element node IDs for: %s\n", fileInfo->elems[i].handle );

        Range subset;
        intersect( fileInfo->elems[i].desc, file_ids, subset );
        mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );

        // If dimension is max_dim, then we can create the elements now
        // so we don't have to read the table again later (connectivity
        // will be fixed up after nodes are created, when update_connectivity()
        // is called). For elements of a smaller dimension, we just build
        // the node ID range now because a) we'll have to read the whole
        // connectivity table again later, and b) we don't want to worry
        // about accidentally creating multiple copies of the same element.
        if( CN::Dimension( type ) == max_dim )
            rval = read_elems( i, subset, &nodes );
        else
            rval = read_elems( i, subset, nodes );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }
    if( cputime ) _times[GET_ELEMENTS_TIME] = timer->time_elapsed();
    debug_barrier();
    mpe_event.start( "read coords" );
    dbgOut.tprintf( 1, "READING NODE COORDINATES (%lu nodes in %lu selects)\n", (unsigned long)nodes.size(),
                    (unsigned long)nodes.psize() );

    // Read node coordinates and create vertices in MOAB
    // NOTE: This populates the RangeMap with node file ids,
    //       which is expected by read_node_adj_elems.
    rval = read_nodes( nodes );
    mpe_event.end( rval );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[GET_NODES_TIME] = timer->time_elapsed();

    debug_barrier();
    dbgOut.tprint( 1, "READING ELEMENTS\n" );

    // Decide if we need to read additional elements
    enum SideMode
    {
        SM_EXPLICIT,
        SM_NODES,
        SM_SIDES
    };
    int side_mode;
    const char* const options[] = { "EXPLICIT", "NODES", "SIDES", 0 };
    rval = opts.match_option( "ELEMENTS", options, side_mode );
    if( MB_ENTITY_NOT_FOUND == rval )
    {
        // If only nodes were specified, then default to "NODES", otherwise
        // default to "SIDES".
        if( 0 == max_dim )
            side_mode = SM_NODES;
        else
            side_mode = SM_SIDES;
    }
    else if( MB_SUCCESS != rval )
    {
        MB_SET_ERR( rval, "Invalid value for 'ELEMENTS' option" );
    }

    if( side_mode == SM_SIDES /*ELEMENTS=SIDES*/ && max_dim == 0 /*node-based*/ )
    {
        // Read elements until we find something. Once we find something,
        // read only elements of the same dimension. NOTE: loop termination
        // criterion changes on both sides (max_dim can be changed in loop
        // body).
        for( int dim = 3; dim >= max_dim; --dim )
        {
            for( int i = 0; i < fileInfo->num_elem_desc; ++i )
            {
                EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
                if( CN::Dimension( type ) == dim )
                {
                    debug_barrier();
                    dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
                    mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
                    Range ents;
                    rval = read_node_adj_elems( fileInfo->elems[i] );
                    mpe_event.end( rval );
                    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
                    if( !ents.empty() ) max_dim = 3;
                }
            }
        }
    }

    if( cputime ) _times[GET_NODEADJ_TIME] = timer->time_elapsed();
    Range side_entities;
    if( side_mode != SM_EXPLICIT /*ELEMENTS=NODES || ELEMENTS=SIDES*/ )
    {
        if( 0 == max_dim ) max_dim = 4;
        // Now read any additional elements for which we've already read all
        // of the nodes.
        for( int dim = max_dim - 1; dim > 0; --dim )
        {
            for( int i = 0; i < fileInfo->num_elem_desc; ++i )
            {
                EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
                if( CN::Dimension( type ) == dim )
                {
                    debug_barrier();
                    dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
                    mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
                    rval = read_node_adj_elems( fileInfo->elems[i], &side_entities );
                    mpe_event.end( rval );
                    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
                }
            }
        }
    }

    // We need to do this here for polyhedra to be handled correctly.
    // We have to wait until the faces are read in the above code block,
    // but need to create the connectivity before doing update_connectivity,
    // which might otherwise delete polyhedra faces.
    if( cputime ) _times[GET_SIDEELEM_TIME] = timer->time_elapsed();

    debug_barrier();
    dbgOut.tprint( 1, "UPDATING CONNECTIVITY ARRAYS FOR READ ELEMENTS\n" );
    mpe_event.start( "updating connectivity for elements read before vertices" );
    rval = update_connectivity();
    mpe_event.end();
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[UPDATECONN_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "READING ADJACENCIES\n" );
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        if( fileInfo->elems[i].have_adj /*&&
            idMap.intersects(fileInfo->elems[i].desc.start_id, fileInfo->elems[i].desc.count) */ )
        {
            mpe_event.start( "reading adjacencies for ", fileInfo->elems[i].handle );
            long len;
            hid_t th = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &len, &status );
            if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

            rval = read_adjacencies( th, len );
            mhdf_closeData( filePtr, th, &status );
            mpe_event.end( rval );
            if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
        }
    }

    if( cputime ) _times[ADJACENCY_TIME] = timer->time_elapsed();

    // If doing ELEMENTS=SIDES then we need to delete any entities
    // that we read that aren't actually sides (e.g. an interior face
    // that connects two disjoint portions of the part). Both
    // update_connectivity and reading of any explicit adjacencies must
    // happen before this.
    if( side_mode == SM_SIDES )
    {
        debug_barrier();
        mpe_event.start( "cleaning up non-side lower-dim elements" );
        dbgOut.tprint( 1, "CHECKING FOR AND DELETING NON-SIDE ELEMENTS\n" );
        rval = delete_non_side_elements( side_entities );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[DELETE_NON_SIDEELEM_TIME] = timer->time_elapsed();

    debug_barrier();
    dbgOut.tprint( 1, "READING SETS\n" );

    // If reading contained/child sets but not their contents then find
    // them now. If we were also reading their contents we would
    // have found them already.
    if( content_mode == RSM_SETS || child_mode == RSM_SETS )
    {
        dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
        mpe_event.start( "finding recursively contained sets" );
        rval = read_set_ids_recursive( sets, content_mode == RSM_SETS, child_mode == RSM_SETS );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[READ_SET_IDS_RECURS_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "  doing find_sets_containing\n" );
    mpe_event.start( "finding sets containing any read entities" );

    // Decide whether to read set-containing parents
    bool read_set_containing_parents = true;
    std::string tmp_opt;
    rval = opts.get_option( "NO_SET_CONTAINING_PARENTS", tmp_opt );
    if( MB_SUCCESS == rval ) read_set_containing_parents = false;

    // Append file IDs of sets containing any of the nodes or elements
    // we've read up to this point.
    rval = find_sets_containing( sets, read_set_containing_parents );
    mpe_event.end( rval );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[FIND_SETS_CONTAINING_TIME] = timer->time_elapsed();

    // Now actually read all set data and instantiate sets in MOAB.
    // Get any contained sets out of file_ids.
    mpe_event.start( "reading set contents/parents/children" );
    EntityHandle first_set = fileInfo->sets.start_id;
    sets.merge( file_ids.lower_bound( first_set ), file_ids.lower_bound( first_set + fileInfo->sets.count ) );
    dbgOut.tprint( 1, "  doing read_sets\n" );
    rval = read_sets( sets );
    mpe_event.end( rval );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[READ_SETS_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "READING TAGS\n" );

    for( int i = 0; i < fileInfo->num_tag_desc; ++i )
    {
        mpe_event.start( "reading tag: ", fileInfo->tags[i].name );
        rval = read_tag( i );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[READ_TAGS_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "PARTIAL READ COMPLETE.\n" );

    return MB_SUCCESS;
}

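// Example option strings for the partial read above (values come from the
// set_opts and options tables it matches against):
//   "SETS=NONE;CHILDREN=NONE"  - read only the sets matching the subset tag
//   "CHILDREN=SETS"            - also create child sets, but not their contents
//   "ELEMENTS=EXPLICIT"        - skip the node-adjacent side-element passes
// Unrecognized values for SETS, CHILDREN, or ELEMENTS fail with MB_SET_ERR.
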
ErrorCode ReadHDF5::search_tag_values( int tag_index, const std::vector< int >& sorted_values, Range& file_ids,
                                       bool sets_only )
{
    ErrorCode rval;
    mhdf_Status status;
    std::vector< EntityHandle >::iterator iter;
    const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
    long size;
    long start_id;

    CHECK_OPEN_HANDLES;

    debug_barrier();

    // Do dense data

    hid_t table;
    const char* name;
    std::vector< EntityHandle > indices;
    // These are probably in order of dimension, so iterate
    // in reverse order to make Range insertions more efficient.
    std::vector< int > grp_indices( tag.dense_elem_indices, tag.dense_elem_indices + tag.num_dense_indices );
    for( std::vector< int >::reverse_iterator i = grp_indices.rbegin(); i != grp_indices.rend(); ++i )
    {
        int idx = *i;
        if( idx == -2 )
        {
            name     = mhdf_set_type_handle();
            start_id = fileInfo->sets.start_id;
        }
        else if( sets_only )
        {
            continue;
        }
        else if( idx == -1 )
        {
            name     = mhdf_node_type_handle();
            start_id = fileInfo->nodes.start_id;
        }
        else
        {
            if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
            name     = fileInfo->elems[idx].handle;
            start_id = fileInfo->elems[idx].desc.start_id;
        }
        table = mhdf_openDenseTagData( filePtr, tag.name, name, &size, &status );
        if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        rval = search_tag_values( table, size, sorted_values, indices );
        mhdf_closeData( filePtr, table, &status );
        if( MB_SUCCESS != rval || is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        // Convert from table indices to file IDs and add to result list
        std::sort( indices.begin(), indices.end(), std::greater< EntityHandle >() );
        std::transform( indices.begin(), indices.end(), range_inserter( file_ids ),
                        // std::bind1st(std::plus<long>(), start_id));
                        std::bind( std::plus< long >(), start_id, std::placeholders::_1 ) );
        indices.clear();
    }

    if( !tag.have_sparse ) return MB_SUCCESS;

    // Do sparse data

    hid_t tables[2];
    long junk;  // Redundant value for non-variable-length tags
    mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
    if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    rval = search_tag_values( tables[1], size, sorted_values, indices );
    mhdf_closeData( filePtr, tables[1], &status );
    if( MB_SUCCESS != rval || is_error( status ) )
    {
        mhdf_closeData( filePtr, tables[0], &status );
        MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    }
    // Convert to ranges
    std::sort( indices.begin(), indices.end() );
    std::vector< EntityHandle > ranges;
    iter = indices.begin();
    while( iter != indices.end() )
    {
        ranges.push_back( *iter );
        EntityHandle last = *iter;
        for( ++iter; iter != indices.end() && ( last + 1 ) == *iter; ++iter, ++last )
            ;
        ranges.push_back( last );
    }
    // Read file ids
    iter                 = ranges.begin();
    unsigned long offset = 0;
    while( iter != ranges.end() )
    {
        long begin = *iter;
        ++iter;
        long end = *iter;
        ++iter;
        mhdf_readSparseTagEntitiesWithOpt( tables[0], begin, end - begin + 1, handleType, &indices[offset], indepIO,
                                           &status );
        if( is_error( status ) )
        {
            mhdf_closeData( filePtr, tables[0], &status );
            MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        }
        offset += end - begin + 1;
    }
    mhdf_closeData( filePtr, tables[0], &status );
    if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    assert( offset == indices.size() );
    std::sort( indices.begin(), indices.end() );

    if( sets_only )
    {
        iter = std::lower_bound( indices.begin(), indices.end(),
                                 ( EntityHandle )( fileInfo->sets.start_id + fileInfo->sets.count ) );
        indices.erase( iter, indices.end() );
        iter = std::lower_bound( indices.begin(), indices.end(), fileInfo->sets.start_id );
        indices.erase( indices.begin(), iter );
    }
    copy_sorted_file_ids( &indices[0], indices.size(), file_ids );

    return MB_SUCCESS;
}

1254 : 0 : ErrorCode ReadHDF5::get_tagged_entities( int tag_index, Range& file_ids )
1255 : : {
1256 : 0 : const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
1257 : :
1258 [ # # ]: 0 : CHECK_OPEN_HANDLES;
1259 : :
1260 : : // Do dense data
1261 [ # # ]: 0 : Range::iterator hint = file_ids.begin();
1262 [ # # ]: 0 : for( int i = 0; i < tag.num_dense_indices; ++i )
1263 : : {
1264 : 0 : int idx = tag.dense_elem_indices[i];
1265 : : mhdf_EntDesc* ents;
1266 [ # # ]: 0 : if( idx == -2 )
1267 : 0 : ents = &fileInfo->sets;
1268 [ # # ]: 0 : else if( idx == -1 )
1269 : 0 : ents = &fileInfo->nodes;
1270 : : else
1271 : : {
1272 [ # # ][ # # ]: 0 : if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1273 : 0 : ents = &( fileInfo->elems[idx].desc );
1274 : : }
1275 : :
1276 : 0 : EntityHandle h = (EntityHandle)ents->start_id;
1277 [ # # ]: 0 : hint = file_ids.insert( hint, h, h + ents->count - 1 );
1278 : : }
1279 : :
1280 [ # # ]: 0 : if( !tag.have_sparse ) return MB_SUCCESS;
1281 : :
1282 : : // Do sparse data
1283 : :
1284 : : mhdf_Status status;
1285 : : hid_t tables[2];
1286 : : long size, junk;
1287 [ # # ]: 0 : mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
1288 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1289 [ # # ]: 0 : mhdf_closeData( filePtr, tables[1], &status );
1290 [ # # ][ # # ]: 0 : if( is_error( status ) )
1291 : : {
1292 [ # # ]: 0 : mhdf_closeData( filePtr, tables[0], &status );
1293 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
1294 : : }
1295 : :
1296 [ # # ]: 0 : hid_t file_type = H5Dget_type( tables[0] );
1297 [ # # ][ # # ]: 0 : if( file_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1298 : :
1299 [ # # ]: 0 : hint = file_ids.begin();
1300 : 0 : EntityHandle* buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
1301 [ # # ][ # # ]: 0 : const long buffer_size = bufferSize / std::max( sizeof( EntityHandle ), H5Tget_size( file_type ) );
1302 : 0 : long remaining = size, offset = 0;
1303 [ # # ]: 0 : while( remaining )
1304 : : {
1305 [ # # ]: 0 : long count = std::min( buffer_size, remaining );
1306 [ # # ][ # # ]: 0 : assert_range( buffer, count );
1307 [ # # ]: 0 : mhdf_readSparseTagEntitiesWithOpt( *tables, offset, count, file_type, buffer, collIO, &status );
1308 [ # # ][ # # ]: 0 : if( is_error( status ) )
1309 : : {
1310 [ # # ]: 0 : H5Tclose( file_type );
1311 [ # # ]: 0 : mhdf_closeData( filePtr, *tables, &status );
1312 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
1313 : : }
1314 [ # # ]: 0 : H5Tconvert( file_type, handleType, count, buffer, NULL, H5P_DEFAULT );
1315 : :
1316 [ # # ]: 0 : std::sort( buffer, buffer + count );
1317 [ # # ]: 0 : for( long i = 0; i < count; ++i )
1318 [ # # ]: 0 : hint = file_ids.insert( hint, buffer[i], buffer[i] );
1319 : :
1320 : 0 : remaining -= count;
1321 : 0 : offset += count;
1322 : : }
1323 : :
1324 [ # # ]: 0 : H5Tclose( file_type );
1325 [ # # ]: 0 : mhdf_closeData( filePtr, *tables, &status );
1326 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1327 : :
1328 : 0 : return MB_SUCCESS;
1329 : : }
1330 : :
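 : : // search_tag_values: stream the tag value table through dataBuffer in
 : : // chunks of bufferSize / sizeof(int) values and record the table index of
 : : // every value that appears (by binary search) in sorted_values. Indices
 : : // rather than values are returned; the caller maps them back to entities.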
1331 : 0 : ErrorCode ReadHDF5::search_tag_values( hid_t tag_table, unsigned long table_size,
1332 : : const std::vector< int >& sorted_values,
1333 : : std::vector< EntityHandle >& value_indices )
1334 : : {
1335 [ # # ]: 0 : debug_barrier();
1336 : :
1337 [ # # ]: 0 : CHECK_OPEN_HANDLES;
1338 : :
1339 : : mhdf_Status status;
1340 : 0 : size_t chunk_size = bufferSize / sizeof( int );
1341 : 0 : int* buffer = reinterpret_cast< int* >( dataBuffer );
1342 : 0 : size_t remaining = table_size, offset = 0;
1343 [ # # ]: 0 : while( remaining )
1344 : : {
1345 : : // Get a block of tag values
1346 [ # # ]: 0 : size_t count = std::min( chunk_size, remaining );
1347 [ # # ][ # # ]: 0 : assert_range( buffer, count );
1348 [ # # ][ # # ]: 0 : mhdf_readTagValuesWithOpt( tag_table, offset, count, H5T_NATIVE_INT, buffer, collIO, &status );
1349 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1350 : :
1351 : : // Search tag values
1352 [ # # ]: 0 : for( size_t i = 0; i < count; ++i )
1353 [ # # ][ # # ]: 0 : if( std::binary_search( sorted_values.begin(), sorted_values.end(), (int)buffer[i] ) )
1354 [ # # ]: 0 : value_indices.push_back( i + offset );
1355 : :
1356 : 0 : offset += count;
1357 : 0 : remaining -= count;
1358 : : }
1359 : :
1360 : 0 : return MB_SUCCESS;
1361 : : }
1362 : :
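 : : // read_nodes: read vertex coordinates for the given file ids. Two layouts
 : : // are handled, selected by blockedCoordinateIO: blocked, where each
 : : // coordinate dimension is a separate column read directly into the
 : : // destination array, and interleaved, where xyz tuples are read into
 : : // dataBuffer and then de-interleaved as arrays[d][i] = buffer[dim * i + d].
 : : // Any dimensions beyond the file's (up to the interface dimension) are
 : : // zero-filled afterward.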
1363 : 65 : ErrorCode ReadHDF5::read_nodes( const Range& node_file_ids )
1364 : : {
1365 : : ErrorCode rval;
1366 : : mhdf_Status status;
1367 : 65 : const int dim = fileInfo->nodes.vals_per_ent;
1368 [ + - ]: 65 : Range range;
1369 : :
1370 [ + - ]: 130 : CHECK_OPEN_HANDLES;
1371 : :
1372 [ + - ][ - + ]: 65 : if( node_file_ids.empty() && !nativeParallel ) return MB_SUCCESS;
[ # # ][ - + ]
1373 : :
1374 : : int cdim;
1375 [ + - ]: 65 : rval = iFace->get_dimension( cdim );
1376 [ - + ][ # # ]: 65 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1377 : :
1378 [ - + ]: 65 : if( cdim < dim )
1379 : : {
1380 [ # # ]: 0 : rval = iFace->set_dimension( dim );
1381 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1382 : : }
1383 : :
1384 [ + - ]: 65 : hid_t data_id = mhdf_openNodeCoordsSimple( filePtr, &status );
1385 [ + - ][ - + ]: 65 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1386 : :
1387 : : EntityHandle handle;
1388 [ + - ]: 130 : std::vector< double* > arrays( dim );
1389 [ + - ]: 65 : const size_t num_nodes = node_file_ids.size();
1390 [ + - ]: 65 : if( num_nodes > 0 )
1391 : : {
1392 [ + - ]: 65 : rval = readUtil->get_node_coords( dim, (int)num_nodes, 0, handle, arrays );
1393 [ - + ]: 65 : if( MB_SUCCESS != rval )
1394 : : {
1395 [ # # ]: 0 : mhdf_closeData( filePtr, data_id, &status );
1396 [ # # ][ # # ]: 65 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
1397 : : }
1398 : : }
1399 : :
1400 [ - + ]: 65 : if( blockedCoordinateIO )
1401 : : {
1402 : : try
1403 : : {
1404 [ # # ]: 0 : for( int d = 0; d < dim; ++d )
1405 : : {
1406 [ # # ]: 0 : ReadHDF5Dataset reader( "blocked coords", data_id, nativeParallel, mpiComm, false );
1407 [ # # ]: 0 : reader.set_column( d );
1408 [ # # ][ # # ]: 0 : reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, num_nodes, H5T_NATIVE_DOUBLE );
1409 [ # # ][ # # ]: 0 : dbgOut.printf( 3, "Reading %lu chunks for coordinate dimension %d\n", reader.get_read_count(), d );
1410 : : // Should normally only have one read call, unless the sparse nature
1411 : : // of file_ids caused the reader to do something strange
1412 : 0 : size_t count, offset = 0;
1413 : 0 : int nn = 0;
1414 [ # # ][ # # ]: 0 : while( !reader.done() )
1415 : : {
1416 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d for dimension %d\n", ++nn, d );
1417 [ # # ][ # # ]: 0 : reader.read( arrays[d] + offset, count );
1418 : 0 : offset += count;
1419 : : }
1420 [ # # ]: 0 : if( offset != num_nodes )
1421 : : {
1422 [ # # ]: 0 : mhdf_closeData( filePtr, data_id, &status );
1423 [ # # ]: 0 : assert( false );
1424 : : return MB_FAILURE;
1425 : : }
1426 : 0 : }
1427 : : }
1428 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
1429 : : {
1430 [ # # ]: 0 : mhdf_closeData( filePtr, data_id, &status );
1431 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
1432 : : }
1433 : : }
1434 : : else
1435 : : { // !blockedCoordinateIO
1436 : 65 : double* buffer = (double*)dataBuffer;
1437 : 65 : long chunk_size = bufferSize / ( 3 * sizeof( double ) );
1438 : 65 : long coffset = 0;
1439 : 65 : int nn = 0;
1440 : : try
1441 : : {
1442 [ + - ]: 65 : ReadHDF5Dataset reader( "interleaved coords", data_id, nativeParallel, mpiComm, false );
1443 [ + - ][ + - ]: 65 : reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, chunk_size, H5T_NATIVE_DOUBLE );
1444 [ + - ][ + - ]: 65 : dbgOut.printf( 3, "Reading %lu chunks for node coordinates\n", reader.get_read_count() );
1445 [ + - ][ + + ]: 130 : while( !reader.done() )
1446 : : {
1447 [ + - ]: 65 : dbgOut.tprintf( 3, "Reading chunk %d of node coords\n", ++nn );
1448 : :
1449 : : size_t count;
1450 [ + - ]: 65 : reader.read( buffer, count );
1451 : :
1452 [ + + ]: 30784 : for( size_t i = 0; i < count; ++i )
1453 [ + + ]: 122876 : for( int d = 0; d < dim; ++d )
1454 [ + - ]: 92157 : arrays[d][coffset + i] = buffer[dim * i + d];
1455 : 65 : coffset += count;
1456 : 65 : }
1457 : : }
1458 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
1459 : : {
1460 [ # # ]: 0 : mhdf_closeData( filePtr, data_id, &status );
1461 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
1462 : : }
1463 : : }
1464 : :
1465 [ + - ]: 65 : dbgOut.print( 3, "Closing node coordinate table\n" );
1466 [ + - ]: 65 : mhdf_closeData( filePtr, data_id, &status );
1467 [ - + ]: 65 : for( int d = dim; d < cdim; ++d )
1468 [ # # ]: 0 : memset( arrays[d], 0, num_nodes * sizeof( double ) );
1469 : :
1470 [ + - ][ + - ]: 65 : dbgOut.printf( 3, "Updating ID to handle map for %lu nodes\n", (unsigned long)node_file_ids.size() );
1471 [ + - ]: 130 : return insert_in_id_map( node_file_ids, handle );
1472 : : }
1473 : :
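 : : // read_elems(int): convenience overload that reads the connectivity of an
 : : // entire element group by constructing the group's full file-id range.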
1474 : 105 : ErrorCode ReadHDF5::read_elems( int i )
1475 : : {
1476 [ + - ]: 105 : Range ids;
1477 : 105 : ids.insert( fileInfo->elems[i].desc.start_id,
1478 [ + - ]: 105 : fileInfo->elems[i].desc.start_id + fileInfo->elems[i].desc.count - 1 );
1479 [ + - ]: 105 : return read_elems( i, ids );
1480 : : }
1481 : :
1482 : 105 : ErrorCode ReadHDF5::read_elems( int i, const Range& file_ids, Range* node_ids )
1483 : : {
1484 [ - + ]: 105 : if( fileInfo->elems[i].desc.vals_per_ent < 0 )
1485 : : {
1486 [ # # ]: 0 : if( node_ids != 0 ) // Not implemented for version 3 format of poly data
1487 [ # # ]: 0 : MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
1488 : 0 : return read_poly( fileInfo->elems[i], file_ids );
1489 : : }
1490 : : else
1491 : 105 : return read_elems( fileInfo->elems[i], file_ids, node_ids );
1492 : : }
1493 : :
1494 : 105 : ErrorCode ReadHDF5::read_elems( const mhdf_ElemDesc& elems, const Range& file_ids, Range* node_ids )
1495 : : {
1496 [ + - ]: 105 : CHECK_OPEN_HANDLES;
1497 : :
1498 [ + - ]: 105 : debug_barrier();
1499 : : dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", elems.handle,
1500 [ + - ][ + - ]: 105 : (unsigned long)file_ids.size(), (unsigned long)file_ids.psize() );
[ + - ]
1501 : :
1502 : 105 : ErrorCode rval = MB_SUCCESS;
1503 : : mhdf_Status status;
1504 : :
1505 [ + - ]: 105 : EntityType type = CN::EntityTypeFromName( elems.type );
1506 [ - + ][ # # ]: 105 : if( type == MBMAXTYPE ) { MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
1507 : :
1508 : 105 : const int nodes_per_elem = elems.desc.vals_per_ent;
1509 [ + - ]: 105 : const size_t count = file_ids.size();
1510 [ + - ]: 105 : hid_t data_id = mhdf_openConnectivitySimple( filePtr, elems.handle, &status );
1511 [ + - ][ - + ]: 105 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1512 : :
1513 : : EntityHandle handle;
1514 : 105 : EntityHandle* array = 0;
1515 [ + - ][ + - ]: 105 : if( count > 0 ) rval = readUtil->get_element_connect( count, nodes_per_elem, type, 0, handle, array );
1516 [ - + ][ # # ]: 105 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1517 : :
1518 : : try
1519 : : {
1520 : 105 : EntityHandle* buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
1521 : 105 : const size_t buffer_size = bufferSize / ( sizeof( EntityHandle ) * nodes_per_elem );
1522 [ + - ]: 105 : ReadHDF5Dataset reader( elems.handle, data_id, nativeParallel, mpiComm );
1523 [ + - ]: 105 : reader.set_file_ids( file_ids, elems.desc.start_id, buffer_size, handleType );
1524 : : dbgOut.printf( 3, "Reading connectivity in %lu chunks for element group \"%s\"\n", reader.get_read_count(),
1525 [ + - ][ + - ]: 105 : elems.handle );
1526 : 105 : EntityHandle* iter = array;
1527 : 105 : int nn = 0;
1528 [ + - ][ + + ]: 210 : while( !reader.done() )
1529 : : {
1530 [ + - ]: 105 : dbgOut.printf( 3, "Reading chunk %d for \"%s\"\n", ++nn, elems.handle );
1531 : :
1532 : : size_t num_read;
1533 [ + - ]: 105 : reader.read( buffer, num_read );
1534 [ + - ]: 105 : iter = std::copy( buffer, buffer + num_read * nodes_per_elem, iter );
1535 : :
1536 [ - + ]: 105 : if( node_ids )
1537 : : {
1538 [ # # ]: 0 : std::sort( buffer, buffer + num_read * nodes_per_elem );
1539 [ # # ]: 0 : num_read = std::unique( buffer, buffer + num_read * nodes_per_elem ) - buffer;
1540 [ # # ]: 105 : copy_sorted_file_ids( buffer, num_read, *node_ids );
1541 : : }
1542 : : }
1543 [ - + ]: 105 : assert( iter - array == (ptrdiff_t)count * nodes_per_elem );
1544 : : }
1545 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
1546 : : {
1547 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
1548 : : }
1549 : :
1550 [ + - ]: 105 : if( !node_ids )
1551 : : {
1552 [ + - ]: 105 : rval = convert_id_to_handle( array, count * nodes_per_elem );
1553 [ - + ][ # # ]: 105 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1554 : :
1555 [ + - ]: 105 : rval = readUtil->update_adjacencies( handle, count, nodes_per_elem, array );
1556 [ - + ][ # # ]: 105 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1557 : : }
1558 : : else
1559 : : {
1560 : : IDConnectivity t;
1561 : 0 : t.handle = handle;
1562 : 0 : t.count = count;
1563 : 0 : t.nodes_per_elem = nodes_per_elem;
1564 : 0 : t.array = array;
1565 [ # # ]: 0 : idConnectivityList.push_back( t );
1566 : : }
1567 : :
1568 [ + - ]: 105 : return insert_in_id_map( file_ids, handle );
1569 : : }
1570 : :
1571 : 0 : ErrorCode ReadHDF5::update_connectivity()
1572 : : {
1573 : : ErrorCode rval;
1574 : 0 : std::vector< IDConnectivity >::iterator i;
1575 [ # # ][ # # ]: 0 : for( i = idConnectivityList.begin(); i != idConnectivityList.end(); ++i )
[ # # ]
1576 : : {
1577 [ # # ][ # # ]: 0 : rval = convert_id_to_handle( i->array, i->count * i->nodes_per_elem );
[ # # ][ # # ]
1578 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1579 : :
1580 [ # # ][ # # ]: 0 : rval = readUtil->update_adjacencies( i->handle, i->count, i->nodes_per_elem, i->array );
[ # # ][ # # ]
[ # # ]
1581 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1582 : : }
1583 : 0 : idConnectivityList.clear();
1584 : :
1585 : 0 : return MB_SUCCESS;
1586 : : }
1587 : :
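 : : // read_node_adj_elems: create the subset of elements in 'group' whose
 : : // connectivity refers only to nodes already loaded (present in idMap).
 : : // The full connectivity table is scanned in chunks; each file id is mapped
 : : // through idMap, and an element with any unmapped node is marked by
 : : // zeroing its first connectivity entry and then skipped.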
1588 : 0 : ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, Range* handles_out )
1589 : : {
1590 : : mhdf_Status status;
1591 : : ErrorCode rval;
1592 : :
1593 [ # # ]: 0 : CHECK_OPEN_HANDLES;
1594 : :
1595 [ # # ]: 0 : hid_t table = mhdf_openConnectivitySimple( filePtr, group.handle, &status );
1596 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1597 : :
1598 [ # # ]: 0 : rval = read_node_adj_elems( group, table, handles_out );
1599 : :
1600 [ # # ]: 0 : mhdf_closeData( filePtr, table, &status );
1601 [ # # ][ # # ]: 0 : if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR_RET_VAL( "ReadHDF5 Failure", MB_FAILURE );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1602 : :
1603 : 0 : return rval;
1604 : : }
1605 : :
1606 : 0 : ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, hid_t table_handle, Range* handles_out )
1607 : : {
1608 [ # # ]: 0 : CHECK_OPEN_HANDLES;
1609 : :
1610 [ # # ]: 0 : debug_barrier();
1611 : :
1612 : : mhdf_Status status;
1613 : : ErrorCode rval;
1614 [ # # ][ # # ]: 0 : IODebugTrack debug_track( debugTrack, std::string( group.handle ) );
1615 : :
1616 : : // Copy data to local variables (makes other code clearer)
1617 : 0 : const int node_per_elem = group.desc.vals_per_ent;
1618 : 0 : long start_id = group.desc.start_id;
1619 : 0 : long remaining = group.desc.count;
1620 [ # # ]: 0 : const EntityType type = CN::EntityTypeFromName( group.type );
1621 : :
1622 : : // Figure out how many elements we can read in each pass
1623 : 0 : long* const buffer = reinterpret_cast< long* >( dataBuffer );
1624 : 0 : const long buffer_size = bufferSize / ( node_per_elem * sizeof( buffer[0] ) );
1625 : : // Read all element connectivity in buffer_size blocks
1626 : 0 : long offset = 0;
1627 : : dbgOut.printf( 3, "Reading node-adjacent elements from \"%s\" in %ld chunks\n", group.handle,
1628 [ # # ]: 0 : ( remaining + buffer_size - 1 ) / buffer_size );
1629 : 0 : int nn = 0;
1630 [ # # ]: 0 : Range::iterator hint;
1631 [ # # ][ # # ]: 0 : if( handles_out ) hint = handles_out->begin();
1632 [ # # ]: 0 : while( remaining )
1633 : : {
1634 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d of connectivity data for \"%s\"\n", ++nn, group.handle );
1635 : :
1636 : : // Read a block of connectivity data
1637 [ # # ]: 0 : const long count = std::min( remaining, buffer_size );
1638 [ # # ]: 0 : debug_track.record_io( offset, count );
1639 [ # # ][ # # ]: 0 : assert_range( buffer, count * node_per_elem );
1640 [ # # ][ # # ]: 0 : mhdf_readConnectivityWithOpt( table_handle, offset, count, H5T_NATIVE_LONG, buffer, collIO, &status );
1641 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1642 : 0 : offset += count;
1643 : 0 : remaining -= count;
1644 : :
1645 : : // Count the number of elements in the block that we want,
1646 : : // zeroing the connectivity of the other elements
1647 : 0 : long num_elem = 0;
1648 : 0 : long* iter = buffer;
1649 [ # # ]: 0 : for( long i = 0; i < count; ++i )
1650 : : {
1651 [ # # ]: 0 : for( int j = 0; j < node_per_elem; ++j )
1652 : : {
1653 [ # # ]: 0 : iter[j] = (long)idMap.find( iter[j] );
1654 [ # # ]: 0 : if( !iter[j] )
1655 : : {
1656 : 0 : iter[0] = 0;
1657 : 0 : break;
1658 : : }
1659 : : }
1660 [ # # ]: 0 : if( iter[0] ) ++num_elem;
1661 : 0 : iter += node_per_elem;
1662 : : }
1663 : :
1664 [ # # ]: 0 : if( !num_elem )
1665 : : {
1666 : 0 : start_id += count;
1667 : 0 : continue;
1668 : : }
1669 : :
1670 : : // Create elements
1671 : : EntityHandle handle;
1672 : : EntityHandle* array;
1673 [ # # ]: 0 : rval = readUtil->get_element_connect( (int)num_elem, node_per_elem, type, 0, handle, array );
1674 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1675 : :
1676 : : // Copy all non-zero connectivity values
1677 : 0 : iter = buffer;
1678 : 0 : EntityHandle* iter2 = array;
1679 : 0 : EntityHandle h = handle;
1680 [ # # ]: 0 : for( long i = 0; i < count; ++i )
1681 : : {
1682 [ # # ]: 0 : if( !*iter )
1683 : : {
1684 : 0 : iter += node_per_elem;
1685 : 0 : continue;
1686 : : }
1687 [ # # ][ # # ]: 0 : if( !idMap.insert( start_id + i, h++, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1688 : :
1689 : 0 : long* const end = iter + node_per_elem;
1690 [ # # ]: 0 : for( ; iter != end; ++iter, ++iter2 )
1691 : 0 : *iter2 = (EntityHandle)*iter;
1692 : : }
1693 [ # # ]: 0 : assert( iter2 - array == num_elem * node_per_elem );
1694 : 0 : start_id += count;
1695 : :
1696 [ # # ]: 0 : rval = readUtil->update_adjacencies( handle, num_elem, node_per_elem, array );
1697 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1698 [ # # ][ # # ]: 0 : if( handles_out ) hint = handles_out->insert( hint, handle, handle + num_elem - 1 );
1699 : : }
1700 : :
1701 [ # # ]: 0 : debug_track.all_reduce();
1702 : 0 : return MB_SUCCESS;
1703 : : }
1704 : :
1705 : 0 : ErrorCode ReadHDF5::read_elems( int i, const Range& elems_in, Range& nodes )
1706 : : {
1707 [ # # ]: 0 : CHECK_OPEN_HANDLES;
1708 : :
1709 [ # # ]: 0 : debug_barrier();
1710 : 0 : dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", fileInfo->elems[i].handle,
1711 [ # # ][ # # ]: 0 : (unsigned long)elems_in.size(), (unsigned long)elems_in.psize() );
[ # # ]
1712 : :
1713 : 0 : EntityHandle* const buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
1714 : 0 : const int node_per_elem = fileInfo->elems[i].desc.vals_per_ent;
1715 : 0 : const size_t buffer_size = bufferSize / ( node_per_elem * sizeof( EntityHandle ) );
1716 : :
1717 [ # # ][ # # ]: 0 : if( elems_in.empty() ) return MB_SUCCESS;
1718 : :
1719 [ # # ][ # # ]: 0 : assert( (long)elems_in.front() >= fileInfo->elems[i].desc.start_id );
1720 [ # # ][ # # ]: 0 : assert( (long)elems_in.back() - fileInfo->elems[i].desc.start_id < fileInfo->elems[i].desc.count );
1721 : :
1722 : : // We don't support version 3 style poly element data
1723 [ # # ][ # # ]: 0 : if( fileInfo->elems[i].desc.vals_per_ent <= 0 ) MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
[ # # ]
1724 : :
1725 : : mhdf_Status status;
1726 [ # # ]: 0 : hid_t table = mhdf_openConnectivitySimple( filePtr, fileInfo->elems[i].handle, &status );
1727 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1728 : :
1729 : : try
1730 : : {
1731 [ # # ]: 0 : ReadHDF5Dataset reader( fileInfo->elems[i].handle, table, nativeParallel, mpiComm );
1732 [ # # ]: 0 : reader.set_file_ids( elems_in, fileInfo->elems[i].desc.start_id, buffer_size, handleType );
1733 : : dbgOut.printf( 3, "Reading node list in %lu chunks for \"%s\"\n", reader.get_read_count(),
1734 [ # # ][ # # ]: 0 : fileInfo->elems[i].handle );
1735 : 0 : int nn = 0;
1736 [ # # ][ # # ]: 0 : while( !reader.done() )
1737 : : {
1738 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d of \"%s\" connectivity\n", ++nn, fileInfo->elems[i].handle );
1739 : : size_t num_read;
1740 [ # # ]: 0 : reader.read( buffer, num_read );
1741 [ # # ]: 0 : std::sort( buffer, buffer + num_read * node_per_elem );
1742 [ # # ]: 0 : num_read = std::unique( buffer, buffer + num_read * node_per_elem ) - buffer;
1743 [ # # ]: 0 : copy_sorted_file_ids( buffer, num_read, nodes );
1744 : 0 : }
1745 : : }
1746 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
1747 : : {
1748 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
1749 : : }
1750 : :
1751 : 0 : return MB_SUCCESS;
1752 : : }
1753 : :
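 : : // read_poly: read variable-length (polygon/polyhedron) connectivity via
 : : // ReadHDF5VarLen. The local PolyReader subclass receives one entity's
 : : // connectivity at a time in store_data(), converts file ids to handles,
 : : // creates the element, and records the file-id -> handle mapping.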
1754 : 0 : ErrorCode ReadHDF5::read_poly( const mhdf_ElemDesc& elems, const Range& file_ids )
1755 : : {
1756 : : class PolyReader : public ReadHDF5VarLen
1757 : : {
1758 : : private:
1759 : : const EntityType type;
1760 : : ReadHDF5* readHDF5;
1761 : :
1762 : : public:
1763 : 0 : PolyReader( EntityType elem_type, void* buffer, size_t buffer_size, ReadHDF5* owner, DebugOutput& dbg )
1764 : 0 : : ReadHDF5VarLen( dbg, buffer, buffer_size ), type( elem_type ), readHDF5( owner )
1765 : : {
1766 : 0 : }
1767 [ # # ]: 0 : virtual ~PolyReader() {}
1768 : 0 : ErrorCode store_data( EntityHandle file_id, void* data, long len, bool )
1769 : : {
1770 : : size_t valid;
1771 : 0 : EntityHandle* conn = reinterpret_cast< EntityHandle* >( data );
1772 [ # # ]: 0 : readHDF5->convert_id_to_handle( conn, len, valid );
1773 [ # # ][ # # ]: 0 : if( valid != (size_t)len ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
[ # # ]
1774 : : EntityHandle handle;
1775 [ # # ][ # # ]: 0 : ErrorCode rval = readHDF5->moab()->create_element( type, conn, len, handle );
1776 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1777 : :
1778 [ # # ]: 0 : rval = readHDF5->insert_in_id_map( file_id, handle );
1779 : 0 : return rval;
1780 : : }
1781 : : };
1782 : :
1783 [ # # ]: 0 : CHECK_OPEN_HANDLES;
1784 : :
1785 [ # # ]: 0 : debug_barrier();
1786 : :
1787 [ # # ]: 0 : EntityType type = CN::EntityTypeFromName( elems.type );
1788 [ # # ][ # # ]: 0 : if( type == MBMAXTYPE ) { MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
1789 : :
1790 : : hid_t handles[2];
1791 : : mhdf_Status status;
1792 : : long num_poly, num_conn, first_id;
1793 [ # # ]: 0 : mhdf_openPolyConnectivity( filePtr, elems.handle, &num_poly, &num_conn, &first_id, handles, &status );
1794 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1795 : :
1796 [ # # ]: 0 : std::string nm( elems.handle );
1797 [ # # ][ # # ]: 0 : ReadHDF5Dataset offset_reader( ( nm + " offsets" ).c_str(), handles[0], nativeParallel, mpiComm, true );
1798 [ # # ][ # # ]: 0 : ReadHDF5Dataset connect_reader( ( nm + " data" ).c_str(), handles[1], nativeParallel, mpiComm, true );
1799 : :
1800 [ # # ]: 0 : PolyReader tool( type, dataBuffer, bufferSize, this, dbgOut );
1801 [ # # ]: 0 : return tool.read( offset_reader, connect_reader, file_ids, first_id, handleType );
1802 : : }
1803 : :
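 : : // delete_non_side_elements: given the entities loaded only because they
 : : // are sides of other entities, delete those that are not adjacent to any
 : : // explicitly requested entity of higher dimension. Edges are checked
 : : // against explicit entities of dimension >= 2, faces against dimension 3.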
1804 : 0 : ErrorCode ReadHDF5::delete_non_side_elements( const Range& side_ents )
1805 : : {
1806 : : ErrorCode rval;
1807 : :
1808 : : // Build list of entities that we need to find the sides of
1809 [ # # ]: 0 : Range explicit_ents;
1810 [ # # ]: 0 : Range::iterator hint = explicit_ents.begin();
1811 [ # # ][ # # ]: 0 : for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
[ # # ][ # # ]
[ # # ]
1812 : : {
1813 [ # # ]: 0 : EntityHandle start = i->value;
1814 [ # # ][ # # ]: 0 : EntityHandle end = i->value + i->count - 1;
1815 [ # # ]: 0 : EntityType type = TYPE_FROM_HANDLE( start );
1816 [ # # ][ # # ]: 0 : assert( type == TYPE_FROM_HANDLE( end ) ); // Otherwise handle space entirely full!!
1817 [ # # ][ # # ]: 0 : if( type != MBVERTEX && type != MBENTITYSET ) hint = explicit_ents.insert( hint, start, end );
[ # # ]
1818 : : }
1819 [ # # ][ # # ]: 0 : explicit_ents = subtract( explicit_ents, side_ents );
1820 : :
1821 : : // Figure out which entities we want to delete
1822 [ # # ]: 0 : Range dead_ents( side_ents );
1823 [ # # ][ # # ]: 0 : Range::iterator ds, de, es;
[ # # ]
1824 [ # # ]: 0 : ds = dead_ents.lower_bound( CN::TypeDimensionMap[1].first );
1825 [ # # ]: 0 : de = dead_ents.lower_bound( CN::TypeDimensionMap[2].first, ds );
1826 [ # # ][ # # ]: 0 : if( ds != de )
1827 : : {
1828 : : // Get subset of explicit ents of dimension greater than 1
1829 [ # # ]: 0 : es = explicit_ents.lower_bound( CN::TypeDimensionMap[2].first );
1830 [ # # ][ # # ]: 0 : Range subset, adj;
[ # # ]
1831 [ # # ][ # # ]: 0 : subset.insert( es, explicit_ents.end() );
1832 [ # # ]: 0 : rval = iFace->get_adjacencies( subset, 1, false, adj, Interface::UNION );
1833 [ # # ]: 0 : if( MB_SUCCESS != rval ) return rval;
1834 [ # # ][ # # ]: 0 : dead_ents = subtract( dead_ents, adj );
[ # # ]
1835 : : }
1836 [ # # ]: 0 : ds = dead_ents.lower_bound( CN::TypeDimensionMap[2].first );
1837 [ # # ]: 0 : de = dead_ents.lower_bound( CN::TypeDimensionMap[3].first, ds );
1838 [ # # ][ # # ]: 0 : assert( de == dead_ents.end() );
[ # # ]
1839 [ # # ][ # # ]: 0 : if( ds != de )
1840 : : {
1841 : : // Get subset of explicit ents of dimension 3
1842 [ # # ]: 0 : es = explicit_ents.lower_bound( CN::TypeDimensionMap[3].first );
1843 [ # # ][ # # ]: 0 : Range subset, adj;
[ # # ]
1844 [ # # ][ # # ]: 0 : subset.insert( es, explicit_ents.end() );
1845 [ # # ]: 0 : rval = iFace->get_adjacencies( subset, 2, false, adj, Interface::UNION );
1846 [ # # ]: 0 : if( MB_SUCCESS != rval ) return rval;
1847 [ # # ][ # # ]: 0 : dead_ents = subtract( dead_ents, adj );
[ # # ]
1848 : : }
1849 : :
1850 : : // Now delete anything remaining in dead_ents
1851 [ # # ][ # # ]: 0 : dbgOut.printf( 2, "Deleting %lu elements\n", (unsigned long)dead_ents.size() );
1852 [ # # ]: 0 : dbgOut.print( 4, "\tDead entities: ", dead_ents );
1853 [ # # ]: 0 : rval = iFace->delete_entities( dead_ents );
1854 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1855 : :
1856 : : // Remove dead entities from ID map
1857 [ # # ][ # # ]: 0 : while( !dead_ents.empty() )
1858 : : {
1859 [ # # ]: 0 : EntityHandle start = dead_ents.front();
1860 [ # # ][ # # ]: 0 : EntityID count = dead_ents.const_pair_begin()->second - start + 1;
1861 : 0 : IDMap::iterator rit;
1862 [ # # ][ # # ]: 0 : for( rit = idMap.begin(); rit != idMap.end(); ++rit )
[ # # ][ # # ]
[ # # ]
1863 [ # # ][ # # ]: 0 : if( rit->value <= start && ( EntityID )( start - rit->value ) < rit->count ) break;
[ # # ][ # # ]
[ # # ][ # # ]
1864 [ # # ][ # # ]: 0 : if( rit == idMap.end() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
1865 : :
1866 [ # # ]: 0 : EntityID offset = start - rit->value;
1867 [ # # ]: 0 : EntityID avail = rit->count - offset;
1868 [ # # ]: 0 : if( avail < count ) count = avail;
1869 : :
1870 [ # # ][ # # ]: 0 : dead_ents.erase( dead_ents.begin(), dead_ents.begin() + count );
[ # # ][ # # ]
1871 [ # # ][ # # ]: 0 : idMap.erase( rit->begin + offset, count );
1872 : : }
1873 : :
1874 : 0 : return MB_SUCCESS;
1875 : : }
1876 : :
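 : : // read_sets: create one entity set per file id using the creation flags
 : : // stored in setMeta (with the mhdf_SET_RANGE_BIT storage flag masked out),
 : : // then populate contents, child, and parent lists via read_set_data().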
1877 : 66 : ErrorCode ReadHDF5::read_sets( const Range& file_ids )
1878 : : {
1879 [ + - ]: 66 : CHECK_OPEN_HANDLES;
1880 : :
1881 [ + - ]: 66 : debug_barrier();
1882 : :
1883 : : mhdf_Status status;
1884 : : ErrorCode rval;
1885 : :
1886 : 66 : const size_t num_sets = fileInfo->sets.count;
1887 [ - + ]: 66 : if( !num_sets ) // If no sets at all!
1888 : 0 : return MB_SUCCESS;
1889 : :
1890 : : // Create sets
1891 [ + - ][ + - ]: 132 : std::vector< unsigned > flags( file_ids.size() );
1892 [ + - ]: 66 : Range::iterator si = file_ids.begin();
1893 [ + - ][ + + ]: 1110 : for( size_t i = 0; i < flags.size(); ++i, ++si )
1894 [ + - ][ + - ]: 1044 : flags[i] = setMeta[*si - fileInfo->sets.start_id][3] & ~(long)mhdf_SET_RANGE_BIT;
1895 : : EntityHandle start_handle;
1896 : : // The file ids could be empty, for empty partitions
1897 [ + - ][ + - ]: 66 : if( !file_ids.empty() )
1898 : : {
1899 [ + - ][ + - ]: 66 : rval = readUtil->create_entity_sets( flags.size(), &flags[0], 0, start_handle );
1900 [ - + ][ # # ]: 66 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1901 [ + - ]: 66 : rval = insert_in_id_map( file_ids, start_handle );
1902 [ - + ][ # # ]: 66 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
1903 : : }
1904 : :
1905 : : // Read contents
1906 [ + + ]: 66 : if( fileInfo->have_set_contents )
1907 : : {
1908 : 65 : long len = 0;
1909 [ + - ]: 65 : hid_t handle = mhdf_openSetData( filePtr, &len, &status );
1910 [ + - ][ - + ]: 65 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1911 : :
1912 [ + - ]: 65 : ReadHDF5Dataset dat( "set contents", handle, nativeParallel, mpiComm, true );
1913 [ + - ]: 65 : rval = read_set_data( file_ids, start_handle, dat, CONTENT );
1914 [ - + ][ # # ]: 65 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ + - ]
1915 : : }
1916 : :
1917 : : // Read set child lists
1918 [ + + ]: 66 : if( fileInfo->have_set_children )
1919 : : {
1920 : 41 : long len = 0;
1921 [ + - ]: 41 : hid_t handle = mhdf_openSetChildren( filePtr, &len, &status );
1922 [ + - ][ - + ]: 41 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1923 : :
1924 [ + - ]: 41 : ReadHDF5Dataset dat( "set children", handle, nativeParallel, mpiComm, true );
1925 [ + - ]: 41 : rval = read_set_data( file_ids, start_handle, dat, CHILD );
1926 [ - + ][ # # ]: 41 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ + - ]
1927 : : }
1928 : :
1929 : : // Read set parent lists
1930 [ + + ]: 66 : if( fileInfo->have_set_parents )
1931 : : {
1932 : 40 : long len = 0;
1933 [ + - ]: 40 : hid_t handle = mhdf_openSetParents( filePtr, &len, &status );
1934 [ + - ][ - + ]: 40 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1935 : :
1936 [ + - ]: 40 : ReadHDF5Dataset dat( "set parents", handle, nativeParallel, mpiComm, true );
1937 [ + - ]: 40 : rval = read_set_data( file_ids, start_handle, dat, PARENT );
1938 [ - + ][ # # ]: 40 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ + - ]
1939 : : }
1940 : :
1941 : 132 : return MB_SUCCESS;
1942 : : }
1943 : :
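 : : // read_all_set_meta: read the entire set-description table into setMeta.
 : : // Each row holds four longs: the last index used by the set in the
 : : // contents, children, and parents tables (columns CONTENT, CHILD, PARENT)
 : : // plus a flags word. With bcastDuplicateReads, only the root reads the
 : : // table and broadcasts it; otherwise every process reads it, collectively
 : : // when running in parallel.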
1944 : 66 : ErrorCode ReadHDF5::read_all_set_meta()
1945 : : {
1946 [ + - ]: 66 : CHECK_OPEN_HANDLES;
1947 : :
1948 [ - + ]: 66 : assert( !setMeta );
1949 : 66 : const long num_sets = fileInfo->sets.count;
1950 [ - + ]: 66 : if( !num_sets ) return MB_SUCCESS;
1951 : :
1952 : : mhdf_Status status;
1953 [ + - ]: 66 : hid_t handle = mhdf_openSetMetaSimple( filePtr, &status );
1954 [ + - ][ - + ]: 66 : if( is_error( status ) ) { MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1955 : :
1956 : : // Allocate extra space if we need it for data conversion
1957 [ + - ]: 66 : hid_t meta_type = H5Dget_type( handle );
1958 [ + - ]: 66 : size_t size = H5Tget_size( meta_type );
1959 [ - + ]: 66 : if( size > sizeof( long ) )
1960 [ # # ][ # # ]: 0 : setMeta = new long[( num_sets * size + ( sizeof( long ) - 1 ) ) / sizeof( long )][4];
1961 : : else
1962 [ + - ][ + - ]: 66 : setMeta = new long[num_sets][4];
1963 : :
1964 : : // Set some parameters based on whether or not each proc reads the
1965 : : // table or only the root reads it and bcasts it to the others
1966 : 66 : int rank = 0;
1967 : 66 : bool bcast = false;
1968 : 66 : hid_t ioprop = H5P_DEFAULT;
1969 : : #ifdef MOAB_HAVE_MPI
1970 : 66 : MPI_Comm comm = 0;
1971 [ - + ]: 66 : if( nativeParallel )
1972 : : {
1973 [ # # ][ # # ]: 0 : rank = myPcomm->proc_config().proc_rank();
1974 [ # # ][ # # ]: 0 : comm = myPcomm->proc_config().proc_comm();
1975 : 0 : bcast = bcastDuplicateReads;
1976 [ # # ]: 0 : if( !bcast ) ioprop = collIO;
1977 : : }
1978 : : #endif
1979 : :
1980 [ - + ][ # # ]: 66 : if( !bcast || 0 == rank )
1981 : : {
1982 [ + - ]: 66 : mhdf_readSetMetaWithOpt( handle, 0, num_sets, meta_type, setMeta, ioprop, &status );
1983 [ + - ][ - + ]: 66 : if( is_error( status ) )
1984 : : {
1985 [ # # ]: 0 : H5Tclose( meta_type );
1986 [ # # ]: 0 : mhdf_closeData( filePtr, handle, &status );
1987 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
1988 : : }
1989 : :
1990 [ + - ][ + - ]: 66 : H5Tconvert( meta_type, H5T_NATIVE_LONG, num_sets * 4, setMeta, 0, H5P_DEFAULT );
1991 : : }
1992 [ + - ]: 66 : mhdf_closeData( filePtr, handle, &status );
1993 [ + - ][ - + ]: 66 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
1994 [ + - ]: 66 : H5Tclose( meta_type );
1995 : :
1996 [ - + ]: 66 : if( bcast )
1997 : : {
1998 : : #ifdef MOAB_HAVE_MPI
1999 [ # # ]: 0 : int ierr = MPI_Bcast( (void*)setMeta, num_sets * 4, MPI_LONG, 0, comm );
2000 [ # # ][ # # ]: 0 : if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2001 : : #else
2002 : : assert( rank == 0 ); // If not MPI, then only one proc
2003 : : #endif
2004 : : }
2005 : :
2006 : 66 : return MB_SUCCESS;
2007 : : }
2008 : :
2009 : 0 : ErrorCode ReadHDF5::read_set_ids_recursive( Range& sets_in_out, bool contained_sets, bool child_sets )
2010 : : {
2011 [ # # ]: 0 : CHECK_OPEN_HANDLES;
2012 : : mhdf_Status status;
2013 : :
2014 [ # # ]: 0 : if( !fileInfo->have_set_children ) child_sets = false;
2015 [ # # ]: 0 : if( !fileInfo->have_set_contents ) contained_sets = false;
2016 [ # # ][ # # ]: 0 : if( !child_sets && !contained_sets ) return MB_SUCCESS;
2017 : :
2018 : : // Open data tables
2019 [ # # ]: 0 : if( fileInfo->sets.count == 0 )
2020 : : {
2021 [ # # ][ # # ]: 0 : assert( sets_in_out.empty() );
2022 : 0 : return MB_SUCCESS;
2023 : : }
2024 : :
2025 [ # # ][ # # ]: 0 : if( !contained_sets && !child_sets ) return MB_SUCCESS;
2026 : :
2027 [ # # ]: 0 : ReadHDF5Dataset cont( "set contents", false, mpiComm );
2028 [ # # ]: 0 : ReadHDF5Dataset child( "set children", false, mpiComm );
2029 : :
2030 [ # # ]: 0 : if( contained_sets )
2031 : : {
2032 : 0 : long content_len = 0;
2033 [ # # ]: 0 : hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
2034 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2035 : : try
2036 : : {
2037 [ # # ]: 0 : cont.init( content_handle, true );
2038 : : }
2039 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
2040 : : {
2041 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
2042 : : }
2043 : : }
2044 : :
2045 [ # # ]: 0 : if( child_sets )
2046 : : {
2047 : 0 : long child_len = 0;
2048 [ # # ]: 0 : hid_t child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
2049 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2050 : : try
2051 : : {
2052 [ # # ]: 0 : child.init( child_handle, true );
2053 : : }
2054 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
2055 : : {
2056 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
2057 : : }
2058 : : }
2059 : :
2060 : 0 : ErrorCode rval = MB_SUCCESS;
2061 [ # # ][ # # ]: 0 : Range children, new_children( sets_in_out );
2062 : 0 : int iteration_count = 0;
2063 [ # # ]: 0 : do
2064 : : {
2065 : 0 : ++iteration_count;
2066 [ # # ]: 0 : dbgOut.tprintf( 2, "Iteration %d of read_set_ids_recursive\n", iteration_count );
2067 [ # # ]: 0 : children.clear();
2068 [ # # ]: 0 : if( child_sets )
2069 : : {
2070 [ # # ]: 0 : rval = read_set_data( new_children, 0, child, CHILD, &children );
2071 [ # # ]: 0 : if( MB_SUCCESS != rval ) break;
2072 : : }
2073 [ # # ]: 0 : if( contained_sets )
2074 : : {
2075 [ # # ]: 0 : rval = read_set_data( new_children, 0, cont, CONTENT, &children );
2076 : : // Remove any non-set values
2077 [ # # ]: 0 : Range::iterator it = children.lower_bound( fileInfo->sets.start_id );
2078 [ # # ][ # # ]: 0 : children.erase( children.begin(), it );
2079 [ # # ]: 0 : it = children.lower_bound( fileInfo->sets.start_id + fileInfo->sets.count );
2080 [ # # ][ # # ]: 0 : children.erase( it, children.end() );
2081 [ # # ]: 0 : if( MB_SUCCESS != rval ) break;
2082 : : }
2083 [ # # ][ # # ]: 0 : new_children = subtract( children, sets_in_out );
2084 [ # # ]: 0 : dbgOut.print_ints( 2, "Adding additional contained/child sets", new_children );
2085 [ # # ]: 0 : sets_in_out.merge( new_children );
2086 [ # # ]: 0 : } while( !new_children.empty() );
2087 : :
2088 : 0 : return MB_SUCCESS;
2089 : : }
2090 : :
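 : : // find_sets_containing (public variant): open the set contents table and
 : : // its in-file data type, then defer to the worker overload below to find
 : : // the sets whose contents intersect the entities already being loaded.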
2091 : 0 : ErrorCode ReadHDF5::find_sets_containing( Range& sets_out, bool read_set_containing_parents )
2092 : : {
2093 : : ErrorCode rval;
2094 : : mhdf_Status status;
2095 : :
2096 [ # # ]: 0 : CHECK_OPEN_HANDLES;
2097 : :
2098 [ # # ]: 0 : if( !fileInfo->have_set_contents ) return MB_SUCCESS;
2099 [ # # ]: 0 : assert( fileInfo->sets.count );
2100 : :
2101 : : // Open data tables
2102 : 0 : long content_len = 0;
2103 [ # # ]: 0 : hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
2104 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2105 : :
2106 [ # # ]: 0 : hid_t data_type = H5Dget_type( content_handle );
2107 : :
2108 [ # # ]: 0 : rval = find_sets_containing( content_handle, data_type, content_len, read_set_containing_parents, sets_out );
2109 : :
2110 [ # # ]: 0 : H5Tclose( data_type );
2111 : :
2112 [ # # ]: 0 : mhdf_closeData( filePtr, content_handle, &status );
2113 [ # # ][ # # ]: 0 : if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2114 : :
2115 : 0 : return rval;
2116 : : }
2117 : :
2118 : 0 : static bool set_map_intersect( bool ranged, const long* contents, int content_len,
2119 : : const RangeMap< long, EntityHandle >& id_map )
2120 : : {
2121 [ # # ]: 0 : if( ranged )
2122 : : {
2123 [ # # ][ # # ]: 0 : if( !content_len || id_map.empty() ) return false;
[ # # ]
2124 : :
2125 : 0 : const long* j = contents;
2126 : 0 : const long* const end = contents + content_len;
2127 [ # # ]: 0 : assert( content_len % 2 == 0 );
2128 [ # # ]: 0 : while( j != end )
2129 : : {
2130 : 0 : long start = *( j++ );
2131 : 0 : long count = *( j++ );
2132 [ # # ]: 0 : if( id_map.intersects( start, count ) ) return true;
2133 : : }
2134 : : }
2135 : : else
2136 : : {
2137 : 0 : const long* const end = contents + content_len;
2138 [ # # ]: 0 : for( const long* i = contents; i != end; ++i )
2139 [ # # ]: 0 : if( id_map.exists( *i ) ) return true;
2140 : : }
2141 : :
2142 : 0 : return false;
2143 : : }
2144 : :
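 : : // SetContOffComp orders set-description rows by their end-of-contents
 : : // index (column ReadHDF5::CONTENT, presumably 0, hence a2[0]), allowing
 : : // std::lower_bound over setMeta to find how many consecutive sets'
 : : // contents fit in the read buffer.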
2145 : : struct SetContOffComp
2146 : : {
2147 : 0 : bool operator()( const long a1[4], const long a2[4] )
2148 : : {
2149 : 0 : return a1[ReadHDF5::CONTENT] < a2[0];
2150 : : }
2151 : : };
2152 : :
2153 : 0 : ErrorCode ReadHDF5::find_sets_containing( hid_t contents_handle, hid_t content_type, long contents_len,
2154 : : bool read_set_containing_parents, Range& file_ids )
2155 : : {
2156 [ # # ]: 0 : CHECK_OPEN_HANDLES;
2157 : :
2158 : : // Scan all set contents data
2159 : :
2160 [ # # ]: 0 : const size_t content_size = H5Tget_size( content_type );
2161 : 0 : const long num_sets = fileInfo->sets.count;
2162 [ # # ]: 0 : dbgOut.printf( 2, "Searching contents of %ld sets\n", num_sets );
2163 : : mhdf_Status status;
2164 : :
2165 : 0 : int rank = 0;
2166 : 0 : bool bcast = false;
2167 : : #ifdef MOAB_HAVE_MPI
2168 : 0 : MPI_Comm comm = 0;
2169 [ # # ]: 0 : if( nativeParallel )
2170 : : {
2171 [ # # ][ # # ]: 0 : rank = myPcomm->proc_config().proc_rank();
2172 [ # # ][ # # ]: 0 : comm = myPcomm->proc_config().proc_comm();
2173 : 0 : bcast = bcastDuplicateReads;
2174 : : }
2175 : : #endif
2176 : :
2177 : : // Check offsets so that we don't read past end of table or
2178 : : // walk off end of array.
2179 : 0 : long prev = -1;
2180 [ # # ]: 0 : for( long i = 0; i < num_sets; ++i )
2181 : : {
2182 [ # # ]: 0 : if( setMeta[i][CONTENT] < prev )
2183 : : {
2184 [ # # ][ # # ]: 0 : std::cerr << "Invalid data in set contents offsets at position " << i << ": index " << setMeta[i][CONTENT]
[ # # ][ # # ]
2185 [ # # ][ # # ]: 0 : << " is less than previous index " << prev << std::endl;
[ # # ]
2186 [ # # ]: 0 : std::cerr.flush();
2187 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2188 : : }
2189 : 0 : prev = setMeta[i][CONTENT];
2190 : : }
2191 [ # # ]: 0 : if( setMeta[num_sets - 1][CONTENT] >= contents_len )
2192 : : {
2193 [ # # ][ # # ]: 0 : std::cerr << "Maximum set content index " << setMeta[num_sets - 1][CONTENT]
2194 [ # # ][ # # ]: 0 : << " exceeds contents table length of " << contents_len << std::endl;
[ # # ]
2195 [ # # ]: 0 : std::cerr.flush();
2196 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2197 : : }
2198 : :
2199 : : // Set up buffer for reading set contents
2200 : 0 : long* const content_buffer = (long*)dataBuffer;
2201 [ # # ]: 0 : const long content_len = bufferSize / std::max( content_size, sizeof( long ) );
2202 : :
2203 : : // Scan set table
2204 [ # # ]: 0 : Range::iterator hint = file_ids.begin();
2205 [ # # ]: 0 : Range tmp_range;
2206 : 0 : long prev_idx = -1;
2207 : 0 : int mm = 0;
2208 : 0 : long sets_offset = 0;
2209 : : long temp_content[4];
2210 [ # # ]: 0 : while( sets_offset < num_sets )
2211 : : {
2212 : 0 : temp_content[0] = content_len + prev_idx;
2213 : : long sets_count =
2214 [ # # ]: 0 : std::lower_bound( setMeta + sets_offset, setMeta + num_sets, temp_content, SetContOffComp() ) - setMeta -
2215 : 0 : sets_offset;
2216 [ # # ][ # # ]: 0 : assert( sets_count >= 0 && sets_offset + sets_count <= num_sets );
2217 [ # # ]: 0 : if( !sets_count )
2218 : : { // Contents of single set don't fit in buffer
2219 : 0 : long content_remaining = setMeta[sets_offset][CONTENT] - prev_idx;
2220 : 0 : long content_offset = prev_idx + 1;
2221 [ # # ]: 0 : while( content_remaining )
2222 : : {
2223 [ # # ]: 0 : long content_count = content_len < content_remaining ? 2 * ( content_len / 2 ) : content_remaining;
2224 [ # # ][ # # ]: 0 : assert_range( content_buffer, content_count );
2225 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, content_count );
2226 [ # # ][ # # ]: 0 : if( !bcast || 0 == rank )
2227 : : {
2228 [ # # ]: 0 : if( !bcast )
2229 : : mhdf_readSetDataWithOpt( contents_handle, content_offset, content_count, content_type,
2230 [ # # ]: 0 : content_buffer, collIO, &status );
2231 : : else
2232 : : mhdf_readSetData( contents_handle, content_offset, content_count, content_type, content_buffer,
2233 [ # # ]: 0 : &status );
2234 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2235 : :
2236 [ # # ][ # # ]: 0 : H5Tconvert( content_type, H5T_NATIVE_LONG, content_count, content_buffer, 0, H5P_DEFAULT );
2237 : : }
2238 [ # # ]: 0 : if( bcast )
2239 : : {
2240 : : #ifdef MOAB_HAVE_MPI
2241 [ # # ]: 0 : int ierr = MPI_Bcast( content_buffer, content_count, MPI_LONG, 0, comm );
2242 [ # # ][ # # ]: 0 : if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2243 : : #else
2244 : : assert( rank == 0 ); // If not MPI, then only one proc
2245 : : #endif
2246 : : }
2247 : :
2248 [ # # ]: 0 : if( read_set_containing_parents )
2249 : : {
2250 [ # # ]: 0 : tmp_range.clear();
2251 [ # # ]: 0 : if( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT )
2252 [ # # ]: 0 : tmp_range.insert( *content_buffer, *( content_buffer + 1 ) );
2253 : : else
2254 [ # # ][ # # ]: 0 : std::copy( content_buffer, content_buffer + content_count, range_inserter( tmp_range ) );
2255 [ # # ][ # # ]: 0 : tmp_range = intersect( tmp_range, file_ids );
2256 : : }
2257 : :
2258 [ # # ][ # # ]: 0 : if( !tmp_range.empty() || set_map_intersect( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT,
[ # # ][ # # ]
2259 [ # # ]: 0 : content_buffer, content_count, idMap ) )
2260 : : {
2261 : 0 : long id = fileInfo->sets.start_id + sets_offset;
2262 [ # # ]: 0 : hint = file_ids.insert( hint, id, id );
2263 [ # # ]: 0 : if( !nativeParallel ) // Don't stop if doing READ_PART because we need to read
2264 : : // collectively
2265 : 0 : break;
2266 : : }
2267 : 0 : content_remaining -= content_count;
2268 : 0 : content_offset += content_count;
2269 : : }
2270 : 0 : prev_idx = setMeta[sets_offset][CONTENT];
2271 : 0 : sets_count = 1;
2272 : : }
2273 [ # # ]: 0 : else if( long read_num = setMeta[sets_offset + sets_count - 1][CONTENT] - prev_idx )
2274 : : {
2275 [ # # ]: 0 : assert( sets_count > 0 );
2276 [ # # ][ # # ]: 0 : assert_range( content_buffer, read_num );
2277 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, read_num );
2278 [ # # ][ # # ]: 0 : if( !bcast || 0 == rank )
2279 : : {
2280 [ # # ]: 0 : if( !bcast )
2281 : : mhdf_readSetDataWithOpt( contents_handle, prev_idx + 1, read_num, content_type, content_buffer,
2282 [ # # ]: 0 : collIO, &status );
2283 : : else
2284 [ # # ]: 0 : mhdf_readSetData( contents_handle, prev_idx + 1, read_num, content_type, content_buffer, &status );
2285 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2286 : :
2287 [ # # ][ # # ]: 0 : H5Tconvert( content_type, H5T_NATIVE_LONG, read_num, content_buffer, 0, H5P_DEFAULT );
2288 : : }
2289 [ # # ]: 0 : if( bcast )
2290 : : {
2291 : : #ifdef MOAB_HAVE_MPI
2292 [ # # ]: 0 : int ierr = MPI_Bcast( content_buffer, read_num, MPI_LONG, 0, comm );
2293 [ # # ][ # # ]: 0 : if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2294 : : #else
2295 : : assert( rank == 0 ); // If not MPI, then only one proc
2296 : : #endif
2297 : : }
2298 : :
2299 : 0 : long* buff_iter = content_buffer;
2300 [ # # ]: 0 : for( long i = 0; i < sets_count; ++i )
2301 : : {
2302 : 0 : long set_size = setMeta[i + sets_offset][CONTENT] - prev_idx;
2303 : 0 : prev_idx += set_size;
2304 : :
2305 : : // Check whether contents include a set already being loaded
2306 [ # # ]: 0 : if( read_set_containing_parents )
2307 : : {
2308 [ # # ]: 0 : tmp_range.clear();
2309 [ # # ]: 0 : if( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT )
2310 : : {
2311 : : // Put the contents of the set in tmp_range;
2312 : : // file_ids contains at this point only other sets
2313 : 0 : const long* j = buff_iter;
2314 : 0 : const long* const end = buff_iter + set_size;
2315 [ # # ]: 0 : assert( set_size % 2 == 0 );
2316 [ # # ]: 0 : while( j != end )
2317 : : {
2318 : 0 : long start = *( j++ );
2319 : 0 : long count = *( j++ );
2320 [ # # ]: 0 : tmp_range.insert( start, start + count - 1 );
2321 : : }
2322 : : }
2323 : : else
2324 [ # # ][ # # ]: 0 : std::copy( buff_iter, buff_iter + set_size, range_inserter( tmp_range ) );
2325 [ # # ][ # # ]: 0 : tmp_range = intersect( tmp_range, file_ids );
2326 : : }
2327 : :
2328 [ # # ][ # # ]: 0 : if( !tmp_range.empty() ||
[ # # ][ # # ]
2329 [ # # ]: 0 : set_map_intersect( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT, buff_iter, set_size, idMap ) )
2330 : : {
2331 : 0 : long id = fileInfo->sets.start_id + sets_offset + i;
2332 [ # # ]: 0 : hint = file_ids.insert( hint, id, id );
2333 : : }
2334 : 0 : buff_iter += set_size;
2335 : : }
2336 : : }
2337 : :
2338 : 0 : sets_offset += sets_count;
2339 : : }
2340 : :
2341 : 0 : return MB_SUCCESS;
2342 : : }
2343 : :
2344 : 0 : static Range::iterator copy_set_contents( Range::iterator hint, int ranged, EntityHandle* contents, long length,
2345 : : Range& results )
2346 : : {
2347 [ # # ]: 0 : if( ranged )
2348 : : {
2349 [ # # ]: 0 : assert( length % 2 == 0 );
2350 [ # # ]: 0 : for( long i = 0; i < length; i += 2 )
2351 : 0 : hint = results.insert( hint, contents[i], contents[i] + contents[i + 1] - 1 );
2352 : : }
2353 : : else
2354 : : {
2355 : 0 : std::sort( contents, contents + length );
2356 [ # # ]: 0 : for( long i = 0; i < length; ++i )
2357 : 0 : hint = results.insert( hint, contents[i] );
2358 : : }
2359 : 0 : return hint;
2360 : : }
2361 : :
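 : : // read_set_data: read one of the three set tables (contents, children, or
 : : // parents, selected by 'mode') for the given sets. The span of table rows
 : : // belonging to set i is (setMeta[i-1][mode], setMeta[i][mode]], so for
 : : // each contiguous run of file ids a single offset range is built, shifted
 : : // by TWEAK = 1 because a Range cannot hold the value zero. For example, if
 : : // two consecutive sets end at indices 4 and 9, the second set occupies
 : : // table rows 5..9, stored in data_offsets as [6, 10].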
2362 : 146 : ErrorCode ReadHDF5::read_set_data( const Range& set_file_ids, EntityHandle start_handle, ReadHDF5Dataset& data,
2363 : : SetMode mode, Range* file_ids_out )
2364 : : {
2365 : : ErrorCode rval;
2366 [ + - ]: 146 : Range::const_pair_iterator pi;
2367 [ + - ]: 146 : Range::iterator out_hint;
2368 [ - + ][ # # ]: 146 : if( file_ids_out ) out_hint = file_ids_out->begin();
2369 : :
2370 : : // Construct range of offsets into data table at which to read
2371 : : // Note: all offsets are incremented by TWEAK because Range cannot
2372 : : // store zeros.
2373 : 146 : const long TWEAK = 1;
2374 [ + - ]: 146 : Range data_offsets;
2375 [ + - ]: 146 : Range::iterator hint = data_offsets.begin();
2376 [ + - ]: 146 : pi = set_file_ids.const_pair_begin();
2377 [ + - ][ + - ]: 146 : if( (long)pi->first == fileInfo->sets.start_id )
2378 : : {
2379 [ + - ]: 146 : long second = pi->second - fileInfo->sets.start_id;
2380 [ + - ][ + - ]: 146 : if( setMeta[second][mode] >= 0 ) hint = data_offsets.insert( hint, TWEAK, setMeta[second][mode] + TWEAK );
2381 [ + - ]: 146 : ++pi;
2382 : : }
2383 [ # # ][ + - ]: 146 : for( ; pi != set_file_ids.const_pair_end(); ++pi )
[ + - ]
[ - + # # ]
2384 : : {
2385 [ # # ]: 0 : long first = pi->first - fileInfo->sets.start_id;
2386 [ # # ]: 0 : long second = pi->second - fileInfo->sets.start_id;
2387 : 0 : long idx1 = setMeta[first - 1][mode] + 1;
2388 : 0 : long idx2 = setMeta[second][mode];
2389 [ # # ][ # # ]: 0 : if( idx2 >= idx1 ) hint = data_offsets.insert( hint, idx1 + TWEAK, idx2 + TWEAK );
2390 : : }
2391 : : try
2392 : : {
2393 [ + - ]: 146 : data.set_file_ids( data_offsets, TWEAK, bufferSize / sizeof( EntityHandle ), handleType );
2394 : : }
2395 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
2396 : : {
2397 : 0 : return MB_FAILURE;
2398 : : }
2399 : :
2400 : : // We need to increment this for each processed set because
2401 : : // the sets were created in the order of the ids in file_ids.
2402 : 146 : EntityHandle h = start_handle;
2403 : :
2404 [ + + ]: 146 : const long ranged_flag = ( mode == CONTENT ) ? mhdf_SET_RANGE_BIT : 0;
2405 : :
2406 [ + - ]: 292 : std::vector< EntityHandle > partial; // For when we read only part of the contents of a set/entity
2407 [ + - ]: 146 : Range::const_iterator fileid_iter = set_file_ids.begin();
2408 : 146 : EntityHandle* buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
2409 : : size_t count, offset;
2410 : :
2411 : 146 : int nn = 0;
2412 : : /*
2413 : : #ifdef MOAB_HAVE_MPI
2414 : : if (nativeParallel && mode==CONTENT && myPcomm->proc_config().proc_size()>1 &&
2415 : : data_offsets.empty())
2416 : : {
2417 : : MB_SET_ERR_CONT( "ReadHDF5 Failure: Attempt reading an empty dataset on proc " <<
2418 : : myPcomm->proc_config().proc_rank());
2419 : : MPI_Abort(myPcomm->proc_config().proc_comm(), 1);
2420 : : }
2421 : : #endif
2422 : : */
2423 [ + - ][ + + ]: 146 : if( ( 1 >= set_file_ids.size() ) && ( data.done() ) && moab::ReadHDF5::CONTENT == mode )
[ + - ][ - + ]
[ # # ][ - + ]
2424 : : // Do at least one null read; it is needed in parallel
2425 [ # # ]: 0 : data.null_read();
2426 : :
2427 [ + - ][ + + ]: 292 : while( !data.done() )
2428 : : {
2429 [ + - ][ + - ]: 146 : dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data.get_debug_desc() );
2430 : : try
2431 : : {
2432 [ + - ]: 146 : data.read( buffer, count );
2433 : : }
2434 : 0 : catch( ReadHDF5Dataset::Exception )
2435 : : {
2436 : 0 : return MB_FAILURE;
2437 : : }
2438 : :
2439 : : // An assert is not appropriate here - this proc may have processed all its
2440 : : // file ids while another proc has not; count will be zero here, so nothing
2441 : : // is done, but the read must still go through the motions for collective I/O
2442 : :
2443 : : // Handle 'special' case where we read some, but not all
2444 : : // of the data for an entity during the last iteration.
2445 : 146 : offset = 0;
2446 [ - + ]: 146 : if( !partial.empty() )
2447 : : { // Didn't read all of previous entity
2448 [ # # ][ # # ]: 0 : assert( fileid_iter != set_file_ids.end() );
[ # # ]
2449 : 0 : size_t num_prev = partial.size();
2450 [ # # ]: 0 : size_t idx = *fileid_iter - fileInfo->sets.start_id;
2451 [ # # ]: 0 : size_t len = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
2452 : 0 : offset = len - num_prev;
2453 [ # # ]: 0 : if( offset > count )
2454 : : { // Still don't have all
2455 [ # # ]: 0 : partial.insert( partial.end(), buffer, buffer + count );
2456 : 0 : continue;
2457 : : }
2458 : :
2459 [ # # ]: 0 : partial.insert( partial.end(), buffer, buffer + offset );
2460 [ # # ]: 0 : if( file_ids_out )
2461 : : {
2462 [ # # ]: 0 : out_hint = copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, &partial[0], partial.size(),
2463 [ # # ]: 0 : *file_ids_out );
2464 : : }
2465 : : else
2466 : : {
2467 [ # # # # ]: 0 : switch( mode )
2468 : : {
2469 : : size_t valid;
2470 : : case CONTENT:
2471 [ # # ]: 0 : if( setMeta[idx][3] & ranged_flag )
2472 : : {
2473 [ # # ][ # # ]: 0 : if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
[ # # ]
2474 [ # # ]: 0 : Range range;
2475 [ # # ][ # # ]: 0 : convert_range_to_handle( &partial[0], len / 2, range );
2476 [ # # ][ # # ]: 0 : rval = moab()->add_entities( h, range );
2477 : : }
2478 : : else
2479 : : {
2480 [ # # ][ # # ]: 0 : convert_id_to_handle( &partial[0], len, valid );
2481 [ # # ][ # # ]: 0 : rval = moab()->add_entities( h, &partial[0], valid );
[ # # ]
2482 : : }
2483 : 0 : break;
2484 : : case CHILD:
2485 [ # # ][ # # ]: 0 : convert_id_to_handle( &partial[0], len, valid );
2486 [ # # ][ # # ]: 0 : rval = moab()->add_child_meshsets( h, &partial[0], valid );
[ # # ]
2487 : 0 : break;
2488 : : case PARENT:
2489 [ # # ][ # # ]: 0 : convert_id_to_handle( &partial[0], len, valid );
2490 [ # # ][ # # ]: 0 : rval = moab()->add_parent_meshsets( h, &partial[0], valid );
[ # # ]
2491 : 0 : break;
2492 : : default:
2493 : 0 : break;
2494 : : }
2495 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2496 : : }
2497 : :
2498 [ # # ]: 0 : ++fileid_iter;
2499 : 0 : ++h;
2500 : 0 : partial.clear();
2501 : : }
2502 : :
2503 : : // Process contents for all entities for which we
2504 : : // have read the complete list
2505 [ + + ]: 2713 : while( offset < count )
2506 : : {
2507 [ + - ][ + - ]: 2567 : assert( fileid_iter != set_file_ids.end() );
[ - + ]
2508 [ + - ]: 2567 : size_t idx = *fileid_iter - fileInfo->sets.start_id;
2509 [ + + ]: 2567 : size_t len = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
2510 : : // If we did not read all of the final entity,
2511 : : // store what we did read to be processed in the
2512 : : // next iteration
2513 [ - + ]: 2567 : if( offset + len > count )
2514 : : {
2515 [ # # ]: 0 : partial.insert( partial.end(), buffer + offset, buffer + count );
2516 : 0 : break;
2517 : : }
2518 : :
2519 [ - + ]: 2567 : if( file_ids_out )
2520 : : {
2521 : : out_hint =
2522 [ # # ]: 0 : copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, buffer + offset, len, *file_ids_out );
2523 : : }
2524 : : else
2525 : : {
2526 [ + + + - ]: 2567 : switch( mode )
2527 : : {
2528 : : size_t valid;
2529 : : case CONTENT:
2530 [ + + ]: 858 : if( setMeta[idx][3] & ranged_flag )
2531 : : {
2532 [ - + ][ # # ]: 202 : if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
[ # # ]
2533 [ + - ]: 202 : Range range;
2534 [ + - ]: 202 : convert_range_to_handle( buffer + offset, len / 2, range );
2535 [ + - ][ + - ]: 202 : rval = moab()->add_entities( h, range );
2536 : : }
2537 : : else
2538 : : {
2539 [ + - ]: 656 : convert_id_to_handle( buffer + offset, len, valid );
2540 [ + - ][ + - ]: 656 : rval = moab()->add_entities( h, buffer + offset, valid );
2541 : : }
2542 : 858 : break;
2543 : : case CHILD:
2544 [ + - ]: 939 : convert_id_to_handle( buffer + offset, len, valid );
2545 [ + - ][ + - ]: 939 : rval = moab()->add_child_meshsets( h, buffer + offset, valid );
2546 : 939 : break;
2547 : : case PARENT:
2548 [ + - ]: 770 : convert_id_to_handle( buffer + offset, len, valid );
2549 [ + - ][ + - ]: 770 : rval = moab()->add_parent_meshsets( h, buffer + offset, valid );
2550 : 770 : break;
2551 : : default:
2552 : 0 : break;
2553 : : }
2554 [ - + ][ # # ]: 2567 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2555 : : }
2556 : :
2557 [ + - ]: 2567 : ++fileid_iter;
2558 : 2567 : ++h;
2559 : 2567 : offset += len;
2560 : : }
2561 : : }
2562 : :
2563 : 292 : return MB_SUCCESS;
2564 : : }
2565 : :
2566 : 0 : ErrorCode ReadHDF5::get_set_contents( const Range& sets, Range& file_ids )
2567 : : {
2568 [ # # ]: 0 : CHECK_OPEN_HANDLES;
2569 : :
2570 [ # # ]: 0 : if( !fileInfo->have_set_contents ) return MB_SUCCESS;
2571 [ # # ]: 0 : dbgOut.tprint( 2, "Reading set contained file IDs\n" );
2572 : : try
2573 : : {
2574 : : mhdf_Status status;
2575 : : long content_len;
2576 [ # # ]: 0 : hid_t contents = mhdf_openSetData( filePtr, &content_len, &status );
2577 [ # # ][ # # ]: 0 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2578 [ # # ]: 0 : ReadHDF5Dataset data( "set contents", contents, nativeParallel, mpiComm, true );
2579 : :
2580 [ # # ]: 0 : return read_set_data( sets, 0, data, CONTENT, &file_ids );
2581 : : }
2582 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
2583 : : {
2584 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
2585 : 0 : }
2586 : : }
2587 : :
2588 : 3 : ErrorCode ReadHDF5::read_adjacencies( hid_t table, long table_len )
2589 : : {
2590 [ + - ]: 3 : CHECK_OPEN_HANDLES;
2591 : :
2592 : : ErrorCode rval;
2593 : : mhdf_Status status;
2594 : :
2595 [ + - ]: 3 : debug_barrier();
2596 : :
2597 [ + - ]: 3 : hid_t read_type = H5Dget_type( table );
2598 [ - + ][ # # ]: 3 : if( read_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2599 [ + - ]: 3 : const bool convert = !H5Tequal( read_type, handleType );
2600 : :
2601 : 3 : EntityHandle* buffer = (EntityHandle*)dataBuffer;
2602 [ + - ]: 3 : size_t chunk_size = bufferSize / H5Tget_size( read_type );
2603 : 3 : size_t remaining = table_len;
2604 : 3 : size_t left_over = 0;
2605 : 3 : size_t offset = 0;
2606 : : dbgOut.printf( 3, "Reading adjacency list in %lu chunks\n",
2607 [ + - ]: 3 : (unsigned long)( remaining + chunk_size - 1 ) / chunk_size );
2608 : 3 : int nn = 0;
2609 [ + + ]: 6 : while( remaining )
2610 : : {
2611 [ + - ]: 3 : dbgOut.printf( 3, "Reading chunk %d of adjacency list\n", ++nn );
2612 : :
2613 [ + - ]: 3 : size_t count = std::min( chunk_size, remaining );
2614 : 3 : count -= left_over;
2615 : 3 : remaining -= count;
2616 : :
2617 [ - + ][ - + ]: 3 : assert_range( buffer + left_over, count );
2618 [ + - ]: 3 : mhdf_readAdjacencyWithOpt( table, offset, count, read_type, buffer + left_over, collIO, &status );
2619 [ + - ][ - + ]: 3 : if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2620 : :
2621 [ - + ]: 3 : if( convert )
2622 : : {
2623 [ # # ]: 0 : herr_t err = H5Tconvert( read_type, handleType, count, buffer + left_over, 0, H5P_DEFAULT );
2624 [ # # ][ # # ]: 0 : if( err < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2625 : : }
2626 : :
2627 : 3 : EntityHandle* iter = buffer;
2628 : 3 : EntityHandle* end = buffer + count + left_over;
2629 [ + + ]: 83 : while( end - iter >= 3 )
2630 : : {
2631 [ + - ]: 80 : EntityHandle h = idMap.find( *iter++ );
2632 : 80 : EntityHandle count2 = *iter++;
2633 [ - + ]: 80 : if( !h )
2634 : : {
2635 : 0 : iter += count2;
2636 : 0 : continue;
2637 : : }
2638 : :
2639 [ - + ][ # # ]: 80 : if( count2 < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2640 : :
2641 [ - + ]: 80 : if( end < count2 + iter )
2642 : : {
2643 : 0 : iter -= 2;
2644 : 0 : break;
2645 : : }
2646 : :
2647 : : size_t valid;
2648 [ + - ]: 80 : convert_id_to_handle( iter, count2, valid, idMap );
2649 [ + - ]: 80 : rval = iFace->add_adjacencies( h, iter, valid, false );
2650 [ - + ][ # # ]: 80 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2651 : :
2652 : 80 : iter += count2;
2653 : : }
2654 : :
2655 : 3 : left_over = end - iter;
2656 [ - + ][ - + ]: 3 : assert_range( (char*)buffer, left_over );
2657 [ - + ][ - + ]: 3 : assert_range( (char*)iter, left_over );
2658 : 3 : memmove( buffer, iter, left_over );
2659 : : }
2660 : :
2661 [ - + ]: 3 : assert( !left_over ); // Unexpected truncation of data
2662 : :
2663 : 3 : return MB_SUCCESS;
2664 : : }
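/* The loop above decodes the adjacency table, which is stored as a flat
 * stream of records: [entity file ID] [n] [adjacent ID 1] .. [adjacent ID n].
 * A minimal stand-alone sketch of that framing, assuming a hypothetical
 * handle_record() consumer (illustrative only, not a MOAB API):
 *
 *   size_t parse_adjacency_buffer( const EntityHandle* buf, size_t len )
 *   {
 *       size_t i = 0;
 *       while( len - i >= 3 )  // smallest record: id, count (>= 1), one adjacency
 *       {
 *           const EntityHandle id = buf[i];
 *           const size_t n = buf[i + 1];
 *           if( i + 2 + n > len ) break;  // record split across buffer reads
 *           handle_record( id, buf + i + 2, n );
 *           i += 2 + n;
 *       }
 *       return i;  // entries consumed; the remainder carries over via memmove
 *   }
 */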
2665 : :
2666 : 616 : ErrorCode ReadHDF5::read_tag( int tag_index )
2667 : : {
2668 [ + - ]: 616 : CHECK_OPEN_HANDLES;
2669 : :
2670 [ + - ]: 616 : dbgOut.tprintf( 2, "Reading tag \"%s\"\n", fileInfo->tags[tag_index].name );
2671 : :
2672 [ + - ]: 616 : debug_barrier();
2673 : :
2674 : : ErrorCode rval;
2675 : : mhdf_Status status;
2676 : 616 : Tag tag = 0;
2677 : 616 : hid_t read_type = -1;
2678 : : bool table_type;
2679 [ + - ]: 616 : rval = create_tag( fileInfo->tags[tag_index], tag, read_type );
2680 [ - + ][ # # ]: 616 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2681 : :
2682 [ + + ]: 616 : if( fileInfo->tags[tag_index].have_sparse )
2683 : : {
2684 : : hid_t handles[3];
2685 : : long num_ent, num_val;
2686 [ + - ]: 235 : mhdf_openSparseTagData( filePtr, fileInfo->tags[tag_index].name, &num_ent, &num_val, handles, &status );
2687 [ + - ][ - + ]: 235 : if( is_error( status ) )
2688 : : {
2689 [ # # ][ # # ]: 0 : if( read_type ) H5Tclose( read_type );
2690 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2691 : : }
2692 : :
2693 : 235 : table_type = false;
2694 [ + + ]: 235 : if( read_type == 0 )
2695 : : {
2696 [ + - ]: 21 : read_type = H5Dget_type( handles[1] );
2697 [ - + ]: 21 : if( read_type == 0 )
2698 : : {
2699 [ # # ]: 0 : mhdf_closeData( filePtr, handles[0], &status );
2700 [ # # ]: 0 : mhdf_closeData( filePtr, handles[1], &status ); // fix: close the value table; handles[0] was already closed above
2701 [ # # ][ # # ]: 0 : if( fileInfo->tags[tag_index].size <= 0 ) mhdf_closeData( filePtr, handles[2], &status );
2702 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2703 : : }
2704 : 21 : table_type = true;
2705 : : }
2706 : :
2707 [ + + ]: 235 : if( fileInfo->tags[tag_index].size > 0 )
2708 : : {
2709 [ + - ]: 162 : dbgOut.printf( 2, "Reading sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
2710 [ + - ]: 162 : rval = read_sparse_tag( tag, read_type, handles[0], handles[1], num_ent );
2711 : : }
2712 : : else
2713 : : {
2714 [ + - ]: 73 : dbgOut.printf( 2, "Reading var-len sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
2715 [ + - ]: 73 : rval = read_var_len_tag( tag, read_type, handles[0], handles[1], handles[2], num_ent, num_val );
2716 : : }
2717 : :
2718 [ + + ]: 235 : if( table_type )
2719 : : {
2720 [ + - ]: 21 : H5Tclose( read_type );
2721 : 21 : read_type = 0;
2722 : : }
2723 : :
2724 [ + - ]: 235 : mhdf_closeData( filePtr, handles[0], &status );
2725 [ + - ][ + - ]: 235 : if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
[ - + ][ - + ]
2726 [ + - ]: 235 : mhdf_closeData( filePtr, handles[1], &status );
2727 [ + - ][ + - ]: 235 : if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
[ - + ][ - + ]
2728 [ + + ]: 235 : if( fileInfo->tags[tag_index].size <= 0 )
2729 : : {
2730 [ + - ]: 73 : mhdf_closeData( filePtr, handles[2], &status );
2731 [ + - ][ + - ]: 73 : if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
[ - + ][ - + ]
2732 : : }
2733 [ - + ]: 235 : if( MB_SUCCESS != rval )
2734 : : {
2735 [ # # ][ # # ]: 0 : if( read_type ) H5Tclose( read_type );
2736 [ # # ][ # # ]: 235 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2737 : : }
2738 : : }
2739 : :
2740 [ + + ]: 794 : for( int j = 0; j < fileInfo->tags[tag_index].num_dense_indices; ++j )
2741 : : {
2742 : : long count;
2743 : 178 : const char* name = 0;
2744 : : mhdf_EntDesc* desc;
2745 : 178 : int elem_idx = fileInfo->tags[tag_index].dense_elem_indices[j];
2746 [ + + ]: 178 : if( elem_idx == -2 )
2747 : : {
2748 : 74 : desc = &fileInfo->sets;
2749 [ + - ]: 74 : name = mhdf_set_type_handle();
2750 : : }
2751 [ + + ]: 104 : else if( elem_idx == -1 )
2752 : : {
2753 : 50 : desc = &fileInfo->nodes;
2754 [ + - ]: 50 : name = mhdf_node_type_handle();
2755 : : }
2756 [ + - ][ + - ]: 54 : else if( elem_idx >= 0 && elem_idx < fileInfo->num_elem_desc )
2757 : : {
2758 : 54 : desc = &fileInfo->elems[elem_idx].desc;
2759 : 54 : name = fileInfo->elems[elem_idx].handle;
2760 : : }
2761 : : else
2762 : : {
2763 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2764 : : }
2765 : :
2766 [ + - ]: 178 : dbgOut.printf( 2, "Read dense data block for tag \"%s\" on \"%s\"\n", fileInfo->tags[tag_index].name, name );
2767 : :
2768 [ + - ]: 178 : hid_t handle = mhdf_openDenseTagData( filePtr, fileInfo->tags[tag_index].name, name, &count, &status );
2769 [ + - ][ - + ]: 178 : if( is_error( status ) )
2770 : : {
2771 : 0 : rval = MB_FAILURE; // rval = error(MB_FAILURE);
2772 : 0 : break;
2773 : : }
2774 : :
2775 [ - + ]: 178 : if( count > desc->count )
2776 : : {
2777 [ # # ]: 0 : mhdf_closeData( filePtr, handle, &status );
2778 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE,
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
2779 : : "Invalid data length for dense tag data: " << name << "/" << fileInfo->tags[tag_index].name );
2780 : : }
2781 : :
2782 : 178 : table_type = false;
2783 [ + + ]: 178 : if( read_type == 0 )
2784 : : {
2785 [ + - ]: 1 : read_type = H5Dget_type( handle );
2786 [ - + ]: 1 : if( read_type == 0 )
2787 : : {
2788 [ # # ]: 0 : mhdf_closeData( filePtr, handle, &status );
2789 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2790 : : }
2791 : 1 : table_type = true;
2792 : : }
2793 : :
2794 [ + - ]: 178 : rval = read_dense_tag( tag, name, read_type, handle, desc->start_id, count );
2795 : :
2796 [ + + ]: 178 : if( table_type )
2797 : : {
2798 [ + - ]: 1 : H5Tclose( read_type );
2799 : 1 : read_type = 0;
2800 : : }
2801 : :
2802 [ + - ]: 178 : mhdf_closeData( filePtr, handle, &status );
2803 [ - + ]: 178 : if( MB_SUCCESS != rval ) break;
2804 [ + - ][ - + ]: 178 : if( is_error( status ) )
2805 : : {
2806 : 0 : rval = MB_FAILURE;
2807 : 0 : break;
2808 : : }
2809 : : }
2810 : :
2811 [ + + ][ + - ]: 616 : if( read_type ) H5Tclose( read_type );
2812 : 616 : return rval;
2813 : : }
2814 : :
2815 : 616 : ErrorCode ReadHDF5::create_tag( const mhdf_TagDesc& info, Tag& handle, hid_t& hdf_type )
2816 : : {
2817 [ + - ]: 616 : CHECK_OPEN_HANDLES;
2818 : :
2819 : : ErrorCode rval;
2820 : : mhdf_Status status;
2821 : : TagType storage;
2822 : : DataType mb_type;
2823 : 616 : bool re_read_default = false;
2824 : :
2825 [ + + - - : 616 : switch( info.storage )
- ]
2826 : : {
2827 : : case mhdf_DENSE_TYPE:
2828 : 107 : storage = MB_TAG_DENSE;
2829 : 107 : break;
2830 : : case mhdf_SPARSE_TYPE:
2831 : 509 : storage = MB_TAG_SPARSE;
2832 : 509 : break;
2833 : : case mhdf_BIT_TYPE:
2834 : 0 : storage = MB_TAG_BIT;
2835 : 0 : break;
2836 : : case mhdf_MESH_TYPE:
2837 : 0 : storage = MB_TAG_MESH;
2838 : 0 : break;
2839 : : default:
2840 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "Invalid storage type for tag '" << info.name << "': " << info.storage );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
2841 : : }
2842 : :
2843 : : // Type-specific stuff
2844 [ - + ]: 616 : if( info.type == mhdf_BITFIELD )
2845 : : {
2846 [ # # ][ # # ]: 0 : if( info.size < 1 || info.size > 8 )
2847 [ # # ][ # # ]: 0 : { MB_SET_ERR( MB_FAILURE, "Invalid bit tag: class is MB_TAG_BIT, num bits = " << info.size ); }
[ # # ][ # # ]
[ # # ][ # # ]
2848 [ # # ][ # # ]: 0 : hdf_type = H5Tcopy( H5T_NATIVE_B8 );
2849 : 0 : mb_type = MB_TYPE_BIT;
2850 [ # # ][ # # ]: 0 : if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2851 : : }
2852 [ + + ]: 616 : else if( info.type == mhdf_OPAQUE )
2853 : : {
2854 : 44 : mb_type = MB_TYPE_OPAQUE;
2855 : :
2856 : : // Check for user-provided type
2857 : : Tag type_handle;
2858 [ + - ]: 44 : std::string tag_type_name = "__hdf5_tag_type_";
2859 [ + - ]: 44 : tag_type_name += info.name;
2860 [ + - ]: 44 : rval = iFace->tag_get_handle( tag_type_name.c_str(), sizeof( hid_t ), MB_TYPE_OPAQUE, type_handle );
2861 [ - + ]: 44 : if( MB_SUCCESS == rval )
2862 : : {
2863 : 0 : EntityHandle root = 0;
2864 [ # # ]: 0 : rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
2865 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2866 [ # # ]: 0 : hdf_type = H5Tcopy( hdf_type );
2867 : 0 : re_read_default = true;
2868 : : }
2869 [ + - ]: 44 : else if( MB_TAG_NOT_FOUND == rval )
2870 : : {
2871 : 44 : hdf_type = 0;
2872 : : }
2873 : : else
2874 [ # # ][ # # ]: 0 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2875 : :
2876 [ - + ][ # # ]: 44 : if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ + - ]
2877 : : }
2878 : : else
2879 : : {
2880 [ + + - + : 572 : switch( info.type )
- ]
2881 : : {
2882 : : case mhdf_INTEGER:
2883 [ + - ]: 412 : hdf_type = H5T_NATIVE_INT;
2884 : 412 : mb_type = MB_TYPE_INTEGER;
2885 : 412 : break;
2886 : : case mhdf_FLOAT:
2887 [ + - ]: 81 : hdf_type = H5T_NATIVE_DOUBLE;
2888 : 81 : mb_type = MB_TYPE_DOUBLE;
2889 : 81 : break;
2890 : : case mhdf_BOOLEAN:
2891 [ # # ]: 0 : hdf_type = H5T_NATIVE_UINT;
2892 : 0 : mb_type = MB_TYPE_INTEGER;
2893 : 0 : break;
2894 : : case mhdf_ENTITY_ID:
2895 : 79 : hdf_type = handleType;
2896 : 79 : mb_type = MB_TYPE_HANDLE;
2897 : 79 : break;
2898 : : default:
2899 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2900 : : }
2901 : :
2902 [ + + ]: 572 : if( info.size > 1 )
2903 : : { // Array
2904 : 70 : hsize_t tmpsize = info.size;
2905 : : #if defined( H5Tarray_create_vers ) && H5Tarray_create_vers > 1
2906 [ + - ]: 70 : hdf_type = H5Tarray_create2( hdf_type, 1, &tmpsize );
2907 : : #else
2908 : : hdf_type = H5Tarray_create( hdf_type, 1, &tmpsize, NULL );
2909 : : #endif
2910 : : }
2911 : : else
2912 : : {
2913 [ + - ]: 502 : hdf_type = H5Tcopy( hdf_type );
2914 : : }
2915 [ - + ][ # # ]: 572 : if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2916 : : }
2917 : :
2918 : : // If default or global/mesh value in file, read it.
2919 [ + + ][ - + ]: 616 : if( info.default_value || info.global_value )
2920 : : {
2921 [ - + ]: 307 : if( re_read_default )
2922 : : {
2923 [ # # ]: 0 : mhdf_getTagValues( filePtr, info.name, hdf_type, info.default_value, info.global_value, &status );
2924 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) )
2925 : : {
2926 [ # # ][ # # ]: 0 : if( hdf_type ) H5Tclose( hdf_type );
2927 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
[ # # ][ # # ]
[ # # ][ # # ]
2928 : : }
2929 : : }
2930 : :
2931 [ + + ]: 307 : if( MB_TYPE_HANDLE == mb_type )
2932 : : {
2933 [ + - ]: 21 : if( info.default_value )
2934 : : {
2935 [ + - ]: 21 : rval = convert_id_to_handle( (EntityHandle*)info.default_value, info.default_value_size );
2936 [ - + ]: 21 : if( MB_SUCCESS != rval )
2937 : : {
2938 [ # # ][ # # ]: 0 : if( hdf_type ) H5Tclose( hdf_type );
2939 [ # # ][ # # ]: 21 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2940 : : }
2941 : : }
2942 [ - + ]: 21 : if( info.global_value )
2943 : : {
2944 [ # # ]: 0 : rval = convert_id_to_handle( (EntityHandle*)info.global_value, info.global_value_size );
2945 [ # # ]: 0 : if( MB_SUCCESS != rval )
2946 : : {
2947 [ # # ][ # # ]: 0 : if( hdf_type ) H5Tclose( hdf_type );
2948 [ # # ][ # # ]: 21 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2949 : : }
2950 : : }
2951 : : }
2952 : : }
2953 : :
2954 : : // Get tag handle, creating if necessary
2955 [ + + ]: 616 : if( info.size < 0 )
2956 : : rval = iFace->tag_get_handle( info.name, info.default_value_size, mb_type, handle,
2957 [ + - ]: 78 : storage | MB_TAG_CREAT | MB_TAG_VARLEN | MB_TAG_DFTOK, info.default_value );
2958 : : else
2959 : 538 : rval = iFace->tag_get_handle( info.name, info.size, mb_type, handle, storage | MB_TAG_CREAT | MB_TAG_DFTOK,
2960 [ + - ]: 538 : info.default_value );
2961 [ - + ]: 616 : if( MB_SUCCESS != rval )
2962 : : {
2963 [ # # ][ # # ]: 0 : if( hdf_type ) H5Tclose( hdf_type );
2964 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "Tag type in file does not match type in database for \"" << info.name << "\"" );
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
2965 : : }
2966 : :
2967 [ + + ]: 616 : if( info.global_value )
2968 : : {
2969 : 286 : EntityHandle root = 0;
2970 [ + - ]: 286 : if( info.size > 0 )
2971 : : { // Fixed-length tag
2972 [ + - ]: 286 : rval = iFace->tag_set_data( handle, &root, 1, info.global_value );
2973 : : }
2974 : : else
2975 : : {
2976 : 0 : int tag_size = info.global_value_size;
2977 [ # # ]: 0 : rval = iFace->tag_set_by_ptr( handle, &root, 1, &info.global_value, &tag_size );
2978 : : }
2979 [ - + ]: 286 : if( MB_SUCCESS != rval )
2980 : : {
2981 [ # # ][ # # ]: 0 : if( hdf_type ) H5Tclose( hdf_type );
2982 [ # # ][ # # ]: 286 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
2983 : : }
2984 : : }
2985 : :
2986 : 616 : return MB_SUCCESS;
2987 : : }
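/* All of the storage/type plumbing above funnels into a single
 * tag_get_handle() call with MB_TAG_CREAT. A minimal sketch for a
 * fixed-size integer tag (tag name and values are illustrative):
 *
 *   Tag t;
 *   const int def_val = -1;
 *   ErrorCode rv = iFace->tag_get_handle( "EXAMPLE_TAG", 1, MB_TYPE_INTEGER, t,
 *                                         MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );
 *
 * Variable-length tags use the same call with MB_TAG_VARLEN added and the
 * size argument giving the default value's length, as in the code above.
 */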
2988 : :
2989 : 178 : ErrorCode ReadHDF5::read_dense_tag( Tag tag_handle, const char* ent_name, hid_t hdf_read_type, hid_t data,
2990 : : long start_id, long num_values )
2991 : : {
2992 [ + - ]: 178 : CHECK_OPEN_HANDLES;
2993 : :
2994 : : ErrorCode rval;
2995 : : DataType mb_type;
2996 : :
2997 [ + - ]: 178 : rval = iFace->tag_get_data_type( tag_handle, mb_type );
2998 [ - + ][ # # ]: 178 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
2999 : :
3000 : : int read_size;
3001 [ + - ]: 178 : rval = iFace->tag_get_bytes( tag_handle, read_size );
3002 [ - + ]: 178 : if( MB_SUCCESS != rval ) // Wrong function for variable-length tags
3003 [ # # ][ # # ]: 0 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
3004 : : // if (MB_TYPE_BIT == mb_type)
3005 : : // read_size = (read_size + 7) / 8; // Convert bits to bytes, rounding up (the +7 gives the ceiling)
3006 : :
3007 [ + - ]: 178 : if( hdf_read_type )
3008 : : { // If not opaque
3009 [ + - ]: 178 : hsize_t hdf_size = H5Tget_size( hdf_read_type );
3010 [ - + ][ # # ]: 178 : if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3011 : : }
3012 : :
3013 : : // Get actual entities read from file
3014 [ + - ][ + - ]: 356 : Range file_ids, handles;
3015 [ + - ][ + - ]: 178 : Range::iterator f_ins = file_ids.begin(), h_ins = handles.begin();
3016 : 178 : IDMap::iterator l, u;
3017 [ + - ]: 178 : l = idMap.lower_bound( start_id );
3018 [ + - ]: 178 : u = idMap.lower_bound( start_id + num_values - 1 );
3019 [ + - ][ + - ]: 178 : if( l != idMap.end() && start_id + num_values > l->begin )
[ + - ][ + - ]
[ + - ][ + - ]
[ + - # # ]
3020 : : {
3021 [ + - ][ + - ]: 178 : if( l == u )
3022 : : {
3023 [ + - ][ + - ]: 178 : size_t beg = std::max( start_id, l->begin );
3024 [ + - ][ + - ]: 178 : size_t end = std::min( start_id + num_values, u->begin + u->count ) - 1;
[ + - ]
3025 [ + - ]: 178 : f_ins = file_ids.insert( f_ins, beg, end );
3026 [ + - ][ + - ]: 178 : h_ins = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + ( end - l->begin ) );
[ + - ][ + - ]
[ + - ]
3027 : : }
3028 : : else
3029 : : {
3030 [ # # ][ # # ]: 0 : size_t beg = std::max( start_id, l->begin );
3031 [ # # ][ # # ]: 0 : f_ins = file_ids.insert( f_ins, beg, l->begin + l->count - 1 );
[ # # ]
3032 [ # # ][ # # ]: 0 : h_ins = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + l->count - 1 );
[ # # ][ # # ]
[ # # ]
3033 [ # # ][ # # ]: 0 : for( ++l; l != u; ++l )
[ # # ][ # # ]
3034 : : {
3035 [ # # ][ # # ]: 0 : f_ins = file_ids.insert( f_ins, l->begin, l->begin + l->count - 1 );
[ # # ][ # # ]
3036 [ # # ][ # # ]: 0 : h_ins = handles.insert( h_ins, l->value, l->value + l->count - 1 );
[ # # ][ # # ]
3037 : : }
3038 [ # # ][ # # ]: 0 : if( u != idMap.end() && u->begin < start_id + num_values )
[ # # ][ # # ]
[ # # ][ # # ]
[ # # # # ]
3039 : : {
3040 [ # # ][ # # ]: 0 : size_t end = std::min( start_id + num_values - 1, u->begin + u->count - 1 ); // fix off-by-one: 'end' is inclusive, so both bounds must be last valid ids
[ # # ]
3041 [ # # ][ # # ]: 0 : f_ins = file_ids.insert( f_ins, u->begin, end );
3042 [ # # ][ # # ]: 178 : h_ins = handles.insert( h_ins, u->value, u->value + end - u->begin );
[ # # ][ # # ]
3043 : : }
3044 : : }
3045 : : }
3046 : :
3047 : : // Given that all of the entities for this dense tag data should
3048 : : // have been created as a single contiguous block, the resulting
3049 : : // MOAB handle range would normally be contiguous. That is not
3050 : : // guaranteed, however: partial reads can create lower-dimension
3051 : : // entities and later delete them, so the assertion below stays disabled.
3052 : : // assert(handles.empty() || handles.size() == (handles.back() - handles.front() + 1));
3053 : :
3054 [ + - ]: 356 : std::string tn( "<error>" );
3055 [ + - ]: 178 : iFace->tag_get_name( tag_handle, tn );
3056 [ + - ]: 178 : tn += " data for ";
3057 [ + - ]: 178 : tn += ent_name;
3058 : : try
3059 : : {
3060 [ + - ]: 178 : h_ins = handles.begin();
3061 [ + - ]: 178 : ReadHDF5Dataset reader( tn.c_str(), data, nativeParallel, mpiComm, false );
3062 : 178 : long buffer_size = bufferSize / read_size;
3063 [ + - ]: 178 : reader.set_file_ids( file_ids, start_id, buffer_size, hdf_read_type );
3064 : : dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tn.c_str(), ent_name,
3065 [ + - ][ + - ]: 178 : reader.get_read_count() );
3066 : 178 : int nn = 0;
3067 [ + - ][ + + ]: 356 : while( !reader.done() )
[ + - ]
3068 : : {
3069 [ + - ]: 178 : dbgOut.printf( 3, "Reading chunk %d of \"%s\" data\n", ++nn, tn.c_str() );
3070 : :
3071 : : size_t count;
3072 [ + - ]: 178 : reader.read( dataBuffer, count );
3073 : :
3074 [ - + ]: 178 : if( MB_TYPE_HANDLE == mb_type )
3075 : : {
3076 [ # # ]: 0 : rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * read_size / sizeof( EntityHandle ) );
3077 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3078 : : }
3079 : :
3080 [ + - ]: 178 : Range ents;
3081 : 178 : Range::iterator end = h_ins;
3082 [ + - ]: 178 : end += count;
3083 [ + - ]: 178 : ents.insert( h_ins, end );
3084 : 178 : h_ins = end;
3085 : :
3086 [ + - ]: 178 : rval = iFace->tag_set_data( tag_handle, ents, dataBuffer );
3087 [ - + ]: 178 : if( MB_SUCCESS != rval )
3088 : : {
3089 [ # # ]: 0 : dbgOut.printf( 1, "Internal error setting data for tag \"%s\"\n", tn.c_str() );
3090 [ # # ][ # # ]: 178 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ + - ]
3091 : : }
3092 : 356 : }
3093 : : }
3094 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
3095 : : {
3096 [ # # ]: 0 : dbgOut.printf( 1, "Internal error reading dense data for tag \"%s\"\n", tn.c_str() );
3097 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
3098 : : }
3099 : :
3100 : 356 : return MB_SUCCESS;
3101 : : }
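/* The read loop above is the chunked-dataset pattern used throughout this
 * reader: size a chunk to the scratch buffer, then drain the dataset. A
 * stripped-down sketch with error handling elided (assumes the same
 * ReadHDF5Dataset usage shown above; "name" and value_size are placeholders):
 *
 *   ReadHDF5Dataset reader( "name", dataset, nativeParallel, mpiComm, false );
 *   reader.set_file_ids( file_ids, start_id, bufferSize / value_size, h5type );
 *   while( !reader.done() )
 *   {
 *       size_t count;
 *       reader.read( dataBuffer, count );
 *       // convert and store 'count' values before the next chunk lands
 *   }
 */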
3102 : :
3103 : : // Read entire ID table and for those file IDs corresponding
3104 : : // to entities that we have read from the file add both the
3105 : : // offset into the offset range and the handle into the handle
3106 : : // range. If handles are not ordered, switch to using a vector.
3107 : 235 : ErrorCode ReadHDF5::read_sparse_tag_indices( const char* name, hid_t id_table,
3108 : : EntityHandle start_offset, // Can't put zero in a Range
3109 : : Range& offset_range, Range& handle_range,
3110 : : std::vector< EntityHandle >& handle_vect )
3111 : : {
3112 [ + - ]: 235 : CHECK_OPEN_HANDLES;
3113 : :
3114 [ + - ]: 235 : offset_range.clear();
3115 [ + - ]: 235 : handle_range.clear();
3116 : 235 : handle_vect.clear();
3117 : :
3118 : : ErrorCode rval;
3119 [ + - ]: 235 : Range::iterator handle_hint = handle_range.begin();
3120 [ + - ]: 235 : Range::iterator offset_hint = offset_range.begin();
3121 : :
3122 : 235 : EntityHandle* idbuf = (EntityHandle*)dataBuffer;
3123 : 235 : size_t idbuf_size = bufferSize / sizeof( EntityHandle );
3124 : :
3125 [ + - ]: 470 : std::string tn( name );
3126 [ + - ]: 235 : tn += " indices";
3127 : :
3128 [ - + ]: 235 : assert( start_offset > 0 ); // Can't put zero in a Range
3129 : : try
3130 : : {
3131 [ + - ]: 235 : ReadHDF5Dataset id_reader( tn.c_str(), id_table, nativeParallel, mpiComm, false );
3132 [ + - ]: 235 : id_reader.set_all_file_ids( idbuf_size, handleType );
3133 : 235 : size_t offset = start_offset;
3134 [ + - ][ + - ]: 235 : dbgOut.printf( 3, "Reading file ids for sparse tag \"%s\" in %lu chunks\n", name, id_reader.get_read_count() );
3135 : 235 : int nn = 0;
3136 [ + - ][ + + ]: 470 : while( !id_reader.done() )
[ + - ]
3137 : : {
3138 [ + - ]: 235 : dbgOut.printf( 3, "Reading chunk %d of \"%s\" IDs\n", ++nn, name );
3139 : : size_t count;
3140 [ + - ]: 235 : id_reader.read( idbuf, count );
3141 : :
3142 [ + - ]: 235 : rval = convert_id_to_handle( idbuf, count );
3143 [ - + ][ # # ]: 235 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3144 : :
3145 : : // idbuf will now contain zero-valued handles for those
3146 : : // tag values that correspond to entities we are not reading
3147 : : // from the file.
3148 [ + + ]: 2391 : for( size_t i = 0; i < count; ++i )
3149 : : {
3150 [ + - ]: 2156 : if( idbuf[i] )
3151 : : {
3152 [ + - ]: 2156 : offset_hint = offset_range.insert( offset_hint, offset + i );
3153 [ - + ][ # # ]: 2156 : if( !handle_vect.empty() ) { handle_vect.push_back( idbuf[i] ); }
3154 [ + - ][ + + ]: 2156 : else if( handle_range.empty() || idbuf[i] > handle_range.back() )
[ + - ][ + - ]
[ + - ]
3155 : : {
3156 [ + - ]: 2156 : handle_hint = handle_range.insert( handle_hint, idbuf[i] );
3157 : : }
3158 : : else
3159 : : {
3160 [ # # ][ # # ]: 0 : handle_vect.resize( handle_range.size() );
3161 [ # # ][ # # ]: 0 : std::copy( handle_range.begin(), handle_range.end(), handle_vect.begin() );
[ # # ]
3162 [ # # ]: 0 : handle_range.clear();
3163 [ # # ]: 0 : handle_vect.push_back( idbuf[i] );
3164 [ # # ]: 2156 : dbgOut.print( 2, "Switching to unordered list for tag handle list\n" );
3165 : : }
3166 : : }
3167 : : }
3168 : :
3169 : 235 : offset += count;
3170 : 235 : }
3171 : : }
3172 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
3173 : : {
3174 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
3175 : : }
3176 : :
3177 : 470 : return MB_SUCCESS;
3178 : : }
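/* The handle bookkeeping above starts optimistic: handles accumulate in a
 * Range while they arrive in ascending order, and the code demotes to a
 * plain vector the first time one arrives out of order. A minimal sketch
 * of that policy (h is the next incoming handle; names are illustrative):
 *
 *   if( !handle_vect.empty() )
 *       handle_vect.push_back( h );                          // already demoted
 *   else if( handle_range.empty() || h > handle_range.back() )
 *       handle_hint = handle_range.insert( handle_hint, h ); // still sorted
 *   else
 *   {   // demote exactly once: copy the range out, then append
 *       handle_vect.assign( handle_range.begin(), handle_range.end() );
 *       handle_range.clear();
 *       handle_vect.push_back( h );
 *   }
 */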
3179 : :
3180 : 162 : ErrorCode ReadHDF5::read_sparse_tag( Tag tag_handle, hid_t hdf_read_type, hid_t id_table, hid_t value_table,
3181 : : long /*num_values*/ )
3182 : : {
3183 [ + - ]: 162 : CHECK_OPEN_HANDLES;
3184 : :
3185 : : // Read entire ID table and for those file IDs corresponding
3186 : : // to entities that we have read from the file add both the
3187 : : // offset into the offset range and the handle into the handle
3188 : : // range. If handles are not ordered, switch to using a vector.
3189 : 162 : const EntityHandle base_offset = 1; // Can't put zero in a Range
3190 [ + - ]: 324 : std::vector< EntityHandle > handle_vect;
3191 [ + - ][ + - ]: 324 : Range handle_range, offset_range;
3192 [ + - ]: 324 : std::string tn( "<error>" );
3193 [ + - ]: 162 : iFace->tag_get_name( tag_handle, tn );
3194 : : ErrorCode rval =
3195 [ + - ]: 162 : read_sparse_tag_indices( tn.c_str(), id_table, base_offset, offset_range, handle_range, handle_vect );
3196 [ - + ][ # # ]: 162 : if( MB_SUCCESS != rval ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3197 : :
3198 : : DataType mbtype;
3199 [ + - ]: 162 : rval = iFace->tag_get_data_type( tag_handle, mbtype );
3200 [ - + ][ # # ]: 162 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3201 : :
3202 : : int read_size;
3203 [ + - ]: 162 : rval = iFace->tag_get_bytes( tag_handle, read_size );
3204 [ - + ]: 162 : if( MB_SUCCESS != rval ) // Wrong function for variable-length tags
3205 [ # # ][ # # ]: 0 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
3206 : : // if (MB_TYPE_BIT == mbtype)
3207 : : // read_size = (read_size + 7) / 8; // Convert bits to bytes, rounding up (the +7 gives the ceiling)
3208 : :
3209 [ + - ]: 162 : if( hdf_read_type )
3210 : : { // If not opaque
3211 [ + - ]: 162 : hsize_t hdf_size = H5Tget_size( hdf_read_type );
3212 [ - + ][ # # ]: 162 : if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3213 : : }
3214 : :
3215 : 162 : const int handles_per_tag = read_size / sizeof( EntityHandle );
3216 : :
3217 : : // Now read data values
3218 : 162 : size_t chunk_size = bufferSize / read_size;
3219 : : try
3220 : : {
3221 [ + - ][ + - ]: 162 : ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), value_table, nativeParallel, mpiComm, false );
3222 [ + - ]: 162 : val_reader.set_file_ids( offset_range, base_offset, chunk_size, hdf_read_type );
3223 : : dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tn.c_str(),
3224 [ + - ][ + - ]: 162 : val_reader.get_read_count() );
3225 : 162 : int nn = 0;
3226 : 162 : size_t offset = 0;
3227 [ + - ][ + + ]: 324 : while( !val_reader.done() )
[ + - ]
3228 : : {
3229 [ + - ]: 162 : dbgOut.printf( 3, "Reading chunk %d of \"%s\" values\n", ++nn, tn.c_str() );
3230 : : size_t count;
3231 [ + - ]: 162 : val_reader.read( dataBuffer, count );
3232 [ + + ]: 162 : if( MB_TYPE_HANDLE == mbtype )
3233 : : {
3234 [ + - ]: 32 : rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * handles_per_tag );
3235 [ - + ][ # # ]: 32 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3236 : : }
3237 : :
3238 [ - + ]: 162 : if( !handle_vect.empty() )
3239 : : {
3240 [ # # ][ # # ]: 0 : rval = iFace->tag_set_data( tag_handle, &handle_vect[offset], count, dataBuffer );
3241 : 0 : offset += count;
3242 : : }
3243 : : else
3244 : : {
3245 [ + - ]: 162 : Range r;
3246 [ + - ][ + - ]: 162 : r.merge( handle_range.begin(), handle_range.begin() + count );
[ + - ][ + - ]
3247 [ + - ][ + - ]: 162 : handle_range.erase( handle_range.begin(), handle_range.begin() + count );
[ + - ][ + - ]
3248 [ + - ]: 162 : rval = iFace->tag_set_data( tag_handle, r, dataBuffer );
3249 : : }
3250 [ - + ][ # # ]: 162 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3251 : 162 : }
3252 : : }
3253 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
3254 : : {
3255 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
3256 : : }
3257 : :
3258 : 324 : return MB_SUCCESS;
3259 : : }
3260 : :
3261 : 73 : ErrorCode ReadHDF5::read_var_len_tag( Tag tag_handle, hid_t hdf_read_type, hid_t ent_table, hid_t val_table,
3262 : : hid_t off_table, long /*num_entities*/, long /*num_values*/ )
3263 : : {
3264 [ + - ]: 73 : CHECK_OPEN_HANDLES;
3265 : :
3266 : : ErrorCode rval;
3267 : : DataType mbtype;
3268 : :
3269 [ + - ]: 73 : rval = iFace->tag_get_data_type( tag_handle, mbtype );
3270 [ - + ][ # # ]: 73 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3271 : :
3272 : : // Can't do variable-length bit tags
3273 [ - + ][ # # ]: 73 : if( MB_TYPE_BIT == mbtype ) MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
[ # # ]
3274 : :
3275 : : // If here, MOAB tag must be variable-length
3276 : : int mbsize;
3277 [ + - ][ - + ]: 73 : if( MB_VARIABLE_DATA_LENGTH != iFace->tag_get_bytes( tag_handle, mbsize ) )
3278 : : {
3279 : 0 : assert( false ); MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
3280 : : }
3281 : :
3282 : : int read_size;
3283 [ + - ]: 73 : if( hdf_read_type )
3284 : : {
3285 [ + - ]: 73 : hsize_t hdf_size = H5Tget_size( hdf_read_type );
3286 [ - + ][ # # ]: 73 : if( hdf_size < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3287 : 73 : read_size = hdf_size;
3288 : : }
3289 : : else
3290 : : {
3291 : : // Opaque
3292 : 0 : read_size = 1;
3293 : : }
3294 : :
3295 [ + - ]: 146 : std::string tn( "<error>" );
3296 [ + - ]: 73 : iFace->tag_get_name( tag_handle, tn );
3297 : :
3298 : : // Read entire ID table and for those file IDs corresponding
3299 : : // to entities that we have read from the file add both the
3300 : : // offset into the offset range and the handle into the handle
3301 : : // range. If handles are not ordered, switch to using a vector.
3302 : 73 : const EntityHandle base_offset = 1; // Can't put zero in a Range
3303 [ + - ]: 146 : std::vector< EntityHandle > handle_vect;
3304 [ + - ][ + - ]: 146 : Range handle_range, offset_range;
3305 [ + - ]: 73 : rval = read_sparse_tag_indices( tn.c_str(), ent_table, base_offset, offset_range, handle_range, handle_vect );
3306 : :
3307 : : // This code only works if the id_table is an ordered list.
3308 : : // This assumption was also true for the previous iteration
3309 : : // of this code, but wasn't checked. MOAB's file writer
3310 : : // always writes an ordered list for id_table.
3311 [ - + ][ # # ]: 73 : if( !handle_vect.empty() ) { MB_SET_ERR( MB_FAILURE, "Unordered file ids for variable length tag not supported" ); }
[ # # ][ # # ]
[ # # ][ # # ]
3312 : :
3313 [ - + ]: 146 : class VTReader : public ReadHDF5VarLen
3314 : : {
3315 : : Tag tagHandle;
3316 : : bool isHandle;
3317 : : size_t readSize;
3318 : : ReadHDF5* readHDF5;
3319 : :
3320 : : public:
3321 : 661 : ErrorCode store_data( EntityHandle file_id, void* data, long count, bool )
3322 : : {
3323 : : ErrorCode rval1;
3324 [ + + ]: 661 : if( isHandle )
3325 : : {
3326 [ - + ]: 330 : assert( readSize == sizeof( EntityHandle ) );
3327 [ + - ][ - + ]: 330 : rval1 = readHDF5->convert_id_to_handle( (EntityHandle*)data, count ); MB_CHK_ERR( rval1 );
[ # # ][ # # ]
3328 : : }
3329 : 661 : int n = count;
3330 [ + - ][ + - ]: 661 : return readHDF5->moab()->tag_set_by_ptr( tagHandle, &file_id, 1, &data, &n );
3331 : : }
3332 : 73 : VTReader( DebugOutput& debug_output, void* buffer, size_t buffer_size, Tag tag, bool is_handle_tag,
3333 : : size_t read_size1, ReadHDF5* owner )
3334 : : : ReadHDF5VarLen( debug_output, buffer, buffer_size ), tagHandle( tag ), isHandle( is_handle_tag ),
3335 : 73 : readSize( read_size1 ), readHDF5( owner )
3336 : : {
3337 : 73 : }
3338 : : };
3339 : :
3340 [ + - ]: 146 : VTReader tool( dbgOut, dataBuffer, bufferSize, tag_handle, MB_TYPE_HANDLE == mbtype, read_size, this );
3341 : : try
3342 : : {
3343 : : // Read offsets into value table.
3344 [ + - ]: 73 : std::vector< unsigned > counts;
3345 [ + - ][ + - ]: 146 : Range offsets;
3346 [ + - ][ + - ]: 146 : ReadHDF5Dataset off_reader( ( tn + " offsets" ).c_str(), off_table, nativeParallel, mpiComm, false );
[ + - ]
3347 [ + - ]: 73 : rval = tool.read_offsets( off_reader, offset_range, base_offset, base_offset, offsets, counts );
3348 [ - + ][ # # ]: 73 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3349 : :
3350 : : // Read tag values
3351 [ + - ][ + - ]: 146 : Range empty;
3352 [ + - ][ + - ]: 146 : ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), val_table, nativeParallel, mpiComm, false );
[ + - ]
3353 [ + - ]: 73 : rval = tool.read_data( val_reader, offsets, base_offset, hdf_read_type, handle_range, counts, empty );
3354 [ - + ][ # # ]: 146 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
[ + - ]
3355 : : }
3356 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
3357 : : {
3358 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
3359 : : }
3360 : :
3361 : 146 : return MB_SUCCESS;
3362 : : }
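/* Variable-length values are stored per entity through tag_set_by_ptr(),
 * which takes a pointer-to-pointer plus a per-entity value count, exactly
 * as VTReader::store_data() does above. Minimal sketch ('entity' and
 * 'count' are illustrative):
 *
 *   const void* ptr = dataBuffer;  // this entity's values
 *   int n = (int)count;            // how many values it has
 *   ErrorCode rv = moab()->tag_set_by_ptr( tag_handle, &entity, 1, &ptr, &n );
 */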
3363 : :
3364 : 723 : ErrorCode ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size )
3365 : : {
3366 : 723 : convert_id_to_handle( array, size, idMap );
3367 : 723 : return MB_SUCCESS;
3368 : : }
3369 : :
3370 : 723 : void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, const RangeMap< long, EntityHandle >& id_map )
3371 : : {
3372 [ + + ]: 199966 : for( EntityHandle* const end = array + size; array != end; ++array )
3373 : 199243 : *array = id_map.find( *array );
3374 : 723 : }
3375 : :
3376 : 2445 : void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, size_t& new_size,
3377 : : const RangeMap< long, EntityHandle >& id_map )
3378 : : {
3379 : 2445 : RangeMap< long, EntityHandle >::const_iterator it;
3380 : 2445 : new_size = 0;
3381 [ + + ]: 10485 : for( size_t i = 0; i < size; ++i )
3382 : : {
3383 [ + - ]: 8040 : it = id_map.lower_bound( array[i] );
3384 [ + - ][ + - ]: 8040 : if( it != id_map.end() && it->begin <= (long)array[i] )
[ + - ][ + - ]
[ + - ][ + - ]
[ + - # # ]
3385 [ + - ][ + - ]: 8040 : array[new_size++] = it->value + ( array[i] - it->begin );
3386 : : }
3387 : 2445 : }
3388 : :
3389 : 202 : void ReadHDF5::convert_range_to_handle( const EntityHandle* ranges, size_t num_ranges,
3390 : : const RangeMap< long, EntityHandle >& id_map, Range& merge )
3391 : : {
3392 [ + - ]: 202 : RangeMap< long, EntityHandle >::iterator it = id_map.begin();
3393 [ + - ]: 202 : Range::iterator hint = merge.begin();
3394 [ + + ]: 927 : for( size_t i = 0; i < num_ranges; ++i )
3395 : : {
3396 : 725 : long id = ranges[2 * i];
3397 : 725 : const long end = id + ranges[2 * i + 1];
3398 : : // We assume that 'ranges' is sorted, but check just in case it isn't.
3399 [ + - ][ + - ]: 725 : if( it == id_map.end() || it->begin > id ) it = id_map.begin();
[ + - ][ + - ]
[ - + ][ + - ]
[ - + ][ # # ]
[ # # ]
3400 [ + - ][ + - ]: 725 : it = id_map.lower_bound( it, id_map.end(), id );
3401 [ + - ][ + - ]: 725 : if( it == id_map.end() ) continue;
[ - + ]
3402 [ + - ][ - + ]: 725 : if( id < it->begin ) id = it->begin;
[ # # ]
3403 [ + + ]: 1487 : while( id < end )
3404 : : {
3405 [ + - ][ - + ]: 762 : if( id < it->begin ) id = it->begin;
[ # # ]
3406 [ + - ]: 762 : const long off = id - it->begin;
3407 [ + - ][ + - ]: 762 : long count = std::min( it->count - off, end - id );
3408 : : // It is possible for this new subrange to start after the end,
3409 : : // which would yield a negative count; in that case we are done
3410 : : // with this range, so move on to the next one
3411 [ - + ]: 762 : if( count <= 0 ) break;
3412 [ + - ][ + - ]: 762 : hint = merge.insert( hint, it->value + off, it->value + off + count - 1 );
[ + - ]
3413 : 762 : id += count;
3414 [ + + ]: 762 : if( id < end )
3415 : : {
3416 [ + - ][ + - ]: 37 : if( ++it == id_map.end() ) break;
[ + - ][ - + ]
3417 [ + - ][ - + ]: 37 : if( it->begin > end ) break;
3418 : : }
3419 : : }
3420 : : }
3421 : 202 : }
3422 : :
3423 : 202 : ErrorCode ReadHDF5::convert_range_to_handle( const EntityHandle* array, size_t num_ranges, Range& range )
3424 : : {
3425 : 202 : convert_range_to_handle( array, num_ranges, idMap, range );
3426 : 202 : return MB_SUCCESS;
3427 : : }
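/* Ranged set contents are encoded as (start_id, count) pairs, which is why
 * the loop above steps by two and why an odd length is rejected earlier
 * with MB_INDEX_OUT_OF_RANGE. A worked example of the decoding, assuming
 * the idMap semantics used above (values illustrative):
 *
 *   // ranges = { 10, 3, 50, 2 } encodes file ids 10,11,12,50,51.
 *   // With idMap mapping file ids 1..100 -> handles 0x200..0x263,
 *   // convert_range_to_handle() merges handles 0x209..0x20B and 0x231..0x232.
 */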
3428 : :
3429 : 236 : ErrorCode ReadHDF5::insert_in_id_map( const Range& file_ids, EntityHandle start_id )
3430 : : {
3431 [ + - ]: 236 : IDMap tmp_map;
3432 [ + - ][ + + ]: 236 : bool merge = !idMap.empty() && !file_ids.empty() && idMap.back().begin > (long)file_ids.front();
[ + - ][ + - ]
[ + - ][ + - ]
[ + + ]
3433 [ + + ]: 236 : IDMap& map = merge ? tmp_map : idMap;
3434 [ + - ]: 236 : Range::const_pair_iterator p;
3435 [ + - ][ + - ]: 472 : for( p = file_ids.const_pair_begin(); p != file_ids.const_pair_end(); ++p )
[ + - ][ + - ]
[ + + ]
3436 : : {
3437 [ + - ][ + - ]: 236 : size_t count = p->second - p->first + 1;
3438 [ + - ][ + - ]: 236 : if( !map.insert( p->first, start_id, count ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ - + ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
3439 : 236 : start_id += count;
3440 : : }
3441 [ + + ][ + - ]: 236 : if( merge && !idMap.merge( tmp_map ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ - + ][ - + ]
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ]
3442 : :
3443 : 236 : return MB_SUCCESS;
3444 : : }
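/* idMap stores (file-id begin, count, first handle) runs, so the common
 * case of monotonically increasing reads just appends; only a block that
 * begins before idMap.back() is routed through the scratch map and merged.
 * A minimal sketch of both paths (handles h0/h1 are illustrative):
 *
 *   idMap.insert( 100, h0, 50 );   // fast path: appends run 100..149
 *   IDMap tmp;                     // slow path: out-of-order block
 *   tmp.insert( 10, h1, 5 );       // run 10..14 arrives late
 *   idMap.merge( tmp );            // splice it into sorted position
 */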
3445 : :
3446 : 0 : ErrorCode ReadHDF5::insert_in_id_map( long file_id, EntityHandle handle )
3447 : : {
3448 [ # # ][ # # ]: 0 : if( !idMap.insert( file_id, handle, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3449 : 0 : return MB_SUCCESS;
3450 : : }
3451 : :
3452 : 21 : ErrorCode ReadHDF5::read_qa( EntityHandle )
3453 : : {
3454 [ + - ]: 21 : CHECK_OPEN_HANDLES;
3455 : :
3456 : : mhdf_Status status;
3457 : : // std::vector<std::string> qa_list;
3458 : :
3459 : : int qa_len;
3460 [ + - ]: 21 : char** qa = mhdf_readHistory( filePtr, &qa_len, &status );
3461 [ + - ][ - + ]: 21 : if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
3462 : : // qa_list.resize(qa_len);
3463 [ + + ]: 105 : for( int i = 0; i < qa_len; i++ )
3464 : : {
3465 : : // qa_list[i] = qa[i];
3466 : 84 : free( qa[i] );
3467 : : }
3468 : 21 : free( qa );
3469 : :
3470 : : /** FIXME: how should the QA list be attached to the set? */
3471 : :
3472 : 21 : return MB_SUCCESS;
3473 : : }
3474 : :
3475 : 0 : ErrorCode ReadHDF5::store_file_ids( Tag tag )
3476 : : {
3477 [ # # ]: 0 : CHECK_OPEN_HANDLES;
3478 : :
3479 : : // typedef int tag_type;
3480 : : typedef long tag_type;
3481 : : // 'long' (64 bits) rather than 'int' so that much larger files can be read
3482 : :
3483 : 0 : tag_type* buffer = reinterpret_cast< tag_type* >( dataBuffer );
3484 : 0 : const long buffer_size = bufferSize / sizeof( tag_type );
3485 [ # # ][ # # ]: 0 : for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
[ # # ][ # # ]
[ # # ]
3486 : : {
3487 [ # # ]: 0 : IDMap::Range range = *i;
3488 : :
3489 : : // Make sure the values will fit in the tag type
3490 : 0 : IDMap::key_type rv = range.begin + ( range.count - 1 );
3491 : 0 : tag_type tv = (tag_type)rv;
3492 [ # # ]: 0 : if( (IDMap::key_type)tv != rv )
3493 : : {
3494 : 0 : assert( false );
3495 : 0 : return MB_INDEX_OUT_OF_RANGE;
3496 : : }
3497 : :
3498 [ # # ]: 0 : while( range.count )
3499 : : {
3500 [ # # ]: 0 : long count = buffer_size < range.count ? buffer_size : range.count;
3501 : :
3502 [ # # ]: 0 : Range handles;
3503 [ # # ]: 0 : handles.insert( range.value, range.value + count - 1 );
3504 : 0 : range.value += count;
3505 : 0 : range.count -= count;
3506 [ # # ]: 0 : for( long j = 0; j < count; ++j )
3507 : 0 : buffer[j] = (tag_type)range.begin++;
3508 : :
3509 [ # # ]: 0 : ErrorCode rval = iFace->tag_set_data( tag, handles, buffer );
3510 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) return rval;
3511 : 0 : }
3512 : : }
3513 : :
3514 : 0 : return MB_SUCCESS;
3515 : : }
3516 : :
3517 : 0 : ErrorCode ReadHDF5::store_sets_file_ids()
3518 : : {
3519 [ # # ]: 0 : CHECK_OPEN_HANDLES;
3520 : :
3521 : : // Create a tag that will not be saved, but will be used by the
3522 : : // VisIt plugin to match sets to their file IDs. It has the same
3523 : : // type as the file-id tag defined in ReadParallel.cpp.
3524 : : Tag setFileIdTag;
3525 : 0 : long default_val = 0;
3526 : : ErrorCode rval = iFace->tag_get_handle( "__FILE_ID_FOR_SETS", sizeof( long ), MB_TYPE_OPAQUE, setFileIdTag,
3527 [ # # ]: 0 : ( MB_TAG_DENSE | MB_TAG_CREAT ), &default_val );
3528 : :
3529 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval || 0 == setFileIdTag ) return rval;
3530 : : // typedef int tag_type;
3531 : : typedef long tag_type;
3532 : : // 'long' (64 bits) rather than 'int' so that much larger files can be read
3533 : :
3534 : 0 : tag_type* buffer = reinterpret_cast< tag_type* >( dataBuffer );
3535 : 0 : const long buffer_size = bufferSize / sizeof( tag_type );
3536 [ # # ][ # # ]: 0 : for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
[ # # ][ # # ]
[ # # ]
3537 : : {
3538 [ # # ]: 0 : IDMap::Range range = *i;
3539 [ # # ]: 0 : EntityType htype = iFace->type_from_handle( range.value );
3540 [ # # ]: 0 : if( MBENTITYSET != htype ) continue;
3541 : : // work only with entity sets
3542 : : // Make sure the values will fit in the tag type
3543 : 0 : IDMap::key_type rv = range.begin + ( range.count - 1 );
3544 : 0 : tag_type tv = (tag_type)rv;
3545 [ # # ]: 0 : if( (IDMap::key_type)tv != rv )
3546 : : {
3547 : 0 : assert( false );
3548 : 0 : return MB_INDEX_OUT_OF_RANGE;
3549 : : }
3550 : :
3551 [ # # ]: 0 : while( range.count )
3552 : : {
3553 [ # # ]: 0 : long count = buffer_size < range.count ? buffer_size : range.count;
3554 : :
3555 [ # # ]: 0 : Range handles;
3556 [ # # ]: 0 : handles.insert( range.value, range.value + count - 1 );
3557 : 0 : range.value += count;
3558 : 0 : range.count -= count;
3559 [ # # ]: 0 : for( long j = 0; j < count; ++j )
3560 : 0 : buffer[j] = (tag_type)range.begin++;
3561 : :
3562 [ # # ]: 0 : rval = iFace->tag_set_data( setFileIdTag, handles, buffer );
3563 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) return rval;
3564 : 0 : }
3565 : : }
3566 : 0 : return MB_SUCCESS;
3567 : : }
3568 : :
3569 : 0 : ErrorCode ReadHDF5::read_tag_values( const char* file_name, const char* tag_name, const FileOptions& opts,
3570 : : std::vector< int >& tag_values_out, const SubsetList* subset_list )
3571 : : {
3572 : : ErrorCode rval;
3573 : :
3574 [ # # ]: 0 : rval = set_up_read( file_name, opts );
3575 [ # # ][ # # ]: 0 : if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3576 : :
3577 : : int tag_index;
3578 [ # # ]: 0 : rval = find_int_tag( tag_name, tag_index );
3579 [ # # ]: 0 : if( MB_SUCCESS != rval )
3580 : : {
3581 [ # # ]: 0 : clean_up_read( opts );
3582 [ # # ][ # # ]: 0 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
3583 : : }
3584 : :
3585 [ # # ]: 0 : if( subset_list )
3586 : : {
3587 [ # # ]: 0 : Range file_ids;
3588 [ # # ]: 0 : rval = get_subset_ids( subset_list->tag_list, subset_list->tag_list_length, file_ids );
3589 [ # # ]: 0 : if( MB_SUCCESS != rval )
3590 : : {
3591 [ # # ]: 0 : clean_up_read( opts );
3592 [ # # ][ # # ]: 0 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
3593 : : }
3594 : :
3595 [ # # ]: 0 : rval = read_tag_values_partial( tag_index, file_ids, tag_values_out );
3596 [ # # ]: 0 : if( MB_SUCCESS != rval )
3597 : : {
3598 [ # # ]: 0 : clean_up_read( opts );
3599 [ # # ][ # # ]: 0 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ][ # # ]
3600 : 0 : }
3601 : : }
3602 : : else
3603 : : {
3604 [ # # ]: 0 : rval = read_tag_values_all( tag_index, tag_values_out );
3605 [ # # ]: 0 : if( MB_SUCCESS != rval )
3606 : : {
3607 [ # # ]: 0 : clean_up_read( opts );
3608 [ # # ][ # # ]: 0 : MB_SET_ERR( rval, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
3609 : : }
3610 : : }
3611 : :
3612 [ # # ]: 0 : return clean_up_read( opts );
3613 : : }
3614 : :
3615 : 0 : ErrorCode ReadHDF5::read_tag_values_partial( int tag_index, const Range& file_ids, std::vector< int >& tag_values )
3616 : : {
3617 [ # # ]: 0 : CHECK_OPEN_HANDLES;
3618 : :
3619 : : mhdf_Status status;
3620 : 0 : const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
3621 : : long num_ent, num_val;
3622 : : size_t count;
3623 [ # # ]: 0 : std::string tn( tag.name );
3624 : :
3625 : : // Read sparse values
3626 [ # # ]: 0 : if( tag.have_sparse )
3627 : : {
3628 : : hid_t handles[3];
3629 [ # # ]: 0 : mhdf_openSparseTagData( filePtr, tag.name, &num_ent, &num_val, handles, &status );
3630 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
3631 : :
3632 : : try
3633 : : {
3634 : : // Read all entity handles and fill 'offsets' with ranges of
3635 : : // offsets into the data table for entities that we want.
3636 [ # # ]: 0 : Range offsets;
3637 : 0 : long* buffer = reinterpret_cast< long* >( dataBuffer );
3638 : 0 : const long buffer_size = bufferSize / sizeof( long );
3639 [ # # ][ # # ]: 0 : ReadHDF5Dataset ids( ( tn + " ids" ).c_str(), handles[0], nativeParallel, mpiComm );
3640 [ # # ][ # # ]: 0 : ids.set_all_file_ids( buffer_size, H5T_NATIVE_LONG );
3641 : 0 : size_t offset = 0;
3642 [ # # ][ # # ]: 0 : dbgOut.printf( 3, "Reading sparse IDs for tag \"%s\" in %lu chunks\n", tag.name, ids.get_read_count() );
3643 : 0 : int nn = 0;
3644 [ # # ][ # # ]: 0 : while( !ids.done() )
3645 : : {
3646 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d of IDs for \"%s\"\n", ++nn, tag.name );
3647 [ # # ]: 0 : ids.read( buffer, count );
3648 : :
3649 [ # # ]: 0 : std::sort( buffer, buffer + count );
3650 [ # # ]: 0 : Range::iterator ins = offsets.begin();
3651 [ # # ]: 0 : Range::const_iterator i = file_ids.begin();
3652 [ # # ]: 0 : for( size_t j = 0; j < count; ++j )
3653 : : {
3654 [ # # ][ # # ]: 0 : while( i != file_ids.end() && (long)*i < buffer[j] )
[ # # ][ # # ]
[ # # ][ # # ]
[ # # # # ]
3655 [ # # ]: 0 : ++i;
3656 [ # # ][ # # ]: 0 : if( i == file_ids.end() ) break;
[ # # ]
3657 [ # # ][ # # ]: 0 : if( (long)*i == buffer[j] ) { ins = offsets.insert( ins, j + offset, j + offset ); }
[ # # ]
3658 : : }
3659 : :
3660 : 0 : offset += count;
3661 : : }
3662 : :
3663 : 0 : tag_values.clear();
3664 [ # # ][ # # ]: 0 : tag_values.reserve( offsets.size() );
3665 : 0 : const size_t data_buffer_size = bufferSize / sizeof( int );
3666 : 0 : int* data_buffer = reinterpret_cast< int* >( dataBuffer );
3667 [ # # ][ # # ]: 0 : ReadHDF5Dataset vals( ( tn + " sparse vals" ).c_str(), handles[1], nativeParallel, mpiComm );
3668 [ # # ][ # # ]: 0 : vals.set_file_ids( offsets, 0, data_buffer_size, H5T_NATIVE_INT );
3669 [ # # ][ # # ]: 0 : dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tag.name, vals.get_read_count() );
3670 : 0 : nn = 0;
3671 : : // Should normally be only one read call, unless the sparse nature
3672 : : // of file_ids forces the reader to split the read into multiple chunks
3673 [ # # ][ # # ]: 0 : while( !vals.done() )
3674 : : {
3675 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d of values for \"%s\"\n", ++nn, tag.name );
3676 [ # # ]: 0 : vals.read( data_buffer, count );
3677 [ # # ]: 0 : tag_values.insert( tag_values.end(), data_buffer, data_buffer + count );
3678 : 0 : }
3679 : : }
3680 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
3681 : : {
3682 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
3683 : : }
3684 : : }
3685 : :
3686 [ # # ]: 0 : std::sort( tag_values.begin(), tag_values.end() );
3687 [ # # ][ # # ]: 0 : tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
3688 : :
3689 : : // Read dense values
3690 [ # # ][ # # ]: 0 : std::vector< int > prev_data, curr_data;
3691 [ # # ]: 0 : for( int i = 0; i < tag.num_dense_indices; ++i )
3692 : : {
3693 : 0 : int grp = tag.dense_elem_indices[i];
3694 : 0 : const char* gname = 0;
3695 : 0 : mhdf_EntDesc* desc = 0;
3696 [ # # ]: 0 : if( grp == -1 )
3697 : : {
3698 [ # # ]: 0 : gname = mhdf_node_type_handle();
3699 : 0 : desc = &fileInfo->nodes;
3700 : : }
3701 [ # # ]: 0 : else if( grp == -2 )
3702 : : {
3703 [ # # ]: 0 : gname = mhdf_set_type_handle();
3704 : 0 : desc = &fileInfo->sets;
3705 : : }
3706 : : else
3707 : : {
3708 [ # # ][ # # ]: 0 : assert( grp >= 0 && grp < fileInfo->num_elem_desc );
3709 : 0 : gname = fileInfo->elems[grp].handle;
3710 : 0 : desc = &fileInfo->elems[grp].desc;
3711 : : }
3712 : :
3713 [ # # ]: 0 : Range::iterator s = file_ids.lower_bound( ( EntityHandle )( desc->start_id ) );
3714 [ # # ][ # # ]: 0 : Range::iterator e = Range::lower_bound( s, file_ids.end(), ( EntityHandle )( desc->start_id ) + desc->count );
3715 [ # # ]: 0 : Range subset;
3716 [ # # ]: 0 : subset.merge( s, e );
3717 : :
3718 [ # # ]: 0 : hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
3719 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
3720 : :
3721 : : try
3722 : : {
3723 : 0 : curr_data.clear();
3724 [ # # ][ # # ]: 0 : tag_values.reserve( subset.size() );
3725 : 0 : const size_t data_buffer_size = bufferSize / sizeof( int );
3726 : 0 : int* data_buffer = reinterpret_cast< int* >( dataBuffer );
3727 : :
3728 [ # # ][ # # ]: 0 : ReadHDF5Dataset reader( ( tn + " dense vals" ).c_str(), handle, nativeParallel, mpiComm );
3729 [ # # ][ # # ]: 0 : reader.set_file_ids( subset, desc->start_id, data_buffer_size, H5T_NATIVE_INT );
3730 : : dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tag.name,
3731 [ # # ][ # # ]: 0 : fileInfo->elems[grp].handle, reader.get_read_count() );
3732 : 0 : int nn = 0;
3733 : : // Should normally be only one read call, unless the sparse nature
3734 : : // of file_ids forces the reader to split the read into multiple chunks
3735 [ # # ][ # # ]: 0 : while( !reader.done() )
3736 : : {
3737 [ # # ]: 0 : dbgOut.printf( 3, "Reading chunk %d of \"%s\"/\"%s\"\n", ++nn, tag.name, fileInfo->elems[grp].handle );
3738 [ # # ]: 0 : reader.read( data_buffer, count );
3739 [ # # ]: 0 : curr_data.insert( curr_data.end(), data_buffer, data_buffer + count );
3740 : 0 : }
3741 : : }
3742 [ # # ]: 0 : catch( ReadHDF5Dataset::Exception )
3743 : : {
3744 [ # # # # : 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
# # # # #
# ]
3745 : : }
3746 : :
3747 [ # # ]: 0 : std::sort( curr_data.begin(), curr_data.end() );
3748 [ # # ][ # # ]: 0 : curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
3749 : 0 : prev_data.clear();
3750 : 0 : tag_values.swap( prev_data );
3751 : : std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
3752 [ # # ][ # # ]: 0 : std::back_inserter( tag_values ) );
[ # # ]
3753 : 0 : }
3754 : :
3755 : 0 : return MB_SUCCESS;
3756 : : }
3757 : :
3758 : 0 : ErrorCode ReadHDF5::read_tag_values_all( int tag_index, std::vector< int >& tag_values )
3759 : : {
3760 [ # # ]: 0 : CHECK_OPEN_HANDLES;
3761 : :
3762 : : mhdf_Status status;
3763 : 0 : const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
3764 : : long junk, num_val;
3765 : :
3766 : : // Read sparse values
3767 [ # # ]: 0 : if( tag.have_sparse )
3768 : : {
3769 : : hid_t handles[3];
3770 [ # # ]: 0 : mhdf_openSparseTagData( filePtr, tag.name, &junk, &num_val, handles, &status );
3771 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
3772 : :
3773 [ # # ]: 0 : mhdf_closeData( filePtr, handles[0], &status );
3774 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) )
3775 : : {
3776 [ # # ][ # # ]: 0 : MB_SET_ERR_CONT( mhdf_message( &status ) );
[ # # ][ # # ]
[ # # ][ # # ]
3777 [ # # ]: 0 : mhdf_closeData( filePtr, handles[1], &status );
3778 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
3779 : : }
3780 : :
3781 [ # # ]: 0 : hid_t file_type = H5Dget_type( handles[1] );
3782 [ # # ]: 0 : tag_values.resize( num_val );
3783 [ # # ][ # # ]: 0 : mhdf_readTagValuesWithOpt( handles[1], 0, num_val, file_type, &tag_values[0], collIO, &status );
3784 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) )
3785 : : {
3786 [ # # ][ # # ]: 0 : MB_SET_ERR_CONT( mhdf_message( &status ) );
[ # # ][ # # ]
[ # # ][ # # ]
3787 [ # # ]: 0 : H5Tclose( file_type );
3788 [ # # ]: 0 : mhdf_closeData( filePtr, handles[1], &status );
3789 [ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
[ # # ][ # # ]
[ # # ]
3790 : : }
3791 [ # # ][ # # ]: 0 : H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &tag_values[0], 0, H5P_DEFAULT );
[ # # ]
3792 [ # # ]: 0 : H5Tclose( file_type );
3793 : :
3794 [ # # ]: 0 : mhdf_closeData( filePtr, handles[1], &status );
3795 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
3796 : : }
3797 : :
3798 [ # # ]: 0 : std::sort( tag_values.begin(), tag_values.end() );
3799 [ # # ][ # # ]: 0 : tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
3800 : :
3801 : : // Read dense values
3802 [ # # ][ # # ]: 0 : std::vector< int > prev_data, curr_data;
3803 [ # # ]: 0 : for( int i = 0; i < tag.num_dense_indices; ++i )
3804 : : {
3805 : 0 : int grp = tag.dense_elem_indices[i];
3806 : 0 : const char* gname = 0;
3807 [ # # ]: 0 : if( grp == -1 )
3808 [ # # ]: 0 : gname = mhdf_node_type_handle();
3809 [ # # ]: 0 : else if( grp == -2 )
3810 [ # # ]: 0 : gname = mhdf_set_type_handle();
3811 : : else
3812 : 0 : gname = fileInfo->elems[grp].handle;
3813 [ # # ]: 0 : hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
3814 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
3815 : :
3816 [ # # ]: 0 : hid_t file_type = H5Dget_type( handle );
3817 [ # # ]: 0 : curr_data.resize( num_val );
3818 [ # # ][ # # ]: 0 : mhdf_readTagValuesWithOpt( handle, 0, num_val, file_type, &curr_data[0], collIO, &status );
3819 [ # # ][ # # ]: 0 : if( mhdf_isError( &status ) )
3820 : : {
3821 [ # # ][ # # ][ # # ][ # # ][ # # ][ # # ]: 0 : MB_SET_ERR_CONT( mhdf_message( &status ) );
3822 [ # # ]: 0 : H5Tclose( file_type );
3823 [ # # ]: 0 : mhdf_closeData( filePtr, handle, &status );
3824 [ # # ][ # # ][ # # ][ # # ][ # # ]: 0 : MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
3825 : : }
3826 : :
3827 [ # # ][ # # ][ # # ]: 0 : H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &curr_data[0], 0, H5P_DEFAULT );
3828 [ # # ]: 0 : H5Tclose( file_type );
3829 [ # # ]: 0 : mhdf_closeData( filePtr, handle, &status );
3830 [ # # ][ # # ][ # # ][ # # ][ # # ][ # # ][ # # ][ # # ]: 0 : if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
3831 : :
3832 [ # # ]: 0 : std::sort( curr_data.begin(), curr_data.end() );
3833 [ # # ][ # # ]: 0 : curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
3834 : :
3835 : 0 : prev_data.clear();
3836 : 0 : tag_values.swap( prev_data );
3837 : : std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
3838 [ # # ][ # # ]: 0 : std::back_inserter( tag_values ) );
3839 : : }
3840 : :
3841 : 0 : return MB_SUCCESS;
3842 : : }
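
Both the sparse and dense branches above follow the same HDF5 pattern: read the dataset in its on-disk type, then convert the buffer to native int in place with H5Tconvert. A hedged sketch of that pattern against the plain HDF5 C API (read_as_native_int is a hypothetical helper; it assumes the file type is no wider than a native int, the same assumption the code above makes when converting in place):

    #include <hdf5.h>
    #include <vector>

    // Read an entire integer dataset in its file type, then convert the
    // buffer to native int in place. Returns a negative value on failure.
    herr_t read_as_native_int( hid_t dset, size_t count, std::vector< int >& out )
    {
        hid_t file_type = H5Dget_type( dset );
        out.resize( count );
        // Passing the file type as the memory type suppresses conversion
        // during the read; the data lands in the buffer byte-for-byte.
        herr_t rval = H5Dread( dset, file_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, &out[0] );
        if( rval >= 0 )
            rval = H5Tconvert( file_type, H5T_NATIVE_INT, count, &out[0], 0, H5P_DEFAULT );
        H5Tclose( file_type );
        return rval;
    }

Deferring conversion to H5Tconvert keeps the read itself a raw copy, which matters when the same read is issued collectively across ranks, as with the collIO property used above.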
3843 : 0 : void ReadHDF5::print_times()
3844 : : {
3845 : : #ifdef MOAB_HAVE_MPI
3846 [ # # ]: 0 : if( myPcomm ) // reduce only in parallel runs; the original '!myPcomm' test dereferenced a null pointer below
3847 : : {
3848 : : double recv[NUM_TIMES];
3849 [ # # ][ # # ][ # # ]: 0 : MPI_Reduce( (void*)_times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
3850 [ # # ]: 0 : for( int i = 0; i < NUM_TIMES; i++ )
3851 : 0 : _times[i] = recv[i]; // keep the element-wise max across all ranks
3852 : : }
3853 [ # # ]: 0 : if( !myPcomm || 0 == myPcomm->proc_config().proc_rank() )
3854 : : {
3855 : : #endif
3856 : :
3857 : 0 : std::cout << "ReadHDF5: " << _times[TOTAL_TIME] << std::endl
3858 : 0 : << " get set meta " << _times[SET_META_TIME] << std::endl
3859 : 0 : << " partial subsets " << _times[SUBSET_IDS_TIME] << std::endl
3860 : 0 : << " partition time " << _times[GET_PARTITION_TIME] << std::endl
3861 : 0 : << " get set ids " << _times[GET_SET_IDS_TIME] << std::endl
3862 : 0 : << " set contents " << _times[GET_SET_CONTENTS_TIME] << std::endl
3863 : 0 : << " polyhedra " << _times[GET_POLYHEDRA_TIME] << std::endl
3864 : 0 : << " elements " << _times[GET_ELEMENTS_TIME] << std::endl
3865 : 0 : << " nodes " << _times[GET_NODES_TIME] << std::endl
3866 : 0 : << " node adjacency " << _times[GET_NODEADJ_TIME] << std::endl
3867 : 0 : << " side elements " << _times[GET_SIDEELEM_TIME] << std::endl
3868 : 0 : << " update connectivity " << _times[UPDATECONN_TIME] << std::endl
3869 : 0 : << " adjacency " << _times[ADJACENCY_TIME] << std::endl
3870 : 0 : << " delete non_adj " << _times[DELETE_NON_SIDEELEM_TIME] << std::endl
3871 : 0 : << " recursive sets " << _times[READ_SET_IDS_RECURS_TIME] << std::endl
3872 : 0 : << " find contain_sets " << _times[FIND_SETS_CONTAINING_TIME] << std::endl
3873 : 0 : << " read sets " << _times[READ_SETS_TIME] << std::endl
3874 : 0 : << " read tags " << _times[READ_TAGS_TIME] << std::endl
3875 : 0 : << " store file ids " << _times[STORE_FILE_IDS_TIME] << std::endl
3876 : 0 : << " read qa records " << _times[READ_QA_TIME] << std::endl;
3877 : :
3878 : : #ifdef MOAB_HAVE_MPI
3879 : : }
3880 : : #endif
3881 : 0 : }
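
The reduction in print_times() collects one number per timer: the element-wise maximum over all ranks, so rank 0 reports the slowest rank's time for each phase. A minimal sketch of that pattern (reduce_times_to_root is a hypothetical name, not part of MOAB):

    #include <mpi.h>
    #include <vector>

    // Replace each local timer with the maximum value observed on any
    // rank. Only rank 0 receives the result, matching the MPI_MAX /
    // root-0 usage above; other ranks keep their local values.
    void reduce_times_to_root( double* times, int n, MPI_Comm comm )
    {
        std::vector< double > recv( n );
        MPI_Reduce( times, recv.data(), n, MPI_DOUBLE, MPI_MAX, 0, comm );
        int rank;
        MPI_Comm_rank( comm, &rank );
        if( rank == 0 )
            for( int i = 0; i < n; ++i )
                times[i] = recv[i];
    }

Taking the maximum rather than the average is the usual choice for I/O phase timings, since the slowest rank determines when a collective read actually completes.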
3882 : :
3883 [ + - ][ + - ]: 228 : } // namespace moab