MOAB: Mesh Oriented datABase  (version 5.2.1)
ReadHDF5.cpp
00001 /**
00002  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00003  * storing and accessing finite element mesh data.
00004  *
00005  * Copyright 2004 Sandia Corporation.  Under the terms of Contract
00006  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00007  * retains certain rights in this software.
00008  *
00009  * This library is free software; you can redistribute it and/or
00010  * modify it under the terms of the GNU Lesser General Public
00011  * License as published by the Free Software Foundation; either
00012  * version 2.1 of the License, or (at your option) any later version.
00013  *
00014  */
00015 
00016 //-------------------------------------------------------------------------
00017 // Filename      : ReadHDF5.cpp
00018 //
00019 // Purpose       : HDF5 Reader
00020 //
00021 // Creator       : Jason Kraftcheck
00022 //
00023 // Creation Date : 04/18/04
00024 //-------------------------------------------------------------------------
00025 
00026 #include <assert.h>
00027 #include "moab/MOABConfig.h"
00028 /* Include our MPI header before any HDF5 because otherwise
00029    it will get included indirectly by HDF5 */
00030 #ifdef MOAB_HAVE_MPI
00031 #include "moab_mpi.h"
00032 #include "moab/ParallelComm.hpp"
00033 #endif
00034 #include <H5Tpublic.h>
00035 #include <H5Ppublic.h>
00036 #include <H5Epublic.h>
00037 #include "moab/Interface.hpp"
00038 #include "Internals.hpp"
00039 #include "MBTagConventions.hpp"
00040 #include "ReadHDF5.hpp"
00041 #include "moab/CN.hpp"
00042 #include "moab/FileOptions.hpp"
00043 #include "moab/CpuTimer.hpp"
00044 #ifdef MOAB_HAVE_HDF5_PARALLEL
00045 #include <H5FDmpi.h>
00046 #include <H5FDmpio.h>
00047 #endif
00048 //#include "WriteHDF5.hpp"
00049 
00050 #include <stdlib.h>
00051 #include <string.h>
00052 #include <limits>
00053 #include <functional>
00054 #include <iostream>
00055 
00056 #include "IODebugTrack.hpp"
00057 #include "ReadHDF5Dataset.hpp"
00058 #include "ReadHDF5VarLen.hpp"
00059 #include "moab_mpe.h"
00060 
00061 namespace moab
00062 {
00063 
00064 /* If true, coordinates are read in blocked format (all X values, then
00065  * all Y values, then all Z values). If false, all coordinates for a
00066  * given vertex are read together (interleaved).
00067  */
00068 const bool DEFAULT_BLOCKED_COORDINATE_IO = false;
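// For example, with three vertices v0, v1, v2 the two layouts read as:
//   blocked:     x0 x1 x2   y0 y1 y2   z0 z1 z2
//   interleaved: x0 y0 z0   x1 y1 z1   x2 y2 z2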
00069 
00070 /* If true, the file is opened first by the root node only to read the
00071  * summary; the file is then closed and the summary is broadcast to all
00072  * nodes, after which all nodes open the file in parallel to read data.
00073  * If false, the file is opened once in parallel and all nodes read the
00074  * summary data. */
00075 const bool DEFAULT_BCAST_SUMMARY = true;
00076 
00077 /* If true, and all processors are to read the same block of data,
00078  * read it on one processor and broadcast it to the others rather
00079  * than using collective I/O.
00080  */
00081 const bool DEFAULT_BCAST_DUPLICATE_READS = true;
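// A hypothetical usage sketch: each of these defaults can be overridden per
// read through the file-options string handled in set_up_read() below, e.g.
//   iface->load_file( "mesh.h5m", 0, "BLOCKED_COORDINATE_IO=yes;BCAST_SUMMARY=no" );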
00082 
00083 #define READ_HDF5_BUFFER_SIZE ( 128 * 1024 * 1024 )
00084 
00085 #define assert_range( PTR, CNT )            \
00086     assert( ( PTR ) >= (void*)dataBuffer ); \
00087     assert( ( ( PTR ) + ( CNT ) ) <= (void*)( dataBuffer + bufferSize ) );
00088 
00089 // Call the \c error function during HDF5 library errors to make
00090 // it easier to trap such errors in the debugger. This function
00091 // gets registered with the HDF5 library as a callback. It
00092 // works the same as the default (H5Eprint), except that it
00093 // also calls the \c error function (as a no-op) so a breakpoint there fires.
00094 #if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
00095 static herr_t handle_hdf5_error( hid_t stack, void* data )
00096 {
00097     ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
00098     herr_t result                 = 0;
00099     if( h->func ) result = ( *h->func )( stack, h->data ); MB_CHK_ERR_CONT( MB_FAILURE );
00100     return result;
00101 }
00102 #else
00103 static herr_t handle_hdf5_error( void* data )
00104 {
00105     ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
00106     herr_t result                 = 0;
00107     if( h->func ) result = ( *h->func )( h->data ); MB_CHK_ERR_CONT( MB_FAILURE );
00108     return result;
00109 }
00110 #endif
00111 
00112 static void copy_sorted_file_ids( const EntityHandle* sorted_ids, long num_ids, Range& results )
00113 {
00114     Range::iterator hint = results.begin();
00115     long i               = 0;
00116     while( i < num_ids )
00117     {
00118         EntityHandle start = sorted_ids[i];
00119         for( ++i; i < num_ids && sorted_ids[i] == 1 + sorted_ids[i - 1]; ++i )
00120             ;
00121         hint = results.insert( hint, start, sorted_ids[i - 1] );
00122     }
00123 }
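// For example, given the sorted array { 3, 4, 5, 9, 10 }, the loop above
// detects the consecutive runs and inserts the two ranges [3,5] and [9,10],
// reusing the previous insertion point as a hint.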
00124 
00125 static void intersect( const mhdf_EntDesc& group, const Range& range, Range& result )
00126 {
00127     Range::const_iterator s, e;
00128     s = Range::lower_bound( range.begin(), range.end(), group.start_id );
00129     e = Range::lower_bound( s, range.end(), group.start_id + group.count );
00130     result.merge( s, e );
00131 }
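// For example, a group with start_id = 100 and count = 10 describes the
// contiguous file-ID block [100, 110); intersecting it with the range
// [95, 104] merges only [100, 104] into the result.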
00132 
00133 #define debug_barrier() debug_barrier_line( __LINE__ )
00134 void ReadHDF5::debug_barrier_line( int lineno )
00135 {
00136 #ifdef MOAB_HAVE_MPI
00137     if( mpiComm )
00138     {
00139         const unsigned threshold   = 2;
00140         static unsigned long count = 0;
00141         if( dbgOut.get_verbosity() >= threshold )
00142         {
00143             dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
00144             MPI_Barrier( *mpiComm );
00145         }
00146     }
00147 #else
00148     if( lineno ) {}
00149 #endif
00150 }
00151 
00152 class CheckOpenReadHDF5Handles
00153 {
00154     int fileline;
00155     mhdf_FileHandle handle;
00156     int enter_count;
00157 
00158   public:
00159     CheckOpenReadHDF5Handles( mhdf_FileHandle file, int line )
00160         : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
00161     {
00162     }
00163     ~CheckOpenReadHDF5Handles()
00164     {
00165         int new_count = mhdf_countOpenHandles( handle );
00166         if( new_count != enter_count )
00167         {
00168             std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
00169                       << "Open at entrance: " << enter_count << std::endl
00170                       << "Open at exit:     " << new_count << std::endl;
00171         }
00172     }
00173 };
00174 
00175 #ifdef NDEBUG
00176 #define CHECK_OPEN_HANDLES
00177 #else
00178 #define CHECK_OPEN_HANDLES CheckOpenReadHDF5Handles check_open_handles_( filePtr, __LINE__ )
00179 #endif
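// Usage sketch: a function that opens HDF5 objects places CHECK_OPEN_HANDLES;
// at its top; the RAII guard above then compares the open mhdf handle count
// at entry and exit and reports any leak (the macro is a no-op in NDEBUG
// builds).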
00180 
00181 ReaderIface* ReadHDF5::factory( Interface* iface )
00182 {
00183     return new ReadHDF5( iface );
00184 }
00185 
00186 ReadHDF5::ReadHDF5( Interface* iface )
00187     : bufferSize( READ_HDF5_BUFFER_SIZE ), dataBuffer( NULL ), iFace( iface ), filePtr( 0 ), fileInfo( NULL ),
00188       readUtil( NULL ), handleType( 0 ), indepIO( H5P_DEFAULT ), collIO( H5P_DEFAULT ), myPcomm( NULL ),
00189       debugTrack( false ), dbgOut( stderr ), nativeParallel( false ), mpiComm( NULL ),
00190       blockedCoordinateIO( DEFAULT_BLOCKED_COORDINATE_IO ), bcastSummary( DEFAULT_BCAST_SUMMARY ),
00191       bcastDuplicateReads( DEFAULT_BCAST_DUPLICATE_READS ), setMeta( 0 ), timer( NULL ), cputime( false )
00192 {
00193 }
00194 
00195 ErrorCode ReadHDF5::init()
00196 {
00197     ErrorCode rval;
00198 
00199     if( readUtil ) return MB_SUCCESS;
00200 
00201     indepIO = collIO = H5P_DEFAULT;
00202     // WriteHDF5::register_known_tag_types(iFace);
00203 
00204     handleType = H5Tcopy( H5T_NATIVE_ULONG );
00205     if( handleType < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00206 
00207     if( H5Tset_size( handleType, sizeof( EntityHandle ) ) < 0 )
00208     {
00209         H5Tclose( handleType );
00210         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00211     }
00212 
00213     rval = iFace->query_interface( readUtil );
00214     if( MB_SUCCESS != rval )
00215     {
00216         H5Tclose( handleType );
00217         MB_SET_ERR( rval, "ReadHDF5 Failure" );
00218     }
00219 
00220     idMap.clear();
00221     fileInfo   = 0;
00222     debugTrack = false;
00223     myPcomm    = 0;
00224 
00225     return MB_SUCCESS;
00226 }
00227 
00228 ReadHDF5::~ReadHDF5()
00229 {
00230     if( !readUtil )  // init() failed.
00231         return;
00232 
00233     delete[] setMeta;
00234     setMeta = 0;
00235     iFace->release_interface( readUtil );
00236     H5Tclose( handleType );
00237 }
00238 
00239 ErrorCode ReadHDF5::set_up_read( const char* filename, const FileOptions& opts )
00240 {
00241     ErrorCode rval;
00242     mhdf_Status status;
00243     indepIO = collIO = H5P_DEFAULT;
00244     mpiComm          = 0;
00245 
00246     if( MB_SUCCESS != init() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00247 
00248 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
00249     herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
00250 #else
00251     herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
00252 #endif
00253     if( err < 0 )
00254     {
00255         errorHandler.func = 0;
00256         errorHandler.data = 0;
00257     }
00258     else
00259     {
00260 #if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
00261         err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
00262 #else
00263         err           = H5Eset_auto( &handle_hdf5_error, &errorHandler );
00264 #endif
00265         if( err < 0 )
00266         {
00267             errorHandler.func = 0;
00268             errorHandler.data = 0;
00269         }
00270     }
00271 
00272     // Set up debug output
00273     int tmpval;
00274     if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) )
00275     {
00276         dbgOut.set_verbosity( tmpval );
00277         dbgOut.set_prefix( "H5M " );
00278     }
00279     dbgOut.limit_output_to_first_N_procs( 32 );
00280 
00281     // Enable some extra checks for reads. Note: amongst other things this
00282     // will print errors if the entire file is not read, so if doing a
00283     // partial read that is not a parallel read, this should be disabled.
00284     debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );
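    // A usage sketch: passing "DEBUG_BINIO" in the file-options string turns
    // on these IODebugTrack checks; avoid it for serial partial reads, which
    // legitimately leave portions of the file unread.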
00285 
00286     opts.get_toggle_option( "BLOCKED_COORDINATE_IO", DEFAULT_BLOCKED_COORDINATE_IO, blockedCoordinateIO );
00287     opts.get_toggle_option( "BCAST_SUMMARY", DEFAULT_BCAST_SUMMARY, bcastSummary );
00288     opts.get_toggle_option( "BCAST_DUPLICATE_READS", DEFAULT_BCAST_DUPLICATE_READS, bcastDuplicateReads );
00289 
00290     // Handle parallel options
00291     bool use_mpio  = ( MB_SUCCESS == opts.get_null_option( "USE_MPIO" ) );
00292     rval           = opts.match_option( "PARALLEL", "READ_PART" );
00293     bool parallel  = ( rval != MB_ENTITY_NOT_FOUND );
00294     nativeParallel = ( rval == MB_SUCCESS );
00295     if( use_mpio && !parallel )
00296     { MB_SET_ERR( MB_NOT_IMPLEMENTED, "'USE_MPIO' option specified without 'PARALLEL' option" ); }
00297 
00298     // This option is intended for testing purposes only, and thus
00299     // is not documented anywhere.  Decreasing the buffer size can
00300     // expose bugs that would otherwise only be seen when reading
00301     // very large files.
00302     rval = opts.get_int_option( "BUFFER_SIZE", bufferSize );
00303     if( MB_SUCCESS != rval ) { bufferSize = READ_HDF5_BUFFER_SIZE; }
00304     else if( bufferSize < (int)std::max( sizeof( EntityHandle ), sizeof( void* ) ) )
00305     {
00306         MB_CHK_ERR( MB_INVALID_SIZE );
00307     }
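    // A testing sketch: passing e.g. "BUFFER_SIZE=4096" in the file-options
    // string shrinks the read buffer so the chunked read loops below are
    // exercised even on small files.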
00308 
00309     dataBuffer = (char*)malloc( bufferSize );
00310     if( !dataBuffer ) MB_CHK_ERR( MB_MEMORY_ALLOCATION_FAILED );
00311 
00312     if( use_mpio || nativeParallel )
00313     {
00314 
00315 #ifndef MOAB_HAVE_HDF5_PARALLEL
00316         free( dataBuffer );
00317         dataBuffer = NULL;
00318         MB_SET_ERR( MB_NOT_IMPLEMENTED, "MOAB not configured with parallel HDF5 support" );
00319 #else
00320         MPI_Info info = MPI_INFO_NULL;
00321         std::string cb_size;
00322         rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
00323         if( MB_SUCCESS == rval )
00324         {
00325             MPI_Info_create( &info );
00326             MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
00327         }
00328 
00329         int pcomm_no = 0;
00330         rval         = opts.get_int_option( "PARALLEL_COMM", pcomm_no );
00331         if( rval == MB_TYPE_OUT_OF_RANGE ) { MB_SET_ERR( rval, "Invalid value for PARALLEL_COMM option" ); }
00332         myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
00333         if( 0 == myPcomm ) { myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD ); }
00334         const int rank = myPcomm->proc_config().proc_rank();
00335         dbgOut.set_rank( rank );
00336         dbgOut.limit_output_to_first_N_procs( 32 );
00337         mpiComm = new MPI_Comm( myPcomm->proc_config().proc_comm() );
00338 
00339 #ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
00340         dbgOut.print( 1, "H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS is not defined\n" );
00341 #endif
00342 
00343         // Open the file in serial on root to read summary
00344         dbgOut.tprint( 1, "Getting file summary\n" );
00345         fileInfo = 0;
00346 
00347         hid_t file_prop;
00348         if( bcastSummary )
00349         {
00350             unsigned long size = 0;
00351             if( rank == 0 )
00352             {
00353                 file_prop = H5Pcreate( H5P_FILE_ACCESS );
00354                 err       = H5Pset_fapl_mpio( file_prop, MPI_COMM_SELF, MPI_INFO_NULL );
00355                 assert( file_prop >= 0 );
00356                 assert( err >= 0 );
00357                 filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
00358                 H5Pclose( file_prop );
00359 
00360                 if( filePtr )
00361                 {
00362                     fileInfo = mhdf_getFileSummary( filePtr, handleType, &status,
00363                                                     0 );  // no extra set info
00364                     if( !is_error( status ) )
00365                     {
00366                         size             = fileInfo->total_size;
00367                         fileInfo->offset = (unsigned char*)fileInfo;
00368                     }
00369                 }
00370                 mhdf_closeFile( filePtr, &status );
00371                 if( fileInfo && mhdf_isError( &status ) )
00372                 {
00373                     free( fileInfo );
00374                     fileInfo = NULL;
00375                 }
00376             }
00377 
00378             dbgOut.tprint( 1, "Communicating file summary\n" );
00379             int mpi_err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, myPcomm->proc_config().proc_comm() );
00380             if( mpi_err || !size ) return MB_FAILURE;
00381 
00382             if( rank != 0 ) fileInfo = reinterpret_cast< mhdf_FileDesc* >( malloc( size ) );
00383 
00384             MPI_Bcast( fileInfo, size, MPI_BYTE, 0, myPcomm->proc_config().proc_comm() );
00385 
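            // The received block contains internal pointers that are valid
            // only in the root's address space; the root stored its own base
            // address in fileInfo->offset above so that each receiver can
            // relocate those pointers into its local copy.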
00386             if( rank != 0 ) mhdf_fixFileDesc( fileInfo, reinterpret_cast< mhdf_FileDesc* >( fileInfo->offset ) );
00387         }
00388 
00389         file_prop = H5Pcreate( H5P_FILE_ACCESS );
00390         err       = H5Pset_fapl_mpio( file_prop, myPcomm->proc_config().proc_comm(), info );
00391         assert( file_prop >= 0 );
00392         assert( err >= 0 );
00393 
00394         collIO = H5Pcreate( H5P_DATASET_XFER );
00395         assert( collIO > 0 );
00396         err = H5Pset_dxpl_mpio( collIO, H5FD_MPIO_COLLECTIVE );
00397         assert( err >= 0 );
00398         indepIO = nativeParallel ? H5P_DEFAULT : collIO;
00399 
00400         // Re-open file in parallel
00401         dbgOut.tprintf( 1, "Opening \"%s\" for parallel IO\n", filename );
00402         filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
00403 
00404         H5Pclose( file_prop );
00405         if( !filePtr )
00406         {
00407             free( dataBuffer );
00408             dataBuffer = NULL;
00409             H5Pclose( indepIO );
00410             if( collIO != indepIO ) H5Pclose( collIO );
00411             collIO = indepIO = H5P_DEFAULT;
00412             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
00413         }
00414 
00415         if( !bcastSummary )
00416         {
00417             fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
00418             if( is_error( status ) )
00419             {
00420                 free( dataBuffer );
00421                 dataBuffer = NULL;
00422                 mhdf_closeFile( filePtr, &status );
00423                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00424             }
00425         }
00426 #endif  // MOAB_HAVE_HDF5_PARALLEL
00427     }
00428     else
00429     {
00430         // Open the file
00431         filePtr = mhdf_openFile( filename, 0, NULL, handleType, &status );
00432         if( !filePtr )
00433         {
00434             free( dataBuffer );
00435             dataBuffer = NULL;
00436             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
00437         }
00438 
00439         // Get file info
00440         fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
00441         if( is_error( status ) )
00442         {
00443             free( dataBuffer );
00444             dataBuffer = NULL;
00445             mhdf_closeFile( filePtr, &status );
00446             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00447         }
00448     }
00449 
00450     ReadHDF5Dataset::default_hyperslab_selection_limit();
00451     int hslimit;
00452     rval = opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit );
00453     if( MB_SUCCESS == rval && hslimit > 0 )
00454         ReadHDF5Dataset::set_hyperslab_selection_limit( hslimit );
00455     else
00456         ReadHDF5Dataset::default_hyperslab_selection_limit();
00457     if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
00458         ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
00459     {
00460         ReadHDF5Dataset::append_hyperslabs();
00461         if( MB_SUCCESS != opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit ) )
00462             ReadHDF5Dataset::set_hyperslab_selection_limit( std::numeric_limits< int >::max() );
00463         dbgOut.print( 1, "Using H5S_APPEND for hyperslab selection\n" );
00464     }
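    // A hypothetical usage sketch: "HYPERSLAB_SELECT_LIMIT=2048" caps the
    // number of hyperslabs per HDF5 selection, while "HYPERSLAB_APPEND"
    // (where supported) switches to appended hyperslab selections with an
    // effectively unbounded limit, as set up above.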
00465 
00466     return MB_SUCCESS;
00467 }
00468 
00469 ErrorCode ReadHDF5::clean_up_read( const FileOptions& )
00470 {
00471     HDF5ErrorHandler handler;
00472 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
00473     herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
00474 #else
00475     herr_t err = H5Eget_auto( &handler.func, &handler.data );
00476 #endif
00477     if( err >= 0 && handler.func == &handle_hdf5_error )
00478     {
00479         assert( handler.data == &errorHandler );
00480 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
00481         H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
00482 #else
00483         H5Eset_auto( errorHandler.func, errorHandler.data );
00484 #endif
00485     }
00486 
00487     free( dataBuffer );
00488     dataBuffer = NULL;
00489     free( fileInfo );
00490     fileInfo = NULL;
00491     delete mpiComm;
00492     mpiComm = 0;
00493 
00494     if( indepIO != H5P_DEFAULT ) H5Pclose( indepIO );
00495     if( collIO != indepIO ) H5Pclose( collIO );
00496     collIO = indepIO = H5P_DEFAULT;
00497 
00498     delete[] setMeta;
00499     setMeta = 0;
00500 
00501     mhdf_Status status;
00502     mhdf_closeFile( filePtr, &status );
00503     filePtr = 0;
00504     return is_error( status ) ? MB_FAILURE : MB_SUCCESS;
00505 }
00506 
00507 ErrorCode ReadHDF5::load_file( const char* filename, const EntityHandle* file_set, const FileOptions& opts,
00508                                const ReaderIface::SubsetList* subset_list, const Tag* file_id_tag )
00509 {
00510     ErrorCode rval;
00511 
00512     rval = set_up_read( filename, opts );
00513     if( MB_SUCCESS != rval )
00514     {
00515         clean_up_read( opts );
00516         return rval;
00517     }
00518     // See if we need to report times
00519 
00520     rval = opts.get_null_option( "CPUTIME" );
00521     if( MB_SUCCESS == rval )
00522     {
00523         cputime = true;
00524         timer   = new CpuTimer;
00525         for( int i = 0; i < NUM_TIMES; i++ )
00526             _times[i] = 0;
00527     }
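    // A usage sketch: passing "CPUTIME" in the file-options string enables
    // the per-phase timers accumulated in _times[] and reported by
    // print_times() at the end of the read.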
00528 
00529     // We read the entire set description table regardless of whether the
00530     // read is partial or complete, serial or parallel
00531     rval = read_all_set_meta();
00532 
00533     if( cputime ) _times[SET_META_TIME] = timer->time_elapsed();
00534     if( subset_list && MB_SUCCESS == rval )
00535         rval = load_file_partial( subset_list->tag_list, subset_list->tag_list_length, subset_list->num_parts,
00536                                   subset_list->part_number, opts );
00537     else
00538         rval = load_file_impl( opts );
00539 
00540     if( MB_SUCCESS == rval && file_id_tag )
00541     {
00542         dbgOut.tprint( 1, "Storing file IDs in tag\n" );
00543         rval = store_file_ids( *file_id_tag );
00544     }
00545     ErrorCode rval3 = opts.get_null_option( "STORE_SETS_FILEIDS" );
00546     if( MB_SUCCESS == rval3 )
00547     {
00548         rval = store_sets_file_ids();
00549         if( MB_SUCCESS != rval ) return rval;
00550     }
00551 
00552     if( cputime ) _times[STORE_FILE_IDS_TIME] = timer->time_elapsed();
00553 
00554     if( MB_SUCCESS == rval && 0 != file_set )
00555     {
00556         dbgOut.tprint( 1, "Reading QA records\n" );
00557         rval = read_qa( *file_set );
00558     }
00559 
00560     if( cputime ) _times[READ_QA_TIME] = timer->time_elapsed();
00561     dbgOut.tprint( 1, "Cleaning up\n" );
00562     ErrorCode rval2 = clean_up_read( opts );
00563     if( rval == MB_SUCCESS && rval2 != MB_SUCCESS ) rval = rval2;
00564 
00565     if( MB_SUCCESS == rval )
00566         dbgOut.tprint( 1, "Read finished.\n" );
00567     else
00568     {
00569         std::string msg;
00570         iFace->get_last_error( msg );
00571         dbgOut.tprintf( 1, "READ FAILED (ERROR CODE %s): %s\n", ErrorCodeStr[rval], msg.c_str() );
00572     }
00573 
00574     if( cputime )
00575     {
00576         _times[TOTAL_TIME] = timer->time_since_birth();
00577         print_times();
00578         delete timer;
00579     }
00580     if( H5P_DEFAULT != collIO ) H5Pclose( collIO );
00581     if( H5P_DEFAULT != indepIO ) H5Pclose( indepIO );
00582     collIO = indepIO = H5P_DEFAULT;
00583 
00584     return rval;
00585 }
00586 
00587 ErrorCode ReadHDF5::load_file_impl( const FileOptions& )
00588 {
00589     ErrorCode rval;
00590     mhdf_Status status;
00591     int i;
00592 
00593     CHECK_OPEN_HANDLES;
00594 
00595     dbgOut.tprint( 1, "Reading all nodes...\n" );
00596     Range ids;
00597     if( fileInfo->nodes.count )
00598     {
00599         ids.insert( fileInfo->nodes.start_id, fileInfo->nodes.start_id + fileInfo->nodes.count - 1 );
00600         rval = read_nodes( ids );
00601         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00602     }
00603 
00604     dbgOut.tprint( 1, "Reading all element connectivity...\n" );
00605     std::vector< int > polyhedra;  // Need to do these last so that faces are loaded
00606     for( i = 0; i < fileInfo->num_elem_desc; ++i )
00607     {
00608         if( CN::EntityTypeFromName( fileInfo->elems[i].type ) == MBPOLYHEDRON )
00609         {
00610             polyhedra.push_back( i );
00611             continue;
00612         }
00613 
00614         rval = read_elems( i );
00615         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00616     }
00617     for( std::vector< int >::iterator it = polyhedra.begin(); it != polyhedra.end(); ++it )
00618     {
00619         rval = read_elems( *it );
00620         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00621     }
00622 
00623     dbgOut.tprint( 1, "Reading all sets...\n" );
00624     ids.clear();
00625     if( fileInfo->sets.count )
00626     {
00627         ids.insert( fileInfo->sets.start_id, fileInfo->sets.start_id + fileInfo->sets.count - 1 );
00628         rval = read_sets( ids );
00629         if( rval != MB_SUCCESS ) { MB_SET_ERR( rval, "ReadHDF5 Failure" ); }
00630     }
00631 
00632     dbgOut.tprint( 1, "Reading all adjacencies...\n" );
00633     for( i = 0; i < fileInfo->num_elem_desc; ++i )
00634     {
00635         if( !fileInfo->elems[i].have_adj ) continue;
00636 
00637         long table_len;
00638         hid_t table = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &table_len, &status );
00639         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00640 
00641         rval = read_adjacencies( table, table_len );
00642         mhdf_closeData( filePtr, table, &status );
00643         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00644         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00645     }
00646 
00647     dbgOut.tprint( 1, "Reading all tags...\n" );
00648     for( i = 0; i < fileInfo->num_tag_desc; ++i )
00649     {
00650         rval = read_tag( i );
00651         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00652     }
00653 
00654     dbgOut.tprint( 1, "Core read finished.  Cleaning up...\n" );
00655     return MB_SUCCESS;
00656 }
00657 
00658 ErrorCode ReadHDF5::find_int_tag( const char* name, int& index )
00659 {
00660     for( index = 0; index < fileInfo->num_tag_desc; ++index )
00661         if( !strcmp( name, fileInfo->tags[index].name ) ) break;
00662 
00663     if( index == fileInfo->num_tag_desc )
00664     { MB_SET_ERR( MB_TAG_NOT_FOUND, "File does not contain subset tag '" << name << "'" ); }
00665 
00666     if( fileInfo->tags[index].type != mhdf_INTEGER || fileInfo->tags[index].size != 1 )
00667     { MB_SET_ERR( MB_TAG_NOT_FOUND, "Tag '" << name << "' does not contain a single integer value" ); }
00668 
00669     return MB_SUCCESS;
00670 }
00671 
00672 ErrorCode ReadHDF5::get_subset_ids( const ReaderIface::IDTag* subset_list, int subset_list_length, Range& file_ids )
00673 {
00674     ErrorCode rval;
00675 
00676     for( int i = 0; i < subset_list_length; ++i )
00677     {
00678         int tag_index;
00679         rval = find_int_tag( subset_list[i].tag_name, tag_index );
00680         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00681 
00682         Range tmp_file_ids;
00683         if( !subset_list[i].num_tag_values ) { rval = get_tagged_entities( tag_index, tmp_file_ids ); }
00684         else
00685         {
00686             std::vector< int > ids( subset_list[i].tag_values,
00687                                     subset_list[i].tag_values + subset_list[i].num_tag_values );
00688             std::sort( ids.begin(), ids.end() );
00689             rval = search_tag_values( tag_index, ids, tmp_file_ids );
00690             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00691         }
00692 
00693         if( tmp_file_ids.empty() ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
00694 
00695         if( i == 0 )
00696             file_ids.swap( tmp_file_ids );
00697         else
00698             file_ids = intersect( tmp_file_ids, file_ids );
00699     }
00700 
00701     return MB_SUCCESS;
00702 }
00703 
00704 ErrorCode ReadHDF5::get_partition( Range& tmp_file_ids, int num_parts, int part_number )
00705 {
00706     CHECK_OPEN_HANDLES;
00707 
00708     // Check that the tag only identified sets
00709     if( (unsigned long)fileInfo->sets.start_id > tmp_file_ids.front() )
00710     {
00711         dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
00712         tmp_file_ids.erase( tmp_file_ids.begin(), tmp_file_ids.lower_bound( (EntityHandle)fileInfo->sets.start_id ) );
00713     }
00714     unsigned long set_end = (unsigned long)fileInfo->sets.start_id + fileInfo->sets.count;
00715     if( tmp_file_ids.back() >= set_end )
00716     {
00717         dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
00718         tmp_file_ids.erase( tmp_file_ids.upper_bound( (EntityHandle)set_end ), tmp_file_ids.end() );
00719     }
00720 
00721     Range::iterator s   = tmp_file_ids.begin();
00722     size_t num_per_proc = tmp_file_ids.size() / num_parts;
00723     size_t num_extra    = tmp_file_ids.size() % num_parts;
00724     Range::iterator e;
00725     if( part_number < (long)num_extra )
00726     {
00727         s += ( num_per_proc + 1 ) * part_number;
00728         e = s;
00729         e += ( num_per_proc + 1 );
00730     }
00731     else
00732     {
00733         s += num_per_proc * part_number + num_extra;
00734         e = s;
00735         e += num_per_proc;
00736     }
00737     tmp_file_ids.erase( e, tmp_file_ids.end() );
00738     tmp_file_ids.erase( tmp_file_ids.begin(), s );
00739 
00740     return MB_SUCCESS;
00741 }
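// For example, get_partition() above distributes 10 sets across num_parts = 3
// as num_per_proc = 3 with num_extra = 1: part 0 keeps 4 sets and parts 1 and
// 2 keep 3 each, erasing everything outside the part's window.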
00742 
00743 ErrorCode ReadHDF5::load_file_partial( const ReaderIface::IDTag* subset_list, int subset_list_length, int num_parts,
00744                                        int part_number, const FileOptions& opts )
00745 {
00746     mhdf_Status status;
00747 
00748     static MPEState mpe_event( "ReadHDF5", "yellow" );
00749 
00750     mpe_event.start( "gather parts" );
00751 
00752     CHECK_OPEN_HANDLES;
00753 
00754     for( int i = 0; i < subset_list_length; ++i )
00755     {
00756         dbgOut.printf( 2, "Select by \"%s\" with num_tag_values = %d\n", subset_list[i].tag_name,
00757                        subset_list[i].num_tag_values );
00758         if( subset_list[i].num_tag_values )
00759         {
00760             assert( 0 != subset_list[i].tag_values );
00761             dbgOut.printf( 2, "  \"%s\" values = { %d", subset_list[i].tag_name, subset_list[i].tag_values[0] );
00762             for( int j = 1; j < subset_list[i].num_tag_values; ++j )
00763                 dbgOut.printf( 2, ", %d", subset_list[i].tag_values[j] );
00764             dbgOut.printf( 2, " }\n" );
00765         }
00766     }
00767     if( num_parts ) dbgOut.printf( 2, "Partition with num_parts = %d and part_number = %d\n", num_parts, part_number );
00768 
00769     dbgOut.tprint( 1, "RETRIEVING TAGGED ENTITIES\n" );
00770 
00771     Range file_ids;
00772     ErrorCode rval = get_subset_ids( subset_list, subset_list_length, file_ids );
00773     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00774 
00775     if( cputime ) _times[SUBSET_IDS_TIME] = timer->time_elapsed();
00776 
00777     if( num_parts )
00778     {
00779         /*if (num_parts>(int)file_ids.size())
00780         {
00781           MB_SET_ERR(MB_FAILURE, "Only " << file_ids.size() << " parts to distribute to " <<
00782         num_parts << " processes.");
00783         }*/
00784         rval = get_partition( file_ids, num_parts, part_number );
00785         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00786     }
00787 
00788     if( cputime ) _times[GET_PARTITION_TIME] = timer->time_elapsed();
00789 
00790     dbgOut.print_ints( 4, "Set file IDs for partial read: ", file_ids );
00791     mpe_event.end();
00792     mpe_event.start( "gather related sets" );
00793     dbgOut.tprint( 1, "GATHERING ADDITIONAL ENTITIES\n" );
00794 
00795     enum RecursiveSetMode
00796     {
00797         RSM_NONE,
00798         RSM_SETS,
00799         RSM_CONTENTS
00800     };
00801     const char* const set_opts[] = { "NONE", "SETS", "CONTENTS", NULL };
00802     int child_mode;
00803     rval = opts.match_option( "CHILDREN", set_opts, child_mode );
00804     if( MB_ENTITY_NOT_FOUND == rval )
00805         child_mode = RSM_CONTENTS;
00806     else if( MB_SUCCESS != rval )
00807     {
00808         MB_SET_ERR( rval, "Invalid value for 'CHILDREN' option" );
00809     }
00810     int content_mode;
00811     rval = opts.match_option( "SETS", set_opts, content_mode );
00812     if( MB_ENTITY_NOT_FOUND == rval )
00813         content_mode = RSM_CONTENTS;
00814     else if( MB_SUCCESS != rval )
00815     {
00816         MB_SET_ERR( rval, "Invalid value for 'SETS' option" );
00817     }
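    // A hypothetical usage sketch: "SETS=NONE;CHILDREN=SETS" in the
    // file-options string would skip contained sets entirely while creating
    // child sets without recursively reading their contents; both options
    // default to CONTENTS.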
00818 
00819     // If we want the contents of contained/child sets,
00820     // search for them now (before gathering the non-set contents
00821     // of the sets.)
00822     Range sets;
00823     intersect( fileInfo->sets, file_ids, sets );
00824     if( content_mode == RSM_CONTENTS || child_mode == RSM_CONTENTS )
00825     {
00826         dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
00827         rval = read_set_ids_recursive( sets, content_mode == RSM_CONTENTS, child_mode == RSM_CONTENTS );
00828         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00829     }
00830 
00831     if( cputime ) _times[GET_SET_IDS_TIME] = timer->time_elapsed();
00832     debug_barrier();
00833 
00834     // Get elements and vertices contained in sets
00835     dbgOut.tprint( 1, "  doing get_set_contents\n" );
00836     rval = get_set_contents( sets, file_ids );
00837     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00838 
00839     if( cputime ) _times[GET_SET_CONTENTS_TIME] = timer->time_elapsed();
00840 
00841     dbgOut.print_ints( 5, "File IDs for partial read: ", file_ids );
00842     debug_barrier();
00843     mpe_event.end();
00844     dbgOut.tprint( 1, "GATHERING NODE IDS\n" );
00845 
00846     // Figure out the maximum dimension of entity to be read
00847     int max_dim = 0;
00848     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00849     {
00850         EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00851         if( type <= MBVERTEX || type >= MBENTITYSET )
00852         {
00853             assert( false );  // In debug builds, die on unknown element types
00854             continue;         // In release builds, skip unknown element types
00855         }
00856         int dim = CN::Dimension( type );
00857         if( dim > max_dim )
00858         {
00859             Range subset;
00860             intersect( fileInfo->elems[i].desc, file_ids, subset );
00861             if( !subset.empty() ) max_dim = dim;
00862         }
00863     }
00864 #ifdef MOAB_HAVE_MPI
00865     if( nativeParallel )
00866     {
00867         int send = max_dim;
00868         MPI_Allreduce( &send, &max_dim, 1, MPI_INT, MPI_MAX, *mpiComm );
00869     }
00870 #endif
00871 
00872     // If input contained any polyhedra, then need to get faces
00873     // of the polyhedra before the next loop because we need to
00874     // read said faces in that loop.
00875     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00876     {
00877         EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00878         if( type != MBPOLYHEDRON ) continue;
00879 
00880         debug_barrier();
00881         dbgOut.print( 2, "    Getting polyhedra faces\n" );
00882         mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
00883 
00884         Range polyhedra;
00885         intersect( fileInfo->elems[i].desc, file_ids, polyhedra );
00886         rval = read_elems( i, polyhedra, &file_ids );
00887         mpe_event.end( rval );
00888         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00889     }
00890 
00891     if( cputime ) _times[GET_POLYHEDRA_TIME] = timer->time_elapsed();
00892     // Get node file ids for all elements
00893     Range nodes;
00894     intersect( fileInfo->nodes, file_ids, nodes );
00895     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00896     {
00897         EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00898         if( type <= MBVERTEX || type >= MBENTITYSET )
00899         {
00900             assert( false );  // In debug builds, die on unknown element types
00901             continue;         // In release builds, skip unknown element types
00902         }
00903         if( MBPOLYHEDRON == type ) continue;
00904 
00905         debug_barrier();
00906         dbgOut.printf( 2, "    Getting element node IDs for: %s\n", fileInfo->elems[i].handle );
00907 
00908         Range subset;
00909         intersect( fileInfo->elems[i].desc, file_ids, subset );
00910         mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
00911 
00912         // If dimension is max_dim, then we can create the elements now,
00913         // so we don't have to read the table again later (connectivity
00914         // will be fixed up after nodes are created, when update_connectivity()
00915         // is called). For elements of a smaller dimension, we just build
00916         // the node ID range now because a) we'll have to read the whole
00917         // connectivity table again later, and b) we don't want to worry
00918         // about accidentally creating multiple copies of the same element.
00919         if( CN::Dimension( type ) == max_dim )
00920             rval = read_elems( i, subset, &nodes );
00921         else
00922             rval = read_elems( i, subset, nodes );
00923         mpe_event.end( rval );
00924         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00925     }
00926     if( cputime ) _times[GET_ELEMENTS_TIME] = timer->time_elapsed();
00927     debug_barrier();
00928     mpe_event.start( "read coords" );
00929     dbgOut.tprintf( 1, "READING NODE COORDINATES (%lu nodes in %lu selects)\n", (unsigned long)nodes.size(),
00930                     (unsigned long)nodes.psize() );
00931 
00932     // Read node coordinates and create vertices in MOAB
00933     // NOTE: This populates the RangeMap with node file ids,
00934     //       which is expected by read_node_adj_elems.
00935     rval = read_nodes( nodes );
00936     mpe_event.end( rval );
00937     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00938 
00939     if( cputime ) _times[GET_NODES_TIME] = timer->time_elapsed();
00940 
00941     debug_barrier();
00942     dbgOut.tprint( 1, "READING ELEMENTS\n" );
00943 
00944     // Decide if we need to read additional elements
00945     enum SideMode
00946     {
00947         SM_EXPLICIT,
00948         SM_NODES,
00949         SM_SIDES
00950     };
00951     int side_mode;
00952     const char* const options[] = { "EXPLICIT", "NODES", "SIDES", 0 };
00953     rval                        = opts.match_option( "ELEMENTS", options, side_mode );
00954     if( MB_ENTITY_NOT_FOUND == rval )
00955     {
00956         // If only nodes were specified, then default to "NODES", otherwise
00957         // default to "SIDES".
00958         if( 0 == max_dim )
00959             side_mode = SM_NODES;
00960         else
00961             side_mode = SM_SIDES;
00962     }
00963     else if( MB_SUCCESS != rval )
00964     {
00965         MB_SET_ERR( rval, "Invalid value for 'ELEMENTS' option" );
00966     }
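    // A hypothetical usage sketch: "ELEMENTS=EXPLICIT" keeps only elements
    // whose file IDs were explicitly gathered, "ELEMENTS=NODES" adds elements
    // for which all nodes were read, and "ELEMENTS=SIDES" additionally
    // deletes read entities that turn out not to be sides (see below).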
00967 
00968     if( side_mode == SM_SIDES /*ELEMENTS=SIDES*/ && max_dim == 0 /*node-based*/ )
00969     {
00970         // Read elements until we find something. Once we find something,
00971         // read only elements of the same dimension. NOTE: loop termination
00972         // criterion changes on both sides (max_dim can be changed in loop
00973         // body).
00974         for( int dim = 3; dim >= max_dim; --dim )
00975         {
00976             for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00977             {
00978                 EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00979                 if( CN::Dimension( type ) == dim )
00980                 {
00981                     debug_barrier();
00982                     dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
00983                     mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
00984                     Range ents;
00985                     rval = read_node_adj_elems( fileInfo->elems[i], &ents );
00986                     mpe_event.end( rval );
00987                     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00988                     if( !ents.empty() ) max_dim = 3;
00989                 }
00990             }
00991         }
00992     }
00993 
00994     if( cputime ) _times[GET_NODEADJ_TIME] = timer->time_elapsed();
00995     Range side_entities;
00996     if( side_mode != SM_EXPLICIT /*ELEMENTS=NODES || ELEMENTS=SIDES*/ )
00997     {
00998         if( 0 == max_dim ) max_dim = 4;
00999         // Now read any additional elements for which we've already read all
01000         // of the nodes.
01001         for( int dim = max_dim - 1; dim > 0; --dim )
01002         {
01003             for( int i = 0; i < fileInfo->num_elem_desc; ++i )
01004             {
01005                 EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
01006                 if( CN::Dimension( type ) == dim )
01007                 {
01008                     debug_barrier();
01009                     dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
01010                     mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
01011                     rval = read_node_adj_elems( fileInfo->elems[i], &side_entities );
01012                     mpe_event.end( rval );
01013                     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01014                 }
01015             }
01016         }
01017     }
01018 
01019     // We need to do this here for polyhedra to be handled correctly.
01020     // We have to wait until the faces are read in the above code block,
01021     // but need to create the connectivity before doing update_connectivity,
01022     // which might otherwise delete polyhedra faces.
01023     if( cputime ) _times[GET_SIDEELEM_TIME] = timer->time_elapsed();
01024 
01025     debug_barrier();
01026     dbgOut.tprint( 1, "UPDATING CONNECTIVITY ARRAYS FOR READ ELEMENTS\n" );
01027     mpe_event.start( "updating connectivity for elements read before vertices" );
01028     rval = update_connectivity();
01029     mpe_event.end();
01030     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01031 
01032     if( cputime ) _times[UPDATECONN_TIME] = timer->time_elapsed();
01033 
01034     dbgOut.tprint( 1, "READING ADJACENCIES\n" );
01035     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
01036     {
01037         if (fileInfo->elems[i].have_adj  /*&&
01038         idMap.intersects(fileInfo->elems[i].desc.start_id, fileInfo->elems[i].desc.count) */)
01039         {
01040             mpe_event.start( "reading adjacencies for ", fileInfo->elems[i].handle );
01041             long len;
01042             hid_t th = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &len, &status );
01043             if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01044 
01045             rval = read_adjacencies( th, len );
01046             mhdf_closeData( filePtr, th, &status );
01047             mpe_event.end( rval );
01048             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01049         }
01050     }
01051 
01052     if( cputime ) _times[ADJACENCY_TIME] = timer->time_elapsed();
01053 
01054     // If doing ELEMENTS=SIDES then we need to delete any entities
01055     // that we read that aren't actually sides (e.g. an interior face
01056     // that connects two disjoint portions of the part). Both
01057     // update_connectivity and reading of any explicit adjacencies must
01058     // happen before this.
01059     if( side_mode == SM_SIDES )
01060     {
01061         debug_barrier();
01062         mpe_event.start( "cleaning up non-side lower-dim elements" );
01063         dbgOut.tprint( 1, "CHECKING FOR AND DELETING NON-SIDE ELEMENTS\n" );
01064         rval = delete_non_side_elements( side_entities );
01065         mpe_event.end( rval );
01066         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01067     }
01068 
01069     if( cputime ) _times[DELETE_NON_SIDEELEM_TIME] = timer->time_elapsed();
01070 
01071     debug_barrier();
01072     dbgOut.tprint( 1, "READING SETS\n" );
01073 
01074     // If reading contained/child sets but not their contents then find
01075     // them now. If we were also reading their contents we would
01076     // have found them already.
01077     if( content_mode == RSM_SETS || child_mode == RSM_SETS )
01078     {
01079         dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
01080         mpe_event.start( "finding recursively contained sets" );
01081         rval = read_set_ids_recursive( sets, content_mode == RSM_SETS, child_mode == RSM_SETS );
01082         mpe_event.end( rval );
01083         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01084     }
01085 
01086     if( cputime ) _times[READ_SET_IDS_RECURS_TIME] = timer->time_elapsed();
01087 
01088     dbgOut.tprint( 1, "  doing find_sets_containing\n" );
01089     mpe_event.start( "finding sets containing any read entities" );
01090 
01091     // Decide whether to read set-containing parents
01092     bool read_set_containing_parents = true;
01093     std::string tmp_opt;
01094     rval = opts.get_option( "NO_SET_CONTAINING_PARENTS", tmp_opt );
01095     if( MB_SUCCESS == rval ) read_set_containing_parents = false;
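    // A hypothetical usage sketch: passing "NO_SET_CONTAINING_PARENTS" in
    // the file-options string sets the flag above to false, changing which
    // containing sets find_sets_containing() gathers.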
01096 
01097     // Append file IDs of sets containing any of the nodes or elements
01098     // we've read up to this point.
01099     rval = find_sets_containing( sets, read_set_containing_parents );
01100     mpe_event.end( rval );
01101     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01102 
01103     if( cputime ) _times[FIND_SETS_CONTAINING_TIME] = timer->time_elapsed();
01104 
01105     // Now actually read all set data and instantiate sets in MOAB.
01106     // Get any contained sets out of file_ids.
01107     mpe_event.start( "reading set contents/parents/children" );
01108     EntityHandle first_set = fileInfo->sets.start_id;
01109     sets.merge( file_ids.lower_bound( first_set ), file_ids.lower_bound( first_set + fileInfo->sets.count ) );
01110     dbgOut.tprint( 1, "  doing read_sets\n" );
01111     rval = read_sets( sets );
01112     mpe_event.end( rval );
01113     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01114 
01115     if( cputime ) _times[READ_SETS_TIME] = timer->time_elapsed();
01116 
01117     dbgOut.tprint( 1, "READING TAGS\n" );
01118 
01119     for( int i = 0; i < fileInfo->num_tag_desc; ++i )
01120     {
01121         mpe_event.start( "reading tag: ", fileInfo->tags[i].name );
01122         rval = read_tag( i );
01123         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01124     }
01125 
01126     if( cputime ) _times[READ_TAGS_TIME] = timer->time_elapsed();
01127 
01128     dbgOut.tprint( 1, "PARTIAL READ COMPLETE.\n" );
01129 
01130     return MB_SUCCESS;
01131 }
01132 
01133 ErrorCode ReadHDF5::search_tag_values( int tag_index, const std::vector< int >& sorted_values, Range& file_ids,
01134                                        bool sets_only )
01135 {
01136     ErrorCode rval;
01137     mhdf_Status status;
01138     std::vector< EntityHandle >::iterator iter;
01139     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
01140     long size;
01141     long start_id;
01142 
01143     CHECK_OPEN_HANDLES;
01144 
01145     debug_barrier();
01146 
01147     // Do dense data
01148 
01149     hid_t table;
01150     const char* name;
01151     std::vector< EntityHandle > indices;
01152     // These are probably in order of dimension, so iterate
01153     // in reverse order to make Range insertions more efficient.
01154     std::vector< int > grp_indices( tag.dense_elem_indices, tag.dense_elem_indices + tag.num_dense_indices );
01155     for( std::vector< int >::reverse_iterator i = grp_indices.rbegin(); i != grp_indices.rend(); ++i )
01156     {
01157         int idx = *i;
01158         if( idx == -2 )
01159         {
01160             name     = mhdf_set_type_handle();
01161             start_id = fileInfo->sets.start_id;
01162         }
01163         else if( sets_only )
01164         {
01165             continue;
01166         }
01167         else if( idx == -1 )
01168         {
01169             name     = mhdf_node_type_handle();
01170             start_id = fileInfo->nodes.start_id;
01171         }
01172         else
01173         {
01174             if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01175             name     = fileInfo->elems[idx].handle;
01176             start_id = fileInfo->elems[idx].desc.start_id;
01177         }
01178         table = mhdf_openDenseTagData( filePtr, tag.name, name, &size, &status );
01179         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01180         rval = search_tag_values( table, size, sorted_values, indices );
01181         mhdf_closeData( filePtr, table, &status );
01182         if( MB_SUCCESS != rval || is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01183         // Convert from table indices to file IDs and add to result list
01184         std::sort( indices.begin(), indices.end(), std::greater< EntityHandle >() );
01185         std::transform( indices.begin(), indices.end(), range_inserter( file_ids ),
01186                         // std::bind1st(std::plus<long>(), start_id));
01187                         std::bind( std::plus< long >(), start_id, std::placeholders::_1 ) );
01188         indices.clear();
01189     }
01190 
01191     if( !tag.have_sparse ) return MB_SUCCESS;
01192 
01193     // Do sparse data
01194 
01195     hid_t tables[2];
01196     long junk;  // Redundant value for non-variable-length tags
01197     mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
01198     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01199     rval = search_tag_values( tables[1], size, sorted_values, indices );
01200     mhdf_closeData( filePtr, tables[1], &status );
01201     if( MB_SUCCESS != rval || is_error( status ) )
01202     {
01203         mhdf_closeData( filePtr, tables[0], &status );
01204         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01205     }
01206     // Convert to ranges
01207     std::sort( indices.begin(), indices.end() );
01208     std::vector< EntityHandle > ranges;
01209     iter = indices.begin();
01210     while( iter != indices.end() )
01211     {
01212         ranges.push_back( *iter );
01213         EntityHandle last = *iter;
01214         for( ++iter; iter != indices.end() && ( last + 1 ) == *iter; ++iter, ++last )
01215             ;
01216         ranges.push_back( last );
01217     }
01218     // Read file ids
01219     iter                 = ranges.begin();
01220     unsigned long offset = 0;
01221     while( iter != ranges.end() )
01222     {
01223         long begin = *iter;
01224         ++iter;
01225         long end = *iter;
01226         ++iter;
01227         mhdf_readSparseTagEntitiesWithOpt( tables[0], begin, end - begin + 1, handleType, &indices[offset], indepIO,
01228                                            &status );
01229         if( is_error( status ) )
01230         {
01231             mhdf_closeData( filePtr, tables[0], &status );
01232             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01233         }
01234         offset += end - begin + 1;
01235     }
01236     mhdf_closeData( filePtr, tables[0], &status );
01237     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01238     assert( offset == indices.size() );
01239     std::sort( indices.begin(), indices.end() );
01240 
01241     if( sets_only )
01242     {
01243         iter = std::lower_bound( indices.begin(), indices.end(),
01244                                  ( EntityHandle )( fileInfo->sets.start_id + fileInfo->sets.count ) );
01245         indices.erase( iter, indices.end() );
01246         iter = std::lower_bound( indices.begin(), indices.end(), fileInfo->sets.start_id );
01247         indices.erase( indices.begin(), iter );
01248     }
01249     copy_sorted_file_ids( &indices[0], indices.size(), file_ids );
01250 
01251     return MB_SUCCESS;
01252 }
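// For example, if sparse-tag entries at table indices { 2, 3, 4, 7 } matched
// a searched value, the range compression in search_tag_values() above reads
// entity file IDs for the index spans [2,4] and [7,7] instead of entry by
// entry.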
01253 
01254 ErrorCode ReadHDF5::get_tagged_entities( int tag_index, Range& file_ids )
01255 {
01256     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
01257 
01258     CHECK_OPEN_HANDLES;
01259 
01260     // Do dense data
01261     Range::iterator hint = file_ids.begin();
01262     for( int i = 0; i < tag.num_dense_indices; ++i )
01263     {
01264         int idx = tag.dense_elem_indices[i];
01265         mhdf_EntDesc* ents;
01266         if( idx == -2 )
01267             ents = &fileInfo->sets;
01268         else if( idx == -1 )
01269             ents = &fileInfo->nodes;
01270         else
01271         {
01272             if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01273             ents = &( fileInfo->elems[idx].desc );
01274         }
01275 
01276         EntityHandle h = (EntityHandle)ents->start_id;
01277         hint           = file_ids.insert( hint, h, h + ents->count - 1 );
01278     }
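    // (Dense-index convention, as used above: idx == -2 selects the set
    // table, idx == -1 the node table, and idx >= 0 element group idx.)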
01279 
01280     if( !tag.have_sparse ) return MB_SUCCESS;
01281 
01282     // Do sparse data
01283 
01284     mhdf_Status status;
01285     hid_t tables[2];
01286     long size, junk;
01287     mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
01288     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01289     mhdf_closeData( filePtr, tables[1], &status );
01290     if( is_error( status ) )
01291     {
01292         mhdf_closeData( filePtr, tables[0], &status );
01293         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01294     }
01295 
01296     hid_t file_type = H5Dget_type( tables[0] );
01297     if( file_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01298 
01299     hint                   = file_ids.begin();
01300     EntityHandle* buffer   = reinterpret_cast< EntityHandle* >( dataBuffer );
01301     const long buffer_size = bufferSize / std::max( sizeof( EntityHandle ), H5Tget_size( file_type ) );
01302     long remaining = size, offset = 0;
01303     while( remaining )
01304     {
01305         long count = std::min( buffer_size, remaining );
01306         assert_range( buffer, count );
01307         mhdf_readSparseTagEntitiesWithOpt( *tables, offset, count, file_type, buffer, collIO, &status );
01308         if( is_error( status ) )
01309         {
01310             H5Tclose( file_type );
01311             mhdf_closeData( filePtr, *tables, &status );
01312             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01313         }
01314         H5Tconvert( file_type, handleType, count, buffer, NULL, H5P_DEFAULT );
01315 
01316         std::sort( buffer, buffer + count );
01317         for( long i = 0; i < count; ++i )
01318             hint = file_ids.insert( hint, buffer[i], buffer[i] );
01319 
01320         remaining -= count;
01321         offset += count;
01322     }
01323 
01324     H5Tclose( file_type );
01325     mhdf_closeData( filePtr, *tables, &status );
01326     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01327 
01328     return MB_SUCCESS;
01329 }
01330 
01331 ErrorCode ReadHDF5::search_tag_values( hid_t tag_table, unsigned long table_size,
01332                                        const std::vector< int >& sorted_values,
01333                                        std::vector< EntityHandle >& value_indices )
01334 {
01335     debug_barrier();
01336 
01337     CHECK_OPEN_HANDLES;
01338 
01339     mhdf_Status status;
01340     size_t chunk_size = bufferSize / sizeof( int );
01341     int* buffer       = reinterpret_cast< int* >( dataBuffer );
01342     size_t remaining = table_size, offset = 0;
01343     while( remaining )
01344     {
01345         // Get a block of tag values
01346         size_t count = std::min( chunk_size, remaining );
01347         assert_range( buffer, count );
01348         mhdf_readTagValuesWithOpt( tag_table, offset, count, H5T_NATIVE_INT, buffer, collIO, &status );
01349         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01350 
01351         // Search tag values
01352         for( size_t i = 0; i < count; ++i )
01353             if( std::binary_search( sorted_values.begin(), sorted_values.end(), (int)buffer[i] ) )
01354                 value_indices.push_back( i + offset );
01355 
01356         offset += count;
01357         remaining -= count;
01358     }
01359 
01360     return MB_SUCCESS;
01361 }
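/* Worked example (values are hypothetical): search_tag_values scans the
 * tag table in buffer-sized chunks and records the table index of every
 * value found in sorted_values. For a table { 10, 20, 10, 30 } and
 * sorted_values = { 10, 30 }, value_indices comes back as { 0, 2, 3 }.
 * Because std::binary_search is used on each value, sorted_values must be
 * sorted ascending, as the parameter name implies.
 */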
01362 
01363 ErrorCode ReadHDF5::read_nodes( const Range& node_file_ids )
01364 {
01365     ErrorCode rval;
01366     mhdf_Status status;
01367     const int dim = fileInfo->nodes.vals_per_ent;
01368     Range range;
01369 
01370     CHECK_OPEN_HANDLES;
01371 
01372     if( node_file_ids.empty() && !nativeParallel ) return MB_SUCCESS;
01373 
01374     int cdim;
01375     rval = iFace->get_dimension( cdim );
01376     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01377 
01378     if( cdim < dim )
01379     {
01380         rval = iFace->set_dimension( dim );
01381         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01382     }
01383 
01384     hid_t data_id = mhdf_openNodeCoordsSimple( filePtr, &status );
01385     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01386 
01387     EntityHandle handle;
01388     std::vector< double* > arrays( dim );
01389     const size_t num_nodes = node_file_ids.size();
01390     if( num_nodes > 0 )
01391     {
01392         rval = readUtil->get_node_coords( dim, (int)num_nodes, 0, handle, arrays );
01393         if( MB_SUCCESS != rval )
01394         {
01395             mhdf_closeData( filePtr, data_id, &status );
01396             MB_SET_ERR( rval, "ReadHDF5 Failure" );
01397         }
01398     }
01399 
01400     if( blockedCoordinateIO )
01401     {
01402         try
01403         {
01404             for( int d = 0; d < dim; ++d )
01405             {
01406                 ReadHDF5Dataset reader( "blocked coords", data_id, nativeParallel, mpiComm, false );
01407                 reader.set_column( d );
01408                 reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, num_nodes, H5T_NATIVE_DOUBLE );
01409                 dbgOut.printf( 3, "Reading %lu chunks for coordinate dimension %d\n", reader.get_read_count(), d );
01410                 // Should normally require only one read call, unless the sparse
01411                 // nature of file_ids forces the reader to split the read
01412                 size_t count, offset = 0;
01413                 int nn = 0;
01414                 while( !reader.done() )
01415                 {
01416                     dbgOut.printf( 3, "Reading chunk %d for dimension %d\n", ++nn, d );
01417                     reader.read( arrays[d] + offset, count );
01418                     offset += count;
01419                 }
01420                 if( offset != num_nodes )
01421                 {
01422                     mhdf_closeData( filePtr, data_id, &status );
01423                     assert( false );
01424                     return MB_FAILURE;
01425                 }
01426             }
01427         }
01428         catch( ReadHDF5Dataset::Exception )
01429         {
01430             mhdf_closeData( filePtr, data_id, &status );
01431             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01432         }
01433     }
01434     else
01435     {  // !blockedCoordinateIO
01436         double* buffer  = (double*)dataBuffer;
01437         long chunk_size = bufferSize / ( 3 * sizeof( double ) );
01438         long coffset    = 0;
01439         int nn          = 0;
01440         try
01441         {
01442             ReadHDF5Dataset reader( "interleaved coords", data_id, nativeParallel, mpiComm, false );
01443             reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, chunk_size, H5T_NATIVE_DOUBLE );
01444             dbgOut.printf( 3, "Reading %lu chunks for interleaved coordinates\n", reader.get_read_count() );
01445             while( !reader.done() )
01446             {
01447                 dbgOut.tprintf( 3, "Reading chunk %d of node coords\n", ++nn );
01448 
01449                 size_t count;
01450                 reader.read( buffer, count );
01451 
01452                 for( size_t i = 0; i < count; ++i )
01453                     for( int d = 0; d < dim; ++d )
01454                         arrays[d][coffset + i] = buffer[dim * i + d];
01455                 coffset += count;
01456             }
01457         }
01458         catch( ReadHDF5Dataset::Exception )
01459         {
01460             mhdf_closeData( filePtr, data_id, &status );
01461             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01462         }
01463     }
01464 
01465     dbgOut.print( 3, "Closing node coordinate table\n" );
01466     mhdf_closeData( filePtr, data_id, &status );
01467     for( int d = dim; d < cdim; ++d )
01468         memset( arrays[d], 0, num_nodes * sizeof( double ) );
01469 
01470     dbgOut.printf( 3, "Updating ID to handle map for %lu nodes\n", (unsigned long)node_file_ids.size() );
01471     return insert_in_id_map( node_file_ids, handle );
01472 }
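/* Layout note, with a two-vertex example: blocked I/O reads the
 * coordinate table column by column ( x0 x1 | y0 y1 | z0 z1 ), one
 * ReadHDF5Dataset pass per dimension via set_column(d), while interleaved
 * I/O reads rows ( x0 y0 z0 x1 y1 z1 ) into a scratch buffer and scatters
 * them into the per-dimension arrays, as in the loop above:
 *
 *   for( size_t i = 0; i < count; ++i )
 *       for( int d = 0; d < dim; ++d )
 *           arrays[d][coffset + i] = buffer[dim * i + d];
 */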
01473 
01474 ErrorCode ReadHDF5::read_elems( int i )
01475 {
01476     Range ids;
01477     ids.insert( fileInfo->elems[i].desc.start_id,
01478                 fileInfo->elems[i].desc.start_id + fileInfo->elems[i].desc.count - 1 );
01479     return read_elems( i, ids );
01480 }
01481 
01482 ErrorCode ReadHDF5::read_elems( int i, const Range& file_ids, Range* node_ids )
01483 {
01484     if( fileInfo->elems[i].desc.vals_per_ent < 0 )
01485     {
01486         if( node_ids != 0 )  // Not implemented for version 3 format of poly data
01487             MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
01488         return read_poly( fileInfo->elems[i], file_ids );
01489     }
01490     else
01491         return read_elems( fileInfo->elems[i], file_ids, node_ids );
01492 }
01493 
01494 ErrorCode ReadHDF5::read_elems( const mhdf_ElemDesc& elems, const Range& file_ids, Range* node_ids )
01495 {
01496     CHECK_OPEN_HANDLES;
01497 
01498     debug_barrier();
01499     dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", elems.handle,
01500                     (unsigned long)file_ids.size(), (unsigned long)file_ids.psize() );
01501 
01502     ErrorCode rval = MB_SUCCESS;
01503     mhdf_Status status;
01504 
01505     EntityType type = CN::EntityTypeFromName( elems.type );
01506     if( type == MBMAXTYPE ) { MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" ); }
01507 
01508     const int nodes_per_elem = elems.desc.vals_per_ent;
01509     const size_t count       = file_ids.size();
01510     hid_t data_id            = mhdf_openConnectivitySimple( filePtr, elems.handle, &status );
01511     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01512 
01513     EntityHandle handle;
01514     EntityHandle* array = 0;
01515     if( count > 0 ) rval = readUtil->get_element_connect( count, nodes_per_elem, type, 0, handle, array );
01516     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01517 
01518     try
01519     {
01520         EntityHandle* buffer     = reinterpret_cast< EntityHandle* >( dataBuffer );
01521         const size_t buffer_size = bufferSize / ( sizeof( EntityHandle ) * nodes_per_elem );
01522         ReadHDF5Dataset reader( elems.handle, data_id, nativeParallel, mpiComm );
01523         reader.set_file_ids( file_ids, elems.desc.start_id, buffer_size, handleType );
01524         dbgOut.printf( 3, "Reading connectivity in %lu chunks for element group \"%s\"\n", reader.get_read_count(),
01525                        elems.handle );
01526         EntityHandle* iter = array;
01527         int nn             = 0;
01528         while( !reader.done() )
01529         {
01530             dbgOut.printf( 3, "Reading chunk %d for \"%s\"\n", ++nn, elems.handle );
01531 
01532             size_t num_read;
01533             reader.read( buffer, num_read );
01534             iter = std::copy( buffer, buffer + num_read * nodes_per_elem, iter );
01535 
01536             if( node_ids )
01537             {
01538                 std::sort( buffer, buffer + num_read * nodes_per_elem );
01539                 num_read = std::unique( buffer, buffer + num_read * nodes_per_elem ) - buffer;
01540                 copy_sorted_file_ids( buffer, num_read, *node_ids );
01541             }
01542         }
01543         assert( iter - array == (ptrdiff_t)count * nodes_per_elem );
01544     }
01545     catch( ReadHDF5Dataset::Exception )
01546     {
01547         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01548     }
01549 
01550     if( !node_ids )
01551     {
01552         rval = convert_id_to_handle( array, count * nodes_per_elem );
01553         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01554 
01555         rval = readUtil->update_adjacencies( handle, count, nodes_per_elem, array );
01556         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01557     }
01558     else
01559     {
01560         IDConnectivity t;
01561         t.handle         = handle;
01562         t.count          = count;
01563         t.nodes_per_elem = nodes_per_elem;
01564         t.array          = array;
01565         idConnectivityList.push_back( t );
01566     }
01567 
01568     return insert_in_id_map( file_ids, handle );
01569 }
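/* Buffer arithmetic, for illustration: the chunk size is the number of
 * whole elements whose connectivity fits in dataBuffer. Assuming, say,
 * 8-byte handles, a 128 MB buffer, and 8-node hexahedra:
 *
 *   buffer_size = bufferSize / ( sizeof( EntityHandle ) * nodes_per_elem )
 *               = ( 128 * 1024 * 1024 ) / ( 8 * 8 )
 *               = 2097152 elements per read
 */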
01570 
01571 ErrorCode ReadHDF5::update_connectivity()
01572 {
01573     ErrorCode rval;
01574     std::vector< IDConnectivity >::iterator i;
01575     for( i = idConnectivityList.begin(); i != idConnectivityList.end(); ++i )
01576     {
01577         rval = convert_id_to_handle( i->array, i->count * i->nodes_per_elem );
01578         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01579 
01580         rval = readUtil->update_adjacencies( i->handle, i->count, i->nodes_per_elem, i->array );
01581         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01582     }
01583     idConnectivityList.clear();
01584 
01585     return MB_SUCCESS;
01586 }
01587 
01588 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, Range* handles_out )
01589 {
01590     mhdf_Status status;
01591     ErrorCode rval;
01592 
01593     CHECK_OPEN_HANDLES;
01594 
01595     hid_t table = mhdf_openConnectivitySimple( filePtr, group.handle, &status );
01596     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01597 
01598     rval = read_node_adj_elems( group, table, handles_out );
01599 
01600     mhdf_closeData( filePtr, table, &status );
01601     if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR_RET_VAL( "ReadHDF5 Failure", MB_FAILURE );
01602 
01603     return rval;
01604 }
01605 
01606 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, hid_t table_handle, Range* handles_out )
01607 {
01608     CHECK_OPEN_HANDLES;
01609 
01610     debug_barrier();
01611 
01612     mhdf_Status status;
01613     ErrorCode rval;
01614     IODebugTrack debug_track( debugTrack, std::string( group.handle ) );
01615 
01616     // Copy data to local variables (makes other code clearer)
01617     const int node_per_elem = group.desc.vals_per_ent;
01618     long start_id           = group.desc.start_id;
01619     long remaining          = group.desc.count;
01620     const EntityType type   = CN::EntityTypeFromName( group.type );
01621 
01622     // Figure out how many elements we can read in each pass
01623     long* const buffer     = reinterpret_cast< long* >( dataBuffer );
01624     const long buffer_size = bufferSize / ( node_per_elem * sizeof( buffer[0] ) );
01625     // Read all element connectivity in buffer_size blocks
01626     long offset = 0;
01627     dbgOut.printf( 3, "Reading node-adjacent elements from \"%s\" in %ld chunks\n", group.handle,
01628                    ( remaining + buffer_size - 1 ) / buffer_size );
01629     int nn = 0;
01630     Range::iterator hint;
01631     if( handles_out ) hint = handles_out->begin();
01632     while( remaining )
01633     {
01634         dbgOut.printf( 3, "Reading chunk %d of connectivity data for \"%s\"\n", ++nn, group.handle );
01635 
01636         // Read a block of connectivity data
01637         const long count = std::min( remaining, buffer_size );
01638         debug_track.record_io( offset, count );
01639         assert_range( buffer, count * node_per_elem );
01640         mhdf_readConnectivityWithOpt( table_handle, offset, count, H5T_NATIVE_LONG, buffer, collIO, &status );
01641         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01642         offset += count;
01643         remaining -= count;
01644 
01645         // Count the number of elements in the block that we want;
01646         // zero the connectivity of the other elements
01647         long num_elem = 0;
01648         long* iter    = buffer;
01649         for( long i = 0; i < count; ++i )
01650         {
01651             for( int j = 0; j < node_per_elem; ++j )
01652             {
01653                 iter[j] = (long)idMap.find( iter[j] );
01654                 if( !iter[j] )
01655                 {
01656                     iter[0] = 0;
01657                     break;
01658                 }
01659             }
01660             if( iter[0] ) ++num_elem;
01661             iter += node_per_elem;
01662         }
01663 
01664         if( !num_elem )
01665         {
01666             start_id += count;
01667             continue;
01668         }
01669 
01670         // Create elements
01671         EntityHandle handle;
01672         EntityHandle* array;
01673         rval = readUtil->get_element_connect( (int)num_elem, node_per_elem, type, 0, handle, array );
01674         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01675 
01676         // Copy all non-zero connectivity values
01677         iter                = buffer;
01678         EntityHandle* iter2 = array;
01679         EntityHandle h      = handle;
01680         for( long i = 0; i < count; ++i )
01681         {
01682             if( !*iter )
01683             {
01684                 iter += node_per_elem;
01685                 continue;
01686             }
01687             if( !idMap.insert( start_id + i, h++, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01688 
01689             long* const end = iter + node_per_elem;
01690             for( ; iter != end; ++iter, ++iter2 )
01691                 *iter2 = (EntityHandle)*iter;
01692         }
01693         assert( iter2 - array == num_elem * node_per_elem );
01694         start_id += count;
01695 
01696         rval = readUtil->update_adjacencies( handle, num_elem, node_per_elem, array );
01697         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01698         if( handles_out ) hint = handles_out->insert( hint, handle, handle + num_elem - 1 );
01699     }
01700 
01701     debug_track.all_reduce();
01702     return MB_SUCCESS;
01703 }
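/* Filtering sketch (ids are hypothetical): an element is kept only if
 * every node id resolves in idMap; the first failed lookup zeros the
 * element's leading connectivity slot so the copy loop can skip it. With
 * node_per_elem == 3 and idMap holding ids 1-4, the block { 1,2,3, 2,9,4 }
 * becomes { h1,h2,h3, 0,... } and only the first element is created.
 */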
01704 
01705 ErrorCode ReadHDF5::read_elems( int i, const Range& elems_in, Range& nodes )
01706 {
01707     CHECK_OPEN_HANDLES;
01708 
01709     debug_barrier();
01710     dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", fileInfo->elems[i].handle,
01711                     (unsigned long)elems_in.size(), (unsigned long)elems_in.psize() );
01712 
01713     EntityHandle* const buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
01714     const int node_per_elem    = fileInfo->elems[i].desc.vals_per_ent;
01715     const size_t buffer_size   = bufferSize / ( node_per_elem * sizeof( EntityHandle ) );
01716 
01717     if( elems_in.empty() ) return MB_SUCCESS;
01718 
01719     assert( (long)elems_in.front() >= fileInfo->elems[i].desc.start_id );
01720     assert( (long)elems_in.back() - fileInfo->elems[i].desc.start_id < fileInfo->elems[i].desc.count );
01721 
01722     // We don't support version 3 style poly element data
01723     if( fileInfo->elems[i].desc.vals_per_ent <= 0 ) MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
01724 
01725     mhdf_Status status;
01726     hid_t table = mhdf_openConnectivitySimple( filePtr, fileInfo->elems[i].handle, &status );
01727     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01728 
01729     try
01730     {
01731         ReadHDF5Dataset reader( fileInfo->elems[i].handle, table, nativeParallel, mpiComm );
01732         reader.set_file_ids( elems_in, fileInfo->elems[i].desc.start_id, buffer_size, handleType );
01733         dbgOut.printf( 3, "Reading node list in %lu chunks for \"%s\"\n", reader.get_read_count(),
01734                        fileInfo->elems[i].handle );
01735         int nn = 0;
01736         while( !reader.done() )
01737         {
01738             dbgOut.printf( 3, "Reading chunk %d of \"%s\" connectivity\n", ++nn, fileInfo->elems[i].handle );
01739             size_t num_read;
01740             reader.read( buffer, num_read );
01741             std::sort( buffer, buffer + num_read * node_per_elem );
01742             num_read = std::unique( buffer, buffer + num_read * node_per_elem ) - buffer;
01743             copy_sorted_file_ids( buffer, num_read, nodes );
01744         }
01745     }
01746     catch( ReadHDF5Dataset::Exception )
01747     {
01748         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01749     }
01750 
01751     return MB_SUCCESS;
01752 }
01753 
01754 ErrorCode ReadHDF5::read_poly( const mhdf_ElemDesc& elems, const Range& file_ids )
01755 {
01756     class PolyReader : public ReadHDF5VarLen
01757     {
01758       private:
01759         const EntityType type;
01760         ReadHDF5* readHDF5;
01761 
01762       public:
01763         PolyReader( EntityType elem_type, void* buffer, size_t buffer_size, ReadHDF5* owner, DebugOutput& dbg )
01764             : ReadHDF5VarLen( dbg, buffer, buffer_size ), type( elem_type ), readHDF5( owner )
01765         {
01766         }
01767         virtual ~PolyReader() {}
01768         ErrorCode store_data( EntityHandle file_id, void* data, long len, bool )
01769         {
01770             size_t valid;
01771             EntityHandle* conn = reinterpret_cast< EntityHandle* >( data );
01772             readHDF5->convert_id_to_handle( conn, len, valid );
01773             if( valid != (size_t)len ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
01774             EntityHandle handle;
01775             ErrorCode rval = readHDF5->moab()->create_element( type, conn, len, handle );
01776             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01777 
01778             rval = readHDF5->insert_in_id_map( file_id, handle );
01779             return rval;
01780         }
01781     };
01782 
01783     CHECK_OPEN_HANDLES;
01784 
01785     debug_barrier();
01786 
01787     EntityType type = CN::EntityTypeFromName( elems.type );
01788     if( type == MBMAXTYPE ) { MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" ); }
01789 
01790     hid_t handles[2];
01791     mhdf_Status status;
01792     long num_poly, num_conn, first_id;
01793     mhdf_openPolyConnectivity( filePtr, elems.handle, &num_poly, &num_conn, &first_id, handles, &status );
01794     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01795 
01796     std::string nm( elems.handle );
01797     ReadHDF5Dataset offset_reader( ( nm + " offsets" ).c_str(), handles[0], nativeParallel, mpiComm, true );
01798     ReadHDF5Dataset connect_reader( ( nm + " data" ).c_str(), handles[1], nativeParallel, mpiComm, true );
01799 
01800     PolyReader tool( type, dataBuffer, bufferSize, this, dbgOut );
01801     return tool.read( offset_reader, connect_reader, file_ids, first_id, handleType );
01802 }
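/* Sketch of the callback contract, paraphrased from ReadHDF5VarLen: the
 * reader walks the offset table to get each entity's connectivity length,
 * reads the packed connectivity values, and calls
 * store_data( file_id, data, len, ranged ) once per polygon/polyhedron,
 * so PolyReader only needs to convert ids to handles and create the
 * element.
 */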
01803 
01804 ErrorCode ReadHDF5::delete_non_side_elements( const Range& side_ents )
01805 {
01806     ErrorCode rval;
01807 
01808     // Build list of entities that we need to find the sides of
01809     Range explicit_ents;
01810     Range::iterator hint = explicit_ents.begin();
01811     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
01812     {
01813         EntityHandle start = i->value;
01814         EntityHandle end   = i->value + i->count - 1;
01815         EntityType type    = TYPE_FROM_HANDLE( start );
01816         assert( type == TYPE_FROM_HANDLE( end ) );  // Otherwise handle space entirely full!!
01817         if( type != MBVERTEX && type != MBENTITYSET ) hint = explicit_ents.insert( hint, start, end );
01818     }
01819     explicit_ents = subtract( explicit_ents, side_ents );
01820 
01821     // Figure out which entities we want to delete
01822     Range dead_ents( side_ents );
01823     Range::iterator ds, de, es;
01824     ds = dead_ents.lower_bound( CN::TypeDimensionMap[1].first );
01825     de = dead_ents.lower_bound( CN::TypeDimensionMap[2].first, ds );
01826     if( ds != de )
01827     {
01828         // Get subset of explicit ents of dimension greater than 1
01829         es = explicit_ents.lower_bound( CN::TypeDimensionMap[2].first );
01830         Range subset, adj;
01831         subset.insert( es, explicit_ents.end() );
01832         rval = iFace->get_adjacencies( subset, 1, false, adj, Interface::UNION );
01833         if( MB_SUCCESS != rval ) return rval;
01834         dead_ents = subtract( dead_ents, adj );
01835     }
01836     ds = dead_ents.lower_bound( CN::TypeDimensionMap[2].first );
01837     de = dead_ents.lower_bound( CN::TypeDimensionMap[3].first, ds );
01838     assert( de == dead_ents.end() );
01839     if( ds != de )
01840     {
01841         // Get subset of explicit ents of dimension 3
01842         es = explicit_ents.lower_bound( CN::TypeDimensionMap[3].first );
01843         Range subset, adj;
01844         subset.insert( es, explicit_ents.end() );
01845         rval = iFace->get_adjacencies( subset, 2, false, adj, Interface::UNION );
01846         if( MB_SUCCESS != rval ) return rval;
01847         dead_ents = subtract( dead_ents, adj );
01848     }
01849 
01850     // Now delete anything remaining in dead_ents
01851     dbgOut.printf( 2, "Deleting %lu elements\n", (unsigned long)dead_ents.size() );
01852     dbgOut.print( 4, "\tDead entities: ", dead_ents );
01853     rval = iFace->delete_entities( dead_ents );
01854     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01855 
01856     // Remove dead entities from ID map
01857     while( !dead_ents.empty() )
01858     {
01859         EntityHandle start = dead_ents.front();
01860         EntityID count     = dead_ents.const_pair_begin()->second - start + 1;
01861         IDMap::iterator rit;
01862         for( rit = idMap.begin(); rit != idMap.end(); ++rit )
01863             if( rit->value <= start && ( EntityID )( start - rit->value ) < rit->count ) break;
01864         if( rit == idMap.end() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01865 
01866         EntityID offset = start - rit->value;
01867         EntityID avail  = rit->count - offset;
01868         if( avail < count ) count = avail;
01869 
01870         dead_ents.erase( dead_ents.begin(), dead_ents.begin() + count );
01871         idMap.erase( rit->begin + offset, count );
01872     }
01873 
01874     return MB_SUCCESS;
01875 }
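/* Erase-loop arithmetic, for illustration: if an idMap node maps file ids
 * starting at rit->begin to rit->count handles starting at rit->value,
 * and dead_ents begins mid-block at handle `start`, then the first
 * offset = start - rit->value handles of the block are kept and at most
 * avail = rit->count - offset are erased in this pass; any remaining dead
 * handles are found in a later idMap node on the next loop iteration.
 */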
01876 
01877 ErrorCode ReadHDF5::read_sets( const Range& file_ids )
01878 {
01879     CHECK_OPEN_HANDLES;
01880 
01881     debug_barrier();
01882 
01883     mhdf_Status status;
01884     ErrorCode rval;
01885 
01886     const size_t num_sets = fileInfo->sets.count;
01887     if( !num_sets )  // If no sets at all!
01888         return MB_SUCCESS;
01889 
01890     // Create sets
01891     std::vector< unsigned > flags( file_ids.size() );
01892     Range::iterator si = file_ids.begin();
01893     for( size_t i = 0; i < flags.size(); ++i, ++si )
01894         flags[i] = setMeta[*si - fileInfo->sets.start_id][3] & ~(long)mhdf_SET_RANGE_BIT;
01895     EntityHandle start_handle;
01896     // The file ids could be empty, for empty partitions
01897     if( !file_ids.empty() )
01898     {
01899         rval = readUtil->create_entity_sets( flags.size(), &flags[0], 0, start_handle );
01900         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01901         rval = insert_in_id_map( file_ids, start_handle );
01902         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01903     }
01904 
01905     // Read contents
01906     if( fileInfo->have_set_contents )
01907     {
01908         long len     = 0;
01909         hid_t handle = mhdf_openSetData( filePtr, &len, &status );
01910         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01911 
01912         ReadHDF5Dataset dat( "set contents", handle, nativeParallel, mpiComm, true );
01913         rval = read_set_data( file_ids, start_handle, dat, CONTENT );
01914         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01915     }
01916 
01917     // Read set child lists
01918     if( fileInfo->have_set_children )
01919     {
01920         long len     = 0;
01921         hid_t handle = mhdf_openSetChildren( filePtr, &len, &status );
01922         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01923 
01924         ReadHDF5Dataset dat( "set children", handle, nativeParallel, mpiComm, true );
01925         rval = read_set_data( file_ids, start_handle, dat, CHILD );
01926         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01927     }
01928 
01929     // Read set parent lists
01930     if( fileInfo->have_set_parents )
01931     {
01932         long len     = 0;
01933         hid_t handle = mhdf_openSetParents( filePtr, &len, &status );
01934         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01935 
01936         ReadHDF5Dataset dat( "set parents", handle, nativeParallel, mpiComm, true );
01937         rval = read_set_data( file_ids, start_handle, dat, PARENT );
01938         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01939     }
01940 
01941     return MB_SUCCESS;
01942 }
01943 
01944 ErrorCode ReadHDF5::read_all_set_meta()
01945 {
01946     CHECK_OPEN_HANDLES;
01947 
01948     assert( !setMeta );
01949     const long num_sets = fileInfo->sets.count;
01950     if( !num_sets ) return MB_SUCCESS;
01951 
01952     mhdf_Status status;
01953     hid_t handle = mhdf_openSetMetaSimple( filePtr, &status );
01954     if( is_error( status ) ) { MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" ); }
01955 
01956     // Allocate extra space if we need it for data conversion
01957     hid_t meta_type = H5Dget_type( handle );
01958     size_t size     = H5Tget_size( meta_type );
01959     if( size > sizeof( long ) )
01960         setMeta = new long[( num_sets * size + ( sizeof( long ) - 1 ) ) / sizeof( long )][4];
01961     else
01962         setMeta = new long[num_sets][4];
01963 
01964     // Set some parameters based on whether or not each proc reads the
01965     // table or only the root reads it and bcasts it to the others
01966     int rank     = 0;
01967     bool bcast   = false;
01968     hid_t ioprop = H5P_DEFAULT;
01969 #ifdef MOAB_HAVE_MPI
01970     MPI_Comm comm = 0;
01971     if( nativeParallel )
01972     {
01973         rank  = myPcomm->proc_config().proc_rank();
01974         comm  = myPcomm->proc_config().proc_comm();
01975         bcast = bcastDuplicateReads;
01976         if( !bcast ) ioprop = collIO;
01977     }
01978 #endif
01979 
01980     if( !bcast || 0 == rank )
01981     {
01982         mhdf_readSetMetaWithOpt( handle, 0, num_sets, meta_type, setMeta, ioprop, &status );
01983         if( is_error( status ) )
01984         {
01985             H5Tclose( meta_type );
01986             mhdf_closeData( filePtr, handle, &status );
01987             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01988         }
01989 
01990         H5Tconvert( meta_type, H5T_NATIVE_LONG, num_sets * 4, setMeta, 0, H5P_DEFAULT );
01991     }
01992     mhdf_closeData( filePtr, handle, &status );
01993     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01994     H5Tclose( meta_type );
01995 
01996     if( bcast )
01997     {
01998 #ifdef MOAB_HAVE_MPI
01999         int ierr = MPI_Bcast( (void*)setMeta, num_sets * 4, MPI_LONG, 0, comm );
02000         if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02001 #else
02002         assert( rank == 0 );              // If not MPI, then only one proc
02003 #endif
02004     }
02005 
02006     return MB_SUCCESS;
02007 }
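/* A minimal sketch of the read-on-root-then-broadcast pattern used above,
 * assuming MPI is available; `meta` and read_table_from_root() are
 * illustrative stand-ins, not MOAB API:
 *
 *   long( *meta )[4] = new long[num_sets][4];
 *   if( rank == 0 ) read_table_from_root( meta );  // serial HDF5 read
 *   MPI_Bcast( meta, num_sets * 4, MPI_LONG, 0, comm );
 *
 * When bcastDuplicateReads is set, this trades one broadcast for
 * num_procs concurrent reads of identical data.
 */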
02008 
02009 ErrorCode ReadHDF5::read_set_ids_recursive( Range& sets_in_out, bool contained_sets, bool child_sets )
02010 {
02011     CHECK_OPEN_HANDLES;
02012     mhdf_Status status;
02013 
02014     if( !fileInfo->have_set_children ) child_sets = false;
02015     if( !fileInfo->have_set_contents ) contained_sets = false;
02016     if( !child_sets && !contained_sets ) return MB_SUCCESS;
02017 
02018     // Open data tables
02019     if( fileInfo->sets.count == 0 )
02020     {
02021         assert( sets_in_out.empty() );
02022         return MB_SUCCESS;
02023     }
02024 
02027     ReadHDF5Dataset cont( "set contents", false, mpiComm );
02028     ReadHDF5Dataset child( "set children", false, mpiComm );
02029 
02030     if( contained_sets )
02031     {
02032         long content_len     = 0;
02033         hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
02034         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02035         try
02036         {
02037             cont.init( content_handle, true );
02038         }
02039         catch( ReadHDF5Dataset::Exception )
02040         {
02041             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02042         }
02043     }
02044 
02045     if( child_sets )
02046     {
02047         long child_len     = 0;
02048         hid_t child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
02049         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02050         try
02051         {
02052             child.init( child_handle, true );
02053         }
02054         catch( ReadHDF5Dataset::Exception )
02055         {
02056             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02057         }
02058     }
02059 
02060     ErrorCode rval = MB_SUCCESS;
02061     Range children, new_children( sets_in_out );
02062     int iteration_count = 0;
02063     do
02064     {
02065         ++iteration_count;
02066         dbgOut.tprintf( 2, "Iteration %d of read_set_ids_recursive\n", iteration_count );
02067         children.clear();
02068         if( child_sets )
02069         {
02070             rval = read_set_data( new_children, 0, child, CHILD, &children );
02071             if( MB_SUCCESS != rval ) break;
02072         }
02073         if( contained_sets )
02074         {
02075             rval = read_set_data( new_children, 0, cont, CONTENT, &children );
02076             // Remove any non-set values
02077             Range::iterator it = children.lower_bound( fileInfo->sets.start_id );
02078             children.erase( children.begin(), it );
02079             it = children.lower_bound( fileInfo->sets.start_id + fileInfo->sets.count );
02080             children.erase( it, children.end() );
02081             if( MB_SUCCESS != rval ) break;
02082         }
02083         new_children = subtract( children, sets_in_out );
02084         dbgOut.print_ints( 2, "Adding additional contained/child sets", new_children );
02085         sets_in_out.merge( new_children );
02086     } while( !new_children.empty() );
02087 
02088     return MB_SUCCESS;
02089 }
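/* Worked example of the fixed-point loop: if set A contains set B and B
 * contains set C, then starting from sets_in_out = { A } the first pass
 * adds B, the second adds C, and the third discovers nothing not already
 * in sets_in_out, so new_children is empty and the loop terminates.
 */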
02090 
02091 ErrorCode ReadHDF5::find_sets_containing( Range& sets_out, bool read_set_containing_parents )
02092 {
02093     ErrorCode rval;
02094     mhdf_Status status;
02095 
02096     CHECK_OPEN_HANDLES;
02097 
02098     if( !fileInfo->have_set_contents ) return MB_SUCCESS;
02099     assert( fileInfo->sets.count );
02100 
02101     // Open data tables
02102     long content_len     = 0;
02103     hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
02104     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02105 
02106     hid_t data_type = H5Dget_type( content_handle );
02107 
02108     rval = find_sets_containing( content_handle, data_type, content_len, read_set_containing_parents, sets_out );
02109 
02110     H5Tclose( data_type );
02111 
02112     mhdf_closeData( filePtr, content_handle, &status );
02113     if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02114 
02115     return rval;
02116 }
02117 
02118 static bool set_map_intersect( bool ranged, const long* contents, int content_len,
02119                                const RangeMap< long, EntityHandle >& id_map )
02120 {
02121     if( ranged )
02122     {
02123         if( !content_len || id_map.empty() ) return false;
02124 
02125         const long* j         = contents;
02126         const long* const end = contents + content_len;
02127         assert( content_len % 2 == 0 );
02128         while( j != end )
02129         {
02130             long start = *( j++ );
02131             long count = *( j++ );
02132             if( id_map.intersects( start, count ) ) return true;
02133         }
02134     }
02135     else
02136     {
02137         const long* const end = contents + content_len;
02138         for( const long* i = contents; i != end; ++i )
02139             if( id_map.exists( *i ) ) return true;
02140     }
02141 
02142     return false;
02143 }
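/* Contents encoding, for illustration: when mhdf_SET_RANGE_BIT is set,
 * the content list is ( start, count ) pairs, so { 7, 3 } denotes file
 * ids 7, 8, 9; otherwise it is an explicit id list. Accordingly, the
 * ranged branch probes id_map.intersects( start, count ) per pair and the
 * list branch probes id_map.exists( id ) per entry.
 */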
02144 
02145 struct SetContOffComp
02146 {
02147     bool operator()( const long a1[4], const long a2[4] )
02148     {
02149         return a1[ReadHDF5::CONTENT] < a2[0];
02150     }
02151 };
02152 
02153 ErrorCode ReadHDF5::find_sets_containing( hid_t contents_handle, hid_t content_type, long contents_len,
02154                                           bool read_set_containing_parents, Range& file_ids )
02155 {
02156     CHECK_OPEN_HANDLES;
02157 
02158     // Scan all set contents data
02159 
02160     const size_t content_size = H5Tget_size( content_type );
02161     const long num_sets       = fileInfo->sets.count;
02162     dbgOut.printf( 2, "Searching contents of %ld sets\n", num_sets );
02163     mhdf_Status status;
02164 
02165     int rank   = 0;
02166     bool bcast = false;
02167 #ifdef MOAB_HAVE_MPI
02168     MPI_Comm comm = 0;
02169     if( nativeParallel )
02170     {
02171         rank  = myPcomm->proc_config().proc_rank();
02172         comm  = myPcomm->proc_config().proc_comm();
02173         bcast = bcastDuplicateReads;
02174     }
02175 #endif
02176 
02177     // Check offsets so that we don't read past end of table or
02178     // walk off end of array.
02179     long prev = -1;
02180     for( long i = 0; i < num_sets; ++i )
02181     {
02182         if( setMeta[i][CONTENT] < prev )
02183         {
02184             std::cerr << "Invalid data in set contents offsets at position " << i << ": index " << setMeta[i][CONTENT]
02185                       << " is less than previous index " << prev << std::endl;
02186             std::cerr.flush();
02187             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02188         }
02189         prev = setMeta[i][CONTENT];
02190     }
02191     if( setMeta[num_sets - 1][CONTENT] >= contents_len )
02192     {
02193         std::cerr << "Maximum set content index " << setMeta[num_sets - 1][CONTENT]
02194                   << " exceeds contents table length of " << contents_len << std::endl;
02195         std::cerr.flush();
02196         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02197     }
02198 
02199     // Set up buffer for reading set contents
02200     long* const content_buffer = (long*)dataBuffer;
02201     const long content_len     = bufferSize / std::max( content_size, sizeof( long ) );
02202 
02203     // Scan set table
02204     Range::iterator hint = file_ids.begin();
02205     Range tmp_range;
02206     long prev_idx    = -1;
02207     int mm           = 0;
02208     long sets_offset = 0;
02209     long temp_content[4];
02210     while( sets_offset < num_sets )
02211     {
02212         temp_content[0] = content_len + prev_idx;
02213         long sets_count =
02214             std::lower_bound( setMeta + sets_offset, setMeta + num_sets, temp_content, SetContOffComp() ) - setMeta -
02215             sets_offset;
02216         assert( sets_count >= 0 && sets_offset + sets_count <= num_sets );
02217         if( !sets_count )
02218         {  // Contents of single set don't fit in buffer
02219             long content_remaining = setMeta[sets_offset][CONTENT] - prev_idx;
02220             long content_offset    = prev_idx + 1;
02221             while( content_remaining )
02222             {
02223                 long content_count = content_len < content_remaining ? 2 * ( content_len / 2 ) : content_remaining;
02224                 assert_range( content_buffer, content_count );
02225                 dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, content_count );
02226                 if( !bcast || 0 == rank )
02227                 {
02228                     if( !bcast )
02229                         mhdf_readSetDataWithOpt( contents_handle, content_offset, content_count, content_type,
02230                                                  content_buffer, collIO, &status );
02231                     else
02232                         mhdf_readSetData( contents_handle, content_offset, content_count, content_type, content_buffer,
02233                                           &status );
02234                     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02235 
02236                     H5Tconvert( content_type, H5T_NATIVE_LONG, content_count, content_buffer, 0, H5P_DEFAULT );
02237                 }
02238                 if( bcast )
02239                 {
02240 #ifdef MOAB_HAVE_MPI
02241                     int ierr = MPI_Bcast( content_buffer, content_count, MPI_LONG, 0, comm );
02242                     if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02243 #else
02244                     assert( rank == 0 );  // If not MPI, then only one proc
02245 #endif
02246                 }
02247 
02248                 if( read_set_containing_parents )
02249                 {
02250                     tmp_range.clear();
02251                     if( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT )
02252                         tmp_range.insert( *content_buffer, *( content_buffer + 1 ) );
02253                     else
02254                         std::copy( content_buffer, content_buffer + content_count, range_inserter( tmp_range ) );
02255                     tmp_range = intersect( tmp_range, file_ids );
02256                 }
02257 
02258                 if( !tmp_range.empty() || set_map_intersect( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT,
02259                                                              content_buffer, content_count, idMap ) )
02260                 {
02261                     long id = fileInfo->sets.start_id + sets_offset;
02262                     hint    = file_ids.insert( hint, id, id );
02263                     if( !nativeParallel )  // Don't stop if doing READ_PART because we need to read
02264                                            // collectively
02265                         break;
02266                 }
02267                 content_remaining -= content_count;
02268                 content_offset += content_count;
02269             }
02270             prev_idx   = setMeta[sets_offset][CONTENT];
02271             sets_count = 1;
02272         }
02273         else if( long read_num = setMeta[sets_offset + sets_count - 1][CONTENT] - prev_idx )
02274         {
02275             assert( sets_count > 0 );
02276             assert_range( content_buffer, read_num );
02277             dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, read_num );
02278             if( !bcast || 0 == rank )
02279             {
02280                 if( !bcast )
02281                     mhdf_readSetDataWithOpt( contents_handle, prev_idx + 1, read_num, content_type, content_buffer,
02282                                              collIO, &status );
02283                 else
02284                     mhdf_readSetData( contents_handle, prev_idx + 1, read_num, content_type, content_buffer, &status );
02285                 if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02286 
02287                 H5Tconvert( content_type, H5T_NATIVE_LONG, read_num, content_buffer, 0, H5P_DEFAULT );
02288             }
02289             if( bcast )
02290             {
02291 #ifdef MOAB_HAVE_MPI
02292                 int ierr = MPI_Bcast( content_buffer, read_num, MPI_LONG, 0, comm );
02293                 if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02294 #else
02295                 assert( rank == 0 );      // If not MPI, then only one proc
02296 #endif
02297             }
02298 
02299             long* buff_iter = content_buffer;
02300             for( long i = 0; i < sets_count; ++i )
02301             {
02302                 long set_size = setMeta[i + sets_offset][CONTENT] - prev_idx;
02303                 prev_idx += set_size;
02304 
02305                 // Check whether contents include a set already being loaded
02306                 if( read_set_containing_parents )
02307                 {
02308                     tmp_range.clear();
02309                     if( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT )
02310                     {
02311                         // Put the contents of the set in tmp_range;
02312                         // at this point file_ids contains only other sets
02313                         const long* j         = buff_iter;
02314                         const long* const end = buff_iter + set_size;
02315                         assert( set_size % 2 == 0 );
02316                         while( j != end )
02317                         {
02318                             long start = *( j++ );
02319                             long count = *( j++ );
02320                             tmp_range.insert( start, start + count - 1 );
02321                         }
02322                     }
02323                     else
02324                         std::copy( buff_iter, buff_iter + set_size, range_inserter( tmp_range ) );
02325                     tmp_range = intersect( tmp_range, file_ids );
02326                 }
02327 
02328                 if( !tmp_range.empty() ||
02329                     set_map_intersect( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT, buff_iter, set_size, idMap ) )
02330                 {
02331                     long id = fileInfo->sets.start_id + sets_offset + i;
02332                     hint    = file_ids.insert( hint, id, id );
02333                 }
02334                 buff_iter += set_size;
02335             }
02336         }
02337 
02338         sets_offset += sets_count;
02339     }
02340 
02341     return MB_SUCCESS;
02342 }
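/* Partitioning sketch (offsets are hypothetical): with end offsets
 * { 2, 5, 9, 40 } and content_len == 10, the lower_bound over setMeta
 * (ordered by end offset, compared with SetContOffComp) yields sets 0-1
 * for the first pass (values 0..5) and set 2 for the second (values
 * 6..9). Set 3's 31 values exceed the buffer, so they are streamed
 * through the single-set branch in chunks of at most 10, truncated to an
 * even count while data remains so ranged ( start, count ) pairs are
 * never split.
 */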
02343 
02344 static Range::iterator copy_set_contents( Range::iterator hint, int ranged, EntityHandle* contents, long length,
02345                                           Range& results )
02346 {
02347     if( ranged )
02348     {
02349         assert( length % 2 == 0 );
02350         for( long i = 0; i < length; i += 2 )
02351             hint = results.insert( hint, contents[i], contents[i] + contents[i + 1] - 1 );
02352     }
02353     else
02354     {
02355         std::sort( contents, contents + length );
02356         for( long i = 0; i < length; ++i )
02357             hint = results.insert( hint, contents[i] );
02358     }
02359     return hint;
02360 }
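/* E.g., ranged contents { 5, 3, 12, 1 } expand to file ids 5, 6, 7, 12,
 * while non-ranged contents are sorted first so the hinted Range
 * insertions stay close to sequential.
 */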
02361 
02362 ErrorCode ReadHDF5::read_set_data( const Range& set_file_ids, EntityHandle start_handle, ReadHDF5Dataset& data,
02363                                    SetMode mode, Range* file_ids_out )
02364 {
02365     ErrorCode rval;
02366     Range::const_pair_iterator pi;
02367     Range::iterator out_hint;
02368     if( file_ids_out ) out_hint = file_ids_out->begin();
02369 
02370     // Construct range of offsets into data table at which to read
02371     // Note: all offsets are incremented by TWEAK because Range cannot
02372     // store zeros.
02373     const long TWEAK = 1;
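    // E.g., a set whose data occupies table rows 0..9 is recorded in
    // data_offsets as [1, 10]; passing TWEAK as the start id to
    // set_file_ids() below shifts the offsets back before reading.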
02374     Range data_offsets;
02375     Range::iterator hint = data_offsets.begin();
02376     pi                   = set_file_ids.const_pair_begin();
02377     if( (long)pi->first == fileInfo->sets.start_id )
02378     {
02379         long second = pi->second - fileInfo->sets.start_id;
02380         if( setMeta[second][mode] >= 0 ) hint = data_offsets.insert( hint, TWEAK, setMeta[second][mode] + TWEAK );
02381         ++pi;
02382     }
02383     for( ; pi != set_file_ids.const_pair_end(); ++pi )
02384     {
02385         long first  = pi->first - fileInfo->sets.start_id;
02386         long second = pi->second - fileInfo->sets.start_id;
02387         long idx1   = setMeta[first - 1][mode] + 1;
02388         long idx2   = setMeta[second][mode];
02389         if( idx2 >= idx1 ) hint = data_offsets.insert( hint, idx1 + TWEAK, idx2 + TWEAK );
02390     }
02391     try
02392     {
02393         data.set_file_ids( data_offsets, TWEAK, bufferSize / sizeof( EntityHandle ), handleType );
02394     }
02395     catch( ReadHDF5Dataset::Exception )
02396     {
02397         return MB_FAILURE;
02398     }
02399 
02400     // We need to increment this for each processed set because
02401     // the sets were created in the order of the ids in file_ids.
02402     EntityHandle h = start_handle;
02403 
02404     const long ranged_flag = ( mode == CONTENT ) ? mhdf_SET_RANGE_BIT : 0;
02405 
02406     std::vector< EntityHandle > partial;  // For when we read only part of the contents of a set/entity
02407     Range::const_iterator fileid_iter = set_file_ids.begin();
02408     EntityHandle* buffer              = reinterpret_cast< EntityHandle* >( dataBuffer );
02409     size_t count, offset;
02410 
02411     int nn = 0;
02412     /*
02413     #ifdef  MOAB_HAVE_MPI
02414       if (nativeParallel && mode==CONTENT && myPcomm->proc_config().proc_size()>1 &&
02415     data_offsets.empty())
02416       {
02417         MB_SET_ERR_CONT( "ReadHDF5 Failure: Attempt reading an empty dataset on proc " <<
02418             myPcomm->proc_config().proc_rank());
02419         MPI_Abort(myPcomm->proc_config().proc_comm(), 1);
02420       }
02421     #endif
02422     */
02423     if( ( 1 >= set_file_ids.size() ) && ( data.done() ) && moab::ReadHDF5::CONTENT == mode )
02424         // Do at least one null read; it is needed for collective I/O in parallel
02425         data.null_read();
02426 
02427     while( !data.done() )
02428     {
02429         dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data.get_debug_desc() );
02430         try
02431         {
02432             data.read( buffer, count );
02433         }
02434         catch( ReadHDF5Dataset::Exception )
02435         {
02436             return MB_FAILURE;
02437         }
02438 
02439         // An assert is not appropriate here: this proc may have processed all of its
02440         // file ids while another proc has not; locally count will be zero and nothing
02441         // will be done, but we still need to go through the motions of the collective read
02442 
02443         // Handle 'special' case where we read some, but not all
02444         // of the data for an entity during the last iteration.
02445         offset = 0;
02446         if( !partial.empty() )
02447         {  // Didn't read all of previous entity
02448             assert( fileid_iter != set_file_ids.end() );
02449             size_t num_prev = partial.size();
02450             size_t idx      = *fileid_iter - fileInfo->sets.start_id;
02451             size_t len      = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
02452             offset          = len - num_prev;
02453             if( offset > count )
02454             {  // Still don't have all
02455                 partial.insert( partial.end(), buffer, buffer + count );
02456                 continue;
02457             }
02458 
02459             partial.insert( partial.end(), buffer, buffer + offset );
02460             if( file_ids_out )
02461             {
02462                 out_hint = copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, &partial[0], partial.size(),
02463                                               *file_ids_out );
02464             }
02465             else
02466             {
02467                 switch( mode )
02468                 {
02469                     size_t valid;
02470                     case CONTENT:
02471                         if( setMeta[idx][3] & ranged_flag )
02472                         {
02473                             if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
02474                             Range range;
02475                             convert_range_to_handle( &partial[0], len / 2, range );
02476                             rval = moab()->add_entities( h, range );
02477                         }
02478                         else
02479                         {
02480                             convert_id_to_handle( &partial[0], len, valid );
02481                             rval = moab()->add_entities( h, &partial[0], valid );
02482                         }
02483                         break;
02484                     case CHILD:
02485                         convert_id_to_handle( &partial[0], len, valid );
02486                         rval = moab()->add_child_meshsets( h, &partial[0], valid );
02487                         break;
02488                     case PARENT:
02489                         convert_id_to_handle( &partial[0], len, valid );
02490                         rval = moab()->add_parent_meshsets( h, &partial[0], valid );
02491                         break;
02492                     default:
02493                         break;
02494                 }
02495                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02496             }
02497 
02498             ++fileid_iter;
02499             ++h;
02500             partial.clear();
02501         }
02502 
02503         // Process contents for all entities for which we
02504         // have read the complete list
02505         while( offset < count )
02506         {
02507             assert( fileid_iter != set_file_ids.end() );
02508             size_t idx = *fileid_iter - fileInfo->sets.start_id;
02509             size_t len = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
02510             // If we did not read all of the final entity,
02511             // store what we did read to be processed in the
02512             // next iteration
02513             if( offset + len > count )
02514             {
02515                 partial.insert( partial.end(), buffer + offset, buffer + count );
02516                 break;
02517             }
02518 
02519             if( file_ids_out )
02520             {
02521                 out_hint =
02522                     copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, buffer + offset, len, *file_ids_out );
02523             }
02524             else
02525             {
02526                 switch( mode )
02527                 {
02528                     size_t valid;
02529                     case CONTENT:
02530                         if( setMeta[idx][3] & ranged_flag )
02531                         {
02532                             if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
02533                             Range range;
02534                             convert_range_to_handle( buffer + offset, len / 2, range );
02535                             rval = moab()->add_entities( h, range );
02536                         }
02537                         else
02538                         {
02539                             convert_id_to_handle( buffer + offset, len, valid );
02540                             rval = moab()->add_entities( h, buffer + offset, valid );
02541                         }
02542                         break;
02543                     case CHILD:
02544                         convert_id_to_handle( buffer + offset, len, valid );
02545                         rval = moab()->add_child_meshsets( h, buffer + offset, valid );
02546                         break;
02547                     case PARENT:
02548                         convert_id_to_handle( buffer + offset, len, valid );
02549                         rval = moab()->add_parent_meshsets( h, buffer + offset, valid );
02550                         break;
02551                     default:
02552                         break;
02553                 }
02554                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02555             }
02556 
02557             ++fileid_iter;
02558             ++h;
02559             offset += len;
02560         }
02561     }
02562 
02563     return MB_SUCCESS;
02564 }
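/* Length arithmetic above, with a small example: setMeta[idx][mode] holds
 * the END index of set idx's data in the table, so for end offsets
 * { 2, 5, 9 } the three sets own 3, 3, and 4 values respectively
 * ( len = setMeta[idx][mode] - setMeta[idx - 1][mode], or
 * setMeta[0][mode] + 1 for the first set ). The `partial` vector carries
 * a set's leftover values across buffer boundaries when its data
 * straddles two reads.
 */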
02565 
02566 ErrorCode ReadHDF5::get_set_contents( const Range& sets, Range& file_ids )
02567 {
02568     CHECK_OPEN_HANDLES;
02569 
02570     if( !fileInfo->have_set_contents ) return MB_SUCCESS;
02571     dbgOut.tprint( 2, "Reading set contained file IDs\n" );
02572     try
02573     {
02574         mhdf_Status status;
02575         long content_len;
02576         hid_t contents = mhdf_openSetData( filePtr, &content_len, &status );
02577         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02578         ReadHDF5Dataset data( "set contents", contents, nativeParallel, mpiComm, true );
02579 
02580         return read_set_data( sets, 0, data, CONTENT, &file_ids );
02581     }
02582     catch( ReadHDF5Dataset::Exception )
02583     {
02584         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02585     }
02586 }
02587 
02588 ErrorCode ReadHDF5::read_adjacencies( hid_t table, long table_len )
02589 {
02590     CHECK_OPEN_HANDLES;
02591 
02592     ErrorCode rval;
02593     mhdf_Status status;
02594 
02595     debug_barrier();
02596 
02597     hid_t read_type = H5Dget_type( table );
02598     if( read_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02599     const bool convert = !H5Tequal( read_type, handleType );
02600 
02601     EntityHandle* buffer = (EntityHandle*)dataBuffer;
02602     size_t chunk_size    = bufferSize / H5Tget_size( read_type );
02603     size_t remaining     = table_len;
02604     size_t left_over     = 0;
02605     size_t offset        = 0;
02606     dbgOut.printf( 3, "Reading adjacency list in %lu chunks\n",
02607                    (unsigned long)( remaining + chunk_size - 1 ) / chunk_size );
02608     int nn = 0;
02609     while( remaining )
02610     {
02611         dbgOut.printf( 3, "Reading chunk %d of adjacency list\n", ++nn );
02612 
02613         size_t count = std::min( chunk_size, remaining );
02614         count -= left_over;
02615         remaining -= count;
02616 
02617         assert_range( buffer + left_over, count );
02618         mhdf_readAdjacencyWithOpt( table, offset, count, read_type, buffer + left_over, collIO, &status );
02619         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02620 
02621         if( convert )
02622         {
02623             herr_t err = H5Tconvert( read_type, handleType, count, buffer + left_over, 0, H5P_DEFAULT );
02624             if( err < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02625         }
02626 
02627         EntityHandle* iter = buffer;
02628         EntityHandle* end  = buffer + count + left_over;
02629         while( end - iter >= 3 )
02630         {
02631             EntityHandle h      = idMap.find( *iter++ );
02632             EntityHandle count2 = *iter++;
02633             if( !h )
02634             {
02635                 iter += count2;
02636                 continue;
02637             }
02638 
02639             if( count2 < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02640 
02641             if( end < count2 + iter )
02642             {
02643                 iter -= 2;
02644                 break;
02645             }
02646 
02647             size_t valid;
02648             convert_id_to_handle( iter, count2, valid, idMap );
02649             rval = iFace->add_adjacencies( h, iter, valid, false );
02650             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02651 
02652             iter += count2;
02653         }
02654 
02655         left_over = end - iter;
02656         assert_range( (char*)buffer, left_over );
02657         assert_range( (char*)iter, left_over );
02658         memmove( buffer, iter, left_over );
02659     }
02660 
02661     assert( !left_over );  // Unexpected truncation of data
02662 
02663     return MB_SUCCESS;
02664 }
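/* Adjacency stream format, for illustration: the table is a flat sequence
 * of ( entity id, count, id_1 ... id_count ) records. A hypothetical
 * stream { 5, 2, 10, 11, 9, 1, 12 } gives entity 5 the adjacencies
 * { 10, 11 } and entity 9 the adjacency { 12 }; records whose leading id
 * is not in idMap are skipped, and a record split across two chunks is
 * carried over via left_over and memmove.
 */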
02665 
02666 ErrorCode ReadHDF5::read_tag( int tag_index )
02667 {
02668     CHECK_OPEN_HANDLES;
02669 
02670     dbgOut.tprintf( 2, "Reading tag \"%s\"\n", fileInfo->tags[tag_index].name );
02671 
02672     debug_barrier();
02673 
02674     ErrorCode rval;
02675     mhdf_Status status;
02676     Tag tag         = 0;
02677     hid_t read_type = -1;
02678     bool table_type;
02679     rval = create_tag( fileInfo->tags[tag_index], tag, read_type );
02680     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02681 
02682     if( fileInfo->tags[tag_index].have_sparse )
02683     {
02684         hid_t handles[3];
02685         long num_ent, num_val;
02686         mhdf_openSparseTagData( filePtr, fileInfo->tags[tag_index].name, &num_ent, &num_val, handles, &status );
02687         if( is_error( status ) )
02688         {
02689             if( read_type ) H5Tclose( read_type );
02690             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02691         }
02692 
02693         table_type = false;
02694         if( read_type == 0 )
02695         {
02696             read_type = H5Dget_type( handles[1] );
02697             if( read_type < 0 )
02698             {
02699                 mhdf_closeData( filePtr, handles[0], &status );
02700                 mhdf_closeData( filePtr, handles[1], &status );
02701                 if( fileInfo->tags[tag_index].size <= 0 ) mhdf_closeData( filePtr, handles[2], &status );
02702                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02703             }
02704             table_type = true;
02705         }
02706 
02707         if( fileInfo->tags[tag_index].size > 0 )
02708         {
02709             dbgOut.printf( 2, "Reading sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
02710             rval = read_sparse_tag( tag, read_type, handles[0], handles[1], num_ent );
02711         }
02712         else
02713         {
02714             dbgOut.printf( 2, "Reading var-len sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
02715             rval = read_var_len_tag( tag, read_type, handles[0], handles[1], handles[2], num_ent, num_val );
02716         }
02717 
02718         if( table_type )
02719         {
02720             H5Tclose( read_type );
02721             read_type = 0;
02722         }
02723 
02724         mhdf_closeData( filePtr, handles[0], &status );
02725         if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02726         mhdf_closeData( filePtr, handles[1], &status );
02727         if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02728         if( fileInfo->tags[tag_index].size <= 0 )
02729         {
02730             mhdf_closeData( filePtr, handles[2], &status );
02731             if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02732         }
02733         if( MB_SUCCESS != rval )
02734         {
02735             if( read_type ) H5Tclose( read_type );
02736             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02737         }
02738     }
02739 
02740     for( int j = 0; j < fileInfo->tags[tag_index].num_dense_indices; ++j )
02741     {
02742         long count;
02743         const char* name = 0;
02744         mhdf_EntDesc* desc;
02745         int elem_idx = fileInfo->tags[tag_index].dense_elem_indices[j];
02746         if( elem_idx == -2 )
02747         {
02748             desc = &fileInfo->sets;
02749             name = mhdf_set_type_handle();
02750         }
02751         else if( elem_idx == -1 )
02752         {
02753             desc = &fileInfo->nodes;
02754             name = mhdf_node_type_handle();
02755         }
02756         else if( elem_idx >= 0 && elem_idx < fileInfo->num_elem_desc )
02757         {
02758             desc = &fileInfo->elems[elem_idx].desc;
02759             name = fileInfo->elems[elem_idx].handle;
02760         }
02761         else
02762         {
02763             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02764         }
02765 
02766         dbgOut.printf( 2, "Read dense data block for tag \"%s\" on \"%s\"\n", fileInfo->tags[tag_index].name, name );
02767 
02768         hid_t handle = mhdf_openDenseTagData( filePtr, fileInfo->tags[tag_index].name, name, &count, &status );
02769         if( is_error( status ) )
02770         {
02771             rval = MB_FAILURE;  // rval = error(MB_FAILURE);
02772             break;
02773         }
02774 
02775         if( count > desc->count )
02776         {
02777             mhdf_closeData( filePtr, handle, &status );
02778             MB_SET_ERR( MB_FAILURE,
02779                         "Invalid data length for dense tag data: " << name << "/" << fileInfo->tags[tag_index].name );
02780         }
02781 
02782         table_type = false;
02783         if( read_type == 0 )
02784         {
02785             read_type = H5Dget_type( handle );
02786             if( read_type < 0 )
02787             {
02788                 mhdf_closeData( filePtr, handle, &status );
02789                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02790             }
02791             table_type = true;
02792         }
02793 
02794         rval = read_dense_tag( tag, name, read_type, handle, desc->start_id, count );
02795 
02796         if( table_type )
02797         {
02798             H5Tclose( read_type );
02799             read_type = 0;
02800         }
02801 
02802         mhdf_closeData( filePtr, handle, &status );
02803         if( MB_SUCCESS != rval ) break;
02804         if( is_error( status ) )
02805         {
02806             rval = MB_FAILURE;
02807             break;
02808         }
02809     }
02810 
02811     if( read_type ) H5Tclose( read_type );
02812     return rval;
02813 }
02814 
02815 ErrorCode ReadHDF5::create_tag( const mhdf_TagDesc& info, Tag& handle, hid_t& hdf_type )
02816 {
02817     CHECK_OPEN_HANDLES;
02818 
02819     ErrorCode rval;
02820     mhdf_Status status;
02821     TagType storage;
02822     DataType mb_type;
02823     bool re_read_default = false;
02824 
02825     switch( info.storage )
02826     {
02827         case mhdf_DENSE_TYPE:
02828             storage = MB_TAG_DENSE;
02829             break;
02830         case mhdf_SPARSE_TYPE:
02831             storage = MB_TAG_SPARSE;
02832             break;
02833         case mhdf_BIT_TYPE:
02834             storage = MB_TAG_BIT;
02835             break;
02836         case mhdf_MESH_TYPE:
02837             storage = MB_TAG_MESH;
02838             break;
02839         default:
02840             MB_SET_ERR( MB_FAILURE, "Invalid storage type for tag '" << info.name << "': " << info.storage );
02841     }
02842 
02843     // Type-specific stuff
02844     if( info.type == mhdf_BITFIELD )
02845     {
02846         if( info.size < 1 || info.size > 8 )
02847         { MB_SET_ERR( MB_FAILURE, "Invalid bit tag: class is MB_TAG_BIT, num bits = " << info.size ); }
02848         hdf_type = H5Tcopy( H5T_NATIVE_B8 );
02849         mb_type  = MB_TYPE_BIT;
02850         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02851     }
02852     else if( info.type == mhdf_OPAQUE )
02853     {
02854         mb_type = MB_TYPE_OPAQUE;
02855 
02856         // Check for user-provided type
02857         Tag type_handle;
02858         std::string tag_type_name = "__hdf5_tag_type_";
02859         tag_type_name += info.name;
02860         rval = iFace->tag_get_handle( tag_type_name.c_str(), sizeof( hid_t ), MB_TYPE_OPAQUE, type_handle );
02861         if( MB_SUCCESS == rval )
02862         {
02863             EntityHandle root = 0;
02864             rval              = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
02865             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02866             hdf_type        = H5Tcopy( hdf_type );
02867             re_read_default = true;
02868         }
02869         else if( MB_TAG_NOT_FOUND == rval )
02870         {
02871             hdf_type = 0;
02872         }
02873         else
02874             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02875 
02876         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02877     }
02878     else
02879     {
02880         switch( info.type )
02881         {
02882             case mhdf_INTEGER:
02883                 hdf_type = H5T_NATIVE_INT;
02884                 mb_type  = MB_TYPE_INTEGER;
02885                 break;
02886             case mhdf_FLOAT:
02887                 hdf_type = H5T_NATIVE_DOUBLE;
02888                 mb_type  = MB_TYPE_DOUBLE;
02889                 break;
02890             case mhdf_BOOLEAN:
02891                 hdf_type = H5T_NATIVE_UINT;
02892                 mb_type  = MB_TYPE_INTEGER;
02893                 break;
02894             case mhdf_ENTITY_ID:
02895                 hdf_type = handleType;
02896                 mb_type  = MB_TYPE_HANDLE;
02897                 break;
02898             default:
02899                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02900         }
02901 
02902         if( info.size > 1 )
02903         {  // Array
02904             hsize_t tmpsize = info.size;
02905 #if defined( H5Tarray_create_vers ) && H5Tarray_create_vers > 1
02906             hdf_type = H5Tarray_create2( hdf_type, 1, &tmpsize );
02907 #else
02908             hdf_type = H5Tarray_create( hdf_type, 1, &tmpsize, NULL );
02909 #endif
02910         }
02911         else
02912         {
02913             hdf_type = H5Tcopy( hdf_type );
02914         }
02915         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02916     }
02917 
02918     // If default or global/mesh value in file, read it.
02919     if( info.default_value || info.global_value )
02920     {
02921         if( re_read_default )
02922         {
02923             mhdf_getTagValues( filePtr, info.name, hdf_type, info.default_value, info.global_value, &status );
02924             if( mhdf_isError( &status ) )
02925             {
02926                 if( hdf_type ) H5Tclose( hdf_type );
02927                 MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
02928             }
02929         }
02930 
02931         if( MB_TYPE_HANDLE == mb_type )
02932         {
02933             if( info.default_value )
02934             {
02935                 rval = convert_id_to_handle( (EntityHandle*)info.default_value, info.default_value_size );
02936                 if( MB_SUCCESS != rval )
02937                 {
02938                     if( hdf_type ) H5Tclose( hdf_type );
02939                     MB_SET_ERR( rval, "ReadHDF5 Failure" );
02940                 }
02941             }
02942             if( info.global_value )
02943             {
02944                 rval = convert_id_to_handle( (EntityHandle*)info.global_value, info.global_value_size );
02945                 if( MB_SUCCESS != rval )
02946                 {
02947                     if( hdf_type ) H5Tclose( hdf_type );
02948                     MB_SET_ERR( rval, "ReadHDF5 Failure" );
02949                 }
02950             }
02951         }
02952     }
02953 
02954     // Get tag handle, creating if necessary
02955     if( info.size < 0 )
02956         rval = iFace->tag_get_handle( info.name, info.default_value_size, mb_type, handle,
02957                                       storage | MB_TAG_CREAT | MB_TAG_VARLEN | MB_TAG_DFTOK, info.default_value );
02958     else
02959         rval = iFace->tag_get_handle( info.name, info.size, mb_type, handle, storage | MB_TAG_CREAT | MB_TAG_DFTOK,
02960                                       info.default_value );
02961     if( MB_SUCCESS != rval )
02962     {
02963         if( hdf_type ) H5Tclose( hdf_type );
02964         MB_SET_ERR( MB_FAILURE, "Tag type in file does not match type in database for \"" << info.name << "\"" );
02965     }
02966 
02967     if( info.global_value )
02968     {
02969         EntityHandle root = 0;
02970         if( info.size > 0 )
02971         {  // Fixed-length tag
02972             rval = iFace->tag_set_data( handle, &root, 1, info.global_value );
02973         }
02974         else
02975         {
02976             int tag_size = info.global_value_size;
02977             rval         = iFace->tag_set_by_ptr( handle, &root, 1, &info.global_value, &tag_size );
02978         }
02979         if( MB_SUCCESS != rval )
02980         {
02981             if( hdf_type ) H5Tclose( hdf_type );
02982             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02983         }
02984     }
02985 
02986     return MB_SUCCESS;
02987 }
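
// Sketch (hypothetical helper, not MOAB API) of the HDF5 type construction
// performed above for fixed-length tags: a multi-component tag becomes a
// 1-D HDF5 array type, a single value becomes a private copy of the element
// type, and the caller owns (and must H5Tclose) the returned identifier.
#if 0
static hid_t make_fixed_tag_type( hid_t element_type, int num_components )
{
    if( num_components > 1 )
    {
        hsize_t dim = num_components;
#if defined( H5Tarray_create_vers ) && H5Tarray_create_vers > 1
        return H5Tarray_create2( element_type, 1, &dim );
#else
        return H5Tarray_create( element_type, 1, &dim, NULL );
#endif
    }
    return H5Tcopy( element_type );
}

// Usage: hid_t t = make_fixed_tag_type( H5T_NATIVE_INT, 3 );  // 3-int tag
//        ... use t ...
//        if( t >= 0 ) H5Tclose( t );
#endif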
02988 
02989 ErrorCode ReadHDF5::read_dense_tag( Tag tag_handle, const char* ent_name, hid_t hdf_read_type, hid_t data,
02990                                     long start_id, long num_values )
02991 {
02992     CHECK_OPEN_HANDLES;
02993 
02994     ErrorCode rval;
02995     DataType mb_type;
02996 
02997     rval = iFace->tag_get_data_type( tag_handle, mb_type );
02998     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02999 
03000     int read_size;
03001     rval = iFace->tag_get_bytes( tag_handle, read_size );
03002     if( MB_SUCCESS != rval )  // Wrong function for variable-length tags
03003         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03004     // if (MB_TYPE_BIT == mb_type)
03005     // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
03006 
03007     if( hdf_read_type )
03008     {  // If not opaque
03009         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03010         if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03011     }
03012 
03013     // Get actual entities read from file
03014     Range file_ids, handles;
03015     Range::iterator f_ins = file_ids.begin(), h_ins = handles.begin();
03016     IDMap::iterator l, u;
03017     l = idMap.lower_bound( start_id );
03018     u = idMap.lower_bound( start_id + num_values - 1 );
03019     if( l != idMap.end() && start_id + num_values > l->begin )
03020     {
03021         if( l == u )
03022         {
03023             size_t beg = std::max( start_id, l->begin );
03024             size_t end = std::min( start_id + num_values, u->begin + u->count ) - 1;
03025             f_ins      = file_ids.insert( f_ins, beg, end );
03026             h_ins      = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + ( end - l->begin ) );
03027         }
03028         else
03029         {
03030             size_t beg = std::max( start_id, l->begin );
03031             f_ins      = file_ids.insert( f_ins, beg, l->begin + l->count - 1 );
03032             h_ins      = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + l->count - 1 );
03033             for( ++l; l != u; ++l )
03034             {
03035                 f_ins = file_ids.insert( f_ins, l->begin, l->begin + l->count - 1 );
03036                 h_ins = handles.insert( h_ins, l->value, l->value + l->count - 1 );
03037             }
03038             if( u != idMap.end() && u->begin < start_id + num_values )
03039             {
03040                 size_t end = std::min( start_id + num_values, u->begin + u->count ) - 1;
03041                 f_ins      = file_ids.insert( f_ins, u->begin, end );
03042                 h_ins      = handles.insert( h_ins, u->value, u->value + end - u->begin );
03043             }
03044         }
03045     }
03046 
03047     // Given that all of the entities for this dense tag data should
03048     // have been created as a single contiguous block, the resulting
03049     // MOAB handle range should be contiguous.
03050     // THE ABOVE IS NOT NECESSARILY TRUE. SOMETIMES LOWER-DIMENSION
03051     // ENTS ARE READ AND THEN DELETED FOR PARTIAL READS.
03052     // assert(handles.empty() || handles.size() == (handles.back() - handles.front() + 1));
03053 
03054     std::string tn( "<error>" );
03055     iFace->tag_get_name( tag_handle, tn );
03056     tn += " data for ";
03057     tn += ent_name;
03058     try
03059     {
03060         h_ins = handles.begin();
03061         ReadHDF5Dataset reader( tn.c_str(), data, nativeParallel, mpiComm, false );
03062         long buffer_size = bufferSize / read_size;
03063         reader.set_file_ids( file_ids, start_id, buffer_size, hdf_read_type );
03064         dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tn.c_str(), ent_name,
03065                        reader.get_read_count() );
03066         int nn = 0;
03067         while( !reader.done() )
03068         {
03069             dbgOut.printf( 3, "Reading chunk %d of \"%s\" data\n", ++nn, tn.c_str() );
03070 
03071             size_t count;
03072             reader.read( dataBuffer, count );
03073 
03074             if( MB_TYPE_HANDLE == mb_type )
03075             {
03076                 rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * read_size / sizeof( EntityHandle ) );
03077                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03078             }
03079 
03080             Range ents;
03081             Range::iterator end = h_ins;
03082             end += count;
03083             ents.insert( h_ins, end );
03084             h_ins = end;
03085 
03086             rval = iFace->tag_set_data( tag_handle, ents, dataBuffer );
03087             if( MB_SUCCESS != rval )
03088             {
03089                 dbgOut.printf( 1, "Internal error setting data for tag \"%s\"\n", tn.c_str() );
03090                 MB_SET_ERR( rval, "ReadHDF5 Failure" );
03091             }
03092         }
03093     }
03094     catch( ReadHDF5Dataset::Exception )
03095     {
03096         dbgOut.printf( 1, "Internal error reading dense data for tag \"%s\"\n", tn.c_str() );
03097         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03098     }
03099 
03100     return MB_SUCCESS;
03101 }
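
// Worked sketch (hypothetical types and numbers) of the file-id intersection
// above: an id-map entry (begin, count, value) maps file ids
// [begin, begin + count) onto handles [value, value + count), so clipping the
// dense block [start_id, start_id + n) against one entry yields matching
// file-id and handle sub-ranges, exactly as the lower_bound loop does.
#if 0
struct MapEntry
{
    long begin;           // first file id of the entry
    long count;           // number of consecutive file ids
    unsigned long value;  // handle assigned to 'begin'
};

// Returns false when the entry and the block do not overlap.
static bool intersect_entry( const MapEntry& e, long start_id, long n,
                             long& file_beg, long& file_end, unsigned long& handle_beg )
{
    file_beg              = ( start_id > e.begin ) ? start_id : e.begin;
    const long entry_last = e.begin + e.count - 1;
    const long block_last = start_id + n - 1;
    file_end              = ( entry_last < block_last ) ? entry_last : block_last;
    if( file_beg > file_end ) return false;
    handle_beg = e.value + ( file_beg - e.begin );
    return true;
}
#endif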
03102 
03103 // Read entire ID table and for those file IDs corresponding
03104 // to entities that we have read from the file add both the
03105 // offset into the offset range and the handle into the handle
03106 // range. If handles are not ordered, switch to using a vector.
03107 ErrorCode ReadHDF5::read_sparse_tag_indices( const char* name, hid_t id_table,
03108                                              EntityHandle start_offset,  // Can't put zero in a Range
03109                                              Range& offset_range, Range& handle_range,
03110                                              std::vector< EntityHandle >& handle_vect )
03111 {
03112     CHECK_OPEN_HANDLES;
03113 
03114     offset_range.clear();
03115     handle_range.clear();
03116     handle_vect.clear();
03117 
03118     ErrorCode rval;
03119     Range::iterator handle_hint = handle_range.begin();
03120     Range::iterator offset_hint = offset_range.begin();
03121 
03122     EntityHandle* idbuf = (EntityHandle*)dataBuffer;
03123     size_t idbuf_size   = bufferSize / sizeof( EntityHandle );
03124 
03125     std::string tn( name );
03126     tn += " indices";
03127 
03128     assert( start_offset > 0 );  // Can't put zero in a Range
03129     try
03130     {
03131         ReadHDF5Dataset id_reader( tn.c_str(), id_table, nativeParallel, mpiComm, false );
03132         id_reader.set_all_file_ids( idbuf_size, handleType );
03133         size_t offset = start_offset;
03134         dbgOut.printf( 3, "Reading file ids for sparse tag \"%s\" in %lu chunks\n", name, id_reader.get_read_count() );
03135         int nn = 0;
03136         while( !id_reader.done() )
03137         {
03138             dbgOut.printf( 3, "Reading chunk %d of \"%s\" IDs\n", ++nn, name );
03139             size_t count;
03140             id_reader.read( idbuf, count );
03141 
03142             rval = convert_id_to_handle( idbuf, count );
03143             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03144 
03145             // idbuf will now contain zero-valued handles for those
03146             // tag values that correspond to entities we are not reading
03147             // from the file.
03148             for( size_t i = 0; i < count; ++i )
03149             {
03150                 if( idbuf[i] )
03151                 {
03152                     offset_hint = offset_range.insert( offset_hint, offset + i );
03153                     if( !handle_vect.empty() ) { handle_vect.push_back( idbuf[i] ); }
03154                     else if( handle_range.empty() || idbuf[i] > handle_range.back() )
03155                     {
03156                         handle_hint = handle_range.insert( handle_hint, idbuf[i] );
03157                     }
03158                     else
03159                     {
03160                         handle_vect.resize( handle_range.size() );
03161                         std::copy( handle_range.begin(), handle_range.end(), handle_vect.begin() );
03162                         handle_range.clear();
03163                         handle_vect.push_back( idbuf[i] );
03164                         dbgOut.print( 2, "Switching to unordered list for tag handle list\n" );
03165                     }
03166                 }
03167             }
03168 
03169             offset += count;
03170         }
03171     }
03172     catch( ReadHDF5Dataset::Exception )
03173     {
03174         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03175     }
03176 
03177     return MB_SUCCESS;
03178 }
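
// Sketch (hypothetical, with std::set standing in for Range) of the
// container-degradation pattern above: handles accumulate in an ordered,
// run-compressed container while they arrive in increasing order; the first
// out-of-order handle triggers a one-time copy into a plain vector, which
// accepts any order at the cost of one slot per handle.
#if 0
#include <set>
#include <vector>

static void append_handle( std::set< unsigned long >& ordered,
                           std::vector< unsigned long >& fallback, unsigned long h )
{
    if( !fallback.empty() )
        fallback.push_back( h );  // already degraded to a plain list
    else if( ordered.empty() || h > *ordered.rbegin() )
        ordered.insert( ordered.end(), h );  // still sorted: cheap append
    else
    {
        fallback.assign( ordered.begin(), ordered.end() );  // degrade once
        ordered.clear();
        fallback.push_back( h );
    }
}
#endif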
03179 
03180 ErrorCode ReadHDF5::read_sparse_tag( Tag tag_handle, hid_t hdf_read_type, hid_t id_table, hid_t value_table,
03181                                      long /*num_values*/ )
03182 {
03183     CHECK_OPEN_HANDLES;
03184 
03185     // Read entire ID table and for those file IDs corresponding
03186     // to entities that we have read from the file add both the
03187     // offset into the offset range and the handle into the handle
03188     // range.  If handles are not ordered, switch to using a vector.
03189     const EntityHandle base_offset = 1;  // Can't put zero in a Range
03190     std::vector< EntityHandle > handle_vect;
03191     Range handle_range, offset_range;
03192     std::string tn( "<error>" );
03193     iFace->tag_get_name( tag_handle, tn );
03194     ErrorCode rval =
03195         read_sparse_tag_indices( tn.c_str(), id_table, base_offset, offset_range, handle_range, handle_vect );
03196     if( MB_SUCCESS != rval ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03197 
03198     DataType mbtype;
03199     rval = iFace->tag_get_data_type( tag_handle, mbtype );
03200     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03201 
03202     int read_size;
03203     rval = iFace->tag_get_bytes( tag_handle, read_size );
03204     if( MB_SUCCESS != rval )  // Wrong function for variable-length tags
03205         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03206     // if (MB_TYPE_BIT == mbtype)
03207     // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
03208 
03209     if( hdf_read_type )
03210     {  // If not opaque
03211         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03212         if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03213     }
03214 
03215     const int handles_per_tag = read_size / sizeof( EntityHandle );
03216 
03217     // Now read data values
03218     size_t chunk_size = bufferSize / read_size;
03219     try
03220     {
03221         ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), value_table, nativeParallel, mpiComm, false );
03222         val_reader.set_file_ids( offset_range, base_offset, chunk_size, hdf_read_type );
03223         dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tn.c_str(),
03224                        val_reader.get_read_count() );
03225         int nn        = 0;
03226         size_t offset = 0;
03227         while( !val_reader.done() )
03228         {
03229             dbgOut.printf( 3, "Reading chunk %d of \"%s\" values\n", ++nn, tn.c_str() );
03230             size_t count;
03231             val_reader.read( dataBuffer, count );
03232             if( MB_TYPE_HANDLE == mbtype )
03233             {
03234                 rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * handles_per_tag );
03235                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03236             }
03237 
03238             if( !handle_vect.empty() )
03239             {
03240                 rval = iFace->tag_set_data( tag_handle, &handle_vect[offset], count, dataBuffer );
03241                 offset += count;
03242             }
03243             else
03244             {
03245                 Range r;
03246                 r.merge( handle_range.begin(), handle_range.begin() + count );
03247                 handle_range.erase( handle_range.begin(), handle_range.begin() + count );
03248                 rval = iFace->tag_set_data( tag_handle, r, dataBuffer );
03249             }
03250             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03251         }
03252     }
03253     catch( ReadHDF5Dataset::Exception )
03254     {
03255         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03256     }
03257 
03258     return MB_SUCCESS;
03259 }
03260 
03261 ErrorCode ReadHDF5::read_var_len_tag( Tag tag_handle, hid_t hdf_read_type, hid_t ent_table, hid_t val_table,
03262                                       hid_t off_table, long /*num_entities*/, long /*num_values*/ )
03263 {
03264     CHECK_OPEN_HANDLES;
03265 
03266     ErrorCode rval;
03267     DataType mbtype;
03268 
03269     rval = iFace->tag_get_data_type( tag_handle, mbtype );
03270     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03271 
03272     // Can't do variable-length bit tags
03273     if( MB_TYPE_BIT == mbtype ) MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
03274 
03275     // If here, MOAB tag must be variable-length
03276     int mbsize;
03277     if( MB_VARIABLE_DATA_LENGTH != iFace->tag_get_bytes( tag_handle, mbsize ) )
03278     {
03279         assert( false ); MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
03280     }
03281 
03282     int read_size;
03283     if( hdf_read_type )
03284     {
03285         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03286         if( hdf_size < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03287         read_size = hdf_size;
03288     }
03289     else
03290     {
03291         // Opaque
03292         read_size = 1;
03293     }
03294 
03295     std::string tn( "<error>" );
03296     iFace->tag_get_name( tag_handle, tn );
03297 
03298     // Read entire ID table and for those file IDs corresponding
03299     // to entities that we have read from the file add both the
03300     // offset into the offset range and the handle into the handle
03301     // range. If handles are not ordered, switch to using a vector.
03302     const EntityHandle base_offset = 1;  // Can't put zero in a Range
03303     std::vector< EntityHandle > handle_vect;
03304     Range handle_range, offset_range;
03305     rval = read_sparse_tag_indices( tn.c_str(), ent_table, base_offset, offset_range, handle_range, handle_vect );
03306 
03307     // This code only works if the id_table is an ordered list.
03308     // This assumption was also true for the previous iteration
03309     // of this code, but wasn't checked. MOAB's file writer
03310     // always writes an ordered list for id_table.
03311     if( !handle_vect.empty() ) { MB_SET_ERR( MB_FAILURE, "Unordered file ids for variable length tag not supported" ); }
03312 
03313     class VTReader : public ReadHDF5VarLen
03314     {
03315         Tag tagHandle;
03316         bool isHandle;
03317         size_t readSize;
03318         ReadHDF5* readHDF5;
03319 
03320       public:
03321         ErrorCode store_data( EntityHandle file_id, void* data, long count, bool )
03322         {
03323             ErrorCode rval1;
03324             if( isHandle )
03325             {
03326                 assert( readSize == sizeof( EntityHandle ) );
03327                 rval1 = readHDF5->convert_id_to_handle( (EntityHandle*)data, count ); MB_CHK_ERR( rval1 );
03328             }
03329             int n = count;
03330             return readHDF5->moab()->tag_set_by_ptr( tagHandle, &file_id, 1, &data, &n );
03331         }
03332         VTReader( DebugOutput& debug_output, void* buffer, size_t buffer_size, Tag tag, bool is_handle_tag,
03333                   size_t read_size1, ReadHDF5* owner )
03334             : ReadHDF5VarLen( debug_output, buffer, buffer_size ), tagHandle( tag ), isHandle( is_handle_tag ),
03335               readSize( read_size1 ), readHDF5( owner )
03336         {
03337         }
03338     };
03339 
03340     VTReader tool( dbgOut, dataBuffer, bufferSize, tag_handle, MB_TYPE_HANDLE == mbtype, read_size, this );
03341     try
03342     {
03343         // Read offsets into value table.
03344         std::vector< unsigned > counts;
03345         Range offsets;
03346         ReadHDF5Dataset off_reader( ( tn + " offsets" ).c_str(), off_table, nativeParallel, mpiComm, false );
03347         rval = tool.read_offsets( off_reader, offset_range, base_offset, base_offset, offsets, counts );
03348         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03349 
03350         // Read tag values
03351         Range empty;
03352         ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), val_table, nativeParallel, mpiComm, false );
03353         rval = tool.read_data( val_reader, offsets, base_offset, hdf_read_type, handle_range, counts, empty );
03354         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03355     }
03356     catch( ReadHDF5Dataset::Exception )
03357     {
03358         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03359     }
03360 
03361     return MB_SUCCESS;
03362 }
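
// Minimal sketch (hypothetical tag, entity, and value) of the
// tag_set_by_ptr call made by VTReader::store_data above: variable-length
// values are passed as one pointer and one length per entity, so a single
// entity takes a one-element pointer array plus a one-element size array.
#if 0
// Assuming 'mb' is a moab::Interface*, 'tag' a variable-length opaque tag,
// and 'ent' an existing EntityHandle:
const char value[] = "example payload";
const void* ptr    = value;
int len            = (int)sizeof( value );
moab::ErrorCode rc = mb->tag_set_by_ptr( tag, &ent, 1, &ptr, &len );
#endif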
03363 
03364 ErrorCode ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size )
03365 {
03366     convert_id_to_handle( array, size, idMap );
03367     return MB_SUCCESS;
03368 }
03369 
03370 void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, const RangeMap< long, EntityHandle >& id_map )
03371 {
03372     for( EntityHandle* const end = array + size; array != end; ++array )
03373         *array = id_map.find( *array );
03374 }
03375 
03376 void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, size_t& new_size,
03377                                      const RangeMap< long, EntityHandle >& id_map )
03378 {
03379     RangeMap< long, EntityHandle >::const_iterator it;
03380     new_size = 0;
03381     for( size_t i = 0; i < size; ++i )
03382     {
03383         it = id_map.lower_bound( array[i] );
03384         if( it != id_map.end() && it->begin <= (long)array[i] )
03385             array[new_size++] = it->value + ( array[i] - it->begin );
03386     }
03387 }
03388 
03389 void ReadHDF5::convert_range_to_handle( const EntityHandle* ranges, size_t num_ranges,
03390                                         const RangeMap< long, EntityHandle >& id_map, Range& merge )
03391 {
03392     RangeMap< long, EntityHandle >::iterator it = id_map.begin();
03393     Range::iterator hint                        = merge.begin();
03394     for( size_t i = 0; i < num_ranges; ++i )
03395     {
03396         long id        = ranges[2 * i];
03397         const long end = id + ranges[2 * i + 1];
03398         // We assume that 'ranges' is sorted, but check just in case it isn't.
03399         if( it == id_map.end() || it->begin > id ) it = id_map.begin();
03400         it = id_map.lower_bound( it, id_map.end(), id );
03401         if( it == id_map.end() ) continue;
03402         if( id < it->begin ) id = it->begin;
03403         while( id < end )
03404         {
03405             if( id < it->begin ) id = it->begin;
03406             const long off = id - it->begin;
03407             long count     = std::min( it->count - off, end - id );
03408             // This subrange may begin past the requested end, which would
03409             // yield a negative count that makes no sense; in that case we
03410             // are done with this range, so go on to the next one.
03411             if( count <= 0 ) break;
03412             hint = merge.insert( hint, it->value + off, it->value + off + count - 1 );
03413             id += count;
03414             if( id < end )
03415             {
03416                 if( ++it == id_map.end() ) break;
03417                 if( it->begin > end ) break;
03418             }
03419         }
03420     }
03421 }
03422 
03423 ErrorCode ReadHDF5::convert_range_to_handle( const EntityHandle* array, size_t num_ranges, Range& range )
03424 {
03425     convert_range_to_handle( array, num_ranges, idMap, range );
03426     return MB_SUCCESS;
03427 }
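
// Usage sketch (hypothetical numbers): 'array' stores num_ranges pairs of
// (starting file id, count). If the id map sends file ids [10, 20) to
// handles [100, 110), then the pair (12, 5), i.e. file ids 12..16, merges
// handles 102..106 into the output Range.
#if 0
const EntityHandle pairs[] = { 12, 5,     // file ids 12..16
                               40, 2 };   // file ids 40..41
Range out;
convert_range_to_handle( pairs, 2, out );  // 2 = number of (start, count) pairs
#endif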
03428 
03429 ErrorCode ReadHDF5::insert_in_id_map( const Range& file_ids, EntityHandle start_id )
03430 {
03431     IDMap tmp_map;
03432     bool merge = !idMap.empty() && !file_ids.empty() && idMap.back().begin > (long)file_ids.front();
03433     IDMap& map = merge ? tmp_map : idMap;
03434     Range::const_pair_iterator p;
03435     for( p = file_ids.const_pair_begin(); p != file_ids.const_pair_end(); ++p )
03436     {
03437         size_t count = p->second - p->first + 1;
03438         if( !map.insert( p->first, start_id, count ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03439         start_id += count;
03440     }
03441     if( merge && !idMap.merge( tmp_map ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03442 
03443     return MB_SUCCESS;
03444 }
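
// Usage sketch (hypothetical numbers): handles are assigned contiguously
// even across gaps in the file ids, so inserting file ids {3..5, 9..10}
// with start_id 100 records 3->100, 4->101, 5->102, 9->103, 10->104.
#if 0
Range file_ids;
file_ids.insert( 3, 5 );   // file ids 3..5
file_ids.insert( 9, 10 );  // file ids 9..10
insert_in_id_map( file_ids, 100 );
#endif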
03445 
03446 ErrorCode ReadHDF5::insert_in_id_map( long file_id, EntityHandle handle )
03447 {
03448     if( !idMap.insert( file_id, handle, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03449     return MB_SUCCESS;
03450 }
03451 
03452 ErrorCode ReadHDF5::read_qa( EntityHandle )
03453 {
03454     CHECK_OPEN_HANDLES;
03455 
03456     mhdf_Status status;
03457     // std::vector<std::string> qa_list;
03458 
03459     int qa_len;
03460     char** qa = mhdf_readHistory( filePtr, &qa_len, &status );
03461     if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03462     // qa_list.resize(qa_len);
03463     for( int i = 0; i < qa_len; i++ )
03464     {
03465         // qa_list[i] = qa[i];
03466         free( qa[i] );
03467     }
03468     free( qa );
03469 
03470     /** FIX ME - how to put QA list on set?? */
03471 
03472     return MB_SUCCESS;
03473 }
03474 
03475 ErrorCode ReadHDF5::store_file_ids( Tag tag )
03476 {
03477     CHECK_OPEN_HANDLES;
03478 
03479     // typedef int tag_type;
03480     typedef long tag_type;
03481     // Changed from int to long (64 bits) so that much bigger files can be read
03482 
03483     tag_type* buffer       = reinterpret_cast< tag_type* >( dataBuffer );
03484     const long buffer_size = bufferSize / sizeof( tag_type );
03485     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
03486     {
03487         IDMap::Range range = *i;
03488 
03489         // Make sure the values will fit in the tag type
03490         IDMap::key_type rv = range.begin + ( range.count - 1 );
03491         tag_type tv        = (tag_type)rv;
03492         if( (IDMap::key_type)tv != rv )
03493         {
03494             assert( false );
03495             return MB_INDEX_OUT_OF_RANGE;
03496         }
03497 
03498         while( range.count )
03499         {
03500             long count = buffer_size < range.count ? buffer_size : range.count;
03501 
03502             Range handles;
03503             handles.insert( range.value, range.value + count - 1 );
03504             range.value += count;
03505             range.count -= count;
03506             for( long j = 0; j < count; ++j )
03507                 buffer[j] = (tag_type)range.begin++;
03508 
03509             ErrorCode rval = iFace->tag_set_data( tag, handles, buffer );
03510             if( MB_SUCCESS != rval ) return rval;
03511         }
03512     }
03513 
03514     return MB_SUCCESS;
03515 }
03516 
03517 ErrorCode ReadHDF5::store_sets_file_ids()
03518 {
03519     CHECK_OPEN_HANDLES;
03520 
03521     // Create a tag that will not be saved, but will be used by the
03522     // VisIt plugin to match sets with their file ids. It has the same
03523     // type as the file-id tag defined in ReadParallel.cpp.
03524     Tag setFileIdTag;
03525     long default_val = 0;
03526     ErrorCode rval   = iFace->tag_get_handle( "__FILE_ID_FOR_SETS", sizeof( long ), MB_TYPE_OPAQUE, setFileIdTag,
03527                                             ( MB_TAG_DENSE | MB_TAG_CREAT ), &default_val );
03528 
03529     if( MB_SUCCESS != rval || 0 == setFileIdTag ) return rval;
03530     // typedef int tag_type;
03531     typedef long tag_type;
03532     // Changed from int to long (64 bits) so that much bigger files can be read
03533 
03534     tag_type* buffer       = reinterpret_cast< tag_type* >( dataBuffer );
03535     const long buffer_size = bufferSize / sizeof( tag_type );
03536     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
03537     {
03538         IDMap::Range range = *i;
03539         EntityType htype   = iFace->type_from_handle( range.value );
03540         if( MBENTITYSET != htype ) continue;
03541         // work only with entity sets
03542         // Make sure the values will fit in the tag type
03543         IDMap::key_type rv = range.begin + ( range.count - 1 );
03544         tag_type tv        = (tag_type)rv;
03545         if( (IDMap::key_type)tv != rv )
03546         {
03547             assert( false );
03548             return MB_INDEX_OUT_OF_RANGE;
03549         }
03550 
03551         while( range.count )
03552         {
03553             long count = buffer_size < range.count ? buffer_size : range.count;
03554 
03555             Range handles;
03556             handles.insert( range.value, range.value + count - 1 );
03557             range.value += count;
03558             range.count -= count;
03559             for( long j = 0; j < count; ++j )
03560                 buffer[j] = (tag_type)range.begin++;
03561 
03562             rval = iFace->tag_set_data( setFileIdTag, handles, buffer );
03563             if( MB_SUCCESS != rval ) return rval;
03564         }
03565     }
03566     return MB_SUCCESS;
03567 }
03568 
03569 ErrorCode ReadHDF5::read_tag_values( const char* file_name, const char* tag_name, const FileOptions& opts,
03570                                      std::vector< int >& tag_values_out, const SubsetList* subset_list )
03571 {
03572     ErrorCode rval;
03573 
03574     rval = set_up_read( file_name, opts );
03575     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03576 
03577     int tag_index;
03578     rval = find_int_tag( tag_name, tag_index );
03579     if( MB_SUCCESS != rval )
03580     {
03581         clean_up_read( opts );
03582         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03583     }
03584 
03585     if( subset_list )
03586     {
03587         Range file_ids;
03588         rval = get_subset_ids( subset_list->tag_list, subset_list->tag_list_length, file_ids );
03589         if( MB_SUCCESS != rval )
03590         {
03591             clean_up_read( opts );
03592             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03593         }
03594 
03595         rval = read_tag_values_partial( tag_index, file_ids, tag_values_out );
03596         if( MB_SUCCESS != rval )
03597         {
03598             clean_up_read( opts );
03599             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03600         }
03601     }
03602     else
03603     {
03604         rval = read_tag_values_all( tag_index, tag_values_out );
03605         if( MB_SUCCESS != rval )
03606         {
03607             clean_up_read( opts );
03608             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03609         }
03610     }
03611 
03612     return clean_up_read( opts );
03613 }
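
// Usage sketch (hypothetical file and tag names; construction details may
// vary): query the distinct values of an integer tag without loading the
// mesh itself.
#if 0
#include "moab/Core.hpp"
moab::Core mb;
moab::ReadHDF5 reader( &mb );
moab::FileOptions opts( "" );
std::vector< int > values;
moab::ErrorCode rc = reader.read_tag_values( "mesh.h5m", "MATERIAL_SET", opts, values, 0 );
#endif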
03614 
03615 ErrorCode ReadHDF5::read_tag_values_partial( int tag_index, const Range& file_ids, std::vector< int >& tag_values )
03616 {
03617     CHECK_OPEN_HANDLES;
03618 
03619     mhdf_Status status;
03620     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
03621     long num_ent, num_val;
03622     size_t count;
03623     std::string tn( tag.name );
03624 
03625     // Read sparse values
03626     if( tag.have_sparse )
03627     {
03628         hid_t handles[3];
03629         mhdf_openSparseTagData( filePtr, tag.name, &num_ent, &num_val, handles, &status );
03630         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03631 
03632         try
03633         {
03634             // Read all entity handles and fill 'offsets' with ranges of
03635             // offsets into the data table for entities that we want.
03636             Range offsets;
03637             long* buffer           = reinterpret_cast< long* >( dataBuffer );
03638             const long buffer_size = bufferSize / sizeof( long );
03639             ReadHDF5Dataset ids( ( tn + " ids" ).c_str(), handles[0], nativeParallel, mpiComm );
03640             ids.set_all_file_ids( buffer_size, H5T_NATIVE_LONG );
03641             size_t offset = 0;
03642             dbgOut.printf( 3, "Reading sparse IDs for tag \"%s\" in %lu chunks\n", tag.name, ids.get_read_count() );
03643             int nn = 0;
03644             while( !ids.done() )
03645             {
03646                 dbgOut.printf( 3, "Reading chunk %d of IDs for \"%s\"\n", ++nn, tag.name );
03647                 ids.read( buffer, count );
03648 
03649                 std::sort( buffer, buffer + count );
03650                 Range::iterator ins     = offsets.begin();
03651                 Range::const_iterator i = file_ids.begin();
03652                 for( size_t j = 0; j < count; ++j )
03653                 {
03654                     while( i != file_ids.end() && (long)*i < buffer[j] )
03655                         ++i;
03656                     if( i == file_ids.end() ) break;
03657                     if( (long)*i == buffer[j] ) { ins = offsets.insert( ins, j + offset, j + offset ); }
03658                 }
03659 
03660                 offset += count;
03661             }
03662 
03663             tag_values.clear();
03664             tag_values.reserve( offsets.size() );
03665             const size_t data_buffer_size = bufferSize / sizeof( int );
03666             int* data_buffer              = reinterpret_cast< int* >( dataBuffer );
03667             ReadHDF5Dataset vals( ( tn + " sparse vals" ).c_str(), handles[1], nativeParallel, mpiComm );
03668             vals.set_file_ids( offsets, 0, data_buffer_size, H5T_NATIVE_INT );
03669             dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tag.name, vals.get_read_count() );
03670             nn = 0;
03671             // Should normally only have one read call, unless sparse nature
03672             // of file_ids caused reader to do something strange
03673             while( !vals.done() )
03674             {
03675                 dbgOut.printf( 3, "Reading chunk %d of values for \"%s\"\n", ++nn, tag.name );
03676                 vals.read( data_buffer, count );
03677                 tag_values.insert( tag_values.end(), data_buffer, data_buffer + count );
03678             }
03679         }
03680         catch( ReadHDF5Dataset::Exception )
03681         {
03682             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03683         }
03684     }
03685 
03686     std::sort( tag_values.begin(), tag_values.end() );
03687     tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
03688 
03689     // Read dense values
03690     std::vector< int > prev_data, curr_data;
03691     for( int i = 0; i < tag.num_dense_indices; ++i )
03692     {
03693         int grp            = tag.dense_elem_indices[i];
03694         const char* gname  = 0;
03695         mhdf_EntDesc* desc = 0;
03696         if( grp == -1 )
03697         {
03698             gname = mhdf_node_type_handle();
03699             desc  = &fileInfo->nodes;
03700         }
03701         else if( grp == -2 )
03702         {
03703             gname = mhdf_set_type_handle();
03704             desc  = &fileInfo->sets;
03705         }
03706         else
03707         {
03708             assert( grp >= 0 && grp < fileInfo->num_elem_desc );
03709             gname = fileInfo->elems[grp].handle;
03710             desc  = &fileInfo->elems[grp].desc;
03711         }
03712 
03713         Range::iterator s = file_ids.lower_bound( ( EntityHandle )( desc->start_id ) );
03714         Range::iterator e = Range::lower_bound( s, file_ids.end(), ( EntityHandle )( desc->start_id ) + desc->count );
03715         Range subset;
03716         subset.merge( s, e );
03717 
03718         hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
03719         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03720 
03721         try
03722         {
03723             curr_data.clear();
03724             tag_values.reserve( subset.size() );
03725             const size_t data_buffer_size = bufferSize / sizeof( int );
03726             int* data_buffer              = reinterpret_cast< int* >( dataBuffer );
03727 
03728             ReadHDF5Dataset reader( ( tn + " dense vals" ).c_str(), handle, nativeParallel, mpiComm );
03729             reader.set_file_ids( subset, desc->start_id, data_buffer_size, H5T_NATIVE_INT );
03730             dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tag.name,
03731                            gname, reader.get_read_count() );
03732             int nn = 0;
03733             // Should normally only have one read call, unless sparse nature
03734             // of file_ids caused reader to do something strange
03735             while( !reader.done() )
03736             {
03737                 dbgOut.printf( 3, "Reading chunk %d of \"%s\"/\"%s\"\n", ++nn, tag.name, gname );
03738                 reader.read( data_buffer, count );
03739                 curr_data.insert( curr_data.end(), data_buffer, data_buffer + count );
03740             }
03741         }
03742         catch( ReadHDF5Dataset::Exception )
03743         {
03744             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03745         }
03746 
03747         std::sort( curr_data.begin(), curr_data.end() );
03748         curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
03749         prev_data.clear();
03750         tag_values.swap( prev_data );
03751         std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
03752                         std::back_inserter( tag_values ) );
03753     }
03754 
03755     return MB_SUCCESS;
03756 }
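
// Sketch (plain STL, mirroring the loop above) of the accumulation idiom
// used for both the sparse and dense passes: sort and deduplicate the new
// chunk, then set_union it with the running result, which stays sorted and
// unique throughout.
#if 0
#include <algorithm>
#include <iterator>
#include <vector>

static void merge_unique( std::vector< int >& accum, std::vector< int >& chunk )
{
    std::sort( chunk.begin(), chunk.end() );
    chunk.erase( std::unique( chunk.begin(), chunk.end() ), chunk.end() );
    std::vector< int > prev;
    accum.swap( prev );  // 'prev' takes the old contents; 'accum' is now empty
    std::set_union( prev.begin(), prev.end(), chunk.begin(), chunk.end(),
                    std::back_inserter( accum ) );
}
#endif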
03757 
03758 ErrorCode ReadHDF5::read_tag_values_all( int tag_index, std::vector< int >& tag_values )
03759 {
03760     CHECK_OPEN_HANDLES;
03761 
03762     mhdf_Status status;
03763     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
03764     long junk, num_val;
03765 
03766     // Read sparse values
03767     if( tag.have_sparse )
03768     {
03769         hid_t handles[3];
03770         mhdf_openSparseTagData( filePtr, tag.name, &junk, &num_val, handles, &status );
03771         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03772 
03773         mhdf_closeData( filePtr, handles[0], &status );
03774         if( mhdf_isError( &status ) )
03775         {
03776             MB_SET_ERR_CONT( mhdf_message( &status ) );
03777             mhdf_closeData( filePtr, handles[1], &status );
03778             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03779         }
03780 
03781         hid_t file_type = H5Dget_type( handles[1] );
03782         tag_values.resize( num_val );
03783         mhdf_readTagValuesWithOpt( handles[1], 0, num_val, file_type, &tag_values[0], collIO, &status );
03784         if( mhdf_isError( &status ) )
03785         {
03786             MB_SET_ERR_CONT( mhdf_message( &status ) );
03787             H5Tclose( file_type );
03788             mhdf_closeData( filePtr, handles[1], &status );
03789             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03790         }
03791         H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &tag_values[0], 0, H5P_DEFAULT );
03792         H5Tclose( file_type );
03793 
03794         mhdf_closeData( filePtr, handles[1], &status );
03795         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03796     }
03797 
03798     std::sort( tag_values.begin(), tag_values.end() );
03799     tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
03800 
03801     // Read dense values
03802     std::vector< int > prev_data, curr_data;
03803     for( int i = 0; i < tag.num_dense_indices; ++i )
03804     {
03805         int grp           = tag.dense_elem_indices[i];
03806         const char* gname = 0;
03807         if( grp == -1 )
03808             gname = mhdf_node_type_handle();
03809         else if( grp == -2 )
03810             gname = mhdf_set_type_handle();
03811         else
03812             gname = fileInfo->elems[grp].handle;
03813         hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
03814         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03815 
03816         hid_t file_type = H5Dget_type( handle );
03817         curr_data.resize( num_val );
03818         mhdf_readTagValuesWithOpt( handle, 0, num_val, file_type, &curr_data[0], collIO, &status );
03819         if( mhdf_isError( &status ) )
03820         {
03821             MB_SET_ERR_CONT( mhdf_message( &status ) );
03822             H5Tclose( file_type );
03823             mhdf_closeData( filePtr, handle, &status );
03824             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03825         }
03826 
03827         H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &curr_data[0], 0, H5P_DEFAULT );
03828         H5Tclose( file_type );
03829         mhdf_closeData( filePtr, handle, &status );
03830         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03831 
03832         std::sort( curr_data.begin(), curr_data.end() );
03833         curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
03834 
03835         prev_data.clear();
03836         tag_values.swap( prev_data );
03837         std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
03838                         std::back_inserter( tag_values ) );
03839     }
03840 
03841     return MB_SUCCESS;
03842 }
03843 void ReadHDF5::print_times()
03844 {
03845 #ifdef MOAB_HAVE_MPI
03846     if( myPcomm )
03847     {
03848         double recv[NUM_TIMES];
03849         MPI_Reduce( (void*)_times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
03850         for( int i = 0; i < NUM_TIMES; i++ )
03851             _times[i] = recv[i];  // just get the max from all of them
03852     }
03853     if( !myPcomm || 0 == myPcomm->proc_config().proc_rank() )
03854     {
03855 #endif
03856 
03857         std::cout << "ReadHDF5:             " << _times[TOTAL_TIME] << std::endl
03858                   << "  get set meta        " << _times[SET_META_TIME] << std::endl
03859                   << "  partial subsets     " << _times[SUBSET_IDS_TIME] << std::endl
03860                   << "  partition time      " << _times[GET_PARTITION_TIME] << std::endl
03861                   << "  get set ids         " << _times[GET_SET_IDS_TIME] << std::endl
03862                   << "  set contents        " << _times[GET_SET_CONTENTS_TIME] << std::endl
03863                   << "  polyhedra           " << _times[GET_POLYHEDRA_TIME] << std::endl
03864                   << "  elements            " << _times[GET_ELEMENTS_TIME] << std::endl
03865                   << "  nodes               " << _times[GET_NODES_TIME] << std::endl
03866                   << "  node adjacency      " << _times[GET_NODEADJ_TIME] << std::endl
03867                   << "  side elements       " << _times[GET_SIDEELEM_TIME] << std::endl
03868                   << "  update connectivity " << _times[UPDATECONN_TIME] << std::endl
03869                   << "  adjacency           " << _times[ADJACENCY_TIME] << std::endl
03870                   << "  delete non_adj      " << _times[DELETE_NON_SIDEELEM_TIME] << std::endl
03871                   << "  recursive sets      " << _times[READ_SET_IDS_RECURS_TIME] << std::endl
03872                   << "  find contain_sets   " << _times[FIND_SETS_CONTAINING_TIME] << std::endl
03873                   << "  read sets           " << _times[READ_SETS_TIME] << std::endl
03874                   << "  read tags           " << _times[READ_TAGS_TIME] << std::endl
03875                   << "  store file ids      " << _times[STORE_FILE_IDS_TIME] << std::endl
03876                   << "  read qa records     " << _times[READ_QA_TIME] << std::endl;
03877 
03878 #ifdef MOAB_HAVE_MPI
03879     }
03880 #endif
03881 }
03882 
03883 }  // namespace moab