/**
 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
 * storing and accessing finite element mesh data.
 *
 * Copyright 2004 Sandia Corporation.  Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
 * retains certain rights in this software.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 */

//-------------------------------------------------------------------------
// Filename      : ReadHDF5.cpp
//
// Purpose       : HDF5 Reader
//
// Creator       : Jason Kraftcheck
//
// Creation Date : 04/18/04
//-------------------------------------------------------------------------

#include <cassert>
#include "moab/MOABConfig.h"
/* Include our MPI header before any HDF5 because otherwise
   it will get included indirectly by HDF5 */
#ifdef MOAB_HAVE_MPI
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#endif
#include <H5Tpublic.h>
#include <H5Ppublic.h>
#include <H5Epublic.h>
#include "moab/Interface.hpp"
#include "Internals.hpp"
#include "MBTagConventions.hpp"
#include "ReadHDF5.hpp"
#include "moab/CN.hpp"
#include "moab/FileOptions.hpp"
#include "moab/CpuTimer.hpp"
#ifdef MOAB_HAVE_HDF5_PARALLEL
#include <H5FDmpi.h>
#include <H5FDmpio.h>
#endif
//#include "WriteHDF5.hpp"

#include <cstdlib>
#include <cstring>
#include <limits>
#include <functional>
#include <iostream>

#include "IODebugTrack.hpp"
#include "ReadHDF5Dataset.hpp"
#include "ReadHDF5VarLen.hpp"
#include "moab_mpe.h"

namespace moab
{

/* If true, coordinates are read in blocked format (all X values before
 * Y values before Z values). If false, all coordinates for a given
 * vertex are read at the same time.
 */
const bool DEFAULT_BLOCKED_COORDINATE_IO = false;

/* If true, the file is first opened by the root node only to read the
 * summary; the file is then closed and the summary is broadcast to all
 * nodes, after which all nodes open the file in parallel to read data.
 * If false, the file is opened once in parallel and all nodes read the
 * summary data.
 */
const bool DEFAULT_BCAST_SUMMARY = true;

/* If true and all processors are to read the same block of data,
 * read it on one processor and broadcast to the others rather than
 * using collective I/O.
 */
const bool DEFAULT_BCAST_DUPLICATE_READS = true;

#define READ_HDF5_BUFFER_SIZE ( 128 * 1024 * 1024 )

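/* Illustrative only: each of the defaults above can be overridden per read
 * through the options string passed to Interface::load_file(), which this
 * reader parses via FileOptions (option names are those handled in
 * set_up_read() below; the exact toggle spellings accepted depend on
 * FileOptions::get_toggle_option()):
 *
 *   moab::Core mb;
 *   mb.load_file( "mesh.h5m", 0,
 *                 "BLOCKED_COORDINATE_IO=yes;BCAST_SUMMARY=no;BUFFER_SIZE=1048576" );
 */
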
#define assert_range( PTR, CNT )            \
    assert( ( PTR ) >= (void*)dataBuffer ); \
    assert( ( ( PTR ) + ( CNT ) ) <= (void*)( dataBuffer + bufferSize ) );

// Call MB_CHK_ERR_CONT() during HDF5 library errors to make
// it easier to trap such errors in the debugger. This function
// gets registered with the HDF5 library as a callback. It
// works the same as the default (H5Eprint), except that it
// also invokes MB_CHK_ERR_CONT() as a no-op hook where a
// breakpoint can be set.
#if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
static herr_t handle_hdf5_error( hid_t stack, void* data )
{
    ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
    herr_t result                 = 0;
    if( h->func ) result = ( *h->func )( stack, h->data );
    MB_CHK_ERR_CONT( MB_FAILURE );
    return result;
}
#else
static herr_t handle_hdf5_error( void* data )
{
    ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
    herr_t result                 = 0;
    if( h->func ) result = ( *h->func )( h->data );
    MB_CHK_ERR_CONT( MB_FAILURE );
    return result;
}
#endif

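// Compress a sorted array of file IDs into a Range by detecting runs of
// consecutive IDs. Worked example (illustrative): the input
// { 3, 4, 5, 9, 10 } is inserted into 'results' as the two intervals
// [3,5] and [9,10].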
static void copy_sorted_file_ids( const EntityHandle* sorted_ids, long num_ids, Range& results )
{
    Range::iterator hint = results.begin();
    long i               = 0;
    while( i < num_ids )
    {
        EntityHandle start = sorted_ids[i];
        for( ++i; i < num_ids && sorted_ids[i] == 1 + sorted_ids[i - 1]; ++i )
            ;
        hint = results.insert( hint, start, sorted_ids[i - 1] );
    }
}

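// Intersect the contiguous file-ID block described by 'group'
// ([start_id, start_id + count)) with 'range', merging the overlap into
// 'result'. E.g. (illustrative) a group with start_id = 10 and count = 5
// intersected with the range [8,12] yields [10,12].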
static void intersect( const mhdf_EntDesc& group, const Range& range, Range& result )
{
    Range::const_iterator s, e;
    s = Range::lower_bound( range.begin(), range.end(), group.start_id );
    e = Range::lower_bound( s, range.end(), group.start_id + group.count );
    result.merge( s, e );
}

#define debug_barrier() debug_barrier_line( __LINE__ )
void ReadHDF5::debug_barrier_line( int lineno )
{
#ifdef MOAB_HAVE_MPI
    if( mpiComm )
    {
        const unsigned threshold   = 2;
        static unsigned long count = 0;
        if( dbgOut.get_verbosity() >= threshold )
        {
            dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
            MPI_Barrier( *mpiComm );
        }
    }
#else
    if( lineno )
    {
    }
#endif
}

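// RAII helper used through the CHECK_OPEN_HANDLES macro below: it records
// the number of open mhdf handles on entry to a scope and reports any
// handles still open when the scope exits. Illustrative usage (this is
// what the macro expands to in debug builds):
//
//   {
//       CheckOpenReadHDF5Handles check_open_handles_( filePtr, __LINE__ );
//       // ... open and close HDF5 objects; a missing mhdf_closeData()
//       // here is reported when the scope ends ...
//   }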
class CheckOpenReadHDF5Handles
{
    int fileline;
    mhdf_FileHandle handle;
    int enter_count;

  public:
    CheckOpenReadHDF5Handles( mhdf_FileHandle file, int line )
        : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
    {
    }
    ~CheckOpenReadHDF5Handles()
    {
        int new_count = mhdf_countOpenHandles( handle );
        if( new_count != enter_count )
        {
            std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
                      << "Open at entrance: " << enter_count << std::endl
                      << "Open at exit:     " << new_count << std::endl;
        }
    }
};

#ifdef NDEBUG
#define CHECK_OPEN_HANDLES
#else
#define CHECK_OPEN_HANDLES CheckOpenReadHDF5Handles check_open_handles_( filePtr, __LINE__ )
#endif

ReaderIface* ReadHDF5::factory( Interface* iface )
{
    return new ReadHDF5( iface );
}

ReadHDF5::ReadHDF5( Interface* iface )
    : bufferSize( READ_HDF5_BUFFER_SIZE ), dataBuffer( NULL ), iFace( iface ), filePtr( 0 ), fileInfo( NULL ),
      readUtil( NULL ), handleType( 0 ), indepIO( H5P_DEFAULT ), collIO( H5P_DEFAULT ), myPcomm( NULL ),
      debugTrack( false ), dbgOut( stderr ), nativeParallel( false ), mpiComm( NULL ),
      blockedCoordinateIO( DEFAULT_BLOCKED_COORDINATE_IO ), bcastSummary( DEFAULT_BCAST_SUMMARY ),
      bcastDuplicateReads( DEFAULT_BCAST_DUPLICATE_READS ), setMeta( 0 ), timer( NULL ), cputime( false )
{
}

ErrorCode ReadHDF5::init()
{
    ErrorCode rval;

    if( readUtil ) return MB_SUCCESS;

    indepIO = collIO = H5P_DEFAULT;
    // WriteHDF5::register_known_tag_types(iFace);

    handleType = H5Tcopy( H5T_NATIVE_ULONG );
    if( handleType < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

    if( H5Tset_size( handleType, sizeof( EntityHandle ) ) < 0 )
    {
        H5Tclose( handleType );
        MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    }

    rval = iFace->query_interface( readUtil );
    if( MB_SUCCESS != rval )
    {
        H5Tclose( handleType );
        MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    idMap.clear();
    fileInfo   = 0;
    debugTrack = false;
    myPcomm    = 0;

    return MB_SUCCESS;
}

ReadHDF5::~ReadHDF5()
{
    if( !readUtil )  // init() failed.
        return;

    delete[] setMeta;
    setMeta = 0;
    iFace->release_interface( readUtil );
    H5Tclose( handleType );
}

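// Open the file and configure the read. A hedged summary of the parallel
// option handling below (names are the actual options parsed in this
// function; values shown are illustrative):
//
//   "PARALLEL=READ_PART"        - native parallel read; each rank reads a part
//   "USE_MPIO"                  - requires PARALLEL; use MPI-IO file access
//   "PARALLEL_COMM=<n>"         - index of the ParallelComm to use
//   "CB_BUFFER_SIZE=<bytes>"    - forwarded to MPI_Info as cb_buffer_size
//   "HYPERSLAB_SELECT_LIMIT=<n>", "HYPERSLAB_OR", "HYPERSLAB_APPEND"
//                               - tune HDF5 hyperslab selection behavior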
ErrorCode ReadHDF5::set_up_read( const char* filename, const FileOptions& opts )
{
    ErrorCode rval;
    mhdf_Status status;
    indepIO = collIO = H5P_DEFAULT;
    mpiComm          = 0;

    if( MB_SUCCESS != init() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
#else
    herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
#endif
    if( err < 0 )
    {
        errorHandler.func = 0;
        errorHandler.data = 0;
    }
    else
    {
#if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
        err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
#else
        err           = H5Eset_auto( &handle_hdf5_error, &errorHandler );
#endif
        if( err < 0 )
        {
            errorHandler.func = 0;
            errorHandler.data = 0;
        }
    }

    // Set up debug output
    int tmpval;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) )
    {
        dbgOut.set_verbosity( tmpval );
        dbgOut.set_prefix( "H5M " );
    }
    dbgOut.limit_output_to_first_N_procs( 32 );

    // Enable some extra checks for reads. Note: amongst other things this
    // will print errors if the entire file is not read, so if doing a
    // partial read that is not a parallel read, this should be disabled.
    debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );

    opts.get_toggle_option( "BLOCKED_COORDINATE_IO", DEFAULT_BLOCKED_COORDINATE_IO, blockedCoordinateIO );
    opts.get_toggle_option( "BCAST_SUMMARY", DEFAULT_BCAST_SUMMARY, bcastSummary );
    opts.get_toggle_option( "BCAST_DUPLICATE_READS", DEFAULT_BCAST_DUPLICATE_READS, bcastDuplicateReads );

    // Handle parallel options
    bool use_mpio  = ( MB_SUCCESS == opts.get_null_option( "USE_MPIO" ) );
    rval           = opts.match_option( "PARALLEL", "READ_PART" );
    bool parallel  = ( rval != MB_ENTITY_NOT_FOUND );
    nativeParallel = ( rval == MB_SUCCESS );
    if( use_mpio && !parallel )
    {
        MB_SET_ERR( MB_NOT_IMPLEMENTED, "'USE_MPIO' option specified w/out 'PARALLEL' option" );
    }

    // This option is intended for testing purposes only, and thus
    // is not documented anywhere.  Decreasing the buffer size can
    // expose bugs that would otherwise only be seen when reading
    // very large files.
    rval = opts.get_int_option( "BUFFER_SIZE", bufferSize );
    if( MB_SUCCESS != rval )
    {
        bufferSize = READ_HDF5_BUFFER_SIZE;
    }
    else if( bufferSize < (int)std::max( sizeof( EntityHandle ), sizeof( void* ) ) )
    {
        MB_CHK_ERR( MB_INVALID_SIZE );
    }

    dataBuffer = (char*)malloc( bufferSize );
    if( !dataBuffer ) MB_CHK_ERR( MB_MEMORY_ALLOCATION_FAILED );

    if( use_mpio || nativeParallel )
    {

#ifndef MOAB_HAVE_HDF5_PARALLEL
        free( dataBuffer );
        dataBuffer = NULL;
        MB_SET_ERR( MB_NOT_IMPLEMENTED, "MOAB not configured with parallel HDF5 support" );
#else
        MPI_Info info = MPI_INFO_NULL;
        std::string cb_size;
        rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
        if( MB_SUCCESS == rval )
        {
            MPI_Info_create( &info );
            MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
        }

        int pcomm_no = 0;
        rval         = opts.get_int_option( "PARALLEL_COMM", pcomm_no );
        if( rval == MB_TYPE_OUT_OF_RANGE )
        {
            MB_SET_ERR( rval, "Invalid value for PARALLEL_COMM option" );
        }
        myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
        if( 0 == myPcomm )
        {
            myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD );
        }
        const int rank = myPcomm->proc_config().proc_rank();
        dbgOut.set_rank( rank );
        dbgOut.limit_output_to_first_N_procs( 32 );
        mpiComm = new MPI_Comm( myPcomm->proc_config().proc_comm() );

#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
        dbgOut.print( 1, "H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS is not defined\n" );
#endif

        // Open the file in serial on root to read summary
        dbgOut.tprint( 1, "Getting file summary\n" );
        fileInfo = 0;

        hid_t file_prop;
        if( bcastSummary )
        {
            unsigned long size = 0;
            if( rank == 0 )
            {
                file_prop = H5Pcreate( H5P_FILE_ACCESS );
                err       = H5Pset_fapl_mpio( file_prop, MPI_COMM_SELF, MPI_INFO_NULL );
                assert( file_prop >= 0 );
                assert( err >= 0 );
                filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
                H5Pclose( file_prop );

                if( filePtr )
                {
                    fileInfo = mhdf_getFileSummary( filePtr, handleType, &status,
                                                    0 );  // no extra set info
                    if( !is_error( status ) )
                    {
                        size             = fileInfo->total_size;
                        fileInfo->offset = (unsigned char*)fileInfo;
                    }
                }
                mhdf_closeFile( filePtr, &status );
                if( fileInfo && mhdf_isError( &status ) )
                {
                    free( fileInfo );
                    fileInfo = NULL;
                }
            }

            dbgOut.tprint( 1, "Communicating file summary\n" );
            int mpi_err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, myPcomm->proc_config().proc_comm() );
            if( mpi_err || !size ) return MB_FAILURE;

            if( rank != 0 ) fileInfo = reinterpret_cast< mhdf_FileDesc* >( malloc( size ) );

            MPI_Bcast( fileInfo, size, MPI_BYTE, 0, myPcomm->proc_config().proc_comm() );

            if( rank != 0 ) mhdf_fixFileDesc( fileInfo, reinterpret_cast< mhdf_FileDesc* >( fileInfo->offset ) );
        }

        file_prop = H5Pcreate( H5P_FILE_ACCESS );
        err       = H5Pset_fapl_mpio( file_prop, myPcomm->proc_config().proc_comm(), info );
        assert( file_prop >= 0 );
        assert( err >= 0 );

        collIO = H5Pcreate( H5P_DATASET_XFER );
        assert( collIO > 0 );
        err = H5Pset_dxpl_mpio( collIO, H5FD_MPIO_COLLECTIVE );
        assert( err >= 0 );
        indepIO = nativeParallel ? H5P_DEFAULT : collIO;

        // Re-open file in parallel
        dbgOut.tprintf( 1, "Opening \"%s\" for parallel IO\n", filename );
        filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );

        H5Pclose( file_prop );
        if( !filePtr )
        {
            free( dataBuffer );
            dataBuffer = NULL;
            H5Pclose( indepIO );
            if( collIO != indepIO ) H5Pclose( collIO );
            collIO = indepIO = H5P_DEFAULT;
            MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
        }

        if( !bcastSummary )
        {
            fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
            if( is_error( status ) )
            {
                free( dataBuffer );
                dataBuffer = NULL;
                mhdf_closeFile( filePtr, &status );
                MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
            }
        }
#endif  // HDF5_PARALLEL
    }
    else
    {
        // Open the file
        filePtr = mhdf_openFile( filename, 0, NULL, handleType, &status );
        if( !filePtr )
        {
            free( dataBuffer );
            dataBuffer = NULL;
            MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
        }

        // Get file info
        fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
        if( is_error( status ) )
        {
            free( dataBuffer );
            dataBuffer = NULL;
            mhdf_closeFile( filePtr, &status );
            MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        }
    }

    ReadHDF5Dataset::default_hyperslab_selection_limit();
    int hslimit;
    rval = opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit );
    if( MB_SUCCESS == rval && hslimit > 0 )
        ReadHDF5Dataset::set_hyperslab_selection_limit( hslimit );
    else
        ReadHDF5Dataset::default_hyperslab_selection_limit();
    if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
        ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
    {
        ReadHDF5Dataset::append_hyperslabs();
        if( MB_SUCCESS != opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit ) )
            ReadHDF5Dataset::set_hyperslab_selection_limit( std::numeric_limits< int >::max() );
        dbgOut.print( 1, "Using H5S_APPEND for hyperslab selection\n" );
    }

    return MB_SUCCESS;
}

ErrorCode ReadHDF5::clean_up_read( const FileOptions& )
{
    HDF5ErrorHandler handler;
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
#else
    herr_t err = H5Eget_auto( &handler.func, &handler.data );
#endif
    if( err >= 0 && handler.func == &handle_hdf5_error )
    {
        assert( handler.data == &errorHandler );
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
        H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
#else
        H5Eset_auto( errorHandler.func, errorHandler.data );
#endif
    }

    free( dataBuffer );
    dataBuffer = NULL;
    free( fileInfo );
    fileInfo = NULL;
    delete mpiComm;
    mpiComm = 0;

    if( indepIO != H5P_DEFAULT ) H5Pclose( indepIO );
    if( collIO != indepIO ) H5Pclose( collIO );
    collIO = indepIO = H5P_DEFAULT;

    delete[] setMeta;
    setMeta = 0;

    mhdf_Status status;
    mhdf_closeFile( filePtr, &status );
    filePtr = 0;
    return is_error( status ) ? MB_FAILURE : MB_SUCCESS;
}

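// Top-level entry point invoked by the MOAB reader framework. Illustrative
// only: an application normally reaches this through Interface::load_file(),
// e.g. (option string is illustrative; both options are null options parsed
// in this function):
//
//   moab::Core mb;
//   mb.load_file( "mesh.h5m", 0, "CPUTIME;STORE_SETS_FILEIDS" );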
ErrorCode ReadHDF5::load_file( const char* filename,
                               const EntityHandle* file_set,
                               const FileOptions& opts,
                               const ReaderIface::SubsetList* subset_list,
                               const Tag* file_id_tag )
{
    ErrorCode rval;

    rval = set_up_read( filename, opts );
    if( MB_SUCCESS != rval )
    {
        clean_up_read( opts );
        return rval;
    }

    // See if we need to report times
    rval = opts.get_null_option( "CPUTIME" );
    if( MB_SUCCESS == rval )
    {
        cputime = true;
        timer   = new CpuTimer;
        for( int i = 0; i < NUM_TIMES; i++ )
            _times[i] = 0;
    }

    // We read the entire set description table regardless of partial
    // or complete reads or serial vs parallel reads
    rval = read_all_set_meta();

    if( cputime ) _times[SET_META_TIME] = timer->time_elapsed();
    if( subset_list && MB_SUCCESS == rval )
        rval = load_file_partial( subset_list->tag_list, subset_list->tag_list_length, subset_list->num_parts,
                                  subset_list->part_number, opts );
    else
        rval = load_file_impl( opts );

    if( MB_SUCCESS == rval && file_id_tag )
    {
        dbgOut.tprint( 1, "Storing file IDs in tag\n" );
        rval = store_file_ids( *file_id_tag );
    }
    ErrorCode rval3 = opts.get_null_option( "STORE_SETS_FILEIDS" );
    if( MB_SUCCESS == rval3 )
    {
        rval = store_sets_file_ids();
        if( MB_SUCCESS != rval ) return rval;
    }

    if( cputime ) _times[STORE_FILE_IDS_TIME] = timer->time_elapsed();

    if( MB_SUCCESS == rval && 0 != file_set )
    {
        dbgOut.tprint( 1, "Reading QA records\n" );
        rval = read_qa( *file_set );
    }

    if( cputime ) _times[READ_QA_TIME] = timer->time_elapsed();
    dbgOut.tprint( 1, "Cleaning up\n" );
    ErrorCode rval2 = clean_up_read( opts );
    if( rval == MB_SUCCESS && rval2 != MB_SUCCESS ) rval = rval2;

    if( MB_SUCCESS == rval )
        dbgOut.tprint( 1, "Read finished.\n" );
    else
    {
        std::string msg;
        iFace->get_last_error( msg );
        dbgOut.tprintf( 1, "READ FAILED (ERROR CODE %s): %s\n", ErrorCodeStr[rval], msg.c_str() );
    }

    if( cputime )
    {
        _times[TOTAL_TIME] = timer->time_since_birth();
        print_times();
        delete timer;
    }
    if( H5P_DEFAULT != collIO ) H5Pclose( collIO );
    if( H5P_DEFAULT != indepIO ) H5Pclose( indepIO );
    collIO = indepIO = H5P_DEFAULT;

    return rval;
}

ErrorCode ReadHDF5::load_file_impl( const FileOptions& )
{
    ErrorCode rval;
    mhdf_Status status;
    int i;

    CHECK_OPEN_HANDLES;

    dbgOut.tprint( 1, "Reading all nodes...\n" );
    Range ids;
    if( fileInfo->nodes.count )
    {
        ids.insert( fileInfo->nodes.start_id, fileInfo->nodes.start_id + fileInfo->nodes.count - 1 );
        rval = read_nodes( ids );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Reading all element connectivity...\n" );
    std::vector< int > polyhedra;  // Need to do these last so that faces are loaded
    for( i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        if( CN::EntityTypeFromName( fileInfo->elems[i].type ) == MBPOLYHEDRON )
        {
            polyhedra.push_back( i );
            continue;
        }

        rval = read_elems( i );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }
    for( std::vector< int >::iterator it = polyhedra.begin(); it != polyhedra.end(); ++it )
    {
        rval = read_elems( *it );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Reading all sets...\n" );
    ids.clear();
    if( fileInfo->sets.count )
    {
        ids.insert( fileInfo->sets.start_id, fileInfo->sets.start_id + fileInfo->sets.count - 1 );
        rval = read_sets( ids );
        if( rval != MB_SUCCESS )
        {
            MB_SET_ERR( rval, "ReadHDF5 Failure" );
        }
    }

    dbgOut.tprint( 1, "Reading all adjacencies...\n" );
    for( i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        if( !fileInfo->elems[i].have_adj ) continue;

        long table_len;
        hid_t table = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &table_len, &status );
        if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

        rval = read_adjacencies( table, table_len );
        mhdf_closeData( filePtr, table, &status );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
        if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Reading all tags...\n" );
    for( i = 0; i < fileInfo->num_tag_desc; ++i )
    {
        rval = read_tag( i );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    dbgOut.tprint( 1, "Core read finished.  Cleaning up...\n" );
    return MB_SUCCESS;
}

ErrorCode ReadHDF5::find_int_tag( const char* name, int& index )
{
    for( index = 0; index < fileInfo->num_tag_desc; ++index )
        if( !strcmp( name, fileInfo->tags[index].name ) ) break;

    if( index == fileInfo->num_tag_desc )
    {
        MB_SET_ERR( MB_TAG_NOT_FOUND, "File does not contain subset tag '" << name << "'" );
    }

    if( fileInfo->tags[index].type != mhdf_INTEGER || fileInfo->tags[index].size != 1 )
    {
        MB_SET_ERR( MB_TAG_NOT_FOUND, "Tag '" << name << "' does not contain a single integer value" );
    }

    return MB_SUCCESS;
}

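// Build the set of file IDs selected by a caller-supplied subset list,
// intersecting the matches for each tag in the list. Illustrative example
// (variable names hypothetical): to select entities tagged MATERIAL_SET
// with value 1 or 2, a caller would fill in
//
//   int vals[] = { 1, 2 };
//   ReaderIface::IDTag t;
//   t.tag_name       = "MATERIAL_SET";
//   t.tag_values     = vals;
//   t.num_tag_values = 2;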
ErrorCode ReadHDF5::get_subset_ids( const ReaderIface::IDTag* subset_list, int subset_list_length, Range& file_ids )
{
    ErrorCode rval;

    for( int i = 0; i < subset_list_length; ++i )
    {
        int tag_index;
        rval = find_int_tag( subset_list[i].tag_name, tag_index );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

        Range tmp_file_ids;
        if( !subset_list[i].num_tag_values )
        {
            rval = get_tagged_entities( tag_index, tmp_file_ids );
        }
        else
        {
            std::vector< int > ids( subset_list[i].tag_values,
                                    subset_list[i].tag_values + subset_list[i].num_tag_values );
            std::sort( ids.begin(), ids.end() );
            rval = search_tag_values( tag_index, ids, tmp_file_ids );
            if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
        }

        if( tmp_file_ids.empty() ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );

        if( i == 0 )
            file_ids.swap( tmp_file_ids );
        else
            file_ids = intersect( tmp_file_ids, file_ids );
    }

    return MB_SUCCESS;
}

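// Reduce tmp_file_ids to the subrange of set IDs assigned to this part.
// The first (size % num_parts) parts receive one extra ID. Worked example
// (illustrative): 10 set IDs split across num_parts = 4 gives parts 0 and
// 1 three IDs each and parts 2 and 3 two IDs each.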
ErrorCode ReadHDF5::get_partition( Range& tmp_file_ids, int num_parts, int part_number )
{
    CHECK_OPEN_HANDLES;

    // Check that the tag only identified sets
    if( (unsigned long)fileInfo->sets.start_id > tmp_file_ids.front() )
    {
        dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
        tmp_file_ids.erase( tmp_file_ids.begin(), tmp_file_ids.lower_bound( (EntityHandle)fileInfo->sets.start_id ) );
    }
    unsigned long set_end = (unsigned long)fileInfo->sets.start_id + fileInfo->sets.count;
    if( tmp_file_ids.back() >= set_end )
    {
        dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
        tmp_file_ids.erase( tmp_file_ids.upper_bound( (EntityHandle)set_end ), tmp_file_ids.end() );
    }

    Range::iterator s   = tmp_file_ids.begin();
    size_t num_per_proc = tmp_file_ids.size() / num_parts;
    size_t num_extra    = tmp_file_ids.size() % num_parts;
    Range::iterator e;
    if( part_number < (long)num_extra )
    {
        s += ( num_per_proc + 1 ) * part_number;
        e = s;
        e += ( num_per_proc + 1 );
    }
    else
    {
        s += num_per_proc * part_number + num_extra;
        e = s;
        e += num_per_proc;
    }
    tmp_file_ids.erase( e, tmp_file_ids.end() );
    tmp_file_ids.erase( tmp_file_ids.begin(), s );

    return MB_SUCCESS;
}

ErrorCode ReadHDF5::load_file_partial( const ReaderIface::IDTag* subset_list,
                                       int subset_list_length,
                                       int num_parts,
                                       int part_number,
                                       const FileOptions& opts )
{
    mhdf_Status status;

    static MPEState mpe_event( "ReadHDF5", "yellow" );

    mpe_event.start( "gather parts" );

    CHECK_OPEN_HANDLES;

    for( int i = 0; i < subset_list_length; ++i )
    {
        dbgOut.printf( 2, "Select by \"%s\" with num_tag_values = %d\n", subset_list[i].tag_name,
                       subset_list[i].num_tag_values );
        if( subset_list[i].num_tag_values )
        {
            assert( 0 != subset_list[i].tag_values );
            dbgOut.printf( 2, "  \"%s\" values = { %d", subset_list[i].tag_name, subset_list[i].tag_values[0] );
            for( int j = 1; j < subset_list[i].num_tag_values; ++j )
                dbgOut.printf( 2, ", %d", subset_list[i].tag_values[j] );
            dbgOut.printf( 2, " }\n" );
        }
    }
    if( num_parts ) dbgOut.printf( 2, "Partition with num_parts = %d and part_number = %d\n", num_parts, part_number );

    dbgOut.tprint( 1, "RETRIEVING TAGGED ENTITIES\n" );

    Range file_ids;
    ErrorCode rval = get_subset_ids( subset_list, subset_list_length, file_ids );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[SUBSET_IDS_TIME] = timer->time_elapsed();

    if( num_parts )
    {
        /*if (num_parts>(int)file_ids.size())
        {
          MB_SET_ERR(MB_FAILURE, "Only " << file_ids.size() << " parts to distribute to " <<
        num_parts << " processes.");
        }*/
        rval = get_partition( file_ids, num_parts, part_number );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[GET_PARTITION_TIME] = timer->time_elapsed();

    dbgOut.print_ints( 4, "Set file IDs for partial read: ", file_ids );
    mpe_event.end();
    mpe_event.start( "gather related sets" );
    dbgOut.tprint( 1, "GATHERING ADDITIONAL ENTITIES\n" );

    enum RecursiveSetMode
    {
        RSM_NONE,
        RSM_SETS,
        RSM_CONTENTS
    };
    const char* const set_opts[] = { "NONE", "SETS", "CONTENTS", NULL };
    int child_mode;
    rval = opts.match_option( "CHILDREN", set_opts, child_mode );
    if( MB_ENTITY_NOT_FOUND == rval )
        child_mode = RSM_CONTENTS;
    else if( MB_SUCCESS != rval )
    {
        MB_SET_ERR( rval, "Invalid value for 'CHILDREN' option" );
    }
    int content_mode;
    rval = opts.match_option( "SETS", set_opts, content_mode );
    if( MB_ENTITY_NOT_FOUND == rval )
        content_mode = RSM_CONTENTS;
    else if( MB_SUCCESS != rval )
    {
        MB_SET_ERR( rval, "Invalid value for 'SETS' option" );
    }

    // If we want the contents of contained/child sets,
    // search for them now (before gathering the non-set contents
    // of the sets.)
    Range sets;
    intersect( fileInfo->sets, file_ids, sets );
    if( content_mode == RSM_CONTENTS || child_mode == RSM_CONTENTS )
    {
        dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
        rval = read_set_ids_recursive( sets, content_mode == RSM_CONTENTS, child_mode == RSM_CONTENTS );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[GET_SET_IDS_TIME] = timer->time_elapsed();
    debug_barrier();

    // Get elements and vertices contained in sets
    dbgOut.tprint( 1, "  doing get_set_contents\n" );
    rval = get_set_contents( sets, file_ids );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[GET_SET_CONTENTS_TIME] = timer->time_elapsed();

    dbgOut.print_ints( 5, "File IDs for partial read: ", file_ids );
    debug_barrier();
    mpe_event.end();
    dbgOut.tprint( 1, "GATHERING NODE IDS\n" );

    // Figure out the maximum dimension of entity to be read
    int max_dim = 0;
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if( type <= MBVERTEX || type >= MBENTITYSET )
        {
            assert( false );  // For debug code die for unknown element types
            continue;         // For release code, skip unknown element types
        }
        int dim = CN::Dimension( type );
        if( dim > max_dim )
        {
            Range subset;
            intersect( fileInfo->elems[i].desc, file_ids, subset );
            if( !subset.empty() ) max_dim = dim;
        }
    }
#ifdef MOAB_HAVE_MPI
    if( nativeParallel )
    {
        int send = max_dim;
        MPI_Allreduce( &send, &max_dim, 1, MPI_INT, MPI_MAX, *mpiComm );
    }
#endif

    // If input contained any polyhedra, then need to get faces
    // of the polyhedra before the next loop because we need to
    // read said faces in that loop.
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if( type != MBPOLYHEDRON ) continue;

        debug_barrier();
        dbgOut.print( 2, "    Getting polyhedra faces\n" );
        mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );

        Range polyhedra;
        intersect( fileInfo->elems[i].desc, file_ids, polyhedra );
        rval = read_elems( i, polyhedra, &file_ids );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[GET_POLYHEDRA_TIME] = timer->time_elapsed();
    // Get node file ids for all elements
    Range nodes;
    intersect( fileInfo->nodes, file_ids, nodes );
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
        if( type <= MBVERTEX || type >= MBENTITYSET )
        {
            assert( false );  // For debug code die for unknown element types
            continue;         // For release code, skip unknown element types
        }
        if( MBPOLYHEDRON == type ) continue;

        debug_barrier();
        dbgOut.printf( 2, "    Getting element node IDs for: %s\n", fileInfo->elems[i].handle );

        Range subset;
        intersect( fileInfo->elems[i].desc, file_ids, subset );
        mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );

        // If dimension is max_dim, then we can create the elements now
        // so we don't have to read the table again later (connectivity
        // will be fixed up, after nodes are created, when update_connectivity()
        // is called). For elements of a smaller dimension, we just build
        // the node ID range now because a) we'll have to read the whole
        // connectivity table again later, and b) we don't want to worry
        // about accidentally creating multiple copies of the same element.
        if( CN::Dimension( type ) == max_dim )
            rval = read_elems( i, subset, &nodes );
        else
            rval = read_elems( i, subset, nodes );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }
    if( cputime ) _times[GET_ELEMENTS_TIME] = timer->time_elapsed();
    debug_barrier();
    mpe_event.start( "read coords" );
    dbgOut.tprintf( 1, "READING NODE COORDINATES (%lu nodes in %lu selects)\n", (unsigned long)nodes.size(),
                    (unsigned long)nodes.psize() );

    // Read node coordinates and create vertices in MOAB
    // NOTE: This populates the RangeMap with node file ids,
    //       which is expected by read_node_adj_elems.
    rval = read_nodes( nodes );
    mpe_event.end( rval );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[GET_NODES_TIME] = timer->time_elapsed();

    debug_barrier();
    dbgOut.tprint( 1, "READING ELEMENTS\n" );

    // Decide if we need to read additional elements
    enum SideMode
    {
        SM_EXPLICIT,
        SM_NODES,
        SM_SIDES
    };
    int side_mode;
    const char* const options[] = { "EXPLICIT", "NODES", "SIDES", 0 };
    rval                        = opts.match_option( "ELEMENTS", options, side_mode );
    if( MB_ENTITY_NOT_FOUND == rval )
    {
        // If only nodes were specified, then default to "NODES", otherwise
        // default to "SIDES".
        if( 0 == max_dim )
            side_mode = SM_NODES;
        else
            side_mode = SM_SIDES;
    }
    else if( MB_SUCCESS != rval )
    {
        MB_SET_ERR( rval, "Invalid value for 'ELEMENTS' option" );
    }

    if( side_mode == SM_SIDES /*ELEMENTS=SIDES*/ && max_dim == 0 /*node-based*/ )
    {
        // Read elements until we find something. Once we find something,
        // read only elements of the same dimension. NOTE: loop termination
        // criterion changes on both sides (max_dim can be changed in loop
        // body).
        for( int dim = 3; dim >= max_dim; --dim )
        {
            for( int i = 0; i < fileInfo->num_elem_desc; ++i )
            {
                EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
                if( CN::Dimension( type ) == dim )
                {
                    debug_barrier();
                    dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
                    mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
                    Range ents;
                    // Collect the newly read elements so the emptiness
                    // check below is meaningful (without the out-argument,
                    // 'ents' would always be empty).
                    rval = read_node_adj_elems( fileInfo->elems[i], &ents );
                    mpe_event.end( rval );
                    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
                    if( !ents.empty() ) max_dim = 3;
                }
            }
        }
    }

    if( cputime ) _times[GET_NODEADJ_TIME] = timer->time_elapsed();
    Range side_entities;
    if( side_mode != SM_EXPLICIT /*ELEMENTS=NODES || ELEMENTS=SIDES*/ )
    {
        if( 0 == max_dim ) max_dim = 4;
        // Now read any additional elements for which we've already read all
        // of the nodes.
        for( int dim = max_dim - 1; dim > 0; --dim )
        {
            for( int i = 0; i < fileInfo->num_elem_desc; ++i )
            {
                EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
                if( CN::Dimension( type ) == dim )
                {
                    debug_barrier();
                    dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
                    mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
                    rval = read_node_adj_elems( fileInfo->elems[i], &side_entities );
                    mpe_event.end( rval );
                    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
                }
            }
        }
    }

    // We need to do this here for polyhedra to be handled correctly.
    // We have to wait until the faces are read in the above code block,
    // but need to create the connectivity before doing update_connectivity,
    // which might otherwise delete polyhedra faces.
    if( cputime ) _times[GET_SIDEELEM_TIME] = timer->time_elapsed();

    debug_barrier();
    dbgOut.tprint( 1, "UPDATING CONNECTIVITY ARRAYS FOR READ ELEMENTS\n" );
    mpe_event.start( "updating connectivity for elements read before vertices" );
    rval = update_connectivity();
    mpe_event.end();
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[UPDATECONN_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "READING ADJACENCIES\n" );
    for( int i = 0; i < fileInfo->num_elem_desc; ++i )
    {
        if (fileInfo->elems[i].have_adj  /*&&
        idMap.intersects(fileInfo->elems[i].desc.start_id, fileInfo->elems[i].desc.count) */)
        {
            mpe_event.start( "reading adjacencies for ", fileInfo->elems[i].handle );
            long len;
            hid_t th = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &len, &status );
            if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

            rval = read_adjacencies( th, len );
            mhdf_closeData( filePtr, th, &status );
            mpe_event.end( rval );
            if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
        }
    }

    if( cputime ) _times[ADJACENCY_TIME] = timer->time_elapsed();

    // If doing ELEMENTS=SIDES then we need to delete any entities
    // that we read that aren't actually sides (e.g. an interior face
    // that connects two disjoint portions of the part). Both
    // update_connectivity and reading of any explicit adjacencies must
    // happen before this.
    if( side_mode == SM_SIDES )
    {
        debug_barrier();
        mpe_event.start( "cleaning up non-side lower-dim elements" );
        dbgOut.tprint( 1, "CHECKING FOR AND DELETING NON-SIDE ELEMENTS\n" );
        rval = delete_non_side_elements( side_entities );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[DELETE_NON_SIDEELEM_TIME] = timer->time_elapsed();

    debug_barrier();
    dbgOut.tprint( 1, "READING SETS\n" );

    // If reading contained/child sets but not their contents then find
    // them now. If we were also reading their contents we would
    // have found them already.
    if( content_mode == RSM_SETS || child_mode == RSM_SETS )
    {
        dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
        mpe_event.start( "finding recursively contained sets" );
        rval = read_set_ids_recursive( sets, content_mode == RSM_SETS, child_mode == RSM_SETS );
        mpe_event.end( rval );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[READ_SET_IDS_RECURS_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "  doing find_sets_containing\n" );
    mpe_event.start( "finding sets containing any read entities" );

    // Decide whether to read set-containing parents
    bool read_set_containing_parents = true;
    std::string tmp_opt;
    rval = opts.get_option( "NO_SET_CONTAINING_PARENTS", tmp_opt );
    if( MB_SUCCESS == rval ) read_set_containing_parents = false;

    // Append file IDs of sets containing any of the nodes or elements
    // we've read up to this point.
    rval = find_sets_containing( sets, read_set_containing_parents );
    mpe_event.end( rval );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[FIND_SETS_CONTAINING_TIME] = timer->time_elapsed();

    // Now actually read all set data and instantiate sets in MOAB.
    // Get any contained sets out of file_ids.
    mpe_event.start( "reading set contents/parents/children" );
    EntityHandle first_set = fileInfo->sets.start_id;
    sets.merge( file_ids.lower_bound( first_set ), file_ids.lower_bound( first_set + fileInfo->sets.count ) );
    dbgOut.tprint( 1, "  doing read_sets\n" );
    rval = read_sets( sets );
    mpe_event.end( rval );
    if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );

    if( cputime ) _times[READ_SETS_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "READING TAGS\n" );

    for( int i = 0; i < fileInfo->num_tag_desc; ++i )
    {
        mpe_event.start( "reading tag: ", fileInfo->tags[i].name );
        rval = read_tag( i );
        if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
    }

    if( cputime ) _times[READ_TAGS_TIME] = timer->time_elapsed();

    dbgOut.tprint( 1, "PARTIAL READ COMPLETE.\n" );

    return MB_SUCCESS;
}

ErrorCode ReadHDF5::search_tag_values( int tag_index,
                                       const std::vector< int >& sorted_values,
                                       Range& file_ids,
                                       bool sets_only )
{
    ErrorCode rval;
    mhdf_Status status;
    std::vector< EntityHandle >::iterator iter;
    const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
    long size;
    long start_id;

    CHECK_OPEN_HANDLES;

    debug_barrier();

    // Do dense data

    hid_t table;
    const char* name;
    std::vector< EntityHandle > indices;
    // These are probably in order of dimension, so iterate
    // in reverse order to make Range insertions more efficient.
    std::vector< int > grp_indices( tag.dense_elem_indices, tag.dense_elem_indices + tag.num_dense_indices );
    for( std::vector< int >::reverse_iterator i = grp_indices.rbegin(); i != grp_indices.rend(); ++i )
    {
        int idx = *i;
        if( idx == -2 )
        {
            name     = mhdf_set_type_handle();
            start_id = fileInfo->sets.start_id;
        }
        else if( sets_only )
        {
            continue;
        }
        else if( idx == -1 )
        {
            name     = mhdf_node_type_handle();
            start_id = fileInfo->nodes.start_id;
        }
        else
        {
            if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
            name     = fileInfo->elems[idx].handle;
            start_id = fileInfo->elems[idx].desc.start_id;
        }
        table = mhdf_openDenseTagData( filePtr, tag.name, name, &size, &status );
        if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        rval = search_tag_values( table, size, sorted_values, indices );
        mhdf_closeData( filePtr, table, &status );
        if( MB_SUCCESS != rval || is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        // Convert from table indices to file IDs and add to result list
        std::sort( indices.begin(), indices.end(), std::greater< EntityHandle >() );
        std::transform( indices.begin(), indices.end(), range_inserter( file_ids ),
                        // std::bind1st(std::plus<long>(), start_id));
                        std::bind( std::plus< long >(), start_id, std::placeholders::_1 ) );
        indices.clear();
    }

    if( !tag.have_sparse ) return MB_SUCCESS;

    // Do sparse data

    hid_t tables[2];
    long junk;  // Redundant value for non-variable-length tags
    mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
    if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    rval = search_tag_values( tables[1], size, sorted_values, indices );
    mhdf_closeData( filePtr, tables[1], &status );
    if( MB_SUCCESS != rval || is_error( status ) )
    {
        mhdf_closeData( filePtr, tables[0], &status );
        MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    }
    // Convert to ranges
    std::sort( indices.begin(), indices.end() );
    std::vector< EntityHandle > ranges;
    iter = indices.begin();
    while( iter != indices.end() )
    {
        ranges.push_back( *iter );
        EntityHandle last = *iter;
        for( ++iter; iter != indices.end() && ( last + 1 ) == *iter; ++iter, ++last )
            ;
        ranges.push_back( last );
    }
    // Read file ids
    iter                 = ranges.begin();
    unsigned long offset = 0;
    while( iter != ranges.end() )
    {
        long begin = *iter;
        ++iter;
        long end = *iter;
        ++iter;
        mhdf_readSparseTagEntitiesWithOpt( tables[0], begin, end - begin + 1, handleType, &indices[offset], indepIO,
                                           &status );
        if( is_error( status ) )
        {
            mhdf_closeData( filePtr, tables[0], &status );
            MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        }
        offset += end - begin + 1;
    }
    mhdf_closeData( filePtr, tables[0], &status );
    if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    assert( offset == indices.size() );
    std::sort( indices.begin(), indices.end() );

    if( sets_only )
    {
        iter = std::lower_bound( indices.begin(), indices.end(),
                                 (EntityHandle)( fileInfo->sets.start_id + fileInfo->sets.count ) );
        indices.erase( iter, indices.end() );
        iter = std::lower_bound( indices.begin(), indices.end(), fileInfo->sets.start_id );
        indices.erase( indices.begin(), iter );
    }
    copy_sorted_file_ids( &indices[0], indices.size(), file_ids );

    return MB_SUCCESS;
}

ErrorCode ReadHDF5::get_tagged_entities( int tag_index, Range& file_ids )
{
    const mhdf_TagDesc& tag = fileInfo->tags[tag_index];

    CHECK_OPEN_HANDLES;

    // Do dense data
    Range::iterator hint = file_ids.begin();
    for( int i = 0; i < tag.num_dense_indices; ++i )
    {
        int idx = tag.dense_elem_indices[i];
        mhdf_EntDesc* ents;
        if( idx == -2 )
            ents = &fileInfo->sets;
        else if( idx == -1 )
            ents = &fileInfo->nodes;
        else
        {
            if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
            ents = &( fileInfo->elems[idx].desc );
        }

        EntityHandle h = (EntityHandle)ents->start_id;
        hint           = file_ids.insert( hint, h, h + ents->count - 1 );
    }

    if( !tag.have_sparse ) return MB_SUCCESS;

    // Do sparse data

    mhdf_Status status;
    hid_t tables[2];
    long size, junk;
    mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
    if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    mhdf_closeData( filePtr, tables[1], &status );
    if( is_error( status ) )
    {
        mhdf_closeData( filePtr, tables[0], &status );
        MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
    }

    hid_t file_type = H5Dget_type( tables[0] );
    if( file_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

    hint                   = file_ids.begin();
    EntityHandle* buffer   = reinterpret_cast< EntityHandle* >( dataBuffer );
    const long buffer_size = bufferSize / std::max( sizeof( EntityHandle ), H5Tget_size( file_type ) );
    long remaining = size, offset = 0;
    while( remaining )
    {
        long count = std::min( buffer_size, remaining );
        assert_range( buffer, count );
        mhdf_readSparseTagEntitiesWithOpt( *tables, offset, count, file_type, buffer, collIO, &status );
        if( is_error( status ) )
        {
            H5Tclose( file_type );
            mhdf_closeData( filePtr, *tables, &status );
            MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
        }
        H5Tconvert( file_type, handleType, count, buffer, NULL, H5P_DEFAULT );

        std::sort( buffer, buffer + count );
        for( long i = 0; i < count; ++i )
            hint = file_ids.insert( hint, buffer[i], buffer[i] );

        remaining -= count;
        offset += count;
    }

    H5Tclose( file_type );
    mhdf_closeData( filePtr, *tables, &status );
    if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );

    return MB_SUCCESS;
}

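// Scan a (dense or sparse) tag-value table for entries matching any of the
// sorted search values. The table is streamed through dataBuffer in chunks
// of bufferSize / sizeof(int) values; matching *table indices* (not file
// IDs) are appended to value_indices, and callers add the table's start_id
// to convert them to file IDs.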
01362 ErrorCode ReadHDF5::search_tag_values( hid_t tag_table,
01363                                        unsigned long table_size,
01364                                        const std::vector< int >& sorted_values,
01365                                        std::vector< EntityHandle >& value_indices )
01366 {
01367     debug_barrier();
01368 
01369     CHECK_OPEN_HANDLES;
01370 
01371     mhdf_Status status;
01372     size_t chunk_size = bufferSize / sizeof( int );
01373     int* buffer       = reinterpret_cast< int* >( dataBuffer );
01374     size_t remaining = table_size, offset = 0;
01375     while( remaining )
01376     {
01377         // Get a block of tag values
01378         size_t count = std::min( chunk_size, remaining );
01379         assert_range( buffer, count );
01380         mhdf_readTagValuesWithOpt( tag_table, offset, count, H5T_NATIVE_INT, buffer, collIO, &status );
01381         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01382 
01383         // Search tag values
01384         for( size_t i = 0; i < count; ++i )
01385             if( std::binary_search( sorted_values.begin(), sorted_values.end(), (int)buffer[i] ) )
01386                 value_indices.push_back( i + offset );
01387 
01388         offset += count;
01389         remaining -= count;
01390     }
01391 
01392     return MB_SUCCESS;
01393 }
01394 
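// Illustration: the chunked scan in search_tag_values() above, reduced to
// plain C++. A table of table_size ints is visited one buffer-load at a time
// and each value is tested against a sorted match list; what is collected is
// the table *index* (i + offset), not the value. All names below are
// hypothetical.
//
//   #include <algorithm>
//   #include <vector>
//
//   static void scan_for_values( const int* table, size_t table_size, size_t chunk_size,
//                                const std::vector< int >& sorted_values,
//                                std::vector< size_t >& match_indices )
//   {
//       size_t remaining = table_size, offset = 0;
//       while( remaining )
//       {
//           const size_t count = std::min( chunk_size, remaining );  // One buffer-load
//           for( size_t i = 0; i < count; ++i )
//               if( std::binary_search( sorted_values.begin(), sorted_values.end(), table[offset + i] ) )
//                   match_indices.push_back( offset + i );
//           offset += count;
//           remaining -= count;
//       }
//   }
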
01395 ErrorCode ReadHDF5::read_nodes( const Range& node_file_ids )
01396 {
01397     ErrorCode rval;
01398     mhdf_Status status;
01399     const int dim = fileInfo->nodes.vals_per_ent;
01400     Range range;
01401 
01402     CHECK_OPEN_HANDLES;
01403 
01404     if( node_file_ids.empty() && !nativeParallel ) return MB_SUCCESS;
01405 
01406     int cdim;
01407     rval = iFace->get_dimension( cdim );
01408     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01409 
01410     if( cdim < dim )
01411     {
01412         rval = iFace->set_dimension( dim );
01413         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01414     }
01415 
01416     hid_t data_id = mhdf_openNodeCoordsSimple( filePtr, &status );
01417     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01418 
01419     EntityHandle handle;
01420     std::vector< double* > arrays( dim );
01421     const size_t num_nodes = node_file_ids.size();
01422     if( num_nodes > 0 )
01423     {
01424         rval = readUtil->get_node_coords( dim, (int)num_nodes, 0, handle, arrays );
01425         if( MB_SUCCESS != rval )
01426         {
01427             mhdf_closeData( filePtr, data_id, &status );
01428             MB_SET_ERR( rval, "ReadHDF5 Failure" );
01429         }
01430     }
01431 
01432     if( blockedCoordinateIO )
01433     {
01434         try
01435         {
01436             for( int d = 0; d < dim; ++d )
01437             {
01438                 ReadHDF5Dataset reader( "blocked coords", data_id, nativeParallel, mpiComm, false );
01439                 reader.set_column( d );
01440                 reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, num_nodes, H5T_NATIVE_DOUBLE );
01441                 dbgOut.printf( 3, "Reading %lu chunks for coordinate dimension %d\n", reader.get_read_count(), d );
01442                 // Normally a single read call suffices, unless the sparse nature
01443                 // of file_ids forces the reader to split the read
01444                 size_t count, offset = 0;
01445                 int nn = 0;
01446                 while( !reader.done() )
01447                 {
01448                     dbgOut.printf( 3, "Reading chunk %d for dimension %d\n", ++nn, d );
01449                     reader.read( arrays[d] + offset, count );
01450                     offset += count;
01451                 }
01452                 if( offset != num_nodes )
01453                 {
01454                     mhdf_closeData( filePtr, data_id, &status );
01455                     assert( false );
01456                     return MB_FAILURE;
01457                 }
01458             }
01459         }
01460         catch( ReadHDF5Dataset::Exception )
01461         {
01462             mhdf_closeData( filePtr, data_id, &status );
01463             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01464         }
01465     }
01466     else
01467     {  // !blockedCoordinateIO
01468         double* buffer  = (double*)dataBuffer;
01469         long chunk_size = bufferSize / ( 3 * sizeof( double ) );
01470         long coffset    = 0;
01471         int nn          = 0;
01472         try
01473         {
01474             ReadHDF5Dataset reader( "interleaved coords", data_id, nativeParallel, mpiComm, false );
01475             reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, chunk_size, H5T_NATIVE_DOUBLE );
01476             dbgOut.printf( 3, "Reading %lu chunks for node coordinates\n", reader.get_read_count() );
01477             while( !reader.done() )
01478             {
01479                 dbgOut.tprintf( 3, "Reading chunk %d of node coords\n", ++nn );
01480 
01481                 size_t count;
01482                 reader.read( buffer, count );
01483 
01484                 for( size_t i = 0; i < count; ++i )
01485                     for( int d = 0; d < dim; ++d )
01486                         arrays[d][coffset + i] = buffer[dim * i + d];
01487                 coffset += count;
01488             }
01489         }
01490         catch( ReadHDF5Dataset::Exception )
01491         {
01492             mhdf_closeData( filePtr, data_id, &status );
01493             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01494         }
01495     }
01496 
01497     dbgOut.print( 3, "Closing node coordinate table\n" );
01498     mhdf_closeData( filePtr, data_id, &status );
01499     for( int d = dim; d < cdim; ++d )
01500         memset( arrays[d], 0, num_nodes * sizeof( double ) );
01501 
01502     dbgOut.printf( 3, "Updating ID to handle map for %lu nodes\n", (unsigned long)node_file_ids.size() );
01503     return insert_in_id_map( node_file_ids, handle );
01504 }
01505 
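// Illustration: the two coordinate layouts handled by read_nodes() above.
// Blocked storage keeps all X values, then all Y, then all Z, so each
// dimension is read straight into its destination array (one dataset column
// per pass). Interleaved storage keeps an (x, y, z) tuple per vertex, so the
// buffer must be scattered after each read; the loop below mirrors the one
// above, with hypothetical parameter names.
//
//   // buffer holds count interleaved tuples of dim doubles each; arrays[d]
//   // is the per-dimension destination, filled starting at index coffset.
//   static void deinterleave( const double* buffer, size_t count, int dim,
//                             double* const* arrays, size_t coffset )
//   {
//       for( size_t i = 0; i < count; ++i )
//           for( int d = 0; d < dim; ++d )
//               arrays[d][coffset + i] = buffer[dim * i + d];
//   }
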
01506 ErrorCode ReadHDF5::read_elems( int i )
01507 {
01508     Range ids;
01509     ids.insert( fileInfo->elems[i].desc.start_id,
01510                 fileInfo->elems[i].desc.start_id + fileInfo->elems[i].desc.count - 1 );
01511     return read_elems( i, ids );
01512 }
01513 
01514 ErrorCode ReadHDF5::read_elems( int i, const Range& file_ids, Range* node_ids )
01515 {
01516     if( fileInfo->elems[i].desc.vals_per_ent < 0 )
01517     {
01518         if( node_ids != 0 )  // Not implemented for version 3 format of poly data
01519             MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
01520         return read_poly( fileInfo->elems[i], file_ids );
01521     }
01522     else
01523         return read_elems( fileInfo->elems[i], file_ids, node_ids );
01524 }
01525 
01526 ErrorCode ReadHDF5::read_elems( const mhdf_ElemDesc& elems, const Range& file_ids, Range* node_ids )
01527 {
01528     CHECK_OPEN_HANDLES;
01529 
01530     debug_barrier();
01531     dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", elems.handle,
01532                     (unsigned long)file_ids.size(), (unsigned long)file_ids.psize() );
01533 
01534     ErrorCode rval = MB_SUCCESS;
01535     mhdf_Status status;
01536 
01537     EntityType type = CN::EntityTypeFromName( elems.type );
01538     if( type == MBMAXTYPE )
01539     {
01540         MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" );
01541     }
01542 
01543     const int nodes_per_elem = elems.desc.vals_per_ent;
01544     const size_t count       = file_ids.size();
01545     hid_t data_id            = mhdf_openConnectivitySimple( filePtr, elems.handle, &status );
01546     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01547 
01548     EntityHandle handle;
01549     EntityHandle* array = 0;
01550     if( count > 0 ) rval = readUtil->get_element_connect( count, nodes_per_elem, type, 0, handle, array );
01551     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01552 
01553     try
01554     {
01555         EntityHandle* buffer     = reinterpret_cast< EntityHandle* >( dataBuffer );
01556         const size_t buffer_size = bufferSize / ( sizeof( EntityHandle ) * nodes_per_elem );
01557         ReadHDF5Dataset reader( elems.handle, data_id, nativeParallel, mpiComm );
01558         reader.set_file_ids( file_ids, elems.desc.start_id, buffer_size, handleType );
01559         dbgOut.printf( 3, "Reading connectivity in %lu chunks for element group \"%s\"\n", reader.get_read_count(),
01560                        elems.handle );
01561         EntityHandle* iter = array;
01562         int nn             = 0;
01563         while( !reader.done() )
01564         {
01565             dbgOut.printf( 3, "Reading chunk %d for \"%s\"\n", ++nn, elems.handle );
01566 
01567             size_t num_read;
01568             reader.read( buffer, num_read );
01569             iter = std::copy( buffer, buffer + num_read * nodes_per_elem, iter );
01570 
01571             if( node_ids )
01572             {
01573                 std::sort( buffer, buffer + num_read * nodes_per_elem );
01574                 num_read = std::unique( buffer, buffer + num_read * nodes_per_elem ) - buffer;
01575                 copy_sorted_file_ids( buffer, num_read, *node_ids );
01576             }
01577         }
01578         assert( iter - array == (ptrdiff_t)count * nodes_per_elem );
01579     }
01580     catch( ReadHDF5Dataset::Exception )
01581     {
01582         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01583     }
01584 
01585     if( !node_ids )
01586     {
01587         rval = convert_id_to_handle( array, count * nodes_per_elem );
01588         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01589 
01590         rval = readUtil->update_adjacencies( handle, count, nodes_per_elem, array );
01591         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01592     }
01593     else
01594     {
01595         IDConnectivity t;
01596         t.handle         = handle;
01597         t.count          = count;
01598         t.nodes_per_elem = nodes_per_elem;
01599         t.array          = array;
01600         idConnectivityList.push_back( t );
01601     }
01602 
01603     return insert_in_id_map( file_ids, handle );
01604 }
01605 
01606 ErrorCode ReadHDF5::update_connectivity()
01607 {
01608     ErrorCode rval;
01609     std::vector< IDConnectivity >::iterator i;
01610     for( i = idConnectivityList.begin(); i != idConnectivityList.end(); ++i )
01611     {
01612         rval = convert_id_to_handle( i->array, i->count * i->nodes_per_elem );
01613         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01614 
01615         rval = readUtil->update_adjacencies( i->handle, i->count, i->nodes_per_elem, i->array );
01616         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01617     }
01618     idConnectivityList.clear();
01619 
01620     return MB_SUCCESS;
01621 }
01622 
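// Illustration: the deferred-conversion protocol shared by read_elems() and
// update_connectivity() above. When a caller asks read_elems() for the node
// IDs, the connectivity is stored as raw file IDs and parked on
// idConnectivityList; only once the referenced nodes have been read, so the
// file-ID to handle map is complete, does update_connectivity() translate
// the parked arrays and hook up adjacencies. A hypothetical ordering (these
// are internal methods, shown only to make the sequence explicit):
//
//   Range node_ids;
//   read_elems( elem_index, elem_file_ids, &node_ids );  // Parks raw file IDs
//   read_nodes( node_ids );                              // Fills the ID map
//   update_connectivity();                               // Converts parked arrays
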
01623 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, Range* handles_out )
01624 {
01625     mhdf_Status status;
01626     ErrorCode rval;
01627 
01628     CHECK_OPEN_HANDLES;
01629 
01630     hid_t table = mhdf_openConnectivitySimple( filePtr, group.handle, &status );
01631     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01632 
01633     rval = read_node_adj_elems( group, table, handles_out );
01634 
01635     mhdf_closeData( filePtr, table, &status );
01636     if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR_RET_VAL( "ReadHDF5 Failure", MB_FAILURE );
01637 
01638     return rval;
01639 }
01640 
01641 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, hid_t table_handle, Range* handles_out )
01642 {
01643     CHECK_OPEN_HANDLES;
01644 
01645     debug_barrier();
01646 
01647     mhdf_Status status;
01648     ErrorCode rval;
01649     IODebugTrack debug_track( debugTrack, std::string( group.handle ) );
01650 
01651     // Copy data to local variables (makes other code clearer)
01652     const int node_per_elem = group.desc.vals_per_ent;
01653     long start_id           = group.desc.start_id;
01654     long remaining          = group.desc.count;
01655     const EntityType type   = CN::EntityTypeFromName( group.type );
01656 
01657     // Figure out how many elements we can read in each pass
01658     long* const buffer     = reinterpret_cast< long* >( dataBuffer );
01659     const long buffer_size = bufferSize / ( node_per_elem * sizeof( buffer[0] ) );
01660     // Read all element connectivity in buffer_size blocks
01661     long offset = 0;
01662     dbgOut.printf( 3, "Reading node-adjacent elements from \"%s\" in %ld chunks\n", group.handle,
01663                    ( remaining + buffer_size - 1 ) / buffer_size );
01664     int nn = 0;
01665     Range::iterator hint;
01666     if( handles_out ) hint = handles_out->begin();
01667     while( remaining )
01668     {
01669         dbgOut.printf( 3, "Reading chunk %d of connectivity data for \"%s\"\n", ++nn, group.handle );
01670 
01671         // Read a block of connectivity data
01672         const long count = std::min( remaining, buffer_size );
01673         debug_track.record_io( offset, count );
01674         assert_range( buffer, count * node_per_elem );
01675         mhdf_readConnectivityWithOpt( table_handle, offset, count, H5T_NATIVE_LONG, buffer, collIO, &status );
01676         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01677         offset += count;
01678         remaining -= count;
01679 
01680         // Count the number of elements in the block that we want,
01681         // zeroing the connectivity of the others
01682         long num_elem = 0;
01683         long* iter    = buffer;
01684         for( long i = 0; i < count; ++i )
01685         {
01686             for( int j = 0; j < node_per_elem; ++j )
01687             {
01688                 iter[j] = (long)idMap.find( iter[j] );
01689                 if( !iter[j] )
01690                 {
01691                     iter[0] = 0;
01692                     break;
01693                 }
01694             }
01695             if( iter[0] ) ++num_elem;
01696             iter += node_per_elem;
01697         }
01698 
01699         if( !num_elem )
01700         {
01701             start_id += count;
01702             continue;
01703         }
01704 
01705         // Create elements
01706         EntityHandle handle;
01707         EntityHandle* array;
01708         rval = readUtil->get_element_connect( (int)num_elem, node_per_elem, type, 0, handle, array );
01709         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01710 
01711         // Copy all non-zero connectivity values
01712         iter                = buffer;
01713         EntityHandle* iter2 = array;
01714         EntityHandle h      = handle;
01715         for( long i = 0; i < count; ++i )
01716         {
01717             if( !*iter )
01718             {
01719                 iter += node_per_elem;
01720                 continue;
01721             }
01722             if( !idMap.insert( start_id + i, h++, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01723 
01724             long* const end = iter + node_per_elem;
01725             for( ; iter != end; ++iter, ++iter2 )
01726                 *iter2 = (EntityHandle)*iter;
01727         }
01728         assert( iter2 - array == num_elem * node_per_elem );
01729         start_id += count;
01730 
01731         rval = readUtil->update_adjacencies( handle, num_elem, node_per_elem, array );
01732         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01733         if( handles_out ) hint = handles_out->insert( hint, handle, handle + num_elem - 1 );
01734     }
01735 
01736     debug_track.all_reduce();
01737     return MB_SUCCESS;
01738 }
01739 
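// Illustration: the mark-and-compact filter at the heart of
// read_node_adj_elems() above. An element is kept only if every node ID maps
// to a loaded handle; rejected rows are marked by zeroing their first entry,
// and the survivors are then copied forward. A minimal sketch of the
// compaction step, with hypothetical names:
//
//   #include <algorithm>
//
//   // conn: count rows of node_per_elem values, where a leading 0 marks a
//   // rejected row. Compacts survivors in place and returns their number.
//   static long compact_rows( long* conn, long count, int node_per_elem )
//   {
//       long* out = conn;
//       for( long i = 0; i < count; ++i )
//       {
//           long* row = conn + i * node_per_elem;
//           if( !row[0] ) continue;  // Marked as rejected
//           if( out != row ) std::copy( row, row + node_per_elem, out );
//           out += node_per_elem;
//       }
//       return ( out - conn ) / node_per_elem;
//   }
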
01740 ErrorCode ReadHDF5::read_elems( int i, const Range& elems_in, Range& nodes )
01741 {
01742     CHECK_OPEN_HANDLES;
01743 
01744     debug_barrier();
01745     dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", fileInfo->elems[i].handle,
01746                     (unsigned long)elems_in.size(), (unsigned long)elems_in.psize() );
01747 
01748     if( elems_in.empty() ) return MB_SUCCESS;
01749 
01750     // We don't support version 3 style poly element data
01751     if( fileInfo->elems[i].desc.vals_per_ent <= 0 ) MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
01752 
01753     EntityHandle* const buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
01754     const int node_per_elem    = fileInfo->elems[i].desc.vals_per_ent;
01755     const size_t buffer_size   = bufferSize / ( node_per_elem * sizeof( EntityHandle ) );
01756 
01757     assert( (long)elems_in.front() >= fileInfo->elems[i].desc.start_id );
01758     assert( (long)elems_in.back() - fileInfo->elems[i].desc.start_id < fileInfo->elems[i].desc.count );
01759 
01760     mhdf_Status status;
01761     hid_t table = mhdf_openConnectivitySimple( filePtr, fileInfo->elems[i].handle, &status );
01762     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01763 
01764     try
01765     {
01766         ReadHDF5Dataset reader( fileInfo->elems[i].handle, table, nativeParallel, mpiComm );
01767         reader.set_file_ids( elems_in, fileInfo->elems[i].desc.start_id, buffer_size, handleType );
01768         dbgOut.printf( 3, "Reading node list in %lu chunks for \"%s\"\n", reader.get_read_count(),
01769                        fileInfo->elems[i].handle );
01770         int nn = 0;
01771         while( !reader.done() )
01772         {
01773             dbgOut.printf( 3, "Reading chunk %d of \"%s\" connectivity\n", ++nn, fileInfo->elems[i].handle );
01774             size_t num_read;
01775             reader.read( buffer, num_read );
01776             std::sort( buffer, buffer + num_read * node_per_elem );
01777             num_read = std::unique( buffer, buffer + num_read * node_per_elem ) - buffer;
01778             copy_sorted_file_ids( buffer, num_read, nodes );
01779         }
01780     }
01781     catch( ReadHDF5Dataset::Exception )
01782     {
01783         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01784     }
01785 
01786     return MB_SUCCESS;
01787 }
01788 
01789 ErrorCode ReadHDF5::read_poly( const mhdf_ElemDesc& elems, const Range& file_ids )
01790 {
01791     class PolyReader : public ReadHDF5VarLen
01792     {
01793       private:
01794         const EntityType type;
01795         ReadHDF5* readHDF5;
01796 
01797       public:
01798         PolyReader( EntityType elem_type, void* buffer, size_t buffer_size, ReadHDF5* owner, DebugOutput& dbg )
01799             : ReadHDF5VarLen( dbg, buffer, buffer_size ), type( elem_type ), readHDF5( owner )
01800         {
01801         }
01802         virtual ~PolyReader() {}
01803         ErrorCode store_data( EntityHandle file_id, void* data, long len, bool )
01804         {
01805             size_t valid;
01806             EntityHandle* conn = reinterpret_cast< EntityHandle* >( data );
01807             readHDF5->convert_id_to_handle( conn, len, valid );
01808             if( valid != (size_t)len ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
01809             EntityHandle handle;
01810             ErrorCode rval = readHDF5->moab()->create_element( type, conn, len, handle );
01811             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01812 
01813             rval = readHDF5->insert_in_id_map( file_id, handle );
01814             return rval;
01815         }
01816     };
01817 
01818     CHECK_OPEN_HANDLES;
01819 
01820     debug_barrier();
01821 
01822     EntityType type = CN::EntityTypeFromName( elems.type );
01823     if( type == MBMAXTYPE )
01824     {
01825         MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" );
01826     }
01827 
01828     hid_t handles[2];
01829     mhdf_Status status;
01830     long num_poly, num_conn, first_id;
01831     mhdf_openPolyConnectivity( filePtr, elems.handle, &num_poly, &num_conn, &first_id, handles, &status );
01832     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01833 
01834     std::string nm( elems.handle );
01835     ReadHDF5Dataset offset_reader( ( nm + " offsets" ).c_str(), handles[0], nativeParallel, mpiComm, true );
01836     ReadHDF5Dataset connect_reader( ( nm + " data" ).c_str(), handles[1], nativeParallel, mpiComm, true );
01837 
01838     PolyReader tool( type, dataBuffer, bufferSize, this, dbgOut );
01839     return tool.read( offset_reader, connect_reader, file_ids, first_id, handleType );
01840 }
01841 
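// Illustration: the variable-length (poly) storage consumed by PolyReader
// above. The file keeps two tables, an end-offset per element and a flat
// connectivity stream. Assuming the same inclusive end-offset convention
// used for set data below, end offsets {2, 6, 9} describe three polys whose
// connectivity occupies stream slots [0,2], [3,6] and [7,9], i.e. lengths 3,
// 4 and 3; ReadHDF5VarLen walks both tables and hands each
// (file_id, connectivity, length) triple to store_data().
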
01842 ErrorCode ReadHDF5::delete_non_side_elements( const Range& side_ents )
01843 {
01844     ErrorCode rval;
01845 
01846     // Build list of entities that we need to find the sides of
01847     Range explicit_ents;
01848     Range::iterator hint = explicit_ents.begin();
01849     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
01850     {
01851         EntityHandle start = i->value;
01852         EntityHandle end   = i->value + i->count - 1;
01853         EntityType type    = TYPE_FROM_HANDLE( start );
01854         assert( type == TYPE_FROM_HANDLE( end ) );  // Otherwise handle space entirely full!!
01855         if( type != MBVERTEX && type != MBENTITYSET ) hint = explicit_ents.insert( hint, start, end );
01856     }
01857     explicit_ents = subtract( explicit_ents, side_ents );
01858 
01859     // Figure out which entities we want to delete
01860     Range dead_ents( side_ents );
01861     Range::iterator ds, de, es;
01862     ds = dead_ents.lower_bound( CN::TypeDimensionMap[1].first );
01863     de = dead_ents.lower_bound( CN::TypeDimensionMap[2].first, ds );
01864     if( ds != de )
01865     {
01866         // Get subset of explicit ents of dimension greater than 1
01867         es = explicit_ents.lower_bound( CN::TypeDimensionMap[2].first );
01868         Range subset, adj;
01869         subset.insert( es, explicit_ents.end() );
01870         rval = iFace->get_adjacencies( subset, 1, false, adj, Interface::UNION );
01871         if( MB_SUCCESS != rval ) return rval;
01872         dead_ents = subtract( dead_ents, adj );
01873     }
01874     ds = dead_ents.lower_bound( CN::TypeDimensionMap[2].first );
01875     de = dead_ents.lower_bound( CN::TypeDimensionMap[3].first, ds );
01876     assert( de == dead_ents.end() );
01877     if( ds != de )
01878     {
01879         // Get subset of explicit ents of dimension 3
01880         es = explicit_ents.lower_bound( CN::TypeDimensionMap[3].first );
01881         Range subset, adj;
01882         subset.insert( es, explicit_ents.end() );
01883         rval = iFace->get_adjacencies( subset, 2, false, adj, Interface::UNION );
01884         if( MB_SUCCESS != rval ) return rval;
01885         dead_ents = subtract( dead_ents, adj );
01886     }
01887 
01888     // Now delete anything remaining in dead_ents
01889     dbgOut.printf( 2, "Deleting %lu elements\n", (unsigned long)dead_ents.size() );
01890     dbgOut.print( 4, "\tDead entities: ", dead_ents );
01891     rval = iFace->delete_entities( dead_ents );
01892     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01893 
01894     // Remove dead entities from ID map
01895     while( !dead_ents.empty() )
01896     {
01897         EntityHandle start = dead_ents.front();
01898         EntityID count     = dead_ents.const_pair_begin()->second - start + 1;
01899         IDMap::iterator rit;
01900         for( rit = idMap.begin(); rit != idMap.end(); ++rit )
01901             if( rit->value <= start && (EntityID)( start - rit->value ) < rit->count ) break;
01902         if( rit == idMap.end() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01903 
01904         EntityID offset = start - rit->value;
01905         EntityID avail  = rit->count - offset;
01906         if( avail < count ) count = avail;
01907 
01908         dead_ents.erase( dead_ents.begin(), dead_ents.begin() + count );
01909         idMap.erase( rit->begin + offset, count );
01910     }
01911 
01912     return MB_SUCCESS;
01913 }
01914 
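// Illustration: the keep/delete rule applied by delete_non_side_elements()
// above, stated directly. A side entity of dimension d survives only if it
// is adjacent to some explicitly requested entity of higher dimension:
//
//   dead(d) = side_ents(d) - adjacencies( explicit_ents(dim > d), d )
//
// evaluated here for d = 1 against the dim >= 2 entities and for d = 2
// against the dim = 3 entities, with Interface::get_adjacencies() and
// moab::subtract() doing the set algebra.
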
01915 ErrorCode ReadHDF5::read_sets( const Range& file_ids )
01916 {
01917     CHECK_OPEN_HANDLES;
01918 
01919     debug_barrier();
01920 
01921     mhdf_Status status;
01922     ErrorCode rval;
01923 
01924     const size_t num_sets = fileInfo->sets.count;
01925     if( !num_sets )  // If no sets at all!
01926         return MB_SUCCESS;
01927 
01928     // Create sets
01929     std::vector< unsigned > flags( file_ids.size() );
01930     Range::iterator si = file_ids.begin();
01931     for( size_t i = 0; i < flags.size(); ++i, ++si )
01932         flags[i] = setMeta[*si - fileInfo->sets.start_id][3] & ~(long)mhdf_SET_RANGE_BIT;
01933     EntityHandle start_handle;
01934     // The file ids could be empty for empty partitions
01935     if( !file_ids.empty() )
01936     {
01937         rval = readUtil->create_entity_sets( flags.size(), &flags[0], 0, start_handle );
01938         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01939         rval = insert_in_id_map( file_ids, start_handle );
01940         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01941     }
01942 
01943     // Read contents
01944     if( fileInfo->have_set_contents )
01945     {
01946         long len     = 0;
01947         hid_t handle = mhdf_openSetData( filePtr, &len, &status );
01948         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01949 
01950         ReadHDF5Dataset dat( "set contents", handle, nativeParallel, mpiComm, true );
01951         rval = read_set_data( file_ids, start_handle, dat, CONTENT );
01952         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01953     }
01954 
01955     // Read set child lists
01956     if( fileInfo->have_set_children )
01957     {
01958         long len     = 0;
01959         hid_t handle = mhdf_openSetChildren( filePtr, &len, &status );
01960         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01961 
01962         ReadHDF5Dataset dat( "set children", handle, nativeParallel, mpiComm, true );
01963         rval = read_set_data( file_ids, start_handle, dat, CHILD );
01964         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01965     }
01966 
01967     // Read set parent lists
01968     if( fileInfo->have_set_parents )
01969     {
01970         long len     = 0;
01971         hid_t handle = mhdf_openSetParents( filePtr, &len, &status );
01972         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01973 
01974         ReadHDF5Dataset dat( "set parents", handle, nativeParallel, mpiComm, true );
01975         rval = read_set_data( file_ids, start_handle, dat, PARENT );
01976         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01977     }
01978 
01979     return MB_SUCCESS;
01980 }
01981 
01982 ErrorCode ReadHDF5::read_all_set_meta()
01983 {
01984     CHECK_OPEN_HANDLES;
01985 
01986     assert( !setMeta );
01987     const long num_sets = fileInfo->sets.count;
01988     if( !num_sets ) return MB_SUCCESS;
01989 
01990     mhdf_Status status;
01991     hid_t handle = mhdf_openSetMetaSimple( filePtr, &status );
01992     if( is_error( status ) )
01993     {
01994         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01995     }
01996 
01997     // Allocate extra space if we need it for data conversion
01998     hid_t meta_type = H5Dget_type( handle );
01999     size_t size     = H5Tget_size( meta_type );
02000     if( size > sizeof( long ) )
02001         setMeta = new long[( num_sets * size + ( sizeof( long ) - 1 ) ) / sizeof( long )][4];
02002     else
02003         setMeta = new long[num_sets][4];
02004 
02005     // Set some parameters based on whether each proc reads the table,
02006     // or only the root reads it and bcasts it to the others
02007     int rank     = 0;
02008     bool bcast   = false;
02009     hid_t ioprop = H5P_DEFAULT;
02010 #ifdef MOAB_HAVE_MPI
02011     MPI_Comm comm = 0;
02012     if( nativeParallel )
02013     {
02014         rank  = myPcomm->proc_config().proc_rank();
02015         comm  = myPcomm->proc_config().proc_comm();
02016         bcast = bcastDuplicateReads;
02017         if( !bcast ) ioprop = collIO;
02018     }
02019 #endif
02020 
02021     if( !bcast || 0 == rank )
02022     {
02023         mhdf_readSetMetaWithOpt( handle, 0, num_sets, meta_type, setMeta, ioprop, &status );
02024         if( is_error( status ) )
02025         {
02026             H5Tclose( meta_type );
02027             mhdf_closeData( filePtr, handle, &status );
02028             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02029         }
02030 
02031         H5Tconvert( meta_type, H5T_NATIVE_LONG, num_sets * 4, setMeta, 0, H5P_DEFAULT );
02032     }
02033     mhdf_closeData( filePtr, handle, &status );
02034     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02035     H5Tclose( meta_type );
02036 
02037     if( bcast )
02038     {
02039 #ifdef MOAB_HAVE_MPI
02040         int ierr = MPI_Bcast( (void*)setMeta, num_sets * 4, MPI_LONG, 0, comm );
02041         if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02042 #else
02043         assert( rank == 0 );              // If not MPI, then only one proc
02044 #endif
02045     }
02046 
02047     return MB_SUCCESS;
02048 }
02049 
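// Illustration: the read-on-root-then-broadcast strategy used by
// read_all_set_meta() above. When bcastDuplicateReads is set, every process
// needs the identical table, so rank 0 reads it alone and ships it to the
// others, trading a collective read for one broadcast. The MPI skeleton, as
// a minimal sketch (read_table_from_file is hypothetical):
//
//   int rank;
//   MPI_Comm_rank( comm, &rank );
//   if( 0 == rank ) read_table_from_file( table );  // Serial read on root only
//   MPI_Bcast( table, num_sets * 4, MPI_LONG, 0, comm );
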
02050 ErrorCode ReadHDF5::read_set_ids_recursive( Range& sets_in_out, bool contained_sets, bool child_sets )
02051 {
02052     CHECK_OPEN_HANDLES;
02053     mhdf_Status status;
02054 
02055     if( !fileInfo->have_set_children ) child_sets = false;
02056     if( !fileInfo->have_set_contents ) contained_sets = false;
02057     if( !child_sets && !contained_sets ) return MB_SUCCESS;
02058 
02059     // Open data tables
02060     if( fileInfo->sets.count == 0 )
02061     {
02062         assert( sets_in_out.empty() );
02063         return MB_SUCCESS;
02064     }
02065 
02066     if( !contained_sets && !child_sets ) return MB_SUCCESS;
02067 
02068     ReadHDF5Dataset cont( "set contents", false, mpiComm );
02069     ReadHDF5Dataset child( "set children", false, mpiComm );
02070 
02071     if( contained_sets )
02072     {
02073         long content_len     = 0;
02074         hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
02075         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02076         try
02077         {
02078             cont.init( content_handle, true );
02079         }
02080         catch( ReadHDF5Dataset::Exception )
02081         {
02082             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02083         }
02084     }
02085 
02086     if( child_sets )
02087     {
02088         long child_len     = 0;
02089         hid_t child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
02090         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02091         try
02092         {
02093             child.init( child_handle, true );
02094         }
02095         catch( ReadHDF5Dataset::Exception )
02096         {
02097             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02098         }
02099     }
02100 
02101     ErrorCode rval = MB_SUCCESS;
02102     Range children, new_children( sets_in_out );
02103     int iteration_count = 0;
02104     do
02105     {
02106         ++iteration_count;
02107         dbgOut.tprintf( 2, "Iteration %d of read_set_ids_recursive\n", iteration_count );
02108         children.clear();
02109         if( child_sets )
02110         {
02111             rval = read_set_data( new_children, 0, child, CHILD, &children );
02112             if( MB_SUCCESS != rval ) break;
02113         }
02114         if( contained_sets )
02115         {
02116             rval = read_set_data( new_children, 0, cont, CONTENT, &children );
02117             // Remove any non-set values
02118             Range::iterator it = children.lower_bound( fileInfo->sets.start_id );
02119             children.erase( children.begin(), it );
02120             it = children.lower_bound( fileInfo->sets.start_id + fileInfo->sets.count );
02121             children.erase( it, children.end() );
02122             if( MB_SUCCESS != rval ) break;
02123         }
02124         new_children = subtract( children, sets_in_out );
02125         dbgOut.print_ints( 2, "Adding additional contained/child sets", new_children );
02126         sets_in_out.merge( new_children );
02127     } while( !new_children.empty() );
02128 
02129     return MB_SUCCESS;
02130 }
02131 
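// Illustration: read_set_ids_recursive() above is a fixed-point iteration
// computing a transitive closure. Each pass reads the child/content links of
// only the sets discovered in the previous pass, and the loop stops when a
// pass adds nothing new. In pseudocode (hypothetical names; moab::subtract()
// and Range::merge() are the real primitives used above):
//
//   frontier = initial_sets;
//   while( !frontier.empty() )
//   {
//       found    = linked_sets_of( frontier );      // One pass of table reads
//       frontier = subtract( found, accumulated );  // Keep only new sets
//       accumulated.merge( frontier );
//   }
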
02132 ErrorCode ReadHDF5::find_sets_containing( Range& sets_out, bool read_set_containing_parents )
02133 {
02134     ErrorCode rval;
02135     mhdf_Status status;
02136 
02137     CHECK_OPEN_HANDLES;
02138 
02139     if( !fileInfo->have_set_contents ) return MB_SUCCESS;
02140     assert( fileInfo->sets.count );
02141 
02142     // Open data tables
02143     long content_len     = 0;
02144     hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
02145     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02146 
02147     hid_t data_type = H5Dget_type( content_handle );
02148 
02149     rval = find_sets_containing( content_handle, data_type, content_len, read_set_containing_parents, sets_out );
02150 
02151     H5Tclose( data_type );
02152 
02153     mhdf_closeData( filePtr, content_handle, &status );
02154     if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02155 
02156     return rval;
02157 }
02158 
02159 static bool set_map_intersect( bool ranged,
02160                                const long* contents,
02161                                int content_len,
02162                                const RangeMap< long, EntityHandle >& id_map )
02163 {
02164     if( ranged )
02165     {
02166         if( !content_len || id_map.empty() ) return false;
02167 
02168         const long* j         = contents;
02169         const long* const end = contents + content_len;
02170         assert( content_len % 2 == 0 );
02171         while( j != end )
02172         {
02173             long start = *( j++ );
02174             long count = *( j++ );
02175             if( id_map.intersects( start, count ) ) return true;
02176         }
02177     }
02178     else
02179     {
02180         const long* const end = contents + content_len;
02181         for( const long* i = contents; i != end; ++i )
02182             if( id_map.exists( *i ) ) return true;
02183     }
02184 
02185     return false;
02186 }
02187 
02188 struct SetContOffComp
02189 {
02190     bool operator()( const long a1[4], const long a2[4] )
02191     {
02192         return a1[ReadHDF5::CONTENT] < a2[0];
02193     }
02194 };
02195 
02196 ErrorCode ReadHDF5::find_sets_containing( hid_t contents_handle,
02197                                           hid_t content_type,
02198                                           long contents_len,
02199                                           bool read_set_containing_parents,
02200                                           Range& file_ids )
02201 {
02202     CHECK_OPEN_HANDLES;
02203 
02204     // Scan all set contents data
02205 
02206     const size_t content_size = H5Tget_size( content_type );
02207     const long num_sets       = fileInfo->sets.count;
02208     dbgOut.printf( 2, "Searching contents of %ld sets\n", num_sets );
02209     mhdf_Status status;
02210 
02211     int rank   = 0;
02212     bool bcast = false;
02213 #ifdef MOAB_HAVE_MPI
02214     MPI_Comm comm = 0;
02215     if( nativeParallel )
02216     {
02217         rank  = myPcomm->proc_config().proc_rank();
02218         comm  = myPcomm->proc_config().proc_comm();
02219         bcast = bcastDuplicateReads;
02220     }
02221 #endif
02222 
02223     // Check offsets so that we don't read past end of table or
02224     // walk off end of array.
02225     long prev = -1;
02226     for( long i = 0; i < num_sets; ++i )
02227     {
02228         if( setMeta[i][CONTENT] < prev )
02229         {
02230             std::cerr << "Invalid data in set contents offsets at position " << i << ": index " << setMeta[i][CONTENT]
02231                       << " is less than previous index " << prev << std::endl;
02232             std::cerr.flush();
02233             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02234         }
02235         prev = setMeta[i][CONTENT];
02236     }
02237     if( setMeta[num_sets - 1][CONTENT] >= contents_len )
02238     {
02239         std::cerr << "Maximum set content index " << setMeta[num_sets - 1][CONTENT]
02240                   << " exceeds contents table length of " << contents_len << std::endl;
02241         std::cerr.flush();
02242         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02243     }
02244 
02245     // Set up buffer for reading set contents
02246     long* const content_buffer = (long*)dataBuffer;
02247     const long content_len     = bufferSize / std::max( content_size, sizeof( long ) );
02248 
02249     // Scan set table
02250     Range::iterator hint = file_ids.begin();
02251     Range tmp_range;
02252     long prev_idx    = -1;
02253     int mm           = 0;
02254     long sets_offset = 0;
02255     long temp_content[4];
02256     while( sets_offset < num_sets )
02257     {
02258         temp_content[0] = content_len + prev_idx;
02259         long sets_count =
02260             std::lower_bound( setMeta + sets_offset, setMeta + num_sets, temp_content, SetContOffComp() ) - setMeta -
02261             sets_offset;
02262         assert( sets_count >= 0 && sets_offset + sets_count <= num_sets );
02263         if( !sets_count )
02264         {  // Contents of single set don't fit in buffer
02265             long content_remaining = setMeta[sets_offset][CONTENT] - prev_idx;
02266             long content_offset    = prev_idx + 1;
02267             while( content_remaining )
02268             {
02269                 long content_count = content_len < content_remaining ? 2 * ( content_len / 2 ) : content_remaining;
02270                 assert_range( content_buffer, content_count );
02271                 dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, content_count );
02272                 if( !bcast || 0 == rank )
02273                 {
02274                     if( !bcast )
02275                         mhdf_readSetDataWithOpt( contents_handle, content_offset, content_count, content_type,
02276                                                  content_buffer, collIO, &status );
02277                     else
02278                         mhdf_readSetData( contents_handle, content_offset, content_count, content_type, content_buffer,
02279                                           &status );
02280                     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02281 
02282                     H5Tconvert( content_type, H5T_NATIVE_LONG, content_count, content_buffer, 0, H5P_DEFAULT );
02283                 }
02284                 if( bcast )
02285                 {
02286 #ifdef MOAB_HAVE_MPI
02287                     int ierr = MPI_Bcast( content_buffer, content_count, MPI_LONG, 0, comm );
02288                     if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02289 #else
02290                     assert( rank == 0 );  // If not MPI, then only one proc
02291 #endif
02292                 }
02293 
02294                 if( read_set_containing_parents )
02295                 {
02296                     tmp_range.clear();
02297                     if( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT )
02298                         tmp_range.insert( *content_buffer, *( content_buffer + 1 ) );
02299                     else
02300                         std::copy( content_buffer, content_buffer + content_count, range_inserter( tmp_range ) );
02301                     tmp_range = intersect( tmp_range, file_ids );
02302                 }
02303 
02304                 if( !tmp_range.empty() || set_map_intersect( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT,
02305                                                              content_buffer, content_count, idMap ) )
02306                 {
02307                     long id = fileInfo->sets.start_id + sets_offset;
02308                     hint    = file_ids.insert( hint, id, id );
02309                     if( !nativeParallel )  // Don't stop if doing READ_PART because we need to read
02310                                            // collectively
02311                         break;
02312                 }
02313                 content_remaining -= content_count;
02314                 content_offset += content_count;
02315             }
02316             prev_idx   = setMeta[sets_offset][CONTENT];
02317             sets_count = 1;
02318         }
02319         else if( long read_num = setMeta[sets_offset + sets_count - 1][CONTENT] - prev_idx )
02320         {
02321             assert( sets_count > 0 );
02322             assert_range( content_buffer, read_num );
02323             dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, read_num );
02324             if( !bcast || 0 == rank )
02325             {
02326                 if( !bcast )
02327                     mhdf_readSetDataWithOpt( contents_handle, prev_idx + 1, read_num, content_type, content_buffer,
02328                                              collIO, &status );
02329                 else
02330                     mhdf_readSetData( contents_handle, prev_idx + 1, read_num, content_type, content_buffer, &status );
02331                 if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02332 
02333                 H5Tconvert( content_type, H5T_NATIVE_LONG, read_num, content_buffer, 0, H5P_DEFAULT );
02334             }
02335             if( bcast )
02336             {
02337 #ifdef MOAB_HAVE_MPI
02338                 int ierr = MPI_Bcast( content_buffer, read_num, MPI_LONG, 0, comm );
02339                 if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02340 #else
02341                 assert( rank == 0 );      // If not MPI, then only one proc
02342 #endif
02343             }
02344 
02345             long* buff_iter = content_buffer;
02346             for( long i = 0; i < sets_count; ++i )
02347             {
02348                 long set_size = setMeta[i + sets_offset][CONTENT] - prev_idx;
02349                 prev_idx += set_size;
02350 
02351                 // Check whether contents include set already being loaded
02352                 if( read_set_containing_parents )
02353                 {
02354                     tmp_range.clear();
02355                     if( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT )
02356                     {
02357                         // Put the contents of the set in tmp_range;
02358                         // at this point file_ids contains only other sets
02359                         const long* j         = buff_iter;
02360                         const long* const end = buff_iter + set_size;
02361                         assert( set_size % 2 == 0 );
02362                         while( j != end )
02363                         {
02364                             long start = *( j++ );
02365                             long count = *( j++ );
02366                             tmp_range.insert( start, start + count - 1 );
02367                         }
02368                     }
02369                     else
02370                         std::copy( buff_iter, buff_iter + set_size, range_inserter( tmp_range ) );
02371                     tmp_range = intersect( tmp_range, file_ids );
02372                 }
02373 
02374                 if( !tmp_range.empty() ||
02375                     set_map_intersect( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT, buff_iter, set_size, idMap ) )
02376                 {
02377                     long id = fileInfo->sets.start_id + sets_offset + i;
02378                     hint    = file_ids.insert( hint, id, id );
02379                 }
02380                 buff_iter += set_size;
02381             }
02382         }
02383 
02384         sets_offset += sets_count;
02385     }
02386 
02387     return MB_SUCCESS;
02388 }
02389 
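// Illustration: the buffer partitioning in find_sets_containing() above. The
// set meta rows hold cumulative, inclusive end offsets into the contents
// table, so the largest run of whole sets that fits in a buffer of budget
// values is found with a single binary search: take every set whose end
// offset is below prev + budget (the SetContOffComp lower_bound above). A
// sketch with hypothetical names:
//
//   #include <algorithm>
//
//   // ends[i] = inclusive end offset of set i's contents. Returns how many
//   // whole sets, starting at first, fit within budget values after prev.
//   static long sets_that_fit( const long* ends, long num_sets, long first,
//                              long prev, long budget )
//   {
//       const long limit = prev + budget;  // First end offset that does not fit
//       return std::lower_bound( ends + first, ends + num_sets, limit ) - ( ends + first );
//   }
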
02390 static Range::iterator copy_set_contents( Range::iterator hint,
02391                                           int ranged,
02392                                           EntityHandle* contents,
02393                                           long length,
02394                                           Range& results )
02395 {
02396     if( ranged )
02397     {
02398         assert( length % 2 == 0 );
02399         for( long i = 0; i < length; i += 2 )
02400             hint = results.insert( hint, contents[i], contents[i] + contents[i + 1] - 1 );
02401     }
02402     else
02403     {
02404         std::sort( contents, contents + length );
02405         for( long i = 0; i < length; ++i )
02406             hint = results.insert( hint, contents[i] );
02407     }
02408     return hint;
02409 }
02410 
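// Illustration: the two set-content encodings handled by copy_set_contents()
// above. With the ranged flag, the list is (start, count) pairs, so
// {10, 3, 42, 1} expands to IDs {10, 11, 12, 42}; without it, the list is
// the IDs themselves in arbitrary order, hence the sort before insertion.
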
02411 ErrorCode ReadHDF5::read_set_data( const Range& set_file_ids,
02412                                    EntityHandle start_handle,
02413                                    ReadHDF5Dataset& data,
02414                                    SetMode mode,
02415                                    Range* file_ids_out )
02416 {
02417     ErrorCode rval;
02418     Range::const_pair_iterator pi;
02419     Range::iterator out_hint;
02420     if( file_ids_out ) out_hint = file_ids_out->begin();
02421 
02422     // Construct range of offsets into data table at which to read
02423     // Note: all offsets are incremented by TWEAK because Range cannot
02424     // store zeros.
02425     const long TWEAK = 1;
02426     Range data_offsets;
02427     Range::iterator hint = data_offsets.begin();
02428     pi                   = set_file_ids.const_pair_begin();
02429     if( (long)pi->first == fileInfo->sets.start_id )
02430     {
02431         long second = pi->second - fileInfo->sets.start_id;
02432         if( setMeta[second][mode] >= 0 ) hint = data_offsets.insert( hint, TWEAK, setMeta[second][mode] + TWEAK );
02433         ++pi;
02434     }
02435     for( ; pi != set_file_ids.const_pair_end(); ++pi )
02436     {
02437         long first  = pi->first - fileInfo->sets.start_id;
02438         long second = pi->second - fileInfo->sets.start_id;
02439         long idx1   = setMeta[first - 1][mode] + 1;
02440         long idx2   = setMeta[second][mode];
02441         if( idx2 >= idx1 ) hint = data_offsets.insert( hint, idx1 + TWEAK, idx2 + TWEAK );
02442     }
02443     try
02444     {
02445         data.set_file_ids( data_offsets, TWEAK, bufferSize / sizeof( EntityHandle ), handleType );
02446     }
02447     catch( ReadHDF5Dataset::Exception )
02448     {
02449         return MB_FAILURE;
02450     }
02451 
02452     // We need to increment this for each processed set because
02453     // the sets were created in the order of the ids in file_ids.
02454     EntityHandle h = start_handle;
02455 
02456     const long ranged_flag = ( mode == CONTENT ) ? mhdf_SET_RANGE_BIT : 0;
02457 
02458     std::vector< EntityHandle > partial;  // For when we read only part of the contents of a set/entity
02459     Range::const_iterator fileid_iter = set_file_ids.begin();
02460     EntityHandle* buffer              = reinterpret_cast< EntityHandle* >( dataBuffer );
02461     size_t count, offset;
02462 
02463     int nn = 0;
02464     /*
02465     #ifdef  MOAB_HAVE_MPI
02466       if (nativeParallel && mode==CONTENT && myPcomm->proc_config().proc_size()>1 &&
02467     data_offsets.empty())
02468       {
02469         MB_SET_ERR_CONT( "ReadHDF5 Failure: Attempt reading an empty dataset on proc " <<
02470             myPcomm->proc_config().proc_rank());
02471         MPI_Abort(myPcomm->proc_config().proc_comm(), 1);
02472       }
02473     #endif
02474     */
02475     if( ( 1 >= set_file_ids.size() ) && ( data.done() ) && moab::ReadHDF5::CONTENT == mode )
02476         // Do at least one null read; in parallel every process must take part in the collective read
02477         data.null_read();
02478 
02479     while( !data.done() )
02480     {
02481         dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data.get_debug_desc() );
02482         try
02483         {
02484             data.read( buffer, count );
02485         }
02486         catch( ReadHDF5Dataset::Exception )
02487         {
02488             return MB_FAILURE;
02489         }
02490 
02491         // An assert is not appropriate here: this process may have handled all of its
02492         // file ids while another process has not; in that case count is zero and nothing
02493         // is done locally, but the collective read must still be executed
02494 
02495         // Handle 'special' case where we read some, but not all
02496         // of the data for an entity during the last iteration.
02497         offset = 0;
02498         if( !partial.empty() )
02499         {  // Didn't read all of previous entity
02500             assert( fileid_iter != set_file_ids.end() );
02501             size_t num_prev = partial.size();
02502             size_t idx      = *fileid_iter - fileInfo->sets.start_id;
02503             size_t len      = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
02504             offset          = len - num_prev;
02505             if( offset > count )
02506             {  // Still don't have all
02507                 partial.insert( partial.end(), buffer, buffer + count );
02508                 continue;
02509             }
02510 
02511             partial.insert( partial.end(), buffer, buffer + offset );
02512             if( file_ids_out )
02513             {
02514                 out_hint = copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, &partial[0], partial.size(),
02515                                               *file_ids_out );
02516             }
02517             else
02518             {
02519                 switch( mode )
02520                 {
02521                     size_t valid;
02522                     case CONTENT:
02523                         if( setMeta[idx][3] & ranged_flag )
02524                         {
02525                             if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
02526                             Range range;
02527                             convert_range_to_handle( &partial[0], len / 2, range );
02528                             rval = moab()->add_entities( h, range );
02529                         }
02530                         else
02531                         {
02532                             convert_id_to_handle( &partial[0], len, valid );
02533                             rval = moab()->add_entities( h, &partial[0], valid );
02534                         }
02535                         break;
02536                     case CHILD:
02537                         convert_id_to_handle( &partial[0], len, valid );
02538                         rval = moab()->add_child_meshsets( h, &partial[0], valid );
02539                         break;
02540                     case PARENT:
02541                         convert_id_to_handle( &partial[0], len, valid );
02542                         rval = moab()->add_parent_meshsets( h, &partial[0], valid );
02543                         break;
02544                     default:
02545                         break;
02546                 }
02547                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02548             }
02549 
02550             ++fileid_iter;
02551             ++h;
02552             partial.clear();
02553         }
02554 
02555         // Process contents for all entities for which we
02556         // have read the complete list
02557         while( offset < count )
02558         {
02559             assert( fileid_iter != set_file_ids.end() );
02560             size_t idx = *fileid_iter - fileInfo->sets.start_id;
02561             size_t len = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
02562             // If we did not read all of the final entity,
02563             // store what we did read to be processed in the
02564             // next iteration
02565             if( offset + len > count )
02566             {
02567                 partial.insert( partial.end(), buffer + offset, buffer + count );
02568                 break;
02569             }
02570 
02571             if( file_ids_out )
02572             {
02573                 out_hint =
02574                     copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, buffer + offset, len, *file_ids_out );
02575             }
02576             else
02577             {
02578                 switch( mode )
02579                 {
02580                     size_t valid;
02581                     case CONTENT:
02582                         if( setMeta[idx][3] & ranged_flag )
02583                         {
02584                             if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
02585                             Range range;
02586                             convert_range_to_handle( buffer + offset, len / 2, range );
02587                             rval = moab()->add_entities( h, range );
02588                         }
02589                         else
02590                         {
02591                             convert_id_to_handle( buffer + offset, len, valid );
02592                             rval = moab()->add_entities( h, buffer + offset, valid );
02593                         }
02594                         break;
02595                     case CHILD:
02596                         convert_id_to_handle( buffer + offset, len, valid );
02597                         rval = moab()->add_child_meshsets( h, buffer + offset, valid );
02598                         break;
02599                     case PARENT:
02600                         convert_id_to_handle( buffer + offset, len, valid );
02601                         rval = moab()->add_parent_meshsets( h, buffer + offset, valid );
02602                         break;
02603                     default:
02604                         break;
02605                 }
02606                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02607             }
02608 
02609             ++fileid_iter;
02610             ++h;
02611             offset += len;
02612         }
02613     }
02614 
02615     return MB_SUCCESS;
02616 }
02617 
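// Illustration: the TWEAK offset trick in read_set_data() above. Offsets into
// the data table are 0-based, but a Range cannot store the value 0, so every
// offset is inserted shifted by TWEAK = 1: table rows {0..4} become Range
// entries {1..5}, and passing TWEAK as the start id to set_file_ids() makes
// the dataset reader subtract the shift back out when seeking.
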
02618 ErrorCode ReadHDF5::get_set_contents( const Range& sets, Range& file_ids )
02619 {
02620     CHECK_OPEN_HANDLES;
02621 
02622     if( !fileInfo->have_set_contents ) return MB_SUCCESS;
02623     dbgOut.tprint( 2, "Reading set contained file IDs\n" );
02624     try
02625     {
02626         mhdf_Status status;
02627         long content_len;
02628         hid_t contents = mhdf_openSetData( filePtr, &content_len, &status );
02629         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02630         ReadHDF5Dataset data( "set contents", contents, nativeParallel, mpiComm, true );
02631 
02632         return read_set_data( sets, 0, data, CONTENT, &file_ids );
02633     }
02634     catch( ReadHDF5Dataset::Exception )
02635     {
02636         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02637     }
02638 }
02639 
02640 ErrorCode ReadHDF5::read_adjacencies( hid_t table, long table_len )
02641 {
02642     CHECK_OPEN_HANDLES;
02643 
02644     ErrorCode rval;
02645     mhdf_Status status;
02646 
02647     debug_barrier();
02648 
02649     hid_t read_type = H5Dget_type( table );
02650     if( read_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02651     const bool convert = !H5Tequal( read_type, handleType );
02652 
02653     EntityHandle* buffer = (EntityHandle*)dataBuffer;
02654     size_t chunk_size    = bufferSize / H5Tget_size( read_type );
02655     size_t remaining     = table_len;
02656     size_t left_over     = 0;
02657     size_t offset        = 0;
02658     dbgOut.printf( 3, "Reading adjacency list in %lu chunks\n",
02659                    (unsigned long)( remaining + chunk_size - 1 ) / chunk_size );
02660     int nn = 0;
02661     while( remaining )
02662     {
02663         dbgOut.printf( 3, "Reading chunk %d of adjacency list\n", ++nn );
02664 
02665         size_t count = std::min( chunk_size, remaining );
02666         count -= left_over;
02667         remaining -= count;
02668 
02669         assert_range( buffer + left_over, count );
02670         mhdf_readAdjacencyWithOpt( table, offset, count, read_type, buffer + left_over, collIO, &status );
02671         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02672 
02673         if( convert )
02674         {
02675             herr_t err = H5Tconvert( read_type, handleType, count, buffer + left_over, 0, H5P_DEFAULT );
02676             if( err < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02677         }
02678 
02679         EntityHandle* iter = buffer;
02680         EntityHandle* end  = buffer + count + left_over;
02681         while( end - iter >= 3 )
02682         {
02683             EntityHandle h      = idMap.find( *iter++ );
02684             EntityHandle count2 = *iter++;
02685             if( !h )
02686             {
02687                 iter += count2;
02688                 continue;
02689             }
02690 
02691             if( count2 < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02692 
02693             if( end < count2 + iter )
02694             {
02695                 iter -= 2;
02696                 break;
02697             }
02698 
02699             size_t valid;
02700             convert_id_to_handle( iter, count2, valid, idMap );
02701             rval = iFace->add_adjacencies( h, iter, valid, false );
02702             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02703 
02704             iter += count2;
02705         }
02706 
02707         left_over = end - iter;
02708         assert_range( (char*)buffer, left_over );
02709         assert_range( (char*)iter, left_over );
02710         memmove( buffer, iter, left_over );
02711     }
02712 
02713     assert( !left_over );  // Unexpected truncation of data
02714 
02715     return MB_SUCCESS;
02716 }
02717 
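// The loop above decodes the adjacency table as a flat stream of records,
// each of the form [ entity file id, count n, n adjacent file ids ]. A
// minimal sketch of walking one fully buffered table, assuming that layout
// (the helper below is illustrative only, not part of MOAB):
#if 0
static void print_adjacency_records( const EntityHandle* table, size_t len )
{
    size_t i = 0;
    while( len - i >= 3 )
    {
        const EntityHandle id = table[i++];  // File id of the entity
        const size_t n        = table[i++];  // Number of adjacent entities
        if( i + n > len ) break;             // Record truncated at buffer end
        std::cout << "entity " << id << " has " << n << " adjacencies\n";
        i += n;  // Skip over the adjacent-entity ids
    }
}
#endif
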
02718 ErrorCode ReadHDF5::read_tag( int tag_index )
02719 {
02720     CHECK_OPEN_HANDLES;
02721 
02722     dbgOut.tprintf( 2, "Reading tag \"%s\"\n", fileInfo->tags[tag_index].name );
02723 
02724     debug_barrier();
02725 
02726     ErrorCode rval;
02727     mhdf_Status status;
02728     Tag tag         = 0;
02729     hid_t read_type = -1;
02730     bool table_type;
02731     rval = create_tag( fileInfo->tags[tag_index], tag, read_type );
02732     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02733 
02734     if( fileInfo->tags[tag_index].have_sparse )
02735     {
02736         hid_t handles[3];
02737         long num_ent, num_val;
02738         mhdf_openSparseTagData( filePtr, fileInfo->tags[tag_index].name, &num_ent, &num_val, handles, &status );
02739         if( is_error( status ) )
02740         {
02741             if( read_type ) H5Tclose( read_type );
02742             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02743         }
02744 
02745         table_type = false;
02746         if( read_type == 0 )
02747         {
02748             read_type = H5Dget_type( handles[1] );
02749             if( read_type < 0 )
02750             {
02751                 mhdf_closeData( filePtr, handles[0], &status );
02752                 mhdf_closeData( filePtr, handles[1], &status );
02753                 if( fileInfo->tags[tag_index].size <= 0 ) mhdf_closeData( filePtr, handles[2], &status );
02754                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02755             }
02756             table_type = true;
02757         }
02758 
02759         if( fileInfo->tags[tag_index].size > 0 )
02760         {
02761             dbgOut.printf( 2, "Reading sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
02762             rval = read_sparse_tag( tag, read_type, handles[0], handles[1], num_ent );
02763         }
02764         else
02765         {
02766             dbgOut.printf( 2, "Reading var-len sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
02767             rval = read_var_len_tag( tag, read_type, handles[0], handles[1], handles[2], num_ent, num_val );
02768         }
02769 
02770         if( table_type )
02771         {
02772             H5Tclose( read_type );
02773             read_type = 0;
02774         }
02775 
02776         mhdf_closeData( filePtr, handles[0], &status );
02777         if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02778         mhdf_closeData( filePtr, handles[1], &status );
02779         if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02780         if( fileInfo->tags[tag_index].size <= 0 )
02781         {
02782             mhdf_closeData( filePtr, handles[2], &status );
02783             if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02784         }
02785         if( MB_SUCCESS != rval )
02786         {
02787             if( read_type ) H5Tclose( read_type );
02788             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02789         }
02790     }
02791 
02792     for( int j = 0; j < fileInfo->tags[tag_index].num_dense_indices; ++j )
02793     {
02794         long count;
02795         const char* name = 0;
02796         mhdf_EntDesc* desc;
02797         int elem_idx = fileInfo->tags[tag_index].dense_elem_indices[j];
02798         if( elem_idx == -2 )
02799         {
02800             desc = &fileInfo->sets;
02801             name = mhdf_set_type_handle();
02802         }
02803         else if( elem_idx == -1 )
02804         {
02805             desc = &fileInfo->nodes;
02806             name = mhdf_node_type_handle();
02807         }
02808         else if( elem_idx >= 0 && elem_idx < fileInfo->num_elem_desc )
02809         {
02810             desc = &fileInfo->elems[elem_idx].desc;
02811             name = fileInfo->elems[elem_idx].handle;
02812         }
02813         else
02814         {
02815             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02816         }
02817 
02818         dbgOut.printf( 2, "Read dense data block for tag \"%s\" on \"%s\"\n", fileInfo->tags[tag_index].name, name );
02819 
02820         hid_t handle = mhdf_openDenseTagData( filePtr, fileInfo->tags[tag_index].name, name, &count, &status );
02821         if( is_error( status ) )
02822         {
02823             rval = MB_FAILURE;  // rval = error(MB_FAILURE);
02824             break;
02825         }
02826 
02827         if( count > desc->count )
02828         {
02829             mhdf_closeData( filePtr, handle, &status );
02830             MB_SET_ERR( MB_FAILURE,
02831                         "Invalid data length for dense tag data: " << name << "/" << fileInfo->tags[tag_index].name );
02832         }
02833 
02834         table_type = false;
02835         if( read_type == 0 )
02836         {
02837             read_type = H5Dget_type( handle );
02838             if( read_type < 0 )
02839             {
02840                 mhdf_closeData( filePtr, handle, &status );
02841                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02842             }
02843             table_type = true;
02844         }
02845 
02846         rval = read_dense_tag( tag, name, read_type, handle, desc->start_id, count );
02847 
02848         if( table_type )
02849         {
02850             H5Tclose( read_type );
02851             read_type = 0;
02852         }
02853 
02854         mhdf_closeData( filePtr, handle, &status );
02855         if( MB_SUCCESS != rval ) break;
02856         if( is_error( status ) )
02857         {
02858             rval = MB_FAILURE;
02859             break;
02860         }
02861     }
02862 
02863     if( read_type ) H5Tclose( read_type );
02864     return rval;
02865 }
02866 
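// In the dense-tag loop above, dense_elem_indices[j] selects which entity
// group a dense table is attached to: -2 is the set description, -1 the
// node description, and values >= 0 index fileInfo->elems. That decoding in
// isolation, assuming the mhdf_FileDesc layout used above (the helper name
// is illustrative only, not part of MOAB):
#if 0
static const char* dense_group_name( const mhdf_FileDesc* info, int elem_idx )
{
    if( elem_idx == -2 ) return mhdf_set_type_handle();   // Entity sets
    if( elem_idx == -1 ) return mhdf_node_type_handle();  // Nodes
    if( elem_idx >= 0 && elem_idx < info->num_elem_desc ) return info->elems[elem_idx].handle;
    return 0;  // Invalid group index
}
#endif
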
02867 ErrorCode ReadHDF5::create_tag( const mhdf_TagDesc& info, Tag& handle, hid_t& hdf_type )
02868 {
02869     CHECK_OPEN_HANDLES;
02870 
02871     ErrorCode rval;
02872     mhdf_Status status;
02873     TagType storage;
02874     DataType mb_type;
02875     bool re_read_default = false;
02876 
02877     switch( info.storage )
02878     {
02879         case mhdf_DENSE_TYPE:
02880             storage = MB_TAG_DENSE;
02881             break;
02882         case mhdf_SPARSE_TYPE:
02883             storage = MB_TAG_SPARSE;
02884             break;
02885         case mhdf_BIT_TYPE:
02886             storage = MB_TAG_BIT;
02887             break;
02888         case mhdf_MESH_TYPE:
02889             storage = MB_TAG_MESH;
02890             break;
02891         default:
02892             MB_SET_ERR( MB_FAILURE, "Invalid storage type for tag '" << info.name << "': " << info.storage );
02893     }
02894 
02895     // Type-specific stuff
02896     if( info.type == mhdf_BITFIELD )
02897     {
02898         if( info.size < 1 || info.size > 8 )
02899         {
02900             MB_SET_ERR( MB_FAILURE, "Invalid bit tag: class is MB_TAG_BIT, num bits = " << info.size );
02901         }
02902         hdf_type = H5Tcopy( H5T_NATIVE_B8 );
02903         mb_type  = MB_TYPE_BIT;
02904         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02905     }
02906     else if( info.type == mhdf_OPAQUE )
02907     {
02908         mb_type = MB_TYPE_OPAQUE;
02909 
02910         // Check for user-provided type
02911         Tag type_handle;
02912         std::string tag_type_name = "__hdf5_tag_type_";
02913         tag_type_name += info.name;
02914         rval = iFace->tag_get_handle( tag_type_name.c_str(), sizeof( hid_t ), MB_TYPE_OPAQUE, type_handle );
02915         if( MB_SUCCESS == rval )
02916         {
02917             EntityHandle root = 0;
02918             rval              = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
02919             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02920             hdf_type        = H5Tcopy( hdf_type );
02921             re_read_default = true;
02922         }
02923         else if( MB_TAG_NOT_FOUND == rval )
02924         {
02925             hdf_type = 0;
02926         }
02927         else
02928             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02929 
02930         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02931     }
02932     else
02933     {
02934         switch( info.type )
02935         {
02936             case mhdf_INTEGER:
02937                 hdf_type = H5T_NATIVE_INT;
02938                 mb_type  = MB_TYPE_INTEGER;
02939                 break;
02940             case mhdf_FLOAT:
02941                 hdf_type = H5T_NATIVE_DOUBLE;
02942                 mb_type  = MB_TYPE_DOUBLE;
02943                 break;
02944             case mhdf_BOOLEAN:
02945                 hdf_type = H5T_NATIVE_UINT;
02946                 mb_type  = MB_TYPE_INTEGER;
02947                 break;
02948             case mhdf_ENTITY_ID:
02949                 hdf_type = handleType;
02950                 mb_type  = MB_TYPE_HANDLE;
02951                 break;
02952             default:
02953                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02954         }
02955 
02956         if( info.size > 1 )
02957         {  // Array
02958             hsize_t tmpsize = info.size;
02959 #if defined( H5Tarray_create_vers ) && H5Tarray_create_vers > 1
02960             hdf_type = H5Tarray_create2( hdf_type, 1, &tmpsize );
02961 #else
02962             hdf_type = H5Tarray_create( hdf_type, 1, &tmpsize, NULL );
02963 #endif
02964         }
02965         else
02966         {
02967             hdf_type = H5Tcopy( hdf_type );
02968         }
02969         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02970     }
02971 
02972     // If default or global/mesh value in file, read it.
02973     if( info.default_value || info.global_value )
02974     {
02975         if( re_read_default )
02976         {
02977             mhdf_getTagValues( filePtr, info.name, hdf_type, info.default_value, info.global_value, &status );
02978             if( mhdf_isError( &status ) )
02979             {
02980                 if( hdf_type ) H5Tclose( hdf_type );
02981                 MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
02982             }
02983         }
02984 
02985         if( MB_TYPE_HANDLE == mb_type )
02986         {
02987             if( info.default_value )
02988             {
02989                 rval = convert_id_to_handle( (EntityHandle*)info.default_value, info.default_value_size );
02990                 if( MB_SUCCESS != rval )
02991                 {
02992                     if( hdf_type ) H5Tclose( hdf_type );
02993                     MB_SET_ERR( rval, "ReadHDF5 Failure" );
02994                 }
02995             }
02996             if( info.global_value )
02997             {
02998                 rval = convert_id_to_handle( (EntityHandle*)info.global_value, info.global_value_size );
02999                 if( MB_SUCCESS != rval )
03000                 {
03001                     if( hdf_type ) H5Tclose( hdf_type );
03002                     MB_SET_ERR( rval, "ReadHDF5 Failure" );
03003                 }
03004             }
03005         }
03006     }
03007 
03008     // Get tag handle, creating if necessary
03009     if( info.size < 0 )
03010         rval = iFace->tag_get_handle( info.name, info.default_value_size, mb_type, handle,
03011                                       storage | MB_TAG_CREAT | MB_TAG_VARLEN | MB_TAG_DFTOK, info.default_value );
03012     else
03013         rval = iFace->tag_get_handle( info.name, info.size, mb_type, handle, storage | MB_TAG_CREAT | MB_TAG_DFTOK,
03014                                       info.default_value );
03015     if( MB_SUCCESS != rval )
03016     {
03017         if( hdf_type ) H5Tclose( hdf_type );
03018         MB_SET_ERR( MB_FAILURE, "Tag type in file does not match type in database for \"" << info.name << "\"" );
03019     }
03020 
03021     if( info.global_value )
03022     {
03023         EntityHandle root = 0;
03024         if( info.size > 0 )
03025         {  // Fixed-length tag
03026             rval = iFace->tag_set_data( handle, &root, 1, info.global_value );
03027         }
03028         else
03029         {
03030             int tag_size = info.global_value_size;
03031             rval         = iFace->tag_set_by_ptr( handle, &root, 1, &info.global_value, &tag_size );
03032         }
03033         if( MB_SUCCESS != rval )
03034         {
03035             if( hdf_type ) H5Tclose( hdf_type );
03036             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03037         }
03038     }
03039 
03040     return MB_SUCCESS;
03041 }
03042 
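// A note on the hdf_type convention established by create_tag(): hdf_type
// is left as 0 for opaque tags (callers then read using the type stored in
// the file), and is otherwise a copied or newly created hid_t owned by the
// caller. A sketch of the intended call pattern, assuming that convention
// (illustrative only):
#if 0
int i           = 0;  // Placeholder: index of some tag in fileInfo->tags
Tag tag         = 0;
hid_t read_type = -1;
ErrorCode rval  = create_tag( fileInfo->tags[i], tag, read_type );
// ... pass read_type to the dataset reads, treating 0 as "use file type" ...
if( MB_SUCCESS == rval && read_type ) H5Tclose( read_type );
#endif
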
03043 ErrorCode ReadHDF5::read_dense_tag( Tag tag_handle,
03044                                     const char* ent_name,
03045                                     hid_t hdf_read_type,
03046                                     hid_t data,
03047                                     long start_id,
03048                                     long num_values )
03049 {
03050     CHECK_OPEN_HANDLES;
03051 
03052     ErrorCode rval;
03053     DataType mb_type;
03054 
03055     rval = iFace->tag_get_data_type( tag_handle, mb_type );
03056     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03057 
03058     int read_size;
03059     rval = iFace->tag_get_bytes( tag_handle, read_size );
03060     if( MB_SUCCESS != rval )  // Wrong function for variable-length tags
03061         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03062     // if (MB_TYPE_BIT == mb_type)
03063     // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
03064 
03065     if( hdf_read_type )
03066     {  // If not opaque
03067         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03068         if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03069     }
03070 
03071     // Get actual entities read from file
03072     Range file_ids, handles;
03073     Range::iterator f_ins = file_ids.begin(), h_ins = handles.begin();
03074     IDMap::iterator l, u;
03075     l = idMap.lower_bound( start_id );
03076     u = idMap.lower_bound( start_id + num_values - 1 );
03077     if( l != idMap.end() && start_id + num_values > l->begin )
03078     {
03079         if( l == u )
03080         {
03081             size_t beg = std::max( start_id, l->begin );
03082             size_t end = std::min( start_id + num_values, u->begin + u->count ) - 1;
03083             f_ins      = file_ids.insert( f_ins, beg, end );
03084             h_ins      = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + ( end - l->begin ) );
03085         }
03086         else
03087         {
03088             size_t beg = std::max( start_id, l->begin );
03089             f_ins      = file_ids.insert( f_ins, beg, l->begin + l->count - 1 );
03090             h_ins      = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + l->count - 1 );
03091             for( ++l; l != u; ++l )
03092             {
03093                 f_ins = file_ids.insert( f_ins, l->begin, l->begin + l->count - 1 );
03094                 h_ins = handles.insert( h_ins, l->value, l->value + l->count - 1 );
03095             }
03096             if( u != idMap.end() && u->begin < start_id + num_values )
03097             {
03098                 size_t end = std::min( start_id + num_values, u->begin + u->count ) - 1;
03099                 f_ins      = file_ids.insert( f_ins, u->begin, end );
03100                 h_ins      = handles.insert( h_ins, u->value, u->value + end - u->begin );
03101             }
03102         }
03103     }
03104 
03105     // Given that all of the entities for this dense tag data should
03106     // have been created as a single contiguous block, the resulting
03107     // MOAB handle range should be contiguous.
03108     // NOTE: The above is not necessarily true; for partial reads,
03109     // lower-dimension entities are sometimes read and then deleted.
03110     // assert(handles.empty() || handles.size() == (handles.back() - handles.front() + 1));
03111 
03112     std::string tn( "<error>" );
03113     iFace->tag_get_name( tag_handle, tn );
03114     tn += " data for ";
03115     tn += ent_name;
03116     try
03117     {
03118         h_ins = handles.begin();
03119         ReadHDF5Dataset reader( tn.c_str(), data, nativeParallel, mpiComm, false );
03120         long buffer_size = bufferSize / read_size;
03121         reader.set_file_ids( file_ids, start_id, buffer_size, hdf_read_type );
03122         dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tn.c_str(), ent_name,
03123                        reader.get_read_count() );
03124         int nn = 0;
03125         while( !reader.done() )
03126         {
03127             dbgOut.printf( 3, "Reading chunk %d of \"%s\" data\n", ++nn, tn.c_str() );
03128 
03129             size_t count;
03130             reader.read( dataBuffer, count );
03131 
03132             if( MB_TYPE_HANDLE == mb_type )
03133             {
03134                 rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * read_size / sizeof( EntityHandle ) );
03135                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03136             }
03137 
03138             Range ents;
03139             Range::iterator end = h_ins;
03140             end += count;
03141             ents.insert( h_ins, end );
03142             h_ins = end;
03143 
03144             rval = iFace->tag_set_data( tag_handle, ents, dataBuffer );
03145             if( MB_SUCCESS != rval )
03146             {
03147                 dbgOut.printf( 1, "Internal error setting data for tag \"%s\"\n", tn.c_str() );
03148                 MB_SET_ERR( rval, "ReadHDF5 Failure" );
03149             }
03150         }
03151     }
03152     catch( ReadHDF5Dataset::Exception )
03153     {
03154         dbgOut.printf( 1, "Internal error reading dense data for tag \"%s\"\n", tn.c_str() );
03155         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03156     }
03157 
03158     return MB_SUCCESS;
03159 }
03160 
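// The range arithmetic above intersects the dense table's id block
// [start_id, start_id + num_values) with each entry of the id map. Reduced
// to a single map entry r = { begin, count, value }, the computation is
// (sketch only, assuming the IDMap typedef used above; the helper is
// illustrative, not part of MOAB):
#if 0
static bool map_block( long start_id, long num_values, const ReadHDF5::IDMap::Range& r,
                       EntityHandle& first_handle, long& overlap )
{
    const long beg = std::max( start_id, (long)r.begin );
    const long end = std::min( start_id + num_values, (long)( r.begin + r.count ) );  // Exclusive
    if( beg >= end ) return false;               // No overlap with this entry
    first_handle = r.value + ( beg - r.begin );  // Handle of the first overlapping id
    overlap      = end - beg;                    // Number of overlapping ids
    return true;
}
#endif
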
03161 // Read the entire ID table. For those file IDs that correspond to
03162 // entities we have read from the file, add the offset to the offset
03163 // range and the handle to the handle range. If the handles are not
03164 // ordered, switch to using a vector.
03165 ErrorCode ReadHDF5::read_sparse_tag_indices( const char* name,
03166                                              hid_t id_table,
03167                                              EntityHandle start_offset,  // Can't put zero in a Range
03168                                              Range& offset_range,
03169                                              Range& handle_range,
03170                                              std::vector< EntityHandle >& handle_vect )
03171 {
03172     CHECK_OPEN_HANDLES;
03173 
03174     offset_range.clear();
03175     handle_range.clear();
03176     handle_vect.clear();
03177 
03178     ErrorCode rval;
03179     Range::iterator handle_hint = handle_range.begin();
03180     Range::iterator offset_hint = offset_range.begin();
03181 
03182     EntityHandle* idbuf = (EntityHandle*)dataBuffer;
03183     size_t idbuf_size   = bufferSize / sizeof( EntityHandle );
03184 
03185     std::string tn( name );
03186     tn += " indices";
03187 
03188     assert( start_offset > 0 );  // Can't put zero in a Range
03189     try
03190     {
03191         ReadHDF5Dataset id_reader( tn.c_str(), id_table, nativeParallel, mpiComm, false );
03192         id_reader.set_all_file_ids( idbuf_size, handleType );
03193         size_t offset = start_offset;
03194         dbgOut.printf( 3, "Reading file ids for sparse tag \"%s\" in %lu chunks\n", name, id_reader.get_read_count() );
03195         int nn = 0;
03196         while( !id_reader.done() )
03197         {
03198             dbgOut.printf( 3, "Reading chunk %d of \"%s\" IDs\n", ++nn, name );
03199             size_t count;
03200             id_reader.read( idbuf, count );
03201 
03202             rval = convert_id_to_handle( idbuf, count );
03203             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03204 
03205             // idbuf will now contain zero-valued handles for those
03206             // tag values that correspond to entities we are not reading
03207             // from the file.
03208             for( size_t i = 0; i < count; ++i )
03209             {
03210                 if( idbuf[i] )
03211                 {
03212                     offset_hint = offset_range.insert( offset_hint, offset + i );
03213                     if( !handle_vect.empty() )
03214                     {
03215                         handle_vect.push_back( idbuf[i] );
03216                     }
03217                     else if( handle_range.empty() || idbuf[i] > handle_range.back() )
03218                     {
03219                         handle_hint = handle_range.insert( handle_hint, idbuf[i] );
03220                     }
03221                     else
03222                     {
03223                         handle_vect.resize( handle_range.size() );
03224                         std::copy( handle_range.begin(), handle_range.end(), handle_vect.begin() );
03225                         handle_range.clear();
03226                         handle_vect.push_back( idbuf[i] );
03227                         dbgOut.print( 2, "Switching to unordered list for tag handle list\n" );
03228                     }
03229                 }
03230             }
03231 
03232             offset += count;
03233         }
03234     }
03235     catch( ReadHDF5Dataset::Exception )
03236     {
03237         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03238     }
03239 
03240     return MB_SUCCESS;
03241 }
03242 
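// The chunk loop above keeps tag-entity handles in a Range while they
// arrive in increasing order, and falls back to a plain vector on the first
// out-of-order handle. That policy in isolation (sketch only; the helper is
// illustrative, not part of MOAB):
#if 0
static void append_handle( EntityHandle h, Range& r, std::vector< EntityHandle >& v )
{
    if( !v.empty() )  // Already switched to the unordered representation
        v.push_back( h );
    else if( r.empty() || h > r.back() )  // Still sorted: keep the compact Range
        r.insert( h );
    else
    {  // First out-of-order handle: copy the Range into a vector
        v.resize( r.size() );
        std::copy( r.begin(), r.end(), v.begin() );
        r.clear();
        v.push_back( h );
    }
}
#endif
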
03243 ErrorCode ReadHDF5::read_sparse_tag( Tag tag_handle,
03244                                      hid_t hdf_read_type,
03245                                      hid_t id_table,
03246                                      hid_t value_table,
03247                                      long /*num_values*/ )
03248 {
03249     CHECK_OPEN_HANDLES;
03250 
03251     // Read the entire ID table. For those file IDs that correspond to
03252     // entities we have read from the file, add the offset to the offset
03253     // range and the handle to the handle range. If the handles are not
03254     // ordered, switch to using a vector.
03255     const EntityHandle base_offset = 1;  // Can't put zero in a Range
03256     std::vector< EntityHandle > handle_vect;
03257     Range handle_range, offset_range;
03258     std::string tn( "<error>" );
03259     iFace->tag_get_name( tag_handle, tn );
03260     ErrorCode rval =
03261         read_sparse_tag_indices( tn.c_str(), id_table, base_offset, offset_range, handle_range, handle_vect );
03262     if( MB_SUCCESS != rval ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03263 
03264     DataType mbtype;
03265     rval = iFace->tag_get_data_type( tag_handle, mbtype );
03266     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03267 
03268     int read_size;
03269     rval = iFace->tag_get_bytes( tag_handle, read_size );
03270     if( MB_SUCCESS != rval )  // Wrong function for variable-length tags
03271         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03272     // if (MB_TYPE_BIT == mbtype)
03273     // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
03274 
03275     if( hdf_read_type )
03276     {  // If not opaque
03277         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03278         if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03279     }
03280 
03281     const int handles_per_tag = read_size / sizeof( EntityHandle );
03282 
03283     // Now read data values
03284     size_t chunk_size = bufferSize / read_size;
03285     try
03286     {
03287         ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), value_table, nativeParallel, mpiComm, false );
03288         val_reader.set_file_ids( offset_range, base_offset, chunk_size, hdf_read_type );
03289         dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tn.c_str(),
03290                        val_reader.get_read_count() );
03291         int nn        = 0;
03292         size_t offset = 0;
03293         while( !val_reader.done() )
03294         {
03295             dbgOut.printf( 3, "Reading chunk %d of \"%s\" values\n", ++nn, tn.c_str() );
03296             size_t count;
03297             val_reader.read( dataBuffer, count );
03298             if( MB_TYPE_HANDLE == mbtype )
03299             {
03300                 rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * handles_per_tag );
03301                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03302             }
03303 
03304             if( !handle_vect.empty() )
03305             {
03306                 rval = iFace->tag_set_data( tag_handle, &handle_vect[offset], count, dataBuffer );
03307                 offset += count;
03308             }
03309             else
03310             {
03311                 Range r;
03312                 r.merge( handle_range.begin(), handle_range.begin() + count );
03313                 handle_range.erase( handle_range.begin(), handle_range.begin() + count );
03314                 rval = iFace->tag_set_data( tag_handle, r, dataBuffer );
03315             }
03316             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03317         }
03318     }
03319     catch( ReadHDF5Dataset::Exception )
03320     {
03321         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03322     }
03323 
03324     return MB_SUCCESS;
03325 }
03326 
03327 ErrorCode ReadHDF5::read_var_len_tag( Tag tag_handle,
03328                                       hid_t hdf_read_type,
03329                                       hid_t ent_table,
03330                                       hid_t val_table,
03331                                       hid_t off_table,
03332                                       long /*num_entities*/,
03333                                       long /*num_values*/ )
03334 {
03335     CHECK_OPEN_HANDLES;
03336 
03337     ErrorCode rval;
03338     DataType mbtype;
03339 
03340     rval = iFace->tag_get_data_type( tag_handle, mbtype );
03341     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03342 
03343     // Can't do variable-length bit tags
03344     if( MB_TYPE_BIT == mbtype ) MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
03345 
03346     // If here, MOAB tag must be variable-length
03347     int mbsize;
03348     if( MB_VARIABLE_DATA_LENGTH != iFace->tag_get_bytes( tag_handle, mbsize ) )
03349     {
03350         assert( false ); MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
03351     }
03352 
03353     int read_size;
03354     if( hdf_read_type )
03355     {
03356         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03357         if( hdf_size < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03358         read_size = hdf_size;
03359     }
03360     else
03361     {
03362         // Opaque
03363         read_size = 1;
03364     }
03365 
03366     std::string tn( "<error>" );
03367     iFace->tag_get_name( tag_handle, tn );
03368 
03369     // Read the entire ID table. For those file IDs that correspond to
03370     // entities we have read from the file, add the offset to the offset
03371     // range and the handle to the handle range. If the handles are not
03372     // ordered, switch to using a vector.
03373     const EntityHandle base_offset = 1;  // Can't put zero in a Range
03374     std::vector< EntityHandle > handle_vect;
03375     Range handle_range, offset_range;
03376     rval = read_sparse_tag_indices( tn.c_str(), ent_table, base_offset, offset_range, handle_range, handle_vect );
03377 
03378     // This code only works if the id_table is an ordered list.
03379     // This assumption was also true for the previous iteration
03380     // of this code, but wasn't checked. MOAB's file writer
03381     // always writes an ordered list for id_table.
03382     if( !handle_vect.empty() )
03383     {
03384         MB_SET_ERR( MB_FAILURE, "Unordered file ids for variable length tag not supported" );
03385     }
03386 
03387     class VTReader : public ReadHDF5VarLen
03388     {
03389         Tag tagHandle;
03390         bool isHandle;
03391         size_t readSize;
03392         ReadHDF5* readHDF5;
03393 
03394       public:
03395         ErrorCode store_data( EntityHandle file_id, void* data, long count, bool )
03396         {
03397             ErrorCode rval1;
03398             if( isHandle )
03399             {
03400                 if( readSize != sizeof( EntityHandle ) ) MB_CHK_SET_ERR( MB_FAILURE, "Invalid read size" );
03401                 rval1 = readHDF5->convert_id_to_handle( (EntityHandle*)data, count ); MB_CHK_ERR( rval1 );
03402             }
03403             int n = count;
03404             return readHDF5->moab()->tag_set_by_ptr( tagHandle, &file_id, 1, &data, &n );
03405         }
03406         VTReader( DebugOutput& debug_output,
03407                   void* buffer,
03408                   size_t buffer_size,
03409                   Tag tag,
03410                   bool is_handle_tag,
03411                   size_t read_size1,
03412                   ReadHDF5* owner )
03413             : ReadHDF5VarLen( debug_output, buffer, buffer_size ), tagHandle( tag ), isHandle( is_handle_tag ),
03414               readSize( read_size1 ), readHDF5( owner )
03415         {
03416         }
03417     };
03418 
03419     VTReader tool( dbgOut, dataBuffer, bufferSize, tag_handle, MB_TYPE_HANDLE == mbtype, read_size, this );
03420     try
03421     {
03422         // Read offsets into value table.
03423         std::vector< unsigned > counts;
03424         Range offsets;
03425         ReadHDF5Dataset off_reader( ( tn + " offsets" ).c_str(), off_table, nativeParallel, mpiComm, false );
03426         rval = tool.read_offsets( off_reader, offset_range, base_offset, base_offset, offsets, counts );
03427         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03428 
03429         // Read tag values
03430         Range empty;
03431         ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), val_table, nativeParallel, mpiComm, false );
03432         rval = tool.read_data( val_reader, offsets, base_offset, hdf_read_type, handle_range, counts, empty );
03433         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03434     }
03435     catch( ReadHDF5Dataset::Exception )
03436     {
03437         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03438     }
03439 
03440     return MB_SUCCESS;
03441 }
03442 
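// VTReader::store_data above relies on tag_set_by_ptr(), which takes one
// value pointer and one value count per entity. Setting a single
// variable-length value, assuming an int-valued tag (sketch only; the
// helper is illustrative, not part of MOAB):
#if 0
static ErrorCode set_one_varlen_value( Interface* mb, Tag tag, EntityHandle ent, const int* vals, int n )
{
    const void* ptr = vals;  // One pointer and one length for the one entity
    return mb->tag_set_by_ptr( tag, &ent, 1, &ptr, &n );
}
#endif
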
03443 ErrorCode ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size )
03444 {
03445     convert_id_to_handle( array, size, idMap );
03446     return MB_SUCCESS;
03447 }
03448 
03449 void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, const RangeMap< long, EntityHandle >& id_map )
03450 {
03451     for( EntityHandle* const end = array + size; array != end; ++array )
03452         *array = id_map.find( *array );
03453 }
03454 
03455 void ReadHDF5::convert_id_to_handle( EntityHandle* array,
03456                                      size_t size,
03457                                      size_t& new_size,
03458                                      const RangeMap< long, EntityHandle >& id_map )
03459 {
03460     RangeMap< long, EntityHandle >::const_iterator it;
03461     new_size = 0;
03462     for( size_t i = 0; i < size; ++i )
03463     {
03464         it = id_map.lower_bound( array[i] );
03465         if( it != id_map.end() && it->begin <= (long)array[i] )
03466             array[new_size++] = it->value + ( array[i] - it->begin );
03467     }
03468 }
03469 
03470 void ReadHDF5::convert_range_to_handle( const EntityHandle* ranges,
03471                                         size_t num_ranges,
03472                                         const RangeMap< long, EntityHandle >& id_map,
03473                                         Range& merge )
03474 {
03475     RangeMap< long, EntityHandle >::iterator it = id_map.begin();
03476     Range::iterator hint                        = merge.begin();
03477     for( size_t i = 0; i < num_ranges; ++i )
03478     {
03479         long id        = ranges[2 * i];
03480         const long end = id + ranges[2 * i + 1];
03481         // We assume that 'ranges' is sorted, but check just in case it isn't.
03482         if( it == id_map.end() || it->begin > id ) it = id_map.begin();
03483         it = id_map.lower_bound( it, id_map.end(), id );
03484         if( it == id_map.end() ) continue;
03485         if( id < it->begin ) id = it->begin;
03486         while( id < end )
03487         {
03488             if( id < it->begin ) id = it->begin;
03489             const long off = id - it->begin;
03490             long count     = std::min( it->count - off, end - id );
03491             // It is possible that this new subrange is starting after the end
03492             // It will result in negative count, which does not make sense
03493             // We are done with this range, go to the next one
03494             if( count <= 0 ) break;
03495             hint = merge.insert( hint, it->value + off, it->value + off + count - 1 );
03496             id += count;
03497             if( id < end )
03498             {
03499                 if( ++it == id_map.end() ) break;
03500                 if( it->begin > end ) break;
03501             }
03502         }
03503     }
03504 }
03505 
03506 ErrorCode ReadHDF5::convert_range_to_handle( const EntityHandle* array, size_t num_ranges, Range& range )
03507 {
03508     convert_range_to_handle( array, num_ranges, idMap, range );
03509     return MB_SUCCESS;
03510 }
03511 
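// convert_range_to_handle() consumes a packed encoding: the input array
// holds num_ranges pairs, where ranges[2*i] is the first file id of a run
// and ranges[2*i + 1] is the run length. For example, from member context
// where idMap is in scope (sketch only; the data values are illustrative):
#if 0
const EntityHandle ranges[4] = { 10, 5,     // File ids 10..14
                                 100, 2 };  // File ids 100..101
Range handles;
convert_range_to_handle( ranges, 2, handles );  // Two packed ranges
#endif
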
03512 ErrorCode ReadHDF5::insert_in_id_map( const Range& file_ids, EntityHandle start_id )
03513 {
03514     IDMap tmp_map;
03515     bool merge = !idMap.empty() && !file_ids.empty() && idMap.back().begin > (long)file_ids.front();
03516     IDMap& map = merge ? tmp_map : idMap;
03517     Range::const_pair_iterator p;
03518     for( p = file_ids.const_pair_begin(); p != file_ids.const_pair_end(); ++p )
03519     {
03520         size_t count = p->second - p->first + 1;
03521         if( !map.insert( p->first, start_id, count ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03522         start_id += count;
03523     }
03524     if( merge && !idMap.merge( tmp_map ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03525 
03526     return MB_SUCCESS;
03527 }
03528 
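// Once insert_in_id_map() has recorded a block, file ids resolve back to
// handles through the map; RangeMap::find() yields 0 for ids that were
// never read, which is the sentinel tested throughout this reader. From
// member context (sketch only; file_id is a placeholder):
#if 0
EntityHandle h = idMap.find( file_id );
if( !h )
{
    // The entity with this file id was not read; skip it.
}
#endif
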
03529 ErrorCode ReadHDF5::insert_in_id_map( long file_id, EntityHandle handle )
03530 {
03531     if( !idMap.insert( file_id, handle, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03532     return MB_SUCCESS;
03533 }
03534 
03535 ErrorCode ReadHDF5::read_qa( EntityHandle )
03536 {
03537     CHECK_OPEN_HANDLES;
03538 
03539     mhdf_Status status;
03540     // std::vector<std::string> qa_list;
03541 
03542     int qa_len;
03543     char** qa = mhdf_readHistory( filePtr, &qa_len, &status );
03544     if( mhdf_isError( &status ) )
03545     {
03546         MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
03547     }
03548     // qa_list.resize(qa_len);
03549     for( int i = 0; i < qa_len; i++ )
03550     {
03551         // qa_list[i] = qa[i];
03552         free( qa[i] );
03553     }
03554     free( qa );
03555 
03556     /** FIX ME - how to put QA list on set?? */
03557 
03558     return MB_SUCCESS;
03559 }
03560 
03561 ErrorCode ReadHDF5::store_file_ids( Tag tag )
03562 {
03563     CHECK_OPEN_HANDLES;
03564 
03565     // typedef int tag_type;
03566     typedef long tag_type;
03567     // Use long (64 bits) rather than int so that much larger files can be read.
03568 
03569     tag_type* buffer       = reinterpret_cast< tag_type* >( dataBuffer );
03570     const long buffer_size = bufferSize / sizeof( tag_type );
03571     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
03572     {
03573         IDMap::Range range = *i;
03574 
03575         // Make sure the values will fit in the tag type
03576         IDMap::key_type rv = range.begin + ( range.count - 1 );
03577         tag_type tv        = (tag_type)rv;
03578         if( (IDMap::key_type)tv != rv )
03579         {
03580             assert( false );
03581             return MB_INDEX_OUT_OF_RANGE;
03582         }
03583 
03584         while( range.count )
03585         {
03586             long count = buffer_size < range.count ? buffer_size : range.count;
03587 
03588             Range handles;
03589             handles.insert( range.value, range.value + count - 1 );
03590             range.value += count;
03591             range.count -= count;
03592             for( long j = 0; j < count; ++j )
03593                 buffer[j] = (tag_type)range.begin++;
03594 
03595             ErrorCode rval = iFace->tag_set_data( tag, handles, buffer );
03596             if( MB_SUCCESS != rval ) return rval;
03597         }
03598     }
03599 
03600     return MB_SUCCESS;
03601 }
03602 
03603 ErrorCode ReadHDF5::store_sets_file_ids()
03604 {
03605     CHECK_OPEN_HANDLES;
03606 
03607     // Create a tag that will not be saved, but will be used by the
03608     // VisIt plugin to match sets with their file ids. It has the same
03609     // type as the file-id tag defined in ReadParallel.cpp.
03610     Tag setFileIdTag;
03611     long default_val = 0;
03612     ErrorCode rval   = iFace->tag_get_handle( "__FILE_ID_FOR_SETS", sizeof( long ), MB_TYPE_OPAQUE, setFileIdTag,
03613                                               ( MB_TAG_DENSE | MB_TAG_CREAT ), &default_val );
03614 
03615     if( MB_SUCCESS != rval || 0 == setFileIdTag ) return rval;
03616     // typedef int tag_type;
03617     typedef long tag_type;
03618     // Use long (64 bits) rather than int so that much larger files can be read.
03619 
03620     tag_type* buffer       = reinterpret_cast< tag_type* >( dataBuffer );
03621     const long buffer_size = bufferSize / sizeof( tag_type );
03622     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
03623     {
03624         IDMap::Range range = *i;
03625         EntityType htype   = iFace->type_from_handle( range.value );
03626         if( MBENTITYSET != htype ) continue;
03627         // work only with entity sets
03628         // Make sure the values will fit in the tag type
03629         IDMap::key_type rv = range.begin + ( range.count - 1 );
03630         tag_type tv        = (tag_type)rv;
03631         if( (IDMap::key_type)tv != rv )
03632         {
03633             assert( false );
03634             return MB_INDEX_OUT_OF_RANGE;
03635         }
03636 
03637         while( range.count )
03638         {
03639             long count = buffer_size < range.count ? buffer_size : range.count;
03640 
03641             Range handles;
03642             handles.insert( range.value, range.value + count - 1 );
03643             range.value += count;
03644             range.count -= count;
03645             for( long j = 0; j < count; ++j )
03646                 buffer[j] = (tag_type)range.begin++;
03647 
03648             rval = iFace->tag_set_data( setFileIdTag, handles, buffer );
03649             if( MB_SUCCESS != rval ) return rval;
03650         }
03651     }
03652     return MB_SUCCESS;
03653 }
03654 
03655 ErrorCode ReadHDF5::read_tag_values( const char* file_name,
03656                                      const char* tag_name,
03657                                      const FileOptions& opts,
03658                                      std::vector< int >& tag_values_out,
03659                                      const SubsetList* subset_list )
03660 {
03661     ErrorCode rval;
03662 
03663     rval = set_up_read( file_name, opts );
03664     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03665 
03666     int tag_index;
03667     rval = find_int_tag( tag_name, tag_index );
03668     if( MB_SUCCESS != rval )
03669     {
03670         clean_up_read( opts );
03671         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03672     }
03673 
03674     if( subset_list )
03675     {
03676         Range file_ids;
03677         rval = get_subset_ids( subset_list->tag_list, subset_list->tag_list_length, file_ids );
03678         if( MB_SUCCESS != rval )
03679         {
03680             clean_up_read( opts );
03681             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03682         }
03683 
03684         rval = read_tag_values_partial( tag_index, file_ids, tag_values_out );
03685         if( MB_SUCCESS != rval )
03686         {
03687             clean_up_read( opts );
03688             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03689         }
03690     }
03691     else
03692     {
03693         rval = read_tag_values_all( tag_index, tag_values_out );
03694         if( MB_SUCCESS != rval )
03695         {
03696             clean_up_read( opts );
03697             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03698         }
03699     }
03700 
03701     return clean_up_read( opts );
03702 }
03703 
03704 ErrorCode ReadHDF5::read_tag_values_partial( int tag_index, const Range& file_ids, std::vector< int >& tag_values )
03705 {
03706     CHECK_OPEN_HANDLES;
03707 
03708     mhdf_Status status;
03709     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
03710     long num_ent, num_val;
03711     size_t count;
03712     std::string tn( tag.name );
03713 
03714     // Read sparse values
03715     if( tag.have_sparse )
03716     {
03717         hid_t handles[3];
03718         mhdf_openSparseTagData( filePtr, tag.name, &num_ent, &num_val, handles, &status );
03719         if( mhdf_isError( &status ) )
03720         {
03721             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
03722         }
03723 
03724         try
03725         {
03726             // Read all entity handles and fill 'offsets' with ranges of
03727             // offsets into the data table for entities that we want.
03728             Range offsets;
03729             long* buffer           = reinterpret_cast< long* >( dataBuffer );
03730             const long buffer_size = bufferSize / sizeof( long );
03731             ReadHDF5Dataset ids( ( tn + " ids" ).c_str(), handles[0], nativeParallel, mpiComm );
03732             ids.set_all_file_ids( buffer_size, H5T_NATIVE_LONG );
03733             size_t offset = 0;
03734             dbgOut.printf( 3, "Reading sparse IDs for tag \"%s\" in %lu chunks\n", tag.name, ids.get_read_count() );
03735             int nn = 0;
03736             while( !ids.done() )
03737             {
03738                 dbgOut.printf( 3, "Reading chunk %d of IDs for \"%s\"\n", ++nn, tag.name );
03739                 ids.read( buffer, count );
03740 
03741                 std::sort( buffer, buffer + count );
03742                 Range::iterator ins     = offsets.begin();
03743                 Range::const_iterator i = file_ids.begin();
03744                 for( size_t j = 0; j < count; ++j )
03745                 {
03746                     while( i != file_ids.end() && (long)*i < buffer[j] )
03747                         ++i;
03748                     if( i == file_ids.end() ) break;
03749                     if( (long)*i == buffer[j] )
03750                     {
03751                         ins = offsets.insert( ins, j + offset, j + offset );
03752                     }
03753                 }
03754 
03755                 offset += count;
03756             }
03757 
03758             tag_values.clear();
03759             tag_values.reserve( offsets.size() );
03760             const size_t data_buffer_size = bufferSize / sizeof( int );
03761             int* data_buffer              = reinterpret_cast< int* >( dataBuffer );
03762             ReadHDF5Dataset vals( ( tn + " sparse vals" ).c_str(), handles[1], nativeParallel, mpiComm );
03763             vals.set_file_ids( offsets, 0, data_buffer_size, H5T_NATIVE_INT );
03764             dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tag.name, vals.get_read_count() );
03765             nn = 0;
03766             // Should normally only have one read call, unless sparse nature
03767             // of file_ids caused reader to do something strange
03768             while( !vals.done() )
03769             {
03770                 dbgOut.printf( 3, "Reading chunk %d of values for \"%s\"\n", ++nn, tag.name );
03771                 vals.read( data_buffer, count );
03772                 tag_values.insert( tag_values.end(), data_buffer, data_buffer + count );
03773             }
03774         }
03775         catch( ReadHDF5Dataset::Exception )
03776         {
03777             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03778         }
03779     }
03780 
03781     std::sort( tag_values.begin(), tag_values.end() );
03782     tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
03783 
03784     // Read dense values
03785     std::vector< int > prev_data, curr_data;
03786     for( int i = 0; i < tag.num_dense_indices; ++i )
03787     {
03788         int grp            = tag.dense_elem_indices[i];
03789         const char* gname  = 0;
03790         mhdf_EntDesc* desc = 0;
03791         if( grp == -1 )
03792         {
03793             gname = mhdf_node_type_handle();
03794             desc  = &fileInfo->nodes;
03795         }
03796         else if( grp == -2 )
03797         {
03798             gname = mhdf_set_type_handle();
03799             desc  = &fileInfo->sets;
03800         }
03801         else
03802         {
03803             assert( grp >= 0 && grp < fileInfo->num_elem_desc );
03804             gname = fileInfo->elems[grp].handle;
03805             desc  = &fileInfo->elems[grp].desc;
03806         }
03807 
03808         Range::iterator s = file_ids.lower_bound( (EntityHandle)( desc->start_id ) );
03809         Range::iterator e = Range::lower_bound( s, file_ids.end(), (EntityHandle)( desc->start_id ) + desc->count );
03810         Range subset;
03811         subset.merge( s, e );
03812 
03813         hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
03814         if( mhdf_isError( &status ) )
03815         {
03816             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
03817         }
03818 
03819         try
03820         {
03821             curr_data.clear();
03822             curr_data.reserve( subset.size() );
03823             const size_t data_buffer_size = bufferSize / sizeof( int );
03824             int* data_buffer              = reinterpret_cast< int* >( dataBuffer );
03825 
03826             ReadHDF5Dataset reader( ( tn + " dense vals" ).c_str(), handle, nativeParallel, mpiComm );
03827             reader.set_file_ids( subset, desc->start_id, data_buffer_size, H5T_NATIVE_INT );
03828             dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tag.name,
03829                            gname, reader.get_read_count() );
03830             int nn = 0;
03831             // Should normally only have one read call, unless sparse nature
03832             // of file_ids caused reader to do something strange
03833             while( !reader.done() )
03834             {
03835                 dbgOut.printf( 3, "Reading chunk %d of \"%s\"/\"%s\"\n", ++nn, tag.name, gname );
03836                 reader.read( data_buffer, count );
03837                 curr_data.insert( curr_data.end(), data_buffer, data_buffer + count );
03838             }
03839         }
03840         catch( ReadHDF5Dataset::Exception )
03841         {
03842             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03843         }
03844 
03845         std::sort( curr_data.begin(), curr_data.end() );
03846         curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
03847         prev_data.clear();
03848         tag_values.swap( prev_data );
03849         std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
03850                         std::back_inserter( tag_values ) );
03851     }
03852 
03853     return MB_SUCCESS;
03854 }
03855 
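// The sparse-id scan above is a two-pointer merge: both the chunk of table
// ids (sorted first) and the requested file_ids increase monotonically, so
// a single forward pass finds the table offsets of the matches. In
// isolation (sketch only; the helper is illustrative, not part of MOAB):
#if 0
static void match_offsets( const long* buffer, size_t count, size_t chunk_offset,
                           const Range& file_ids, Range& offsets )
{
    Range::iterator ins     = offsets.begin();
    Range::const_iterator i = file_ids.begin();
    for( size_t j = 0; j < count; ++j )
    {
        while( i != file_ids.end() && (long)*i < buffer[j] )
            ++i;  // Skip requested ids smaller than this table id
        if( i == file_ids.end() ) break;
        if( (long)*i == buffer[j] )  // This table entry is one we want
            ins = offsets.insert( ins, j + chunk_offset, j + chunk_offset );
    }
}
#endif
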
03856 ErrorCode ReadHDF5::read_tag_values_all( int tag_index, std::vector< int >& tag_values )
03857 {
03858     CHECK_OPEN_HANDLES;
03859 
03860     mhdf_Status status;
03861     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
03862     long junk, num_val;
03863 
03864     // Read sparse values
03865     if( tag.have_sparse )
03866     {
03867         hid_t handles[3];
03868         mhdf_openSparseTagData( filePtr, tag.name, &junk, &num_val, handles, &status );
03869         if( mhdf_isError( &status ) )
03870         {
03871             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
03872         }
03873 
03874         mhdf_closeData( filePtr, handles[0], &status );
03875         if( mhdf_isError( &status ) )
03876         {
03877             MB_SET_ERR_CONT( mhdf_message( &status ) );
03878             mhdf_closeData( filePtr, handles[1], &status );
03879             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03880         }
03881 
03882         hid_t file_type = H5Dget_type( handles[1] );
03883         tag_values.resize( num_val );
03884         mhdf_readTagValuesWithOpt( handles[1], 0, num_val, file_type, &tag_values[0], collIO, &status );
03885         if( mhdf_isError( &status ) )
03886         {
03887             MB_SET_ERR_CONT( mhdf_message( &status ) );
03888             H5Tclose( file_type );
03889             mhdf_closeData( filePtr, handles[1], &status );
03890             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03891         }
03892         H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &tag_values[0], 0, H5P_DEFAULT );
03893         H5Tclose( file_type );
03894 
03895         mhdf_closeData( filePtr, handles[1], &status );
03896         if( mhdf_isError( &status ) )
03897         {
03898             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
03899         }
03900     }
03901 
03902     std::sort( tag_values.begin(), tag_values.end() );
03903     tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
03904 
03905     // Read dense values
03906     std::vector< int > prev_data, curr_data;
03907     for( int i = 0; i < tag.num_dense_indices; ++i )
03908     {
03909         int grp           = tag.dense_elem_indices[i];
03910         const char* gname = 0;
03911         if( grp == -1 )
03912             gname = mhdf_node_type_handle();
03913         else if( grp == -2 )
03914             gname = mhdf_set_type_handle();
03915         else
03916             gname = fileInfo->elems[grp].handle;
03917         hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
03918         if( mhdf_isError( &status ) )
03919         {
03920             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
03921         }
03922 
03923         hid_t file_type = H5Dget_type( handle );
03924         curr_data.resize( num_val );
03925         mhdf_readTagValuesWithOpt( handle, 0, num_val, file_type, &curr_data[0], collIO, &status );
03926         if( mhdf_isError( &status ) )
03927         {
03928             MB_SET_ERR_CONT( mhdf_message( &status ) );
03929             H5Tclose( file_type );
03930             mhdf_closeData( filePtr, handle, &status );
03931             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03932         }
03933 
03934         H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &curr_data[0], 0, H5P_DEFAULT );
03935         H5Tclose( file_type );
03936         mhdf_closeData( filePtr, handle, &status );
03937         if( mhdf_isError( &status ) )
03938         {
03939             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
03940         }
03941 
03942         std::sort( curr_data.begin(), curr_data.end() );
03943         curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
03944 
03945         prev_data.clear();
03946         tag_values.swap( prev_data );
03947         std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
03948                         std::back_inserter( tag_values ) );
03949     }
03950 
03951     return MB_SUCCESS;
03952 }
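
// Both tag-value readers above use the same read-then-convert idiom: values
// are read in the file's type and converted in place with H5Tconvert(),
// which assumes the buffer is large enough for the bigger of the two types.
// The idiom in isolation (sketch only; `dataset` and `n` are placeholders):
#if 0
hid_t file_type = H5Dget_type( dataset );
std::vector< int > vals( n );
// ... read n values of file_type into &vals[0] ...
H5Tconvert( file_type, H5T_NATIVE_INT, n, &vals[0], 0, H5P_DEFAULT );
H5Tclose( file_type );
#endif
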
03953 void ReadHDF5::print_times()
03954 {
03955 #ifdef MOAB_HAVE_MPI
03956     if( myPcomm )
03957     {
03958         double recv[NUM_TIMES];
03959         MPI_Reduce( (void*)_times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
03960         for( int i = 0; i < NUM_TIMES; i++ )
03961             _times[i] = recv[i];  // just get the max from all of them
03962     }
03963     if( !myPcomm || 0 == myPcomm->proc_config().proc_rank() )
03964     {
03965 #endif
03966 
03967         std::cout << "ReadHDF5:             " << _times[TOTAL_TIME] << std::endl
03968                   << "  get set meta        " << _times[SET_META_TIME] << std::endl
03969                   << "  partial subsets     " << _times[SUBSET_IDS_TIME] << std::endl
03970                   << "  partition time      " << _times[GET_PARTITION_TIME] << std::endl
03971                   << "  get set ids         " << _times[GET_SET_IDS_TIME] << std::endl
03972                   << "  set contents        " << _times[GET_SET_CONTENTS_TIME] << std::endl
03973                   << "  polyhedra           " << _times[GET_POLYHEDRA_TIME] << std::endl
03974                   << "  elements            " << _times[GET_ELEMENTS_TIME] << std::endl
03975                   << "  nodes               " << _times[GET_NODES_TIME] << std::endl
03976                   << "  node adjacency      " << _times[GET_NODEADJ_TIME] << std::endl
03977                   << "  side elements       " << _times[GET_SIDEELEM_TIME] << std::endl
03978                   << "  update connectivity " << _times[UPDATECONN_TIME] << std::endl
03979                   << "  adjacency           " << _times[ADJACENCY_TIME] << std::endl
03980                   << "  delete non_adj      " << _times[DELETE_NON_SIDEELEM_TIME] << std::endl
03981                   << "  recursive sets      " << _times[READ_SET_IDS_RECURS_TIME] << std::endl
03982                   << "  find contain_sets   " << _times[FIND_SETS_CONTAINING_TIME] << std::endl
03983                   << "  read sets           " << _times[READ_SETS_TIME] << std::endl
03984                   << "  read tags           " << _times[READ_TAGS_TIME] << std::endl
03985                   << "  store file ids      " << _times[STORE_FILE_IDS_TIME] << std::endl
03986                   << "  read qa records     " << _times[READ_QA_TIME] << std::endl;
03987 
03988 #ifdef MOAB_HAVE_MPI
03989     }
03990 #endif
03991 }
03992 
03993 }  // namespace moab