MOAB: Mesh Oriented datABase  (version 5.3.0)
ReadHDF5.cpp
Go to the documentation of this file.
00001 /**
00002  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00003  * storing and accessing finite element mesh data.
00004  *
00005  * Copyright 2004 Sandia Corporation.  Under the terms of Contract
00006  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00007  * retains certain rights in this software.
00008  *
00009  * This library is free software; you can redistribute it and/or
00010  * modify it under the terms of the GNU Lesser General Public
00011  * License as published by the Free Software Foundation; either
00012  * version 2.1 of the License, or (at your option) any later version.
00013  *
00014  */
00015 
00016 //-------------------------------------------------------------------------
00017 // Filename      : ReadHDF5.cpp
00018 //
00019 // Purpose       : HDF5 Writer
00020 //
00021 // Creator       : Jason Kraftcheck
00022 //
00023 // Creation Date : 04/18/04
00024 //-------------------------------------------------------------------------
00025 
00026 #include <cassert>
00027 #include "moab/MOABConfig.h"
00028 /* Include our MPI header before any HDF5 because otherwise
00029    it will get included indirectly by HDF5 */
00030 #ifdef MOAB_HAVE_MPI
00031 #include "moab_mpi.h"
00032 #include "moab/ParallelComm.hpp"
00033 #endif
00034 #include <H5Tpublic.h>
00035 #include <H5Ppublic.h>
00036 #include <H5Epublic.h>
00037 #include "moab/Interface.hpp"
00038 #include "Internals.hpp"
00039 #include "MBTagConventions.hpp"
00040 #include "ReadHDF5.hpp"
00041 #include "moab/CN.hpp"
00042 #include "moab/FileOptions.hpp"
00043 #include "moab/CpuTimer.hpp"
00044 #ifdef MOAB_HAVE_HDF5_PARALLEL
00045 #include <H5FDmpi.h>
00046 #include <H5FDmpio.h>
00047 #endif
00048 //#include "WriteHDF5.hpp"
00049 
00050 #include <cstdlib>
00051 #include <cstring>
00052 #include <limits>
00053 #include <functional>
00054 #include <iostream>
00055 
00056 #include "IODebugTrack.hpp"
00057 #include "ReadHDF5Dataset.hpp"
00058 #include "ReadHDF5VarLen.hpp"
00059 #include "moab_mpe.h"
00060 
00061 namespace moab
00062 {
00063 
00064 /* If true, coordinates are read in blocked format (all X values before
00065  * Y values before Z values.) If undefined, then all coordinates for a
00066  * given vertex are read at the same time.
00067  */
00068 const bool DEFAULT_BLOCKED_COORDINATE_IO = false;
00069 
00070 /* If true, file is opened first by root node only to read summary,
00071  * file is the closed and the summary is broadcast to all nodes, after
00072  * which all nodes open file in parallel to read data. If undefined,
00073  * file is opened once in parallel and all nodes read summary data.
00074  */
00075 const bool DEFAULT_BCAST_SUMMARY = true;
00076 
00077 /* If true and all processors are to read the same block of data,
00078  * read it on one and broadcast to others rather than using collective
00079  * io
00080  */
00081 const bool DEFAULT_BCAST_DUPLICATE_READS = true;
00082 
00083 #define READ_HDF5_BUFFER_SIZE ( 128 * 1024 * 1024 )
00084 
00085 #define assert_range( PTR, CNT )            \
00086     assert( ( PTR ) >= (void*)dataBuffer ); \
00087     assert( ( ( PTR ) + ( CNT ) ) <= (void*)( dataBuffer + bufferSize ) );
00088 
00089 // Call \c error function during HDF5 library errors to make
00090 // it easier to trap such errors in the debugger. This function
00091 // gets registered with the HDF5 library as a callback. It
00092 // works the same as the default (H5Eprint), except that it
00093 // also calls the \c error function as a no-op.
00094 #if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
00095 static herr_t handle_hdf5_error( hid_t stack, void* data )
00096 {
00097     ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
00098     herr_t result                 = 0;
00099     if( h->func ) result = ( *h->func )( stack, h->data );MB_CHK_ERR_CONT( MB_FAILURE );
00100     return result;
00101 }
00102 #else
00103 static herr_t handle_hdf5_error( void* data )
00104 {
00105     ReadHDF5::HDF5ErrorHandler* h = reinterpret_cast< ReadHDF5::HDF5ErrorHandler* >( data );
00106     herr_t result                 = 0;
00107     if( h->func ) result = ( *h->func )( h->data );MB_CHK_ERR_CONT( MB_FAILURE );
00108     return result;
00109 }
00110 #endif
00111 
00112 static void copy_sorted_file_ids( const EntityHandle* sorted_ids, long num_ids, Range& results )
00113 {
00114     Range::iterator hint = results.begin();
00115     long i               = 0;
00116     while( i < num_ids )
00117     {
00118         EntityHandle start = sorted_ids[i];
00119         for( ++i; i < num_ids && sorted_ids[i] == 1 + sorted_ids[i - 1]; ++i )
00120             ;
00121         hint = results.insert( hint, start, sorted_ids[i - 1] );
00122     }
00123 }
00124 
00125 static void intersect( const mhdf_EntDesc& group, const Range& range, Range& result )
00126 {
00127     Range::const_iterator s, e;
00128     s = Range::lower_bound( range.begin(), range.end(), group.start_id );
00129     e = Range::lower_bound( s, range.end(), group.start_id + group.count );
00130     result.merge( s, e );
00131 }
00132 
00133 #define debug_barrier() debug_barrier_line( __LINE__ )
00134 void ReadHDF5::debug_barrier_line( int lineno )
00135 {
00136 #ifdef MOAB_HAVE_MPI
00137     if( mpiComm )
00138     {
00139         const unsigned threshold   = 2;
00140         static unsigned long count = 0;
00141         if( dbgOut.get_verbosity() >= threshold )
00142         {
00143             dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno );
00144             MPI_Barrier( *mpiComm );
00145         }
00146     }
00147 #else
00148     if( lineno ) {}
00149 #endif
00150 }
00151 
00152 class CheckOpenReadHDF5Handles
00153 {
00154     int fileline;
00155     mhdf_FileHandle handle;
00156     int enter_count;
00157 
00158   public:
00159     CheckOpenReadHDF5Handles( mhdf_FileHandle file, int line )
00160         : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
00161     {
00162     }
00163     ~CheckOpenReadHDF5Handles()
00164     {
00165         int new_count = mhdf_countOpenHandles( handle );
00166         if( new_count != enter_count )
00167         {
00168             std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
00169                       << "Open at entrance: " << enter_count << std::endl
00170                       << "Open at exit:     " << new_count << std::endl;
00171         }
00172     }
00173 };
00174 
00175 #ifdef NDEBUG
00176 #define CHECK_OPEN_HANDLES
00177 #else
00178 #define CHECK_OPEN_HANDLES CheckOpenReadHDF5Handles check_open_handles_( filePtr, __LINE__ )
00179 #endif
00180 
00181 ReaderIface* ReadHDF5::factory( Interface* iface )
00182 {
00183     return new ReadHDF5( iface );
00184 }
00185 
00186 ReadHDF5::ReadHDF5( Interface* iface )
00187     : bufferSize( READ_HDF5_BUFFER_SIZE ), dataBuffer( NULL ), iFace( iface ), filePtr( 0 ), fileInfo( NULL ),
00188       readUtil( NULL ), handleType( 0 ), indepIO( H5P_DEFAULT ), collIO( H5P_DEFAULT ), myPcomm( NULL ),
00189       debugTrack( false ), dbgOut( stderr ), nativeParallel( false ), mpiComm( NULL ),
00190       blockedCoordinateIO( DEFAULT_BLOCKED_COORDINATE_IO ), bcastSummary( DEFAULT_BCAST_SUMMARY ),
00191       bcastDuplicateReads( DEFAULT_BCAST_DUPLICATE_READS ), setMeta( 0 ), timer( NULL ), cputime( false )
00192 {
00193 }
00194 
00195 ErrorCode ReadHDF5::init()
00196 {
00197     ErrorCode rval;
00198 
00199     if( readUtil ) return MB_SUCCESS;
00200 
00201     indepIO = collIO = H5P_DEFAULT;
00202     // WriteHDF5::register_known_tag_types(iFace);
00203 
00204     handleType = H5Tcopy( H5T_NATIVE_ULONG );
00205     if( handleType < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00206 
00207     if( H5Tset_size( handleType, sizeof( EntityHandle ) ) < 0 )
00208     {
00209         H5Tclose( handleType );
00210         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00211     }
00212 
00213     rval = iFace->query_interface( readUtil );
00214     if( MB_SUCCESS != rval )
00215     {
00216         H5Tclose( handleType );
00217         MB_SET_ERR( rval, "ReadHDF5 Failure" );
00218     }
00219 
00220     idMap.clear();
00221     fileInfo   = 0;
00222     debugTrack = false;
00223     myPcomm    = 0;
00224 
00225     return MB_SUCCESS;
00226 }
00227 
00228 ReadHDF5::~ReadHDF5()
00229 {
00230     if( !readUtil )  // init() failed.
00231         return;
00232 
00233     delete[] setMeta;
00234     setMeta = 0;
00235     iFace->release_interface( readUtil );
00236     H5Tclose( handleType );
00237 }
00238 
00239 ErrorCode ReadHDF5::set_up_read( const char* filename, const FileOptions& opts )
00240 {
00241     ErrorCode rval;
00242     mhdf_Status status;
00243     indepIO = collIO = H5P_DEFAULT;
00244     mpiComm          = 0;
00245 
00246     if( MB_SUCCESS != init() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00247 
00248 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
00249     herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
00250 #else
00251     herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
00252 #endif
00253     if( err < 0 )
00254     {
00255         errorHandler.func = 0;
00256         errorHandler.data = 0;
00257     }
00258     else
00259     {
00260 #if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
00261         err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
00262 #else
00263         err           = H5Eset_auto( &handle_hdf5_error, &errorHandler );
00264 #endif
00265         if( err < 0 )
00266         {
00267             errorHandler.func = 0;
00268             errorHandler.data = 0;
00269         }
00270     }
00271 
00272     // Set up debug output
00273     int tmpval;
00274     if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) )
00275     {
00276         dbgOut.set_verbosity( tmpval );
00277         dbgOut.set_prefix( "H5M " );
00278     }
00279     dbgOut.limit_output_to_first_N_procs( 32 );
00280 
00281     // Enable some extra checks for reads. Note: amongst other things this
00282     // will print errors if the entire file is not read, so if doing a
00283     // partial read that is not a parallel read, this should be disabled.
00284     debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );
00285 
00286     opts.get_toggle_option( "BLOCKED_COORDINATE_IO", DEFAULT_BLOCKED_COORDINATE_IO, blockedCoordinateIO );
00287     opts.get_toggle_option( "BCAST_SUMMARY", DEFAULT_BCAST_SUMMARY, bcastSummary );
00288     opts.get_toggle_option( "BCAST_DUPLICATE_READS", DEFAULT_BCAST_DUPLICATE_READS, bcastDuplicateReads );
00289 
00290     // Handle parallel options
00291     bool use_mpio  = ( MB_SUCCESS == opts.get_null_option( "USE_MPIO" ) );
00292     rval           = opts.match_option( "PARALLEL", "READ_PART" );
00293     bool parallel  = ( rval != MB_ENTITY_NOT_FOUND );
00294     nativeParallel = ( rval == MB_SUCCESS );
00295     if( use_mpio && !parallel )
00296     {
00297         MB_SET_ERR( MB_NOT_IMPLEMENTED, "'USE_MPIO' option specified w/out 'PARALLEL' option" );
00298     }
00299 
00300     // This option is intended for testing purposes only, and thus
00301     // is not documented anywhere.  Decreasing the buffer size can
00302     // expose bugs that would otherwise only be seen when reading
00303     // very large files.
00304     rval = opts.get_int_option( "BUFFER_SIZE", bufferSize );
00305     if( MB_SUCCESS != rval ) { bufferSize = READ_HDF5_BUFFER_SIZE; }
00306     else if( bufferSize < (int)std::max( sizeof( EntityHandle ), sizeof( void* ) ) )
00307     {
00308         MB_CHK_ERR( MB_INVALID_SIZE );
00309     }
00310 
00311     dataBuffer = (char*)malloc( bufferSize );
00312     if( !dataBuffer ) MB_CHK_ERR( MB_MEMORY_ALLOCATION_FAILED );
00313 
00314     if( use_mpio || nativeParallel )
00315     {
00316 
00317 #ifndef MOAB_HAVE_HDF5_PARALLEL
00318         free( dataBuffer );
00319         dataBuffer = NULL;
00320         MB_SET_ERR( MB_NOT_IMPLEMENTED, "MOAB not configured with parallel HDF5 support" );
00321 #else
00322         MPI_Info info = MPI_INFO_NULL;
00323         std::string cb_size;
00324         rval = opts.get_str_option( "CB_BUFFER_SIZE", cb_size );
00325         if( MB_SUCCESS == rval )
00326         {
00327             MPI_Info_create( &info );
00328             MPI_Info_set( info, const_cast< char* >( "cb_buffer_size" ), const_cast< char* >( cb_size.c_str() ) );
00329         }
00330 
00331         int pcomm_no = 0;
00332         rval         = opts.get_int_option( "PARALLEL_COMM", pcomm_no );
00333         if( rval == MB_TYPE_OUT_OF_RANGE ) { MB_SET_ERR( rval, "Invalid value for PARALLEL_COMM option" ); }
00334         myPcomm = ParallelComm::get_pcomm( iFace, pcomm_no );
00335         if( 0 == myPcomm ) { myPcomm = new ParallelComm( iFace, MPI_COMM_WORLD ); }
00336         const int rank = myPcomm->proc_config().proc_rank();
00337         dbgOut.set_rank( rank );
00338         dbgOut.limit_output_to_first_N_procs( 32 );
00339         mpiComm = new MPI_Comm( myPcomm->proc_config().proc_comm() );
00340 
00341 #ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
00342         dbgOut.print( 1, "H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS is not defined\n" );
00343 #endif
00344 
00345         // Open the file in serial on root to read summary
00346         dbgOut.tprint( 1, "Getting file summary\n" );
00347         fileInfo = 0;
00348 
00349         hid_t file_prop;
00350         if( bcastSummary )
00351         {
00352             unsigned long size = 0;
00353             if( rank == 0 )
00354             {
00355                 file_prop = H5Pcreate( H5P_FILE_ACCESS );
00356                 err       = H5Pset_fapl_mpio( file_prop, MPI_COMM_SELF, MPI_INFO_NULL );
00357                 assert( file_prop >= 0 );
00358                 assert( err >= 0 );
00359                 filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
00360                 H5Pclose( file_prop );
00361 
00362                 if( filePtr )
00363                 {
00364                     fileInfo = mhdf_getFileSummary( filePtr, handleType, &status,
00365                                                     0 );  // no extra set info
00366                     if( !is_error( status ) )
00367                     {
00368                         size             = fileInfo->total_size;
00369                         fileInfo->offset = (unsigned char*)fileInfo;
00370                     }
00371                 }
00372                 mhdf_closeFile( filePtr, &status );
00373                 if( fileInfo && mhdf_isError( &status ) )
00374                 {
00375                     free( fileInfo );
00376                     fileInfo = NULL;
00377                 }
00378             }
00379 
00380             dbgOut.tprint( 1, "Communicating file summary\n" );
00381             int mpi_err = MPI_Bcast( &size, 1, MPI_UNSIGNED_LONG, 0, myPcomm->proc_config().proc_comm() );
00382             if( mpi_err || !size ) return MB_FAILURE;
00383 
00384             if( rank != 0 ) fileInfo = reinterpret_cast< mhdf_FileDesc* >( malloc( size ) );
00385 
00386             MPI_Bcast( fileInfo, size, MPI_BYTE, 0, myPcomm->proc_config().proc_comm() );
00387 
00388             if( rank != 0 ) mhdf_fixFileDesc( fileInfo, reinterpret_cast< mhdf_FileDesc* >( fileInfo->offset ) );
00389         }
00390 
00391         file_prop = H5Pcreate( H5P_FILE_ACCESS );
00392         err       = H5Pset_fapl_mpio( file_prop, myPcomm->proc_config().proc_comm(), info );
00393         assert( file_prop >= 0 );
00394         assert( err >= 0 );
00395 
00396         collIO = H5Pcreate( H5P_DATASET_XFER );
00397         assert( collIO > 0 );
00398         err = H5Pset_dxpl_mpio( collIO, H5FD_MPIO_COLLECTIVE );
00399         assert( err >= 0 );
00400         indepIO = nativeParallel ? H5P_DEFAULT : collIO;
00401 
00402         // Re-open file in parallel
00403         dbgOut.tprintf( 1, "Opening \"%s\" for parallel IO\n", filename );
00404         filePtr = mhdf_openFileWithOpt( filename, 0, NULL, handleType, file_prop, &status );
00405 
00406         H5Pclose( file_prop );
00407         if( !filePtr )
00408         {
00409             free( dataBuffer );
00410             dataBuffer = NULL;
00411             H5Pclose( indepIO );
00412             if( collIO != indepIO ) H5Pclose( collIO );
00413             collIO = indepIO = H5P_DEFAULT;
00414             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
00415         }
00416 
00417         if( !bcastSummary )
00418         {
00419             fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
00420             if( is_error( status ) )
00421             {
00422                 free( dataBuffer );
00423                 dataBuffer = NULL;
00424                 mhdf_closeFile( filePtr, &status );
00425                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00426             }
00427         }
00428 #endif  // HDF5_PARALLEL
00429     }
00430     else
00431     {
00432         // Open the file
00433         filePtr = mhdf_openFile( filename, 0, NULL, handleType, &status );
00434         if( !filePtr )
00435         {
00436             free( dataBuffer );
00437             dataBuffer = NULL;
00438             MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
00439         }
00440 
00441         // Get file info
00442         fileInfo = mhdf_getFileSummary( filePtr, handleType, &status, 0 );
00443         if( is_error( status ) )
00444         {
00445             free( dataBuffer );
00446             dataBuffer = NULL;
00447             mhdf_closeFile( filePtr, &status );
00448             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00449         }
00450     }
00451 
00452     ReadHDF5Dataset::default_hyperslab_selection_limit();
00453     int hslimit;
00454     rval = opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit );
00455     if( MB_SUCCESS == rval && hslimit > 0 )
00456         ReadHDF5Dataset::set_hyperslab_selection_limit( hslimit );
00457     else
00458         ReadHDF5Dataset::default_hyperslab_selection_limit();
00459     if( MB_SUCCESS != opts.get_null_option( "HYPERSLAB_OR" ) &&
00460         ( MB_SUCCESS == opts.get_null_option( "HYPERSLAB_APPEND" ) || HDF5_can_append_hyperslabs() ) )
00461     {
00462         ReadHDF5Dataset::append_hyperslabs();
00463         if( MB_SUCCESS != opts.get_int_option( "HYPERSLAB_SELECT_LIMIT", hslimit ) )
00464             ReadHDF5Dataset::set_hyperslab_selection_limit( std::numeric_limits< int >::max() );
00465         dbgOut.print( 1, "Using H5S_APPEND for hyperslab selection\n" );
00466     }
00467 
00468     return MB_SUCCESS;
00469 }
00470 
00471 ErrorCode ReadHDF5::clean_up_read( const FileOptions& )
00472 {
00473     HDF5ErrorHandler handler;
00474 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
00475     herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
00476 #else
00477     herr_t err = H5Eget_auto( &handler.func, &handler.data );
00478 #endif
00479     if( err >= 0 && handler.func == &handle_hdf5_error )
00480     {
00481         assert( handler.data == &errorHandler );
00482 #if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
00483         H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
00484 #else
00485         H5Eset_auto( errorHandler.func, errorHandler.data );
00486 #endif
00487     }
00488 
00489     free( dataBuffer );
00490     dataBuffer = NULL;
00491     free( fileInfo );
00492     fileInfo = NULL;
00493     delete mpiComm;
00494     mpiComm = 0;
00495 
00496     if( indepIO != H5P_DEFAULT ) H5Pclose( indepIO );
00497     if( collIO != indepIO ) H5Pclose( collIO );
00498     collIO = indepIO = H5P_DEFAULT;
00499 
00500     delete[] setMeta;
00501     setMeta = 0;
00502 
00503     mhdf_Status status;
00504     mhdf_closeFile( filePtr, &status );
00505     filePtr = 0;
00506     return is_error( status ) ? MB_FAILURE : MB_SUCCESS;
00507 }
00508 
00509 ErrorCode ReadHDF5::load_file( const char* filename, const EntityHandle* file_set, const FileOptions& opts,
00510                                const ReaderIface::SubsetList* subset_list, const Tag* file_id_tag )
00511 {
00512     ErrorCode rval;
00513 
00514     rval = set_up_read( filename, opts );
00515     if( MB_SUCCESS != rval )
00516     {
00517         clean_up_read( opts );
00518         return rval;
00519     }
00520     // See if we need to report times
00521 
00522     rval = opts.get_null_option( "CPUTIME" );
00523     if( MB_SUCCESS == rval )
00524     {
00525         cputime = true;
00526         timer   = new CpuTimer;
00527         for( int i = 0; i < NUM_TIMES; i++ )
00528             _times[i] = 0;
00529     }
00530 
00531     // We read the entire set description table regardless of partial
00532     // or complete reads or serial vs parallel reads
00533     rval = read_all_set_meta();
00534 
00535     if( cputime ) _times[SET_META_TIME] = timer->time_elapsed();
00536     if( subset_list && MB_SUCCESS == rval )
00537         rval = load_file_partial( subset_list->tag_list, subset_list->tag_list_length, subset_list->num_parts,
00538                                   subset_list->part_number, opts );
00539     else
00540         rval = load_file_impl( opts );
00541 
00542     if( MB_SUCCESS == rval && file_id_tag )
00543     {
00544         dbgOut.tprint( 1, "Storing file IDs in tag\n" );
00545         rval = store_file_ids( *file_id_tag );
00546     }
00547     ErrorCode rval3 = opts.get_null_option( "STORE_SETS_FILEIDS" );
00548     if( MB_SUCCESS == rval3 )
00549     {
00550         rval = store_sets_file_ids();
00551         if( MB_SUCCESS != rval ) return rval;
00552     }
00553 
00554     if( cputime ) _times[STORE_FILE_IDS_TIME] = timer->time_elapsed();
00555 
00556     if( MB_SUCCESS == rval && 0 != file_set )
00557     {
00558         dbgOut.tprint( 1, "Reading QA records\n" );
00559         rval = read_qa( *file_set );
00560     }
00561 
00562     if( cputime ) _times[READ_QA_TIME] = timer->time_elapsed();
00563     dbgOut.tprint( 1, "Cleaning up\n" );
00564     ErrorCode rval2 = clean_up_read( opts );
00565     if( rval == MB_SUCCESS && rval2 != MB_SUCCESS ) rval = rval2;
00566 
00567     if( MB_SUCCESS == rval )
00568         dbgOut.tprint( 1, "Read finished.\n" );
00569     else
00570     {
00571         std::string msg;
00572         iFace->get_last_error( msg );
00573         dbgOut.tprintf( 1, "READ FAILED (ERROR CODE %s): %s\n", ErrorCodeStr[rval], msg.c_str() );
00574     }
00575 
00576     if( cputime )
00577     {
00578         _times[TOTAL_TIME] = timer->time_since_birth();
00579         print_times();
00580         delete timer;
00581     }
00582     if( H5P_DEFAULT != collIO ) H5Pclose( collIO );
00583     if( H5P_DEFAULT != indepIO ) H5Pclose( indepIO );
00584     collIO = indepIO = H5P_DEFAULT;
00585 
00586     return rval;
00587 }
00588 
00589 ErrorCode ReadHDF5::load_file_impl( const FileOptions& )
00590 {
00591     ErrorCode rval;
00592     mhdf_Status status;
00593     int i;
00594 
00595     CHECK_OPEN_HANDLES;
00596 
00597     dbgOut.tprint( 1, "Reading all nodes...\n" );
00598     Range ids;
00599     if( fileInfo->nodes.count )
00600     {
00601         ids.insert( fileInfo->nodes.start_id, fileInfo->nodes.start_id + fileInfo->nodes.count - 1 );
00602         rval = read_nodes( ids );
00603         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00604     }
00605 
00606     dbgOut.tprint( 1, "Reading all element connectivity...\n" );
00607     std::vector< int > polyhedra;  // Need to do these last so that faces are loaded
00608     for( i = 0; i < fileInfo->num_elem_desc; ++i )
00609     {
00610         if( CN::EntityTypeFromName( fileInfo->elems[i].type ) == MBPOLYHEDRON )
00611         {
00612             polyhedra.push_back( i );
00613             continue;
00614         }
00615 
00616         rval = read_elems( i );
00617         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00618     }
00619     for( std::vector< int >::iterator it = polyhedra.begin(); it != polyhedra.end(); ++it )
00620     {
00621         rval = read_elems( *it );
00622         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00623     }
00624 
00625     dbgOut.tprint( 1, "Reading all sets...\n" );
00626     ids.clear();
00627     if( fileInfo->sets.count )
00628     {
00629         ids.insert( fileInfo->sets.start_id, fileInfo->sets.start_id + fileInfo->sets.count - 1 );
00630         rval = read_sets( ids );
00631         if( rval != MB_SUCCESS ) { MB_SET_ERR( rval, "ReadHDF5 Failure" ); }
00632     }
00633 
00634     dbgOut.tprint( 1, "Reading all adjacencies...\n" );
00635     for( i = 0; i < fileInfo->num_elem_desc; ++i )
00636     {
00637         if( !fileInfo->elems[i].have_adj ) continue;
00638 
00639         long table_len;
00640         hid_t table = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &table_len, &status );
00641         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00642 
00643         rval = read_adjacencies( table, table_len );
00644         mhdf_closeData( filePtr, table, &status );
00645         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00646         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
00647     }
00648 
00649     dbgOut.tprint( 1, "Reading all tags...\n" );
00650     for( i = 0; i < fileInfo->num_tag_desc; ++i )
00651     {
00652         rval = read_tag( i );
00653         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00654     }
00655 
00656     dbgOut.tprint( 1, "Core read finished.  Cleaning up...\n" );
00657     return MB_SUCCESS;
00658 }
00659 
00660 ErrorCode ReadHDF5::find_int_tag( const char* name, int& index )
00661 {
00662     for( index = 0; index < fileInfo->num_tag_desc; ++index )
00663         if( !strcmp( name, fileInfo->tags[index].name ) ) break;
00664 
00665     if( index == fileInfo->num_tag_desc )
00666     {
00667         MB_SET_ERR( MB_TAG_NOT_FOUND, "File does not contain subset tag '" << name << "'" );
00668     }
00669 
00670     if( fileInfo->tags[index].type != mhdf_INTEGER || fileInfo->tags[index].size != 1 )
00671     {
00672         MB_SET_ERR( MB_TAG_NOT_FOUND, "Tag ' " << name << "' does not contain single integer value" );
00673     }
00674 
00675     return MB_SUCCESS;
00676 }
00677 
00678 ErrorCode ReadHDF5::get_subset_ids( const ReaderIface::IDTag* subset_list, int subset_list_length, Range& file_ids )
00679 {
00680     ErrorCode rval;
00681 
00682     for( int i = 0; i < subset_list_length; ++i )
00683     {
00684         int tag_index;
00685         rval = find_int_tag( subset_list[i].tag_name, tag_index );
00686         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00687 
00688         Range tmp_file_ids;
00689         if( !subset_list[i].num_tag_values ) { rval = get_tagged_entities( tag_index, tmp_file_ids ); }
00690         else
00691         {
00692             std::vector< int > ids( subset_list[i].tag_values,
00693                                     subset_list[i].tag_values + subset_list[i].num_tag_values );
00694             std::sort( ids.begin(), ids.end() );
00695             rval = search_tag_values( tag_index, ids, tmp_file_ids );
00696             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00697         }
00698 
00699         if( tmp_file_ids.empty() ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
00700 
00701         if( i == 0 )
00702             file_ids.swap( tmp_file_ids );
00703         else
00704             file_ids = intersect( tmp_file_ids, file_ids );
00705     }
00706 
00707     return MB_SUCCESS;
00708 }
00709 
00710 ErrorCode ReadHDF5::get_partition( Range& tmp_file_ids, int num_parts, int part_number )
00711 {
00712     CHECK_OPEN_HANDLES;
00713 
00714     // Check that the tag only identified sets
00715     if( (unsigned long)fileInfo->sets.start_id > tmp_file_ids.front() )
00716     {
00717         dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
00718         tmp_file_ids.erase( tmp_file_ids.begin(), tmp_file_ids.lower_bound( (EntityHandle)fileInfo->sets.start_id ) );
00719     }
00720     unsigned long set_end = (unsigned long)fileInfo->sets.start_id + fileInfo->sets.count;
00721     if( tmp_file_ids.back() >= set_end )
00722     {
00723         dbgOut.print( 2, "Ignoring non-set entities with partition set tag\n" );
00724         tmp_file_ids.erase( tmp_file_ids.upper_bound( (EntityHandle)set_end ), tmp_file_ids.end() );
00725     }
00726 
00727     Range::iterator s   = tmp_file_ids.begin();
00728     size_t num_per_proc = tmp_file_ids.size() / num_parts;
00729     size_t num_extra    = tmp_file_ids.size() % num_parts;
00730     Range::iterator e;
00731     if( part_number < (long)num_extra )
00732     {
00733         s += ( num_per_proc + 1 ) * part_number;
00734         e = s;
00735         e += ( num_per_proc + 1 );
00736     }
00737     else
00738     {
00739         s += num_per_proc * part_number + num_extra;
00740         e = s;
00741         e += num_per_proc;
00742     }
00743     tmp_file_ids.erase( e, tmp_file_ids.end() );
00744     tmp_file_ids.erase( tmp_file_ids.begin(), s );
00745 
00746     return MB_SUCCESS;
00747 }
00748 
00749 ErrorCode ReadHDF5::load_file_partial( const ReaderIface::IDTag* subset_list, int subset_list_length, int num_parts,
00750                                        int part_number, const FileOptions& opts )
00751 {
00752     mhdf_Status status;
00753 
00754     static MPEState mpe_event( "ReadHDF5", "yellow" );
00755 
00756     mpe_event.start( "gather parts" );
00757 
00758     CHECK_OPEN_HANDLES;
00759 
00760     for( int i = 0; i < subset_list_length; ++i )
00761     {
00762         dbgOut.printf( 2, "Select by \"%s\" with num_tag_values = %d\n", subset_list[i].tag_name,
00763                        subset_list[i].num_tag_values );
00764         if( subset_list[i].num_tag_values )
00765         {
00766             assert( 0 != subset_list[i].tag_values );
00767             dbgOut.printf( 2, "  \"%s\" values = { %d", subset_list[i].tag_name, subset_list[i].tag_values[0] );
00768             for( int j = 1; j < subset_list[i].num_tag_values; ++j )
00769                 dbgOut.printf( 2, ", %d", subset_list[i].tag_values[j] );
00770             dbgOut.printf( 2, " }\n" );
00771         }
00772     }
00773     if( num_parts ) dbgOut.printf( 2, "Partition with num_parts = %d and part_number = %d\n", num_parts, part_number );
00774 
00775     dbgOut.tprint( 1, "RETRIEVING TAGGED ENTITIES\n" );
00776 
00777     Range file_ids;
00778     ErrorCode rval = get_subset_ids( subset_list, subset_list_length, file_ids );
00779     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00780 
00781     if( cputime ) _times[SUBSET_IDS_TIME] = timer->time_elapsed();
00782 
00783     if( num_parts )
00784     {
00785         /*if (num_parts>(int)file_ids.size())
00786         {
00787           MB_SET_ERR(MB_FAILURE, "Only " << file_ids.size() << " parts to distribute to " <<
00788         num_parts << " processes.");
00789         }*/
00790         rval = get_partition( file_ids, num_parts, part_number );
00791         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00792     }
00793 
00794     if( cputime ) _times[GET_PARTITION_TIME] = timer->time_elapsed();
00795 
00796     dbgOut.print_ints( 4, "Set file IDs for partial read: ", file_ids );
00797     mpe_event.end();
00798     mpe_event.start( "gather related sets" );
00799     dbgOut.tprint( 1, "GATHERING ADDITIONAL ENTITIES\n" );
00800 
00801     enum RecusiveSetMode
00802     {
00803         RSM_NONE,
00804         RSM_SETS,
00805         RSM_CONTENTS
00806     };
00807     const char* const set_opts[] = { "NONE", "SETS", "CONTENTS", NULL };
00808     int child_mode;
00809     rval = opts.match_option( "CHILDREN", set_opts, child_mode );
00810     if( MB_ENTITY_NOT_FOUND == rval )
00811         child_mode = RSM_CONTENTS;
00812     else if( MB_SUCCESS != rval )
00813     {
00814         MB_SET_ERR( rval, "Invalid value for 'CHILDREN' option" );
00815     }
00816     int content_mode;
00817     rval = opts.match_option( "SETS", set_opts, content_mode );
00818     if( MB_ENTITY_NOT_FOUND == rval )
00819         content_mode = RSM_CONTENTS;
00820     else if( MB_SUCCESS != rval )
00821     {
00822         MB_SET_ERR( rval, "Invalid value for 'SETS' option" );
00823     }
00824 
00825     // If we want the contents of contained/child sets,
00826     // search for them now (before gathering the non-set contents
00827     // of the sets.)
00828     Range sets;
00829     intersect( fileInfo->sets, file_ids, sets );
00830     if( content_mode == RSM_CONTENTS || child_mode == RSM_CONTENTS )
00831     {
00832         dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
00833         rval = read_set_ids_recursive( sets, content_mode == RSM_CONTENTS, child_mode == RSM_CONTENTS );
00834         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00835     }
00836 
00837     if( cputime ) _times[GET_SET_IDS_TIME] = timer->time_elapsed();
00838     debug_barrier();
00839 
00840     // Get elements and vertices contained in sets
00841     dbgOut.tprint( 1, "  doing get_set_contents\n" );
00842     rval = get_set_contents( sets, file_ids );
00843     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00844 
00845     if( cputime ) _times[GET_SET_CONTENTS_TIME] = timer->time_elapsed();
00846 
00847     dbgOut.print_ints( 5, "File IDs for partial read: ", file_ids );
00848     debug_barrier();
00849     mpe_event.end();
00850     dbgOut.tprint( 1, "GATHERING NODE IDS\n" );
00851 
00852     // Figure out the maximum dimension of entity to be read
00853     int max_dim = 0;
00854     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00855     {
00856         EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00857         if( type <= MBVERTEX || type >= MBENTITYSET )
00858         {
00859             assert( false );  // For debug code die for unknown element types
00860             continue;         // For release code, skip unknown element types
00861         }
00862         int dim = CN::Dimension( type );
00863         if( dim > max_dim )
00864         {
00865             Range subset;
00866             intersect( fileInfo->elems[i].desc, file_ids, subset );
00867             if( !subset.empty() ) max_dim = dim;
00868         }
00869     }
00870 #ifdef MOAB_HAVE_MPI
00871     if( nativeParallel )
00872     {
00873         int send = max_dim;
00874         MPI_Allreduce( &send, &max_dim, 1, MPI_INT, MPI_MAX, *mpiComm );
00875     }
00876 #endif
00877 
00878     // If input contained any polyhedra, then need to get faces
00879     // of the polyhedra before the next loop because we need to
00880     // read said faces in that loop.
00881     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00882     {
00883         EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00884         if( type != MBPOLYHEDRON ) continue;
00885 
00886         debug_barrier();
00887         dbgOut.print( 2, "    Getting polyhedra faces\n" );
00888         mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
00889 
00890         Range polyhedra;
00891         intersect( fileInfo->elems[i].desc, file_ids, polyhedra );
00892         rval = read_elems( i, polyhedra, &file_ids );
00893         mpe_event.end( rval );
00894         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00895     }
00896 
00897     if( cputime ) _times[GET_POLYHEDRA_TIME] = timer->time_elapsed();
00898     // Get node file ids for all elements
00899     Range nodes;
00900     intersect( fileInfo->nodes, file_ids, nodes );
00901     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00902     {
00903         EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00904         if( type <= MBVERTEX || type >= MBENTITYSET )
00905         {
00906             assert( false );  // For debug code die for unknown element types
00907             continue;         // For release code, skip unknown element types
00908         }
00909         if( MBPOLYHEDRON == type ) continue;
00910 
00911         debug_barrier();
00912         dbgOut.printf( 2, "    Getting element node IDs for: %s\n", fileInfo->elems[i].handle );
00913 
00914         Range subset;
00915         intersect( fileInfo->elems[i].desc, file_ids, subset );
00916         mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
00917 
00918         // If dimension is max_dim, then we can create the elements now
00919         // so we don't have to read the table again later (connectivity
00920         // will be fixed up after nodes are created when update_connectivity())
00921         // is called.  For elements of a smaller dimension, we just build
00922         // the node ID range now because a) we'll have to read the whole
00923         // connectivity table again later, and b) we don't want to worry
00924         // about accidentally creating multiple copies of the same element.
00925         if( CN::Dimension( type ) == max_dim )
00926             rval = read_elems( i, subset, &nodes );
00927         else
00928             rval = read_elems( i, subset, nodes );
00929         mpe_event.end( rval );
00930         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00931     }
00932     if( cputime ) _times[GET_ELEMENTS_TIME] = timer->time_elapsed();
00933     debug_barrier();
00934     mpe_event.start( "read coords" );
00935     dbgOut.tprintf( 1, "READING NODE COORDINATES (%lu nodes in %lu selects)\n", (unsigned long)nodes.size(),
00936                     (unsigned long)nodes.psize() );
00937 
00938     // Read node coordinates and create vertices in MOAB
00939     // NOTE: This populates the RangeMap with node file ids,
00940     //       which is expected by read_node_adj_elems.
00941     rval = read_nodes( nodes );
00942     mpe_event.end( rval );
00943     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00944 
00945     if( cputime ) _times[GET_NODES_TIME] = timer->time_elapsed();
00946 
00947     debug_barrier();
00948     dbgOut.tprint( 1, "READING ELEMENTS\n" );
00949 
00950     // Decide if we need to read additional elements
00951     enum SideMode
00952     {
00953         SM_EXPLICIT,
00954         SM_NODES,
00955         SM_SIDES
00956     };
00957     int side_mode;
00958     const char* const options[] = { "EXPLICIT", "NODES", "SIDES", 0 };
00959     rval                        = opts.match_option( "ELEMENTS", options, side_mode );
00960     if( MB_ENTITY_NOT_FOUND == rval )
00961     {
00962         // If only nodes were specified, then default to "NODES", otherwise
00963         // default to "SIDES".
00964         if( 0 == max_dim )
00965             side_mode = SM_NODES;
00966         else
00967             side_mode = SM_SIDES;
00968     }
00969     else if( MB_SUCCESS != rval )
00970     {
00971         MB_SET_ERR( rval, "Invalid value for 'ELEMENTS' option" );
00972     }
00973 
00974     if( side_mode == SM_SIDES /*ELEMENTS=SIDES*/ && max_dim == 0 /*node-based*/ )
00975     {
00976         // Read elements until we find something. Once we find something,
00977         // read only elements of the same dimension. NOTE: loop termination
00978         // criterion changes on both sides (max_dim can be changed in loop
00979         // body).
00980         for( int dim = 3; dim >= max_dim; --dim )
00981         {
00982             for( int i = 0; i < fileInfo->num_elem_desc; ++i )
00983             {
00984                 EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
00985                 if( CN::Dimension( type ) == dim )
00986                 {
00987                     debug_barrier();
00988                     dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
00989                     mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
00990                     Range ents;
00991                     rval = read_node_adj_elems( fileInfo->elems[i] );
00992                     mpe_event.end( rval );
00993                     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
00994                     if( !ents.empty() ) max_dim = 3;
00995                 }
00996             }
00997         }
00998     }
00999 
01000     if( cputime ) _times[GET_NODEADJ_TIME] = timer->time_elapsed();
01001     Range side_entities;
01002     if( side_mode != SM_EXPLICIT /*ELEMENTS=NODES || ELEMENTS=SIDES*/ )
01003     {
01004         if( 0 == max_dim ) max_dim = 4;
01005         // Now read any additional elements for which we've already read all
01006         // of the nodes.
01007         for( int dim = max_dim - 1; dim > 0; --dim )
01008         {
01009             for( int i = 0; i < fileInfo->num_elem_desc; ++i )
01010             {
01011                 EntityType type = CN::EntityTypeFromName( fileInfo->elems[i].type );
01012                 if( CN::Dimension( type ) == dim )
01013                 {
01014                     debug_barrier();
01015                     dbgOut.tprintf( 2, "    Reading node-adjacent elements for: %s\n", fileInfo->elems[i].handle );
01016                     mpe_event.start( "reading connectivity for ", fileInfo->elems[i].handle );
01017                     rval = read_node_adj_elems( fileInfo->elems[i], &side_entities );
01018                     mpe_event.end( rval );
01019                     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01020                 }
01021             }
01022         }
01023     }
01024 
01025     // We need to do this here for polyhedra to be handled correctly.
01026     // We have to wait until the faces are read in the above code block,
01027     // but need to create the connectivity before doing update_connectivity,
01028     // which might otherwise delete polyhedra faces.
01029     if( cputime ) _times[GET_SIDEELEM_TIME] = timer->time_elapsed();
01030 
01031     debug_barrier();
01032     dbgOut.tprint( 1, "UPDATING CONNECTIVITY ARRAYS FOR READ ELEMENTS\n" );
01033     mpe_event.start( "updating connectivity for elements read before vertices" );
01034     rval = update_connectivity();
01035     mpe_event.end();
01036     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01037 
01038     if( cputime ) _times[UPDATECONN_TIME] = timer->time_elapsed();
01039 
01040     dbgOut.tprint( 1, "READING ADJACENCIES\n" );
01041     for( int i = 0; i < fileInfo->num_elem_desc; ++i )
01042     {
01043         if (fileInfo->elems[i].have_adj  /*&&
01044         idMap.intersects(fileInfo->elems[i].desc.start_id, fileInfo->elems[i].desc.count) */)
01045         {
01046             mpe_event.start( "reading adjacencies for ", fileInfo->elems[i].handle );
01047             long len;
01048             hid_t th = mhdf_openAdjacency( filePtr, fileInfo->elems[i].handle, &len, &status );
01049             if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01050 
01051             rval = read_adjacencies( th, len );
01052             mhdf_closeData( filePtr, th, &status );
01053             mpe_event.end( rval );
01054             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01055         }
01056     }
01057 
01058     if( cputime ) _times[ADJACENCY_TIME] = timer->time_elapsed();
01059 
01060     // If doing ELEMENTS=SIDES then we need to delete any entities
01061     // that we read that aren't actually sides (e.g. an interior face
01062     // that connects two disjoint portions of the part). Both
01063     // update_connectivity and reading of any explicit adjacencies must
01064     // happen before this.
01065     if( side_mode == SM_SIDES )
01066     {
01067         debug_barrier();
01068         mpe_event.start( "cleaning up non-side lower-dim elements" );
01069         dbgOut.tprint( 1, "CHECKING FOR AND DELETING NON-SIDE ELEMENTS\n" );
01070         rval = delete_non_side_elements( side_entities );
01071         mpe_event.end( rval );
01072         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01073     }
01074 
01075     if( cputime ) _times[DELETE_NON_SIDEELEM_TIME] = timer->time_elapsed();
01076 
01077     debug_barrier();
01078     dbgOut.tprint( 1, "READING SETS\n" );
01079 
01080     // If reading contained/child sets but not their contents then find
01081     // them now. If we were also reading their contents we would
01082     // have found them already.
01083     if( content_mode == RSM_SETS || child_mode == RSM_SETS )
01084     {
01085         dbgOut.tprint( 1, "  doing read_set_ids_recursive\n" );
01086         mpe_event.start( "finding recursively contained sets" );
01087         rval = read_set_ids_recursive( sets, content_mode == RSM_SETS, child_mode == RSM_SETS );
01088         mpe_event.end( rval );
01089         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01090     }
01091 
01092     if( cputime ) _times[READ_SET_IDS_RECURS_TIME] = timer->time_elapsed();
01093 
01094     dbgOut.tprint( 1, "  doing find_sets_containing\n" );
01095     mpe_event.start( "finding sets containing any read entities" );
01096 
01097     // Decide whether to read set-containing parents
01098     bool read_set_containing_parents = true;
01099     std::string tmp_opt;
01100     rval = opts.get_option( "NO_SET_CONTAINING_PARENTS", tmp_opt );
01101     if( MB_SUCCESS == rval ) read_set_containing_parents = false;
01102 
01103     // Append file IDs of sets containing any of the nodes or elements
01104     // we've read up to this point.
01105     rval = find_sets_containing( sets, read_set_containing_parents );
01106     mpe_event.end( rval );
01107     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01108 
01109     if( cputime ) _times[FIND_SETS_CONTAINING_TIME] = timer->time_elapsed();
01110 
01111     // Now actually read all set data and instantiate sets in MOAB.
01112     // Get any contained sets out of file_ids.
01113     mpe_event.start( "reading set contents/parents/children" );
01114     EntityHandle first_set = fileInfo->sets.start_id;
01115     sets.merge( file_ids.lower_bound( first_set ), file_ids.lower_bound( first_set + fileInfo->sets.count ) );
01116     dbgOut.tprint( 1, "  doing read_sets\n" );
01117     rval = read_sets( sets );
01118     mpe_event.end( rval );
01119     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01120 
01121     if( cputime ) _times[READ_SETS_TIME] = timer->time_elapsed();
01122 
01123     dbgOut.tprint( 1, "READING TAGS\n" );
01124 
01125     for( int i = 0; i < fileInfo->num_tag_desc; ++i )
01126     {
01127         mpe_event.start( "reading tag: ", fileInfo->tags[i].name );
01128         rval = read_tag( i );
01129         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01130     }
01131 
01132     if( cputime ) _times[READ_TAGS_TIME] = timer->time_elapsed();
01133 
01134     dbgOut.tprint( 1, "PARTIAL READ COMPLETE.\n" );
01135 
01136     return MB_SUCCESS;
01137 }
01138 
01139 ErrorCode ReadHDF5::search_tag_values( int tag_index, const std::vector< int >& sorted_values, Range& file_ids,
01140                                        bool sets_only )
01141 {
01142     ErrorCode rval;
01143     mhdf_Status status;
01144     std::vector< EntityHandle >::iterator iter;
01145     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
01146     long size;
01147     long start_id;
01148 
01149     CHECK_OPEN_HANDLES;
01150 
01151     debug_barrier();
01152 
01153     // Do dense data
01154 
01155     hid_t table;
01156     const char* name;
01157     std::vector< EntityHandle > indices;
01158     // These are probably in order of dimension, so iterate
01159     // in reverse order to make Range insertions more efficient.
01160     std::vector< int > grp_indices( tag.dense_elem_indices, tag.dense_elem_indices + tag.num_dense_indices );
01161     for( std::vector< int >::reverse_iterator i = grp_indices.rbegin(); i != grp_indices.rend(); ++i )
01162     {
01163         int idx = *i;
01164         if( idx == -2 )
01165         {
01166             name     = mhdf_set_type_handle();
01167             start_id = fileInfo->sets.start_id;
01168         }
01169         else if( sets_only )
01170         {
01171             continue;
01172         }
01173         else if( idx == -1 )
01174         {
01175             name     = mhdf_node_type_handle();
01176             start_id = fileInfo->nodes.start_id;
01177         }
01178         else
01179         {
01180             if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01181             name     = fileInfo->elems[idx].handle;
01182             start_id = fileInfo->elems[idx].desc.start_id;
01183         }
01184         table = mhdf_openDenseTagData( filePtr, tag.name, name, &size, &status );
01185         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01186         rval = search_tag_values( table, size, sorted_values, indices );
01187         mhdf_closeData( filePtr, table, &status );
01188         if( MB_SUCCESS != rval || is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01189         // Convert from table indices to file IDs and add to result list
01190         std::sort( indices.begin(), indices.end(), std::greater< EntityHandle >() );
01191         std::transform( indices.begin(), indices.end(), range_inserter( file_ids ),
01192                         // std::bind1st(std::plus<long>(), start_id));
01193                         std::bind( std::plus< long >(), start_id, std::placeholders::_1 ) );
01194         indices.clear();
01195     }
01196 
01197     if( !tag.have_sparse ) return MB_SUCCESS;
01198 
01199     // Do sparse data
01200 
01201     hid_t tables[2];
01202     long junk;  // Redundant value for non-variable-length tags
01203     mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
01204     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01205     rval = search_tag_values( tables[1], size, sorted_values, indices );
01206     mhdf_closeData( filePtr, tables[1], &status );
01207     if( MB_SUCCESS != rval || is_error( status ) )
01208     {
01209         mhdf_closeData( filePtr, tables[0], &status );
01210         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01211     }
01212     // Convert to ranges
01213     std::sort( indices.begin(), indices.end() );
01214     std::vector< EntityHandle > ranges;
01215     iter = indices.begin();
01216     while( iter != indices.end() )
01217     {
01218         ranges.push_back( *iter );
01219         EntityHandle last = *iter;
01220         for( ++iter; iter != indices.end() && ( last + 1 ) == *iter; ++iter, ++last )
01221             ;
01222         ranges.push_back( last );
01223     }
01224     // Read file ids
01225     iter                 = ranges.begin();
01226     unsigned long offset = 0;
01227     while( iter != ranges.end() )
01228     {
01229         long begin = *iter;
01230         ++iter;
01231         long end = *iter;
01232         ++iter;
01233         mhdf_readSparseTagEntitiesWithOpt( tables[0], begin, end - begin + 1, handleType, &indices[offset], indepIO,
01234                                            &status );
01235         if( is_error( status ) )
01236         {
01237             mhdf_closeData( filePtr, tables[0], &status );
01238             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01239         }
01240         offset += end - begin + 1;
01241     }
01242     mhdf_closeData( filePtr, tables[0], &status );
01243     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01244     assert( offset == indices.size() );
01245     std::sort( indices.begin(), indices.end() );
01246 
01247     if( sets_only )
01248     {
01249         iter = std::lower_bound( indices.begin(), indices.end(),
01250                                  ( EntityHandle )( fileInfo->sets.start_id + fileInfo->sets.count ) );
01251         indices.erase( iter, indices.end() );
01252         iter = std::lower_bound( indices.begin(), indices.end(), fileInfo->sets.start_id );
01253         indices.erase( indices.begin(), iter );
01254     }
01255     copy_sorted_file_ids( &indices[0], indices.size(), file_ids );
01256 
01257     return MB_SUCCESS;
01258 }
01259 
01260 ErrorCode ReadHDF5::get_tagged_entities( int tag_index, Range& file_ids )
01261 {
01262     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
01263 
01264     CHECK_OPEN_HANDLES;
01265 
01266     // Do dense data
01267     Range::iterator hint = file_ids.begin();
01268     for( int i = 0; i < tag.num_dense_indices; ++i )
01269     {
01270         int idx = tag.dense_elem_indices[i];
01271         mhdf_EntDesc* ents;
01272         if( idx == -2 )
01273             ents = &fileInfo->sets;
01274         else if( idx == -1 )
01275             ents = &fileInfo->nodes;
01276         else
01277         {
01278             if( idx < 0 || idx >= fileInfo->num_elem_desc ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01279             ents = &( fileInfo->elems[idx].desc );
01280         }
01281 
01282         EntityHandle h = (EntityHandle)ents->start_id;
01283         hint           = file_ids.insert( hint, h, h + ents->count - 1 );
01284     }
01285 
01286     if( !tag.have_sparse ) return MB_SUCCESS;
01287 
01288     // Do sparse data
01289 
01290     mhdf_Status status;
01291     hid_t tables[2];
01292     long size, junk;
01293     mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
01294     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01295     mhdf_closeData( filePtr, tables[1], &status );
01296     if( is_error( status ) )
01297     {
01298         mhdf_closeData( filePtr, tables[0], &status );
01299         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01300     }
01301 
01302     hid_t file_type = H5Dget_type( tables[0] );
01303     if( file_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01304 
01305     hint                   = file_ids.begin();
01306     EntityHandle* buffer   = reinterpret_cast< EntityHandle* >( dataBuffer );
01307     const long buffer_size = bufferSize / std::max( sizeof( EntityHandle ), H5Tget_size( file_type ) );
01308     long remaining = size, offset = 0;
01309     while( remaining )
01310     {
01311         long count = std::min( buffer_size, remaining );
01312         assert_range( buffer, count );
01313         mhdf_readSparseTagEntitiesWithOpt( *tables, offset, count, file_type, buffer, collIO, &status );
01314         if( is_error( status ) )
01315         {
01316             H5Tclose( file_type );
01317             mhdf_closeData( filePtr, *tables, &status );
01318             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01319         }
01320         H5Tconvert( file_type, handleType, count, buffer, NULL, H5P_DEFAULT );
01321 
01322         std::sort( buffer, buffer + count );
01323         for( long i = 0; i < count; ++i )
01324             hint = file_ids.insert( hint, buffer[i], buffer[i] );
01325 
01326         remaining -= count;
01327         offset += count;
01328     }
01329 
01330     H5Tclose( file_type );
01331     mhdf_closeData( filePtr, *tables, &status );
01332     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01333 
01334     return MB_SUCCESS;
01335 }
01336 
01337 ErrorCode ReadHDF5::search_tag_values( hid_t tag_table, unsigned long table_size,
01338                                        const std::vector< int >& sorted_values,
01339                                        std::vector< EntityHandle >& value_indices )
01340 {
01341     debug_barrier();
01342 
01343     CHECK_OPEN_HANDLES;
01344 
01345     mhdf_Status status;
01346     size_t chunk_size = bufferSize / sizeof( int );
01347     int* buffer       = reinterpret_cast< int* >( dataBuffer );
01348     size_t remaining = table_size, offset = 0;
01349     while( remaining )
01350     {
01351         // Get a block of tag values
01352         size_t count = std::min( chunk_size, remaining );
01353         assert_range( buffer, count );
01354         mhdf_readTagValuesWithOpt( tag_table, offset, count, H5T_NATIVE_INT, buffer, collIO, &status );
01355         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01356 
01357         // Search tag values
01358         for( size_t i = 0; i < count; ++i )
01359             if( std::binary_search( sorted_values.begin(), sorted_values.end(), (int)buffer[i] ) )
01360                 value_indices.push_back( i + offset );
01361 
01362         offset += count;
01363         remaining -= count;
01364     }
01365 
01366     return MB_SUCCESS;
01367 }
01368 
01369 ErrorCode ReadHDF5::read_nodes( const Range& node_file_ids )
01370 {
01371     ErrorCode rval;
01372     mhdf_Status status;
01373     const int dim = fileInfo->nodes.vals_per_ent;
01374     Range range;
01375 
01376     CHECK_OPEN_HANDLES;
01377 
01378     if( node_file_ids.empty() && !nativeParallel ) return MB_SUCCESS;
01379 
01380     int cdim;
01381     rval = iFace->get_dimension( cdim );
01382     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01383 
01384     if( cdim < dim )
01385     {
01386         rval = iFace->set_dimension( dim );
01387         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01388     }
01389 
01390     hid_t data_id = mhdf_openNodeCoordsSimple( filePtr, &status );
01391     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01392 
01393     EntityHandle handle;
01394     std::vector< double* > arrays( dim );
01395     const size_t num_nodes = node_file_ids.size();
01396     if( num_nodes > 0 )
01397     {
01398         rval = readUtil->get_node_coords( dim, (int)num_nodes, 0, handle, arrays );
01399         if( MB_SUCCESS != rval )
01400         {
01401             mhdf_closeData( filePtr, data_id, &status );
01402             MB_SET_ERR( rval, "ReadHDF5 Failure" );
01403         }
01404     }
01405 
01406     if( blockedCoordinateIO )
01407     {
01408         try
01409         {
01410             for( int d = 0; d < dim; ++d )
01411             {
01412                 ReadHDF5Dataset reader( "blocked coords", data_id, nativeParallel, mpiComm, false );
01413                 reader.set_column( d );
01414                 reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, num_nodes, H5T_NATIVE_DOUBLE );
01415                 dbgOut.printf( 3, "Reading %lu chunks for coordinate dimension %d\n", reader.get_read_count(), d );
01416                 // Should normally only have one read call, unless sparse nature
01417                 // of file_ids caused reader to do something strange
01418                 size_t count, offset = 0;
01419                 int nn = 0;
01420                 while( !reader.done() )
01421                 {
01422                     dbgOut.printf( 3, "Reading chunk %d for dimension %d\n", ++nn, d );
01423                     reader.read( arrays[d] + offset, count );
01424                     offset += count;
01425                 }
01426                 if( offset != num_nodes )
01427                 {
01428                     mhdf_closeData( filePtr, data_id, &status );
01429                     assert( false );
01430                     return MB_FAILURE;
01431                 }
01432             }
01433         }
01434         catch( ReadHDF5Dataset::Exception )
01435         {
01436             mhdf_closeData( filePtr, data_id, &status );
01437             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01438         }
01439     }
01440     else
01441     {  // !blockedCoordinateIO
01442         double* buffer  = (double*)dataBuffer;
01443         long chunk_size = bufferSize / ( 3 * sizeof( double ) );
01444         long coffset    = 0;
01445         int nn          = 0;
01446         try
01447         {
01448             ReadHDF5Dataset reader( "interleaved coords", data_id, nativeParallel, mpiComm, false );
01449             reader.set_file_ids( node_file_ids, fileInfo->nodes.start_id, chunk_size, H5T_NATIVE_DOUBLE );
01450             dbgOut.printf( 3, "Reading %lu chunks for coordinate coordinates\n", reader.get_read_count() );
01451             while( !reader.done() )
01452             {
01453                 dbgOut.tprintf( 3, "Reading chunk %d of node coords\n", ++nn );
01454 
01455                 size_t count;
01456                 reader.read( buffer, count );
01457 
01458                 for( size_t i = 0; i < count; ++i )
01459                     for( int d = 0; d < dim; ++d )
01460                         arrays[d][coffset + i] = buffer[dim * i + d];
01461                 coffset += count;
01462             }
01463         }
01464         catch( ReadHDF5Dataset::Exception )
01465         {
01466             mhdf_closeData( filePtr, data_id, &status );
01467             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01468         }
01469     }
01470 
01471     dbgOut.print( 3, "Closing node coordinate table\n" );
01472     mhdf_closeData( filePtr, data_id, &status );
01473     for( int d = dim; d < cdim; ++d )
01474         memset( arrays[d], 0, num_nodes * sizeof( double ) );
01475 
01476     dbgOut.printf( 3, "Updating ID to handle map for %lu nodes\n", (unsigned long)node_file_ids.size() );
01477     return insert_in_id_map( node_file_ids, handle );
01478 }
01479 
01480 ErrorCode ReadHDF5::read_elems( int i )
01481 {
01482     Range ids;
01483     ids.insert( fileInfo->elems[i].desc.start_id,
01484                 fileInfo->elems[i].desc.start_id + fileInfo->elems[i].desc.count - 1 );
01485     return read_elems( i, ids );
01486 }
01487 
01488 ErrorCode ReadHDF5::read_elems( int i, const Range& file_ids, Range* node_ids )
01489 {
01490     if( fileInfo->elems[i].desc.vals_per_ent < 0 )
01491     {
01492         if( node_ids != 0 )  // Not implemented for version 3 format of poly data
01493             MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
01494         return read_poly( fileInfo->elems[i], file_ids );
01495     }
01496     else
01497         return read_elems( fileInfo->elems[i], file_ids, node_ids );
01498 }
01499 
01500 ErrorCode ReadHDF5::read_elems( const mhdf_ElemDesc& elems, const Range& file_ids, Range* node_ids )
01501 {
01502     CHECK_OPEN_HANDLES;
01503 
01504     debug_barrier();
01505     dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", elems.handle,
01506                     (unsigned long)file_ids.size(), (unsigned long)file_ids.psize() );
01507 
01508     ErrorCode rval = MB_SUCCESS;
01509     mhdf_Status status;
01510 
01511     EntityType type = CN::EntityTypeFromName( elems.type );
01512     if( type == MBMAXTYPE ) { MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" ); }
01513 
01514     const int nodes_per_elem = elems.desc.vals_per_ent;
01515     const size_t count       = file_ids.size();
01516     hid_t data_id            = mhdf_openConnectivitySimple( filePtr, elems.handle, &status );
01517     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01518 
01519     EntityHandle handle;
01520     EntityHandle* array = 0;
01521     if( count > 0 ) rval = readUtil->get_element_connect( count, nodes_per_elem, type, 0, handle, array );
01522     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01523 
01524     try
01525     {
01526         EntityHandle* buffer     = reinterpret_cast< EntityHandle* >( dataBuffer );
01527         const size_t buffer_size = bufferSize / ( sizeof( EntityHandle ) * nodes_per_elem );
01528         ReadHDF5Dataset reader( elems.handle, data_id, nativeParallel, mpiComm );
01529         reader.set_file_ids( file_ids, elems.desc.start_id, buffer_size, handleType );
01530         dbgOut.printf( 3, "Reading connectivity in %lu chunks for element group \"%s\"\n", reader.get_read_count(),
01531                        elems.handle );
01532         EntityHandle* iter = array;
01533         int nn             = 0;
01534         while( !reader.done() )
01535         {
01536             dbgOut.printf( 3, "Reading chunk %d for \"%s\"\n", ++nn, elems.handle );
01537 
01538             size_t num_read;
01539             reader.read( buffer, num_read );
01540             iter = std::copy( buffer, buffer + num_read * nodes_per_elem, iter );
01541 
01542             if( node_ids )
01543             {
01544                 std::sort( buffer, buffer + num_read * nodes_per_elem );
01545                 num_read = std::unique( buffer, buffer + num_read * nodes_per_elem ) - buffer;
01546                 copy_sorted_file_ids( buffer, num_read, *node_ids );
01547             }
01548         }
01549         assert( iter - array == (ptrdiff_t)count * nodes_per_elem );
01550     }
01551     catch( ReadHDF5Dataset::Exception )
01552     {
01553         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01554     }
01555 
01556     if( !node_ids )
01557     {
01558         rval = convert_id_to_handle( array, count * nodes_per_elem );
01559         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01560 
01561         rval = readUtil->update_adjacencies( handle, count, nodes_per_elem, array );
01562         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01563     }
01564     else
01565     {
01566         IDConnectivity t;
01567         t.handle         = handle;
01568         t.count          = count;
01569         t.nodes_per_elem = nodes_per_elem;
01570         t.array          = array;
01571         idConnectivityList.push_back( t );
01572     }
01573 
01574     return insert_in_id_map( file_ids, handle );
01575 }
01576 
01577 ErrorCode ReadHDF5::update_connectivity()
01578 {
01579     ErrorCode rval;
01580     std::vector< IDConnectivity >::iterator i;
01581     for( i = idConnectivityList.begin(); i != idConnectivityList.end(); ++i )
01582     {
01583         rval = convert_id_to_handle( i->array, i->count * i->nodes_per_elem );
01584         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01585 
01586         rval = readUtil->update_adjacencies( i->handle, i->count, i->nodes_per_elem, i->array );
01587         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01588     }
01589     idConnectivityList.clear();
01590 
01591     return MB_SUCCESS;
01592 }
01593 
01594 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, Range* handles_out )
01595 {
01596     mhdf_Status status;
01597     ErrorCode rval;
01598 
01599     CHECK_OPEN_HANDLES;
01600 
01601     hid_t table = mhdf_openConnectivitySimple( filePtr, group.handle, &status );
01602     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01603 
01604     rval = read_node_adj_elems( group, table, handles_out );
01605 
01606     mhdf_closeData( filePtr, table, &status );
01607     if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR_RET_VAL( "ReadHDF5 Failure", MB_FAILURE );
01608 
01609     return rval;
01610 }
01611 
01612 ErrorCode ReadHDF5::read_node_adj_elems( const mhdf_ElemDesc& group, hid_t table_handle, Range* handles_out )
01613 {
01614     CHECK_OPEN_HANDLES;
01615 
01616     debug_barrier();
01617 
01618     mhdf_Status status;
01619     ErrorCode rval;
01620     IODebugTrack debug_track( debugTrack, std::string( group.handle ) );
01621 
01622     // Copy data to local variables (makes other code clearer)
01623     const int node_per_elem = group.desc.vals_per_ent;
01624     long start_id           = group.desc.start_id;
01625     long remaining          = group.desc.count;
01626     const EntityType type   = CN::EntityTypeFromName( group.type );
01627 
01628     // Figure out how many elements we can read in each pass
01629     long* const buffer     = reinterpret_cast< long* >( dataBuffer );
01630     const long buffer_size = bufferSize / ( node_per_elem * sizeof( buffer[0] ) );
01631     // Read all element connectivity in buffer_size blocks
01632     long offset = 0;
01633     dbgOut.printf( 3, "Reading node-adjacent elements from \"%s\" in %ld chunks\n", group.handle,
01634                    ( remaining + buffer_size - 1 ) / buffer_size );
01635     int nn = 0;
01636     Range::iterator hint;
01637     if( handles_out ) hint = handles_out->begin();
01638     while( remaining )
01639     {
01640         dbgOut.printf( 3, "Reading chunk %d of connectivity data for \"%s\"\n", ++nn, group.handle );
01641 
01642         // Read a block of connectivity data
01643         const long count = std::min( remaining, buffer_size );
01644         debug_track.record_io( offset, count );
01645         assert_range( buffer, count * node_per_elem );
01646         mhdf_readConnectivityWithOpt( table_handle, offset, count, H5T_NATIVE_LONG, buffer, collIO, &status );
01647         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01648         offset += count;
01649         remaining -= count;
01650 
01651         // Count the number of elements in the block that we want,
01652         // zero connectivity for other elements
01653         long num_elem = 0;
01654         long* iter    = buffer;
01655         for( long i = 0; i < count; ++i )
01656         {
01657             for( int j = 0; j < node_per_elem; ++j )
01658             {
01659                 iter[j] = (long)idMap.find( iter[j] );
01660                 if( !iter[j] )
01661                 {
01662                     iter[0] = 0;
01663                     break;
01664                 }
01665             }
01666             if( iter[0] ) ++num_elem;
01667             iter += node_per_elem;
01668         }
01669 
01670         if( !num_elem )
01671         {
01672             start_id += count;
01673             continue;
01674         }
01675 
01676         // Create elements
01677         EntityHandle handle;
01678         EntityHandle* array;
01679         rval = readUtil->get_element_connect( (int)num_elem, node_per_elem, type, 0, handle, array );
01680         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01681 
01682         // Copy all non-zero connectivity values
01683         iter                = buffer;
01684         EntityHandle* iter2 = array;
01685         EntityHandle h      = handle;
01686         for( long i = 0; i < count; ++i )
01687         {
01688             if( !*iter )
01689             {
01690                 iter += node_per_elem;
01691                 continue;
01692             }
01693             if( !idMap.insert( start_id + i, h++, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01694 
01695             long* const end = iter + node_per_elem;
01696             for( ; iter != end; ++iter, ++iter2 )
01697                 *iter2 = (EntityHandle)*iter;
01698         }
01699         assert( iter2 - array == num_elem * node_per_elem );
01700         start_id += count;
01701 
01702         rval = readUtil->update_adjacencies( handle, num_elem, node_per_elem, array );
01703         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01704         if( handles_out ) hint = handles_out->insert( hint, handle, handle + num_elem - 1 );
01705     }
01706 
01707     debug_track.all_reduce();
01708     return MB_SUCCESS;
01709 }
01710 
01711 ErrorCode ReadHDF5::read_elems( int i, const Range& elems_in, Range& nodes )
01712 {
01713     CHECK_OPEN_HANDLES;
01714 
01715     debug_barrier();
01716     dbgOut.tprintf( 1, "READING %s CONNECTIVITY (%lu elems in %lu selects)\n", fileInfo->elems[i].handle,
01717                     (unsigned long)elems_in.size(), (unsigned long)elems_in.psize() );
01718 
01719     EntityHandle* const buffer = reinterpret_cast< EntityHandle* >( dataBuffer );
01720     const int node_per_elem    = fileInfo->elems[i].desc.vals_per_ent;
01721     const size_t buffer_size   = bufferSize / ( node_per_elem * sizeof( EntityHandle ) );
01722 
01723     if( elems_in.empty() ) return MB_SUCCESS;
01724 
01725     assert( (long)elems_in.front() >= fileInfo->elems[i].desc.start_id );
01726     assert( (long)elems_in.back() - fileInfo->elems[i].desc.start_id < fileInfo->elems[i].desc.count );
01727 
01728     // We don't support version 3 style poly element data
01729     if( fileInfo->elems[i].desc.vals_per_ent <= 0 ) MB_CHK_ERR( MB_TYPE_OUT_OF_RANGE );
01730 
01731     mhdf_Status status;
01732     hid_t table = mhdf_openConnectivitySimple( filePtr, fileInfo->elems[i].handle, &status );
01733     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01734 
01735     try
01736     {
01737         ReadHDF5Dataset reader( fileInfo->elems[i].handle, table, nativeParallel, mpiComm );
01738         reader.set_file_ids( elems_in, fileInfo->elems[i].desc.start_id, buffer_size, handleType );
01739         dbgOut.printf( 3, "Reading node list in %lu chunks for \"%s\"\n", reader.get_read_count(),
01740                        fileInfo->elems[i].handle );
01741         int nn = 0;
01742         while( !reader.done() )
01743         {
01744             dbgOut.printf( 3, "Reading chunk %d of \"%s\" connectivity\n", ++nn, fileInfo->elems[i].handle );
01745             size_t num_read;
01746             reader.read( buffer, num_read );
01747             std::sort( buffer, buffer + num_read * node_per_elem );
01748             num_read = std::unique( buffer, buffer + num_read * node_per_elem ) - buffer;
01749             copy_sorted_file_ids( buffer, num_read, nodes );
01750         }
01751     }
01752     catch( ReadHDF5Dataset::Exception )
01753     {
01754         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01755     }
01756 
01757     return MB_SUCCESS;
01758 }
01759 
01760 ErrorCode ReadHDF5::read_poly( const mhdf_ElemDesc& elems, const Range& file_ids )
01761 {
01762     class PolyReader : public ReadHDF5VarLen
01763     {
01764       private:
01765         const EntityType type;
01766         ReadHDF5* readHDF5;
01767 
01768       public:
01769         PolyReader( EntityType elem_type, void* buffer, size_t buffer_size, ReadHDF5* owner, DebugOutput& dbg )
01770             : ReadHDF5VarLen( dbg, buffer, buffer_size ), type( elem_type ), readHDF5( owner )
01771         {
01772         }
01773         virtual ~PolyReader() {}
01774         ErrorCode store_data( EntityHandle file_id, void* data, long len, bool )
01775         {
01776             size_t valid;
01777             EntityHandle* conn = reinterpret_cast< EntityHandle* >( data );
01778             readHDF5->convert_id_to_handle( conn, len, valid );
01779             if( valid != (size_t)len ) MB_CHK_ERR( MB_ENTITY_NOT_FOUND );
01780             EntityHandle handle;
01781             ErrorCode rval = readHDF5->moab()->create_element( type, conn, len, handle );
01782             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01783 
01784             rval = readHDF5->insert_in_id_map( file_id, handle );
01785             return rval;
01786         }
01787     };
01788 
01789     CHECK_OPEN_HANDLES;
01790 
01791     debug_barrier();
01792 
01793     EntityType type = CN::EntityTypeFromName( elems.type );
01794     if( type == MBMAXTYPE ) { MB_SET_ERR( MB_FAILURE, "Unknown element type: \"" << elems.type << "\"" ); }
01795 
01796     hid_t handles[2];
01797     mhdf_Status status;
01798     long num_poly, num_conn, first_id;
01799     mhdf_openPolyConnectivity( filePtr, elems.handle, &num_poly, &num_conn, &first_id, handles, &status );
01800     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01801 
01802     std::string nm( elems.handle );
01803     ReadHDF5Dataset offset_reader( ( nm + " offsets" ).c_str(), handles[0], nativeParallel, mpiComm, true );
01804     ReadHDF5Dataset connect_reader( ( nm + " data" ).c_str(), handles[1], nativeParallel, mpiComm, true );
01805 
01806     PolyReader tool( type, dataBuffer, bufferSize, this, dbgOut );
01807     return tool.read( offset_reader, connect_reader, file_ids, first_id, handleType );
01808 }
01809 
01810 ErrorCode ReadHDF5::delete_non_side_elements( const Range& side_ents )
01811 {
01812     ErrorCode rval;
01813 
01814     // Build list of entities that we need to find the sides of
01815     Range explicit_ents;
01816     Range::iterator hint = explicit_ents.begin();
01817     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
01818     {
01819         EntityHandle start = i->value;
01820         EntityHandle end   = i->value + i->count - 1;
01821         EntityType type    = TYPE_FROM_HANDLE( start );
01822         assert( type == TYPE_FROM_HANDLE( end ) );  // Otherwise handle space entirely full!!
01823         if( type != MBVERTEX && type != MBENTITYSET ) hint = explicit_ents.insert( hint, start, end );
01824     }
01825     explicit_ents = subtract( explicit_ents, side_ents );
01826 
01827     // Figure out which entities we want to delete
01828     Range dead_ents( side_ents );
01829     Range::iterator ds, de, es;
01830     ds = dead_ents.lower_bound( CN::TypeDimensionMap[1].first );
01831     de = dead_ents.lower_bound( CN::TypeDimensionMap[2].first, ds );
01832     if( ds != de )
01833     {
01834         // Get subset of explicit ents of dimension greater than 1
01835         es = explicit_ents.lower_bound( CN::TypeDimensionMap[2].first );
01836         Range subset, adj;
01837         subset.insert( es, explicit_ents.end() );
01838         rval = iFace->get_adjacencies( subset, 1, false, adj, Interface::UNION );
01839         if( MB_SUCCESS != rval ) return rval;
01840         dead_ents = subtract( dead_ents, adj );
01841     }
01842     ds = dead_ents.lower_bound( CN::TypeDimensionMap[2].first );
01843     de = dead_ents.lower_bound( CN::TypeDimensionMap[3].first, ds );
01844     assert( de == dead_ents.end() );
01845     if( ds != de )
01846     {
01847         // Get subset of explicit ents of dimension 3
01848         es = explicit_ents.lower_bound( CN::TypeDimensionMap[3].first );
01849         Range subset, adj;
01850         subset.insert( es, explicit_ents.end() );
01851         rval = iFace->get_adjacencies( subset, 2, false, adj, Interface::UNION );
01852         if( MB_SUCCESS != rval ) return rval;
01853         dead_ents = subtract( dead_ents, adj );
01854     }
01855 
01856     // Now delete anything remaining in dead_ents
01857     dbgOut.printf( 2, "Deleting %lu elements\n", (unsigned long)dead_ents.size() );
01858     dbgOut.print( 4, "\tDead entities: ", dead_ents );
01859     rval = iFace->delete_entities( dead_ents );
01860     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01861 
01862     // Remove dead entities from ID map
01863     while( !dead_ents.empty() )
01864     {
01865         EntityHandle start = dead_ents.front();
01866         EntityID count     = dead_ents.const_pair_begin()->second - start + 1;
01867         IDMap::iterator rit;
01868         for( rit = idMap.begin(); rit != idMap.end(); ++rit )
01869             if( rit->value <= start && ( EntityID )( start - rit->value ) < rit->count ) break;
01870         if( rit == idMap.end() ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01871 
01872         EntityID offset = start - rit->value;
01873         EntityID avail  = rit->count - offset;
01874         if( avail < count ) count = avail;
01875 
01876         dead_ents.erase( dead_ents.begin(), dead_ents.begin() + count );
01877         idMap.erase( rit->begin + offset, count );
01878     }
01879 
01880     return MB_SUCCESS;
01881 }
01882 
01883 ErrorCode ReadHDF5::read_sets( const Range& file_ids )
01884 {
01885     CHECK_OPEN_HANDLES;
01886 
01887     debug_barrier();
01888 
01889     mhdf_Status status;
01890     ErrorCode rval;
01891 
01892     const size_t num_sets = fileInfo->sets.count;
01893     if( !num_sets )  // If no sets at all!
01894         return MB_SUCCESS;
01895 
01896     // Create sets
01897     std::vector< unsigned > flags( file_ids.size() );
01898     Range::iterator si = file_ids.begin();
01899     for( size_t i = 0; i < flags.size(); ++i, ++si )
01900         flags[i] = setMeta[*si - fileInfo->sets.start_id][3] & ~(long)mhdf_SET_RANGE_BIT;
01901     EntityHandle start_handle;
01902     // the files ids could be empty, for empty partitions
01903     if( !file_ids.empty() )
01904     {
01905         rval = readUtil->create_entity_sets( flags.size(), &flags[0], 0, start_handle );
01906         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01907         rval = insert_in_id_map( file_ids, start_handle );
01908         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01909     }
01910 
01911     // Read contents
01912     if( fileInfo->have_set_contents )
01913     {
01914         long len     = 0;
01915         hid_t handle = mhdf_openSetData( filePtr, &len, &status );
01916         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01917 
01918         ReadHDF5Dataset dat( "set contents", handle, nativeParallel, mpiComm, true );
01919         rval = read_set_data( file_ids, start_handle, dat, CONTENT );
01920         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01921     }
01922 
01923     // Read set child lists
01924     if( fileInfo->have_set_children )
01925     {
01926         long len     = 0;
01927         hid_t handle = mhdf_openSetChildren( filePtr, &len, &status );
01928         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01929 
01930         ReadHDF5Dataset dat( "set children", handle, nativeParallel, mpiComm, true );
01931         rval = read_set_data( file_ids, start_handle, dat, CHILD );
01932         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01933     }
01934 
01935     // Read set parent lists
01936     if( fileInfo->have_set_parents )
01937     {
01938         long len     = 0;
01939         hid_t handle = mhdf_openSetParents( filePtr, &len, &status );
01940         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01941 
01942         ReadHDF5Dataset dat( "set parents", handle, nativeParallel, mpiComm, true );
01943         rval = read_set_data( file_ids, start_handle, dat, PARENT );
01944         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
01945     }
01946 
01947     return MB_SUCCESS;
01948 }
01949 
01950 ErrorCode ReadHDF5::read_all_set_meta()
01951 {
01952     CHECK_OPEN_HANDLES;
01953 
01954     assert( !setMeta );
01955     const long num_sets = fileInfo->sets.count;
01956     if( !num_sets ) return MB_SUCCESS;
01957 
01958     mhdf_Status status;
01959     hid_t handle = mhdf_openSetMetaSimple( filePtr, &status );
01960     if( is_error( status ) ) { MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" ); }
01961 
01962     // Allocate extra space if we need it for data conversion
01963     hid_t meta_type = H5Dget_type( handle );
01964     size_t size     = H5Tget_size( meta_type );
01965     if( size > sizeof( long ) )
01966         setMeta = new long[( num_sets * size + ( sizeof( long ) - 1 ) ) / sizeof( long )][4];
01967     else
01968         setMeta = new long[num_sets][4];
01969 
01970     // Set some parameters based on whether or not each proc reads the
01971     // table or only the root reads it and bcasts it to the others
01972     int rank     = 0;
01973     bool bcast   = false;
01974     hid_t ioprop = H5P_DEFAULT;
01975 #ifdef MOAB_HAVE_MPI
01976     MPI_Comm comm = 0;
01977     if( nativeParallel )
01978     {
01979         rank  = myPcomm->proc_config().proc_rank();
01980         comm  = myPcomm->proc_config().proc_comm();
01981         bcast = bcastDuplicateReads;
01982         if( !bcast ) ioprop = collIO;
01983     }
01984 #endif
01985 
01986     if( !bcast || 0 == rank )
01987     {
01988         mhdf_readSetMetaWithOpt( handle, 0, num_sets, meta_type, setMeta, ioprop, &status );
01989         if( is_error( status ) )
01990         {
01991             H5Tclose( meta_type );
01992             mhdf_closeData( filePtr, handle, &status );
01993             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
01994         }
01995 
01996         H5Tconvert( meta_type, H5T_NATIVE_LONG, num_sets * 4, setMeta, 0, H5P_DEFAULT );
01997     }
01998     mhdf_closeData( filePtr, handle, &status );
01999     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02000     H5Tclose( meta_type );
02001 
02002     if( bcast )
02003     {
02004 #ifdef MOAB_HAVE_MPI
02005         int ierr = MPI_Bcast( (void*)setMeta, num_sets * 4, MPI_LONG, 0, comm );
02006         if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02007 #else
02008         assert( rank == 0 );              // If not MPI, then only one proc
02009 #endif
02010     }
02011 
02012     return MB_SUCCESS;
02013 }
02014 
02015 ErrorCode ReadHDF5::read_set_ids_recursive( Range& sets_in_out, bool contained_sets, bool child_sets )
02016 {
02017     CHECK_OPEN_HANDLES;
02018     mhdf_Status status;
02019 
02020     if( !fileInfo->have_set_children ) child_sets = false;
02021     if( !fileInfo->have_set_contents ) contained_sets = false;
02022     if( !child_sets && !contained_sets ) return MB_SUCCESS;
02023 
02024     // Open data tables
02025     if( fileInfo->sets.count == 0 )
02026     {
02027         assert( sets_in_out.empty() );
02028         return MB_SUCCESS;
02029     }
02030 
02031     if( !contained_sets && !child_sets ) return MB_SUCCESS;
02032 
02033     ReadHDF5Dataset cont( "set contents", false, mpiComm );
02034     ReadHDF5Dataset child( "set children", false, mpiComm );
02035 
02036     if( contained_sets )
02037     {
02038         long content_len     = 0;
02039         hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
02040         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02041         try
02042         {
02043             cont.init( content_handle, true );
02044         }
02045         catch( ReadHDF5Dataset::Exception )
02046         {
02047             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02048         }
02049     }
02050 
02051     if( child_sets )
02052     {
02053         long child_len     = 0;
02054         hid_t child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
02055         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02056         try
02057         {
02058             child.init( child_handle, true );
02059         }
02060         catch( ReadHDF5Dataset::Exception )
02061         {
02062             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02063         }
02064     }
02065 
02066     ErrorCode rval = MB_SUCCESS;
02067     Range children, new_children( sets_in_out );
02068     int iteration_count = 0;
02069     do
02070     {
02071         ++iteration_count;
02072         dbgOut.tprintf( 2, "Iteration %d of read_set_ids_recursive\n", iteration_count );
02073         children.clear();
02074         if( child_sets )
02075         {
02076             rval = read_set_data( new_children, 0, child, CHILD, &children );
02077             if( MB_SUCCESS != rval ) break;
02078         }
02079         if( contained_sets )
02080         {
02081             rval = read_set_data( new_children, 0, cont, CONTENT, &children );
02082             // Remove any non-set values
02083             Range::iterator it = children.lower_bound( fileInfo->sets.start_id );
02084             children.erase( children.begin(), it );
02085             it = children.lower_bound( fileInfo->sets.start_id + fileInfo->sets.count );
02086             children.erase( it, children.end() );
02087             if( MB_SUCCESS != rval ) break;
02088         }
02089         new_children = subtract( children, sets_in_out );
02090         dbgOut.print_ints( 2, "Adding additional contained/child sets", new_children );
02091         sets_in_out.merge( new_children );
02092     } while( !new_children.empty() );
02093 
02094     return MB_SUCCESS;
02095 }
02096 
02097 ErrorCode ReadHDF5::find_sets_containing( Range& sets_out, bool read_set_containing_parents )
02098 {
02099     ErrorCode rval;
02100     mhdf_Status status;
02101 
02102     CHECK_OPEN_HANDLES;
02103 
02104     if( !fileInfo->have_set_contents ) return MB_SUCCESS;
02105     assert( fileInfo->sets.count );
02106 
02107     // Open data tables
02108     long content_len     = 0;
02109     hid_t content_handle = mhdf_openSetData( filePtr, &content_len, &status );
02110     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02111 
02112     hid_t data_type = H5Dget_type( content_handle );
02113 
02114     rval = find_sets_containing( content_handle, data_type, content_len, read_set_containing_parents, sets_out );
02115 
02116     H5Tclose( data_type );
02117 
02118     mhdf_closeData( filePtr, content_handle, &status );
02119     if( MB_SUCCESS == rval && is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02120 
02121     return rval;
02122 }
02123 
02124 static bool set_map_intersect( bool ranged, const long* contents, int content_len,
02125                                const RangeMap< long, EntityHandle >& id_map )
02126 {
02127     if( ranged )
02128     {
02129         if( !content_len || id_map.empty() ) return false;
02130 
02131         const long* j         = contents;
02132         const long* const end = contents + content_len;
02133         assert( content_len % 2 == 0 );
02134         while( j != end )
02135         {
02136             long start = *( j++ );
02137             long count = *( j++ );
02138             if( id_map.intersects( start, count ) ) return true;
02139         }
02140     }
02141     else
02142     {
02143         const long* const end = contents + content_len;
02144         for( const long* i = contents; i != end; ++i )
02145             if( id_map.exists( *i ) ) return true;
02146     }
02147 
02148     return false;
02149 }
02150 
02151 struct SetContOffComp
02152 {
02153     bool operator()( const long a1[4], const long a2[4] )
02154     {
02155         return a1[ReadHDF5::CONTENT] < a2[0];
02156     }
02157 };
02158 
02159 ErrorCode ReadHDF5::find_sets_containing( hid_t contents_handle, hid_t content_type, long contents_len,
02160                                           bool read_set_containing_parents, Range& file_ids )
02161 {
02162     CHECK_OPEN_HANDLES;
02163 
02164     // Scan all set contents data
02165 
02166     const size_t content_size = H5Tget_size( content_type );
02167     const long num_sets       = fileInfo->sets.count;
02168     dbgOut.printf( 2, "Searching contents of %ld\n", num_sets );
02169     mhdf_Status status;
02170 
02171     int rank   = 0;
02172     bool bcast = false;
02173 #ifdef MOAB_HAVE_MPI
02174     MPI_Comm comm = 0;
02175     if( nativeParallel )
02176     {
02177         rank  = myPcomm->proc_config().proc_rank();
02178         comm  = myPcomm->proc_config().proc_comm();
02179         bcast = bcastDuplicateReads;
02180     }
02181 #endif
02182 
02183     // Check offsets so that we don't read past end of table or
02184     // walk off end of array.
02185     long prev = -1;
02186     for( long i = 0; i < num_sets; ++i )
02187     {
02188         if( setMeta[i][CONTENT] < prev )
02189         {
02190             std::cerr << "Invalid data in set contents offsets at position " << i << ": index " << setMeta[i][CONTENT]
02191                       << " is less than previous index " << prev << std::endl;
02192             std::cerr.flush();
02193             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02194         }
02195         prev = setMeta[i][CONTENT];
02196     }
02197     if( setMeta[num_sets - 1][CONTENT] >= contents_len )
02198     {
02199         std::cerr << "Maximum set content index " << setMeta[num_sets - 1][CONTENT]
02200                   << " exceeds contents table length of " << contents_len << std::endl;
02201         std::cerr.flush();
02202         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02203     }
02204 
02205     // Set up buffer for reading set contents
02206     long* const content_buffer = (long*)dataBuffer;
02207     const long content_len     = bufferSize / std::max( content_size, sizeof( long ) );
02208 
02209     // Scan set table
02210     Range::iterator hint = file_ids.begin();
02211     Range tmp_range;
02212     long prev_idx    = -1;
02213     int mm           = 0;
02214     long sets_offset = 0;
02215     long temp_content[4];
02216     while( sets_offset < num_sets )
02217     {
02218         temp_content[0] = content_len + prev_idx;
02219         long sets_count =
02220             std::lower_bound( setMeta + sets_offset, setMeta + num_sets, temp_content, SetContOffComp() ) - setMeta -
02221             sets_offset;
02222         assert( sets_count >= 0 && sets_offset + sets_count <= num_sets );
02223         if( !sets_count )
02224         {  // Contents of single set don't fit in buffer
02225             long content_remaining = setMeta[sets_offset][CONTENT] - prev_idx;
02226             long content_offset    = prev_idx + 1;
02227             while( content_remaining )
02228             {
02229                 long content_count = content_len < content_remaining ? 2 * ( content_len / 2 ) : content_remaining;
02230                 assert_range( content_buffer, content_count );
02231                 dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, content_count );
02232                 if( !bcast || 0 == rank )
02233                 {
02234                     if( !bcast )
02235                         mhdf_readSetDataWithOpt( contents_handle, content_offset, content_count, content_type,
02236                                                  content_buffer, collIO, &status );
02237                     else
02238                         mhdf_readSetData( contents_handle, content_offset, content_count, content_type, content_buffer,
02239                                           &status );
02240                     if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02241 
02242                     H5Tconvert( content_type, H5T_NATIVE_LONG, content_count, content_buffer, 0, H5P_DEFAULT );
02243                 }
02244                 if( bcast )
02245                 {
02246 #ifdef MOAB_HAVE_MPI
02247                     int ierr = MPI_Bcast( content_buffer, content_count, MPI_LONG, 0, comm );
02248                     if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02249 #else
02250                     assert( rank == 0 );  // If not MPI, then only one proc
02251 #endif
02252                 }
02253 
02254                 if( read_set_containing_parents )
02255                 {
02256                     tmp_range.clear();
02257                     if( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT )
02258                         tmp_range.insert( *content_buffer, *( content_buffer + 1 ) );
02259                     else
02260                         std::copy( content_buffer, content_buffer + content_count, range_inserter( tmp_range ) );
02261                     tmp_range = intersect( tmp_range, file_ids );
02262                 }
02263 
02264                 if( !tmp_range.empty() || set_map_intersect( setMeta[sets_offset][3] & mhdf_SET_RANGE_BIT,
02265                                                              content_buffer, content_count, idMap ) )
02266                 {
02267                     long id = fileInfo->sets.start_id + sets_offset;
02268                     hint    = file_ids.insert( hint, id, id );
02269                     if( !nativeParallel )  // Don't stop if doing READ_PART because we need to read
02270                                            // collectively
02271                         break;
02272                 }
02273                 content_remaining -= content_count;
02274                 content_offset += content_count;
02275             }
02276             prev_idx   = setMeta[sets_offset][CONTENT];
02277             sets_count = 1;
02278         }
02279         else if( long read_num = setMeta[sets_offset + sets_count - 1][CONTENT] - prev_idx )
02280         {
02281             assert( sets_count > 0 );
02282             assert_range( content_buffer, read_num );
02283             dbgOut.printf( 3, "Reading chunk %d (%ld values) from set contents table\n", ++mm, read_num );
02284             if( !bcast || 0 == rank )
02285             {
02286                 if( !bcast )
02287                     mhdf_readSetDataWithOpt( contents_handle, prev_idx + 1, read_num, content_type, content_buffer,
02288                                              collIO, &status );
02289                 else
02290                     mhdf_readSetData( contents_handle, prev_idx + 1, read_num, content_type, content_buffer, &status );
02291                 if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02292 
02293                 H5Tconvert( content_type, H5T_NATIVE_LONG, read_num, content_buffer, 0, H5P_DEFAULT );
02294             }
02295             if( bcast )
02296             {
02297 #ifdef MOAB_HAVE_MPI
02298                 int ierr = MPI_Bcast( content_buffer, read_num, MPI_LONG, 0, comm );
02299                 if( MPI_SUCCESS != ierr ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02300 #else
02301                 assert( rank == 0 );      // If not MPI, then only one proc
02302 #endif
02303             }
02304 
02305             long* buff_iter = content_buffer;
02306             for( long i = 0; i < sets_count; ++i )
02307             {
02308                 long set_size = setMeta[i + sets_offset][CONTENT] - prev_idx;
02309                 prev_idx += set_size;
02310 
02311                 // Check whether contents include set already being loaded
02312                 if( read_set_containing_parents )
02313                 {
02314                     tmp_range.clear();
02315                     if( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT )
02316                     {
02317                         // put in tmp_range the contents on the set
02318                         // file_ids contain at this points only other sets
02319                         const long* j         = buff_iter;
02320                         const long* const end = buff_iter + set_size;
02321                         assert( set_size % 2 == 0 );
02322                         while( j != end )
02323                         {
02324                             long start = *( j++ );
02325                             long count = *( j++ );
02326                             tmp_range.insert( start, start + count - 1 );
02327                         }
02328                     }
02329                     else
02330                         std::copy( buff_iter, buff_iter + set_size, range_inserter( tmp_range ) );
02331                     tmp_range = intersect( tmp_range, file_ids );
02332                 }
02333 
02334                 if( !tmp_range.empty() ||
02335                     set_map_intersect( setMeta[sets_offset + i][3] & mhdf_SET_RANGE_BIT, buff_iter, set_size, idMap ) )
02336                 {
02337                     long id = fileInfo->sets.start_id + sets_offset + i;
02338                     hint    = file_ids.insert( hint, id, id );
02339                 }
02340                 buff_iter += set_size;
02341             }
02342         }
02343 
02344         sets_offset += sets_count;
02345     }
02346 
02347     return MB_SUCCESS;
02348 }
02349 
02350 static Range::iterator copy_set_contents( Range::iterator hint, int ranged, EntityHandle* contents, long length,
02351                                           Range& results )
02352 {
02353     if( ranged )
02354     {
02355         assert( length % 2 == 0 );
02356         for( long i = 0; i < length; i += 2 )
02357             hint = results.insert( hint, contents[i], contents[i] + contents[i + 1] - 1 );
02358     }
02359     else
02360     {
02361         std::sort( contents, contents + length );
02362         for( long i = 0; i < length; ++i )
02363             hint = results.insert( hint, contents[i] );
02364     }
02365     return hint;
02366 }
02367 
02368 ErrorCode ReadHDF5::read_set_data( const Range& set_file_ids, EntityHandle start_handle, ReadHDF5Dataset& data,
02369                                    SetMode mode, Range* file_ids_out )
02370 {
02371     ErrorCode rval;
02372     Range::const_pair_iterator pi;
02373     Range::iterator out_hint;
02374     if( file_ids_out ) out_hint = file_ids_out->begin();
02375 
02376     // Construct range of offsets into data table at which to read
02377     // Note: all offsets are incremented by TWEAK because Range cannot
02378     // store zeros.
02379     const long TWEAK = 1;
02380     Range data_offsets;
02381     Range::iterator hint = data_offsets.begin();
02382     pi                   = set_file_ids.const_pair_begin();
02383     if( (long)pi->first == fileInfo->sets.start_id )
02384     {
02385         long second = pi->second - fileInfo->sets.start_id;
02386         if( setMeta[second][mode] >= 0 ) hint = data_offsets.insert( hint, TWEAK, setMeta[second][mode] + TWEAK );
02387         ++pi;
02388     }
02389     for( ; pi != set_file_ids.const_pair_end(); ++pi )
02390     {
02391         long first  = pi->first - fileInfo->sets.start_id;
02392         long second = pi->second - fileInfo->sets.start_id;
02393         long idx1   = setMeta[first - 1][mode] + 1;
02394         long idx2   = setMeta[second][mode];
02395         if( idx2 >= idx1 ) hint = data_offsets.insert( hint, idx1 + TWEAK, idx2 + TWEAK );
02396     }
02397     try
02398     {
02399         data.set_file_ids( data_offsets, TWEAK, bufferSize / sizeof( EntityHandle ), handleType );
02400     }
02401     catch( ReadHDF5Dataset::Exception )
02402     {
02403         return MB_FAILURE;
02404     }
02405 
02406     // We need to increment this for each processed set because
02407     // the sets were created in the order of the ids in file_ids.
02408     EntityHandle h = start_handle;
02409 
02410     const long ranged_flag = ( mode == CONTENT ) ? mhdf_SET_RANGE_BIT : 0;
02411 
02412     std::vector< EntityHandle > partial;  // For when we read only part of the contents of a set/entity
02413     Range::const_iterator fileid_iter = set_file_ids.begin();
02414     EntityHandle* buffer              = reinterpret_cast< EntityHandle* >( dataBuffer );
02415     size_t count, offset;
02416 
02417     int nn = 0;
02418     /*
02419     #ifdef  MOAB_HAVE_MPI
02420       if (nativeParallel && mode==CONTENT && myPcomm->proc_config().proc_size()>1 &&
02421     data_offsets.empty())
02422       {
02423         MB_SET_ERR_CONT( "ReadHDF5 Failure: Attempt reading an empty dataset on proc " <<
02424             myPcomm->proc_config().proc_rank());
02425         MPI_Abort(myPcomm->proc_config().proc_comm(), 1);
02426       }
02427     #endif
02428     */
02429     if( ( 1 >= set_file_ids.size() ) && ( data.done() ) && moab::ReadHDF5::CONTENT == mode )
02430         // do at least one null read, it is needed in parallel
02431         data.null_read();
02432 
02433     while( !data.done() )
02434     {
02435         dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data.get_debug_desc() );
02436         try
02437         {
02438             data.read( buffer, count );
02439         }
02440         catch( ReadHDF5Dataset::Exception )
02441         {
02442             return MB_FAILURE;
02443         }
02444 
02445         // Assert not appropriate here - I might have treated all my file ids, but maybe
02446         // another proc hasn't; for me, count will be zero, so I won't do anything, but
02447         // I still need to go through the motions to make the read work
02448 
02449         // Handle 'special' case where we read some, but not all
02450         // of the data for an entity during the last iteration.
02451         offset = 0;
02452         if( !partial.empty() )
02453         {  // Didn't read all of previous entity
02454             assert( fileid_iter != set_file_ids.end() );
02455             size_t num_prev = partial.size();
02456             size_t idx      = *fileid_iter - fileInfo->sets.start_id;
02457             size_t len      = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
02458             offset          = len - num_prev;
02459             if( offset > count )
02460             {  // Still don't have all
02461                 partial.insert( partial.end(), buffer, buffer + count );
02462                 continue;
02463             }
02464 
02465             partial.insert( partial.end(), buffer, buffer + offset );
02466             if( file_ids_out )
02467             {
02468                 out_hint = copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, &partial[0], partial.size(),
02469                                               *file_ids_out );
02470             }
02471             else
02472             {
02473                 switch( mode )
02474                 {
02475                     size_t valid;
02476                     case CONTENT:
02477                         if( setMeta[idx][3] & ranged_flag )
02478                         {
02479                             if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
02480                             Range range;
02481                             convert_range_to_handle( &partial[0], len / 2, range );
02482                             rval = moab()->add_entities( h, range );
02483                         }
02484                         else
02485                         {
02486                             convert_id_to_handle( &partial[0], len, valid );
02487                             rval = moab()->add_entities( h, &partial[0], valid );
02488                         }
02489                         break;
02490                     case CHILD:
02491                         convert_id_to_handle( &partial[0], len, valid );
02492                         rval = moab()->add_child_meshsets( h, &partial[0], valid );
02493                         break;
02494                     case PARENT:
02495                         convert_id_to_handle( &partial[0], len, valid );
02496                         rval = moab()->add_parent_meshsets( h, &partial[0], valid );
02497                         break;
02498                     default:
02499                         break;
02500                 }
02501                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02502             }
02503 
02504             ++fileid_iter;
02505             ++h;
02506             partial.clear();
02507         }
02508 
02509         // Process contents for all entities for which we
02510         // have read the complete list
02511         while( offset < count )
02512         {
02513             assert( fileid_iter != set_file_ids.end() );
02514             size_t idx = *fileid_iter - fileInfo->sets.start_id;
02515             size_t len = idx ? setMeta[idx][mode] - setMeta[idx - 1][mode] : setMeta[idx][mode] + 1;
02516             // If we did not read all of the final entity,
02517             // store what we did read to be processed in the
02518             // next iteration
02519             if( offset + len > count )
02520             {
02521                 partial.insert( partial.end(), buffer + offset, buffer + count );
02522                 break;
02523             }
02524 
02525             if( file_ids_out )
02526             {
02527                 out_hint =
02528                     copy_set_contents( out_hint, setMeta[idx][3] & ranged_flag, buffer + offset, len, *file_ids_out );
02529             }
02530             else
02531             {
02532                 switch( mode )
02533                 {
02534                     size_t valid;
02535                     case CONTENT:
02536                         if( setMeta[idx][3] & ranged_flag )
02537                         {
02538                             if( len % 2 ) MB_CHK_ERR( MB_INDEX_OUT_OF_RANGE );
02539                             Range range;
02540                             convert_range_to_handle( buffer + offset, len / 2, range );
02541                             rval = moab()->add_entities( h, range );
02542                         }
02543                         else
02544                         {
02545                             convert_id_to_handle( buffer + offset, len, valid );
02546                             rval = moab()->add_entities( h, buffer + offset, valid );
02547                         }
02548                         break;
02549                     case CHILD:
02550                         convert_id_to_handle( buffer + offset, len, valid );
02551                         rval = moab()->add_child_meshsets( h, buffer + offset, valid );
02552                         break;
02553                     case PARENT:
02554                         convert_id_to_handle( buffer + offset, len, valid );
02555                         rval = moab()->add_parent_meshsets( h, buffer + offset, valid );
02556                         break;
02557                     default:
02558                         break;
02559                 }
02560                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02561             }
02562 
02563             ++fileid_iter;
02564             ++h;
02565             offset += len;
02566         }
02567     }
02568 
02569     return MB_SUCCESS;
02570 }
02571 
02572 ErrorCode ReadHDF5::get_set_contents( const Range& sets, Range& file_ids )
02573 {
02574     CHECK_OPEN_HANDLES;
02575 
02576     if( !fileInfo->have_set_contents ) return MB_SUCCESS;
02577     dbgOut.tprint( 2, "Reading set contained file IDs\n" );
02578     try
02579     {
02580         mhdf_Status status;
02581         long content_len;
02582         hid_t contents = mhdf_openSetData( filePtr, &content_len, &status );
02583         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02584         ReadHDF5Dataset data( "set contents", contents, nativeParallel, mpiComm, true );
02585 
02586         return read_set_data( sets, 0, data, CONTENT, &file_ids );
02587     }
02588     catch( ReadHDF5Dataset::Exception )
02589     {
02590         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02591     }
02592 }
02593 
02594 ErrorCode ReadHDF5::read_adjacencies( hid_t table, long table_len )
02595 {
02596     CHECK_OPEN_HANDLES;
02597 
02598     ErrorCode rval;
02599     mhdf_Status status;
02600 
02601     debug_barrier();
02602 
02603     hid_t read_type = H5Dget_type( table );
02604     if( read_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02605     const bool convert = !H5Tequal( read_type, handleType );
02606 
02607     EntityHandle* buffer = (EntityHandle*)dataBuffer;
02608     size_t chunk_size    = bufferSize / H5Tget_size( read_type );
02609     size_t remaining     = table_len;
02610     size_t left_over     = 0;
02611     size_t offset        = 0;
02612     dbgOut.printf( 3, "Reading adjacency list in %lu chunks\n",
02613                    (unsigned long)( remaining + chunk_size - 1 ) / chunk_size );
02614     int nn = 0;
02615     while( remaining )
02616     {
02617         dbgOut.printf( 3, "Reading chunk %d of adjacency list\n", ++nn );
02618 
02619         size_t count = std::min( chunk_size, remaining );
02620         count -= left_over;
02621         remaining -= count;
02622 
02623         assert_range( buffer + left_over, count );
02624         mhdf_readAdjacencyWithOpt( table, offset, count, read_type, buffer + left_over, collIO, &status );
02625         if( is_error( status ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02626 
02627         if( convert )
02628         {
02629             herr_t err = H5Tconvert( read_type, handleType, count, buffer + left_over, 0, H5P_DEFAULT );
02630             if( err < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02631         }
02632 
02633         EntityHandle* iter = buffer;
02634         EntityHandle* end  = buffer + count + left_over;
02635         while( end - iter >= 3 )
02636         {
02637             EntityHandle h      = idMap.find( *iter++ );
02638             EntityHandle count2 = *iter++;
02639             if( !h )
02640             {
02641                 iter += count2;
02642                 continue;
02643             }
02644 
02645             if( count2 < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02646 
02647             if( end < count2 + iter )
02648             {
02649                 iter -= 2;
02650                 break;
02651             }
02652 
02653             size_t valid;
02654             convert_id_to_handle( iter, count2, valid, idMap );
02655             rval = iFace->add_adjacencies( h, iter, valid, false );
02656             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02657 
02658             iter += count2;
02659         }
02660 
02661         left_over = end - iter;
02662         assert_range( (char*)buffer, left_over );
02663         assert_range( (char*)iter, left_over );
02664         memmove( buffer, iter, left_over );
02665     }
02666 
02667     assert( !left_over );  // Unexpected truncation of data
02668 
02669     return MB_SUCCESS;
02670 }
02671 
02672 ErrorCode ReadHDF5::read_tag( int tag_index )
02673 {
02674     CHECK_OPEN_HANDLES;
02675 
02676     dbgOut.tprintf( 2, "Reading tag \"%s\"\n", fileInfo->tags[tag_index].name );
02677 
02678     debug_barrier();
02679 
02680     ErrorCode rval;
02681     mhdf_Status status;
02682     Tag tag         = 0;
02683     hid_t read_type = -1;
02684     bool table_type;
02685     rval = create_tag( fileInfo->tags[tag_index], tag, read_type );
02686     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02687 
02688     if( fileInfo->tags[tag_index].have_sparse )
02689     {
02690         hid_t handles[3];
02691         long num_ent, num_val;
02692         mhdf_openSparseTagData( filePtr, fileInfo->tags[tag_index].name, &num_ent, &num_val, handles, &status );
02693         if( is_error( status ) )
02694         {
02695             if( read_type ) H5Tclose( read_type );
02696             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02697         }
02698 
02699         table_type = false;
02700         if( read_type == 0 )
02701         {
02702             read_type = H5Dget_type( handles[1] );
02703             if( read_type == 0 )
02704             {
02705                 mhdf_closeData( filePtr, handles[0], &status );
02706                 mhdf_closeData( filePtr, handles[0], &status );
02707                 if( fileInfo->tags[tag_index].size <= 0 ) mhdf_closeData( filePtr, handles[2], &status );
02708                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02709             }
02710             table_type = true;
02711         }
02712 
02713         if( fileInfo->tags[tag_index].size > 0 )
02714         {
02715             dbgOut.printf( 2, "Reading sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
02716             rval = read_sparse_tag( tag, read_type, handles[0], handles[1], num_ent );
02717         }
02718         else
02719         {
02720             dbgOut.printf( 2, "Reading var-len sparse data for tag \"%s\"\n", fileInfo->tags[tag_index].name );
02721             rval = read_var_len_tag( tag, read_type, handles[0], handles[1], handles[2], num_ent, num_val );
02722         }
02723 
02724         if( table_type )
02725         {
02726             H5Tclose( read_type );
02727             read_type = 0;
02728         }
02729 
02730         mhdf_closeData( filePtr, handles[0], &status );
02731         if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02732         mhdf_closeData( filePtr, handles[1], &status );
02733         if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02734         if( fileInfo->tags[tag_index].size <= 0 )
02735         {
02736             mhdf_closeData( filePtr, handles[2], &status );
02737             if( MB_SUCCESS == rval && is_error( status ) ) rval = MB_FAILURE;
02738         }
02739         if( MB_SUCCESS != rval )
02740         {
02741             if( read_type ) H5Tclose( read_type );
02742             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02743         }
02744     }
02745 
02746     for( int j = 0; j < fileInfo->tags[tag_index].num_dense_indices; ++j )
02747     {
02748         long count;
02749         const char* name = 0;
02750         mhdf_EntDesc* desc;
02751         int elem_idx = fileInfo->tags[tag_index].dense_elem_indices[j];
02752         if( elem_idx == -2 )
02753         {
02754             desc = &fileInfo->sets;
02755             name = mhdf_set_type_handle();
02756         }
02757         else if( elem_idx == -1 )
02758         {
02759             desc = &fileInfo->nodes;
02760             name = mhdf_node_type_handle();
02761         }
02762         else if( elem_idx >= 0 && elem_idx < fileInfo->num_elem_desc )
02763         {
02764             desc = &fileInfo->elems[elem_idx].desc;
02765             name = fileInfo->elems[elem_idx].handle;
02766         }
02767         else
02768         {
02769             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02770         }
02771 
02772         dbgOut.printf( 2, "Read dense data block for tag \"%s\" on \"%s\"\n", fileInfo->tags[tag_index].name, name );
02773 
02774         hid_t handle = mhdf_openDenseTagData( filePtr, fileInfo->tags[tag_index].name, name, &count, &status );
02775         if( is_error( status ) )
02776         {
02777             rval = MB_FAILURE;  // rval = error(MB_FAILURE);
02778             break;
02779         }
02780 
02781         if( count > desc->count )
02782         {
02783             mhdf_closeData( filePtr, handle, &status );
02784             MB_SET_ERR( MB_FAILURE,
02785                         "Invalid data length for dense tag data: " << name << "/" << fileInfo->tags[tag_index].name );
02786         }
02787 
02788         table_type = false;
02789         if( read_type == 0 )
02790         {
02791             read_type = H5Dget_type( handle );
02792             if( read_type == 0 )
02793             {
02794                 mhdf_closeData( filePtr, handle, &status );
02795                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02796             }
02797             table_type = true;
02798         }
02799 
02800         rval = read_dense_tag( tag, name, read_type, handle, desc->start_id, count );
02801 
02802         if( table_type )
02803         {
02804             H5Tclose( read_type );
02805             read_type = 0;
02806         }
02807 
02808         mhdf_closeData( filePtr, handle, &status );
02809         if( MB_SUCCESS != rval ) break;
02810         if( is_error( status ) )
02811         {
02812             rval = MB_FAILURE;
02813             break;
02814         }
02815     }
02816 
02817     if( read_type ) H5Tclose( read_type );
02818     return rval;
02819 }
02820 
02821 ErrorCode ReadHDF5::create_tag( const mhdf_TagDesc& info, Tag& handle, hid_t& hdf_type )
02822 {
02823     CHECK_OPEN_HANDLES;
02824 
02825     ErrorCode rval;
02826     mhdf_Status status;
02827     TagType storage;
02828     DataType mb_type;
02829     bool re_read_default = false;
02830 
02831     switch( info.storage )
02832     {
02833         case mhdf_DENSE_TYPE:
02834             storage = MB_TAG_DENSE;
02835             break;
02836         case mhdf_SPARSE_TYPE:
02837             storage = MB_TAG_SPARSE;
02838             break;
02839         case mhdf_BIT_TYPE:
02840             storage = MB_TAG_BIT;
02841             break;
02842         case mhdf_MESH_TYPE:
02843             storage = MB_TAG_MESH;
02844             break;
02845         default:
02846             MB_SET_ERR( MB_FAILURE, "Invalid storage type for tag '" << info.name << "': " << info.storage );
02847     }
02848 
02849     // Type-specific stuff
02850     if( info.type == mhdf_BITFIELD )
02851     {
02852         if( info.size < 1 || info.size > 8 )
02853         {
02854             MB_SET_ERR( MB_FAILURE, "Invalid bit tag: class is MB_TAG_BIT, num bits = " << info.size );
02855         }
02856         hdf_type = H5Tcopy( H5T_NATIVE_B8 );
02857         mb_type  = MB_TYPE_BIT;
02858         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02859     }
02860     else if( info.type == mhdf_OPAQUE )
02861     {
02862         mb_type = MB_TYPE_OPAQUE;
02863 
02864         // Check for user-provided type
02865         Tag type_handle;
02866         std::string tag_type_name = "__hdf5_tag_type_";
02867         tag_type_name += info.name;
02868         rval = iFace->tag_get_handle( tag_type_name.c_str(), sizeof( hid_t ), MB_TYPE_OPAQUE, type_handle );
02869         if( MB_SUCCESS == rval )
02870         {
02871             EntityHandle root = 0;
02872             rval              = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
02873             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
02874             hdf_type        = H5Tcopy( hdf_type );
02875             re_read_default = true;
02876         }
02877         else if( MB_TAG_NOT_FOUND == rval )
02878         {
02879             hdf_type = 0;
02880         }
02881         else
02882             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02883 
02884         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02885     }
02886     else
02887     {
02888         switch( info.type )
02889         {
02890             case mhdf_INTEGER:
02891                 hdf_type = H5T_NATIVE_INT;
02892                 mb_type  = MB_TYPE_INTEGER;
02893                 break;
02894             case mhdf_FLOAT:
02895                 hdf_type = H5T_NATIVE_DOUBLE;
02896                 mb_type  = MB_TYPE_DOUBLE;
02897                 break;
02898             case mhdf_BOOLEAN:
02899                 hdf_type = H5T_NATIVE_UINT;
02900                 mb_type  = MB_TYPE_INTEGER;
02901                 break;
02902             case mhdf_ENTITY_ID:
02903                 hdf_type = handleType;
02904                 mb_type  = MB_TYPE_HANDLE;
02905                 break;
02906             default:
02907                 MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02908         }
02909 
02910         if( info.size > 1 )
02911         {  // Array
02912             hsize_t tmpsize = info.size;
02913 #if defined( H5Tarray_create_vers ) && H5Tarray_create_vers > 1
02914             hdf_type = H5Tarray_create2( hdf_type, 1, &tmpsize );
02915 #else
02916             hdf_type = H5Tarray_create( hdf_type, 1, &tmpsize, NULL );
02917 #endif
02918         }
02919         else
02920         {
02921             hdf_type = H5Tcopy( hdf_type );
02922         }
02923         if( hdf_type < 0 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
02924     }
02925 
02926     // If default or global/mesh value in file, read it.
02927     if( info.default_value || info.global_value )
02928     {
02929         if( re_read_default )
02930         {
02931             mhdf_getTagValues( filePtr, info.name, hdf_type, info.default_value, info.global_value, &status );
02932             if( mhdf_isError( &status ) )
02933             {
02934                 if( hdf_type ) H5Tclose( hdf_type );
02935                 MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) );
02936             }
02937         }
02938 
02939         if( MB_TYPE_HANDLE == mb_type )
02940         {
02941             if( info.default_value )
02942             {
02943                 rval = convert_id_to_handle( (EntityHandle*)info.default_value, info.default_value_size );
02944                 if( MB_SUCCESS != rval )
02945                 {
02946                     if( hdf_type ) H5Tclose( hdf_type );
02947                     MB_SET_ERR( rval, "ReadHDF5 Failure" );
02948                 }
02949             }
02950             if( info.global_value )
02951             {
02952                 rval = convert_id_to_handle( (EntityHandle*)info.global_value, info.global_value_size );
02953                 if( MB_SUCCESS != rval )
02954                 {
02955                     if( hdf_type ) H5Tclose( hdf_type );
02956                     MB_SET_ERR( rval, "ReadHDF5 Failure" );
02957                 }
02958             }
02959         }
02960     }
02961 
02962     // Get tag handle, creating if necessary
02963     if( info.size < 0 )
02964         rval = iFace->tag_get_handle( info.name, info.default_value_size, mb_type, handle,
02965                                       storage | MB_TAG_CREAT | MB_TAG_VARLEN | MB_TAG_DFTOK, info.default_value );
02966     else
02967         rval = iFace->tag_get_handle( info.name, info.size, mb_type, handle, storage | MB_TAG_CREAT | MB_TAG_DFTOK,
02968                                       info.default_value );
02969     if( MB_SUCCESS != rval )
02970     {
02971         if( hdf_type ) H5Tclose( hdf_type );
02972         MB_SET_ERR( MB_FAILURE, "Tag type in file does not match type in database for \"" << info.name << "\"" );
02973     }
02974 
02975     if( info.global_value )
02976     {
02977         EntityHandle root = 0;
02978         if( info.size > 0 )
02979         {  // Fixed-length tag
02980             rval = iFace->tag_set_data( handle, &root, 1, info.global_value );
02981         }
02982         else
02983         {
02984             int tag_size = info.global_value_size;
02985             rval         = iFace->tag_set_by_ptr( handle, &root, 1, &info.global_value, &tag_size );
02986         }
02987         if( MB_SUCCESS != rval )
02988         {
02989             if( hdf_type ) H5Tclose( hdf_type );
02990             MB_SET_ERR( rval, "ReadHDF5 Failure" );
02991         }
02992     }
02993 
02994     return MB_SUCCESS;
02995 }
02996 
02997 ErrorCode ReadHDF5::read_dense_tag( Tag tag_handle, const char* ent_name, hid_t hdf_read_type, hid_t data,
02998                                     long start_id, long num_values )
02999 {
03000     CHECK_OPEN_HANDLES;
03001 
03002     ErrorCode rval;
03003     DataType mb_type;
03004 
03005     rval = iFace->tag_get_data_type( tag_handle, mb_type );
03006     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03007 
03008     int read_size;
03009     rval = iFace->tag_get_bytes( tag_handle, read_size );
03010     if( MB_SUCCESS != rval )  // Wrong function for variable-length tags
03011         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03012     // if (MB_TYPE_BIT == mb_type)
03013     // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
03014 
03015     if( hdf_read_type )
03016     {  // If not opaque
03017         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03018         if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03019     }
03020 
03021     // Get actual entities read from file
03022     Range file_ids, handles;
03023     Range::iterator f_ins = file_ids.begin(), h_ins = handles.begin();
03024     IDMap::iterator l, u;
03025     l = idMap.lower_bound( start_id );
03026     u = idMap.lower_bound( start_id + num_values - 1 );
03027     if( l != idMap.end() && start_id + num_values > l->begin )
03028     {
03029         if( l == u )
03030         {
03031             size_t beg = std::max( start_id, l->begin );
03032             size_t end = std::min( start_id + num_values, u->begin + u->count ) - 1;
03033             f_ins      = file_ids.insert( f_ins, beg, end );
03034             h_ins      = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + ( end - l->begin ) );
03035         }
03036         else
03037         {
03038             size_t beg = std::max( start_id, l->begin );
03039             f_ins      = file_ids.insert( f_ins, beg, l->begin + l->count - 1 );
03040             h_ins      = handles.insert( h_ins, l->value + ( beg - l->begin ), l->value + l->count - 1 );
03041             for( ++l; l != u; ++l )
03042             {
03043                 f_ins = file_ids.insert( f_ins, l->begin, l->begin + l->count - 1 );
03044                 h_ins = handles.insert( h_ins, l->value, l->value + l->count - 1 );
03045             }
03046             if( u != idMap.end() && u->begin < start_id + num_values )
03047             {
03048                 size_t end = std::min( start_id + num_values, u->begin + u->count - 1 );
03049                 f_ins      = file_ids.insert( f_ins, u->begin, end );
03050                 h_ins      = handles.insert( h_ins, u->value, u->value + end - u->begin );
03051             }
03052         }
03053     }
03054 
03055     // Given that all of the entities for this dense tag data should
03056     // have been created as a single contiguous block, the resulting
03057     // MOAB handle range should be contiguous.
03058     // THE ABOVE IS NOT NECESSARILY TRUE. SOMETIMES LOWER-DIMENSION
03059     // ENTS ARE READ AND THEN DELETED FOR PARTIAL READS.
03060     // assert(handles.empty() || handles.size() == (handles.back() - handles.front() + 1));
03061 
03062     std::string tn( "<error>" );
03063     iFace->tag_get_name( tag_handle, tn );
03064     tn += " data for ";
03065     tn += ent_name;
03066     try
03067     {
03068         h_ins = handles.begin();
03069         ReadHDF5Dataset reader( tn.c_str(), data, nativeParallel, mpiComm, false );
03070         long buffer_size = bufferSize / read_size;
03071         reader.set_file_ids( file_ids, start_id, buffer_size, hdf_read_type );
03072         dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tn.c_str(), ent_name,
03073                        reader.get_read_count() );
03074         int nn = 0;
03075         while( !reader.done() )
03076         {
03077             dbgOut.printf( 3, "Reading chunk %d of \"%s\" data\n", ++nn, tn.c_str() );
03078 
03079             size_t count;
03080             reader.read( dataBuffer, count );
03081 
03082             if( MB_TYPE_HANDLE == mb_type )
03083             {
03084                 rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * read_size / sizeof( EntityHandle ) );
03085                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03086             }
03087 
03088             Range ents;
03089             Range::iterator end = h_ins;
03090             end += count;
03091             ents.insert( h_ins, end );
03092             h_ins = end;
03093 
03094             rval = iFace->tag_set_data( tag_handle, ents, dataBuffer );
03095             if( MB_SUCCESS != rval )
03096             {
03097                 dbgOut.printf( 1, "Internal error setting data for tag \"%s\"\n", tn.c_str() );
03098                 MB_SET_ERR( rval, "ReadHDF5 Failure" );
03099             }
03100         }
03101     }
03102     catch( ReadHDF5Dataset::Exception )
03103     {
03104         dbgOut.printf( 1, "Internal error reading dense data for tag \"%s\"\n", tn.c_str() );
03105         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03106     }
03107 
03108     return MB_SUCCESS;
03109 }
03110 
03111 // Read entire ID table and for those file IDs corresponding
03112 // to entities that we have read from the file add both the
03113 // offset into the offset range and the handle into the handle
03114 // range. If handles are not ordered, switch to using a vector.
03115 ErrorCode ReadHDF5::read_sparse_tag_indices( const char* name, hid_t id_table,
03116                                              EntityHandle start_offset,  // Can't put zero in a Range
03117                                              Range& offset_range, Range& handle_range,
03118                                              std::vector< EntityHandle >& handle_vect )
03119 {
03120     CHECK_OPEN_HANDLES;
03121 
03122     offset_range.clear();
03123     handle_range.clear();
03124     handle_vect.clear();
03125 
03126     ErrorCode rval;
03127     Range::iterator handle_hint = handle_range.begin();
03128     Range::iterator offset_hint = offset_range.begin();
03129 
03130     EntityHandle* idbuf = (EntityHandle*)dataBuffer;
03131     size_t idbuf_size   = bufferSize / sizeof( EntityHandle );
03132 
03133     std::string tn( name );
03134     tn += " indices";
03135 
03136     assert( start_offset > 0 );  // Can't put zero in a Range
03137     try
03138     {
03139         ReadHDF5Dataset id_reader( tn.c_str(), id_table, nativeParallel, mpiComm, false );
03140         id_reader.set_all_file_ids( idbuf_size, handleType );
03141         size_t offset = start_offset;
03142         dbgOut.printf( 3, "Reading file ids for sparse tag \"%s\" in %lu chunks\n", name, id_reader.get_read_count() );
03143         int nn = 0;
03144         while( !id_reader.done() )
03145         {
03146             dbgOut.printf( 3, "Reading chunk %d of \"%s\" IDs\n", ++nn, name );
03147             size_t count;
03148             id_reader.read( idbuf, count );
03149 
03150             rval = convert_id_to_handle( idbuf, count );
03151             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03152 
03153             // idbuf will now contain zero-valued handles for those
03154             // tag values that correspond to entities we are not reading
03155             // from the file.
03156             for( size_t i = 0; i < count; ++i )
03157             {
03158                 if( idbuf[i] )
03159                 {
03160                     offset_hint = offset_range.insert( offset_hint, offset + i );
03161                     if( !handle_vect.empty() ) { handle_vect.push_back( idbuf[i] ); }
03162                     else if( handle_range.empty() || idbuf[i] > handle_range.back() )
03163                     {
03164                         handle_hint = handle_range.insert( handle_hint, idbuf[i] );
03165                     }
03166                     else
03167                     {
03168                         handle_vect.resize( handle_range.size() );
03169                         std::copy( handle_range.begin(), handle_range.end(), handle_vect.begin() );
03170                         handle_range.clear();
03171                         handle_vect.push_back( idbuf[i] );
03172                         dbgOut.print( 2, "Switching to unordered list for tag handle list\n" );
03173                     }
03174                 }
03175             }
03176 
03177             offset += count;
03178         }
03179     }
03180     catch( ReadHDF5Dataset::Exception )
03181     {
03182         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03183     }
03184 
03185     return MB_SUCCESS;
03186 }
03187 
03188 ErrorCode ReadHDF5::read_sparse_tag( Tag tag_handle, hid_t hdf_read_type, hid_t id_table, hid_t value_table,
03189                                      long /*num_values*/ )
03190 {
03191     CHECK_OPEN_HANDLES;
03192 
03193     // Read entire ID table and for those file IDs corresponding
03194     // to entities that we have read from the file add both the
03195     // offset into the offset range and the handle into the handle
03196     // range.  If handles are not ordered, switch to using a vector.
03197     const EntityHandle base_offset = 1;  // Can't put zero in a Range
03198     std::vector< EntityHandle > handle_vect;
03199     Range handle_range, offset_range;
03200     std::string tn( "<error>" );
03201     iFace->tag_get_name( tag_handle, tn );
03202     ErrorCode rval =
03203         read_sparse_tag_indices( tn.c_str(), id_table, base_offset, offset_range, handle_range, handle_vect );
03204     if( MB_SUCCESS != rval ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03205 
03206     DataType mbtype;
03207     rval = iFace->tag_get_data_type( tag_handle, mbtype );
03208     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03209 
03210     int read_size;
03211     rval = iFace->tag_get_bytes( tag_handle, read_size );
03212     if( MB_SUCCESS != rval )  // Wrong function for variable-length tags
03213         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03214     // if (MB_TYPE_BIT == mbtype)
03215     // read_size = (read_size + 7) / 8; // Convert bits to bytes, plus 7 for ceiling
03216 
03217     if( hdf_read_type )
03218     {  // If not opaque
03219         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03220         if( hdf_size != (hsize_t)read_size ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03221     }
03222 
03223     const int handles_per_tag = read_size / sizeof( EntityHandle );
03224 
03225     // Now read data values
03226     size_t chunk_size = bufferSize / read_size;
03227     try
03228     {
03229         ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), value_table, nativeParallel, mpiComm, false );
03230         val_reader.set_file_ids( offset_range, base_offset, chunk_size, hdf_read_type );
03231         dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tn.c_str(),
03232                        val_reader.get_read_count() );
03233         int nn        = 0;
03234         size_t offset = 0;
03235         while( !val_reader.done() )
03236         {
03237             dbgOut.printf( 3, "Reading chunk %d of \"%s\" values\n", ++nn, tn.c_str() );
03238             size_t count;
03239             val_reader.read( dataBuffer, count );
03240             if( MB_TYPE_HANDLE == mbtype )
03241             {
03242                 rval = convert_id_to_handle( (EntityHandle*)dataBuffer, count * handles_per_tag );
03243                 if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03244             }
03245 
03246             if( !handle_vect.empty() )
03247             {
03248                 rval = iFace->tag_set_data( tag_handle, &handle_vect[offset], count, dataBuffer );
03249                 offset += count;
03250             }
03251             else
03252             {
03253                 Range r;
03254                 r.merge( handle_range.begin(), handle_range.begin() + count );
03255                 handle_range.erase( handle_range.begin(), handle_range.begin() + count );
03256                 rval = iFace->tag_set_data( tag_handle, r, dataBuffer );
03257             }
03258             if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03259         }
03260     }
03261     catch( ReadHDF5Dataset::Exception )
03262     {
03263         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03264     }
03265 
03266     return MB_SUCCESS;
03267 }
03268 
03269 ErrorCode ReadHDF5::read_var_len_tag( Tag tag_handle, hid_t hdf_read_type, hid_t ent_table, hid_t val_table,
03270                                       hid_t off_table, long /*num_entities*/, long /*num_values*/ )
03271 {
03272     CHECK_OPEN_HANDLES;
03273 
03274     ErrorCode rval;
03275     DataType mbtype;
03276 
03277     rval = iFace->tag_get_data_type( tag_handle, mbtype );
03278     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03279 
03280     // Can't do variable-length bit tags
03281     if( MB_TYPE_BIT == mbtype ) MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
03282 
03283     // If here, MOAB tag must be variable-length
03284     int mbsize;
03285     if( MB_VARIABLE_DATA_LENGTH != iFace->tag_get_bytes( tag_handle, mbsize ) )
03286     {
03287         assert( false );MB_CHK_ERR( MB_VARIABLE_DATA_LENGTH );
03288     }
03289 
03290     int read_size;
03291     if( hdf_read_type )
03292     {
03293         hsize_t hdf_size = H5Tget_size( hdf_read_type );
03294         if( hdf_size < 1 ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03295         read_size = hdf_size;
03296     }
03297     else
03298     {
03299         // Opaque
03300         read_size = 1;
03301     }
03302 
03303     std::string tn( "<error>" );
03304     iFace->tag_get_name( tag_handle, tn );
03305 
03306     // Read entire ID table and for those file IDs corresponding
03307     // to entities that we have read from the file add both the
03308     // offset into the offset range and the handle into the handle
03309     // range. If handles are not ordered, switch to using a vector.
03310     const EntityHandle base_offset = 1;  // Can't put zero in a Range
03311     std::vector< EntityHandle > handle_vect;
03312     Range handle_range, offset_range;
03313     rval = read_sparse_tag_indices( tn.c_str(), ent_table, base_offset, offset_range, handle_range, handle_vect );
03314 
03315     // This code only works if the id_table is an ordered list.
03316     // This assumption was also true for the previous iteration
03317     // of this code, but wasn't checked. MOAB's file writer
03318     // always writes an ordered list for id_table.
03319     if( !handle_vect.empty() ) { MB_SET_ERR( MB_FAILURE, "Unordered file ids for variable length tag not supported" ); }
03320 
03321     class VTReader : public ReadHDF5VarLen
03322     {
03323         Tag tagHandle;
03324         bool isHandle;
03325         size_t readSize;
03326         ReadHDF5* readHDF5;
03327 
03328       public:
03329         ErrorCode store_data( EntityHandle file_id, void* data, long count, bool )
03330         {
03331             ErrorCode rval1;
03332             if( isHandle )
03333             {
03334                 assert( readSize == sizeof( EntityHandle ) );
03335                 rval1 = readHDF5->convert_id_to_handle( (EntityHandle*)data, count );MB_CHK_ERR( rval1 );
03336             }
03337             int n = count;
03338             return readHDF5->moab()->tag_set_by_ptr( tagHandle, &file_id, 1, &data, &n );
03339         }
03340         VTReader( DebugOutput& debug_output, void* buffer, size_t buffer_size, Tag tag, bool is_handle_tag,
03341                   size_t read_size1, ReadHDF5* owner )
03342             : ReadHDF5VarLen( debug_output, buffer, buffer_size ), tagHandle( tag ), isHandle( is_handle_tag ),
03343               readSize( read_size1 ), readHDF5( owner )
03344         {
03345         }
03346     };
03347 
03348     VTReader tool( dbgOut, dataBuffer, bufferSize, tag_handle, MB_TYPE_HANDLE == mbtype, read_size, this );
03349     try
03350     {
03351         // Read offsets into value table.
03352         std::vector< unsigned > counts;
03353         Range offsets;
03354         ReadHDF5Dataset off_reader( ( tn + " offsets" ).c_str(), off_table, nativeParallel, mpiComm, false );
03355         rval = tool.read_offsets( off_reader, offset_range, base_offset, base_offset, offsets, counts );
03356         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03357 
03358         // Read tag values
03359         Range empty;
03360         ReadHDF5Dataset val_reader( ( tn + " values" ).c_str(), val_table, nativeParallel, mpiComm, false );
03361         rval = tool.read_data( val_reader, offsets, base_offset, hdf_read_type, handle_range, counts, empty );
03362         if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03363     }
03364     catch( ReadHDF5Dataset::Exception )
03365     {
03366         MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03367     }
03368 
03369     return MB_SUCCESS;
03370 }
03371 
03372 ErrorCode ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size )
03373 {
03374     convert_id_to_handle( array, size, idMap );
03375     return MB_SUCCESS;
03376 }
03377 
03378 void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, const RangeMap< long, EntityHandle >& id_map )
03379 {
03380     for( EntityHandle* const end = array + size; array != end; ++array )
03381         *array = id_map.find( *array );
03382 }
03383 
03384 void ReadHDF5::convert_id_to_handle( EntityHandle* array, size_t size, size_t& new_size,
03385                                      const RangeMap< long, EntityHandle >& id_map )
03386 {
03387     RangeMap< long, EntityHandle >::const_iterator it;
03388     new_size = 0;
03389     for( size_t i = 0; i < size; ++i )
03390     {
03391         it = id_map.lower_bound( array[i] );
03392         if( it != id_map.end() && it->begin <= (long)array[i] )
03393             array[new_size++] = it->value + ( array[i] - it->begin );
03394     }
03395 }
03396 
03397 void ReadHDF5::convert_range_to_handle( const EntityHandle* ranges, size_t num_ranges,
03398                                         const RangeMap< long, EntityHandle >& id_map, Range& merge )
03399 {
03400     RangeMap< long, EntityHandle >::iterator it = id_map.begin();
03401     Range::iterator hint                        = merge.begin();
03402     for( size_t i = 0; i < num_ranges; ++i )
03403     {
03404         long id        = ranges[2 * i];
03405         const long end = id + ranges[2 * i + 1];
03406         // We assume that 'ranges' is sorted, but check just in case it isn't.
03407         if( it == id_map.end() || it->begin > id ) it = id_map.begin();
03408         it = id_map.lower_bound( it, id_map.end(), id );
03409         if( it == id_map.end() ) continue;
03410         if( id < it->begin ) id = it->begin;
03411         while( id < end )
03412         {
03413             if( id < it->begin ) id = it->begin;
03414             const long off = id - it->begin;
03415             long count     = std::min( it->count - off, end - id );
03416             // It is possible that this new subrange is starting after the end
03417             // It will result in negative count, which does not make sense
03418             // We are done with this range, go to the next one
03419             if( count <= 0 ) break;
03420             hint = merge.insert( hint, it->value + off, it->value + off + count - 1 );
03421             id += count;
03422             if( id < end )
03423             {
03424                 if( ++it == id_map.end() ) break;
03425                 if( it->begin > end ) break;
03426             }
03427         }
03428     }
03429 }
03430 
03431 ErrorCode ReadHDF5::convert_range_to_handle( const EntityHandle* array, size_t num_ranges, Range& range )
03432 {
03433     convert_range_to_handle( array, num_ranges, idMap, range );
03434     return MB_SUCCESS;
03435 }
03436 
03437 ErrorCode ReadHDF5::insert_in_id_map( const Range& file_ids, EntityHandle start_id )
03438 {
03439     IDMap tmp_map;
03440     bool merge = !idMap.empty() && !file_ids.empty() && idMap.back().begin > (long)file_ids.front();
03441     IDMap& map = merge ? tmp_map : idMap;
03442     Range::const_pair_iterator p;
03443     for( p = file_ids.const_pair_begin(); p != file_ids.const_pair_end(); ++p )
03444     {
03445         size_t count = p->second - p->first + 1;
03446         if( !map.insert( p->first, start_id, count ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03447         start_id += count;
03448     }
03449     if( merge && !idMap.merge( tmp_map ) ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03450 
03451     return MB_SUCCESS;
03452 }
03453 
03454 ErrorCode ReadHDF5::insert_in_id_map( long file_id, EntityHandle handle )
03455 {
03456     if( !idMap.insert( file_id, handle, 1 ).second ) MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03457     return MB_SUCCESS;
03458 }
03459 
03460 ErrorCode ReadHDF5::read_qa( EntityHandle )
03461 {
03462     CHECK_OPEN_HANDLES;
03463 
03464     mhdf_Status status;
03465     // std::vector<std::string> qa_list;
03466 
03467     int qa_len;
03468     char** qa = mhdf_readHistory( filePtr, &qa_len, &status );
03469     if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03470     // qa_list.resize(qa_len);
03471     for( int i = 0; i < qa_len; i++ )
03472     {
03473         // qa_list[i] = qa[i];
03474         free( qa[i] );
03475     }
03476     free( qa );
03477 
03478     /** FIX ME - how to put QA list on set?? */
03479 
03480     return MB_SUCCESS;
03481 }
03482 
03483 ErrorCode ReadHDF5::store_file_ids( Tag tag )
03484 {
03485     CHECK_OPEN_HANDLES;
03486 
03487     // typedef int tag_type;
03488     typedef long tag_type;
03489     // change it to be able to read much bigger files (long is 64 bits ...)
03490 
03491     tag_type* buffer       = reinterpret_cast< tag_type* >( dataBuffer );
03492     const long buffer_size = bufferSize / sizeof( tag_type );
03493     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
03494     {
03495         IDMap::Range range = *i;
03496 
03497         // Make sure the values will fit in the tag type
03498         IDMap::key_type rv = range.begin + ( range.count - 1 );
03499         tag_type tv        = (tag_type)rv;
03500         if( (IDMap::key_type)tv != rv )
03501         {
03502             assert( false );
03503             return MB_INDEX_OUT_OF_RANGE;
03504         }
03505 
03506         while( range.count )
03507         {
03508             long count = buffer_size < range.count ? buffer_size : range.count;
03509 
03510             Range handles;
03511             handles.insert( range.value, range.value + count - 1 );
03512             range.value += count;
03513             range.count -= count;
03514             for( long j = 0; j < count; ++j )
03515                 buffer[j] = (tag_type)range.begin++;
03516 
03517             ErrorCode rval = iFace->tag_set_data( tag, handles, buffer );
03518             if( MB_SUCCESS != rval ) return rval;
03519         }
03520     }
03521 
03522     return MB_SUCCESS;
03523 }
03524 
03525 ErrorCode ReadHDF5::store_sets_file_ids()
03526 {
03527     CHECK_OPEN_HANDLES;
03528 
03529     // create a tag that will not be saved, but it will be
03530     // used by visit plugin to match the sets and their file ids
03531     // it is the same type as the tag defined in ReadParallelcpp, for file id
03532     Tag setFileIdTag;
03533     long default_val = 0;
03534     ErrorCode rval   = iFace->tag_get_handle( "__FILE_ID_FOR_SETS", sizeof( long ), MB_TYPE_OPAQUE, setFileIdTag,
03535                                             ( MB_TAG_DENSE | MB_TAG_CREAT ), &default_val );
03536 
03537     if( MB_SUCCESS != rval || 0 == setFileIdTag ) return rval;
03538     // typedef int tag_type;
03539     typedef long tag_type;
03540     // change it to be able to read much bigger files (long is 64 bits ...)
03541 
03542     tag_type* buffer       = reinterpret_cast< tag_type* >( dataBuffer );
03543     const long buffer_size = bufferSize / sizeof( tag_type );
03544     for( IDMap::iterator i = idMap.begin(); i != idMap.end(); ++i )
03545     {
03546         IDMap::Range range = *i;
03547         EntityType htype   = iFace->type_from_handle( range.value );
03548         if( MBENTITYSET != htype ) continue;
03549         // work only with entity sets
03550         // Make sure the values will fit in the tag type
03551         IDMap::key_type rv = range.begin + ( range.count - 1 );
03552         tag_type tv        = (tag_type)rv;
03553         if( (IDMap::key_type)tv != rv )
03554         {
03555             assert( false );
03556             return MB_INDEX_OUT_OF_RANGE;
03557         }
03558 
03559         while( range.count )
03560         {
03561             long count = buffer_size < range.count ? buffer_size : range.count;
03562 
03563             Range handles;
03564             handles.insert( range.value, range.value + count - 1 );
03565             range.value += count;
03566             range.count -= count;
03567             for( long j = 0; j < count; ++j )
03568                 buffer[j] = (tag_type)range.begin++;
03569 
03570             rval = iFace->tag_set_data( setFileIdTag, handles, buffer );
03571             if( MB_SUCCESS != rval ) return rval;
03572         }
03573     }
03574     return MB_SUCCESS;
03575 }
03576 
03577 ErrorCode ReadHDF5::read_tag_values( const char* file_name, const char* tag_name, const FileOptions& opts,
03578                                      std::vector< int >& tag_values_out, const SubsetList* subset_list )
03579 {
03580     ErrorCode rval;
03581 
03582     rval = set_up_read( file_name, opts );
03583     if( MB_SUCCESS != rval ) MB_SET_ERR( rval, "ReadHDF5 Failure" );
03584 
03585     int tag_index;
03586     rval = find_int_tag( tag_name, tag_index );
03587     if( MB_SUCCESS != rval )
03588     {
03589         clean_up_read( opts );
03590         MB_SET_ERR( rval, "ReadHDF5 Failure" );
03591     }
03592 
03593     if( subset_list )
03594     {
03595         Range file_ids;
03596         rval = get_subset_ids( subset_list->tag_list, subset_list->tag_list_length, file_ids );
03597         if( MB_SUCCESS != rval )
03598         {
03599             clean_up_read( opts );
03600             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03601         }
03602 
03603         rval = read_tag_values_partial( tag_index, file_ids, tag_values_out );
03604         if( MB_SUCCESS != rval )
03605         {
03606             clean_up_read( opts );
03607             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03608         }
03609     }
03610     else
03611     {
03612         rval = read_tag_values_all( tag_index, tag_values_out );
03613         if( MB_SUCCESS != rval )
03614         {
03615             clean_up_read( opts );
03616             MB_SET_ERR( rval, "ReadHDF5 Failure" );
03617         }
03618     }
03619 
03620     return clean_up_read( opts );
03621 }
03622 
03623 ErrorCode ReadHDF5::read_tag_values_partial( int tag_index, const Range& file_ids, std::vector< int >& tag_values )
03624 {
03625     CHECK_OPEN_HANDLES;
03626 
03627     mhdf_Status status;
03628     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
03629     long num_ent, num_val;
03630     size_t count;
03631     std::string tn( tag.name );
03632 
03633     // Read sparse values
03634     if( tag.have_sparse )
03635     {
03636         hid_t handles[3];
03637         mhdf_openSparseTagData( filePtr, tag.name, &num_ent, &num_val, handles, &status );
03638         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03639 
03640         try
03641         {
03642             // Read all entity handles and fill 'offsets' with ranges of
03643             // offsets into the data table for entities that we want.
03644             Range offsets;
03645             long* buffer           = reinterpret_cast< long* >( dataBuffer );
03646             const long buffer_size = bufferSize / sizeof( long );
03647             ReadHDF5Dataset ids( ( tn + " ids" ).c_str(), handles[0], nativeParallel, mpiComm );
03648             ids.set_all_file_ids( buffer_size, H5T_NATIVE_LONG );
03649             size_t offset = 0;
03650             dbgOut.printf( 3, "Reading sparse IDs for tag \"%s\" in %lu chunks\n", tag.name, ids.get_read_count() );
03651             int nn = 0;
03652             while( !ids.done() )
03653             {
03654                 dbgOut.printf( 3, "Reading chunk %d of IDs for \"%s\"\n", ++nn, tag.name );
03655                 ids.read( buffer, count );
03656 
03657                 std::sort( buffer, buffer + count );
03658                 Range::iterator ins     = offsets.begin();
03659                 Range::const_iterator i = file_ids.begin();
03660                 for( size_t j = 0; j < count; ++j )
03661                 {
03662                     while( i != file_ids.end() && (long)*i < buffer[j] )
03663                         ++i;
03664                     if( i == file_ids.end() ) break;
03665                     if( (long)*i == buffer[j] ) { ins = offsets.insert( ins, j + offset, j + offset ); }
03666                 }
03667 
03668                 offset += count;
03669             }
03670 
03671             tag_values.clear();
03672             tag_values.reserve( offsets.size() );
03673             const size_t data_buffer_size = bufferSize / sizeof( int );
03674             int* data_buffer              = reinterpret_cast< int* >( dataBuffer );
03675             ReadHDF5Dataset vals( ( tn + " sparse vals" ).c_str(), handles[1], nativeParallel, mpiComm );
03676             vals.set_file_ids( offsets, 0, data_buffer_size, H5T_NATIVE_INT );
03677             dbgOut.printf( 3, "Reading sparse values for tag \"%s\" in %lu chunks\n", tag.name, vals.get_read_count() );
03678             nn = 0;
03679             // Should normally only have one read call, unless sparse nature
03680             // of file_ids caused reader to do something strange
03681             while( !vals.done() )
03682             {
03683                 dbgOut.printf( 3, "Reading chunk %d of values for \"%s\"\n", ++nn, tag.name );
03684                 vals.read( data_buffer, count );
03685                 tag_values.insert( tag_values.end(), data_buffer, data_buffer + count );
03686             }
03687         }
03688         catch( ReadHDF5Dataset::Exception )
03689         {
03690             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03691         }
03692     }
03693 
03694     std::sort( tag_values.begin(), tag_values.end() );
03695     tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
03696 
03697     // Read dense values
03698     std::vector< int > prev_data, curr_data;
03699     for( int i = 0; i < tag.num_dense_indices; ++i )
03700     {
03701         int grp            = tag.dense_elem_indices[i];
03702         const char* gname  = 0;
03703         mhdf_EntDesc* desc = 0;
03704         if( grp == -1 )
03705         {
03706             gname = mhdf_node_type_handle();
03707             desc  = &fileInfo->nodes;
03708         }
03709         else if( grp == -2 )
03710         {
03711             gname = mhdf_set_type_handle();
03712             desc  = &fileInfo->sets;
03713         }
03714         else
03715         {
03716             assert( grp >= 0 && grp < fileInfo->num_elem_desc );
03717             gname = fileInfo->elems[grp].handle;
03718             desc  = &fileInfo->elems[grp].desc;
03719         }
03720 
03721         Range::iterator s = file_ids.lower_bound( ( EntityHandle )( desc->start_id ) );
03722         Range::iterator e = Range::lower_bound( s, file_ids.end(), ( EntityHandle )( desc->start_id ) + desc->count );
03723         Range subset;
03724         subset.merge( s, e );
03725 
03726         hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
03727         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03728 
03729         try
03730         {
03731             curr_data.clear();
03732             tag_values.reserve( subset.size() );
03733             const size_t data_buffer_size = bufferSize / sizeof( int );
03734             int* data_buffer              = reinterpret_cast< int* >( dataBuffer );
03735 
03736             ReadHDF5Dataset reader( ( tn + " dense vals" ).c_str(), handle, nativeParallel, mpiComm );
03737             reader.set_file_ids( subset, desc->start_id, data_buffer_size, H5T_NATIVE_INT );
03738             dbgOut.printf( 3, "Reading dense data for tag \"%s\" and group \"%s\" in %lu chunks\n", tag.name,
03739                            fileInfo->elems[grp].handle, reader.get_read_count() );
03740             int nn = 0;
03741             // Should normally only have one read call, unless sparse nature
03742             // of file_ids caused reader to do something strange
03743             while( !reader.done() )
03744             {
03745                 dbgOut.printf( 3, "Reading chunk %d of \"%s\"/\"%s\"\n", ++nn, tag.name, fileInfo->elems[grp].handle );
03746                 reader.read( data_buffer, count );
03747                 curr_data.insert( curr_data.end(), data_buffer, data_buffer + count );
03748             }
03749         }
03750         catch( ReadHDF5Dataset::Exception )
03751         {
03752             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03753         }
03754 
03755         std::sort( curr_data.begin(), curr_data.end() );
03756         curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
03757         prev_data.clear();
03758         tag_values.swap( prev_data );
03759         std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
03760                         std::back_inserter( tag_values ) );
03761     }
03762 
03763     return MB_SUCCESS;
03764 }
03765 
03766 ErrorCode ReadHDF5::read_tag_values_all( int tag_index, std::vector< int >& tag_values )
03767 {
03768     CHECK_OPEN_HANDLES;
03769 
03770     mhdf_Status status;
03771     const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
03772     long junk, num_val;
03773 
03774     // Read sparse values
03775     if( tag.have_sparse )
03776     {
03777         hid_t handles[3];
03778         mhdf_openSparseTagData( filePtr, tag.name, &junk, &num_val, handles, &status );
03779         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03780 
03781         mhdf_closeData( filePtr, handles[0], &status );
03782         if( mhdf_isError( &status ) )
03783         {
03784             MB_SET_ERR_CONT( mhdf_message( &status ) );
03785             mhdf_closeData( filePtr, handles[1], &status );
03786             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03787         }
03788 
03789         hid_t file_type = H5Dget_type( handles[1] );
03790         tag_values.resize( num_val );
03791         mhdf_readTagValuesWithOpt( handles[1], 0, num_val, file_type, &tag_values[0], collIO, &status );
03792         if( mhdf_isError( &status ) )
03793         {
03794             MB_SET_ERR_CONT( mhdf_message( &status ) );
03795             H5Tclose( file_type );
03796             mhdf_closeData( filePtr, handles[1], &status );
03797             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03798         }
03799         H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &tag_values[0], 0, H5P_DEFAULT );
03800         H5Tclose( file_type );
03801 
03802         mhdf_closeData( filePtr, handles[1], &status );
03803         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03804     }
03805 
03806     std::sort( tag_values.begin(), tag_values.end() );
03807     tag_values.erase( std::unique( tag_values.begin(), tag_values.end() ), tag_values.end() );
03808 
03809     // Read dense values
03810     std::vector< int > prev_data, curr_data;
03811     for( int i = 0; i < tag.num_dense_indices; ++i )
03812     {
03813         int grp           = tag.dense_elem_indices[i];
03814         const char* gname = 0;
03815         if( grp == -1 )
03816             gname = mhdf_node_type_handle();
03817         else if( grp == -2 )
03818             gname = mhdf_set_type_handle();
03819         else
03820             gname = fileInfo->elems[grp].handle;
03821         hid_t handle = mhdf_openDenseTagData( filePtr, tag.name, gname, &num_val, &status );
03822         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03823 
03824         hid_t file_type = H5Dget_type( handle );
03825         curr_data.resize( num_val );
03826         mhdf_readTagValuesWithOpt( handle, 0, num_val, file_type, &curr_data[0], collIO, &status );
03827         if( mhdf_isError( &status ) )
03828         {
03829             MB_SET_ERR_CONT( mhdf_message( &status ) );
03830             H5Tclose( file_type );
03831             mhdf_closeData( filePtr, handle, &status );
03832             MB_SET_ERR( MB_FAILURE, "ReadHDF5 Failure" );
03833         }
03834 
03835         H5Tconvert( file_type, H5T_NATIVE_INT, num_val, &curr_data[0], 0, H5P_DEFAULT );
03836         H5Tclose( file_type );
03837         mhdf_closeData( filePtr, handle, &status );
03838         if( mhdf_isError( &status ) ) { MB_SET_ERR( MB_FAILURE, mhdf_message( &status ) ); }
03839 
03840         std::sort( curr_data.begin(), curr_data.end() );
03841         curr_data.erase( std::unique( curr_data.begin(), curr_data.end() ), curr_data.end() );
03842 
03843         prev_data.clear();
03844         tag_values.swap( prev_data );
03845         std::set_union( prev_data.begin(), prev_data.end(), curr_data.begin(), curr_data.end(),
03846                         std::back_inserter( tag_values ) );
03847     }
03848 
03849     return MB_SUCCESS;
03850 }
03851 void ReadHDF5::print_times()
03852 {
03853 #ifdef MOAB_HAVE_MPI
03854     if( !myPcomm )
03855     {
03856         double recv[NUM_TIMES];
03857         MPI_Reduce( (void*)_times, recv, NUM_TIMES, MPI_DOUBLE, MPI_MAX, 0, myPcomm->proc_config().proc_comm() );
03858         for( int i = 0; i < NUM_TIMES; i++ )
03859             _times[i] = recv[i];  // just get the max from all of them
03860     }
03861     if( 0 == myPcomm->proc_config().proc_rank() )
03862     {
03863 #endif
03864 
03865         std::cout << "ReadHDF5:             " << _times[TOTAL_TIME] << std::endl
03866                   << "  get set meta        " << _times[SET_META_TIME] << std::endl
03867                   << "  partial subsets     " << _times[SUBSET_IDS_TIME] << std::endl
03868                   << "  partition time      " << _times[GET_PARTITION_TIME] << std::endl
03869                   << "  get set ids         " << _times[GET_SET_IDS_TIME] << std::endl
03870                   << "  set contents        " << _times[GET_SET_CONTENTS_TIME] << std::endl
03871                   << "  polyhedra           " << _times[GET_POLYHEDRA_TIME] << std::endl
03872                   << "  elements            " << _times[GET_ELEMENTS_TIME] << std::endl
03873                   << "  nodes               " << _times[GET_NODES_TIME] << std::endl
03874                   << "  node adjacency      " << _times[GET_NODEADJ_TIME] << std::endl
03875                   << "  side elements       " << _times[GET_SIDEELEM_TIME] << std::endl
03876                   << "  update connectivity " << _times[UPDATECONN_TIME] << std::endl
03877                   << "  adjacency           " << _times[ADJACENCY_TIME] << std::endl
03878                   << "  delete non_adj      " << _times[DELETE_NON_SIDEELEM_TIME] << std::endl
03879                   << "  recursive sets      " << _times[READ_SET_IDS_RECURS_TIME] << std::endl
03880                   << "  find contain_sets   " << _times[FIND_SETS_CONTAINING_TIME] << std::endl
03881                   << "  read sets           " << _times[READ_SETS_TIME] << std::endl
03882                   << "  read tags           " << _times[READ_TAGS_TIME] << std::endl
03883                   << "  store file ids      " << _times[STORE_FILE_IDS_TIME] << std::endl
03884                   << "  read qa records     " << _times[READ_QA_TIME] << std::endl;
03885 
03886 #ifdef MOAB_HAVE_MPI
03887     }
03888 #endif
03889 }
03890 
03891 }  // namespace moab
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines