Mesh Oriented datABase (MOAB), version 5.4.1
Array-based unstructured mesh data structure
/**
 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
 * storing and accessing finite element mesh data.
 *
 * Copyright 2004 Sandia Corporation. Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
 * retains certain rights in this software.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 */

//-------------------------------------------------------------------------
// Filename      : WriteHDF5.cpp
//
// Purpose       : TSTT HDF5 Writer
//
// Special Notes : WriteSLAC used as template for this
//
// Creator       : Jason Kraftcheck
//
// Creation Date : 04/01/04
//-------------------------------------------------------------------------

#include <cassert>
#if defined( _MSC_VER )
typedef int id_t;
#elif defined( __MINGW32__ )
#include <sys/time.h>
#else
#include <sys/types.h>
#endif

#include <cstdlib>
#include <cstring>
#include <cstdio>
#include <limits>
#include <iostream>
#include <algorithm>
#include "WriteHDF5.hpp"
#include <H5Tpublic.h>
#include <H5Ppublic.h>
#include <H5Epublic.h>
#include "moab/Interface.hpp"
#include "Internals.hpp"
#include "MBTagConventions.hpp"
#include "moab/CN.hpp"
#include "moab/FileOptions.hpp"
#include "moab/CpuTimer.hpp"
#include "IODebugTrack.hpp"
#include "mhdf.h"

#ifndef MOAB_HAVE_HDF5
#error Attempt to compile WriteHDF5 with HDF5 support disabled
#endif

#undef BLOCKED_COORD_IO

#ifdef MOAB_HAVE_VALGRIND
#include <valgrind/memcheck.h>

template < typename T >
inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& v )
{
    (void)VALGRIND_MAKE_MEM_UNDEFINED( (T*)&v[0], v.size() * sizeof( T ) );
}

#else
#ifndef VALGRIND_CHECK_MEM_IS_DEFINED
#define VALGRIND_CHECK_MEM_IS_DEFINED( a, b ) ( (void)0 )
#endif
#ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE( a, b ) ( (void)0 )
#endif
#ifndef VALGRIND_MAKE_MEM_UNDEFINED
#define VALGRIND_MAKE_MEM_UNDEFINED( a, b ) ( (void)0 )
#endif

template < typename T >
inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& )
{
    (void)VALGRIND_MAKE_MEM_UNDEFINED( 0, 0 );
}

#endif

namespace moab
{

#define WRITE_HDF5_BUFFER_SIZE ( 40 * 1024 * 1024 )

static hid_t get_id_type()
{
    if( 8 == sizeof( WriteHDF5::wid_t ) )
    {
        if( 8 == sizeof( long ) )
            return H5T_NATIVE_ULONG;
        else
            return H5T_NATIVE_UINT64;
    }
    else if( 4 == sizeof( WriteHDF5::wid_t ) )
    {
        if( 4 == sizeof( int ) )
            return H5T_NATIVE_UINT;
        else
            return H5T_NATIVE_UINT32;
    }
    else
    {
        assert( 0 );
        return (hid_t)-1;
    }
}
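
// For example, on an LP64 platform (8-byte wid_t and 8-byte long) this
// resolves to H5T_NATIVE_ULONG, while on LLP64 Windows (4-byte long) an
// 8-byte wid_t resolves to H5T_NATIVE_UINT64.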

// This is the HDF5 type used to store file IDs
const hid_t WriteHDF5::id_type = get_id_type();

// This function doesn't do anything useful. It's just a convenient
// place to set a breakpoint to determine why the writer fails.
static inline ErrorCode error( ErrorCode rval )
{
    return rval;
}

// Call \c error function during HDF5 library errors to make
// it easier to trap such errors in the debugger. This function
// gets registered with the HDF5 library as a callback. It
// works the same as the default (H5Eprint), except that it
// also calls the \c error function as a no-op.
#if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
static herr_t handle_hdf5_error( hid_t stack, void* data )
{
    WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
    herr_t result = 0;
    if( h->func ) result = ( *h->func )( stack, h->data );
    error( MB_FAILURE );
    return result;
}
#else
static herr_t handle_hdf5_error( void* data )
{
    WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
    herr_t result = 0;
    if( h->func ) result = ( *h->func )( h->data );
    error( MB_FAILURE );
    return result;
}
#endif

// Some macros to handle error checking. The CHK_MHDF_ERR_*
// macros check the value of an mhdf_Status object. The
// CHK_MB_ERR_* macros check the value of an ErrorCode.
// The *_0 macros accept no other arguments. The *_1
// macros accept a single HDF5 handle to close on error.
// The *_2 macros accept an array of two HDF5 handles to
// close on error. The *_2C macros accept one HDF5 handle
// to close on error and a bool and an HDF5 handle where
// the latter handle is conditionally closed depending on
// the value of the bool. All macros contain a "return"
// statement.
#define CHK_MHDF_ERR_0( A )                            \
    do                                                 \
    {                                                  \
        if( mhdf_isError( &( A ) ) )                   \
        {                                              \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
            assert( 0 );                               \
            return error( MB_FAILURE );                \
        }                                              \
    } while( false )

#define CHK_MHDF_ERR_1( A, B )                         \
    do                                                 \
    {                                                  \
        if( mhdf_isError( &( A ) ) )                   \
        {                                              \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
            assert( 0 );                               \
            mhdf_closeData( filePtr, ( B ), &( A ) );  \
            return error( MB_FAILURE );                \
        }                                              \
    } while( false )

#define CHK_MHDF_ERR_2( A, B )                           \
    do                                                   \
    {                                                    \
        if( mhdf_isError( &( A ) ) )                     \
        {                                                \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) );   \
            assert( 0 );                                 \
            mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
            return error( MB_FAILURE );                  \
        }                                                \
    } while( false )

#define CHK_MHDF_ERR_3( A, B )                           \
    do                                                   \
    {                                                    \
        if( mhdf_isError( &( A ) ) )                     \
        {                                                \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) );   \
            assert( 0 );                                 \
            mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
            mhdf_closeData( filePtr, ( B )[2], &( A ) ); \
            return error( MB_FAILURE );                  \
        }                                                \
    } while( false )

#define CHK_MHDF_ERR_2C( A, B, C, D )                         \
    do                                                        \
    {                                                         \
        if( mhdf_isError( &( A ) ) )                          \
        {                                                     \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) );        \
            assert( 0 );                                      \
            mhdf_closeData( filePtr, ( B ), &( A ) );         \
            if( C ) mhdf_closeData( filePtr, ( D ), &( A ) ); \
            return error( MB_FAILURE );                       \
        }                                                     \
    } while( false )

#define CHK_MB_ERR_0( A )             \
    do                                \
    {                                 \
        if( MB_SUCCESS != ( A ) )     \
        {                             \
            MB_CHK_ERR_CONT( ( A ) ); \
            return error( A );        \
        }                             \
    } while( false )

#define CHK_MB_ERR_1( A, B, C )                       \
    do                                                \
    {                                                 \
        if( MB_SUCCESS != ( A ) )                     \
        {                                             \
            MB_CHK_ERR_CONT( ( A ) );                 \
            mhdf_closeData( filePtr, ( B ), &( C ) ); \
            assert( 0 );                              \
            return error( A );                        \
        }                                             \
    } while( false )

#define CHK_MB_ERR_2( A, B, C )                          \
    do                                                   \
    {                                                    \
        if( MB_SUCCESS != ( A ) )                        \
        {                                                \
            MB_CHK_ERR_CONT( ( A ) );                    \
            mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
            write_finished();                            \
            assert( 0 );                                 \
            return error( A );                           \
        }                                                \
    } while( false )

#define CHK_MB_ERR_3( A, B, C )                          \
    do                                                   \
    {                                                    \
        if( MB_SUCCESS != ( A ) )                        \
        {                                                \
            MB_CHK_ERR_CONT( ( A ) );                    \
            mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
            mhdf_closeData( filePtr, ( B )[2], &( C ) ); \
            write_finished();                            \
            assert( 0 );                                 \
            return error( A );                           \
        }                                                \
    } while( false )

#define CHK_MB_ERR_2C( A, B, C, D, E )                        \
    do                                                        \
    {                                                         \
        if( MB_SUCCESS != ( A ) )                             \
        {                                                     \
            MB_CHK_ERR_CONT( ( A ) );                         \
            mhdf_closeData( filePtr, ( B ), &( E ) );         \
            if( C ) mhdf_closeData( filePtr, ( D ), &( E ) ); \
            write_finished();                                 \
            assert( 0 );                                      \
            return error( A );                                \
        }                                                     \
    } while( false )

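// Illustrative usage sketch (editor's addition, mirroring the pattern used
// throughout this file): open an mhdf table, verify the mhdf_Status, then
// verify a MOAB ErrorCode while closing the still-open handle on failure.
//
//   mhdf_Status status;
//   long size;
//   hid_t table = mhdf_openSetChildren( filePtr, &size, &status );
//   CHK_MHDF_ERR_0( status );            // return on mhdf error
//   ErrorCode rval = write_set_data( WriteUtilIface::CHILDREN, table, track );
//   CHK_MB_ERR_1( rval, table, status ); // close 'table', then return
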
#define debug_barrier() debug_barrier_line( __LINE__ )
void WriteHDF5::debug_barrier_line( int ) {}

class CheckOpenWriteHDF5Handles
{
    int fileline;
    mhdf_FileHandle handle;
    int enter_count;

  public:
    CheckOpenWriteHDF5Handles( mhdf_FileHandle file, int line )
        : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
    {
    }

    ~CheckOpenWriteHDF5Handles()
    {
        int new_count = mhdf_countOpenHandles( handle );
        if( new_count != enter_count )
        {
            std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
                      << "Open at entrance: " << enter_count << std::endl
                      << "Open at exit:     " << new_count << std::endl;
        }
    }
};

MPEState WriteHDF5::topState;
MPEState WriteHDF5::subState;

#ifdef NDEBUG
#define CHECK_OPEN_HANDLES
#else
#define CHECK_OPEN_HANDLES CheckOpenWriteHDF5Handles check_open_handles_( filePtr, __LINE__ )
#endif

bool WriteHDF5::convert_handle_tag( const EntityHandle* source, EntityHandle* dest, size_t count ) const
{
    bool some_valid = false;
    for( size_t i = 0; i < count; ++i )
    {
        if( !source[i] )
            dest[i] = 0;
        else
        {
            dest[i] = idMap.find( source[i] );
            if( dest[i] ) some_valid = true;
        }
    }

    return some_valid;
}

bool WriteHDF5::convert_handle_tag( EntityHandle* data, size_t count ) const
{
    assert( sizeof( EntityHandle ) == sizeof( wid_t ) );
    return convert_handle_tag( data, data, count );
}

ErrorCode WriteHDF5::assign_ids( const Range& entities, wid_t id )
{
    Range::const_pair_iterator pi;
    for( pi = entities.const_pair_begin(); pi != entities.const_pair_end(); ++pi )
    {
        const EntityHandle n = pi->second - pi->first + 1;
        dbgOut.printf( 3, "Assigning %s %lu to %lu to file IDs [%lu,%lu]\n",
                       CN::EntityTypeName( TYPE_FROM_HANDLE( pi->first ) ),
                       (unsigned long)( ID_FROM_HANDLE( pi->first ) ),
                       (unsigned long)( ID_FROM_HANDLE( pi->first ) + n - 1 ), (unsigned long)id,
                       (unsigned long)( id + n - 1 ) );
        if( TYPE_FROM_HANDLE( pi->first ) == MBPOLYGON || TYPE_FROM_HANDLE( pi->first ) == MBPOLYHEDRON )
        {
            int num_vertices         = 0;
            const EntityHandle* conn = 0;
            iFace->get_connectivity( pi->first, conn, num_vertices );
            dbgOut.printf( 3, "  poly with %d verts/faces\n", num_vertices );
        }
        if( !idMap.insert( pi->first, id, n ).second ) return error( MB_FAILURE );
        id += n;
    }

    return MB_SUCCESS;
}
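
// Illustrative example (editor's note): after assign_ids( r, 1 ) on a Range
// holding the contiguous handles { h, h+1, h+2 }, idMap.find( h ) == 1,
// idMap.find( h+1 ) == 2, and idMap.find( h+2 ) == 3; a subsequent block
// would continue at file ID 4.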

const char* WriteHDF5::ExportSet::name() const
{
    static char buffer[128];
    switch( type )
    {
        case MBVERTEX:
            return mhdf_node_type_handle();
        case MBENTITYSET:
            return mhdf_set_type_handle();
        default:
            sprintf( buffer, "%s%d", CN::EntityTypeName( type ), num_nodes );
            return buffer;
    }
}

WriterIface* WriteHDF5::factory( Interface* iface )
{
    return new WriteHDF5( iface );
}

WriteHDF5::WriteHDF5( Interface* iface )
    : bufferSize( WRITE_HDF5_BUFFER_SIZE ), dataBuffer( 0 ), iFace( iface ), writeUtil( 0 ), filePtr( 0 ),
      setContentsOffset( 0 ), setChildrenOffset( 0 ), setParentsOffset( 0 ), maxNumSetContents( 0 ),
      maxNumSetChildren( 0 ), maxNumSetParents( 0 ), writeSets( false ), writeSetContents( false ),
      writeSetChildren( false ), writeSetParents( false ), parallelWrite( false ), collectiveIO( false ),
      writeTagDense( false ), writeProp( H5P_DEFAULT ), dbgOut( "H5M", stderr ), debugTrack( false )
{
}

ErrorCode WriteHDF5::init()
{
    ErrorCode rval;

    if( writeUtil )  // init has already been called
        return MB_SUCCESS;
    /*
    #ifdef DEBUG
        H5Eset_auto(&hdf_error_handler, writeUtil); // HDF5 callback for errors
    #endif
    */
    // For known tag types, store the corresponding HDF5 type in which
    // the tag data is to be written in the file.
    // register_known_tag_types(iFace);

    // Get the util interface
    rval = iFace->query_interface( writeUtil );
    CHK_MB_ERR_0( rval );

    idMap.clear();

#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
#else
    herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
#endif
    if( err < 0 )
    {
        errorHandler.func = 0;
        errorHandler.data = 0;
    }
    else
    {
#if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
        err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
#else
        err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
#endif
        if( err < 0 )
        {
            errorHandler.func = 0;
            errorHandler.data = 0;
        }
    }

    if( !topState.valid() ) topState = MPEState( "WriteHDF5", "yellow" );
    if( !subState.valid() ) subState = MPEState( "WriteHDF5 subevent", "cyan" );

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_finished()
{
    // Release memory allocated in lists
    exportList.clear();
    nodeSet.range.clear();
    setSet.range.clear();
    tagList.clear();
    idMap.clear();

    HDF5ErrorHandler handler;
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
#else
    herr_t err = H5Eget_auto( &handler.func, &handler.data );
#endif
    if( err >= 0 && handler.func == &handle_hdf5_error )
    {
        assert( handler.data == &errorHandler );
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
        H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
#else
        H5Eset_auto( errorHandler.func, errorHandler.data );
#endif
    }

    return MB_SUCCESS;
}

WriteHDF5::~WriteHDF5()
{
    if( !writeUtil )  // init() failed.
        return;

    iFace->release_interface( writeUtil );
}

ErrorCode WriteHDF5::write_file( const char* filename,
                                 bool overwrite,
                                 const FileOptions& opts,
                                 const EntityHandle* set_array,
                                 const int num_sets,
                                 const std::vector< std::string >& qa_records,
                                 const Tag* tag_list,
                                 int num_tags,
                                 int user_dimension )
{
    mhdf_Status status;

    parallelWrite = false;
    collectiveIO  = false;

    // Enable debug output
    int tmpval = 0;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) ) dbgOut.set_verbosity( tmpval );

    // writeTagDense = (MB_SUCCESS == opts.get_null_option("DENSE_TAGS"));
    writeTagDense = true;

    // Enable some extra consistency checks on the I/O. Note: amongst other
    // things this will print errors if the entire file is not written, so
    // this should be disabled for anything other than a complete write.
    debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );

    bufferSize = WRITE_HDF5_BUFFER_SIZE;
    int buf_size;
    ErrorCode rval = opts.get_int_option( "BUFFER_SIZE", buf_size );
    if( MB_SUCCESS == rval && buf_size >= 24 ) bufferSize = buf_size;

    // Allocate internal buffer to use when gathering data to write.
    dataBuffer = (char*)malloc( bufferSize );
    if( !dataBuffer ) return error( MB_MEMORY_ALLOCATION_FAILED );

    // Clear filePtr so we know if it is open upon failure
    filePtr = 0;

    // Do actual write.
    writeProp        = H5P_DEFAULT;
    ErrorCode result = write_file_impl( filename, overwrite, opts, set_array, num_sets, qa_records, tag_list, num_tags,
                                        user_dimension );
    // Close writeProp if it was opened
    if( writeProp != H5P_DEFAULT ) H5Pclose( writeProp );

    // Free memory buffer
    free( dataBuffer );
    dataBuffer = 0;

    // Close file
    bool created_file = false;
    if( filePtr )
    {
        created_file = true;
        mhdf_closeFile( filePtr, &status );
        filePtr = 0;
        if( mhdf_isError( &status ) )
        {
            MB_SET_ERR_CONT( mhdf_message( &status ) );
            if( MB_SUCCESS == result ) result = MB_FAILURE;
        }
    }

    // Release other resources
    if( MB_SUCCESS == result )
        result = write_finished();
    else
        write_finished();

    // If write failed, remove file unless KEEP option was specified
    if( MB_SUCCESS != result && created_file && MB_ENTITY_NOT_FOUND == opts.get_null_option( "KEEP" ) )
        remove( filename );

    return result;
}

ErrorCode WriteHDF5::write_file_impl( const char* filename,
                                      bool overwrite,
                                      const FileOptions& opts,
                                      const EntityHandle* set_array,
                                      const int num_sets,
                                      const std::vector< std::string >& qa_records,
                                      const Tag* tag_list,
                                      int num_tags,
                                      int user_dimension )
{
    ErrorCode result;
    std::list< TagDesc >::const_iterator t_itor;
    std::list< ExportSet >::iterator ex_itor;
    EntityHandle elem_count, max_id;
    double times[NUM_TIMES] = { 0 };

    if( MB_SUCCESS != init() ) return error( MB_FAILURE );

    // See if we need to report times
    bool cputime = false;
    result       = opts.get_null_option( "CPUTIME" );
    if( MB_SUCCESS == result ) cputime = true;

    CpuTimer timer;

    dbgOut.tprint( 1, "Gathering Mesh\n" );
    topState.start( "gathering mesh" );

    // Gather mesh to export
    exportList.clear();
    if( 0 == num_sets || ( 1 == num_sets && set_array[0] == 0 ) )
    {
        result = gather_all_mesh();
        topState.end( result );
        CHK_MB_ERR_0( result );
    }
    else
    {
        std::vector< EntityHandle > passed_export_list( set_array, set_array + num_sets );
        result = gather_mesh_info( passed_export_list );
        topState.end( result );
        CHK_MB_ERR_0( result );
    }

    times[GATHER_TIME] = timer.time_elapsed();

    // if (nodeSet.range.size() == 0)
    //  return error(MB_ENTITY_NOT_FOUND);

    dbgOut.tprint( 1, "Checking ID space\n" );

    // Make sure ID space is sufficient
    elem_count = nodeSet.range.size() + setSet.range.size();
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
        elem_count += ex_itor->range.size();
    max_id = (EntityHandle)1 << ( 8 * sizeof( wid_t ) - 1 );
    if( elem_count > max_id )
    {
        MB_SET_ERR_CONT( "ID space insufficient for mesh size" );
        // 'result' is MB_SUCCESS at this point, so return an explicit failure code
        return error( MB_FAILURE );
    }

    dbgOut.tprint( 1, "Creating File\n" );

    // Figure out the dimension in which to write the mesh.
    int mesh_dim;
    result = iFace->get_dimension( mesh_dim );
    CHK_MB_ERR_0( result );

    if( user_dimension < 1 ) user_dimension = mesh_dim;
    user_dimension = user_dimension > mesh_dim ? mesh_dim : user_dimension;

    // Create the file layout, including all tables (zero-ed) and
    // all structure and meta information.
    const char* optnames[] = { "WRITE_PART", "FORMAT", 0 };
    int junk;
    parallelWrite = ( MB_SUCCESS == opts.match_option( "PARALLEL", optnames, junk ) );
    if( parallelWrite )
    {
        // Just store Boolean value based on string option here.
        // parallel_create_file will set writeProp accordingly.
        // collectiveIO = (MB_SUCCESS == opts.get_null_option("COLLECTIVE"));
        // dbgOut.printf(2, "'COLLECTIVE' option = %s\n", collectiveIO ? "YES" : "NO");
        // Do this all the time, as it appears to be much faster than indep in some cases
        collectiveIO = true;
        result =
            parallel_create_file( filename, overwrite, qa_records, opts, tag_list, num_tags, user_dimension, times );
    }
    else
    {
        result = serial_create_file( filename, overwrite, qa_records, tag_list, num_tags, user_dimension );
    }
    if( MB_SUCCESS != result ) return error( result );

    times[CREATE_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing Nodes.\n" );
    // Write node coordinates
    if( !nodeSet.range.empty() || parallelWrite )
    {
        topState.start( "writing coords" );
        result = write_nodes();
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }

    times[COORD_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing connectivity.\n" );

    // Write element connectivity
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        topState.start( "writing connectivity for ", ex_itor->name() );
        result = write_elems( *ex_itor );
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }
    times[CONN_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing sets.\n" );

    // Write meshsets
    result = write_sets( times );
    if( MB_SUCCESS != result ) return error( result );
    debug_barrier();

    times[SET_TIME] = timer.time_elapsed();
    dbgOut.tprint( 1, "Writing adjacencies.\n" );

    // Write adjacencies
    // Tim says don't save node adjacencies!
#ifdef MB_H5M_WRITE_NODE_ADJACENCIES
    result = write_adjacencies( nodeSet );
    if( MB_SUCCESS != result ) return error( result );
#endif
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        topState.start( "writing adjacencies for ", ex_itor->name() );
        result = write_adjacencies( *ex_itor );
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }
    times[ADJ_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing tags.\n" );

    // Write tags
    for( t_itor = tagList.begin(); t_itor != tagList.end(); ++t_itor )
    {
        std::string name;
        iFace->tag_get_name( t_itor->tag_id, name );
        topState.start( "writing tag: ", name.c_str() );
        result = write_tag( *t_itor, times );
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }
    times[TAG_TIME] = timer.time_elapsed();

    times[TOTAL_TIME] = timer.time_since_birth();

    if( cputime )
    {
        print_times( times );
    }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::initialize_mesh( const Range ranges[5] )
{
    ErrorCode rval;

    if( !ranges[0].all_of_type( MBVERTEX ) ) return error( MB_FAILURE );
    nodeSet.range        = ranges[0];
    nodeSet.type         = MBVERTEX;
    nodeSet.num_nodes    = 1;
    nodeSet.max_num_ents = nodeSet.max_num_adjs = 0;

    if( !ranges[4].all_of_type( MBENTITYSET ) ) return error( MB_FAILURE );
    setSet.range        = ranges[4];
    setSet.type         = MBENTITYSET;
    setSet.num_nodes    = 0;
    setSet.max_num_ents = setSet.max_num_adjs = 0;
    maxNumSetContents = maxNumSetChildren = maxNumSetParents = 0;

    exportList.clear();
    std::vector< Range > bins( 1024 );  // Sort entities by connectivity length
                                        // Resize is expensive due to Range copy, so start big
    for( EntityType type = MBEDGE; type < MBENTITYSET; ++type )
    {
        ExportSet set;
        set.max_num_ents = set.max_num_adjs = 0;
        const int dim = CN::Dimension( type );

        // Group entities by connectivity length
        bins.clear();
        assert( dim >= 0 && dim <= 4 );
        std::pair< Range::const_iterator, Range::const_iterator > p = ranges[dim].equal_range( type );
        Range::const_iterator i = p.first;
        while( i != p.second )
        {
            Range::const_iterator first = i;
            EntityHandle const* conn;
            int len, firstlen;

            // Dummy storage vector for structured mesh "get_connectivity" function
            std::vector< EntityHandle > storage;

            rval = iFace->get_connectivity( *i, conn, firstlen, false, &storage );
            if( MB_SUCCESS != rval ) return error( rval );

            for( ++i; i != p.second; ++i )
            {
                rval = iFace->get_connectivity( *i, conn, len, false, &storage );
                if( MB_SUCCESS != rval ) return error( rval );

                if( len != firstlen ) break;
            }

            if( firstlen >= (int)bins.size() ) bins.resize( firstlen + 1 );
            bins[firstlen].merge( first, i );
        }
        // Create ExportSet for each group
        for( std::vector< Range >::iterator j = bins.begin(); j != bins.end(); ++j )
        {
            if( j->empty() ) continue;

            set.range.clear();
            set.type      = type;
            set.num_nodes = j - bins.begin();
            exportList.push_back( set );
            exportList.back().range.swap( *j );
        }
    }

    return MB_SUCCESS;
}
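
// Illustrative example (editor's note): a mesh containing both 8-node and
// 27-node hexahedra yields two ExportSets here, written to the file as the
// element groups "Hex8" and "Hex27" (see ExportSet::name() above).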

// Gather the mesh to be written from a list of owning meshsets.
ErrorCode WriteHDF5::gather_mesh_info( const std::vector< EntityHandle >& export_sets )
{
    ErrorCode rval;

    int dim;
    Range range;      // Temporary storage
    Range ranges[5];  // Lists of entities to export, grouped by dimension

    // Gather list of all related sets
    std::vector< EntityHandle > stack( export_sets );  // already holds a copy of export_sets
    std::vector< EntityHandle > set_children;
    while( !stack.empty() )
    {
        EntityHandle meshset = stack.back();
        stack.pop_back();
        ranges[4].insert( meshset );

        // Get contained sets
        range.clear();
        rval = iFace->get_entities_by_type( meshset, MBENTITYSET, range );
        CHK_MB_ERR_0( rval );
        for( Range::iterator ritor = range.begin(); ritor != range.end(); ++ritor )
        {
            if( ranges[4].find( *ritor ) == ranges[4].end() ) stack.push_back( *ritor );
        }

        // Get child sets
        set_children.clear();
        rval = iFace->get_child_meshsets( meshset, set_children, 1 );
        CHK_MB_ERR_0( rval );
        for( std::vector< EntityHandle >::iterator vitor = set_children.begin(); vitor != set_children.end(); ++vitor )
        {
            if( ranges[4].find( *vitor ) == ranges[4].end() ) stack.push_back( *vitor );
        }
    }

    // Gather list of all mesh entities from list of sets,
    // grouped by dimension.
    for( Range::iterator setitor = ranges[4].begin(); setitor != ranges[4].end(); ++setitor )
    {
        for( dim = 0; dim < 4; ++dim )
        {
            range.clear();
            rval = iFace->get_entities_by_dimension( *setitor, dim, range, false );
            CHK_MB_ERR_0( rval );

            ranges[dim].merge( range );
        }
    }

    // For each list of elements, append adjacent children and
    // nodes to lists.
    for( dim = 3; dim > 0; --dim )
    {
        for( int cdim = 1; cdim < dim; ++cdim )
        {
            range.clear();
            rval = iFace->get_adjacencies( ranges[dim], cdim, false, range );
            CHK_MB_ERR_0( rval );
            ranges[cdim].merge( range );
        }
        range.clear();
        rval = writeUtil->gather_nodes_from_elements( ranges[dim], 0, range );
        CHK_MB_ERR_0( rval );
        ranges[0].merge( range );
    }

    return initialize_mesh( ranges );
}

// Gather all the mesh and related information to be written.
ErrorCode WriteHDF5::gather_all_mesh()
{
    ErrorCode rval;
    Range ranges[5];

    rval = iFace->get_entities_by_type( 0, MBVERTEX, ranges[0] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_dimension( 0, 1, ranges[1] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_dimension( 0, 2, ranges[2] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_dimension( 0, 3, ranges[3] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_type( 0, MBENTITYSET, ranges[4] );
    if( MB_SUCCESS != rval ) return error( rval );

    return initialize_mesh( ranges );
}

ErrorCode WriteHDF5::write_nodes()
{
    mhdf_Status status;
    int dim, mesh_dim;
    ErrorCode rval;
    hid_t node_table;
    long first_id, num_nodes;

    if( !nodeSet.total_num_ents ) return MB_SUCCESS;  // No nodes!

    CHECK_OPEN_HANDLES;

    rval = iFace->get_dimension( mesh_dim );
    CHK_MB_ERR_0( rval );

    debug_barrier();
    dbgOut.print( 3, "Opening Node Coords\n" );
    node_table = mhdf_openNodeCoords( filePtr, &num_nodes, &dim, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track( debugTrack, "nodes", num_nodes );

    double* buffer = (double*)dataBuffer;
#ifdef BLOCKED_COORD_IO
    int chunk_size = bufferSize / sizeof( double );
#else
    int chunk_size = bufferSize / ( 3 * sizeof( double ) );
#endif
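
// Worked example (editor's note): with the default 40 MiB buffer and
// interleaved I/O, chunk_size = ( 40 * 1024 * 1024 ) / ( 3 * 8 ) = 1747626
// nodes per write call.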

    long remaining  = nodeSet.range.size();
    long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
    if( nodeSet.max_num_ents )
    {
        assert( nodeSet.max_num_ents >= remaining );
        num_writes = ( nodeSet.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    long remaining_writes = num_writes;

    long offset                = nodeSet.offset;
    Range::const_iterator iter = nodeSet.range.begin();
    dbgOut.printf( 3, "Writing %ld nodes in %ld blocks of %d\n", remaining,
                   ( remaining + chunk_size - 1 ) / chunk_size, chunk_size );
    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
        long count = chunk_size < remaining ? chunk_size : remaining;
        remaining -= count;
        Range::const_iterator end = iter;
        end += count;

#ifdef BLOCKED_COORD_IO
        for( int d = 0; d < dim; d++ )
        {
            if( d < mesh_dim )
            {
                rval = writeUtil->get_node_coords( d, iter, end, count, buffer );
                CHK_MB_ERR_1( rval, node_table, status );
            }
            else
                memset( buffer, 0, count * sizeof( double ) );

            dbgOut.printf( 3, "  writing %c node chunk %ld of %ld, %ld values at %ld\n", (char)( 'X' + d ),
                           num_writes - remaining_writes + 1, num_writes, count, offset );
            mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
            CHK_MHDF_ERR_1( status, node_table );
        }
#else
        rval = writeUtil->get_node_coords( -1, iter, end, 3 * count, buffer );
        CHK_MB_ERR_1( rval, node_table, status );
        dbgOut.printf( 3, "  writing node chunk %ld of %ld, %ld values at %ld\n", num_writes - remaining_writes + 1,
                       num_writes, count, offset );
        mhdf_writeNodeCoordsWithOpt( node_table, offset, count, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, node_table );
#endif
        track.record_io( offset, count );

        iter = end;
        offset += count;
        --remaining_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( remaining_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
#ifdef BLOCKED_COORD_IO
            for( int d = 0; d < dim; ++d )
            {
                dbgOut.printf( 3, "  writing (empty) %c node chunk %ld of %ld.\n", (char)( 'X' + d ),
                               num_writes - remaining_writes, num_writes );
                mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
                CHK_MHDF_ERR_1( status, node_table );
            }
#else
            dbgOut.printf( 3, "  writing (empty) node chunk %ld of %ld.\n", num_writes - remaining_writes,
                           num_writes );
            mhdf_writeNodeCoordsWithOpt( node_table, offset, 0, 0, writeProp, &status );
            CHK_MHDF_ERR_1( status, node_table );
#endif
        }
    }

    mhdf_closeData( filePtr, node_table, &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_elems( ExportSet& elems )
{
    mhdf_Status status;
    ErrorCode rval;
    long first_id;
    int nodes_per_elem;
    long table_size;

    CHECK_OPEN_HANDLES;

    debug_barrier();
    dbgOut.printf( 2, "Writing %lu elements of type %s%d\n", (unsigned long)elems.range.size(),
                   CN::EntityTypeName( elems.type ), elems.num_nodes );
    dbgOut.print( 3, "Writing elements", elems.range );

    hid_t elem_table =
        mhdf_openConnectivity( filePtr, elems.name(), &nodes_per_elem, &table_size, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track( debugTrack, elems.name() && strlen( elems.name() ) ? elems.name() : "", table_size );

    assert( (unsigned long)first_id <= elems.first_id );
    assert( (unsigned long)table_size >= elems.offset + elems.range.size() );

    EntityHandle* buffer = (EntityHandle*)dataBuffer;
    int chunk_size       = bufferSize / ( elems.num_nodes * sizeof( wid_t ) );
    long offset          = elems.offset;
    long remaining       = elems.range.size();
    long num_writes      = ( remaining + chunk_size - 1 ) / chunk_size;
    if( elems.max_num_ents )
    {
        assert( elems.max_num_ents >= remaining );
        num_writes = ( elems.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    long remaining_writes = num_writes;
    Range::iterator iter  = elems.range.begin();

    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
        long count = chunk_size < remaining ? chunk_size : remaining;
        remaining -= count;

        Range::iterator next = iter;
        next += count;
        rval = writeUtil->get_element_connect( iter, next, elems.num_nodes, count * elems.num_nodes, buffer );
        CHK_MB_ERR_1( rval, elem_table, status );
        iter = next;

        for( long i = 0; i < count * nodes_per_elem; ++i )
        {
            buffer[i] = idMap.find( buffer[i] );
            if( 0 == buffer[i] )
            {
                MB_SET_ERR_CONT( "Invalid " << elems.name() << " element connectivity. Write Aborted" );
                mhdf_closeData( filePtr, elem_table, &status );
                return error( MB_FAILURE );
            }
        }

        dbgOut.printf( 3, "  writing node connectivity %ld of %ld, %ld values at %ld\n",
                       num_writes - remaining_writes + 1, num_writes, count, offset );
        track.record_io( offset, count );
        mhdf_writeConnectivityWithOpt( elem_table, offset, count, id_type, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, elem_table );

        offset += count;
        --remaining_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( remaining_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.printf( 3, "  writing (empty) connectivity chunk %ld of %ld.\n",
                           num_writes - remaining_writes + 1, num_writes );
            mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
            CHK_MHDF_ERR_1( status, elem_table );
        }
    }

    mhdf_closeData( filePtr, elem_table, &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}

ErrorCode WriteHDF5::get_set_info( EntityHandle set,
                                   long& num_entities,
                                   long& num_children,
                                   long& num_parents,
                                   unsigned long& flags )
{
    ErrorCode rval;
    int i;
    unsigned int u;

    rval = iFace->get_number_entities_by_handle( set, i, false );
    CHK_MB_ERR_0( rval );
    num_entities = i;

    rval = iFace->num_child_meshsets( set, &i );
    CHK_MB_ERR_0( rval );
    num_children = i;

    rval = iFace->num_parent_meshsets( set, &i );
    CHK_MB_ERR_0( rval );
    num_parents = i;

    rval = iFace->get_meshset_options( set, u );
    CHK_MB_ERR_0( rval );
    flags = u;

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_set_data( const WriteUtilIface::EntityListType which_data,
                                     const hid_t handle,
                                     IODebugTrack& track,
                                     Range* ranged,
                                     Range* null_stripped,
                                     std::vector< long >* set_sizes )
{
    // ranged must be non-null for CONTENTS and null for anything else
    assert( ( which_data == WriteUtilIface::CONTENTS ) == ( 0 != ranged ) );
    ErrorCode rval;
    mhdf_Status status;

    debug_barrier();

    // Function pointer type used to write set data
    void ( *write_func )( hid_t, long, long, hid_t, const void*, hid_t, mhdf_Status* );
    long max_vals;  // Max over all procs of number of values to write to data set
    long offset;    // Offset in HDF5 dataset at which to write next block of data
    switch( which_data )
    {
        case WriteUtilIface::CONTENTS:
            assert( ranged != 0 && null_stripped != 0 && set_sizes != 0 );
            write_func = &mhdf_writeSetDataWithOpt;
            max_vals   = maxNumSetContents;
            offset     = setContentsOffset;
            dbgOut.print( 2, "Writing set contents\n" );
            break;
        case WriteUtilIface::CHILDREN:
            assert( !ranged && !null_stripped && !set_sizes );
            write_func = &mhdf_writeSetParentsChildrenWithOpt;
            max_vals   = maxNumSetChildren;
            offset     = setChildrenOffset;
            dbgOut.print( 2, "Writing set child lists\n" );
            break;
        case WriteUtilIface::PARENTS:
            assert( !ranged && !null_stripped && !set_sizes );
            write_func = &mhdf_writeSetParentsChildrenWithOpt;
            max_vals   = maxNumSetParents;
            offset     = setParentsOffset;
            dbgOut.print( 2, "Writing set parent lists\n" );
            break;
        default:
            assert( false );
            return MB_FAILURE;
    }
    // assert(max_vals > 0); // Should have skipped this function otherwise

    // buffer to use for IO
    wid_t* buffer = reinterpret_cast< wid_t* >( dataBuffer );
    // number of handles that will fit in the buffer
    const size_t buffer_size = bufferSize / sizeof( EntityHandle );
    // the total number of write calls that must be made, including no-ops for collective io
    const size_t num_total_writes = ( max_vals + buffer_size - 1 ) / buffer_size;

    std::vector< SpecialSetData >::iterator si = specialSets.begin();

    std::vector< wid_t > remaining;         // data left over from prev iteration because it didn't fit in buffer
    size_t remaining_offset = 0;            // avoid erasing from front of 'remaining'
    const EntityHandle* remaining_ptr = 0;  // remaining for non-ranged data
    size_t remaining_count = 0;
    const wid_t* special_rem_ptr = 0;
    Range::const_iterator i = setSet.range.begin(), j, rhint, nshint;
    if( ranged ) rhint = ranged->begin();
    if( null_stripped ) nshint = null_stripped->begin();
    for( size_t w = 0; w < num_total_writes; ++w )
    {
        if( i == setSet.range.end() && remaining.empty() && !remaining_ptr )
        {
            // If here, then we've written everything but we need to
            // make more write calls because we're doing collective IO
            // in parallel
            ( *write_func )( handle, 0, 0, id_type, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
            continue;
        }

        // If we had some left-over data from a range-compacted set
        // from the last iteration, add it to the buffer now
        size_t count = 0;
        if( !remaining.empty() )
        {
            count = remaining.size() - remaining_offset;
            if( count > buffer_size )
            {
                memcpy( buffer, &remaining[remaining_offset], buffer_size * sizeof( wid_t ) );
                count = buffer_size;
                remaining_offset += buffer_size;
            }
            else
            {
                memcpy( buffer, &remaining[remaining_offset], count * sizeof( wid_t ) );
                remaining_offset = 0;
                remaining.clear();
            }
        }
        // If we had some left-over data from a non-range-compacted set
        // from the last iteration, add it to the buffer now
        else if( remaining_ptr )
        {
            if( remaining_count > buffer_size )
            {
                rval = vector_to_id_list( remaining_ptr, buffer, buffer_size );
                CHK_MB_ERR_0( rval );
                count = buffer_size;
                remaining_ptr += count;
                remaining_count -= count;
            }
            else
            {
                rval = vector_to_id_list( remaining_ptr, buffer, remaining_count );
                CHK_MB_ERR_0( rval );
                count           = remaining_count;
                remaining_ptr   = 0;
                remaining_count = 0;
            }
        }
        // If we had some left-over data from a "special" (i.e. parallel shared)
        // set.
        else if( special_rem_ptr )
        {
            if( remaining_count > buffer_size )
            {
                memcpy( buffer, special_rem_ptr, buffer_size * sizeof( wid_t ) );
                count = buffer_size;
                special_rem_ptr += count;
                remaining_count -= count;
            }
            else
            {
                memcpy( buffer, special_rem_ptr, remaining_count * sizeof( wid_t ) );
                count           = remaining_count;
                special_rem_ptr = 0;
                remaining_count = 0;
            }
        }

        // While there is both space remaining in the buffer and
        // more sets to write, append more set data to buffer.

        while( count < buffer_size && i != setSet.range.end() )
        {
            // Special case for "special" (i.e. parallel shared) sets:
            // we already have the data in a vector, just copy it.
            if( si != specialSets.end() && si->setHandle == *i )
            {
                std::vector< wid_t >& list = ( which_data == WriteUtilIface::CONTENTS ) ? si->contentIds
                                             : ( which_data == WriteUtilIface::PARENTS ) ? si->parentIds
                                                                                         : si->childIds;
                size_t append = list.size();
                if( count + list.size() > buffer_size )
                {
                    append          = buffer_size - count;
                    special_rem_ptr = &list[append];
                    remaining_count = list.size() - append;
                }
                memcpy( buffer + count, &list[0], append * sizeof( wid_t ) );
                ++i;
                ++si;
                count += append;
                continue;
            }

            j = i;
            ++i;
            const EntityHandle* ptr;
            int len;
            unsigned char flags;
            rval = writeUtil->get_entity_list_pointers( j, i, &ptr, which_data, &len, &flags );
            if( MB_SUCCESS != rval ) return rval;
            if( which_data == WriteUtilIface::CONTENTS && !( flags & MESHSET_ORDERED ) )
            {
                bool compacted;
                remaining.clear();
                if( len == 0 )
                    compacted = false;
                else
                {
                    assert( !( len % 2 ) );
                    rval = range_to_blocked_list( ptr, len / 2, remaining, compacted );
                    if( MB_SUCCESS != rval ) return rval;
                }
                if( compacted )
                {
                    rhint = ranged->insert( rhint, *j );
                    set_sizes->push_back( remaining.size() );
                }
                else if( remaining.size() != (unsigned)len )
                {
                    nshint = null_stripped->insert( nshint, *j );
                    set_sizes->push_back( remaining.size() );
                }

                if( count + remaining.size() <= buffer_size )
                {
                    if( !remaining.empty() )
                        memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining.size() );
                    count += remaining.size();
                    remaining.clear();
                    remaining_offset = 0;
                }
                else
                {
                    remaining_offset = buffer_size - count;
                    memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining_offset );
                    count += remaining_offset;
                }
            }
            else
            {
                if( count + len > buffer_size )
                {
                    size_t append   = buffer_size - count;
                    remaining_ptr   = ptr + append;
                    remaining_count = len - append;
                    len             = append;
                }

                rval = vector_to_id_list( ptr, buffer + count, len );
                if( MB_SUCCESS != rval ) return error( rval );
                count += len;
            }
        }

        // Write the buffer.
        ( *write_func )( handle, offset, count, id_type, buffer, writeProp, &status );
        CHK_MHDF_ERR_0( status );
        track.record_io( offset, count );
        offset += count;
    }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_sets( double* times )
{
    mhdf_Status status;
    ErrorCode rval;
    long first_id, size;
    hid_t table;
    CpuTimer timer;

    CHECK_OPEN_HANDLES;
    /* If no sets, just return success */
    if( !writeSets ) return MB_SUCCESS;

    debug_barrier();
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Write set parents */
    if( writeSetParents )
    {
        topState.start( "writing parent lists for local sets" );
        table = mhdf_openSetParents( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetParents", size );

        rval = write_set_data( WriteUtilIface::PARENTS, table, track );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_PARENT] = timer.time_elapsed();
        track.all_reduce();
    }

    /* Write set children */
    if( writeSetChildren )
    {
        topState.start( "writing child lists for local sets" );
        table = mhdf_openSetChildren( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetChildren", size );

        rval = write_set_data( WriteUtilIface::CHILDREN, table, track );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_CHILD] = timer.time_elapsed();
        track.all_reduce();
    }

    /* Write set contents */
    Range ranged_sets, null_stripped_sets;
    std::vector< long > set_sizes;
    if( writeSetContents )
    {
        topState.start( "writing content lists for local sets" );
        table = mhdf_openSetData( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetContents", size );

        rval = write_set_data( WriteUtilIface::CONTENTS, table, track, &ranged_sets, &null_stripped_sets,
                               &set_sizes );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_CONTENT] = timer.time_elapsed();
        track.all_reduce();
    }
    assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );

    /* Write set description table */

    debug_barrier();
    topState.start( "writing descriptions of local sets" );
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Open the table */
    table = mhdf_openSetMeta( filePtr, &size, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track_meta( debugTrack, "SetMeta", size );

    /* Some debug stuff */
    debug_barrier();
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Counts and buffers and such */
    mhdf_index_t* const buffer     = reinterpret_cast< mhdf_index_t* >( dataBuffer );
    const size_t buffer_size       = bufferSize / ( 4 * sizeof( mhdf_index_t ) );
    const size_t num_local_writes  = ( setSet.range.size() + buffer_size - 1 ) / buffer_size;
    const size_t num_global_writes = ( setSet.max_num_ents + buffer_size - 1 ) / buffer_size;
    assert( num_local_writes <= num_global_writes );
    assert( num_global_writes > 0 );

    /* data about sets for which number of handles written is
     * not the same as the number of handles in the set
     * (range-compacted or null handles stripped out)
     */
    Range::const_iterator i = setSet.range.begin();
    Range::const_iterator r = ranged_sets.begin();
    Range::const_iterator s = null_stripped_sets.begin();
    std::vector< long >::const_iterator n = set_sizes.begin();  // matches the declared type of set_sizes
    assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );

    /* We write the end index for each list, rather than the count */
    mhdf_index_t prev_contents_end = setContentsOffset - 1;
    mhdf_index_t prev_children_end = setChildrenOffset - 1;
    mhdf_index_t prev_parents_end  = setParentsOffset - 1;

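    /* Illustrative example (editor's note): with setContentsOffset == 0, a
     * first set containing 3 entities is recorded with content end-index 2,
     * and a following set containing 2 entities with end-index 4; a reader
     * recovers the counts by differencing consecutive end indices. */
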
    /* While there is more data to write */
    size_t offset                                    = setSet.offset;
    std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
    for( size_t w = 0; w < num_local_writes; ++w )
    {
        // Get a buffer full of data
        size_t count = 0;
        while( count < buffer_size && i != setSet.range.end() )
        {
            // Get set properties
            long num_ent, num_child, num_parent;
            unsigned long flags;
            if( si != specialSets.end() && si->setHandle == *i )
            {
                flags      = si->setFlags;
                num_ent    = si->contentIds.size();
                num_child  = si->childIds.size();
                num_parent = si->parentIds.size();
                ++si;
                if( r != ranged_sets.end() && *i == *r )
                {
                    assert( flags & mhdf_SET_RANGE_BIT );
                    ++r;
                    ++n;
                }
                else if( s != null_stripped_sets.end() && *i == *s )
                {
                    ++s;
                    ++n;
                }
            }
            else
            {
                assert( si == specialSets.end() || si->setHandle > *i );

                // Get set properties
                rval = get_set_info( *i, num_ent, num_child, num_parent, flags );
                CHK_MB_ERR_1( rval, table, status );

                // Check if size is something other than num handles in set
                if( r != ranged_sets.end() && *i == *r )
                {
                    num_ent = *n;
                    ++r;
                    ++n;
                    flags |= mhdf_SET_RANGE_BIT;
                }
                else if( s != null_stripped_sets.end() && *i == *s )
                {
                    num_ent = *n;
                    ++s;
                    ++n;
                }
            }

            // Put data in buffer
            mhdf_index_t* local = buffer + 4 * count;
            prev_contents_end += num_ent;
            prev_children_end += num_child;
            prev_parents_end += num_parent;
            local[0] = prev_contents_end;
            local[1] = prev_children_end;
            local[2] = prev_parents_end;
            local[3] = flags;

            // Iterate
            ++count;
            ++i;
        }

        // Write the data
        mhdf_writeSetMetaWithOpt( table, offset, count, MHDF_INDEX_TYPE, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, table );
        track_meta.record_io( offset, count );
        offset += count;
    }
    assert( r == ranged_sets.end() );
    assert( s == null_stripped_sets.end() );
    assert( n == set_sizes.end() );

    /* If doing parallel write with collective IO, do null write
     * calls because other procs aren't done yet and write calls
     * are collective */
    for( size_t w = num_local_writes; w != num_global_writes; ++w )
    {
        mhdf_writeSetMetaWithOpt( table, 0, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
        CHK_MHDF_ERR_1( status, table );
    }

    topState.end();
    mhdf_closeData( filePtr, table, &status );
    CHK_MHDF_ERR_0( status );

    times[SET_META] = timer.time_elapsed();
    track_meta.all_reduce();

    return MB_SUCCESS;
}

template < class HandleRangeIter >
inline size_t count_num_handles( HandleRangeIter iter, HandleRangeIter end )
{
    size_t result = 0;
    for( ; iter != end; ++iter )
        result += iter->second - iter->first + 1;

    return result;
}

template < class HandleRangeIter >
inline ErrorCode range_to_id_list_templ( HandleRangeIter begin,
                                         HandleRangeIter end,
                                         const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
                                         WriteHDF5::wid_t* array )
{
    ErrorCode rval = MB_SUCCESS;
    RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();
    WriteHDF5::wid_t* i = array;
    for( HandleRangeIter pi = begin; pi != end; ++pi )
    {
        EntityHandle h = pi->first;
        while( h <= pi->second )
        {
            ri = idMap.lower_bound( ri, idMap.end(), h );
            if( ri == idMap.end() || ri->begin > h )
            {
                rval = MB_ENTITY_NOT_FOUND;
                *i   = 0;
                ++i;
                ++h;
                continue;
            }

            // Compute the last available value of the found target range (ri iterator)
            WriteHDF5::wid_t last_valid_input_value_in_current_map_range = ri->begin + ri->count - 1;
            // Limit the number of steps we do on top of h so we do not overflow the output range span
            WriteHDF5::wid_t step_until = std::min( last_valid_input_value_in_current_map_range, pi->second );
            WriteHDF5::wid_t n          = step_until - h + 1;
            assert( n > 0 );  // We must at least step 1

            WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
            for( WriteHDF5::wid_t j = 0; j < n; ++i, ++j )
                *i = id + j;
            h += n;
        }
    }

    assert( i == array + count_num_handles( begin, end ) );
    return rval;
}

template < class HandleRangeIter >
inline ErrorCode range_to_blocked_list_templ( HandleRangeIter begin,
                                              HandleRangeIter end,
                                              const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
                                              std::vector< WriteHDF5::wid_t >& output_id_list,
                                              bool& ranged_list )
{
    output_id_list.clear();
    if( begin == end )
    {
        ranged_list = false;
        return MB_SUCCESS;
    }

    // First try ranged format, but give up if we reach the
    // non-range format size.
    RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();

    const size_t num_handles = count_num_handles( begin, end );
    // If we end up with more than this many range blocks, then
    // we're better off just writing the set as a simple list
    size_t pairs_remaining = num_handles / 2;
    for( HandleRangeIter pi = begin; pi != end; ++pi )
    {
        EntityHandle h = pi->first;
        WriteHDF5::wid_t local_mapped_from_subrange = 0;
        while( h <= pi->second )
        {
            ri = idMap.lower_bound( ri, idMap.end(), h );
            if( ri == idMap.end() || ri->begin > h )
            {
                ++h;
                continue;
            }

            WriteHDF5::wid_t n = pi->second - pi->first + 1 - local_mapped_from_subrange;
            if( n > ri->count ) n = ri->count;

            WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
            // See if we can go to the end of the range
            if( id + n > ri->value + ri->count )  // We have to reduce n, because we cannot go over next subrange
            {
                if( ri->value + ri->count - id > 0 ) n = ri->value + ri->count - id;
            }

            // See if we can append it to the previous range
            if( !output_id_list.empty() && output_id_list[output_id_list.size() - 2] + output_id_list.back() == id )
            {
                output_id_list.back() += n;
            }
            // If we ran out of space (or the set is empty), just use list format
            else if( !pairs_remaining )
            {
                ranged_list = false;
                output_id_list.resize( num_handles );
                range_to_id_list_templ( begin, end, idMap, &output_id_list[0] );
                output_id_list.erase( std::remove( output_id_list.begin(), output_id_list.end(), 0u ),
                                      output_id_list.end() );
                return MB_SUCCESS;
            }
            else
            {
                --pairs_remaining;
                output_id_list.push_back( id );
                output_id_list.push_back( n );
            }
            local_mapped_from_subrange += n;  // We already mapped that many
            h += n;
        }
    }

    ranged_list = true;
    return MB_SUCCESS;
}
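
// Illustrative example (editor's note): a set whose handles map to file IDs
// {5, 6, 7, 10, 11} is encoded in blocked form as {5, 3, 10, 2}, i.e. two
// (start, count) pairs totaling four values instead of five, so 'ranged_list'
// is returned true. When the pairs would outnumber num_handles / 2, the code
// above falls back to the flat list produced by range_to_id_list_templ.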
01708
01709 ErrorCode WriteHDF5::range_to_blocked_list( const Range& input_range,
01710 std::vector< wid_t >& output_id_list,
01711 bool& ranged_list )
01712 {
01713 return range_to_blocked_list_templ( input_range.const_pair_begin(), input_range.const_pair_end(), idMap,
01714 output_id_list, ranged_list );
01715 }
01716
01717 ErrorCode WriteHDF5::range_to_blocked_list( const EntityHandle* array,
01718 size_t num_input_ranges,
01719 std::vector< wid_t >& output_id_list,
01720 bool& ranged_list )
01721 {
01722 // We assume this in the cast on the following line
01723 typedef std::pair< EntityHandle, EntityHandle > mtype;
01724 assert( sizeof( mtype ) == 2 * sizeof( EntityHandle ) );
01725 const mtype* arr = reinterpret_cast< const mtype* >( array );
01726 return range_to_blocked_list_templ( arr, arr + num_input_ranges, idMap, output_id_list, ranged_list );
01727 }
01728
01729 ErrorCode WriteHDF5::range_to_id_list( const Range& range, wid_t* array )
01730 {
01731 return range_to_id_list_templ( range.const_pair_begin(), range.const_pair_end(), idMap, array );
01732 }
01733
01734 ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input,
01735 size_t input_len,
01736 wid_t* output,
01737 size_t& output_len,
01738 bool remove_zeros )
01739 {
01740 const EntityHandle* i_iter = input;
01741 const EntityHandle* i_end = input + input_len;
01742 wid_t* o_iter = output;
01743 for( ; i_iter != i_end; ++i_iter )
01744 {
01745 wid_t id = idMap.find( *i_iter );
01746 if( !remove_zeros || id != 0 )
01747 {
01748 *o_iter = id;
01749 ++o_iter;
01750 }
01751 }
01752 output_len = o_iter - output;
01753
01754 return MB_SUCCESS;
01755 }
01756
01757 ErrorCode WriteHDF5::vector_to_id_list( const std::vector< EntityHandle >& input,
01758 std::vector< wid_t >& output,
01759 bool remove_zeros )
01760 {
01761 output.resize( input.size() );
01762 size_t output_size = 0;
01763 ErrorCode rval = vector_to_id_list( &input[0], input.size(), &output[0], output_size, remove_zeros );
01764 output.resize( output_size );
01765 return rval;
01766 }
01767
01768 ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input, wid_t* output, size_t count )
01769 {
01770 size_t output_len;
01771 return vector_to_id_list( input, count, output, output_len, false );
01772 }
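// A small usage sketch for the vector_to_id_list() overloads above (the
// calling context is hypothetical). Handles with no entry in idMap map to
// file ID 0; remove_zeros selects whether such entries are dropped or kept
// as explicit zeros for callers that need positional correspondence.
/*
    std::vector< EntityHandle > handles = ...;  // gathered elsewhere
    std::vector< WriteHDF5::wid_t > ids;

    // Compact form: unmapped handles are removed entirely.
    vector_to_id_list( handles, ids, true );

    // Positional form: ids.size() == handles.size(), zeros mark unmapped handles.
    vector_to_id_list( handles, ids, false );
*/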
01773
01774 inline ErrorCode WriteHDF5::get_adjacencies( EntityHandle entity, std::vector< wid_t >& adj )
01775 {
01776 const EntityHandle* adj_array;
01777 int num_adj;
01778 ErrorCode rval = writeUtil->get_adjacencies( entity, adj_array, num_adj );
01779 if( MB_SUCCESS != rval ) return error( rval );
01780
01781 size_t j = 0;
01782 adj.resize( num_adj );
01783 for( int i = 0; i < num_adj; ++i )
01784 if( wid_t id = idMap.find( adj_array[i] ) ) adj[j++] = id;
01785 adj.resize( j );
01786
01787 return MB_SUCCESS;
01788 }
01789
01790 ErrorCode WriteHDF5::write_adjacencies( const ExportSet& elements )
01791 {
01792 ErrorCode rval;
01793 mhdf_Status status;
01794 Range::const_iterator iter;
01795 const Range::const_iterator end = elements.range.end();
01796 std::vector< wid_t > adj_list;
01797
01798 CHECK_OPEN_HANDLES;
01799
01800 debug_barrier();
01801
01802 /* Count Adjacencies */
01803 long count = 0;
01804 // for (iter = elements.range.begin(); iter != end; ++iter) {
01805 // adj_list.clear();
01806 // rval = get_adjacencies(*iter, adj_list);CHK_MB_ERR_0(rval);
01807 //
01808 // if (adj_list.size() > 0)
01809 // count += adj_list.size() + 2;
01810 //}
01811
01812 // if (count == 0)
01813 // return MB_SUCCESS;
01814
01815 long offset = elements.adj_offset;
01816 if( elements.max_num_adjs == 0 ) return MB_SUCCESS;
01817
01818 /* Create data list */
01819 hid_t table = mhdf_openAdjacency( filePtr, elements.name(), &count, &status );
01820 CHK_MHDF_ERR_0( status );
01821 IODebugTrack track( debugTrack, "Adjacencies", count );
01822
01823 /* Write data */
01824 wid_t* buffer = (wid_t*)dataBuffer;
01825 long chunk_size = bufferSize / sizeof( wid_t );
01826 long num_writes = ( elements.max_num_adjs + chunk_size - 1 ) / chunk_size;
01827 (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01828 count = 0;
01829 for( iter = elements.range.begin(); iter != end; ++iter )
01830 {
01831 adj_list.clear();
01832 rval = get_adjacencies( *iter, adj_list );
01833 CHK_MB_ERR_1( rval, table, status );
01834 if( adj_list.size() == 0 ) continue;
01835
01836 // If buffer is full, flush it
01837 if( count + adj_list.size() + 2 > (unsigned long)chunk_size )
01838 {
01839 dbgOut.print( 3, " writing adjacency chunk.\n" );
01840 track.record_io( offset, count );
01841 mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
01842 CHK_MHDF_ERR_1( status, table );
01843 (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01844
01845 offset += count;
01846 count = 0;
01847 }
01848
01849 buffer[count++] = idMap.find( *iter );
01850 buffer[count++] = adj_list.size();
01851
01852 assert( adj_list.size() + 2 < (unsigned long)chunk_size );
01853 memcpy( buffer + count, &adj_list[0], adj_list.size() * sizeof( wid_t ) );
01854 count += adj_list.size();
01855 }
01856
01857 if( count )
01858 {
01859 dbgOut.print( 2, " writing final adjacency chunk.\n" );
01860 mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
01861 CHK_MHDF_ERR_1( status, table );
01862
01863 offset += count;
01864 count = 0;
01865 --num_writes;
01866 }
01867
01868 // Do empty writes if necessary for parallel collective IO
01869 if( collectiveIO )
01870 {
01871 while( num_writes > 0 )
01872 {
01873 --num_writes;
01874 assert( writeProp != H5P_DEFAULT );
01875 dbgOut.print( 2, " writing empty adjacency chunk.\n" );
01876 mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
01877 CHK_MHDF_ERR_1( status, table );
01878 }
01879 }
01880
01881 mhdf_closeData( filePtr, table, &status );
01882 CHK_MHDF_ERR_0( status );
01883
01884 track.all_reduce();
01885 return MB_SUCCESS;
01886 }
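// A sketch of the record layout written above, inferred from the
// buffer-filling loop: each entity that has adjacencies contributes
// [ entity_file_id, n, adj_id_1, ..., adj_id_n ], and records are packed
// back to back into a single table per element group.
/*
    // Hypothetical decoder for one buffer of such records:
    void decode_adjacency_buffer( const WriteHDF5::wid_t* buf, long len )
    {
        long pos = 0;
        while( pos < len )
        {
            WriteHDF5::wid_t entity_id = buf[pos++];
            WriteHDF5::wid_t n = buf[pos++];  // Number of adjacent entities
            for( WriteHDF5::wid_t k = 0; k < n; ++k )
                handle_adjacency( entity_id, buf[pos + k] );  // Hypothetical callback
            pos += n;
        }
    }
*/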
01887
01888 ErrorCode WriteHDF5::write_tag( const TagDesc& tag_data, double* times )
01889 {
01890 std::string name;
01891 ErrorCode rval = iFace->tag_get_name( tag_data.tag_id, name );
01892 if( MB_SUCCESS != rval ) return error( rval );
01893
01894 CHECK_OPEN_HANDLES;
01895 debug_barrier();
01896 dbgOut.tprintf( 1, "Writing tag: \"%s\"\n", name.c_str() );
01897
01898 int moab_size, elem_size, array_len;
01899 DataType moab_type;
01900 mhdf_TagDataType mhdf_type;
01901 hid_t hdf5_type;
01902 rval = get_tag_size( tag_data.tag_id, moab_type, moab_size, elem_size, array_len, mhdf_type, hdf5_type );
01903 if( MB_SUCCESS != rval ) return error( rval );
01904
01905 CpuTimer timer;
01906 if( array_len == MB_VARIABLE_LENGTH && tag_data.write_sparse )
01907 {
01908 dbgOut.printf( 2, "Writing sparse data for var-len tag: \"%s\"\n", name.c_str() );
01909 rval = write_var_len_tag( tag_data, name, moab_type, hdf5_type, elem_size );
01910 times[VARLEN_TAG_TIME] += timer.time_elapsed();
01911 }
01912 else
01913 {
01914 int data_len = elem_size;
01915 if( moab_type != MB_TYPE_BIT ) data_len *= array_len;
01916 if( tag_data.write_sparse )
01917 {
01918 dbgOut.printf( 2, "Writing sparse data for tag: \"%s\"\n", name.c_str() );
01919 rval = write_sparse_tag( tag_data, name, moab_type, hdf5_type, data_len );
01920 times[SPARSE_TAG_TIME] += timer.time_elapsed();
01921 }
01922 for( size_t i = 0; MB_SUCCESS == rval && i < tag_data.dense_list.size(); ++i )
01923 {
01924 const ExportSet* set = find( tag_data.dense_list[i] );
01925 assert( 0 != set );
01926 debug_barrier();
01927 dbgOut.printf( 2, "Writing dense data for tag: \"%s\" on group \"%s\"\n", name.c_str(), set->name() );
01928 subState.start( "writing dense data for tag: ", ( name + ":" + set->name() ).c_str() );
01929 rval = write_dense_tag( tag_data, *set, name, moab_type, hdf5_type, data_len );
01930 subState.end( rval );
01931 }
01932 times[DENSE_TAG_TIME] += timer.time_elapsed();
01933 }
01934
01935 H5Tclose( hdf5_type );
01936 return MB_SUCCESS == rval ? MB_SUCCESS : error( rval );
01937 }
01938
01939 ErrorCode WriteHDF5::write_sparse_ids( const TagDesc& tag_data,
01940 const Range& range,
01941 hid_t id_table,
01942 size_t table_size,
01943 const char* name )
01944 {
01945 ErrorCode rval;
01946 mhdf_Status status;
01947
01948 CHECK_OPEN_HANDLES;
01949
01950 std::string tname( name ? name : "" );
01951 tname += " - Ids";
01952 IODebugTrack track( debugTrack, tname, table_size );
01953
01954 // Set up data buffer for writing IDs
01955 size_t chunk_size = bufferSize / sizeof( wid_t );
01956 wid_t* id_buffer = (wid_t*)dataBuffer;
01957
01958 // Write IDs of tagged entities.
01959 long remaining = range.size();
01960 long offset = tag_data.sparse_offset;
01961 long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
01962 if( tag_data.max_num_ents )
01963 {
01964 assert( tag_data.max_num_ents >= (unsigned long)remaining );
01965 num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
01966 }
01967 Range::const_iterator iter = range.begin();
01968 while( remaining )
01969 {
01970 (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01971
01972 // Write "chunk_size" blocks of data
01973 long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
01974 remaining -= count;
01975 Range::const_iterator stop = iter;
01976 stop += count;
01977 Range tmp;
01979 tmp.merge( iter, stop );
01980 iter = stop;
01981 assert( tmp.size() == (unsigned)count );
01982
01983 rval = range_to_id_list( tmp, id_buffer );
01984 CHK_MB_ERR_0( rval );
01985
01986 // Write the data
01987 dbgOut.print( 3, " writing sparse tag entity chunk.\n" );
01988 track.record_io( offset, count );
01989 mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type, id_buffer, writeProp, &status );
01990 CHK_MHDF_ERR_0( status );
01991
01992 offset += count;
01993 --num_writes;
01994 } // while (remaining)
01995
01996 // Do empty writes if necessary for parallel collective IO
01997 if( collectiveIO )
01998 {
01999 while( num_writes-- )
02000 {
02001 assert( writeProp != H5P_DEFAULT );
02002 dbgOut.print( 3, " writing empty sparse tag entity chunk.\n" );
02003 mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type, 0, writeProp, &status );
02004 CHK_MHDF_ERR_0( status );
02005 }
02006 }
02007
02008 track.all_reduce();
02009 return MB_SUCCESS;
02010 }
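// Why the trailing empty writes above matter: with collective HDF5 I/O,
// every rank must make the same number of calls into the write routine,
// including ranks that run out of local data early. The arithmetic, with
// hypothetical numbers:
/*
    long chunk_size = 1000;                                            // IDs per buffer
    long remaining = 1500;                                             // Local IDs on this rank
    unsigned long max_num_ents = 4200;                                 // Largest count on any rank
    long local_writes = ( remaining + chunk_size - 1 ) / chunk_size;   // 2 real writes
    long num_writes = ( max_num_ents + chunk_size - 1 ) / chunk_size;  // 5 collective calls
    long empty_writes = num_writes - local_writes;                     // 3 empty writes keep ranks in lockstep
*/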
02011
02012 ErrorCode WriteHDF5::write_sparse_tag( const TagDesc& tag_data,
02013 const std::string& name,
02014 DataType mb_data_type,
02015 hid_t value_type,
02016 int value_type_size )
02017 {
02018 ErrorCode rval;
02019 mhdf_Status status;
02020 hid_t tables[3];
02021 long table_size, data_size;
02022
02023 CHECK_OPEN_HANDLES;
02024
02025 // Get entities for which to write tag values
02026 Range range;
02027 rval = get_sparse_tagged_entities( tag_data, range );
02028
02029 // Open tables to write info
02030 mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_size, tables, &status );
02031 CHK_MHDF_ERR_0( status );
02032 assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
02033 // Fixed-length tag
02034 assert( table_size == data_size );
02035
02036 // Write IDs for tagged entities
02037 subState.start( "writing sparse ids for tag: ", name.c_str() );
02038 rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
02039 subState.end( rval );
02040 CHK_MB_ERR_2( rval, tables, status );
02041 mhdf_closeData( filePtr, tables[0], &status );
02042 CHK_MHDF_ERR_1( status, tables[1] );
02043
02044 // Set up data buffer for writing tag values
02045 IODebugTrack track( debugTrack, name + " Data", data_size );
02046 subState.start( "writing sparse values for tag: ", name.c_str() );
02047 rval = write_tag_values( tag_data.tag_id, tables[1], tag_data.sparse_offset, range, mb_data_type, value_type,
02048 value_type_size, tag_data.max_num_ents, track );
02049 subState.end( rval );
02050 CHK_MB_ERR_0( rval );
02051 mhdf_closeData( filePtr, tables[1], &status );
02052 CHK_MHDF_ERR_0( status );
02053
02054 track.all_reduce();
02055 return MB_SUCCESS;
02056 }
02057
02058 ErrorCode WriteHDF5::write_var_len_indices( const TagDesc& tag_data,
02059 const Range& range,
02060 hid_t idx_table,
02061 size_t table_size,
02062 int /*type_size*/,
02063 const char* name )
02064 {
02065 ErrorCode rval;
02066 mhdf_Status status;
02067
02068 CHECK_OPEN_HANDLES;
02069
02070 std::string tname( name ? name : "" );
02071 tname += " - End Indices";
02072 IODebugTrack track( debugTrack, tname, table_size );
02073
02074 // Set up data buffer for writing indices
02075 size_t chunk_size = bufferSize / ( std::max( sizeof( void* ), sizeof( long ) ) + sizeof( int ) );
02076 mhdf_index_t* idx_buffer = (mhdf_index_t*)dataBuffer;
02077 const void** junk = (const void**)dataBuffer;
02078 int* size_buffer = (int*)( dataBuffer + chunk_size * std::max( sizeof( void* ), sizeof( mhdf_index_t ) ) );
02079
02080 // Write end indices for the tagged entities.
02081 long data_offset = tag_data.var_data_offset - 1; // Running end index into the var-len data table
02082 size_t remaining = range.size();
02083 size_t offset = tag_data.sparse_offset;
02084 size_t num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
02085 if( tag_data.max_num_ents )
02086 {
02087 assert( tag_data.max_num_ents >= (unsigned long)remaining );
02088 num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
02089 }
02090 Range::const_iterator iter = range.begin();
02091 while( remaining )
02092 {
02093 (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
02094
02095 // Write "chunk_size" blocks of data
02096 size_t count = remaining > chunk_size ? chunk_size : remaining;
02097 remaining -= count;
02098 Range::const_iterator stop = iter;
02099 stop += count;
02100 Range tmp;
02101 tmp.merge( iter, stop );
02102 iter = stop;
02103 assert( tmp.size() == (unsigned)count );
02104
02105 rval = iFace->tag_get_by_ptr( tag_data.tag_id, tmp, junk, size_buffer );
02106 CHK_MB_ERR_0( rval );
02107
02108 // Calculate end indices
02109 dbgOut.print( 3, " writing var-len tag offset chunk.\n" );
02110 track.record_io( offset, count );
02111 for( size_t i = 0; i < count; ++i )
02112 {
02113 data_offset += size_buffer[i];
02114 idx_buffer[i] = data_offset;
02115 }
02116
02117 // Write
02118 mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, count, MHDF_INDEX_TYPE, idx_buffer, writeProp, &status );
02119 CHK_MHDF_ERR_0( status );
02120
02121 offset += count;
02122 --num_writes;
02123 } // while (remaining)
02124
02125 // Do empty writes if necessary for parallel collective IO
02126 if( collectiveIO )
02127 {
02128 while( num_writes-- )
02129 {
02130 assert( writeProp != H5P_DEFAULT );
02131 dbgOut.print( 3, " writing empty sparse tag index chunk.\n" );
02132 mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
02133 CHK_MHDF_ERR_0( status );
02134 }
02135 }
02136
02137 track.all_reduce();
02138 return MB_SUCCESS;
02139 }
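// A worked example of the end-index encoding above: the file stores, for
// each tagged entity, the index of its last value in the var-len data
// table. With per-entity value counts {3, 1, 2} and var_data_offset = 0,
// the running offset produces end indices {2, 3, 5}; a reader recovers
// count i as idx[i] - idx[i-1] (treating idx[-1] as var_data_offset - 1).
/*
    int sizes[] = { 3, 1, 2 };
    long data_offset = 0 - 1;  // tag_data.var_data_offset - 1
    long idx[3];
    for( int i = 0; i < 3; ++i )
    {
        data_offset += sizes[i];
        idx[i] = data_offset;  // 2, then 3, then 5
    }
*/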
02140
02141 ErrorCode WriteHDF5::write_var_len_data( const TagDesc& tag_data,
02142 const Range& range,
02143 hid_t table,
02144 size_t table_size,
02145 bool handle_tag,
02146 hid_t hdf_type,
02147 int type_size,
02148 const char* name )
02149 {
02150 ErrorCode rval;
02151 mhdf_Status status;
02152
02153 CHECK_OPEN_HANDLES;
02154 assert( !handle_tag || sizeof( EntityHandle ) == type_size );
02155
02156 std::string tname( name ? name : "" );
02157 tname += " - Values";
02158 IODebugTrack track( debugTrack, tname, table_size );
02159
02160 const size_t buffer_size = bufferSize / type_size;
02161
02162 size_t num_writes = ( table_size + buffer_size - 1 ) / buffer_size;
02163 if( collectiveIO )
02164 {
02165 assert( tag_data.max_num_vals > 0 );
02166 num_writes = ( tag_data.max_num_vals + buffer_size - 1 ) / buffer_size;
02167 }
02168
02169 unsigned char* buffer = (unsigned char*)dataBuffer;
02170 const void* prev_data = 0; // Data left over from prev iteration
02171 size_t prev_len = 0;
02172 Range::const_iterator iter = range.begin();
02173 long offset = tag_data.var_data_offset;
02174 while( prev_data || iter != range.end() )
02175 {
02176 size_t count = 0;
02177 if( prev_data )
02178 {
02179 size_t len;
02180 const void* ptr = prev_data;
02181 if( prev_len <= buffer_size )
02182 {
02183 len = prev_len;
02184 prev_data = 0;
02185 prev_len = 0;
02186 }
02187 else
02188 {
02189 len = buffer_size;
02190 prev_data = ( (const char*)prev_data ) + buffer_size * type_size;
02191 prev_len -= buffer_size;
02192 }
02193
02194 if( handle_tag )
02195 convert_handle_tag( (const EntityHandle*)ptr, (EntityHandle*)buffer, len );
02196 else
02197 memcpy( buffer, ptr, len * type_size );
02198 count = len;
02199 }
02200
02201 for( ; count < buffer_size && iter != range.end(); ++iter )
02202 {
02203 int len;
02204 const void* ptr;
02205 rval = iFace->tag_get_by_ptr( tag_data.tag_id, &*iter, 1, &ptr, &len );
02206 CHK_MB_ERR_0( rval );
02207 if( len + count > buffer_size )
02208 {
02209 prev_len = len + count - buffer_size;
02210 len = buffer_size - count;
02211 prev_data = ( (const char*)ptr ) + len * type_size;
02212 }
02213
02214 if( handle_tag )
02215 convert_handle_tag( (const EntityHandle*)ptr, ( (EntityHandle*)buffer ) + count, len );
02216 else
02217 memcpy( buffer + count * type_size, ptr, len * type_size );
02218 count += len;
02219 }
02220
02221 track.record_io( offset, count );
02222 mhdf_writeTagValuesWithOpt( table, offset, count, hdf_type, buffer, writeProp, &status );
02223 offset += count;
02224 CHK_MHDF_ERR_0( status );
02225 --num_writes;
02226 }
02227
02228 // Do empty writes if necessary for parallel collective IO
02229 if( collectiveIO )
02230 {
02231 while( num_writes-- )
02232 {
02233 assert( writeProp != H5P_DEFAULT );
02234 dbgOut.print( 3, " writing empty var-len tag data chunk.\n" );
02235 mhdf_writeTagValuesWithOpt( table, 0, 0, hdf_type, 0, writeProp, &status );
02236 CHK_MHDF_ERR_0( status );
02237 }
02238 }
02239
02240 track.all_reduce();
02241 return MB_SUCCESS;
02242 }
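// A sketch of the carry-over logic above, with hypothetical numbers: when
// one entity's var-len value does not fit in the space left in the buffer,
// the unwritten tail is parked in prev_data/prev_len and emitted at the
// start of the next chunk, so a single value may span several writes.
/*
    size_t buffer_size = 100;                     // Values per chunk
    size_t count = 90;                            // Values already buffered
    size_t len = 35;                              // Next entity's value count
    size_t prev_len = len + count - buffer_size;  // 25 values carried to the next chunk
    size_t head = buffer_size - count;            // 10 values written in this chunk
    // The chunk flushes full (count == 100); the next iteration copies the
    // 25 carried-over values before consuming further entities.
*/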
02243
02244 ErrorCode WriteHDF5::write_var_len_tag( const TagDesc& tag_data,
02245 const std::string& name,
02246 DataType mb_data_type,
02247 hid_t hdf_type,
02248 int type_size )
02249 {
02250 ErrorCode rval;
02251 mhdf_Status status;
02252 hid_t tables[3];
02253 long table_size;
02254 long data_table_size;
02255
02256 CHECK_OPEN_HANDLES;
02257
02258 // Get entities for which to write tag values
02259 Range range;
02260 rval = get_sparse_tagged_entities( tag_data, range );
02261
02262 // Open tables to write info
02263 mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_table_size, tables, &status );
02264 CHK_MHDF_ERR_0( status );
02265 assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
02266
02267 // Write IDs for tagged entities
02268 subState.start( "writing ids for var-len tag: ", name.c_str() );
02269 rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
02270 subState.end( rval );
02271 CHK_MB_ERR_2( rval, tables, status );
02272 mhdf_closeData( filePtr, tables[0], &status );
02273 CHK_MHDF_ERR_2( status, tables + 1 );
02274
02275 // Write offsets for tagged entities
02276 subState.start( "writing indices for var-len tag: ", name.c_str() );
02277 rval = write_var_len_indices( tag_data, range, tables[2], table_size, type_size, name.c_str() );
02278 subState.end( rval );
02279 CHK_MB_ERR_1( rval, tables[1], status );
02280 mhdf_closeData( filePtr, tables[2], &status );
02281 CHK_MHDF_ERR_1( status, tables[1] );
02282
02283 // Write the actual tag data
02284 subState.start( "writing values for var-len tag: ", name.c_str() );
02285 rval = write_var_len_data( tag_data, range, tables[1], data_table_size, mb_data_type == MB_TYPE_HANDLE, hdf_type,
02286 type_size, name.c_str() );
02287 subState.end( rval );
02288 CHK_MB_ERR_0( rval );
02289 mhdf_closeData( filePtr, tables[1], &status );
02290 CHK_MHDF_ERR_0( status );
02291
02292 return MB_SUCCESS;
02293 }
02294
02295 ErrorCode WriteHDF5::write_dense_tag( const TagDesc& tag_data,
02296 const ExportSet& elem_data,
02297 const std::string& name,
02298 DataType mb_data_type,
02299 hid_t value_type,
02300 int value_type_size )
02301 {
02302 CHECK_OPEN_HANDLES;
02303
02304 // Open tables to write info
02305 mhdf_Status status;
02306 long table_size;
02307 hid_t table = mhdf_openDenseTagData( filePtr, name.c_str(), elem_data.name(), &table_size, &status );
02308 CHK_MHDF_ERR_0( status );
02309 assert( elem_data.range.size() + elem_data.offset <= (unsigned long)table_size );
02310
02311 IODebugTrack track( debugTrack, name + " " + elem_data.name() + " Data", table_size );
02312 ErrorCode rval = write_tag_values( tag_data.tag_id, table, elem_data.offset, elem_data.range, mb_data_type,
02313 value_type, value_type_size, elem_data.max_num_ents, track );
02314 CHK_MB_ERR_0( rval );
02315 mhdf_closeData( filePtr, table, &status );
02316 CHK_MHDF_ERR_0( status );
02317
02318 return MB_SUCCESS;
02319 }
02320
02321 ErrorCode WriteHDF5::write_tag_values( Tag tag_id,
02322 hid_t data_table,
02323 unsigned long offset_in,
02324 const Range& range_in,
02325 DataType mb_data_type,
02326 hid_t value_type,
02327 int value_type_size,
02328 unsigned long max_num_ents,
02329 IODebugTrack& track )
02330 {
02331 mhdf_Status status;
02332
02333 CHECK_OPEN_HANDLES;
02334
02335 // Set up data buffer for writing tag values
02336 size_t chunk_size = bufferSize / value_type_size;
02337 assert( chunk_size > 0 );
02338 char* tag_buffer = (char*)dataBuffer;
02339
02340 // Write the tag values
02341 size_t remaining = range_in.size();
02342 size_t offset = offset_in;
02343 Range::const_iterator iter = range_in.begin();
02344 long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
02345 if( max_num_ents )
02346 {
02347 assert( max_num_ents >= remaining );
02348 num_writes = ( max_num_ents + chunk_size - 1 ) / chunk_size;
02349 }
02350 while( remaining )
02351 {
02352 (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
02353
02354 // Write "chunk_size" blocks of data
02355 long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
02356 remaining -= count;
02357 memset( tag_buffer, 0, count * value_type_size );
02358 Range::const_iterator stop = iter;
02359 stop += count;
02360 Range range;
02361 range.merge( iter, stop );
02362 iter = stop;
02363 assert( range.size() == (unsigned)count );
02364
02365 ErrorCode rval = iFace->tag_get_data( tag_id, range, tag_buffer );
02366 CHK_MB_ERR_0( rval );
02367
02368 // Convert EntityHandles to file ids
02369 if( mb_data_type == MB_TYPE_HANDLE )
02370 convert_handle_tag( reinterpret_cast< EntityHandle* >( tag_buffer ),
02371 count * value_type_size / sizeof( EntityHandle ) );
02372
02373 // Write the data
02374 dbgOut.print( 2, " writing tag value chunk.\n" );
02375 track.record_io( offset, count );
02376 assert( value_type > 0 );
02377 mhdf_writeTagValuesWithOpt( data_table, offset, count, value_type, tag_buffer, writeProp, &status );
02378 CHK_MHDF_ERR_0( status );
02379
02380 offset += count;
02381 --num_writes;
02382 } // while (remaining)
02383
02384 // Do empty writes if necessary for parallel collective IO
02385 if( collectiveIO )
02386 {
02387 while( num_writes-- )
02388 {
02389 assert( writeProp != H5P_DEFAULT );
02390 dbgOut.print( 2, " writing empty tag value chunk.\n" );
02391 assert( value_type > 0 );
02392 mhdf_writeTagValuesWithOpt( data_table, offset, 0, value_type, 0, writeProp, &status );
02393 CHK_MHDF_ERR_0( status );
02394 }
02395 }
02396
02397 track.all_reduce();
02398 return MB_SUCCESS;
02399 }
02400
02401 ErrorCode WriteHDF5::write_qa( const std::vector< std::string >& list )
02402 {
02403 const char* app = "MOAB";
02404 const char* vers = MOAB_VERSION;
02405 char date_str[64];
02406 char time_str[64];
02407
02408 CHECK_OPEN_HANDLES;
02409
02410 std::vector< const char* > strs( list.size() ? list.size() : 4 );
02411 if( list.size() == 0 )
02412 {
02413 time_t t = time( NULL );
02414 tm* lt = localtime( &t );
02415 #ifdef WIN32
02416 strftime( date_str, sizeof( date_str ), "%m/%d/%y", lt ); // VS 2008 does not support %D
02417 strftime( time_str, sizeof( time_str ), "%H:%M:%S", lt ); // VS 2008 does not support %T
02418 #else
02419 strftime( date_str, sizeof( date_str ), "%D", lt );
02420 strftime( time_str, sizeof( time_str ), "%T", lt );
02421 #endif
02422
02423 strs[0] = app;
02424 strs[1] = vers;
02425 strs[2] = date_str;
02426 strs[3] = time_str;
02427 }
02428 else
02429 {
02430 for( unsigned int i = 0; i < list.size(); ++i )
02431 strs[i] = list[i].c_str();
02432 }
02433
02434 mhdf_Status status;
02435 dbgOut.print( 2, " writing QA history.\n" );
02436 mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
02437 CHK_MHDF_ERR_0( status );
02438
02439 return MB_SUCCESS;
02440 }
02441
02442 /*
02443 ErrorCode WriteHDF5::register_known_tag_types(Interface* iface)
02444 {
02445 hid_t int4, double16;
02446 hsize_t dim[1];
02447 int error = 0;
02448 ErrorCode rval;
02449
02450 dim[0] = 4;
02451 int4 = H5Tarray_create(H5T_NATIVE_INT, 1, dim, NULL);
02452
02453 dim[0] = 16;
02454 double16 = H5Tarray_create(H5T_NATIVE_DOUBLE, 1, dim, NULL);
02455
02456 if (int4 < 0 || double16 < 0)
02457 error = 1;
02458
02459 struct { const char* name; hid_t type; } list[] = {
02460 { GLOBAL_ID_TAG_NAME, H5T_NATIVE_INT } ,
02461 { MATERIAL_SET_TAG_NAME, H5T_NATIVE_INT },
02462 { DIRICHLET_SET_TAG_NAME, H5T_NATIVE_INT },
02463 { NEUMANN_SET_TAG_NAME, H5T_NATIVE_INT },
02464 { HAS_MID_NODES_TAG_NAME, int4 },
02465 { GEOM_DIMENSION_TAG_NAME, H5T_NATIVE_INT },
02466 { MESH_TRANSFORM_TAG_NAME, double16 },
02467 { 0, 0 } };
02468
02469 for (int i = 0; list[i].name; ++i) {
02470 if (list[i].type < 1) {
02471 ++error;
02472 continue;
02473 }
02474
02475 Tag handle;
02476
02477 std::string name("__hdf5_tag_type_");
02478 name += list[i].name;
02479
02480 rval = iface->tag_get_handle(name.c_str(), handle);
02481 if (MB_TAG_NOT_FOUND == rval) {
02482 rval = iface->tag_create(name.c_str(), sizeof(hid_t), MB_TAG_SPARSE, handle, NULL);
02483 if (MB_SUCCESS != rval) {
02484 ++error;
02485 continue;
02486 }
02487
02488 hid_t copy_id = H5Tcopy(list[i].type);
02489 const EntityHandle mesh = 0;
02490 rval = iface->tag_set_data(handle, &mesh, 1, &copy_id);
02491 if (MB_SUCCESS != rval) {
02492 ++error;
02493 continue;
02494 }
02495 }
02496 }
02497
02498 H5Tclose(int4);
02499 H5Tclose(double16);
02500 return error ? MB_FAILURE : MB_SUCCESS;
02501 }
02502 */
02503
02504 ErrorCode WriteHDF5::gather_tags( const Tag* user_tag_list, int num_tags )
02505 {
02506 ErrorCode result;
02507 std::vector< Tag > tag_list;
02508 std::vector< Tag >::iterator t_itor;
02509 Range range;
02510
02511 // Get list of Tags to write
02512 result = writeUtil->get_tag_list( tag_list, user_tag_list, num_tags );
02513 CHK_MB_ERR_0( result );
02514
02515 // Build a TagDesc entry for each tag
02516 for( t_itor = tag_list.begin(); t_itor != tag_list.end(); ++t_itor )
02517 {
02518 // Add tag to export list
02519 TagDesc tag_data;
02520 tag_data.write_sparse = false;
02521 tag_data.tag_id = *t_itor;
02522 tag_data.sparse_offset = 0;
02523 tag_data.var_data_offset = 0;
02524 tag_data.max_num_ents = 0;
02525 tag_data.max_num_vals = 0;
02526 tagList.push_back( tag_data );
02527 }
02528
02529 return MB_SUCCESS;
02530 }
02531
02532 // If we support parallel, then this function will have been
02533 // overridden with an alternate version in WriteHDF5Parallel
02534 // that supports parallel I/O. If we're here
02535 // then MOAB was not built with support for parallel HDF5 I/O.
02536 ErrorCode WriteHDF5::parallel_create_file( const char* /* filename */,
02537 bool /* overwrite */,
02538 const std::vector< std::string >& /* qa_records */,
02539 const FileOptions& /* opts */,
02540 const Tag* /* tag_list */,
02541 int /* num_tags */,
02542 int /* dimension */,
02543 double* /* times */ )
02544 {
02545 MB_SET_ERR( MB_NOT_IMPLEMENTED, "WriteHDF5 does not support parallel writing" );
02546 }
02547
02548 ErrorCode WriteHDF5::serial_create_file( const char* filename,
02549 bool overwrite,
02550 const std::vector< std::string >& qa_records,
02551 const Tag* user_tag_list,
02552 int num_user_tags,
02553 int dimension )
02554 {
02555 long first_id;
02556 mhdf_Status status;
02557 hid_t handle;
02558 std::list< ExportSet >::iterator ex_itor;
02559 ErrorCode rval;
02560
02561 topState.start( "creating file" );
02562
02563 const char* type_names[MBMAXTYPE];
02564 memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
02565 for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
02566 type_names[i] = CN::EntityTypeName( i );
02567
02568 // Create the file
02569 filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
02570 CHK_MHDF_ERR_0( status );
02571 assert( !!filePtr );
02572
02573 rval = write_qa( qa_records );
02574 CHK_MB_ERR_0( rval );
02575
02576 // Create node table
02577 if( nodeSet.range.size() )
02578 {
02579 nodeSet.total_num_ents = nodeSet.range.size();
02580 handle = mhdf_createNodeCoords( filePtr, dimension, nodeSet.total_num_ents, &first_id, &status );
02581 CHK_MHDF_ERR_0( status );
02582 mhdf_closeData( filePtr, handle, &status );
02583 CHK_MHDF_ERR_0( status );
02584 nodeSet.first_id = (wid_t)first_id;
02585 rval = assign_ids( nodeSet.range, nodeSet.first_id );
02586 CHK_MB_ERR_0( rval );
02587 }
02588 else
02589 {
02590 nodeSet.first_id = std::numeric_limits< wid_t >::max();
02591 }
02592 nodeSet.offset = 0;
02593
02594 // Create element tables
02595 for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
02596 {
02597 ex_itor->total_num_ents = ex_itor->range.size();
02598 rval = create_elem_table( *ex_itor, ex_itor->total_num_ents, first_id );
02599 CHK_MB_ERR_0( rval );
02600
02601 ex_itor->first_id = (wid_t)first_id;
02602 ex_itor->offset = 0;
02603 rval = assign_ids( ex_itor->range, ex_itor->first_id );
02604 CHK_MB_ERR_0( rval );
02605 }
02606 // Create set tables
02607 writeSets = !setSet.range.empty();
02608 if( writeSets )
02609 {
02610 long contents_len, children_len, parents_len;
02611
02612 setSet.total_num_ents = setSet.range.size();
02613 setSet.max_num_ents = setSet.total_num_ents;
02614 rval = create_set_meta( setSet.total_num_ents, first_id );
02615 CHK_MB_ERR_0( rval );
02616
02617 setSet.first_id = (wid_t)first_id;
02618 rval = assign_ids( setSet.range, setSet.first_id );
02619 CHK_MB_ERR_0( rval );
02620
02621 rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
02622 CHK_MB_ERR_0( rval );
02623
02624 rval = create_set_tables( contents_len, children_len, parents_len );
02625 CHK_MB_ERR_0( rval );
02626
02627 setSet.offset = 0;
02628 setContentsOffset = 0;
02629 setChildrenOffset = 0;
02630 setParentsOffset = 0;
02631 writeSetContents = !!contents_len;
02632 writeSetChildren = !!children_len;
02633 writeSetParents = !!parents_len;
02634
02635 maxNumSetContents = contents_len;
02636 maxNumSetChildren = children_len;
02637 maxNumSetParents = parents_len;
02638 } // if (!setSet.range.empty())
02639
02640 // Create adjacency tables after the set tables, because sets do not yet have IDs;
02641 // some entities may be adjacent to sets (Exodus?)
02642 // Create node adjacency table
02643 wid_t num_adjacencies;
02644 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES
02645 rval = count_adjacencies( nodeSet.range, num_adjacencies );
02646 CHK_MB_ERR_0( rval );
02647 nodeSet.adj_offset = 0;
02648 nodeSet.max_num_adjs = num_adjacencies;
02649 if( num_adjacencies > 0 )
02650 {
02651 handle = mhdf_createAdjacency( filePtr, mhdf_node_type_handle(), num_adjacencies, &status );
02652 CHK_MHDF_ERR_0( status );
02653 mhdf_closeData( filePtr, handle, &status );
02654 }
02655 #endif
02656
02657 // Create element adjacency tables
02658 for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
02659 {
02660 rval = count_adjacencies( ex_itor->range, num_adjacencies );
02661 CHK_MB_ERR_0( rval );
02662
02663 ex_itor->adj_offset = 0;
02664 ex_itor->max_num_adjs = num_adjacencies;
02665 if( num_adjacencies > 0 )
02666 {
02667 handle = mhdf_createAdjacency( filePtr, ex_itor->name(), num_adjacencies, &status );
02668 CHK_MHDF_ERR_0( status );
02669 mhdf_closeData( filePtr, handle, &status );
02670 }
02671 }
02672
02673 dbgOut.tprint( 1, "Gathering Tags\n" );
02674
02675 rval = gather_tags( user_tag_list, num_user_tags );
02676 CHK_MB_ERR_0( rval );
02677
02678 // Create the tags and tag data tables
02679 std::list< TagDesc >::iterator tag_iter = tagList.begin();
02680 for( ; tag_iter != tagList.end(); ++tag_iter )
02681 {
02682 // As we haven't yet added any ExportSets for which to write
02683 // dense tag data to the TagDesc struct pointed to by
02684 // tag_iter, this call will initially return all tagged entities
02685 // in the set of entities to be written.
02686 Range range;
02687 rval = get_sparse_tagged_entities( *tag_iter, range );
02688 CHK_MB_ERR_0( rval );
02689
02690 int s;
02691 bool var_len = ( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) );
02692
02693 // Determine which ExportSets we want to write dense
02694 // data for. We never write dense data for variable-length
02695 // tag data.
02696 if( !var_len && writeTagDense )
02697 {
02698 // Check if we want to write this tag in dense format even if not
02699 // all of the entities have a tag value. The criterion is that the
02700 // tag be dense, have a default value, and have at least 2/3 of the
02701 // entities tagged.
02702 bool prefer_dense = false;
02703 TagType type;
02704 rval = iFace->tag_get_type( tag_iter->tag_id, type );
02705 CHK_MB_ERR_0( rval );
02706 if( MB_TAG_DENSE == type )
02707 {
02708 const void* defval = 0;
02709 rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
02710 if( MB_SUCCESS == rval ) prefer_dense = true;
02711 }
02712
02713 if( check_dense_format_tag( nodeSet, range, prefer_dense ) )
02714 {
02715 range -= nodeSet.range;
02716 tag_iter->dense_list.push_back( nodeSet );
02717 }
02718
02719 std::list< ExportSet >::const_iterator ex = exportList.begin();
02720 for( ; ex != exportList.end(); ++ex )
02721 {
02722 if( check_dense_format_tag( *ex, range, prefer_dense ) )
02723 {
02724 range -= ex->range;
02725 tag_iter->dense_list.push_back( *ex );
02726 }
02727 }
02728
02729 if( check_dense_format_tag( setSet, range, prefer_dense ) )
02730 {
02731 range -= setSet.range;
02732 tag_iter->dense_list.push_back( setSet );
02733 }
02734 }
02735
02736 tag_iter->write_sparse = !range.empty();
02737
02738 unsigned long var_len_total = 0;
02739 if( var_len )
02740 {
02741 rval = get_tag_data_length( *tag_iter, range, var_len_total );
02742 CHK_MB_ERR_0( rval );
02743 }
02744
02745 rval = create_tag( *tag_iter, range.size(), var_len_total );
02746 CHK_MB_ERR_0( rval );
02747 } // for (tags)
02748
02749 topState.end();
02750 return MB_SUCCESS;
02751 }
02752
02753 bool WriteHDF5::check_dense_format_tag( const ExportSet& ents, const Range& all_tagged, bool prefer_dense )
02754 {
02755 // If there are no tagged entities, then don't write anything
02756 if( ents.range.empty() ) return false;
02757
02758 // If all of the entities are tagged, then write in dense format
02759 if( all_tagged.contains( ents.range ) ) return true;
02760
02761 // Unless asked for more lenient choice of dense format, return false
02762 if( !prefer_dense ) return false;
02763
02764 // If we're being lenient about choosing dense format, then
02765 // return true if at least 2/3 of the entities are tagged.
02766 Range xsect = intersect( ents.range, all_tagged );
02767 if( 3 * xsect.size() >= 2 * ents.range.size() ) return true;
02768
02769 return false;
02770 }
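// A worked instance of the leniency test above, with hypothetical counts:
// for an export group of 900 entities with 650 tagged, 3 * 650 = 1950 >=
// 2 * 900 = 1800, so the tag is still written densely (untagged entities
// get the default value). With only 550 tagged, 1650 < 1800 and the
// sparse format is used instead.
/*
    size_t group_size = 900, tagged = 650;
    bool dense = ( 3 * tagged >= 2 * group_size );  // true: 1950 >= 1800
*/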
02771
02772 ErrorCode WriteHDF5::count_adjacencies( const Range& set, wid_t& result )
02773 {
02774 ErrorCode rval;
02775 std::vector< wid_t > adj_list;
02776 Range::const_iterator iter = set.begin();
02777 const Range::const_iterator end = set.end();
02778 result = 0;
02779 for( ; iter != end; ++iter )
02780 {
02781 adj_list.clear();
02782 rval = get_adjacencies( *iter, adj_list );
02783 CHK_MB_ERR_0( rval );
02784
02785 if( adj_list.size() > 0 ) result += 2 + adj_list.size();
02786 }
02787
02788 return MB_SUCCESS;
02789 }
02790
02791 ErrorCode WriteHDF5::create_elem_table( const ExportSet& block, long num_entities, long& first_id_out )
02792 {
02793 mhdf_Status status;
02794 hid_t handle;
02795
02796 CHECK_OPEN_HANDLES;
02797
02798 mhdf_addElement( filePtr, block.name(), block.type, &status );
02799 CHK_MHDF_ERR_0( status );
02800
02801 handle = mhdf_createConnectivity( filePtr, block.name(), block.num_nodes, num_entities, &first_id_out, &status );
02802 CHK_MHDF_ERR_0( status );
02803 mhdf_closeData( filePtr, handle, &status );
02804 CHK_MHDF_ERR_0( status );
02805
02806 return MB_SUCCESS;
02807 }
02808
02809 ErrorCode WriteHDF5::count_set_size( const Range& sets,
02810 long& contents_length_out,
02811 long& children_length_out,
02812 long& parents_length_out )
02813 {
02814 ErrorCode rval;
02815 Range set_contents;
02816 long contents_length_set, children_length_set, parents_length_set;
02817 unsigned long flags;
02818 std::vector< wid_t > set_contents_ids;
02819 std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
02820
02821 contents_length_out = 0;
02822 children_length_out = 0;
02823 parents_length_out = 0;
02824
02825 for( Range::const_iterator iter = sets.begin(); iter != sets.end(); ++iter )
02826 {
02827 while( si != specialSets.end() && si->setHandle < *iter )
02828 ++si;
02829
02830 if( si != specialSets.end() && si->setHandle == *iter )
02831 {
02832 contents_length_out += si->contentIds.size();
02833 children_length_out += si->childIds.size();
02834 parents_length_out += si->parentIds.size();
02835 ++si;
02836 continue;
02837 }
02838
02839 rval = get_set_info( *iter, contents_length_set, children_length_set, parents_length_set, flags );
02840 CHK_MB_ERR_0( rval );
02841
02842 // Check whether we can and should compress the contents as ranges
02843 if( !( flags & MESHSET_ORDERED ) && contents_length_set )
02844 {
02845 set_contents.clear();
02846 rval = iFace->get_entities_by_handle( *iter, set_contents, false );
02847 CHK_MB_ERR_0( rval );
02848
02849 bool blocked_list;
02850 rval = range_to_blocked_list( set_contents, set_contents_ids, blocked_list );
02851 CHK_MB_ERR_0( rval );
02852
02853 if( blocked_list )
02854 {
02855 assert( set_contents_ids.size() % 2 == 0 );
02856 contents_length_set = set_contents_ids.size();
02857 }
02858 }
02859
02860 contents_length_out += contents_length_set;
02861 children_length_out += children_length_set;
02862 parents_length_out += parents_length_set;
02863 }
02864
02865 return MB_SUCCESS;
02866 }
02867
02868 ErrorCode WriteHDF5::create_set_meta( long num_sets, long& first_id_out )
02869 {
02870 hid_t handle;
02871 mhdf_Status status;
02872
02873 CHECK_OPEN_HANDLES;
02874
02875 handle = mhdf_createSetMeta( filePtr, num_sets, &first_id_out, &status );
02876 CHK_MHDF_ERR_0( status );
02877 mhdf_closeData( filePtr, handle, &status );
02878
02879 return MB_SUCCESS;
02880 }
02881
02882 WriteHDF5::SpecialSetData* WriteHDF5::find_set_data( EntityHandle h )
02883 {
02884 SpecialSetData tmp;
02885 tmp.setHandle = h;
02886 std::vector< SpecialSetData >::iterator i;
02887 i = std::lower_bound( specialSets.begin(), specialSets.end(), tmp, SpecSetLess() );
02888 return ( i == specialSets.end() || i->setHandle != h ) ? 0 : &*i;
02889 }
02890
02891 ErrorCode WriteHDF5::create_set_tables( long num_set_contents, long num_set_children, long num_set_parents )
02892 {
02893 hid_t handle;
02894 mhdf_Status status;
02895
02896 CHECK_OPEN_HANDLES;
02897
02898 if( num_set_contents > 0 )
02899 {
02900 handle = mhdf_createSetData( filePtr, num_set_contents, &status );
02901 CHK_MHDF_ERR_0( status );
02902 mhdf_closeData( filePtr, handle, &status );
02903 }
02904
02905 if( num_set_children > 0 )
02906 {
02907 handle = mhdf_createSetChildren( filePtr, num_set_children, &status );
02908 CHK_MHDF_ERR_0( status );
02909 mhdf_closeData( filePtr, handle, &status );
02910 }
02911
02912 if( num_set_parents > 0 )
02913 {
02914 handle = mhdf_createSetParents( filePtr, num_set_parents, &status );
02915 CHK_MHDF_ERR_0( status );
02916 mhdf_closeData( filePtr, handle, &status );
02917 }
02918
02919 return MB_SUCCESS;
02920 }
02921
02922 ErrorCode WriteHDF5::get_tag_size( Tag tag,
02923 DataType& moab_type,
02924 int& num_bytes,
02925 int& type_size,
02926 int& array_length,
02927 mhdf_TagDataType& file_type,
02928 hid_t& hdf_type )
02929 {
02930 ErrorCode rval;
02931 Tag type_handle;
02932 std::string tag_name, tag_type_name;
02933
02934 CHECK_OPEN_HANDLES;
02935
02936 // hdf_type is set below either from the MOAB data type or, for
02937 // opaque tags, from a user-specified type stored in a mesh tag.
02938 // In every case the caller receives its own handle and must H5Tclose it.
02939 hdf_type = (hid_t)0;
02940 bool close_hdf_type = false;
02941
02942 rval = iFace->tag_get_data_type( tag, moab_type );
02943 CHK_MB_ERR_0( rval );
02944 rval = iFace->tag_get_length( tag, array_length );
02945 if( MB_VARIABLE_DATA_LENGTH == rval )
02946 {
02947 array_length = MB_VARIABLE_LENGTH;
02948 }
02949 else if( MB_SUCCESS != rval )
02950 return error( rval );
02951 rval = iFace->tag_get_bytes( tag, num_bytes );
02952 if( MB_VARIABLE_DATA_LENGTH == rval )
02953 num_bytes = MB_VARIABLE_LENGTH;
02954 else if( MB_SUCCESS != rval )
02955 return error( rval );
02956
02957 switch( moab_type )
02958 {
02959 case MB_TYPE_INTEGER:
02960 type_size = sizeof( int );
02961 file_type = mhdf_INTEGER;
02962 hdf_type = H5T_NATIVE_INT;
02963 close_hdf_type = false;
02964 break;
02965 case MB_TYPE_DOUBLE:
02966 type_size = sizeof( double );
02967 file_type = mhdf_FLOAT;
02968 hdf_type = H5T_NATIVE_DOUBLE;
02969 close_hdf_type = false;
02970 break;
02971 case MB_TYPE_BIT:
02972 type_size = sizeof( bool );
02973 file_type = mhdf_BITFIELD;
02974 assert( array_length <= 8 );
02975 hdf_type = H5Tcopy( H5T_NATIVE_B8 );
02976 H5Tset_precision( hdf_type, array_length );
02977 close_hdf_type = true;
02978 break;
02979 case MB_TYPE_HANDLE:
02980 type_size = sizeof( EntityHandle );
02981 file_type = mhdf_ENTITY_ID;
02982 hdf_type = id_type;
02983 close_hdf_type = false;
02984 break;
02985 case MB_TYPE_OPAQUE:
02986 file_type = mhdf_OPAQUE;
02987 rval = iFace->tag_get_name( tag, tag_name );
02988 CHK_MB_ERR_0( rval );
02989 tag_type_name = "__hdf5_tag_type_";
02990 tag_type_name += tag_name;
02991 rval = iFace->tag_get_handle( tag_type_name.c_str(), 0, MB_TYPE_OPAQUE, type_handle, MB_TAG_ANY );
02992 if( MB_TAG_NOT_FOUND == rval )
02993 {
02994 if( num_bytes == MB_VARIABLE_LENGTH )
02995 type_size = 1;
02996 else
02997 type_size = num_bytes;
02998 hdf_type = H5Tcreate( H5T_OPAQUE, type_size );
02999 close_hdf_type = true;
03000 }
03001 else if( MB_SUCCESS == rval )
03002 {
03003 int hsize;
03004 rval = iFace->tag_get_bytes( type_handle, hsize );
03005 if( hsize != sizeof( hid_t ) ) return error( MB_FAILURE );
03006
03007 const EntityHandle root = 0;
03008 rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
03009 if( rval != MB_SUCCESS ) return error( rval );
03010
03011 type_size = H5Tget_size( hdf_type );
03012 if( type_size != num_bytes ) return error( MB_FAILURE );
03013
03014 close_hdf_type = false;
03015 }
03016 else
03017 return error( rval );
03018 num_bytes = array_length;
03019 array_length = ( num_bytes == MB_VARIABLE_LENGTH ) ? MB_VARIABLE_LENGTH : 1;
03020 break;
03021 default:
03022 break;
03023 }
03024
03025 assert( num_bytes == MB_VARIABLE_LENGTH || ( moab_type == MB_TYPE_BIT && num_bytes == 1 ) ||
03026 array_length * type_size == num_bytes );
03027
03028 if( num_bytes == MB_VARIABLE_LENGTH )
03029 {
03030 array_length = MB_VARIABLE_LENGTH;
03031 if( !close_hdf_type )
03032 {
03033 hdf_type = H5Tcopy( hdf_type );
03034 // close_hdf_type = true;
03035 }
03036 }
03037 else if( array_length > 1 && moab_type != MB_TYPE_BIT )
03038 {
03039 hsize_t len = array_length;
03040 #if defined( H5Tarray_create_vers ) && ( H5Tarray_create_vers > 1 )
03041 hid_t temp_id = H5Tarray_create2( hdf_type, 1, &len );
03042 #else
03043 hid_t temp_id = H5Tarray_create( hdf_type, 1, &len, NULL );
03044 #endif
03045 if( close_hdf_type ) H5Tclose( hdf_type );
03046 hdf_type = temp_id;
03047 }
03048 else if( !close_hdf_type )
03049 {
03050 hdf_type = H5Tcopy( hdf_type );
03051 // close_hdf_type = true;
03052 }
03053
03054 return MB_SUCCESS;
03055 }
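// A sketch of the user-side convention consumed above: to force a specific
// HDF5 type for an opaque tag named "T", store an hid_t in a mesh tag
// named "__hdf5_tag_type_T". The lookup is grounded in the code above and
// in the commented-out register_known_tag_types(); the exact tag-creation
// flags used here are an assumption.
/*
    hid_t my_type = H5Tcreate( H5T_OPAQUE, 16 );  // Hypothetical 16-byte opaque type
    Tag type_tag;
    iface->tag_get_handle( "__hdf5_tag_type_T", sizeof( hid_t ), MB_TYPE_OPAQUE,
                           type_tag, MB_TAG_SPARSE | MB_TAG_CREAT | MB_TAG_BYTES );
    const EntityHandle root = 0;
    iface->tag_set_data( type_tag, &root, 1, &my_type );
*/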
03056
03057 ErrorCode WriteHDF5::get_tag_data_length( const TagDesc& tag_info, const Range& range, unsigned long& result )
03058 {
03059 ErrorCode rval;
03060 result = 0;
03061
03062 // Split buffer into two pieces, one for pointers and one for sizes
03063 size_t step, remaining;
03064 step = bufferSize / ( sizeof( int ) + sizeof( void* ) );
03065 const void** ptr_buffer = reinterpret_cast< const void** >( dataBuffer );
03066 int* size_buffer = reinterpret_cast< int* >( ptr_buffer + step );
03067 Range subrange;
03068 Range::const_iterator iter = range.begin();
03069 for( remaining = range.size(); remaining >= step; remaining -= step )
03070 {
03071 // Get subset of range containing 'count' entities
03072 Range::const_iterator end = iter;
03073 end += step;
03074 subrange.clear();
03075 subrange.merge( iter, end );
03076 iter = end;
03077 // Get tag sizes for entities
03078 rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
03079 if( MB_SUCCESS != rval ) return error( rval );
03080 // Sum lengths
03081 for( size_t i = 0; i < step; ++i )
03082 result += size_buffer[i];
03083 }
03084 // Process remaining
03085 subrange.clear();
03086 subrange.merge( iter, range.end() );
03087 assert( subrange.size() == remaining );
03088 rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
03089 if( MB_SUCCESS != rval ) return error( rval );
03090 for( size_t i = 0; i < remaining; ++i )
03091 result += size_buffer[i];
03092
03093 return MB_SUCCESS;
03094 }
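// The buffer-split arithmetic above, worked for an LP64 build (pointer and
// int sizes are platform assumptions): with a 40 MiB buffer, sizeof(void*)
// = 8 and sizeof(int) = 4, each pass queries step = 41943040 / 12 =
// 3495253 entities, so even very large ranges need only a few passes.
/*
    size_t buffer_bytes = 40 * 1024 * 1024;
    size_t step = buffer_bytes / ( sizeof( int ) + sizeof( void* ) );  // 3495253 on LP64
*/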
03095
03096 ErrorCode WriteHDF5::create_tag( const TagDesc& tag_data,
03097 unsigned long num_sparse_entities,
03098 unsigned long data_table_size )
03099 {
03100 TagType mb_storage;
03101 DataType mb_type;
03102 mhdf_TagDataType mhdf_type;
03103 int tag_bytes, type_size, num_vals, storage;
03104 hid_t hdf_type = (hid_t)0;
03105 hid_t handles[3];
03106 std::string tag_name;
03107 ErrorCode rval;
03108 mhdf_Status status;
03109
03110 CHECK_OPEN_HANDLES;
03111
03112 // Get tag properties
03113 rval = iFace->tag_get_type( tag_data.tag_id, mb_storage );
03114 CHK_MB_ERR_0( rval );
03115 switch( mb_storage )
03116 {
03117 case MB_TAG_DENSE:
03118 storage = mhdf_DENSE_TYPE;
03119 break;
03120 case MB_TAG_SPARSE:
03121 storage = mhdf_SPARSE_TYPE;
03122 break;
03123 case MB_TAG_BIT:
03124 storage = mhdf_BIT_TYPE;
03125 break;
03126 case MB_TAG_MESH:
03127 storage = mhdf_MESH_TYPE;
03128 break;
03129 default:
03130 return error( MB_FAILURE );
03131 }
03132 rval = iFace->tag_get_name( tag_data.tag_id, tag_name );
03133 CHK_MB_ERR_0( rval );
03134 rval = get_tag_size( tag_data.tag_id, mb_type, tag_bytes, type_size, num_vals, mhdf_type, hdf_type );
03135 CHK_MB_ERR_0( rval );
03136
03137 // Get default value
03138 const void *def_value, *mesh_value;
03139 int def_val_len, mesh_val_len;
03140 rval = iFace->tag_get_default_value( tag_data.tag_id, def_value, def_val_len );
03141 if( MB_ENTITY_NOT_FOUND == rval )
03142 {
03143 def_value = 0;
03144 def_val_len = 0;
03145 }
03146 else if( MB_SUCCESS != rval )
03147 {
03148 H5Tclose( hdf_type );
03149 return error( rval );
03150 }
03151
03152 // Get mesh value
03153 unsigned char byte;
03154 const EntityHandle root = 0;
03155 if( mb_storage == MB_TAG_BIT )
03156 {
03157 rval = iFace->tag_get_data( tag_data.tag_id, &root, 1, &byte );
03158 mesh_value = &byte;
03159 mesh_val_len = 1;
03160 }
03161 else
03162 {
03163 rval = iFace->tag_get_by_ptr( tag_data.tag_id, &root, 1, &mesh_value, &mesh_val_len );
03164 }
03165 if( MB_TAG_NOT_FOUND == rval )
03166 {
03167 mesh_value = 0;
03168 mesh_val_len = 0;
03169 }
03170 else if( MB_SUCCESS != rval )
03171 {
03172 H5Tclose( hdf_type );
03173 return error( rval );
03174 }
03175
03176 // For handle-type tags, need to convert from handles to file ids
03177 if( MB_TYPE_HANDLE == mb_type )
03178 {
03179 // Make sure there's room in the buffer for both
03180 assert( ( def_val_len + mesh_val_len ) * sizeof( long ) < (size_t)bufferSize );
03181
03182 // Convert default value
03183 if( def_value )
03184 {
03185 memcpy( dataBuffer, def_value, def_val_len * sizeof( EntityHandle ) );
03186 convert_handle_tag( reinterpret_cast< EntityHandle* >( dataBuffer ), def_val_len );
03187 def_value = dataBuffer;
03188 }
03189
03190 // Convert mesh value
03191 if( mesh_value )
03192 {
03193 EntityHandle* ptr = reinterpret_cast< EntityHandle* >( dataBuffer ) + def_val_len;
03194 memcpy( ptr, mesh_value, mesh_val_len * sizeof( EntityHandle ) );
03195 if( convert_handle_tag( ptr, mesh_val_len ) )
03196 mesh_value = ptr;
03197 else
03198 mesh_value = 0;
03199 }
03200 }
03201
03202 if( MB_VARIABLE_LENGTH != tag_bytes )
03203 {
03204 // Write the tag description to the file
03205 mhdf_createTag( filePtr, tag_name.c_str(), mhdf_type, num_vals, storage, def_value, mesh_value, hdf_type,
03206 mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
03207 CHK_MHDF_ERR_0( status );
03208 H5Tclose( hdf_type );
03209
03210 // Create empty table for tag data
03211 if( num_sparse_entities )
03212 {
03213 mhdf_createSparseTagData( filePtr, tag_name.c_str(), num_sparse_entities, handles, &status );
03214 CHK_MHDF_ERR_0( status );
03215 mhdf_closeData( filePtr, handles[0], &status );
03216 mhdf_closeData( filePtr, handles[1], &status );
03217 }
03218
03219 for( size_t i = 0; i < tag_data.dense_list.size(); ++i )
03220 {
03221 const ExportSet* ex = find( tag_data.dense_list[i] );
03222 assert( 0 != ex );
03223 handles[0] = mhdf_createDenseTagData( filePtr, tag_name.c_str(), ex->name(), ex->total_num_ents, &status );
03224 CHK_MHDF_ERR_0( status );
03225 mhdf_closeData( filePtr, handles[0], &status );
03226 }
03227 }
03228 else
03229 {
03230 mhdf_createVarLenTag( filePtr, tag_name.c_str(), mhdf_type, storage, def_value, def_val_len, mesh_value,
03231 mesh_val_len, hdf_type, mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
03232 CHK_MHDF_ERR_0( status );
03233 H5Tclose( hdf_type );
03234
03235 // Create empty table for tag data
03236 if( num_sparse_entities )
03237 {
03238 mhdf_createVarLenTagData( filePtr, tag_name.c_str(), num_sparse_entities, data_table_size, handles,
03239 &status );
03240 CHK_MHDF_ERR_0( status );
03241 mhdf_closeData( filePtr, handles[0], &status );
03242 mhdf_closeData( filePtr, handles[1], &status );
03243 mhdf_closeData( filePtr, handles[2], &status );
03244 }
03245 }
03246
03247 return MB_SUCCESS;
03248 }
03249
03250 ErrorCode WriteHDF5::get_num_sparse_tagged_entities( const TagDesc& tag, size_t& count )
03251 {
03252 Range tmp;
03253 ErrorCode rval = get_sparse_tagged_entities( tag, tmp );
03254 count = tmp.size();
03255 return rval;
03256 }
03257
03258 ErrorCode WriteHDF5::get_sparse_tagged_entities( const TagDesc& tag, Range& results )
03259 {
03260 results.clear();
03261 if( !tag.have_dense( setSet ) ) results.merge( setSet.range );
03262 std::list< ExportSet >::reverse_iterator e;
03263 for( e = exportList.rbegin(); e != exportList.rend(); ++e )
03264 {
03265 if( !tag.have_dense( *e ) ) results.merge( e->range );
03266 }
03267 if( !tag.have_dense( nodeSet ) ) results.merge( nodeSet.range );
03268 if( results.empty() ) return MB_SUCCESS;
03269
03270 return iFace->get_entities_by_type_and_tag( 0, MBMAXTYPE, &tag.tag_id, 0, 1, results, Interface::INTERSECT );
03271 }
03272
03273 void WriteHDF5::get_write_entities( Range& range )
03274 {
03275 range.clear();
03276 range.merge( setSet.range );
03277 std::list< ExportSet >::reverse_iterator e;
03278 for( e = exportList.rbegin(); e != exportList.rend(); ++e )
03279 range.merge( e->range );
03280 range.merge( nodeSet.range );
03281 }
03282
03283 void WriteHDF5::print_id_map() const
03284 {
03285 print_id_map( std::cout, "" );
03286 }
03287
03288 void WriteHDF5::print_id_map( std::ostream& s, const char* pfx ) const
03289 {
03290 RangeMap< EntityHandle, wid_t >::const_iterator i;
03291 for( i = idMap.begin(); i != idMap.end(); ++i )
03292 {
03293 const char* n1 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin ) );
03294 EntityID id = ID_FROM_HANDLE( i->begin );
03295 if( 1 == i->count )
03296 {
03297 s << pfx << n1 << " " << id << " -> " << i->value << std::endl;
03298 }
03299 else
03300 {
03301 const char* n2 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin + i->count - 1 ) );
03302 if( n1 == n2 )
03303 {
03304 s << pfx << n1 << " " << id << "-" << id + i->count - 1 << " -> " << i->value << "-"
03305 << i->value + i->count - 1 << std::endl;
03306 }
03307 else
03308 {
03309 s << pfx << n1 << " " << id << "-" << n1 << " " << ID_FROM_HANDLE( i->begin + i->count - 1 ) << " -> "
03310 << i->value << "-" << i->value + i->count - 1 << std::endl;
03311 }
03312 }
03313 }
03314 }
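// Sample of the output format produced above (entity IDs and file IDs are
// hypothetical): one line per contiguous map entry, with runs that stay
// within a single entity type collapsed into start-end spans.
/*
    Vertex 1-1000 -> 1-1000
    Hex 1-512 -> 1001-1512
    Vertex 2000 -> 3000
*/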
03315
03316 void WriteHDF5::print_times( const double* t ) const
03317 {
03318 std::cout << "WriteHDF5: " << t[TOTAL_TIME] << std::endl
03319 << " gather mesh: " << t[GATHER_TIME] << std::endl
03320 << " create file: " << t[CREATE_TIME] << std::endl
03321 << " create nodes: " << t[CREATE_NODE_TIME] << std::endl
03322 << " negotiate types: " << t[NEGOTIATE_TYPES_TIME] << std::endl
03323 << " create elem: " << t[CREATE_ELEM_TIME] << std::endl
03324 << " file id exch: " << t[FILEID_EXCHANGE_TIME] << std::endl
03325 << " create adj: " << t[CREATE_ADJ_TIME] << std::endl
03326 << " create set: " << t[CREATE_SET_TIME] << std::endl
03327 << " shared ids: " << t[SHARED_SET_IDS] << std::endl
03328 << " shared data: " << t[SHARED_SET_CONTENTS] << std::endl
03329 << " set offsets: " << t[SET_OFFSET_TIME] << std::endl
03330 << " create tags: " << t[CREATE_TAG_TIME] << std::endl
03331 << " coordinates: " << t[COORD_TIME] << std::endl
03332 << " connectivity: " << t[CONN_TIME] << std::endl
03333 << " sets: " << t[SET_TIME] << std::endl
03334 << " set descrip: " << t[SET_META] << std::endl
03335 << " set content: " << t[SET_CONTENT] << std::endl
03336 << " set parent: " << t[SET_PARENT] << std::endl
03337 << " set child: " << t[SET_CHILD] << std::endl
03338 << " adjacencies: " << t[ADJ_TIME] << std::endl
03339 << " tags: " << t[TAG_TIME] << std::endl
03340 << " dense data: " << t[DENSE_TAG_TIME] << std::endl
03341 << " sparse data: " << t[SPARSE_TAG_TIME] << std::endl
03342 << " var-len data: " << t[VARLEN_TAG_TIME] << std::endl;
03343 }
03344
03345 } // namespace moab