MOAB: Mesh Oriented datABase
(version 5.3.1)
/**
 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
 * storing and accessing finite element mesh data.
 *
 * Copyright 2004 Sandia Corporation. Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
 * retains certain rights in this software.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 */

//-------------------------------------------------------------------------
// Filename      : WriteHDF5.cpp
//
// Purpose       : TSTT HDF5 Writer
//
// Special Notes : WriteSLAC used as template for this
//
// Creator       : Jason Kraftcheck
//
// Creation Date : 04/01/04
//-------------------------------------------------------------------------

#include <cassert>
#if defined( _MSC_VER )
typedef int id_t;
#elif defined( __MINGW32__ )
#include <sys/time.h>
#else
#include <ctime>
#endif

#include <algorithm>  // std::min, std::remove (used below)
#include <cstdlib>
#include <cstring>
#include <cstdarg>
#include <limits>
#include <cstdio>
#include <iostream>
#include "WriteHDF5.hpp"
#include <H5Tpublic.h>
#include <H5Ppublic.h>
#include <H5Epublic.h>
#include "moab/Interface.hpp"
#include "Internals.hpp"
#include "MBTagConventions.hpp"
#include "moab/CN.hpp"
#include "moab/FileOptions.hpp"
#include "moab/CpuTimer.hpp"
#include "IODebugTrack.hpp"
#include "mhdf.h"

#ifndef MOAB_HAVE_HDF5
#error Attempt to compile WriteHDF5 with HDF5 support disabled
#endif

#undef BLOCKED_COORD_IO

#ifdef MOAB_HAVE_VALGRIND
#include <valgrind/memcheck.h>
#else
#ifndef VALGRIND_CHECK_MEM_IS_DEFINED
#define VALGRIND_CHECK_MEM_IS_DEFINED( a, b ) ( (void)0 )
#endif
#ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE( a, b ) ( (void)0 )
#endif
#ifndef VALGRIND_MAKE_MEM_UNDEFINED
#define VALGRIND_MAKE_MEM_UNDEFINED( a, b ) ( (void)0 )
#endif
#endif

namespace moab
{

template < typename T >
inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& v )
{
    (void)VALGRIND_MAKE_MEM_UNDEFINED( (T*)&v[0], v.size() * sizeof( T ) );
}

#define WRITE_HDF5_BUFFER_SIZE ( 40 * 1024 * 1024 )

static hid_t get_id_type()
{
    if( 8 == sizeof( WriteHDF5::wid_t ) )
    {
        if( 8 == sizeof( long ) )
            return H5T_NATIVE_ULONG;
        else
            return H5T_NATIVE_UINT64;
    }
    else if( 4 == sizeof( WriteHDF5::wid_t ) )
    {
        if( 4 == sizeof( int ) )
            return H5T_NATIVE_UINT;
        else
            return H5T_NATIVE_UINT32;
    }
    else
    {
        assert( 0 );
        return (hid_t)-1;
    }
}

// This is the HDF5 type used to store file IDs
const hid_t WriteHDF5::id_type = get_id_type();
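// Illustrative note: id_type is the *memory* datatype handed to the mhdf
// write calls later in this file, e.g.
//
//   mhdf_writeConnectivityWithOpt( table, offset, count, id_type, buffer,
//                                  writeProp, &status );
//
// so the selection above only needs to match sizeof( wid_t ); HDF5 converts
// from the memory datatype to the file datatype during the write.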
// This function doesn't do anything useful. It's just a nice
// place to set a break point to determine why the writer fails.
static inline ErrorCode error( ErrorCode rval )
{
    return rval;
}

// Call \c error function during HDF5 library errors to make
// it easier to trap such errors in the debugger. This function
// gets registered with the HDF5 library as a callback. It
// works the same as the default (H5Eprint), except that it
// also calls the \c error function as a no-op.
#if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
static herr_t handle_hdf5_error( hid_t stack, void* data )
{
    WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
    herr_t result = 0;
    if( h->func ) result = ( *h->func )( stack, h->data );
    error( MB_FAILURE );
    return result;
}
#else
static herr_t handle_hdf5_error( void* data )
{
    WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
    herr_t result = 0;
    if( h->func ) result = ( *h->func )( h->data );
    error( MB_FAILURE );
    return result;
}
#endif

// Some macros to handle error checking. The CHK_MHDF_ERR_* macros
// check the value of an mhdf_Status object. The CHK_MB_ERR_* macros
// check the value of an ErrorCode. The *_0 macros accept no other
// arguments. The *_1 macros accept a single HDF5 handle to close on
// error. The *_2 macros accept an array of two HDF5 handles to close
// on error, and the *_3 macros an array of three. The *_2C macros
// accept one HDF5 handle to close on error, plus a bool and a second
// HDF5 handle, where the latter handle is conditionally closed
// depending on the value of the bool. All macros contain a "return"
// statement.
#define CHK_MHDF_ERR_0( A )                            \
    do                                                 \
    {                                                  \
        if( mhdf_isError( &( A ) ) )                   \
        {                                              \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
            assert( 0 );                               \
            return error( MB_FAILURE );                \
        }                                              \
    } while( false )

#define CHK_MHDF_ERR_1( A, B )                         \
    do                                                 \
    {                                                  \
        if( mhdf_isError( &( A ) ) )                   \
        {                                              \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
            assert( 0 );                               \
            mhdf_closeData( filePtr, ( B ), &( A ) );  \
            return error( MB_FAILURE );                \
        }                                              \
    } while( false )

#define CHK_MHDF_ERR_2( A, B )                           \
    do                                                   \
    {                                                    \
        if( mhdf_isError( &( A ) ) )                     \
        {                                                \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) );   \
            assert( 0 );                                 \
            mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
            return error( MB_FAILURE );                  \
        }                                                \
    } while( false )

#define CHK_MHDF_ERR_3( A, B )                           \
    do                                                   \
    {                                                    \
        if( mhdf_isError( &( A ) ) )                     \
        {                                                \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) );   \
            assert( 0 );                                 \
            mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
            mhdf_closeData( filePtr, ( B )[2], &( A ) ); \
            return error( MB_FAILURE );                  \
        }                                                \
    } while( false )

#define CHK_MHDF_ERR_2C( A, B, C, D )                            \
    do                                                           \
    {                                                            \
        if( mhdf_isError( &( A ) ) )                             \
        {                                                        \
            MB_SET_ERR_CONT( mhdf_message( &( A ) ) );           \
            assert( 0 );                                         \
            mhdf_closeData( filePtr, ( B ), &( A ) );            \
            if( C ) mhdf_closeData( filePtr, ( D ), &( A ) );    \
            return error( MB_FAILURE );                          \
        }                                                        \
    } while( false )
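// A typical call site pairs a status/ErrorCode check with whatever HDF5
// handles must be closed before returning; a minimal sketch of the pattern
// used throughout this file (the CHK_MB_ERR_* macros are defined just below):
//
//   hid_t table = mhdf_openNodeCoords( filePtr, &num, &dim, &first, &status );
//   CHK_MHDF_ERR_0( status );            // nothing open yet on failure
//   rval = writeUtil->get_node_coords( -1, iter, end, 3 * count, buffer );
//   CHK_MB_ERR_1( rval, table, status ); // close 'table', then return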
#define CHK_MB_ERR_0( A )             \
    do                                \
    {                                 \
        if( MB_SUCCESS != ( A ) )     \
        {                             \
            MB_CHK_ERR_CONT( ( A ) ); \
            return error( A );        \
        }                             \
    } while( false )

#define CHK_MB_ERR_1( A, B, C )                       \
    do                                                \
    {                                                 \
        if( MB_SUCCESS != ( A ) )                     \
        {                                             \
            MB_CHK_ERR_CONT( ( A ) );                 \
            mhdf_closeData( filePtr, ( B ), &( C ) ); \
            assert( 0 );                              \
            return error( A );                        \
        }                                             \
    } while( false )

#define CHK_MB_ERR_2( A, B, C )                          \
    do                                                   \
    {                                                    \
        if( MB_SUCCESS != ( A ) )                        \
        {                                                \
            MB_CHK_ERR_CONT( ( A ) );                    \
            mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
            write_finished();                            \
            assert( 0 );                                 \
            return error( A );                           \
        }                                                \
    } while( false )

#define CHK_MB_ERR_3( A, B, C )                          \
    do                                                   \
    {                                                    \
        if( MB_SUCCESS != ( A ) )                        \
        {                                                \
            MB_CHK_ERR_CONT( ( A ) );                    \
            mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
            mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
            mhdf_closeData( filePtr, ( B )[2], &( C ) ); \
            write_finished();                            \
            assert( 0 );                                 \
            return error( A );                           \
        }                                                \
    } while( false )

#define CHK_MB_ERR_2C( A, B, C, D, E )                        \
    do                                                        \
    {                                                         \
        if( MB_SUCCESS != ( A ) )                             \
        {                                                     \
            MB_CHK_ERR_CONT( ( A ) );                         \
            mhdf_closeData( filePtr, ( B ), &( E ) );         \
            if( C ) mhdf_closeData( filePtr, ( D ), &( E ) ); \
            write_finished();                                 \
            assert( 0 );                                      \
            return error( A );                                \
        }                                                     \
    } while( false )

#define debug_barrier() debug_barrier_line( __LINE__ )
void WriteHDF5::debug_barrier_line( int ) {}

class CheckOpenWriteHDF5Handles
{
    int fileline;
    mhdf_FileHandle handle;
    int enter_count;

  public:
    CheckOpenWriteHDF5Handles( mhdf_FileHandle file, int line )
        : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
    {
    }

    ~CheckOpenWriteHDF5Handles()
    {
        int new_count = mhdf_countOpenHandles( handle );
        if( new_count != enter_count )
        {
            std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
                      << "Open at entrance: " << enter_count << std::endl
                      << "Open at exit:     " << new_count << std::endl;
        }
    }
};

MPEState WriteHDF5::topState;
MPEState WriteHDF5::subState;

#ifdef NDEBUG
#define CHECK_OPEN_HANDLES
#else
#define CHECK_OPEN_HANDLES CheckOpenWriteHDF5Handles check_open_handles_( filePtr, __LINE__ )
#endif

bool WriteHDF5::convert_handle_tag( const EntityHandle* source, EntityHandle* dest, size_t count ) const
{
    bool some_valid = false;
    for( size_t i = 0; i < count; ++i )
    {
        if( !source[i] )
            dest[i] = 0;
        else
        {
            dest[i] = idMap.find( source[i] );
            if( dest[i] ) some_valid = true;
        }
    }

    return some_valid;
}

bool WriteHDF5::convert_handle_tag( EntityHandle* data, size_t count ) const
{
    assert( sizeof( EntityHandle ) == sizeof( wid_t ) );
    return convert_handle_tag( data, data, count );
}
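// Example of the conversion above: if idMap maps handles [100..109] to file
// IDs [1..10], a handle-valued tag { 103, 0, 250 } becomes { 4, 0, 0 } --
// null and unmapped handles are written as 0 -- and the function returns
// true because at least one handle was resolved.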
ErrorCode WriteHDF5::assign_ids( const Range& entities, wid_t id )
{
    Range::const_pair_iterator pi;
    for( pi = entities.const_pair_begin(); pi != entities.const_pair_end(); ++pi )
    {
        const EntityHandle n = pi->second - pi->first + 1;
        dbgOut.printf( 3, "Assigning %s %lu to %lu to file IDs [%lu,%lu]\n",
                       CN::EntityTypeName( TYPE_FROM_HANDLE( pi->first ) ),
                       (unsigned long)( ID_FROM_HANDLE( pi->first ) ),
                       (unsigned long)( ID_FROM_HANDLE( pi->first ) + n - 1 ), (unsigned long)id,
                       (unsigned long)( id + n - 1 ) );
        if( TYPE_FROM_HANDLE( pi->first ) == MBPOLYGON || TYPE_FROM_HANDLE( pi->first ) == MBPOLYHEDRON )
        {
            int num_vertices         = 0;
            const EntityHandle* conn = 0;
            iFace->get_connectivity( pi->first, conn, num_vertices );
            dbgOut.printf( 3, "  poly with %d verts/faces\n", num_vertices );
        }
        if( !idMap.insert( pi->first, id, n ).second ) return error( MB_FAILURE );
        id += n;
    }

    return MB_SUCCESS;
}

const char* WriteHDF5::ExportSet::name() const
{
    static char buffer[128];
    switch( type )
    {
        case MBVERTEX:
            return mhdf_node_type_handle();
        case MBENTITYSET:
            return mhdf_set_type_handle();
        default:
            sprintf( buffer, "%s%d", CN::EntityTypeName( type ), num_nodes );
            return buffer;
    }
}

WriterIface* WriteHDF5::factory( Interface* iface )
{
    return new WriteHDF5( iface );
}

WriteHDF5::WriteHDF5( Interface* iface )
    : bufferSize( WRITE_HDF5_BUFFER_SIZE ), dataBuffer( 0 ), iFace( iface ), writeUtil( 0 ), filePtr( 0 ),
      setContentsOffset( 0 ), setChildrenOffset( 0 ), setParentsOffset( 0 ), maxNumSetContents( 0 ),
      maxNumSetChildren( 0 ), maxNumSetParents( 0 ), writeSets( false ), writeSetContents( false ),
      writeSetChildren( false ), writeSetParents( false ), parallelWrite( false ), collectiveIO( false ),
      writeTagDense( false ), writeProp( H5P_DEFAULT ), dbgOut( "H5M", stderr ), debugTrack( false )
{
}
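// Sketch of how this writer is obtained: MOAB's WriterIface machinery
// normally invokes the factory when an HDF5 (".h5m") write is requested,
// but the factory can also be called directly, e.g.:
//
//   WriterIface* writer = WriteHDF5::factory( mb_interface_ptr );
//   // ... writer->write_file( ... ) ...
//   delete writer;
//
// (mb_interface_ptr here is just a placeholder for a moab::Interface*.)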
ErrorCode WriteHDF5::init()
{
    ErrorCode rval;

    if( writeUtil )  // init has already been called
        return MB_SUCCESS;
    /*
    #ifdef DEBUG
      H5Eset_auto(&hdf_error_handler, writeUtil); // HDF5 callback for errors
    #endif
    */
    // For known tag types, store the corresponding HDF5 type in which
    // the tag data is to be written in the file.
    // register_known_tag_types(iFace);

    // Get the util interface
    rval = iFace->query_interface( writeUtil );
    CHK_MB_ERR_0( rval );

    idMap.clear();

#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
#else
    herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
#endif
    if( err < 0 )
    {
        errorHandler.func = 0;
        errorHandler.data = 0;
    }
    else
    {
#if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
        err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
#else
        err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
#endif
        if( err < 0 )
        {
            errorHandler.func = 0;
            errorHandler.data = 0;
        }
    }

    if( !topState.valid() ) topState = MPEState( "WriteHDF5", "yellow" );
    if( !subState.valid() ) subState = MPEState( "WriteHDF5 subevent", "cyan" );

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_finished()
{
    // Release memory allocated in lists
    exportList.clear();
    nodeSet.range.clear();
    setSet.range.clear();
    tagList.clear();
    idMap.clear();

    HDF5ErrorHandler handler;
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
#else
    herr_t err = H5Eget_auto( &handler.func, &handler.data );
#endif
    if( err >= 0 && handler.func == &handle_hdf5_error )
    {
        assert( handler.data == &errorHandler );
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
        H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
#else
        H5Eset_auto( errorHandler.func, errorHandler.data );
#endif
    }

    return MB_SUCCESS;
}

WriteHDF5::~WriteHDF5()
{
    if( !writeUtil )  // init() failed.
        return;

    iFace->release_interface( writeUtil );
}
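// The options consumed by write_file() below are ordinary FileOptions
// entries; an illustrative option string (entries are ';'-separated by
// default) would be:
//
//   "DEBUG_IO=3;BUFFER_SIZE=1048576;CPUTIME;DEBUG_BINIO"
//
// DEBUG_IO sets debug-output verbosity, BUFFER_SIZE (values < 24 are
// ignored) overrides the default 40 MB internal buffer, CPUTIME requests
// per-stage timing, and DEBUG_BINIO enables IODebugTrack range checking.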
ErrorCode WriteHDF5::write_file( const char* filename, bool overwrite, const FileOptions& opts,
                                 const EntityHandle* set_array, const int num_sets,
                                 const std::vector< std::string >& qa_records, const Tag* tag_list, int num_tags,
                                 int user_dimension )
{
    mhdf_Status status;

    parallelWrite = false;
    collectiveIO  = false;

    // Enable debug output
    int tmpval = 0;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) ) dbgOut.set_verbosity( tmpval );

    // writeTagDense = (MB_SUCCESS == opts.get_null_option("DENSE_TAGS"));
    writeTagDense = true;

    // Enable some extra checks for reads. Note: amongst other things this
    // will print errors if the entire file is not read, so if doing a
    // partial read that is not a parallel read, this should be disabled.
    debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );

    bufferSize = WRITE_HDF5_BUFFER_SIZE;
    int buf_size;
    ErrorCode rval = opts.get_int_option( "BUFFER_SIZE", buf_size );
    if( MB_SUCCESS == rval && buf_size >= 24 ) bufferSize = buf_size;

    // Allocate internal buffer to use when gathering data to write.
    dataBuffer = (char*)malloc( bufferSize );
    if( !dataBuffer ) return error( MB_MEMORY_ALLOCATION_FAILED );

    // Clear filePtr so we know if it is open upon failure
    filePtr = 0;

    // Do actual write.
    writeProp        = H5P_DEFAULT;
    ErrorCode result = write_file_impl( filename, overwrite, opts, set_array, num_sets, qa_records, tag_list,
                                        num_tags, user_dimension );
    // Close writeProp if it was opened
    if( writeProp != H5P_DEFAULT ) H5Pclose( writeProp );

    // Free memory buffer
    free( dataBuffer );
    dataBuffer = 0;

    // Close file
    bool created_file = false;
    if( filePtr )
    {
        created_file = true;
        mhdf_closeFile( filePtr, &status );
        filePtr = 0;
        if( mhdf_isError( &status ) )
        {
            MB_SET_ERR_CONT( mhdf_message( &status ) );
            if( MB_SUCCESS == result ) result = MB_FAILURE;
        }
    }

    // Release other resources
    if( MB_SUCCESS == result )
        result = write_finished();
    else
        write_finished();

    // If write failed, remove file unless KEEP option was specified
    if( MB_SUCCESS != result && created_file && MB_ENTITY_NOT_FOUND == opts.get_null_option( "KEEP" ) )
        remove( filename );

    return result;
}
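// write_file_impl() below does the actual work in a fixed sequence --
// gather mesh, check ID space, create the file layout, then write nodes,
// element connectivity, sets, adjacencies, and tags -- recording the
// elapsed time of each stage in times[] for the optional CPUTIME report.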
ErrorCode WriteHDF5::write_file_impl( const char* filename, bool overwrite, const FileOptions& opts,
                                      const EntityHandle* set_array, const int num_sets,
                                      const std::vector< std::string >& qa_records, const Tag* tag_list,
                                      int num_tags, int user_dimension )
{
    ErrorCode result;
    std::list< TagDesc >::const_iterator t_itor;
    std::list< ExportSet >::iterator ex_itor;
    EntityHandle elem_count, max_id;
    double times[NUM_TIMES] = { 0 };

    if( MB_SUCCESS != init() ) return error( MB_FAILURE );

    // See if we need to report times
    bool cputime = false;
    result       = opts.get_null_option( "CPUTIME" );
    if( MB_SUCCESS == result ) cputime = true;

    CpuTimer timer;

    dbgOut.tprint( 1, "Gathering Mesh\n" );
    topState.start( "gathering mesh" );

    // Gather mesh to export
    exportList.clear();
    if( 0 == num_sets || ( 1 == num_sets && set_array[0] == 0 ) )
    {
        result = gather_all_mesh();
        topState.end( result );
        CHK_MB_ERR_0( result );
    }
    else
    {
        std::vector< EntityHandle > passed_export_list( set_array, set_array + num_sets );
        result = gather_mesh_info( passed_export_list );
        topState.end( result );
        CHK_MB_ERR_0( result );
    }

    times[GATHER_TIME] = timer.time_elapsed();

    // if (nodeSet.range.size() == 0)
    //   return error(MB_ENTITY_NOT_FOUND);

    dbgOut.tprint( 1, "Checking ID space\n" );

    // Make sure ID space is sufficient
    elem_count = nodeSet.range.size() + setSet.range.size();
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
        elem_count += ex_itor->range.size();
    max_id = (EntityHandle)1 << ( 8 * sizeof( wid_t ) - 1 );
    if( elem_count > max_id )
    {
        MB_SET_ERR_CONT( "ID space insufficient for mesh size" );
        return error( MB_FAILURE );
    }

    dbgOut.tprint( 1, "Creating File\n" );

    // Figure out the dimension in which to write the mesh.
    int mesh_dim;
    result = iFace->get_dimension( mesh_dim );
    CHK_MB_ERR_0( result );

    if( user_dimension < 1 ) user_dimension = mesh_dim;
    user_dimension = user_dimension > mesh_dim ? mesh_dim : user_dimension;

    // Create the file layout, including all tables (zero-ed) and
    // all structure and meta information.
    const char* optnames[] = { "WRITE_PART", "FORMAT", 0 };
    int junk;
    parallelWrite = ( MB_SUCCESS == opts.match_option( "PARALLEL", optnames, junk ) );
    if( parallelWrite )
    {
        // Just store Boolean value based on string option here.
        // parallel_create_file will set writeProp accordingly.
        // collectiveIO = (MB_SUCCESS == opts.get_null_option("COLLECTIVE"));
        // dbgOut.printf(2, "'COLLECTIVE' option = %s\n", collectiveIO ? "YES" : "NO");
        // Do this all the time, as it appears to be much faster than indep in some cases
        collectiveIO = true;
        result =
            parallel_create_file( filename, overwrite, qa_records, opts, tag_list, num_tags, user_dimension, times );
    }
    else
    {
        result = serial_create_file( filename, overwrite, qa_records, tag_list, num_tags, user_dimension );
    }
    if( MB_SUCCESS != result ) return error( result );

    times[CREATE_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing Nodes.\n" );
    // Write node coordinates
    if( !nodeSet.range.empty() || parallelWrite )
    {
        topState.start( "writing coords" );
        result = write_nodes();
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }

    times[COORD_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing connectivity.\n" );

    // Write element connectivity
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        topState.start( "writing connectivity for ", ex_itor->name() );
        result = write_elems( *ex_itor );
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }
    times[CONN_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing sets.\n" );

    // Write meshsets
    result = write_sets( times );
    if( MB_SUCCESS != result ) return error( result );
    debug_barrier();

    times[SET_TIME] = timer.time_elapsed();
    dbgOut.tprint( 1, "Writing adjacencies.\n" );

    // Write adjacencies
    // Tim says don't save node adjacencies!
#ifdef MB_H5M_WRITE_NODE_ADJACENCIES
    result = write_adjacencies( nodeSet );
    if( MB_SUCCESS != result ) return error( result );
#endif
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        topState.start( "writing adjacencies for ", ex_itor->name() );
        result = write_adjacencies( *ex_itor );
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }
    times[ADJ_TIME] = timer.time_elapsed();

    dbgOut.tprint( 1, "Writing tags.\n" );

    // Write tags
    for( t_itor = tagList.begin(); t_itor != tagList.end(); ++t_itor )
    {
        std::string name;
        iFace->tag_get_name( t_itor->tag_id, name );
        topState.start( "writing tag: ", name.c_str() );
        result = write_tag( *t_itor, times );
        topState.end( result );
        if( MB_SUCCESS != result ) return error( result );
    }
    times[TAG_TIME] = timer.time_elapsed();

    times[TOTAL_TIME] = timer.time_since_birth();

    if( cputime ) { print_times( times ); }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::initialize_mesh( const Range ranges[5] )
{
    ErrorCode rval;

    if( !ranges[0].all_of_type( MBVERTEX ) ) return error( MB_FAILURE );
    nodeSet.range        = ranges[0];
    nodeSet.type         = MBVERTEX;
    nodeSet.num_nodes    = 1;
    nodeSet.max_num_ents = nodeSet.max_num_adjs = 0;

    if( !ranges[4].all_of_type( MBENTITYSET ) ) return error( MB_FAILURE );
    setSet.range        = ranges[4];
    setSet.type         = MBENTITYSET;
    setSet.num_nodes    = 0;
    setSet.max_num_ents = setSet.max_num_adjs = 0;
    maxNumSetContents = maxNumSetChildren = maxNumSetParents = 0;

    exportList.clear();
    std::vector< Range > bins( 1024 );  // Sort entities by connectivity length.
                                        // Resize is expensive due to Range copy, so start big.
    for( EntityType type = MBEDGE; type < MBENTITYSET; ++type )
    {
        ExportSet set;
        set.max_num_ents = set.max_num_adjs = 0;
        const int dim = CN::Dimension( type );

        // Group entities by connectivity length
        bins.clear();
        assert( dim >= 0 && dim <= 4 );
        std::pair< Range::const_iterator, Range::const_iterator > p = ranges[dim].equal_range( type );
        Range::const_iterator i = p.first;
        while( i != p.second )
        {
            Range::const_iterator first = i;
            EntityHandle const* conn;
            int len, firstlen;

            // Dummy storage vector for structured mesh "get_connectivity" function
            std::vector< EntityHandle > storage;

            rval = iFace->get_connectivity( *i, conn, firstlen, false, &storage );
            if( MB_SUCCESS != rval ) return error( rval );

            for( ++i; i != p.second; ++i )
            {
                rval = iFace->get_connectivity( *i, conn, len, false, &storage );
                if( MB_SUCCESS != rval ) return error( rval );

                if( len != firstlen ) break;
            }

            if( firstlen >= (int)bins.size() ) bins.resize( firstlen + 1 );
            bins[firstlen].merge( first, i );
        }
        // Create ExportSet for each group
        for( std::vector< Range >::iterator j = bins.begin(); j != bins.end(); ++j )
        {
            if( j->empty() ) continue;

            set.range.clear();
            set.type      = type;
            set.num_nodes = j - bins.begin();
            exportList.push_back( set );
            exportList.back().range.swap( *j );
        }
    }

    return MB_SUCCESS;
}
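// Example of the grouping performed above: a mesh with both 4-node and
// 9-node quadrilaterals produces two ExportSets, (MBQUAD, num_nodes = 4)
// and (MBQUAD, num_nodes = 9), each written to its own connectivity table
// (named "Quad4" and "Quad9" by ExportSet::name()).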
// Gather the mesh to be written from a list of owning meshsets.
ErrorCode WriteHDF5::gather_mesh_info( const std::vector< EntityHandle >& export_sets )
{
    ErrorCode rval;

    int dim;
    Range range;      // Temporary storage
    Range ranges[5];  // Lists of entities to export, grouped by dimension

    // Gather list of all related sets
    std::vector< EntityHandle > stack( export_sets );
    std::vector< EntityHandle > set_children;
    while( !stack.empty() )
    {
        EntityHandle meshset = stack.back();
        stack.pop_back();
        ranges[4].insert( meshset );

        // Get contained sets
        range.clear();
        rval = iFace->get_entities_by_type( meshset, MBENTITYSET, range );
        CHK_MB_ERR_0( rval );
        for( Range::iterator ritor = range.begin(); ritor != range.end(); ++ritor )
        {
            if( ranges[4].find( *ritor ) == ranges[4].end() ) stack.push_back( *ritor );
        }

        // Get child sets
        set_children.clear();
        rval = iFace->get_child_meshsets( meshset, set_children, 1 );
        CHK_MB_ERR_0( rval );
        for( std::vector< EntityHandle >::iterator vitor = set_children.begin(); vitor != set_children.end();
             ++vitor )
        {
            if( ranges[4].find( *vitor ) == ranges[4].end() ) stack.push_back( *vitor );
        }
    }

    // Gather list of all mesh entities from list of sets,
    // grouped by dimension.
    for( Range::iterator setitor = ranges[4].begin(); setitor != ranges[4].end(); ++setitor )
    {
        for( dim = 0; dim < 4; ++dim )
        {
            range.clear();
            rval = iFace->get_entities_by_dimension( *setitor, dim, range, false );
            CHK_MB_ERR_0( rval );

            ranges[dim].merge( range );
        }
    }

    // For each list of elements, append adjacent children and
    // nodes to lists.
    for( dim = 3; dim > 0; --dim )
    {
        for( int cdim = 1; cdim < dim; ++cdim )
        {
            range.clear();
            rval = iFace->get_adjacencies( ranges[dim], cdim, false, range );
            CHK_MB_ERR_0( rval );
            ranges[cdim].merge( range );
        }
        range.clear();
        rval = writeUtil->gather_nodes_from_elements( ranges[dim], 0, range );
        CHK_MB_ERR_0( rval );
        ranges[0].merge( range );
    }

    return initialize_mesh( ranges );
}

// Gather all the mesh and related information to be written.
ErrorCode WriteHDF5::gather_all_mesh()
{
    ErrorCode rval;
    Range ranges[5];

    rval = iFace->get_entities_by_type( 0, MBVERTEX, ranges[0] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_dimension( 0, 1, ranges[1] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_dimension( 0, 2, ranges[2] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_dimension( 0, 3, ranges[3] );
    if( MB_SUCCESS != rval ) return error( rval );

    rval = iFace->get_entities_by_type( 0, MBENTITYSET, ranges[4] );
    if( MB_SUCCESS != rval ) return error( rval );

    return initialize_mesh( ranges );
}
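// Both gather paths satisfy the same contract expected by
// initialize_mesh(): ranges[0..3] hold entities grouped by dimension and
// ranges[4] holds the entity sets. Note that gather_mesh_info() checks
// ranges[4] membership before pushing a set onto the stack, which is what
// terminates the traversal for cyclic set containment.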
ErrorCode WriteHDF5::write_nodes()
{
    mhdf_Status status;
    int dim, mesh_dim;
    ErrorCode rval;
    hid_t node_table;
    long first_id, num_nodes;

    if( !nodeSet.total_num_ents ) return MB_SUCCESS;  // No nodes!

    CHECK_OPEN_HANDLES;

    rval = iFace->get_dimension( mesh_dim );
    CHK_MB_ERR_0( rval );

    debug_barrier();
    dbgOut.print( 3, "Opening Node Coords\n" );
    node_table = mhdf_openNodeCoords( filePtr, &num_nodes, &dim, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track( debugTrack, "nodes", num_nodes );

    double* buffer = (double*)dataBuffer;
#ifdef BLOCKED_COORD_IO
    int chunk_size = bufferSize / sizeof( double );
#else
    int chunk_size = bufferSize / ( 3 * sizeof( double ) );
#endif

    long remaining  = nodeSet.range.size();
    long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
    if( nodeSet.max_num_ents )
    {
        assert( nodeSet.max_num_ents >= remaining );
        num_writes = ( nodeSet.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    long remaining_writes = num_writes;

    long offset                = nodeSet.offset;
    Range::const_iterator iter = nodeSet.range.begin();
    dbgOut.printf( 3, "Writing %ld nodes in %ld blocks of %d\n", remaining,
                   ( remaining + chunk_size - 1 ) / chunk_size, chunk_size );
    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
        long count = chunk_size < remaining ? chunk_size : remaining;
        remaining -= count;
        Range::const_iterator end = iter;
        end += count;

#ifdef BLOCKED_COORD_IO
        for( int d = 0; d < dim; d++ )
        {
            if( d < mesh_dim )
            {
                rval = writeUtil->get_node_coords( d, iter, end, count, buffer );
                CHK_MB_ERR_1( rval, node_table, status );
            }
            else
                memset( buffer, 0, count * sizeof( double ) );

            dbgOut.printf( 3, "  writing %c node chunk %ld of %ld, %ld values at %ld\n", (char)( 'X' + d ),
                           num_writes - remaining_writes + 1, num_writes, count, offset );
            mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
            CHK_MHDF_ERR_1( status, node_table );
        }
#else
        rval = writeUtil->get_node_coords( -1, iter, end, 3 * count, buffer );
        CHK_MB_ERR_1( rval, node_table, status );
        dbgOut.printf( 3, "  writing node chunk %ld of %ld, %ld values at %ld\n", num_writes - remaining_writes + 1,
                       num_writes, count, offset );
        mhdf_writeNodeCoordsWithOpt( node_table, offset, count, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, node_table );
#endif
        track.record_io( offset, count );

        iter = end;
        offset += count;
        --remaining_writes;
    }
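    // In parallel, collective HDF5 writes must be issued the same number of
    // times on every rank, so a rank that has exhausted its own data keeps
    // issuing zero-sized writes until the global maximum number of writes
    // (num_writes, derived from max_num_ents above) is reached.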
    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( remaining_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
#ifdef BLOCKED_COORD_IO
            for( int d = 0; d < dim; ++d )
            {
                dbgOut.printf( 3, "  writing (empty) %c node chunk %ld of %ld.\n", (char)( 'X' + d ),
                               num_writes - remaining_writes, num_writes );
                mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
                CHK_MHDF_ERR_1( status, node_table );
            }
#else
            dbgOut.printf( 3, "  writing (empty) node chunk %ld of %ld.\n", num_writes - remaining_writes,
                           num_writes );
            mhdf_writeNodeCoordsWithOpt( node_table, offset, 0, 0, writeProp, &status );
            CHK_MHDF_ERR_1( status, node_table );
#endif
        }
    }

    mhdf_closeData( filePtr, node_table, &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_elems( ExportSet& elems )
{
    mhdf_Status status;
    ErrorCode rval;
    long first_id;
    int nodes_per_elem;
    long table_size;

    CHECK_OPEN_HANDLES;

    debug_barrier();
    dbgOut.printf( 2, "Writing %lu elements of type %s%d\n", (unsigned long)elems.range.size(),
                   CN::EntityTypeName( elems.type ), elems.num_nodes );
    dbgOut.print( 3, "Writing elements", elems.range );

    hid_t elem_table =
        mhdf_openConnectivity( filePtr, elems.name(), &nodes_per_elem, &table_size, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track( debugTrack, elems.name() && strlen( elems.name() ) ? elems.name() : "<ANONYMOUS ELEM SET?>",
                        table_size );

    assert( (unsigned long)first_id <= elems.first_id );
    assert( (unsigned long)table_size >= elems.offset + elems.range.size() );

    EntityHandle* buffer = (EntityHandle*)dataBuffer;
    int chunk_size       = bufferSize / ( elems.num_nodes * sizeof( wid_t ) );
    long offset          = elems.offset;
    long remaining       = elems.range.size();
    long num_writes      = ( remaining + chunk_size - 1 ) / chunk_size;
    if( elems.max_num_ents )
    {
        assert( elems.max_num_ents >= remaining );
        num_writes = ( elems.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    long remaining_writes = num_writes;
    Range::iterator iter  = elems.range.begin();

    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
        long count = chunk_size < remaining ? chunk_size : remaining;
        remaining -= count;

        Range::iterator next = iter;
        next += count;
        rval = writeUtil->get_element_connect( iter, next, elems.num_nodes, count * elems.num_nodes, buffer );
        CHK_MB_ERR_1( rval, elem_table, status );
        iter = next;

        for( long i = 0; i < count * nodes_per_elem; ++i )
        {
            buffer[i] = idMap.find( buffer[i] );
            if( 0 == buffer[i] )
            {
                MB_SET_ERR_CONT( "Invalid " << elems.name() << " element connectivity. Write Aborted" );
                mhdf_closeData( filePtr, elem_table, &status );
                return error( MB_FAILURE );
            }
        }

        dbgOut.printf( 3, "  writing node connectivity %ld of %ld, %ld values at %ld\n",
                       num_writes - remaining_writes + 1, num_writes, count, offset );
        track.record_io( offset, count );
        mhdf_writeConnectivityWithOpt( elem_table, offset, count, id_type, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, elem_table );

        offset += count;
        --remaining_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( remaining_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.printf( 3, "  writing (empty) connectivity chunk %ld of %ld.\n",
                           num_writes - remaining_writes + 1, num_writes );
            mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
            CHK_MHDF_ERR_1( status, elem_table );
        }
    }

    mhdf_closeData( filePtr, elem_table, &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}

ErrorCode WriteHDF5::get_set_info( EntityHandle set, long& num_entities, long& num_children, long& num_parents,
                                   unsigned long& flags )
{
    ErrorCode rval;
    int i;
    unsigned int u;

    rval = iFace->get_number_entities_by_handle( set, i, false );
    CHK_MB_ERR_0( rval );
    num_entities = i;

    rval = iFace->num_child_meshsets( set, &i );
    CHK_MB_ERR_0( rval );
    num_children = i;

    rval = iFace->num_parent_meshsets( set, &i );
    CHK_MB_ERR_0( rval );
    num_parents = i;

    rval = iFace->get_meshset_options( set, u );
    CHK_MB_ERR_0( rval );
    flags = u;

    return MB_SUCCESS;
}
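// Set data lives in three flat ID tables in the file -- contents, children,
// and parents -- plus a "set meta" table of per-set end offsets and flags.
// write_set_data() below streams one of the three flat tables; contents may
// additionally be stored range-compacted as (start_id, count) pairs, in
// which case the set is flagged with mhdf_SET_RANGE_BIT in the meta table.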
ErrorCode WriteHDF5::write_set_data( const WriteUtilIface::EntityListType which_data, const hid_t handle,
                                     IODebugTrack& track, Range* ranged, Range* null_stripped,
                                     std::vector< long >* set_sizes )
{
    // ranged must be non-null for CONTENTS and null for anything else
    assert( ( which_data == WriteUtilIface::CONTENTS ) == ( 0 != ranged ) );
    ErrorCode rval;
    mhdf_Status status;

    debug_barrier();

    // Function pointer type used to write set data
    void ( *write_func )( hid_t, long, long, hid_t, const void*, hid_t, mhdf_Status* );
    long max_vals;  // Max over all procs of number of values to write to data set
    long offset;    // Offset in HDF5 dataset at which to write next block of data
    switch( which_data )
    {
        case WriteUtilIface::CONTENTS:
            assert( ranged != 0 && null_stripped != 0 && set_sizes != 0 );
            write_func = &mhdf_writeSetDataWithOpt;
            max_vals   = maxNumSetContents;
            offset     = setContentsOffset;
            dbgOut.print( 2, "Writing set contents\n" );
            break;
        case WriteUtilIface::CHILDREN:
            assert( !ranged && !null_stripped && !set_sizes );
            write_func = &mhdf_writeSetParentsChildrenWithOpt;
            max_vals   = maxNumSetChildren;
            offset     = setChildrenOffset;
            dbgOut.print( 2, "Writing set child lists\n" );
            break;
        case WriteUtilIface::PARENTS:
            assert( !ranged && !null_stripped && !set_sizes );
            write_func = &mhdf_writeSetParentsChildrenWithOpt;
            max_vals   = maxNumSetParents;
            offset     = setParentsOffset;
            dbgOut.print( 2, "Writing set parent lists\n" );
            break;
        default:
            assert( false );
            return MB_FAILURE;
    }
    // assert(max_vals > 0); // Should have skipped this function otherwise

    // Buffer to use for IO
    wid_t* buffer = reinterpret_cast< wid_t* >( dataBuffer );
    // Number of handles that will fit in the buffer
    const size_t buffer_size = bufferSize / sizeof( EntityHandle );
    // The total number of write calls that must be made, including no-ops for collective IO
    const size_t num_total_writes = ( max_vals + buffer_size - 1 ) / buffer_size;

    std::vector< SpecialSetData >::iterator si = specialSets.begin();

    std::vector< wid_t > remaining;         // Data left over from prev iteration because it didn't fit in buffer
    size_t remaining_offset           = 0;  // Avoid erasing from front of 'remaining'
    const EntityHandle* remaining_ptr = 0;  // Remaining for non-ranged data
    size_t remaining_count            = 0;
    const wid_t* special_rem_ptr      = 0;
    Range::const_iterator i = setSet.range.begin(), j, rhint, nshint;
    if( ranged ) rhint = ranged->begin();
    if( null_stripped ) nshint = null_stripped->begin();
    for( size_t w = 0; w < num_total_writes; ++w )
    {
        if( i == setSet.range.end() && remaining.empty() && !remaining_ptr )
        {
            // If here, then we've written everything but we need to
            // make more write calls because we're doing collective IO
            // in parallel
            ( *write_func )( handle, 0, 0, id_type, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
            continue;
        }

        // If we had some left-over data from a range-compacted set
        // from the last iteration, add it to the buffer now
        size_t count = 0;
        if( !remaining.empty() )
        {
            count = remaining.size() - remaining_offset;
            if( count > buffer_size )
            {
                memcpy( buffer, &remaining[remaining_offset], buffer_size * sizeof( wid_t ) );
                count = buffer_size;
                remaining_offset += buffer_size;
            }
            else
            {
                memcpy( buffer, &remaining[remaining_offset], count * sizeof( wid_t ) );
                remaining_offset = 0;
                remaining.clear();
            }
        }
        // If we had some left-over data from a non-range-compacted set
        // from the last iteration, add it to the buffer now
        else if( remaining_ptr )
        {
            if( remaining_count > buffer_size )
            {
                rval = vector_to_id_list( remaining_ptr, buffer, buffer_size );
                CHK_MB_ERR_0( rval );
                count = buffer_size;
                remaining_ptr += count;
                remaining_count -= count;
            }
            else
            {
                rval = vector_to_id_list( remaining_ptr, buffer, remaining_count );
                CHK_MB_ERR_0( rval );
                count           = remaining_count;
                remaining_ptr   = 0;
                remaining_count = 0;
            }
        }
        // If we had some left-over data from a "special" (i.e. parallel shared)
        // set.
        else if( special_rem_ptr )
        {
            if( remaining_count > buffer_size )
            {
                memcpy( buffer, special_rem_ptr, buffer_size * sizeof( wid_t ) );
                count = buffer_size;
                special_rem_ptr += count;
                remaining_count -= count;
            }
            else
            {
                memcpy( buffer, special_rem_ptr, remaining_count * sizeof( wid_t ) );
                count           = remaining_count;
                special_rem_ptr = 0;
                remaining_count = 0;
            }
        }

        // While there is both space remaining in the buffer and
        // more sets to write, append more set data to buffer.
        while( count < buffer_size && i != setSet.range.end() )
        {
            // Special case for "special" (i.e. parallel shared) sets:
            // we already have the data in a vector, just copy it.
            if( si != specialSets.end() && si->setHandle == *i )
            {
                std::vector< wid_t >& list = ( which_data == WriteUtilIface::CONTENTS )  ? si->contentIds
                                             : ( which_data == WriteUtilIface::PARENTS ) ? si->parentIds
                                                                                         : si->childIds;
                size_t append = list.size();
                if( count + list.size() > buffer_size )
                {
                    append          = buffer_size - count;
                    special_rem_ptr = &list[append];
                    remaining_count = list.size() - append;
                }
                memcpy( buffer + count, &list[0], append * sizeof( wid_t ) );
                ++i;
                ++si;
                count += append;
                continue;
            }

            j = i;
            ++i;
            const EntityHandle* ptr;
            int len;
            unsigned char flags;
            rval = writeUtil->get_entity_list_pointers( j, i, &ptr, which_data, &len, &flags );
            if( MB_SUCCESS != rval ) return rval;
            if( which_data == WriteUtilIface::CONTENTS && !( flags & MESHSET_ORDERED ) )
            {
                bool compacted;
                remaining.clear();
                if( len == 0 )
                    compacted = false;
                else
                {
                    assert( !( len % 2 ) );
                    rval = range_to_blocked_list( ptr, len / 2, remaining, compacted );
                    if( MB_SUCCESS != rval ) return rval;
                }
                if( compacted )
                {
                    rhint = ranged->insert( rhint, *j );
                    set_sizes->push_back( remaining.size() );
                }
                else if( remaining.size() != (unsigned)len )
                {
                    nshint = null_stripped->insert( nshint, *j );
                    set_sizes->push_back( remaining.size() );
                }

                if( count + remaining.size() <= buffer_size )
                {
                    if( !remaining.empty() )
                        memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining.size() );
                    count += remaining.size();
                    remaining.clear();
                    remaining_offset = 0;
                }
                else
                {
                    remaining_offset = buffer_size - count;
                    memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining_offset );
                    count += remaining_offset;
                }
            }
            else
            {
                if( count + len > buffer_size )
                {
                    size_t append   = buffer_size - count;
                    remaining_ptr   = ptr + append;
                    remaining_count = len - append;
                    len             = append;
                }

                rval = vector_to_id_list( ptr, buffer + count, len );
                count += len;
            }
        }
        // Write the buffer.
        ( *write_func )( handle, offset, count, id_type, buffer, writeProp, &status );
        CHK_MHDF_ERR_0( status );
        track.record_io( offset, count );
        offset += count;
    }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_sets( double* times )
{
    mhdf_Status status;
    ErrorCode rval;
    long first_id, size;
    hid_t table;
    CpuTimer timer;

    CHECK_OPEN_HANDLES;
    /* If no sets, just return success */
    if( !writeSets ) return MB_SUCCESS;

    debug_barrier();
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Write set parents */
    if( writeSetParents )
    {
        topState.start( "writing parent lists for local sets" );
        table = mhdf_openSetParents( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetParents", size );

        rval = write_set_data( WriteUtilIface::PARENTS, table, track );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_PARENT] = timer.time_elapsed();
        track.all_reduce();
    }

    /* Write set children */
    if( writeSetChildren )
    {
        topState.start( "writing child lists for local sets" );
        table = mhdf_openSetChildren( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetChildren", size );

        rval = write_set_data( WriteUtilIface::CHILDREN, table, track );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_CHILD] = timer.time_elapsed();
        track.all_reduce();
    }

    /* Write set contents */
    Range ranged_sets, null_stripped_sets;
    std::vector< long > set_sizes;
    if( writeSetContents )
    {
        topState.start( "writing content lists for local sets" );
        table = mhdf_openSetData( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetContents", size );

        rval = write_set_data( WriteUtilIface::CONTENTS, table, track, &ranged_sets, &null_stripped_sets,
                               &set_sizes );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_CONTENT] = timer.time_elapsed();
        track.all_reduce();
    }
    assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );

    /* Write set description table */

    debug_barrier();
    topState.start( "writing descriptions of local sets" );
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Open the table */
    table = mhdf_openSetMeta( filePtr, &size, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track_meta( debugTrack, "SetMeta", size );

    /* Some debug stuff */
    debug_barrier();
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Counts and buffers and such */
    mhdf_index_t* const buffer = reinterpret_cast< mhdf_index_t* >( dataBuffer );
    const size_t buffer_size       = bufferSize / ( 4 * sizeof( mhdf_index_t ) );
    const size_t num_local_writes  = ( setSet.range.size() + buffer_size - 1 ) / buffer_size;
    const size_t num_global_writes = ( setSet.max_num_ents + buffer_size - 1 ) / buffer_size;
    assert( num_local_writes <= num_global_writes );
    assert( num_global_writes > 0 );

    /* Data about sets for which the number of handles written is
     * not the same as the number of handles in the set
     * (range-compacted or null handles stripped out)
     */
    Range::const_iterator i               = setSet.range.begin();
    Range::const_iterator r               = ranged_sets.begin();
    Range::const_iterator s               = null_stripped_sets.begin();
    std::vector< long >::const_iterator n = set_sizes.begin();
    assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );

    /* We write the end index for each list, rather than the count */
    mhdf_index_t prev_contents_end = setContentsOffset - 1;
    mhdf_index_t prev_children_end = setChildrenOffset - 1;
    mhdf_index_t prev_parents_end  = setParentsOffset - 1;

    /* While there is more data to write */
    size_t offset                                    = setSet.offset;
    std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
    for( size_t w = 0; w < num_local_writes; ++w )
    {
        // Get a buffer full of data
        size_t count = 0;
        while( count < buffer_size && i != setSet.range.end() )
        {
            // Get set properties
            long num_ent, num_child, num_parent;
            unsigned long flags;
            if( si != specialSets.end() && si->setHandle == *i )
            {
                flags      = si->setFlags;
                num_ent    = si->contentIds.size();
                num_child  = si->childIds.size();
                num_parent = si->parentIds.size();
                ++si;
                if( r != ranged_sets.end() && *i == *r )
                {
                    assert( flags & mhdf_SET_RANGE_BIT );
                    ++r;
                    ++n;
                }
                else if( s != null_stripped_sets.end() && *i == *s )
                {
                    ++s;
                    ++n;
                }
            }
            else
            {
                assert( si == specialSets.end() || si->setHandle > *i );

                // Get set properties
                rval = get_set_info( *i, num_ent, num_child, num_parent, flags );
                CHK_MB_ERR_1( rval, table, status );

                // Check if size is something other than num handles in set
                if( r != ranged_sets.end() && *i == *r )
                {
                    num_ent = *n;
                    ++r;
                    ++n;
                    flags |= mhdf_SET_RANGE_BIT;
                }
                else if( s != null_stripped_sets.end() && *i == *s )
                {
                    num_ent = *n;
                    ++s;
                    ++n;
                }
            }

            // Put data in buffer
            mhdf_index_t* local = buffer + 4 * count;
            prev_contents_end += num_ent;
            prev_children_end += num_child;
            prev_parents_end += num_parent;
            local[0] = prev_contents_end;
            local[1] = prev_children_end;
            local[2] = prev_parents_end;
            local[3] = flags;

            // Iterate
            ++count;
            ++i;
        }

        // Write the data
        mhdf_writeSetMetaWithOpt( table, offset, count, MHDF_INDEX_TYPE, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, table );
        track_meta.record_io( offset, count );
        offset += count;
    }
    assert( r == ranged_sets.end() );
    assert( s == null_stripped_sets.end() );
    assert( n == set_sizes.end() );

    /* If doing parallel write with collective IO, do null write
     * calls because other procs aren't done yet and write calls
     * are collective */
    for( size_t w = num_local_writes; w != num_global_writes; ++w )
    {
        mhdf_writeSetMetaWithOpt( table, 0, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
        CHK_MHDF_ERR_1( status, table );
    }

    topState.end();
    mhdf_closeData( filePtr, table, &status );
    CHK_MHDF_ERR_0( status );

    times[SET_META] = timer.time_elapsed();
    track_meta.all_reduce();

    return MB_SUCCESS;
}

template < class HandleRangeIter >
inline size_t count_num_handles( HandleRangeIter iter, HandleRangeIter end )
{
    size_t result = 0;
    for( ; iter != end; ++iter )
        result += iter->second - iter->first + 1;

    return result;
}

template < class HandleRangeIter >
inline ErrorCode range_to_id_list_templ( HandleRangeIter begin, HandleRangeIter end,
                                         const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
                                         WriteHDF5::wid_t* array )
{
    ErrorCode rval                                          = MB_SUCCESS;
    RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();
    WriteHDF5::wid_t* i                                     = array;
    for( HandleRangeIter pi = begin; pi != end; ++pi )
    {
        EntityHandle h = pi->first;
        while( h <= pi->second )
        {
            ri = idMap.lower_bound( ri, idMap.end(), h );
            if( ri == idMap.end() || ri->begin > h )
            {
                rval = MB_ENTITY_NOT_FOUND;
                *i   = 0;
                ++i;
                ++h;
                continue;
            }

            // Compute the last available value of the found target range (ri iterator)
            WriteHDF5::wid_t last_valid_input_value_in_current_map_range = ri->begin + ri->count - 1;
            // Limit the number of steps we do on top of h so we do not overflow the
            // output range span
            WriteHDF5::wid_t step_until = std::min( last_valid_input_value_in_current_map_range, pi->second );
            WriteHDF5::wid_t n          = step_until - h + 1;
            assert( n > 0 );  // We must at least step 1

            WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
            for( WriteHDF5::wid_t j = 0; j < n; ++i, ++j )
                *i = id + j;
            h += n;
        }
    }

    assert( i == array + count_num_handles( begin, end ) );
    return rval;
}
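// The "blocked" (ranged) encoding built below stores (start_id, count)
// pairs rather than one ID per handle. For example, file IDs
// { 1, 2, 3, 4, 10, 11, 12 } encode as { 1, 4, 10, 3 }: four values instead
// of seven. The encoding is abandoned in favor of the plain list format as
// soon as it would need more than num_handles / 2 pairs, i.e. as soon as it
// stops being smaller.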
template < class HandleRangeIter >
inline ErrorCode range_to_blocked_list_templ( HandleRangeIter begin, HandleRangeIter end,
                                              const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
                                              std::vector< WriteHDF5::wid_t >& output_id_list, bool& ranged_list )
{
    output_id_list.clear();
    if( begin == end )
    {
        ranged_list = false;
        return MB_SUCCESS;
    }

    // First try ranged format, but give up if we reach the
    // non-range format size.
    RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();

    const size_t num_handles = count_num_handles( begin, end );
    // If we end up with more than this many range blocks, then
    // we're better off just writing the set as a simple list
    size_t pairs_remaining = num_handles / 2;
    for( HandleRangeIter pi = begin; pi != end; ++pi )
    {
        EntityHandle h                               = pi->first;
        WriteHDF5::wid_t local_mapped_from_subrange = 0;
        while( h <= pi->second )
        {
            ri = idMap.lower_bound( ri, idMap.end(), h );
            if( ri == idMap.end() || ri->begin > h )
            {
                ++h;
                continue;
            }

            WriteHDF5::wid_t n = pi->second - pi->first + 1 - local_mapped_from_subrange;
            if( n > ri->count ) n = ri->count;

            WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
            // See if we can go to the end of the range
            if( id + n > ri->value + ri->count )  // We have to reduce n, because we cannot go over next subrange
            {
                if( ri->value + ri->count - id > 0 ) n = ri->value + ri->count - id;
            }

            // See if we can append it to the previous range
            if( !output_id_list.empty() && output_id_list[output_id_list.size() - 2] + output_id_list.back() == id )
            {
                output_id_list.back() += n;
            }
            // If we ran out of space (or the set is empty), just do list format
            else if( !pairs_remaining )
            {
                ranged_list = false;
                output_id_list.resize( num_handles );
                range_to_id_list_templ( begin, end, idMap, &output_id_list[0] );
                output_id_list.erase( std::remove( output_id_list.begin(), output_id_list.end(), 0u ),
                                      output_id_list.end() );
                return MB_SUCCESS;
            }
            else
            {
                --pairs_remaining;
                output_id_list.push_back( id );
                output_id_list.push_back( n );
            }
            local_mapped_from_subrange += n;  // We already mapped so many
            h += n;
        }
    }

    ranged_list = true;
    return MB_SUCCESS;
}

ErrorCode WriteHDF5::range_to_blocked_list( const Range& input_range, std::vector< wid_t >& output_id_list,
                                            bool& ranged_list )
{
    return range_to_blocked_list_templ( input_range.const_pair_begin(), input_range.const_pair_end(), idMap,
                                        output_id_list, ranged_list );
}

ErrorCode WriteHDF5::range_to_blocked_list( const EntityHandle* array, size_t num_input_ranges,
                                            std::vector< wid_t >& output_id_list, bool& ranged_list )
{
    // We assume this in the cast on the following line
    typedef std::pair< EntityHandle, EntityHandle > mtype;
    assert( sizeof( mtype ) == 2 * sizeof( EntityHandle ) );
    const mtype* arr = reinterpret_cast< const mtype* >( array );
    return range_to_blocked_list_templ( arr, arr + num_input_ranges, idMap, output_id_list, ranged_list );
}

ErrorCode WriteHDF5::range_to_id_list( const Range& range, wid_t* array )
{
    return range_to_id_list_templ( range.const_pair_begin(), range.const_pair_end(), idMap, array );
}

ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input, size_t input_len, wid_t* output,
                                        size_t& output_len, bool remove_zeros )
{
    const EntityHandle* i_iter = input;
    const EntityHandle* i_end  = input + input_len;
    wid_t* o_iter              = output;
    for( ; i_iter != i_end; ++i_iter )
    {
        wid_t id = idMap.find( *i_iter );
        if( !remove_zeros || id != 0 )
        {
            *o_iter = id;
ErrorCode WriteHDF5::vector_to_id_list( const std::vector< EntityHandle >& input, std::vector< wid_t >& output,
                                        bool remove_zeros )
{
    output.resize( input.size() );
    size_t output_size = 0;
    ErrorCode rval = vector_to_id_list( &input[0], input.size(), &output[0], output_size, remove_zeros );
    output.resize( output_size );
    return rval;
}

ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input, wid_t* output, size_t count )
{
    size_t output_len;
    return vector_to_id_list( input, count, output, output_len, false );
}

inline ErrorCode WriteHDF5::get_adjacencies( EntityHandle entity, std::vector< wid_t >& adj )
{
    const EntityHandle* adj_array;
    int num_adj;
    ErrorCode rval = writeUtil->get_adjacencies( entity, adj_array, num_adj );
    if( MB_SUCCESS != rval ) return error( rval );

    size_t j = 0;
    adj.resize( num_adj );
    for( int i = 0; i < num_adj; ++i )
        if( wid_t id = idMap.find( adj_array[i] ) ) adj[j++] = id;
    adj.resize( j );

    return MB_SUCCESS;
}
ErrorCode WriteHDF5::write_adjacencies( const ExportSet& elements )
{
    ErrorCode rval;
    mhdf_Status status;
    Range::const_iterator iter;
    const Range::const_iterator end = elements.range.end();
    std::vector< wid_t > adj_list;

    CHECK_OPEN_HANDLES;

    debug_barrier();

    /* Count Adjacencies */
    long count = 0;
    // for (iter = elements.range.begin(); iter != end; ++iter) {
    //     adj_list.clear();
    //     rval = get_adjacencies(*iter, adj_list);CHK_MB_ERR_0(rval);
    //
    //     if (adj_list.size() > 0)
    //         count += adj_list.size() + 2;
    // }

    // if (count == 0)
    //     return MB_SUCCESS;

    long offset = elements.adj_offset;
    if( elements.max_num_adjs == 0 ) return MB_SUCCESS;

    /* Create data list */
    hid_t table = mhdf_openAdjacency( filePtr, elements.name(), &count, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track( debugTrack, "Adjacencies", count );

    /* Write data */
    wid_t* buffer = (wid_t*)dataBuffer;
    long chunk_size = bufferSize / sizeof( wid_t );
    long num_writes = ( elements.max_num_adjs + chunk_size - 1 ) / chunk_size;
    (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
    count = 0;
    for( iter = elements.range.begin(); iter != end; ++iter )
    {
        adj_list.clear();
        rval = get_adjacencies( *iter, adj_list );
        CHK_MB_ERR_1( rval, table, status );
        if( adj_list.size() == 0 ) continue;

        // If buffer is full, flush it
        if( count + adj_list.size() + 2 > (unsigned long)chunk_size )
        {
            dbgOut.print( 3, " writing adjacency chunk.\n" );
            track.record_io( offset, count );
            mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
            CHK_MHDF_ERR_1( status, table );
            (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );

            offset += count;
            count = 0;
        }

        buffer[count++] = idMap.find( *iter );
        buffer[count++] = adj_list.size();

        assert( adj_list.size() + 2 < (unsigned long)chunk_size );
        memcpy( buffer + count, &adj_list[0], adj_list.size() * sizeof( wid_t ) );
        count += adj_list.size();
    }

    if( count )
    {
        dbgOut.print( 2, " writing final adjacency chunk.\n" );
        mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, table );

        offset += count;
        count = 0;
        --num_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( num_writes > 0 )
        {
            --num_writes;
            assert( writeProp != H5P_DEFAULT );
            dbgOut.print( 2, " writing empty adjacency chunk.\n" );
            mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
            CHK_MHDF_ERR_1( status, table );
        }
    }

    mhdf_closeData( filePtr, table, &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_tag( const TagDesc& tag_data, double* times )
{
    std::string name;
    ErrorCode rval = iFace->tag_get_name( tag_data.tag_id, name );
    if( MB_SUCCESS != rval ) return error( rval );

    CHECK_OPEN_HANDLES;
    debug_barrier();
    dbgOut.tprintf( 1, "Writing tag: \"%s\"\n", name.c_str() );

    int moab_size, elem_size, array_len;
    DataType moab_type;
    mhdf_TagDataType mhdf_type;
    hid_t hdf5_type;
    rval = get_tag_size( tag_data.tag_id, moab_type, moab_size, elem_size, array_len, mhdf_type, hdf5_type );
    if( MB_SUCCESS != rval ) return error( rval );

    CpuTimer timer;
    if( array_len == MB_VARIABLE_LENGTH && tag_data.write_sparse )
    {
        dbgOut.printf( 2, "Writing sparse data for var-len tag: \"%s\"\n", name.c_str() );
        rval = write_var_len_tag( tag_data, name, moab_type, hdf5_type, elem_size );
        times[VARLEN_TAG_TIME] += timer.time_elapsed();
    }
    else
    {
        int data_len = elem_size;
        if( moab_type != MB_TYPE_BIT ) data_len *= array_len;
        if( tag_data.write_sparse )
        {
            dbgOut.printf( 2, "Writing sparse data for tag: \"%s\"\n", name.c_str() );
            rval = write_sparse_tag( tag_data, name, moab_type, hdf5_type, data_len );
            times[SPARSE_TAG_TIME] += timer.time_elapsed();
        }
        for( size_t i = 0; MB_SUCCESS == rval && i < tag_data.dense_list.size(); ++i )
        {
            const ExportSet* set = find( tag_data.dense_list[i] );
            assert( 0 != set );
            debug_barrier();
            dbgOut.printf( 2, "Writing dense data for tag: \"%s\" on group \"%s\"\n", name.c_str(), set->name() );
            subState.start( "writing dense data for tag: ", ( name + ":" + set->name() ).c_str() );
            rval = write_dense_tag( tag_data, *set, name, moab_type, hdf5_type, data_len );
            subState.end( rval );
        }
        times[DENSE_TAG_TIME] += timer.time_elapsed();
    }

    H5Tclose( hdf5_type );
    return MB_SUCCESS == rval ? MB_SUCCESS : error( rval );
}
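// A sketch (illustrative only) of the record layout streamed by
// write_adjacencies() above: each entity with at least one exported adjacency
// contributes { file_id, n, adj_id_1, ..., adj_id_n } to one flat table.
#if 0
static std::vector< unsigned long > adjacency_record_example()
{
    std::vector< unsigned long > table;
    // Hypothetical: entity with file ID 7 adjacent to IDs 2 and 3
    table.push_back( 7 );
    table.push_back( 2 );  // length prefix
    table.push_back( 2 );
    table.push_back( 3 );
    // Hypothetical: entity with file ID 9 adjacent to ID 4
    table.push_back( 9 );
    table.push_back( 1 );
    table.push_back( 4 );
    return table;  // {7,2,2,3, 9,1,4} is what ends up in the "Adjacencies" table
}
#endif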
ErrorCode WriteHDF5::write_sparse_ids( const TagDesc& tag_data, const Range& range, hid_t id_table, size_t table_size,
                                       const char* name )
{
    ErrorCode rval;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    std::string tname( name ? name : "<UNKNOWN TAG?>" );
    tname += " - Ids";
    IODebugTrack track( debugTrack, tname, table_size );

    // Set up data buffer for writing IDs
    size_t chunk_size = bufferSize / sizeof( wid_t );
    wid_t* id_buffer = (wid_t*)dataBuffer;

    // Write IDs of tagged entities.
    long remaining = range.size();
    long offset = tag_data.sparse_offset;
    long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
    if( tag_data.max_num_ents )
    {
        assert( tag_data.max_num_ents >= (unsigned long)remaining );
        num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    Range::const_iterator iter = range.begin();
    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );

        // Write "chunk_size" blocks of data
        long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
        remaining -= count;
        Range::const_iterator stop = iter;
        stop += count;
        Range tmp;
        tmp.merge( iter, stop );
        iter = stop;
        assert( tmp.size() == (unsigned)count );

        rval = range_to_id_list( tmp, id_buffer );
        CHK_MB_ERR_0( rval );

        // Write the data
        dbgOut.print( 3, " writing sparse tag entity chunk.\n" );
        track.record_io( offset, count );
        mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type, id_buffer, writeProp, &status );
        CHK_MHDF_ERR_0( status );

        offset += count;
        --num_writes;
    }  // while (remaining)

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( num_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.print( 3, " writing empty sparse tag entity chunk.\n" );
            mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
        }
    }

    track.all_reduce();
    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_sparse_tag( const TagDesc& tag_data, const std::string& name, DataType mb_data_type,
                                       hid_t value_type, int value_type_size )
{
    ErrorCode rval;
    mhdf_Status status;
    hid_t tables[3];
    long table_size, data_size;

    CHECK_OPEN_HANDLES;

    // Get entities for which to write tag values
    Range range;
    rval = get_sparse_tagged_entities( tag_data, range );
    CHK_MB_ERR_0( rval );

    // Open tables to write info
    mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_size, tables, &status );
    CHK_MHDF_ERR_0( status );
    assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
    // Fixed-length tag
    assert( table_size == data_size );

    // Write IDs for tagged entities
    subState.start( "writing sparse ids for tag: ", name.c_str() );
    rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
    subState.end( rval );
    CHK_MB_ERR_2( rval, tables, status );
    mhdf_closeData( filePtr, tables[0], &status );
    CHK_MHDF_ERR_1( status, tables[1] );

    // Set up data buffer for writing tag values
    IODebugTrack track( debugTrack, name + " Data", data_size );
    subState.start( "writing sparse values for tag: ", name.c_str() );
    rval = write_tag_values( tag_data.tag_id, tables[1], tag_data.sparse_offset, range, mb_data_type, value_type,
                             value_type_size, tag_data.max_num_ents, track );
    subState.end( rval );
    CHK_MB_ERR_0( rval );
    mhdf_closeData( filePtr, tables[1], &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}
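// The sparse-tag file layout produced above, sketched with hypothetical
// values: write_sparse_ids() fills the entity table and write_tag_values()
// fills the value table at the same offsets, so the two stay parallel:
//     entity IDs: {  3,  8, 12 }
//     values:     { 10, 20, 30 }   (an int tag; value i belongs to ID i)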
ErrorCode WriteHDF5::write_var_len_indices( const TagDesc& tag_data, const Range& range, hid_t idx_table,
                                            size_t table_size, int /*type_size*/, const char* name )
{
    ErrorCode rval;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    std::string tname( name ? name : "<UNKNOWN TAG?>" );
    tname += " - End Indices";
    IODebugTrack track( debugTrack, tname, table_size );

    // Set up data buffer for writing indices
    size_t chunk_size = bufferSize / ( std::max( sizeof( void* ), sizeof( long ) ) + sizeof( int ) );
    mhdf_index_t* idx_buffer = (mhdf_index_t*)dataBuffer;
    const void** junk = (const void**)dataBuffer;
    int* size_buffer = (int*)( dataBuffer + chunk_size * std::max( sizeof( void* ), sizeof( mhdf_index_t ) ) );

    // Write end indices for tagged entities.
    long data_offset = tag_data.var_data_offset - 1;  // Offset at which to write data buffer
    size_t remaining = range.size();
    size_t offset = tag_data.sparse_offset;
    size_t num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
    if( tag_data.max_num_ents )
    {
        assert( tag_data.max_num_ents >= (unsigned long)remaining );
        num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    Range::const_iterator iter = range.begin();
    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );

        // Write "chunk_size" blocks of data
        size_t count = remaining > chunk_size ? chunk_size : remaining;
        remaining -= count;
        Range::const_iterator stop = iter;
        stop += count;
        Range tmp;
        tmp.merge( iter, stop );
        iter = stop;
        assert( tmp.size() == (unsigned)count );

        rval = iFace->tag_get_by_ptr( tag_data.tag_id, tmp, junk, size_buffer );
        CHK_MB_ERR_0( rval );

        // Calculate end indices
        dbgOut.print( 3, " writing var-len tag offset chunk.\n" );
        track.record_io( offset, count );
        for( size_t i = 0; i < count; ++i )
        {
            data_offset += size_buffer[i];
            idx_buffer[i] = data_offset;
        }

        // Write
        mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, count, MHDF_INDEX_TYPE, idx_buffer, writeProp, &status );
        CHK_MHDF_ERR_0( status );

        offset += count;
        --num_writes;
    }  // while (remaining)

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( num_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.print( 3, " writing empty var-len tag index chunk.\n" );
            mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
        }
    }

    track.all_reduce();
    return MB_SUCCESS;
}
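// A worked example (illustrative only) of the end-index computation above:
// with var_data_offset == 0 and per-entity value counts {3, 1, 2}, the
// running data_offset yields end indices {2, 3, 5}; entity i's values then
// occupy rows [end[i-1]+1, end[i]] of the value table.
#if 0
static void end_index_example()
{
    const int sizes[3] = { 3, 1, 2 };  // hypothetical value counts
    long end_idx[3];
    long data_offset = 0 - 1;  // var_data_offset of 0, minus one
    for( int i = 0; i < 3; ++i )
    {
        data_offset += sizes[i];
        end_idx[i] = data_offset;
    }
    assert( end_idx[0] == 2 && end_idx[1] == 3 && end_idx[2] == 5 );
}
#endif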
ErrorCode WriteHDF5::write_var_len_data( const TagDesc& tag_data, const Range& range, hid_t table, size_t table_size,
                                         bool handle_tag, hid_t hdf_type, int type_size, const char* name )
{
    ErrorCode rval;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;
    assert( !handle_tag || sizeof( EntityHandle ) == type_size );

    std::string tname( name ? name : "<UNKNOWN TAG?>" );
    tname += " - Values";
    IODebugTrack track( debugTrack, tname, table_size );

    const size_t buffer_size = bufferSize / type_size;

    size_t num_writes = ( table_size + buffer_size - 1 ) / buffer_size;
    if( collectiveIO )
    {
        assert( tag_data.max_num_vals > 0 );
        num_writes = ( tag_data.max_num_vals + buffer_size - 1 ) / buffer_size;
    }

    unsigned char* buffer = (unsigned char*)dataBuffer;
    const void* prev_data = 0;  // Data left over from prev iteration
    size_t prev_len = 0;
    Range::const_iterator iter = range.begin();
    long offset = tag_data.var_data_offset;
    while( prev_data || iter != range.end() )
    {
        size_t count = 0;
        if( prev_data )
        {
            size_t len;
            const void* ptr = prev_data;
            if( prev_len <= buffer_size )
            {
                len = prev_len;
                prev_data = 0;
                prev_len = 0;
            }
            else
            {
                len = buffer_size;
                prev_data = ( (const char*)prev_data ) + buffer_size * type_size;
                prev_len -= buffer_size;
            }

            if( handle_tag )
                convert_handle_tag( (const EntityHandle*)ptr, (EntityHandle*)buffer, len );
            else
                memcpy( buffer, ptr, len * type_size );
            count = len;
        }

        for( ; count < buffer_size && iter != range.end(); ++iter )
        {
            int len;
            const void* ptr;
            rval = iFace->tag_get_by_ptr( tag_data.tag_id, &*iter, 1, &ptr, &len );
            CHK_MB_ERR_0( rval );
            if( len + count > buffer_size )
            {
                prev_len = len + count - buffer_size;
                len = buffer_size - count;
                prev_data = ( (const char*)ptr ) + len * type_size;
            }

            if( handle_tag )
                convert_handle_tag( (const EntityHandle*)ptr, ( (EntityHandle*)buffer ) + count, len );
            else
                memcpy( buffer + count * type_size, ptr, len * type_size );
            count += len;
        }

        track.record_io( offset, count );
        mhdf_writeTagValuesWithOpt( table, offset, count, hdf_type, buffer, writeProp, &status );
        offset += count;
        CHK_MHDF_ERR_0( status );
        --num_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( num_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.print( 3, " writing empty var-len tag data chunk.\n" );
            mhdf_writeTagValuesWithOpt( table, 0, 0, hdf_type, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
        }
    }

    track.all_reduce();
    return MB_SUCCESS;
}
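// A worked example of the carry-over logic above, with hypothetical sizes:
// with room for 4 values per buffer and two entities holding 3 values each,
// the first pass copies entity 1's 3 values plus 1 value of entity 2 and
// points prev_data at the 2 values that did not fit; the next iteration
// drains prev_data first, so the second write holds the remaining 2 values.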
ErrorCode WriteHDF5::write_var_len_tag( const TagDesc& tag_data, const std::string& name, DataType mb_data_type,
                                        hid_t hdf_type, int type_size )
{
    ErrorCode rval;
    mhdf_Status status;
    hid_t tables[3];
    long table_size;
    long data_table_size;

    CHECK_OPEN_HANDLES;

    // Get entities for which to write tag values
    Range range;
    rval = get_sparse_tagged_entities( tag_data, range );
    CHK_MB_ERR_0( rval );

    // Open tables to write info
    mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_table_size, tables, &status );
    CHK_MHDF_ERR_0( status );
    assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );

    // Write IDs for tagged entities
    subState.start( "writing ids for var-len tag: ", name.c_str() );
    rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
    subState.end( rval );
    CHK_MB_ERR_2( rval, tables, status );
    mhdf_closeData( filePtr, tables[0], &status );
    CHK_MHDF_ERR_2( status, tables + 1 );

    // Write end indices for tagged entities
    subState.start( "writing indices for var-len tag: ", name.c_str() );
    rval = write_var_len_indices( tag_data, range, tables[2], table_size, type_size, name.c_str() );
    subState.end( rval );
    CHK_MB_ERR_1( rval, tables[1], status );
    mhdf_closeData( filePtr, tables[2], &status );
    CHK_MHDF_ERR_1( status, tables[1] );

    // Write the actual tag data
    subState.start( "writing values for var-len tag: ", name.c_str() );
    rval = write_var_len_data( tag_data, range, tables[1], data_table_size, mb_data_type == MB_TYPE_HANDLE, hdf_type,
                               type_size, name.c_str() );
    subState.end( rval );
    CHK_MB_ERR_0( rval );
    mhdf_closeData( filePtr, tables[1], &status );
    CHK_MHDF_ERR_0( status );

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::write_dense_tag( const TagDesc& tag_data, const ExportSet& elem_data, const std::string& name,
                                      DataType mb_data_type, hid_t value_type, int value_type_size )
{
    CHECK_OPEN_HANDLES;

    // Open tables to write info
    mhdf_Status status;
    long table_size;
    hid_t table = mhdf_openDenseTagData( filePtr, name.c_str(), elem_data.name(), &table_size, &status );
    CHK_MHDF_ERR_0( status );
    assert( elem_data.range.size() + elem_data.offset <= (unsigned long)table_size );

    IODebugTrack track( debugTrack, name + " " + elem_data.name() + " Data", table_size );
    ErrorCode rval = write_tag_values( tag_data.tag_id, table, elem_data.offset, elem_data.range, mb_data_type,
                                       value_type, value_type_size, elem_data.max_num_ents, track );
    CHK_MB_ERR_0( rval );
    mhdf_closeData( filePtr, table, &status );
    CHK_MHDF_ERR_0( status );

    return MB_SUCCESS;
}
ErrorCode WriteHDF5::write_tag_values( Tag tag_id, hid_t data_table, unsigned long offset_in, const Range& range_in,
                                       DataType mb_data_type, hid_t value_type, int value_type_size,
                                       unsigned long max_num_ents, IODebugTrack& track )
{
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    // Set up data buffer for writing tag values
    size_t chunk_size = bufferSize / value_type_size;
    assert( chunk_size > 0 );
    char* tag_buffer = (char*)dataBuffer;

    // Write the tag values
    size_t remaining = range_in.size();
    size_t offset = offset_in;
    Range::const_iterator iter = range_in.begin();
    long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
    if( max_num_ents )
    {
        assert( max_num_ents >= remaining );
        num_writes = ( max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );

        // Write "chunk_size" blocks of data
        long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
        remaining -= count;
        memset( tag_buffer, 0, count * value_type_size );
        Range::const_iterator stop = iter;
        stop += count;
        Range range;
        range.merge( iter, stop );
        iter = stop;
        assert( range.size() == (unsigned)count );

        ErrorCode rval = iFace->tag_get_data( tag_id, range, tag_buffer );
        CHK_MB_ERR_0( rval );

        // Convert EntityHandles to file ids
        if( mb_data_type == MB_TYPE_HANDLE )
            convert_handle_tag( reinterpret_cast< EntityHandle* >( tag_buffer ),
                                count * value_type_size / sizeof( EntityHandle ) );

        // Write the data
        dbgOut.print( 2, " writing tag value chunk.\n" );
        track.record_io( offset, count );
        assert( value_type > 0 );
        mhdf_writeTagValuesWithOpt( data_table, offset, count, value_type, tag_buffer, writeProp, &status );
        CHK_MHDF_ERR_0( status );

        offset += count;
        --num_writes;
    }  // while (remaining)

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( num_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.print( 2, " writing empty tag value chunk.\n" );
            assert( value_type > 0 );
            mhdf_writeTagValuesWithOpt( data_table, offset, 0, value_type, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
        }
    }

    track.all_reduce();
    return MB_SUCCESS;
}
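// The chunking pattern shared by the writer loops above, reduced to a
// skeleton (illustrative only): with collective I/O every rank must issue the
// same number of write calls, so num_writes is derived from the maximum count
// over all ranks and padded out with zero-length writes.
#if 0
static void chunked_write_skeleton( long remaining, long chunk_size, long max_over_ranks, bool collective )
{
    long num_writes = ( ( collective ? max_over_ranks : remaining ) + chunk_size - 1 ) / chunk_size;
    while( remaining )
    {
        long count = remaining > chunk_size ? chunk_size : remaining;
        remaining -= count;
        // ... fill buffer, then mhdf_write*WithOpt(table, offset, count, ...)
        --num_writes;
    }
    if( collective )
    {
        while( num_writes-- )
        {
            // ... mhdf_write*WithOpt(table, offset, 0, ...): an empty write
            // that keeps this rank participating in the collective operation
        }
    }
}
#endif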
ErrorCode WriteHDF5::write_qa( const std::vector< std::string >& list )
{
    const char* app = "MOAB";
    const char* vers = MOAB_VERSION;
    char date_str[64];
    char time_str[64];

    CHECK_OPEN_HANDLES;

    std::vector< const char* > strs( list.size() ? list.size() : 4 );
    if( list.size() == 0 )
    {
        time_t t = time( NULL );
        tm* lt = localtime( &t );
#ifdef WIN32
        strftime( date_str, sizeof( date_str ), "%m/%d/%y", lt );  // VS 2008 does not support %D
        strftime( time_str, sizeof( time_str ), "%H:%M:%S", lt );  // VS 2008 does not support %T
#else
        strftime( date_str, sizeof( date_str ), "%D", lt );
        strftime( time_str, sizeof( time_str ), "%T", lt );
#endif

        strs[0] = app;
        strs[1] = vers;
        strs[2] = date_str;
        strs[3] = time_str;
    }
    else
    {
        for( unsigned int i = 0; i < list.size(); ++i )
            strs[i] = list[i].c_str();
    }

    mhdf_Status status;
    dbgOut.print( 2, " writing QA history.\n" );
    mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
    CHK_MHDF_ERR_0( status );

    return MB_SUCCESS;
}
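// A worked example of the default QA history written above (the date and
// time values are illustrative): with an empty input list, the four records
// are
//     { "MOAB", MOAB_VERSION, "04/01/04", "12:34:56" }
// i.e. application, version, current date, and current time. A caller-supplied
// list is written verbatim instead.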
/*
ErrorCode WriteHDF5::register_known_tag_types(Interface* iface)
{
  hid_t int4, double16;
  hsize_t dim[1];
  int error = 0;
  ErrorCode rval;

  dim[0] = 4;
  int4 = H5Tarray_create(H5T_NATIVE_INT, 1, dim, NULL);

  dim[0] = 16;
  double16 = H5Tarray_create(H5T_NATIVE_DOUBLE, 1, dim, NULL);

  if (int4 < 0 || double16 < 0)
    error = 1;

  struct { const char* name; hid_t type; } list[] = {
    { GLOBAL_ID_TAG_NAME, H5T_NATIVE_INT },
    { MATERIAL_SET_TAG_NAME, H5T_NATIVE_INT },
    { DIRICHLET_SET_TAG_NAME, H5T_NATIVE_INT },
    { NEUMANN_SET_TAG_NAME, H5T_NATIVE_INT },
    { HAS_MID_NODES_TAG_NAME, int4 },
    { GEOM_DIMENSION_TAG_NAME, H5T_NATIVE_INT },
    { MESH_TRANSFORM_TAG_NAME, double16 },
    { 0, 0 } };

  for (int i = 0; list[i].name; ++i) {
    if (list[i].type < 1) {
      ++error;
      continue;
    }

    Tag handle;

    std::string name("__hdf5_tag_type_");
    name += list[i].name;

    rval = iface->tag_get_handle(name.c_str(), handle);
    if (MB_TAG_NOT_FOUND == rval) {
      rval = iface->tag_create(name.c_str(), sizeof(hid_t), MB_TAG_SPARSE, handle, NULL);
      if (MB_SUCCESS != rval) {
        ++error;
        continue;
      }

      hid_t copy_id = H5Tcopy(list[i].type);
      const EntityHandle mesh = 0;
      rval = iface->tag_set_data(handle, &mesh, 1, &copy_id);
      if (MB_SUCCESS != rval) {
        ++error;
        continue;
      }
    }
  }

  H5Tclose(int4);
  H5Tclose(double16);
  return error ? MB_FAILURE : MB_SUCCESS;
}
*/

ErrorCode WriteHDF5::gather_tags( const Tag* user_tag_list, int num_tags )
{
    ErrorCode result;
    std::vector< Tag > tag_list;
    std::vector< Tag >::iterator t_itor;
    Range range;

    // Get list of Tags to write
    result = writeUtil->get_tag_list( tag_list, user_tag_list, num_tags );
    CHK_MB_ERR_0( result );

    // Build the list of tag descriptors
    for( t_itor = tag_list.begin(); t_itor != tag_list.end(); ++t_itor )
    {
        // Add tag to export list
        TagDesc tag_data;
        tag_data.write_sparse = false;
        tag_data.tag_id = *t_itor;
        tag_data.sparse_offset = 0;
        tag_data.var_data_offset = 0;
        tag_data.max_num_ents = 0;
        tag_data.max_num_vals = 0;
        tagList.push_back( tag_data );
    }

    return MB_SUCCESS;
}

// If we support parallel, then this function will have been
// overridden with an alternate version in WriteHDF5Parallel
// that supports parallel I/O. If we're here,
// then MOAB was not built with support for parallel HDF5 I/O.
ErrorCode WriteHDF5::parallel_create_file( const char* /* filename */, bool /* overwrite */,
                                           const std::vector< std::string >& /* qa_records */,
                                           const FileOptions& /* opts */, const Tag* /* tag_list */, int /* num_tags */,
                                           int /* dimension */, double* /* times */ )
{
    MB_SET_ERR( MB_NOT_IMPLEMENTED, "WriteHDF5 does not support parallel writing" );
}
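// A minimal usage sketch (not part of this file): the writer is normally
// reached through the Interface, which dispatches ".h5m" file names to
// WriteHDF5. A serial build ends up in serial_create_file() below; asking a
// non-parallel build for parallel output fails in parallel_create_file()
// above. The output file name is hypothetical.
#if 0
#include "moab/Core.hpp"

int write_example()
{
    moab::Core mb;
    // ... load or construct a mesh ...
    moab::ErrorCode rval = mb.write_file( "output.h5m" );
    return ( moab::MB_SUCCESS == rval ) ? 0 : 1;
}
#endif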
ErrorCode WriteHDF5::serial_create_file( const char* filename, bool overwrite,
                                         const std::vector< std::string >& qa_records, const Tag* user_tag_list,
                                         int num_user_tags, int dimension )
{
    long first_id;
    mhdf_Status status;
    hid_t handle;
    std::list< ExportSet >::iterator ex_itor;
    ErrorCode rval;

    topState.start( "creating file" );

    const char* type_names[MBMAXTYPE];
    memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
    for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
        type_names[i] = CN::EntityTypeName( i );

    // Create the file
    filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
    CHK_MHDF_ERR_0( status );
    assert( !!filePtr );

    rval = write_qa( qa_records );
    CHK_MB_ERR_0( rval );

    // Create node table
    if( nodeSet.range.size() )
    {
        nodeSet.total_num_ents = nodeSet.range.size();
        handle = mhdf_createNodeCoords( filePtr, dimension, nodeSet.total_num_ents, &first_id, &status );
        CHK_MHDF_ERR_0( status );
        mhdf_closeData( filePtr, handle, &status );
        CHK_MHDF_ERR_0( status );
        nodeSet.first_id = (wid_t)first_id;
        rval = assign_ids( nodeSet.range, nodeSet.first_id );
        CHK_MB_ERR_0( rval );
    }
    else
    {
        nodeSet.first_id = std::numeric_limits< wid_t >::max();
    }
    nodeSet.offset = 0;

    // Create element tables
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        ex_itor->total_num_ents = ex_itor->range.size();
        rval = create_elem_table( *ex_itor, ex_itor->total_num_ents, first_id );
        CHK_MB_ERR_0( rval );

        ex_itor->first_id = (wid_t)first_id;
        ex_itor->offset = 0;
        rval = assign_ids( ex_itor->range, ex_itor->first_id );
        CHK_MB_ERR_0( rval );
    }

    // Create set tables
    writeSets = !setSet.range.empty();
    if( writeSets )
    {
        long contents_len, children_len, parents_len;

        setSet.total_num_ents = setSet.range.size();
        setSet.max_num_ents = setSet.total_num_ents;
        rval = create_set_meta( setSet.total_num_ents, first_id );
        CHK_MB_ERR_0( rval );

        setSet.first_id = (wid_t)first_id;
        rval = assign_ids( setSet.range, setSet.first_id );
        CHK_MB_ERR_0( rval );

        rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
        CHK_MB_ERR_0( rval );

        rval = create_set_tables( contents_len, children_len, parents_len );
        CHK_MB_ERR_0( rval );

        setSet.offset = 0;
        setContentsOffset = 0;
        setChildrenOffset = 0;
        setParentsOffset = 0;
        writeSetContents = !!contents_len;
        writeSetChildren = !!children_len;
        writeSetParents = !!parents_len;

        maxNumSetContents = contents_len;
        maxNumSetChildren = children_len;
        maxNumSetParents = parents_len;
    }  // if (!setSet.range.empty())

    // Create the adjacency tables after the set tables, because sets do not
    // yet have IDs and some entities may be adjacent to sets (Exodus?).
    // Create node adjacency table
    wid_t num_adjacencies;
#ifdef MB_H5M_WRITE_NODE_ADJACENCIES
    rval = count_adjacencies( nodeSet.range, num_adjacencies );
    CHK_MB_ERR_0( rval );
    nodeSet.adj_offset = 0;
    nodeSet.max_num_adjs = num_adjacencies;
    if( num_adjacencies > 0 )
    {
        handle = mhdf_createAdjacency( filePtr, mhdf_node_type_handle(), num_adjacencies, &status );
        CHK_MHDF_ERR_0( status );
        mhdf_closeData( filePtr, handle, &status );
    }
#endif

    // Create element adjacency tables
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        rval = count_adjacencies( ex_itor->range, num_adjacencies );
        CHK_MB_ERR_0( rval );

        ex_itor->adj_offset = 0;
        ex_itor->max_num_adjs = num_adjacencies;
        if( num_adjacencies > 0 )
        {
            handle = mhdf_createAdjacency( filePtr, ex_itor->name(), num_adjacencies, &status );
            CHK_MHDF_ERR_0( status );
            mhdf_closeData( filePtr, handle, &status );
        }
    }

    dbgOut.tprint( 1, "Gathering Tags\n" );

    rval = gather_tags( user_tag_list, num_user_tags );
    CHK_MB_ERR_0( rval );

    // Create the tags and tag data tables
    std::list< TagDesc >::iterator tag_iter = tagList.begin();
    for( ; tag_iter != tagList.end(); ++tag_iter )
    {
        // As we haven't yet added any ExportSets for which to write
        // dense tag data to the TagDesc struct pointed to by
        // tag_iter, this call will initially return all tagged entities
        // in the set of entities to be written.
        Range range;
        rval = get_sparse_tagged_entities( *tag_iter, range );
        CHK_MB_ERR_0( rval );

        int s;
        bool var_len = ( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) );

        // Determine which ExportSets we want to write dense
        // data for. We never write dense data for variable-length
        // tag data.
        if( !var_len && writeTagDense )
        {
            // Check whether we want to write this tag in dense format even if
            // not all of the entities have a tag value. The criteria for this
            // are that the tag be dense, have a default value, and have at
            // least 2/3 of the entities tagged.
            bool prefer_dense = false;
            TagType type;
            rval = iFace->tag_get_type( tag_iter->tag_id, type );
            CHK_MB_ERR_0( rval );
            if( MB_TAG_DENSE == type )
            {
                const void* defval = 0;
                rval = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
                if( MB_SUCCESS == rval ) prefer_dense = true;
            }

            if( check_dense_format_tag( nodeSet, range, prefer_dense ) )
            {
                range -= nodeSet.range;
                tag_iter->dense_list.push_back( nodeSet );
            }

            std::list< ExportSet >::const_iterator ex = exportList.begin();
            for( ; ex != exportList.end(); ++ex )
            {
                if( check_dense_format_tag( *ex, range, prefer_dense ) )
                {
                    range -= ex->range;
                    tag_iter->dense_list.push_back( *ex );
                }
            }

            if( check_dense_format_tag( setSet, range, prefer_dense ) )
            {
                range -= setSet.range;
                tag_iter->dense_list.push_back( setSet );
            }
        }

        tag_iter->write_sparse = !range.empty();

        unsigned long var_len_total = 0;
        if( var_len )
        {
            rval = get_tag_data_length( *tag_iter, range, var_len_total );
            CHK_MB_ERR_0( rval );
        }

        rval = create_tag( *tag_iter, range.size(), var_len_total );
        CHK_MB_ERR_0( rval );
    }  // for (tags)

    topState.end();
    return MB_SUCCESS;
}

bool WriteHDF5::check_dense_format_tag( const ExportSet& ents, const Range& all_tagged, bool prefer_dense )
{
    // If there are no tagged entities, then don't write anything
    if( ents.range.empty() ) return false;

    // If all of the entities are tagged, then write in dense format
    if( all_tagged.contains( ents.range ) ) return true;

    // Unless asked for a more lenient choice of dense format, return false
    if( !prefer_dense ) return false;

    // If we're being lenient about choosing dense format, then
    // return true if at least 2/3 of the entities are tagged.
    Range xsect = intersect( ents.range, all_tagged );
    if( 3 * xsect.size() >= 2 * ents.range.size() ) return true;

    return false;
}
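// A worked example of the 2/3 rule above, with hypothetical counts: for an
// export group of 120 entities with 90 of them tagged, 3 * 90 = 270 >=
// 2 * 120 = 240, so the tag is written densely (untagged entities get the
// default value); with only 70 tagged, 210 < 240 and the tag stays sparse.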
ErrorCode WriteHDF5::count_adjacencies( const Range& set, wid_t& result )
{
    ErrorCode rval;
    std::vector< wid_t > adj_list;
    Range::const_iterator iter = set.begin();
    const Range::const_iterator end = set.end();
    result = 0;
    for( ; iter != end; ++iter )
    {
        adj_list.clear();
        rval = get_adjacencies( *iter, adj_list );
        CHK_MB_ERR_0( rval );

        if( adj_list.size() > 0 ) result += 2 + adj_list.size();
    }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::create_elem_table( const ExportSet& block, long num_entities, long& first_id_out )
{
    mhdf_Status status;
    hid_t handle;

    CHECK_OPEN_HANDLES;

    mhdf_addElement( filePtr, block.name(), block.type, &status );
    CHK_MHDF_ERR_0( status );

    handle = mhdf_createConnectivity( filePtr, block.name(), block.num_nodes, num_entities, &first_id_out, &status );
    CHK_MHDF_ERR_0( status );
    mhdf_closeData( filePtr, handle, &status );
    CHK_MHDF_ERR_0( status );

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::count_set_size( const Range& sets, long& contents_length_out, long& children_length_out,
                                     long& parents_length_out )
{
    ErrorCode rval;
    Range set_contents;
    long contents_length_set, children_length_set, parents_length_set;
    unsigned long flags;
    std::vector< wid_t > set_contents_ids;
    std::vector< SpecialSetData >::const_iterator si = specialSets.begin();

    contents_length_out = 0;
    children_length_out = 0;
    parents_length_out = 0;

    for( Range::const_iterator iter = sets.begin(); iter != sets.end(); ++iter )
    {
        while( si != specialSets.end() && si->setHandle < *iter )
            ++si;

        if( si != specialSets.end() && si->setHandle == *iter )
        {
            contents_length_out += si->contentIds.size();
            children_length_out += si->childIds.size();
            parents_length_out += si->parentIds.size();
            ++si;
            continue;
        }

        rval = get_set_info( *iter, contents_length_set, children_length_set, parents_length_set, flags );
        CHK_MB_ERR_0( rval );

        // Check if we can and should compress as ranges
        if( !( flags & MESHSET_ORDERED ) && contents_length_set )
        {
            set_contents.clear();
            rval = iFace->get_entities_by_handle( *iter, set_contents, false );
            CHK_MB_ERR_0( rval );

            bool blocked_list;
            rval = range_to_blocked_list( set_contents, set_contents_ids, blocked_list );
            CHK_MB_ERR_0( rval );

            if( blocked_list )
            {
                assert( set_contents_ids.size() % 2 == 0 );
                contents_length_set = set_contents_ids.size();
            }
        }

        contents_length_out += contents_length_set;
        children_length_out += children_length_set;
        parents_length_out += parents_length_set;
    }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::create_set_meta( long num_sets, long& first_id_out )
{
    hid_t handle;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    handle = mhdf_createSetMeta( filePtr, num_sets, &first_id_out, &status );
    CHK_MHDF_ERR_0( status );
    mhdf_closeData( filePtr, handle, &status );

    return MB_SUCCESS;
}
WriteHDF5::SpecialSetData* WriteHDF5::find_set_data( EntityHandle h )
{
    SpecialSetData tmp;
    tmp.setHandle = h;
    std::vector< SpecialSetData >::iterator i;
    i = std::lower_bound( specialSets.begin(), specialSets.end(), tmp, SpecSetLess() );
    return ( i == specialSets.end() || i->setHandle != h ) ? 0 : &*i;
}

ErrorCode WriteHDF5::create_set_tables( long num_set_contents, long num_set_children, long num_set_parents )
{
    hid_t handle;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    if( num_set_contents > 0 )
    {
        handle = mhdf_createSetData( filePtr, num_set_contents, &status );
        CHK_MHDF_ERR_0( status );
        mhdf_closeData( filePtr, handle, &status );
    }

    if( num_set_children > 0 )
    {
        handle = mhdf_createSetChildren( filePtr, num_set_children, &status );
        CHK_MHDF_ERR_0( status );
        mhdf_closeData( filePtr, handle, &status );
    }

    if( num_set_parents > 0 )
    {
        handle = mhdf_createSetParents( filePtr, num_set_parents, &status );
        CHK_MHDF_ERR_0( status );
        mhdf_closeData( filePtr, handle, &status );
    }

    return MB_SUCCESS;
}
ErrorCode WriteHDF5::get_tag_size( Tag tag, DataType& moab_type, int& num_bytes, int& type_size, int& array_length,
                                   mhdf_TagDataType& file_type, hid_t& hdf_type )
{
    ErrorCode rval;
    Tag type_handle;
    std::string tag_name, tag_type_name;

    CHECK_OPEN_HANDLES;

    // We return NULL for hdf_type if it can be determined from
    // the file_type. The only case where it is non-zero is
    // if the user specified a specific type via a mesh tag.
    hdf_type = (hid_t)0;
    bool close_hdf_type = false;

    rval = iFace->tag_get_data_type( tag, moab_type );
    CHK_MB_ERR_0( rval );
    rval = iFace->tag_get_length( tag, array_length );
    if( MB_VARIABLE_DATA_LENGTH == rval )
    {
        array_length = MB_VARIABLE_LENGTH;
    }
    else if( MB_SUCCESS != rval )
        return error( rval );
    rval = iFace->tag_get_bytes( tag, num_bytes );
    if( MB_VARIABLE_DATA_LENGTH == rval )
        num_bytes = MB_VARIABLE_LENGTH;
    else if( MB_SUCCESS != rval )
        return error( rval );

    switch( moab_type )
    {
        case MB_TYPE_INTEGER:
            type_size = sizeof( int );
            file_type = mhdf_INTEGER;
            hdf_type = H5T_NATIVE_INT;
            close_hdf_type = false;
            break;
        case MB_TYPE_DOUBLE:
            type_size = sizeof( double );
            file_type = mhdf_FLOAT;
            hdf_type = H5T_NATIVE_DOUBLE;
            close_hdf_type = false;
            break;
        case MB_TYPE_BIT:
            type_size = sizeof( bool );
            file_type = mhdf_BITFIELD;
            assert( array_length <= 8 );
            hdf_type = H5Tcopy( H5T_NATIVE_B8 );
            H5Tset_precision( hdf_type, array_length );
            close_hdf_type = true;
            break;
        case MB_TYPE_HANDLE:
            type_size = sizeof( EntityHandle );
            file_type = mhdf_ENTITY_ID;
            hdf_type = id_type;
            close_hdf_type = false;
            break;
        case MB_TYPE_OPAQUE:
            file_type = mhdf_OPAQUE;
            rval = iFace->tag_get_name( tag, tag_name );
            CHK_MB_ERR_0( rval );
            tag_type_name = "__hdf5_tag_type_";
            tag_type_name += tag_name;
            rval = iFace->tag_get_handle( tag_type_name.c_str(), 0, MB_TYPE_OPAQUE, type_handle, MB_TAG_ANY );
            if( MB_TAG_NOT_FOUND == rval )
            {
                if( num_bytes == MB_VARIABLE_LENGTH )
                    type_size = 1;
                else
                    type_size = num_bytes;
                hdf_type = H5Tcreate( H5T_OPAQUE, type_size );
                close_hdf_type = true;
            }
            else if( MB_SUCCESS == rval )
            {
                int hsize;
                rval = iFace->tag_get_bytes( type_handle, hsize );
                if( hsize != sizeof( hid_t ) ) return error( MB_FAILURE );

                const EntityHandle root = 0;
                rval = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
                if( rval != MB_SUCCESS ) return error( rval );

                type_size = H5Tget_size( hdf_type );
                if( type_size != num_bytes ) return error( MB_FAILURE );

                close_hdf_type = false;
            }
            else
                return error( rval );
            num_bytes = array_length;
            array_length = ( num_bytes == MB_VARIABLE_LENGTH ) ? MB_VARIABLE_LENGTH : 1;
            break;
        default:
            break;
    }

    assert( num_bytes == MB_VARIABLE_LENGTH || ( moab_type == MB_TYPE_BIT && num_bytes == 1 ) ||
            array_length * type_size == num_bytes );

    if( num_bytes == MB_VARIABLE_LENGTH )
    {
        array_length = MB_VARIABLE_LENGTH;
        if( !close_hdf_type )
        {
            hdf_type = H5Tcopy( hdf_type );
            // close_hdf_type = true;
        }
    }
    else if( array_length > 1 && moab_type != MB_TYPE_BIT )
    {
        hsize_t len = array_length;
#if defined( H5Tarray_create_vers ) && ( H5Tarray_create_vers > 1 )
        hid_t temp_id = H5Tarray_create2( hdf_type, 1, &len );
#else
        hid_t temp_id = H5Tarray_create( hdf_type, 1, &len, NULL );
#endif
        if( close_hdf_type ) H5Tclose( hdf_type );
        hdf_type = temp_id;
    }
    else if( !close_hdf_type )
    {
        hdf_type = H5Tcopy( hdf_type );
        // close_hdf_type = true;
    }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::get_tag_data_length( const TagDesc& tag_info, const Range& range, unsigned long& result )
{
    ErrorCode rval;
    result = 0;

    // Split buffer into two pieces, one for pointers and one for sizes
    size_t step, remaining;
    step = bufferSize / ( sizeof( int ) + sizeof( void* ) );
    const void** ptr_buffer = reinterpret_cast< const void** >( dataBuffer );
    int* size_buffer = reinterpret_cast< int* >( ptr_buffer + step );
    Range subrange;
    Range::const_iterator iter = range.begin();
    for( remaining = range.size(); remaining >= step; remaining -= step )
    {
        // Get subset of range containing 'count' entities
        Range::const_iterator end = iter;
        end += step;
        subrange.clear();
        subrange.merge( iter, end );
        iter = end;
        // Get tag sizes for entities
        rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
        if( MB_SUCCESS != rval ) return error( rval );
        // Sum lengths
        for( size_t i = 0; i < step; ++i )
            result += size_buffer[i];
    }
    // Process remaining
    subrange.clear();
    subrange.merge( iter, range.end() );
    assert( subrange.size() == remaining );
    rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
    if( MB_SUCCESS != rval ) return error( rval );
    for( size_t i = 0; i < remaining; ++i )
        result += size_buffer[i];

    return MB_SUCCESS;
}
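// A worked example of the mapping above, assuming a fixed-length tag storing
// three doubles per entity: tag_get_bytes() yields num_bytes == 24 and
// tag_get_length() yields array_length == 3, so moab_type == MB_TYPE_DOUBLE,
// type_size == sizeof(double) == 8, file_type == mhdf_FLOAT, and hdf_type
// becomes an H5Tarray_create2(H5T_NATIVE_DOUBLE, 1, {3}) array type, which
// the caller must eventually release with H5Tclose().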
ErrorCode WriteHDF5::create_tag( const TagDesc& tag_data, unsigned long num_sparse_entities,
                                 unsigned long data_table_size )
{
    TagType mb_storage;
    DataType mb_type;
    mhdf_TagDataType mhdf_type;
    int tag_bytes, type_size, num_vals, storage;
    hid_t hdf_type = (hid_t)0;
    hid_t handles[3];
    std::string tag_name;
    ErrorCode rval;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    // Get tag properties
    rval = iFace->tag_get_type( tag_data.tag_id, mb_storage );
    CHK_MB_ERR_0( rval );
    switch( mb_storage )
    {
        case MB_TAG_DENSE:
            storage = mhdf_DENSE_TYPE;
            break;
        case MB_TAG_SPARSE:
            storage = mhdf_SPARSE_TYPE;
            break;
        case MB_TAG_BIT:
            storage = mhdf_BIT_TYPE;
            break;
        case MB_TAG_MESH:
            storage = mhdf_MESH_TYPE;
            break;
        default:
            return error( MB_FAILURE );
    }
    rval = iFace->tag_get_name( tag_data.tag_id, tag_name );
    CHK_MB_ERR_0( rval );
    rval = get_tag_size( tag_data.tag_id, mb_type, tag_bytes, type_size, num_vals, mhdf_type, hdf_type );
    CHK_MB_ERR_0( rval );

    // Get default value
    const void *def_value, *mesh_value;
    int def_val_len, mesh_val_len;
    rval = iFace->tag_get_default_value( tag_data.tag_id, def_value, def_val_len );
    if( MB_ENTITY_NOT_FOUND == rval )
    {
        def_value = 0;
        def_val_len = 0;
    }
    else if( MB_SUCCESS != rval )
    {
        H5Tclose( hdf_type );
        return error( rval );
    }

    // Get mesh value
    unsigned char byte;
    const EntityHandle root = 0;
    if( mb_storage == MB_TAG_BIT )
    {
        rval = iFace->tag_get_data( tag_data.tag_id, &root, 1, &byte );
        mesh_value = &byte;
        mesh_val_len = 1;
    }
    else
    {
        rval = iFace->tag_get_by_ptr( tag_data.tag_id, &root, 1, &mesh_value, &mesh_val_len );
    }
    if( MB_TAG_NOT_FOUND == rval )
    {
        mesh_value = 0;
        mesh_val_len = 0;
    }
    else if( MB_SUCCESS != rval )
    {
        H5Tclose( hdf_type );
        return error( rval );
    }

    // For handle-type tags, need to convert from handles to file ids
    if( MB_TYPE_HANDLE == mb_type )
    {
        // Make sure there's room in the buffer for both
        assert( ( def_val_len + mesh_val_len ) * sizeof( long ) < (size_t)bufferSize );

        // Convert default value
        if( def_value )
        {
            memcpy( dataBuffer, def_value, def_val_len * sizeof( EntityHandle ) );
            convert_handle_tag( reinterpret_cast< EntityHandle* >( dataBuffer ), def_val_len );
            def_value = dataBuffer;
        }

        // Convert mesh value
        if( mesh_value )
        {
            EntityHandle* ptr = reinterpret_cast< EntityHandle* >( dataBuffer ) + def_val_len;
            memcpy( ptr, mesh_value, mesh_val_len * sizeof( EntityHandle ) );
            if( convert_handle_tag( ptr, mesh_val_len ) )
                mesh_value = ptr;
            else
                mesh_value = 0;
        }
    }

    if( MB_VARIABLE_LENGTH != tag_bytes )
    {
        // Write the tag description to the file
        mhdf_createTag( filePtr, tag_name.c_str(), mhdf_type, num_vals, storage, def_value, mesh_value, hdf_type,
                        mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
        CHK_MHDF_ERR_0( status );
        H5Tclose( hdf_type );

        // Create empty table for tag data
        if( num_sparse_entities )
        {
            mhdf_createSparseTagData( filePtr, tag_name.c_str(), num_sparse_entities, handles, &status );
            CHK_MHDF_ERR_0( status );
            mhdf_closeData( filePtr, handles[0], &status );
            mhdf_closeData( filePtr, handles[1], &status );
        }

        for( size_t i = 0; i < tag_data.dense_list.size(); ++i )
        {
            const ExportSet* ex = find( tag_data.dense_list[i] );
            assert( 0 != ex );
            handles[0] = mhdf_createDenseTagData( filePtr, tag_name.c_str(), ex->name(), ex->total_num_ents, &status );
            CHK_MHDF_ERR_0( status );
            mhdf_closeData( filePtr, handles[0], &status );
        }
    }
    else
    {
        mhdf_createVarLenTag( filePtr, tag_name.c_str(), mhdf_type, storage, def_value, def_val_len, mesh_value,
                              mesh_val_len, hdf_type, mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
        CHK_MHDF_ERR_0( status );
        H5Tclose( hdf_type );

        // Create empty table for tag data
        if( num_sparse_entities )
        {
            mhdf_createVarLenTagData( filePtr, tag_name.c_str(), num_sparse_entities, data_table_size, handles,
                                      &status );
            CHK_MHDF_ERR_0( status );
            mhdf_closeData( filePtr, handles[0], &status );
            mhdf_closeData( filePtr, handles[1], &status );
            mhdf_closeData( filePtr, handles[2], &status );
        }
    }

    return MB_SUCCESS;
}

ErrorCode WriteHDF5::get_num_sparse_tagged_entities( const TagDesc& tag, size_t& count )
{
    Range tmp;
    ErrorCode rval = get_sparse_tagged_entities( tag, tmp );
    count = tmp.size();
    return rval;
}

ErrorCode WriteHDF5::get_sparse_tagged_entities( const TagDesc& tag, Range& results )
{
    results.clear();
    if( !tag.have_dense( setSet ) ) results.merge( setSet.range );
    std::list< ExportSet >::reverse_iterator e;
    for( e = exportList.rbegin(); e != exportList.rend(); ++e )
    {
        if( !tag.have_dense( *e ) ) results.merge( e->range );
    }
    if( !tag.have_dense( nodeSet ) ) results.merge( nodeSet.range );
    if( results.empty() ) return MB_SUCCESS;

    return iFace->get_entities_by_type_and_tag( 0, MBMAXTYPE, &tag.tag_id, 0, 1, results, Interface::INTERSECT );
}

void WriteHDF5::get_write_entities( Range& range )
{
    range.clear();
    range.merge( setSet.range );
    std::list< ExportSet >::reverse_iterator e;
    for( e = exportList.rbegin(); e != exportList.rend(); ++e )
        range.merge( e->range );
    range.merge( nodeSet.range );
}

void WriteHDF5::print_id_map() const
{
    print_id_map( std::cout, "" );
}

void WriteHDF5::print_id_map( std::ostream& s, const char* pfx ) const
{
    RangeMap< EntityHandle, wid_t >::const_iterator i;
    for( i = idMap.begin(); i != idMap.end(); ++i )
    {
        const char* n1 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin ) );
        EntityID id = ID_FROM_HANDLE( i->begin );
        if( 1 == i->count )
        {
            s << pfx << n1 << " " << id << " -> " << i->value << std::endl;
        }
        else
        {
            const char* n2 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin + i->count - 1 ) );
            if( n1 == n2 )
            {
                s << pfx << n1 << " " << id << "-" << id + i->count - 1 << " -> " << i->value << "-"
                  << i->value + i->count - 1 << std::endl;
            }
            else
            {
                s << pfx << n1 << " " << id << "-" << n1 << " " << ID_FROM_HANDLE( i->begin + i->count - 1 ) << " -> "
                  << i->value << "-" << i->value + i->count - 1 << std::endl;
            }
        }
    }
}

void WriteHDF5::print_times( const double* t ) const
{
    std::cout << "WriteHDF5:           " << t[TOTAL_TIME] << std::endl
              << "  gather mesh:       " << t[GATHER_TIME] << std::endl
              << "  create file:       " << t[CREATE_TIME] << std::endl
              << "    create nodes:    " << t[CREATE_NODE_TIME] << std::endl
              << "    negotiate types: " << t[NEGOTIATE_TYPES_TIME] << std::endl
              << "    create elem:     " << t[CREATE_ELEM_TIME] << std::endl
              << "    file id exch:    " << t[FILEID_EXCHANGE_TIME] << std::endl
              << "    create adj:      " << t[CREATE_ADJ_TIME] << std::endl
              << "    create set:      " << t[CREATE_SET_TIME] << std::endl
              << "      shared ids:    " << t[SHARED_SET_IDS] << std::endl
              << "      shared data:   " << t[SHARED_SET_CONTENTS] << std::endl
              << "      set offsets:   " << t[SET_OFFSET_TIME] << std::endl
              << "    create tags:     " << t[CREATE_TAG_TIME] << std::endl
              << "  coordinates:       " << t[COORD_TIME] << std::endl
              << "  connectivity:      " << t[CONN_TIME] << std::endl
              << "  sets:              " << t[SET_TIME] << std::endl
              << "    set descrip:     " << t[SET_META] << std::endl
              << "    set content:     " << t[SET_CONTENT] << std::endl
              << "    set parent:      " << t[SET_PARENT] << std::endl
              << "    set child:       " << t[SET_CHILD] << std::endl
              << "  adjacencies:       " << t[ADJ_TIME] << std::endl
              << "  tags:              " << t[TAG_TIME] << std::endl
              << "    dense data:      " << t[DENSE_TAG_TIME] << std::endl
              << "    sparse data:     " << t[SPARSE_TAG_TIME] << std::endl
              << "    var-len data:    " << t[VARLEN_TAG_TIME] << std::endl;
}

}  // namespace moab