MOAB: Mesh Oriented datABase  (version 5.2.1)
WriteHDF5.cpp
Go to the documentation of this file.
00001 /**
00002  * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00003  * storing and accessing finite element mesh data.
00004  *
00005  * Copyright 2004 Sandia Corporation.  Under the terms of Contract
00006  * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00007  * retains certain rights in this software.
00008  *
00009  * This library is free software; you can redistribute it and/or
00010  * modify it under the terms of the GNU Lesser General Public
00011  * License as published by the Free Software Foundation; either
00012  * version 2.1 of the License, or (at your option) any later version.
00013  *
00014  */
00015 
00016 //-------------------------------------------------------------------------
00017 // Filename      : WriteHDF5.cpp
00018 //
00019 // Purpose       : TSTT HDF5 Writer
00020 //
00021 // Special Notes : WriteSLAC used as template for this
00022 //
00023 // Creator       : Jason Kraftcheck
00024 //
00025 // Creation Date : 04/01/04
00026 //-------------------------------------------------------------------------
00027 
00028 #include <assert.h>
00029 #if defined( _WIN32 )
00030 typedef int id_t;
00031 #elif defined( __MINGW32__ )
00032 #include <sys/time.h>
00033 #endif
00034 #include <time.h>
00035 #include <stdlib.h>
00036 #include <string.h>
00037 #include <stdarg.h>
00038 #include <limits>
00039 #include <cstdio>
00040 #include <iostream>
00041 #include "WriteHDF5.hpp"
00042 #include <H5Tpublic.h>
00043 #include <H5Ppublic.h>
00044 #include <H5Epublic.h>
00045 #include "moab/Interface.hpp"
00046 #include "Internals.hpp"
00047 #include "MBTagConventions.hpp"
00048 #include "moab/CN.hpp"
00049 #include "moab/FileOptions.hpp"
00050 #include "moab/CpuTimer.hpp"
00051 #include "IODebugTrack.hpp"
00052 #include "mhdf.h"
00053 
00054 #ifndef MOAB_HAVE_HDF5
00055 #error Attempt to compile WriteHDF5 with HDF5 support disabled
00056 #endif
00057 
00058 #undef BLOCKED_COORD_IO
00059 
00060 #ifdef MOAB_HAVE_VALGRIND
00061 #include <valgrind/memcheck.h>
00062 #else
00063 #ifndef VALGRIND_CHECK_MEM_IS_DEFINED
00064 #define VALGRIND_CHECK_MEM_IS_DEFINED( a, b ) ( (void)0 )
00065 #endif
00066 #ifndef VALGRIND_CHECK_MEM_IS_ADDRESSABLE
00067 #define VALGRIND_CHECK_MEM_IS_ADDRESSABLE( a, b ) ( (void)0 )
00068 #endif
00069 #ifndef VALGRIND_MAKE_MEM_UNDEFINED
00070 #define VALGRIND_MAKE_MEM_UNDEFINED( a, b ) ( (void)0 )
00071 #endif
00072 #endif
00073 
00074 namespace moab
00075 {
00076 
// Mark the contents of a vector as undefined for valgrind's memcheck.
// No-op unless MOAB_HAVE_VALGRIND is defined (see macro stubs above).
template < typename T >
inline void VALGRIND_MAKE_VEC_UNDEFINED( std::vector< T >& v )
{
    // Guard the empty case: v[0] on an empty vector is undefined behavior.
    // Use data() so no element access is required at all.
    if( !v.empty() ) (void)VALGRIND_MAKE_MEM_UNDEFINED( v.data(), v.size() * sizeof( T ) );
}
00082 
00083 #define WRITE_HDF5_BUFFER_SIZE ( 40 * 1024 * 1024 )
00084 
00085 static hid_t get_id_type()
00086 {
00087     if( 8 == sizeof( WriteHDF5::wid_t ) )
00088     {
00089         if( 8 == sizeof( long ) )
00090             return H5T_NATIVE_ULONG;
00091         else
00092             return H5T_NATIVE_UINT64;
00093     }
00094     else if( 4 == sizeof( WriteHDF5::wid_t ) )
00095     {
00096         if( 4 == sizeof( int ) )
00097             return H5T_NATIVE_UINT;
00098         else
00099             return H5T_NATIVE_UINT32;
00100     }
00101     else
00102     {
00103         assert( 0 );
00104         return (hid_t)-1;
00105     }
00106 }
00107 
00108 // This is the HDF5 type used to store file IDs
00109 const hid_t WriteHDF5::id_type = get_id_type();
00110 
00111 // This function doesn't do anything useful. It's just a nice
00112 // place to set a break point to determine why the writer fails.
static inline ErrorCode error( ErrorCode rval )
{
    // Intentional pass-through; see the comment above — this exists only as
    // a convenient breakpoint location for diagnosing writer failures.
    return rval;
}
00117 
00118 // Call \c error function during HDF5 library errors to make
00119 // it easier to trap such errors in the debugger. This function
00120 // gets registered with the HDF5 library as a callback. It
00121 // works the same as the default (H5Eprint), except that it
00122 // also calls the \c error function as a no-op.
#if defined( H5E_auto_t_vers ) && H5E_auto_t_vers > 1
// HDF5 1.8+ callback signature: also receives the error-stack id.
static herr_t handle_hdf5_error( hid_t stack, void* data )
{
    // 'data' is the handler that was registered before ours was installed
    // (saved in init()); chain to it so default error printing still occurs.
    WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
    herr_t result                  = 0;
    if( h->func ) result = ( *h->func )( stack, h->data );
    // No-op call providing a debugger breakpoint on any HDF5 error.
    error( MB_FAILURE );
    return result;
}
#else
// Pre-1.8 HDF5 callback signature (no error-stack argument).
static herr_t handle_hdf5_error( void* data )
{
    // Chain to the previously registered handler, then hit the breakpoint hook.
    WriteHDF5::HDF5ErrorHandler* h = reinterpret_cast< WriteHDF5::HDF5ErrorHandler* >( data );
    herr_t result                  = 0;
    if( h->func ) result = ( *h->func )( h->data );
    error( MB_FAILURE );
    return result;
}
#endif
00142 
00143 // Some macros to handle error checking. The
00144 // CHK_MHDF__ERR* macros check the value of an mhdf_Status
00145 // object. The CHK_MB_ERR_* check the value of an ErrorCode.
00146 // The *_0 macros accept no other arguments. The *_1
00147 // macros accept a single hdf5 handle to close on error.
00148 // The *_2 macros accept an array of two hdf5 handles to
00149 // close on error. The _*2C macros accept one hdf5 handle
00150 // to close on error and a bool and an hdf5 handle where
00151 // the latter handle is conditionally closed depending on
00152 // the value of the bool. All macros contain a "return"
00153 // statement.
00154 #define CHK_MHDF_ERR_0( A )                            \
00155     do                                                 \
00156     {                                                  \
00157         if( mhdf_isError( &( A ) ) )                   \
00158         {                                              \
00159             MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
00160             assert( 0 );                               \
00161             return error( MB_FAILURE );                \
00162         }                                              \
00163     } while( false )
00164 
00165 #define CHK_MHDF_ERR_1( A, B )                         \
00166     do                                                 \
00167     {                                                  \
00168         if( mhdf_isError( &( A ) ) )                   \
00169         {                                              \
00170             MB_SET_ERR_CONT( mhdf_message( &( A ) ) ); \
00171             assert( 0 );                               \
00172             mhdf_closeData( filePtr, ( B ), &( A ) );  \
00173             return error( MB_FAILURE );                \
00174         }                                              \
00175     } while( false )
00176 
00177 #define CHK_MHDF_ERR_2( A, B )                           \
00178     do                                                   \
00179     {                                                    \
00180         if( mhdf_isError( &( A ) ) )                     \
00181         {                                                \
00182             MB_SET_ERR_CONT( mhdf_message( &( A ) ) );   \
00183             assert( 0 );                                 \
00184             mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
00185             mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
00186             return error( MB_FAILURE );                  \
00187         }                                                \
00188     } while( false )
00189 
00190 #define CHK_MHDF_ERR_3( A, B )                           \
00191     do                                                   \
00192     {                                                    \
00193         if( mhdf_isError( &( A ) ) )                     \
00194         {                                                \
00195             MB_SET_ERR_CONT( mhdf_message( &( A ) ) );   \
00196             assert( 0 );                                 \
00197             mhdf_closeData( filePtr, ( B )[0], &( A ) ); \
00198             mhdf_closeData( filePtr, ( B )[1], &( A ) ); \
00199             mhdf_closeData( filePtr, ( B )[2], &( A ) ); \
00200             return error( MB_FAILURE );                  \
00201         }                                                \
00202     } while( false )
00203 
00204 #define CHK_MHDF_ERR_2C( A, B, C, D )                         \
00205     do                                                        \
00206     {                                                         \
00207         if( mhdf_isError( &( A ) ) )                          \
00208         {                                                     \
00209             MB_SET_ERR_CONT( mhdf_message( &( A ) ) );        \
00210             assert( 0 );                                      \
00211             mhdf_closeData( filePtr, ( B ), &( A ) );         \
00212             if( C ) mhdf_closeData( filePtr, ( D ), &( A ) ); \
00213             return error( MB_FAILURE );                       \
00214         }                                                     \
00215     } while( false )
00216 
00217 #define CHK_MB_ERR_0( A )             \
00218     do                                \
00219     {                                 \
00220         if( MB_SUCCESS != ( A ) )     \
00221         {                             \
00222             MB_CHK_ERR_CONT( ( A ) ); \
00223             return error( A );        \
00224         }                             \
00225     } while( false )
00226 
00227 #define CHK_MB_ERR_1( A, B, C )                       \
00228     do                                                \
00229     {                                                 \
00230         if( MB_SUCCESS != ( A ) )                     \
00231         {                                             \
00232             MB_CHK_ERR_CONT( ( A ) );                 \
00233             mhdf_closeData( filePtr, ( B ), &( C ) ); \
00234             assert( 0 );                              \
00235             return error( A );                        \
00236         }                                             \
00237     } while( false )
00238 
00239 #define CHK_MB_ERR_2( A, B, C )                          \
00240     do                                                   \
00241     {                                                    \
00242         if( MB_SUCCESS != ( A ) )                        \
00243         {                                                \
00244             MB_CHK_ERR_CONT( ( A ) );                    \
00245             mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
00246             mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
00247             write_finished();                            \
00248             assert( 0 );                                 \
00249             return error( A );                           \
00250         }                                                \
00251     } while( false )
00252 
00253 #define CHK_MB_ERR_3( A, B, C )                          \
00254     do                                                   \
00255     {                                                    \
00256         if( MB_SUCCESS != ( A ) )                        \
00257         {                                                \
00258             MB_CHK_ERR_CONT( ( A ) );                    \
00259             mhdf_closeData( filePtr, ( B )[0], &( C ) ); \
00260             mhdf_closeData( filePtr, ( B )[1], &( C ) ); \
00261             mhdf_closeData( filePtr, ( B )[2], &( C ) ); \
00262             write_finished();                            \
00263             assert( 0 );                                 \
00264             return error( A );                           \
00265         }                                                \
00266     } while( false )
00267 
00268 #define CHK_MB_ERR_2C( A, B, C, D, E )                        \
00269     do                                                        \
00270     {                                                         \
00271         if( MB_SUCCESS != ( A ) )                             \
00272         {                                                     \
00273             MB_CHK_ERR_CONT( ( A ) );                         \
00274             mhdf_closeData( filePtr, ( B ), &( E ) );         \
00275             if( C ) mhdf_closeData( filePtr, ( D ), &( E ) ); \
00276             write_finished();                                 \
00277             assert( 0 );                                      \
00278             return error( A );                                \
00279         }                                                     \
00280     } while( false )
00281 
// Convenience wrapper that passes the current source line to the hook below.
#define debug_barrier() debug_barrier_line( __LINE__ )
// Debug synchronization hook; this (serial) implementation is a no-op.
void WriteHDF5::debug_barrier_line( int ) {}
00284 
00285 class CheckOpenWriteHDF5Handles
00286 {
00287     int fileline;
00288     mhdf_FileHandle handle;
00289     int enter_count;
00290 
00291   public:
00292     CheckOpenWriteHDF5Handles( mhdf_FileHandle file, int line )
00293         : fileline( line ), handle( file ), enter_count( mhdf_countOpenHandles( file ) )
00294     {
00295     }
00296 
00297     ~CheckOpenWriteHDF5Handles()
00298     {
00299         int new_count = mhdf_countOpenHandles( handle );
00300         if( new_count != enter_count )
00301         {
00302             std::cout << "Leaked HDF5 object handle in function at " << __FILE__ << ":" << fileline << std::endl
00303                       << "Open at entrance: " << enter_count << std::endl
00304                       << "Open at exit:     " << new_count << std::endl;
00305         }
00306     }
00307 };
00308 
00309 MPEState WriteHDF5::topState;
00310 MPEState WriteHDF5::subState;
00311 
00312 #ifdef NDEBUG
00313 #define CHECK_OPEN_HANDLES
00314 #else
00315 #define CHECK_OPEN_HANDLES CheckOpenWriteHDF5Handles check_open_handles_( filePtr, __LINE__ )
00316 #endif
00317 
00318 bool WriteHDF5::convert_handle_tag( const EntityHandle* source, EntityHandle* dest, size_t count ) const
00319 {
00320     bool some_valid = false;
00321     for( size_t i = 0; i < count; ++i )
00322     {
00323         if( !source[i] )
00324             dest[i] = 0;
00325         else
00326         {
00327             dest[i] = idMap.find( source[i] );
00328             if( dest[i] ) some_valid = true;
00329         }
00330     }
00331 
00332     return some_valid;
00333 }
00334 
// In-place overload: overwrite each handle with its file ID.
bool WriteHDF5::convert_handle_tag( EntityHandle* data, size_t count ) const
{
    // In-place conversion is only valid if handles and file IDs
    // occupy the same number of bytes.
    assert( sizeof( EntityHandle ) == sizeof( wid_t ) );
    return convert_handle_tag( data, data, count );
}
00340 
00341 ErrorCode WriteHDF5::assign_ids( const Range& entities, wid_t id )
00342 {
00343     Range::const_pair_iterator pi;
00344     for( pi = entities.const_pair_begin(); pi != entities.const_pair_end(); ++pi )
00345     {
00346         const EntityHandle n = pi->second - pi->first + 1;
00347         dbgOut.printf( 3, "Assigning %s %lu to %lu to file IDs [%lu,%lu]\n",
00348                        CN::EntityTypeName( TYPE_FROM_HANDLE( pi->first ) ),
00349                        (unsigned long)( ID_FROM_HANDLE( pi->first ) ),
00350                        (unsigned long)( ID_FROM_HANDLE( pi->first ) + n - 1 ), (unsigned long)id,
00351                        (unsigned long)( id + n - 1 ) );
00352         if( TYPE_FROM_HANDLE( pi->first ) == MBPOLYGON || TYPE_FROM_HANDLE( pi->first ) == MBPOLYHEDRON )
00353         {
00354             int num_vertices         = 0;
00355             const EntityHandle* conn = 0;
00356             iFace->get_connectivity( pi->first, conn, num_vertices );
00357             dbgOut.printf( 3, "  poly with %d verts/faces \n", num_vertices );
00358         }
00359         if( !idMap.insert( pi->first, id, n ).second ) return error( MB_FAILURE );
00360         id += n;
00361     }
00362 
00363     return MB_SUCCESS;
00364 }
00365 
00366 const char* WriteHDF5::ExportSet::name() const
00367 {
00368     static char buffer[128];
00369     switch( type )
00370     {
00371         case MBVERTEX:
00372             return mhdf_node_type_handle();
00373         case MBENTITYSET:
00374             return mhdf_set_type_handle();
00375         default:
00376             sprintf( buffer, "%s%d", CN::EntityTypeName( type ), num_nodes );
00377             return buffer;
00378     }
00379 }
00380 
// Factory entry point used to register this writer; caller owns the result.
WriterIface* WriteHDF5::factory( Interface* iface )
{
    return new WriteHDF5( iface );
}
00385 
// Construct the writer with all state zeroed/defaulted. Heavyweight setup
// (util-interface query, HDF5 error-handler registration) is deferred to
// init(); the actual write buffer is allocated in write_file().
WriteHDF5::WriteHDF5( Interface* iface )
    : bufferSize( WRITE_HDF5_BUFFER_SIZE ), dataBuffer( 0 ), iFace( iface ), writeUtil( 0 ), filePtr( 0 ),
      setContentsOffset( 0 ), setChildrenOffset( 0 ), setParentsOffset( 0 ), maxNumSetContents( 0 ),
      maxNumSetChildren( 0 ), maxNumSetParents( 0 ), writeSets( false ), writeSetContents( false ),
      writeSetChildren( false ), writeSetParents( false ), parallelWrite( false ), collectiveIO( false ),
      writeTagDense( false ), writeProp( H5P_DEFAULT ), dbgOut( "H5M", stderr ), debugTrack( false )
{
}
00394 
// One-time initialization: acquire the write-util interface, clear the
// handle-to-file-ID map, and install handle_hdf5_error as the HDF5 error
// callback (chaining to whatever handler was previously registered).
// Idempotent: returns immediately if already initialized.
ErrorCode WriteHDF5::init()
{
    ErrorCode rval;

    if( writeUtil )  // init has already been called
        return MB_SUCCESS;
    /*
    #ifdef DEBUG
      H5Eset_auto(&hdf_error_handler, writeUtil); // HDF5 callback for errors
    #endif
    */
    // For known tag types, store the corresponding HDF5 in which
    // the tag data is to be written in the file.
    // register_known_tag_types(iFace);

    // Get the util interface
    rval = iFace->query_interface( writeUtil );
    CHK_MB_ERR_0( rval );

    idMap.clear();

    // Save the current HDF5 error handler so handle_hdf5_error can chain to
    // it, and so write_finished() can restore it. The #if blocks select the
    // one- vs two-argument API depending on the HDF5 version.
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &errorHandler.func, &errorHandler.data );
#else
    herr_t err = H5Eget_auto( &errorHandler.func, &errorHandler.data );
#endif
    if( err < 0 )
    {
        // Could not query the existing handler; leave ours uninstalled.
        errorHandler.func = 0;
        errorHandler.data = 0;
    }
    else
    {
        // Install our wrapper, passing the saved handler as callback data.
#if defined( H5Eset_auto_vers ) && H5Eset_auto_vers > 1
        err = H5Eset_auto( H5E_DEFAULT, &handle_hdf5_error, &errorHandler );
#else
        err = H5Eset_auto( &handle_hdf5_error, &errorHandler );
#endif
        if( err < 0 )
        {
            errorHandler.func = 0;
            errorHandler.data = 0;
        }
    }

    // Lazily create MPE profiling states (no-ops if already valid).
    if( !topState.valid() ) topState = MPEState( "WriteHDF5", "yellow" );
    if( !subState.valid() ) subState = MPEState( "WriteHDF5 subevent", "cyan" );

    return MB_SUCCESS;
}
00445 
// Per-write cleanup: free the export lists and ID map, and restore the
// HDF5 error handler that init() replaced — but only if our handler is
// still the one installed (someone else may have changed it since).
ErrorCode WriteHDF5::write_finished()
{
    // Release memory allocated in lists
    exportList.clear();
    nodeSet.range.clear();
    setSet.range.clear();
    tagList.clear();
    idMap.clear();

    // Query the currently installed HDF5 error handler.
    HDF5ErrorHandler handler;
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
    herr_t err = H5Eget_auto( H5E_DEFAULT, &handler.func, &handler.data );
#else
    herr_t err = H5Eget_auto( &handler.func, &handler.data );
#endif
    // Restore the saved handler only if ours is still active.
    if( err >= 0 && handler.func == &handle_hdf5_error )
    {
        assert( handler.data == &errorHandler );
#if defined( H5Eget_auto_vers ) && H5Eget_auto_vers > 1
        H5Eset_auto( H5E_DEFAULT, errorHandler.func, errorHandler.data );
#else
        H5Eset_auto( errorHandler.func, errorHandler.data );
#endif
    }

    return MB_SUCCESS;
}
00473 
00474 WriteHDF5::~WriteHDF5()
00475 {
00476     if( !writeUtil )  // init() failed.
00477         return;
00478 
00479     iFace->release_interface( writeUtil );
00480 }
00481 
// Public entry point for writing a file. Parses the relevant FileOptions
// (DEBUG_IO, DEBUG_BINIO, BUFFER_SIZE, KEEP), allocates the gather buffer,
// delegates the actual write to write_file_impl(), then performs all
// cleanup: closing writeProp and the file, freeing the buffer, releasing
// per-write state, and deleting a partially written file on failure.
ErrorCode WriteHDF5::write_file( const char* filename, bool overwrite, const FileOptions& opts,
                                 const EntityHandle* set_array, const int num_sets,
                                 const std::vector< std::string >& qa_records, const Tag* tag_list, int num_tags,
                                 int user_dimension )
{
    mhdf_Status status;

    parallelWrite = false;
    collectiveIO  = false;

    // Enable debug output
    int tmpval = 0;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_IO", 1, tmpval ) ) dbgOut.set_verbosity( tmpval );

    // writeTagDense = (MB_SUCCESS == opts.get_null_option("DENSE_TAGS"));
    writeTagDense = true;

    // Enable some extra checks for reads.  Note: amongst other things this
    // will print errors if the entire file is not read, so if doing a
    // partial read that is not a parallel read, this should be disabled.
    debugTrack = ( MB_SUCCESS == opts.get_null_option( "DEBUG_BINIO" ) );

    // Honor BUFFER_SIZE if present and at least 24 bytes; otherwise default.
    bufferSize = WRITE_HDF5_BUFFER_SIZE;
    int buf_size;
    ErrorCode rval = opts.get_int_option( "BUFFER_SIZE", buf_size );
    if( MB_SUCCESS == rval && buf_size >= 24 ) bufferSize = buf_size;

    // Allocate internal buffer to use when gathering data to write.
    dataBuffer = (char*)malloc( bufferSize );
    if( !dataBuffer ) return error( MB_MEMORY_ALLOCATION_FAILED );

    // Clear filePtr so we know if it is open upon failure
    filePtr = 0;

    // Do actual write.
    writeProp        = H5P_DEFAULT;
    ErrorCode result = write_file_impl( filename, overwrite, opts, set_array, num_sets, qa_records, tag_list, num_tags,
                                        user_dimension );
    // Close writeProp if it was opened
    if( writeProp != H5P_DEFAULT ) H5Pclose( writeProp );

    // Free memory buffer
    free( dataBuffer );
    dataBuffer = 0;

    // Close file
    bool created_file = false;
    if( filePtr )
    {
        created_file = true;
        mhdf_closeFile( filePtr, &status );
        filePtr = 0;
        // A failure on close demotes an otherwise successful write.
        if( mhdf_isError( &status ) )
        {
            MB_SET_ERR_CONT( mhdf_message( &status ) );
            if( MB_SUCCESS == result ) result = MB_FAILURE;
        }
    }

    // Release other resources
    if( MB_SUCCESS == result )
        result = write_finished();
    else
        write_finished();

    // If write failed, remove file unless KEEP option was specified
    if( MB_SUCCESS != result && created_file && MB_ENTITY_NOT_FOUND == opts.get_null_option( "KEEP" ) )
        remove( filename );

    return result;
}
00553 
00554 ErrorCode WriteHDF5::write_file_impl( const char* filename, bool overwrite, const FileOptions& opts,
00555                                       const EntityHandle* set_array, const int num_sets,
00556                                       const std::vector< std::string >& qa_records, const Tag* tag_list, int num_tags,
00557                                       int user_dimension )
00558 {
00559     ErrorCode result;
00560     std::list< TagDesc >::const_iterator t_itor;
00561     std::list< ExportSet >::iterator ex_itor;
00562     EntityHandle elem_count, max_id;
00563     double times[NUM_TIMES] = { 0 };
00564 
00565     if( MB_SUCCESS != init() ) return error( MB_FAILURE );
00566 
00567     // See if we need to report times
00568     bool cputime = false;
00569     result       = opts.get_null_option( "CPUTIME" );
00570     if( MB_SUCCESS == result ) cputime = true;
00571 
00572     CpuTimer timer;
00573 
00574     dbgOut.tprint( 1, "Gathering Mesh\n" );
00575     topState.start( "gathering mesh" );
00576 
00577     // Gather mesh to export
00578     exportList.clear();
00579     if( 0 == num_sets || ( 1 == num_sets && set_array[0] == 0 ) )
00580     {
00581         result = gather_all_mesh();
00582         topState.end( result );
00583         CHK_MB_ERR_0( result );
00584     }
00585     else
00586     {
00587         std::vector< EntityHandle > passed_export_list( set_array, set_array + num_sets );
00588         result = gather_mesh_info( passed_export_list );
00589         topState.end( result );
00590         CHK_MB_ERR_0( result );
00591     }
00592 
00593     times[GATHER_TIME] = timer.time_elapsed();
00594 
00595     // if (nodeSet.range.size() == 0)
00596     //  return error(MB_ENTITY_NOT_FOUND);
00597 
00598     dbgOut.tprint( 1, "Checking ID space\n" );
00599 
00600     // Make sure ID space is sufficient
00601     elem_count = nodeSet.range.size() + setSet.range.size();
00602     for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
00603         elem_count += ex_itor->range.size();
00604     max_id = (EntityHandle)1 << ( 8 * sizeof( wid_t ) - 1 );
00605     if( elem_count > max_id )
00606     {
00607         MB_SET_ERR_CONT( "ID space insufficient for mesh size" );
00608         return error( result );
00609     }
00610 
00611     dbgOut.tprint( 1, "Creating File\n" );
00612 
00613     // Figure out the dimension in which to write the mesh.
00614     int mesh_dim;
00615     result = iFace->get_dimension( mesh_dim );
00616     CHK_MB_ERR_0( result );
00617 
00618     if( user_dimension < 1 ) user_dimension = mesh_dim;
00619     user_dimension = user_dimension > mesh_dim ? mesh_dim : user_dimension;
00620 
00621     // Create the file layout, including all tables (zero-ed) and
00622     // all structure and meta information.
00623     const char* optnames[] = { "WRITE_PART", "FORMAT", 0 };
00624     int junk;
00625     parallelWrite = ( MB_SUCCESS == opts.match_option( "PARALLEL", optnames, junk ) );
00626     if( parallelWrite )
00627     {
00628         // Just store Boolean value based on string option here.
00629         // parallel_create_file will set writeProp accordingly.
00630         // collectiveIO = (MB_SUCCESS == opts.get_null_option("COLLECTIVE"));
00631         // dbgOut.printf(2, "'COLLECTIVE' option = %s\n", collectiveIO ? "YES" : "NO");
00632         // Do this all the time, as it appears to be much faster than indep in some cases
00633         collectiveIO = true;
00634         result =
00635             parallel_create_file( filename, overwrite, qa_records, opts, tag_list, num_tags, user_dimension, times );
00636     }
00637     else
00638     {
00639         result = serial_create_file( filename, overwrite, qa_records, tag_list, num_tags, user_dimension );
00640     }
00641     if( MB_SUCCESS != result ) return error( result );
00642 
00643     times[CREATE_TIME] = timer.time_elapsed();
00644 
00645     dbgOut.tprint( 1, "Writing Nodes.\n" );
00646     // Write node coordinates
00647     if( !nodeSet.range.empty() || parallelWrite )
00648     {
00649         topState.start( "writing coords" );
00650         result = write_nodes();
00651         topState.end( result );
00652         if( MB_SUCCESS != result ) return error( result );
00653     }
00654 
00655     times[COORD_TIME] = timer.time_elapsed();
00656 
00657     dbgOut.tprint( 1, "Writing connectivity.\n" );
00658 
00659     // Write element connectivity
00660     for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
00661     {
00662         topState.start( "writing connectivity for ", ex_itor->name() );
00663         result = write_elems( *ex_itor );
00664         topState.end( result );
00665         if( MB_SUCCESS != result ) return error( result );
00666     }
00667     times[CONN_TIME] = timer.time_elapsed();
00668 
00669     dbgOut.tprint( 1, "Writing sets.\n" );
00670 
00671     // Write meshsets
00672     result = write_sets( times );
00673     if( MB_SUCCESS != result ) return error( result );
00674     debug_barrier();
00675 
00676     times[SET_TIME] = timer.time_elapsed();
00677     dbgOut.tprint( 1, "Writing adjacencies.\n" );
00678 
00679     // Write adjacencies
00680     // Tim says don't save node adjacencies!
00681 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES
00682     result = write_adjacencies( nodeSet );
00683     if( MB_SUCCESS != result ) return error( result );
00684 #endif
00685     for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
00686     {
00687         topState.start( "writing adjacencies for ", ex_itor->name() );
00688         result = write_adjacencies( *ex_itor );
00689         topState.end( result );
00690         if( MB_SUCCESS != result ) return error( result );
00691     }
00692     times[ADJ_TIME] = timer.time_elapsed();
00693 
00694     dbgOut.tprint( 1, "Writing tags.\n" );
00695 
00696     // Write tags
00697     for( t_itor = tagList.begin(); t_itor != tagList.end(); ++t_itor )
00698     {
00699         std::string name;
00700         iFace->tag_get_name( t_itor->tag_id, name );
00701         topState.start( "writing tag: ", name.c_str() );
00702         result = write_tag( *t_itor, times );
00703         topState.end( result );
00704         if( MB_SUCCESS != result ) return error( result );
00705     }
00706     times[TAG_TIME] = timer.time_elapsed();
00707 
00708     times[TOTAL_TIME] = timer.time_since_birth();
00709 
00710     if( cputime ) { print_times( times ); }
00711 
00712     return MB_SUCCESS;
00713 }
00714 
// Populate nodeSet, setSet, and exportList from entities grouped by
// dimension in ranges[0..4] (0 = vertices, 4 = entity sets). Elements of
// each type are further partitioned by connectivity length, producing one
// ExportSet per (type, num_nodes) group.
ErrorCode WriteHDF5::initialize_mesh( const Range ranges[5] )
{
    ErrorCode rval;

    // ranges[0] must contain only vertices.
    if( !ranges[0].all_of_type( MBVERTEX ) ) return error( MB_FAILURE );
    nodeSet.range        = ranges[0];
    nodeSet.type         = MBVERTEX;
    nodeSet.num_nodes    = 1;
    nodeSet.max_num_ents = nodeSet.max_num_adjs = 0;

    // ranges[4] must contain only entity sets.
    if( !ranges[4].all_of_type( MBENTITYSET ) ) return error( MB_FAILURE );
    setSet.range        = ranges[4];
    setSet.type         = MBENTITYSET;
    setSet.num_nodes    = 0;
    setSet.max_num_ents = setSet.max_num_adjs = 0;
    maxNumSetContents = maxNumSetChildren = maxNumSetParents = 0;

    exportList.clear();
    // bins[k] accumulates entities of the current type with k nodes.
    std::vector< Range > bins( 1024 );  // Sort entities by connectivity length
                                        // Resize is expensive due to Range copy, so start big
    for( EntityType type = MBEDGE; type < MBENTITYSET; ++type )
    {
        ExportSet set;
        set.max_num_ents = set.max_num_adjs = 0;
        const int dim                       = CN::Dimension( type );

        // Group entities by connectivity length
        bins.clear();
        assert( dim >= 0 && dim <= 4 );
        std::pair< Range::const_iterator, Range::const_iterator > p = ranges[dim].equal_range( type );
        Range::const_iterator i                                     = p.first;
        while( i != p.second )
        {
            Range::const_iterator first = i;
            EntityHandle const* conn;
            int len, firstlen;

            // Dummy storage vector for structured mesh "get_connectivity" function
            std::vector< EntityHandle > storage;

            rval = iFace->get_connectivity( *i, conn, firstlen, false, &storage );
            if( MB_SUCCESS != rval ) return error( rval );

            // Advance until the connectivity length changes; [first, i)
            // is then a run of entities all with 'firstlen' nodes.
            for( ++i; i != p.second; ++i )
            {
                rval = iFace->get_connectivity( *i, conn, len, false, &storage );
                if( MB_SUCCESS != rval ) return error( rval );

                if( len != firstlen ) break;
            }

            if( firstlen >= (int)bins.size() ) bins.resize( firstlen + 1 );
            bins[firstlen].merge( first, i );
        }
        // Create ExportSet for each group
        for( std::vector< Range >::iterator j = bins.begin(); j != bins.end(); ++j )
        {
            if( j->empty() ) continue;

            set.range.clear();
            set.type      = type;
            // Bin index equals the connectivity length for this group.
            set.num_nodes = j - bins.begin();
            exportList.push_back( set );
            // swap() transfers the bin's contents without copying the Range.
            exportList.back().range.swap( *j );
        }
    }

    return MB_SUCCESS;
}
00784 
00785 // Gather the mesh to be written from a list of owning meshsets.
00786 ErrorCode WriteHDF5::gather_mesh_info( const std::vector< EntityHandle >& export_sets )
00787 {
00788     ErrorCode rval;
00789 
00790     int dim;
00791     Range range;      // Temporary storage
00792     Range ranges[5];  // Lists of entities to export, grouped by dimension
00793 
00794     // Gather list of all related sets
00795     std::vector< EntityHandle > stack( export_sets );
00796     std::copy( export_sets.begin(), export_sets.end(), stack.begin() );
00797     std::vector< EntityHandle > set_children;
00798     while( !stack.empty() )
00799     {
00800         EntityHandle meshset = stack.back();
00801         stack.pop_back();
00802         ranges[4].insert( meshset );
00803 
00804         // Get contained sets
00805         range.clear();
00806         rval = iFace->get_entities_by_type( meshset, MBENTITYSET, range );
00807         CHK_MB_ERR_0( rval );
00808         for( Range::iterator ritor = range.begin(); ritor != range.end(); ++ritor )
00809         {
00810             if( ranges[4].find( *ritor ) == ranges[4].end() ) stack.push_back( *ritor );
00811         }
00812 
00813         // Get child sets
00814         set_children.clear();
00815         rval = iFace->get_child_meshsets( meshset, set_children, 1 );
00816         CHK_MB_ERR_0( rval );
00817         for( std::vector< EntityHandle >::iterator vitor = set_children.begin(); vitor != set_children.end(); ++vitor )
00818         {
00819             if( ranges[4].find( *vitor ) == ranges[4].end() ) stack.push_back( *vitor );
00820         }
00821     }
00822 
00823     // Gather list of all mesh entities from list of sets,
00824     // grouped by dimension.
00825     for( Range::iterator setitor = ranges[4].begin(); setitor != ranges[4].end(); ++setitor )
00826     {
00827         for( dim = 0; dim < 4; ++dim )
00828         {
00829             range.clear();
00830             rval = iFace->get_entities_by_dimension( *setitor, dim, range, false );
00831             CHK_MB_ERR_0( rval );
00832 
00833             ranges[dim].merge( range );
00834         }
00835     }
00836 
00837     // For each list of elements, append adjacent children and
00838     // nodes to lists.
00839     for( dim = 3; dim > 0; --dim )
00840     {
00841         for( int cdim = 1; cdim < dim; ++cdim )
00842         {
00843             range.clear();
00844             rval = iFace->get_adjacencies( ranges[dim], cdim, false, range );
00845             CHK_MB_ERR_0( rval );
00846             ranges[cdim].merge( range );
00847         }
00848         range.clear();
00849         rval = writeUtil->gather_nodes_from_elements( ranges[dim], 0, range );
00850         CHK_MB_ERR_0( rval );
00851         ranges[0].merge( range );
00852     }
00853 
00854     return initialize_mesh( ranges );
00855 }
00856 
00857 // Gather all the mesh and related information to be written.
00858 ErrorCode WriteHDF5::gather_all_mesh()
00859 {
00860     ErrorCode rval;
00861     Range ranges[5];
00862 
00863     rval = iFace->get_entities_by_type( 0, MBVERTEX, ranges[0] );
00864     if( MB_SUCCESS != rval ) return error( rval );
00865 
00866     rval = iFace->get_entities_by_dimension( 0, 1, ranges[1] );
00867     if( MB_SUCCESS != rval ) return error( rval );
00868 
00869     rval = iFace->get_entities_by_dimension( 0, 2, ranges[2] );
00870     if( MB_SUCCESS != rval ) return error( rval );
00871 
00872     rval = iFace->get_entities_by_dimension( 0, 3, ranges[3] );
00873     if( MB_SUCCESS != rval ) return error( rval );
00874 
00875     rval = iFace->get_entities_by_type( 0, MBENTITYSET, ranges[4] );
00876     if( MB_SUCCESS != rval ) return error( rval );
00877 
00878     return initialize_mesh( ranges );
00879 }
00880 
/** Write the vertex coordinate table to the HDF5 file.
 *
 * Streams the coordinates of the vertices in nodeSet.range into the
 * file's node-coordinate dataset in buffer-sized chunks.  If the file
 * has more coordinate dimensions than the mesh, extra values are
 * zero-filled (BLOCKED_COORD_IO path).  When collective parallel I/O
 * is enabled, extra zero-length writes are issued so every process
 * makes the same number of (collective) write calls.
 *
 * \return MB_SUCCESS, or a MOAB/mhdf error code on failure.
 */
ErrorCode WriteHDF5::write_nodes()
{
    mhdf_Status status;
    int dim, mesh_dim;
    ErrorCode rval;
    hid_t node_table;
    long first_id, num_nodes;

    if( !nodeSet.total_num_ents ) return MB_SUCCESS;  // No nodes!

    CHECK_OPEN_HANDLES;

    rval = iFace->get_dimension( mesh_dim );
    CHK_MB_ERR_0( rval );

    debug_barrier();
    dbgOut.print( 3, "Opening Node Coords\n" );
    node_table = mhdf_openNodeCoords( filePtr, &num_nodes, &dim, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track( debugTrack, "nodes", num_nodes );

    double* buffer = (double*)dataBuffer;
#ifdef BLOCKED_COORD_IO
    // One coordinate axis per write: the whole buffer holds one dimension.
    int chunk_size = bufferSize / sizeof( double );
#else
    // Interleaved writes: the buffer holds 3 doubles per node.
    int chunk_size = bufferSize / ( 3 * sizeof( double ) );
#endif

    long remaining  = nodeSet.range.size();
    long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
    if( nodeSet.max_num_ents )
    {
        // In parallel, every proc must make the same number of write
        // calls; size the count by the max entity count over all procs.
        assert( nodeSet.max_num_ents >= remaining );
        num_writes = ( nodeSet.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    long remaining_writes = num_writes;

    long offset                = nodeSet.offset;
    Range::const_iterator iter = nodeSet.range.begin();
    dbgOut.printf( 3, "Writing %ld nodes in %ld blocks of %d\n", remaining, ( remaining + chunk_size - 1 ) / chunk_size,
                   chunk_size );
    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
        long count = chunk_size < remaining ? chunk_size : remaining;
        remaining -= count;
        Range::const_iterator end = iter;
        end += count;

#ifdef BLOCKED_COORD_IO
        for( int d = 0; d < dim; d++ )
        {
            if( d < mesh_dim )
            {
                rval = writeUtil->get_node_coords( d, iter, end, count, buffer );
                CHK_MB_ERR_1( rval, node_table, status );
            }
            else
                // File dimension exceeds mesh dimension: pad with zeros.
                memset( buffer, 0, count * sizeof( double ) );

            dbgOut.printf( 3, " writing %c node chunk %ld of %ld, %ld values at %ld\n", (char)( 'X' + d ),
                           num_writes - remaining_writes + 1, num_writes, count, offset );
            mhdf_writeNodeCoordWithOpt( node_table, offset, count, d, buffer, writeProp, &status );
            CHK_MHDF_ERR_1( status, node_table );
        }
#else
        // -1 dimension argument requests all (interleaved) coordinates.
        rval = writeUtil->get_node_coords( -1, iter, end, 3 * count, buffer );
        CHK_MB_ERR_1( rval, node_table, status );
        dbgOut.printf( 3, " writing node chunk %ld of %ld, %ld values at %ld\n", num_writes - remaining_writes + 1,
                       num_writes, count, offset );
        mhdf_writeNodeCoordsWithOpt( node_table, offset, count, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, node_table );
#endif
        track.record_io( offset, count );

        iter = end;
        offset += count;
        --remaining_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( remaining_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
#ifdef BLOCKED_COORD_IO
            for( int d = 0; d < dim; ++d )
            {
                dbgOut.printf( 3, " writing (empty) %c node chunk %ld of %ld.\n", (char)( 'X' + d ),
                               num_writes - remaining_writes, num_writes );
                mhdf_writeNodeCoordWithOpt( node_table, offset, 0, d, 0, writeProp, &status );
                CHK_MHDF_ERR_1( status, node_table );
            }
#else
            dbgOut.printf( 3, " writing (empty) node chunk %ld of %ld.\n", num_writes - remaining_writes, num_writes );
            mhdf_writeNodeCoordsWithOpt( node_table, offset, 0, 0, writeProp, &status );
            CHK_MHDF_ERR_1( status, node_table );
#endif
        }
    }

    mhdf_closeData( filePtr, node_table, &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}
00989 
/** Write the connectivity table for one block of elements.
 *
 * \param elems  Export set describing the element block (type, nodes
 *               per element, handle range, file offset).
 *
 * Element connectivity is fetched in buffer-sized chunks, each handle
 * is translated to its file ID via idMap, and the chunk is written to
 * the pre-created connectivity dataset.  A zero (unmapped) ID aborts
 * the write.  When collective parallel I/O is enabled, extra
 * zero-length writes are issued so every process makes the same
 * number of (collective) write calls.
 */
ErrorCode WriteHDF5::write_elems( ExportSet& elems )
{
    mhdf_Status status;
    ErrorCode rval;
    long first_id;
    int nodes_per_elem;
    long table_size;

    CHECK_OPEN_HANDLES;

    debug_barrier();
    dbgOut.printf( 2, "Writing %lu elements of type %s%d\n", (unsigned long)elems.range.size(),
                   CN::EntityTypeName( elems.type ), elems.num_nodes );
    dbgOut.print( 3, "Writing elements", elems.range );

    hid_t elem_table = mhdf_openConnectivity( filePtr, elems.name(), &nodes_per_elem, &table_size, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track( debugTrack, elems.name() && strlen( elems.name() ) ? elems.name() : "<ANONYMOUS ELEM SET?>",
                        table_size );

    assert( (unsigned long)first_id <= elems.first_id );
    assert( (unsigned long)table_size >= elems.offset + elems.range.size() );

    EntityHandle* buffer = (EntityHandle*)dataBuffer;
    int chunk_size       = bufferSize / ( elems.num_nodes * sizeof( wid_t ) );
    long offset          = elems.offset;
    long remaining       = elems.range.size();
    long num_writes      = ( remaining + chunk_size - 1 ) / chunk_size;
    if( elems.max_num_ents )
    {
        // In parallel, every proc must make the same number of write
        // calls; size the count by the max entity count over all procs.
        assert( elems.max_num_ents >= remaining );
        num_writes = ( elems.max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    long remaining_writes = num_writes;
    Range::iterator iter  = elems.range.begin();

    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
        long count = chunk_size < remaining ? chunk_size : remaining;
        remaining -= count;

        Range::iterator next = iter;
        next += count;
        rval = writeUtil->get_element_connect( iter, next, elems.num_nodes, count * elems.num_nodes, buffer );
        CHK_MB_ERR_1( rval, elem_table, status );
        iter = next;

        // Translate entity handles to file IDs in place.
        // NOTE(review): the loop bound uses nodes_per_elem from the opened
        // table while the buffer was filled using elems.num_nodes --
        // presumably these are always equal for a matching table; confirm.
        for( long i = 0; i < count * nodes_per_elem; ++i )
        {
            buffer[i] = idMap.find( buffer[i] );
            if( 0 == buffer[i] )
            {
                // Connectivity references an entity that is not being written.
                MB_SET_ERR_CONT( "Invalid " << elems.name() << " element connectivity. Write Aborted" );
                mhdf_closeData( filePtr, elem_table, &status );
                return error( MB_FAILURE );
            }
        }

        dbgOut.printf( 3, " writing node connectivity %ld of %ld, %ld values at %ld\n",
                       num_writes - remaining_writes + 1, num_writes, count, offset );
        track.record_io( offset, count );
        mhdf_writeConnectivityWithOpt( elem_table, offset, count, id_type, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, elem_table );

        offset += count;
        --remaining_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( remaining_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.printf( 3, " writing (empty) connectivity chunk %ld of %ld.\n", num_writes - remaining_writes + 1,
                           num_writes );
            mhdf_writeConnectivityWithOpt( elem_table, offset, 0, id_type, 0, writeProp, &status );
            CHK_MHDF_ERR_1( status, elem_table );
        }
    }

    mhdf_closeData( filePtr, elem_table, &status );
    CHK_MHDF_ERR_0( status );

    track.all_reduce();
    return MB_SUCCESS;
}
01078 
01079 ErrorCode WriteHDF5::get_set_info( EntityHandle set, long& num_entities, long& num_children, long& num_parents,
01080                                    unsigned long& flags )
01081 {
01082     ErrorCode rval;
01083     int i;
01084     unsigned int u;
01085 
01086     rval = iFace->get_number_entities_by_handle( set, i, false );
01087     CHK_MB_ERR_0( rval );
01088     num_entities = i;
01089 
01090     rval = iFace->num_child_meshsets( set, &i );
01091     CHK_MB_ERR_0( rval );
01092     num_children = i;
01093 
01094     rval = iFace->num_parent_meshsets( set, &i );
01095     CHK_MB_ERR_0( rval );
01096     num_parents = i;
01097 
01098     rval = iFace->get_meshset_options( set, u );
01099     CHK_MB_ERR_0( rval );
01100     flags = u;
01101 
01102     return MB_SUCCESS;
01103 }
01104 
01105 ErrorCode WriteHDF5::write_set_data( const WriteUtilIface::EntityListType which_data, const hid_t handle,
01106                                      IODebugTrack& track, Range* ranged, Range* null_stripped,
01107                                      std::vector< long >* set_sizes )
01108 {
01109     // ranged must be non-null for CONTENTS and null for anything else
01110     assert( ( which_data == WriteUtilIface::CONTENTS ) == ( 0 != ranged ) );
01111     ErrorCode rval;
01112     mhdf_Status status;
01113 
01114     debug_barrier();
01115 
01116     // Function pointer type used to write set data
01117     void ( *write_func )( hid_t, long, long, hid_t, const void*, hid_t, mhdf_Status* );
01118     long max_vals;  // Max over all procs of number of values to write to data set
01119     long offset;    // Offset in HDF5 dataset at which to write next block of data
01120     switch( which_data )
01121     {
01122         case WriteUtilIface::CONTENTS:
01123             assert( ranged != 0 && null_stripped != 0 && set_sizes != 0 );
01124             write_func = &mhdf_writeSetDataWithOpt;
01125             max_vals   = maxNumSetContents;
01126             offset     = setContentsOffset;
01127             dbgOut.print( 2, "Writing set contents\n" );
01128             break;
01129         case WriteUtilIface::CHILDREN:
01130             assert( !ranged && !null_stripped && !set_sizes );
01131             write_func = &mhdf_writeSetParentsChildrenWithOpt;
01132             max_vals   = maxNumSetChildren;
01133             offset     = setChildrenOffset;
01134             dbgOut.print( 2, "Writing set child lists\n" );
01135             break;
01136         case WriteUtilIface::PARENTS:
01137             assert( !ranged && !null_stripped && !set_sizes );
01138             write_func = &mhdf_writeSetParentsChildrenWithOpt;
01139             max_vals   = maxNumSetParents;
01140             offset     = setParentsOffset;
01141             dbgOut.print( 2, "Writing set parent lists\n" );
01142             break;
01143         default:
01144             assert( false );
01145             return MB_FAILURE;
01146     }
01147     // assert(max_vals > 0); // Should have skipped this function otherwise
01148 
01149     // buffer to use for IO
01150     wid_t* buffer = reinterpret_cast< wid_t* >( dataBuffer );
01151     // number of handles that will fit in the buffer
01152     const size_t buffer_size = bufferSize / sizeof( EntityHandle );
01153     // the total number of write calls that must be made, including no-ops for collective io
01154     const size_t num_total_writes = ( max_vals + buffer_size - 1 ) / buffer_size;
01155 
01156     std::vector< SpecialSetData >::iterator si = specialSets.begin();
01157 
01158     std::vector< wid_t > remaining;         // data left over from prev iteration because it didn't fit in buffer
01159     size_t remaining_offset           = 0;  // avoid erasing from front of 'remaining'
01160     const EntityHandle* remaining_ptr = 0;  // remaining for non-ranged data
01161     size_t remaining_count            = 0;
01162     const wid_t* special_rem_ptr      = 0;
01163     Range::const_iterator i           = setSet.range.begin(), j, rhint, nshint;
01164     if( ranged ) rhint = ranged->begin();
01165     if( null_stripped ) nshint = null_stripped->begin();
01166     for( size_t w = 0; w < num_total_writes; ++w )
01167     {
01168         if( i == setSet.range.end() && !remaining.empty() && !remaining_ptr )
01169         {
01170             // If here, then we've written everything but we need to
01171             // make more write calls because we're doing collective IO
01172             // in parallel
01173             ( *write_func )( handle, 0, 0, id_type, 0, writeProp, &status );
01174             CHK_MHDF_ERR_0( status );
01175             continue;
01176         }
01177 
01178         // If we had some left-over data from a range-compacted set
01179         // from the last iteration, add it to the buffer now
01180         size_t count = 0;
01181         if( !remaining.empty() )
01182         {
01183             count = remaining.size() - remaining_offset;
01184             if( count > buffer_size )
01185             {
01186                 memcpy( buffer, &remaining[remaining_offset], buffer_size * sizeof( wid_t ) );
01187                 count = buffer_size;
01188                 remaining_offset += buffer_size;
01189             }
01190             else
01191             {
01192                 memcpy( buffer, &remaining[remaining_offset], count * sizeof( wid_t ) );
01193                 remaining_offset = 0;
01194                 remaining.clear();
01195             }
01196         }
01197         // If we had some left-over data from a non-range-compacted set
01198         // from the last iteration, add it to the buffer now
01199         else if( remaining_ptr )
01200         {
01201             if( remaining_count > buffer_size )
01202             {
01203                 rval = vector_to_id_list( remaining_ptr, buffer, buffer_size );
01204                 CHK_MB_ERR_0( rval );
01205                 count = buffer_size;
01206                 remaining_ptr += count;
01207                 remaining_count -= count;
01208             }
01209             else
01210             {
01211                 rval = vector_to_id_list( remaining_ptr, buffer, remaining_count );
01212                 CHK_MB_ERR_0( rval );
01213                 count           = remaining_count;
01214                 remaining_ptr   = 0;
01215                 remaining_count = 0;
01216             }
01217         }
01218         // If we had some left-over data from a "special" (i.e. parallel shared)
01219         // set.
01220         else if( special_rem_ptr )
01221         {
01222             if( remaining_count > buffer_size )
01223             {
01224                 memcpy( buffer, special_rem_ptr, buffer_size * sizeof( wid_t ) );
01225                 count = buffer_size;
01226                 special_rem_ptr += count;
01227                 remaining_count -= count;
01228             }
01229             else
01230             {
01231                 memcpy( buffer, special_rem_ptr, remaining_count * sizeof( wid_t ) );
01232                 count           = remaining_count;
01233                 special_rem_ptr = 0;
01234                 remaining_count = 0;
01235             }
01236         }
01237 
01238         // While there is both space remaining in the buffer and
01239         // more sets to write, append more set data to buffer.
01240 
01241         while( count < buffer_size && i != setSet.range.end() )
01242         {
01243             // Special case for "special" (i.e. parallel shared) sets:
01244             // we already have the data in a vector, just copy it.
01245             if( si != specialSets.end() && si->setHandle == *i )
01246             {
01247                 std::vector< wid_t >& list =
01248                     ( which_data == WriteUtilIface::CONTENTS )
01249                         ? si->contentIds
01250                         : ( which_data == WriteUtilIface::PARENTS ) ? si->parentIds : si->childIds;
01251                 size_t append = list.size();
01252                 if( count + list.size() > buffer_size )
01253                 {
01254                     append          = buffer_size - count;
01255                     special_rem_ptr = &list[append];
01256                     remaining_count = list.size() - append;
01257                 }
01258                 memcpy( buffer + count, &list[0], append * sizeof( wid_t ) );
01259                 ++i;
01260                 ++si;
01261                 count += append;
01262                 continue;
01263             }
01264 
01265             j = i;
01266             ++i;
01267             const EntityHandle* ptr;
01268             int len;
01269             unsigned char flags;
01270             rval = writeUtil->get_entity_list_pointers( j, i, &ptr, which_data, &len, &flags );
01271             if( MB_SUCCESS != rval ) return rval;
01272             if( which_data == WriteUtilIface::CONTENTS && !( flags & MESHSET_ORDERED ) )
01273             {
01274                 bool compacted;
01275                 remaining.clear();
01276                 if( len == 0 )
01277                     compacted = false;
01278                 else
01279                 {
01280                     assert( !( len % 2 ) );
01281                     rval = range_to_blocked_list( ptr, len / 2, remaining, compacted );
01282                     if( MB_SUCCESS != rval ) return rval;
01283                 }
01284                 if( compacted )
01285                 {
01286                     rhint = ranged->insert( rhint, *j );
01287                     set_sizes->push_back( remaining.size() );
01288                 }
01289                 else if( remaining.size() != (unsigned)len )
01290                 {
01291                     nshint = null_stripped->insert( nshint, *j );
01292                     set_sizes->push_back( remaining.size() );
01293                 }
01294 
01295                 if( count + remaining.size() <= buffer_size )
01296                 {
01297                     if( !remaining.empty() )
01298                         memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining.size() );
01299                     count += remaining.size();
01300                     remaining.clear();
01301                     remaining_offset = 0;
01302                 }
01303                 else
01304                 {
01305                     remaining_offset = buffer_size - count;
01306                     memcpy( buffer + count, &remaining[0], sizeof( wid_t ) * remaining_offset );
01307                     count += remaining_offset;
01308                 }
01309             }
01310             else
01311             {
01312                 if( count + len > buffer_size )
01313                 {
01314                     size_t append   = buffer_size - count;
01315                     remaining_ptr   = ptr + append;
01316                     remaining_count = len - append;
01317                     len             = append;
01318                 }
01319 
01320                 rval = vector_to_id_list( ptr, buffer + count, len );
01321                 count += len;
01322             }
01323         }
01324 
01325         // Write the buffer.
01326         ( *write_func )( handle, offset, count, id_type, buffer, writeProp, &status );
01327         CHK_MHDF_ERR_0( status );
01328         track.record_io( offset, count );
01329         offset += count;
01330     }
01331 
01332     return MB_SUCCESS;
01333 }
01334 
/** Write the set description (meta) table and, as needed, the set
 *  parent, child, and content tables for all local sets.
 *
 * \param times  Output array of timing values; the SET_PARENT,
 *               SET_CHILD, SET_CONTENT, and SET_META entries are set.
 *
 * The parent/child/content lists are delegated to write_set_data().
 * The meta table stores, for each set, the END index of its data in
 * each of the three tables (not the count) plus the set flags.  Sets
 * whose contents were range-compacted or had null handles stripped use
 * the actual written size recorded by write_set_data() instead of the
 * in-memory handle count.
 */
ErrorCode WriteHDF5::write_sets( double* times )
{
    mhdf_Status status;
    ErrorCode rval;
    long first_id, size;
    hid_t table;
    CpuTimer timer;

    CHECK_OPEN_HANDLES;
    /* If no sets, just return success */
    if( !writeSets ) return MB_SUCCESS;

    debug_barrier();
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Write set parents */
    if( writeSetParents )
    {
        topState.start( "writing parent lists for local sets" );
        table = mhdf_openSetParents( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetParents", size );

        rval = write_set_data( WriteUtilIface::PARENTS, table, track );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_PARENT] = timer.time_elapsed();
        track.all_reduce();
    }

    /* Write set children */
    if( writeSetChildren )
    {
        topState.start( "writing child lists for local sets" );
        table = mhdf_openSetChildren( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetChildren", size );

        rval = write_set_data( WriteUtilIface::CHILDREN, table, track );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_CHILD] = timer.time_elapsed();
        track.all_reduce();
    }

    /* Write set contents.  write_set_data() reports back which sets were
     * range-compacted or null-stripped, and their actual written sizes,
     * for use when writing the meta table below. */
    Range ranged_sets, null_stripped_sets;
    std::vector< long > set_sizes;
    if( writeSetContents )
    {
        topState.start( "writing content lists for local sets" );
        table = mhdf_openSetData( filePtr, &size, &status );
        CHK_MHDF_ERR_0( status );
        IODebugTrack track( debugTrack, "SetContents", size );

        rval = write_set_data( WriteUtilIface::CONTENTS, table, track, &ranged_sets, &null_stripped_sets, &set_sizes );
        topState.end( rval );
        CHK_MB_ERR_1( rval, table, status );

        mhdf_closeData( filePtr, table, &status );
        CHK_MHDF_ERR_0( status );

        times[SET_CONTENT] = timer.time_elapsed();
        track.all_reduce();
    }
    assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );

    /* Write set description table */

    debug_barrier();
    topState.start( "writing descriptions of local sets" );
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Open the table */
    table = mhdf_openSetMeta( filePtr, &size, &first_id, &status );
    CHK_MHDF_ERR_0( status );
    IODebugTrack track_meta( debugTrack, "SetMeta", size );

    /* Some debug stuff */
    debug_barrier();
    dbgOut.printf( 2, "Writing %lu non-shared sets\n", (unsigned long)setSet.range.size() );
    dbgOut.print( 3, "Non-shared sets", setSet.range );

    /* Counts and buffers and such.  Each meta record is four
     * mhdf_index_t values: contents-end, children-end, parents-end,
     * and flags. */
    mhdf_index_t* const buffer     = reinterpret_cast< mhdf_index_t* >( dataBuffer );
    const size_t buffer_size       = bufferSize / ( 4 * sizeof( mhdf_index_t ) );
    const size_t num_local_writes  = ( setSet.range.size() + buffer_size - 1 ) / buffer_size;
    const size_t num_global_writes = ( setSet.max_num_ents + buffer_size - 1 ) / buffer_size;
    assert( num_local_writes <= num_global_writes );
    assert( num_global_writes > 0 );

    /* data about sets for which number of handles written is
     * not the same as the number of handles in the set
     * (range-compacted or null handles stripped out)
     */
    Range::const_iterator i                       = setSet.range.begin();
    Range::const_iterator r                       = ranged_sets.begin();
    Range::const_iterator s                       = null_stripped_sets.begin();
    // NOTE(review): assumes mhdf_index_t is 'long' so this iterator type
    // matches std::vector<long> set_sizes -- confirm against mhdf.h.
    std::vector< mhdf_index_t >::const_iterator n = set_sizes.begin();
    assert( ranged_sets.size() + null_stripped_sets.size() == set_sizes.size() );

    /* We write the end index for each list, rather than the count */
    mhdf_index_t prev_contents_end = setContentsOffset - 1;
    mhdf_index_t prev_children_end = setChildrenOffset - 1;
    mhdf_index_t prev_parents_end  = setParentsOffset - 1;

    /* While there is more data to write */
    size_t offset                                    = setSet.offset;
    std::vector< SpecialSetData >::const_iterator si = specialSets.begin();
    for( size_t w = 0; w < num_local_writes; ++w )
    {
        // Get a buffer full of data
        size_t count = 0;
        while( count < buffer_size && i != setSet.range.end() )
        {
            // Get set properties
            long num_ent, num_child, num_parent;
            unsigned long flags;
            if( si != specialSets.end() && si->setHandle == *i )
            {
                // "Special" (parallel shared) set: sizes come from the
                // pre-computed ID lists rather than from the database.
                flags      = si->setFlags;
                num_ent    = si->contentIds.size();
                num_child  = si->childIds.size();
                num_parent = si->parentIds.size();
                ++si;
                if( r != ranged_sets.end() && *i == *r )
                {
                    assert( flags & mhdf_SET_RANGE_BIT );
                    ++r;
                    ++n;
                }
                else if( s != null_stripped_sets.end() && *i == *s )
                {
                    ++s;
                    ++n;
                }
            }
            else
            {
                assert( si == specialSets.end() || si->setHandle > *i );

                // Get set properties
                rval = get_set_info( *i, num_ent, num_child, num_parent, flags );
                CHK_MB_ERR_1( rval, table, status );

                // Check if size is something other than num handles in set
                if( r != ranged_sets.end() && *i == *r )
                {
                    num_ent = *n;
                    ++r;
                    ++n;
                    flags |= mhdf_SET_RANGE_BIT;
                }
                else if( s != null_stripped_sets.end() && *i == *s )
                {
                    num_ent = *n;
                    ++s;
                    ++n;
                }
            }

            // Put data in buffer: running end-indices, not counts.
            mhdf_index_t* local = buffer + 4 * count;
            prev_contents_end += num_ent;
            prev_children_end += num_child;
            prev_parents_end += num_parent;
            local[0] = prev_contents_end;
            local[1] = prev_children_end;
            local[2] = prev_parents_end;
            local[3] = flags;

            // Iterate
            ++count;
            ++i;
        }

        // Write the data
        mhdf_writeSetMetaWithOpt( table, offset, count, MHDF_INDEX_TYPE, buffer, writeProp, &status );
        CHK_MHDF_ERR_1( status, table );
        track_meta.record_io( offset, count );
        offset += count;
    }
    assert( r == ranged_sets.end() );
    assert( s == null_stripped_sets.end() );
    assert( n == set_sizes.end() );

    /* If doing parallel write with collective IO, do null write
     * calls because other procs aren't done yet and write calls
     * are collective */
    for( size_t w = num_local_writes; w != num_global_writes; ++w )
    {
        mhdf_writeSetMetaWithOpt( table, 0, 0, MHDF_INDEX_TYPE, 0, writeProp, &status );
        CHK_MHDF_ERR_1( status, table );
    }

    topState.end();
    mhdf_closeData( filePtr, table, &status );
    CHK_MHDF_ERR_0( status );

    times[SET_META] = timer.time_elapsed();
    track_meta.all_reduce();

    return MB_SUCCESS;
}
01549 
template < class HandleRangeIter >
inline size_t count_num_handles( HandleRangeIter iter, HandleRangeIter end )
{
    // Total number of individual handles in a sequence of inclusive
    // [first,last] handle pairs.
    size_t total = 0;
    while( iter != end )
    {
        total += ( iter->second - iter->first ) + 1;
        ++iter;
    }
    return total;
}
01559 
template < class HandleRangeIter >
inline ErrorCode range_to_id_list_templ( HandleRangeIter begin, HandleRangeIter end,
                                         const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
                                         WriteHDF5::wid_t* array )
{
    // Expand a sequence of inclusive [first,last] handle pairs into a flat
    // list of file IDs, one per handle, written into 'array'.  Handles with
    // no entry in 'idMap' produce a 0 entry and cause MB_ENTITY_NOT_FOUND to
    // be returned (after all handles are processed); otherwise MB_SUCCESS.
    // 'array' must have room for count_num_handles(begin, end) entries.
    ErrorCode rval                                          = MB_SUCCESS;
    RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();
    WriteHDF5::wid_t* i                                     = array;
    for( HandleRangeIter pi = begin; pi != end; ++pi )
    {
        EntityHandle h = pi->first;
        while( h <= pi->second )
        {
            // Handles are visited in increasing order, so the previous match
            // serves as a lower-bound hint for the next lookup.
            ri = idMap.lower_bound( ri, idMap.end(), h );
            if( ri == idMap.end() || ri->begin > h )
            {
                // Handle is not mapped to a file ID: emit 0 and continue.
                rval = MB_ENTITY_NOT_FOUND;
                *i   = 0;
                ++i;
                ++h;
                continue;
            }

            // compute the last available value of the found target range (ri iterator)
            WriteHDF5::wid_t last_valid_input_value_in_current_map_range = ri->begin + ri->count - 1;
            // limit the number of steps we do on top of h so we do not overflow the output range
            // span
            WriteHDF5::wid_t step_until = std::min( last_valid_input_value_in_current_map_range, pi->second );
            WriteHDF5::wid_t n          = step_until - h + 1;
            assert( n > 0 );  // We must at least step 1

            // IDs within one map subrange are contiguous: emit n consecutive IDs.
            WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
            for( WriteHDF5::wid_t j = 0; j < n; ++i, ++j )
                *i = id + j;
            h += n;
        }
    }

    assert( i == array + count_num_handles( begin, end ) );
    return rval;
}
01601 
template < class HandleRangeIter >
inline ErrorCode range_to_blocked_list_templ( HandleRangeIter begin, HandleRangeIter end,
                                              const RangeMap< EntityHandle, WriteHDF5::wid_t >& idMap,
                                              std::vector< WriteHDF5::wid_t >& output_id_list, bool& ranged_list )
{
    // Convert a sequence of inclusive [first,last] handle pairs into either
    //  * a "ranged" list of (start_id, count) pairs (ranged_list == true), or
    //  * a flat list of file IDs (ranged_list == false),
    // whichever is more compact.  Handles not present in 'idMap' are
    // silently skipped.  Always returns MB_SUCCESS.
    output_id_list.clear();
    if( begin == end )
    {
        ranged_list = false;
        return MB_SUCCESS;
    }

    // First try ranged format, but give up if we reach the
    // non-range format size.
    RangeMap< EntityHandle, WriteHDF5::wid_t >::iterator ri = idMap.begin();

    const size_t num_handles = count_num_handles( begin, end );
    // If we end up with more than this many range blocks, then
    // we're better off just writing the set as a simple list
    size_t pairs_remaining = num_handles / 2;
    for( HandleRangeIter pi = begin; pi != end; ++pi )
    {
        EntityHandle h                              = pi->first;
        WriteHDF5::wid_t local_mapped_from_subrange = 0;
        while( h <= pi->second )
        {
            // Handles are visited in increasing order, so the previous match
            // serves as a lower-bound hint for the next lookup.
            ri = idMap.lower_bound( ri, idMap.end(), h );
            if( ri == idMap.end() || ri->begin > h )
            {
                // Not an exported entity: skip this handle.
                ++h;
                continue;
            }

            // Number of handles left in this input pair, capped to the size
            // of the matched map subrange.
            WriteHDF5::wid_t n = pi->second - pi->first + 1 - local_mapped_from_subrange;
            if( n > ri->count ) n = ri->count;

            WriteHDF5::wid_t id = ri->value + ( h - ri->begin );
            // see if we can go to the end of the range
            if( id + n > ri->value + ri->count )  // we have to reduce n, because we cannot go over next subrange
            {
                if( ri->value + ri->count - id > 0 ) n = ri->value + ri->count - id;
            }

            // See if we can append it to the previous range
            if( !output_id_list.empty() && output_id_list[output_id_list.size() - 2] + output_id_list.back() == id )
            { output_id_list.back() += n; }

            // If we ran out of space, (or set is empty) just do list format
            else if( !pairs_remaining )
            {
                ranged_list = false;
                output_id_list.resize( num_handles );
                range_to_id_list_templ( begin, end, idMap, &output_id_list[0] );
                // Unmapped handles were emitted as 0 above; drop them.
                output_id_list.erase( std::remove( output_id_list.begin(), output_id_list.end(), 0u ),
                                      output_id_list.end() );
                return MB_SUCCESS;
            }

            // Otherwise start a new (start_id, count) pair.
            else
            {
                --pairs_remaining;
                output_id_list.push_back( id );
                output_id_list.push_back( n );
            }
            local_mapped_from_subrange += n;  // we already mapped so many
            h += n;
        }
    }

    ranged_list = true;
    return MB_SUCCESS;
}
01675 
ErrorCode WriteHDF5::range_to_blocked_list( const Range& input_range, std::vector< wid_t >& output_id_list,
                                            bool& ranged_list )
{
    // Convert a Range of entity handles to a file-ID list, delegating to the
    // template over the Range's internal (first,last) pair representation.
    return range_to_blocked_list_templ( input_range.const_pair_begin(), input_range.const_pair_end(), idMap,
                                        output_id_list, ranged_list );
}
01682 
01683 ErrorCode WriteHDF5::range_to_blocked_list( const EntityHandle* array, size_t num_input_ranges,
01684                                             std::vector< wid_t >& output_id_list, bool& ranged_list )
01685 {
01686     // We assume this in the cast on the following line
01687     typedef std::pair< EntityHandle, EntityHandle > mtype;
01688     assert( sizeof( mtype ) == 2 * sizeof( EntityHandle ) );
01689     const mtype* arr = reinterpret_cast< const mtype* >( array );
01690     return range_to_blocked_list_templ( arr, arr + num_input_ranges, idMap, output_id_list, ranged_list );
01691 }
01692 
ErrorCode WriteHDF5::range_to_id_list( const Range& range, wid_t* array )
{
    // Write one file ID per handle in 'range' into 'array' (caller supplies
    // range.size() entries); unmapped handles become 0.
    return range_to_id_list_templ( range.const_pair_begin(), range.const_pair_end(), idMap, array );
}
01697 
01698 ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input, size_t input_len, wid_t* output, size_t& output_len,
01699                                         bool remove_zeros )
01700 {
01701     const EntityHandle* i_iter = input;
01702     const EntityHandle* i_end  = input + input_len;
01703     wid_t* o_iter              = output;
01704     for( ; i_iter != i_end; ++i_iter )
01705     {
01706         wid_t id = idMap.find( *i_iter );
01707         if( !remove_zeros || id != 0 )
01708         {
01709             *o_iter = id;
01710             ++o_iter;
01711         }
01712     }
01713     output_len = o_iter - output;
01714 
01715     return MB_SUCCESS;
01716 }
01717 
01718 ErrorCode WriteHDF5::vector_to_id_list( const std::vector< EntityHandle >& input, std::vector< wid_t >& output,
01719                                         bool remove_zeros )
01720 {
01721     output.resize( input.size() );
01722     size_t output_size = 0;
01723     ErrorCode rval     = vector_to_id_list( &input[0], input.size(), &output[0], output_size, remove_zeros );
01724     output.resize( output_size );
01725     return rval;
01726 }
01727 
ErrorCode WriteHDF5::vector_to_id_list( const EntityHandle* input, wid_t* output, size_t count )
{
    // Convenience overload: translate 'count' handles, keeping zero entries
    // for unmapped handles.  The output length is discarded (it is always
    // 'count' when zeros are kept).
    size_t output_len;
    return vector_to_id_list( input, count, output, output_len, false );
}
01733 
01734 inline ErrorCode WriteHDF5::get_adjacencies( EntityHandle entity, std::vector< wid_t >& adj )
01735 {
01736     const EntityHandle* adj_array;
01737     int num_adj;
01738     ErrorCode rval = writeUtil->get_adjacencies( entity, adj_array, num_adj );
01739     if( MB_SUCCESS != rval ) return error( rval );
01740 
01741     size_t j = 0;
01742     adj.resize( num_adj );
01743     for( int i = 0; i < num_adj; ++i )
01744         if( wid_t id = idMap.find( adj_array[i] ) ) adj[j++] = id;
01745     adj.resize( j );
01746 
01747     return MB_SUCCESS;
01748 }
01749 
01750 ErrorCode WriteHDF5::write_adjacencies( const ExportSet& elements )
01751 {
01752     ErrorCode rval;
01753     mhdf_Status status;
01754     Range::const_iterator iter;
01755     const Range::const_iterator end = elements.range.end();
01756     std::vector< wid_t > adj_list;
01757 
01758     CHECK_OPEN_HANDLES;
01759 
01760     debug_barrier();
01761 
01762     /* Count Adjacencies */
01763     long count = 0;
01764     // for (iter = elements.range.begin(); iter != end; ++iter) {
01765     //  adj_list.clear();
01766     //  rval = get_adjacencies(*iter, adj_list);CHK_MB_ERR_0(rval);
01767     //
01768     //  if (adj_list.size() > 0)
01769     //    count += adj_list.size() + 2;
01770     //}
01771 
01772     // if (count == 0)
01773     //  return MB_SUCCESS;
01774 
01775     long offset = elements.adj_offset;
01776     if( elements.max_num_adjs == 0 ) return MB_SUCCESS;
01777 
01778     /* Create data list */
01779     hid_t table = mhdf_openAdjacency( filePtr, elements.name(), &count, &status );
01780     CHK_MHDF_ERR_0( status );
01781     IODebugTrack track( debugTrack, "Adjacencies", count );
01782 
01783     /* Write data */
01784     wid_t* buffer   = (wid_t*)dataBuffer;
01785     long chunk_size = bufferSize / sizeof( wid_t );
01786     long num_writes = ( elements.max_num_adjs + chunk_size - 1 ) / chunk_size;
01787     (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01788     count = 0;
01789     for( iter = elements.range.begin(); iter != end; ++iter )
01790     {
01791         adj_list.clear();
01792         rval = get_adjacencies( *iter, adj_list );
01793         CHK_MB_ERR_1( rval, table, status );
01794         if( adj_list.size() == 0 ) continue;
01795 
01796         // If buffer is full, flush it
01797         if( count + adj_list.size() + 2 > (unsigned long)chunk_size )
01798         {
01799             dbgOut.print( 3, " writing adjacency chunk.\n" );
01800             track.record_io( offset, count );
01801             mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
01802             CHK_MHDF_ERR_1( status, table );
01803             (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01804 
01805             offset += count;
01806             count = 0;
01807         }
01808 
01809         buffer[count++] = idMap.find( *iter );
01810         buffer[count++] = adj_list.size();
01811 
01812         assert( adj_list.size() + 2 < (unsigned long)chunk_size );
01813         memcpy( buffer + count, &adj_list[0], adj_list.size() * sizeof( wid_t ) );
01814         count += adj_list.size();
01815     }
01816 
01817     if( count )
01818     {
01819         dbgOut.print( 2, " writing final adjacency chunk.\n" );
01820         mhdf_writeAdjacencyWithOpt( table, offset, count, id_type, buffer, writeProp, &status );
01821         CHK_MHDF_ERR_1( status, table );
01822 
01823         offset += count;
01824         count = 0;
01825         --num_writes;
01826     }
01827 
01828     // Do empty writes if necessary for parallel collective IO
01829     if( collectiveIO )
01830     {
01831         while( num_writes > 0 )
01832         {
01833             --num_writes;
01834             assert( writeProp != H5P_DEFAULT );
01835             dbgOut.print( 2, " writing empty adjacency chunk.\n" );
01836             mhdf_writeAdjacencyWithOpt( table, offset, 0, id_type, 0, writeProp, &status );
01837             CHK_MHDF_ERR_1( status, table );
01838         }
01839     }
01840 
01841     mhdf_closeData( filePtr, table, &status );
01842     CHK_MHDF_ERR_0( status );
01843 
01844     track.all_reduce();
01845     return MB_SUCCESS;
01846 }
01847 
ErrorCode WriteHDF5::write_tag( const TagDesc& tag_data, double* times )
{
    // Write all file data for one tag: the sparse representation (explicit
    // entity list + values) and/or a dense table per export set listed in
    // tag_data.dense_list.  Elapsed time is accumulated into 'times' by
    // category (VARLEN_TAG_TIME / SPARSE_TAG_TIME / DENSE_TAG_TIME).
    std::string name;
    ErrorCode rval = iFace->tag_get_name( tag_data.tag_id, name );
    if( MB_SUCCESS != rval ) return error( rval );

    CHECK_OPEN_HANDLES;
    debug_barrier();
    dbgOut.tprintf( 1, "Writing tag: \"%s\"\n", name.c_str() );

    int moab_size, elem_size, array_len;
    DataType moab_type;
    mhdf_TagDataType mhdf_type;
    hid_t hdf5_type;
    rval = get_tag_size( tag_data.tag_id, moab_type, moab_size, elem_size, array_len, mhdf_type, hdf5_type );
    if( MB_SUCCESS != rval ) return error( rval );

    CpuTimer timer;
    if( array_len == MB_VARIABLE_LENGTH && tag_data.write_sparse )
    {
        // Variable-length tags get the var-len sparse format.
        dbgOut.printf( 2, "Writing sparse data for var-len tag: \"%s\"\n", name.c_str() );
        rval = write_var_len_tag( tag_data, name, moab_type, hdf5_type, elem_size );
        times[VARLEN_TAG_TIME] += timer.time_elapsed();
    }
    else
    {
        int data_len = elem_size;
        // For MB_TYPE_BIT the array length is not folded into the value size.
        if( moab_type != MB_TYPE_BIT ) data_len *= array_len;
        if( tag_data.write_sparse )
        {
            dbgOut.printf( 2, "Writing sparse data for tag: \"%s\"\n", name.c_str() );
            rval = write_sparse_tag( tag_data, name, moab_type, hdf5_type, data_len );
            times[SPARSE_TAG_TIME] += timer.time_elapsed();
        }
        // One dense table per export set; stop at the first failure.
        for( size_t i = 0; MB_SUCCESS == rval && i < tag_data.dense_list.size(); ++i )
        {
            const ExportSet* set = find( tag_data.dense_list[i] );
            assert( 0 != set );
            debug_barrier();
            dbgOut.printf( 2, "Writing dense data for tag: \"%s\" on group \"%s\"\n", name.c_str(), set->name() );
            subState.start( "writing dense data for tag: ", ( name + ":" + set->name() ).c_str() );
            rval = write_dense_tag( tag_data, *set, name, moab_type, hdf5_type, data_len );
            subState.end( rval );
        }
        times[DENSE_TAG_TIME] += timer.time_elapsed();
    }

    H5Tclose( hdf5_type );
    return MB_SUCCESS == rval ? MB_SUCCESS : error( rval );
}
01898 
01899 ErrorCode WriteHDF5::write_sparse_ids( const TagDesc& tag_data, const Range& range, hid_t id_table, size_t table_size,
01900                                        const char* name )
01901 {
01902     ErrorCode rval;
01903     mhdf_Status status;
01904 
01905     CHECK_OPEN_HANDLES;
01906 
01907     std::string tname( name ? name : "<UNKNOWN TAG?>" );
01908     tname += " - Ids";
01909     IODebugTrack track( debugTrack, tname, table_size );
01910 
01911     // Set up data buffer for writing IDs
01912     size_t chunk_size = bufferSize / sizeof( wid_t );
01913     wid_t* id_buffer  = (wid_t*)dataBuffer;
01914 
01915     // Write IDs of tagged entities.
01916     long remaining  = range.size();
01917     long offset     = tag_data.sparse_offset;
01918     long num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
01919     if( tag_data.max_num_ents )
01920     {
01921         assert( tag_data.max_num_ents >= (unsigned long)remaining );
01922         num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
01923     }
01924     Range::const_iterator iter = range.begin();
01925     while( remaining )
01926     {
01927         (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
01928 
01929         // Write "chunk_size" blocks of data
01930         long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
01931         remaining -= count;
01932         Range::const_iterator stop = iter;
01933         stop += count;
01934         Range tmp;
01935         ;
01936         tmp.merge( iter, stop );
01937         iter = stop;
01938         assert( tmp.size() == (unsigned)count );
01939 
01940         rval = range_to_id_list( tmp, id_buffer );
01941         CHK_MB_ERR_0( rval );
01942 
01943         // Write the data
01944         dbgOut.print( 3, " writing sparse tag entity chunk.\n" );
01945         track.record_io( offset, count );
01946         mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, count, id_type, id_buffer, writeProp, &status );
01947         CHK_MHDF_ERR_0( status );
01948 
01949         offset += count;
01950         --num_writes;
01951     }  // while (remaining)
01952 
01953     // Do empty writes if necessary for parallel collective IO
01954     if( collectiveIO )
01955     {
01956         while( num_writes-- )
01957         {
01958             assert( writeProp != H5P_DEFAULT );
01959             dbgOut.print( 3, " writing empty sparse tag entity chunk.\n" );
01960             mhdf_writeSparseTagEntitiesWithOpt( id_table, offset, 0, id_type, 0, writeProp, &status );
01961             CHK_MHDF_ERR_0( status );
01962         }
01963     }
01964 
01965     track.all_reduce();
01966     return MB_SUCCESS;
01967 }
01968 
01969 ErrorCode WriteHDF5::write_sparse_tag( const TagDesc& tag_data, const std::string& name, DataType mb_data_type,
01970                                        hid_t value_type, int value_type_size )
01971 {
01972     ErrorCode rval;
01973     mhdf_Status status;
01974     hid_t tables[3];
01975     long table_size, data_size;
01976 
01977     CHECK_OPEN_HANDLES;
01978 
01979     // Get entities for which to write tag values
01980     Range range;
01981     rval = get_sparse_tagged_entities( tag_data, range );
01982 
01983     // Open tables to write info
01984     mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_size, tables, &status );
01985     CHK_MHDF_ERR_0( status );
01986     assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
01987     // Fixed-length tag
01988     assert( table_size == data_size );
01989 
01990     // Write IDs for tagged entities
01991     subState.start( "writing sparse ids for tag: ", name.c_str() );
01992     rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
01993     subState.end( rval );
01994     CHK_MB_ERR_2( rval, tables, status );
01995     mhdf_closeData( filePtr, tables[0], &status );
01996     CHK_MHDF_ERR_1( status, tables[1] );
01997 
01998     // Set up data buffer for writing tag values
01999     IODebugTrack track( debugTrack, name + " Data", data_size );
02000     subState.start( "writing sparse values for tag: ", name.c_str() );
02001     rval = write_tag_values( tag_data.tag_id, tables[1], tag_data.sparse_offset, range, mb_data_type, value_type,
02002                              value_type_size, tag_data.max_num_ents, track );
02003     subState.end( rval );
02004     CHK_MB_ERR_0( rval );
02005     mhdf_closeData( filePtr, tables[1], &status );
02006     CHK_MHDF_ERR_0( status );
02007 
02008     track.all_reduce();
02009     return MB_SUCCESS;
02010 }
02011 
02012 ErrorCode WriteHDF5::write_var_len_indices( const TagDesc& tag_data, const Range& range, hid_t idx_table,
02013                                             size_t table_size, int /*type_size*/, const char* name )
02014 {
02015     ErrorCode rval;
02016     mhdf_Status status;
02017 
02018     CHECK_OPEN_HANDLES;
02019 
02020     std::string tname( name ? name : "<UNKNOWN TAG?>" );
02021     tname += " - End Indices";
02022     IODebugTrack track( debugTrack, tname, table_size );
02023 
02024     // Set up data buffer for writing indices
02025     size_t chunk_size        = bufferSize / ( std::max( sizeof( void* ), sizeof( long ) ) + sizeof( int ) );
02026     mhdf_index_t* idx_buffer = (mhdf_index_t*)dataBuffer;
02027     const void** junk        = (const void**)dataBuffer;
02028     int* size_buffer         = (int*)( dataBuffer + chunk_size * std::max( sizeof( void* ), sizeof( mhdf_index_t ) ) );
02029 
02030     // Write IDs of tagged entities.
02031     long data_offset  = tag_data.var_data_offset - 1;  // Offset at which to write data buffer
02032     size_t remaining  = range.size();
02033     size_t offset     = tag_data.sparse_offset;
02034     size_t num_writes = ( remaining + chunk_size - 1 ) / chunk_size;
02035     if( tag_data.max_num_ents )
02036     {
02037         assert( tag_data.max_num_ents >= (unsigned long)remaining );
02038         num_writes = ( tag_data.max_num_ents + chunk_size - 1 ) / chunk_size;
02039     }
02040     Range::const_iterator iter = range.begin();
02041     while( remaining )
02042     {
02043         (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );
02044 
02045         // Write "chunk_size" blocks of data
02046         size_t count = remaining > chunk_size ? chunk_size : remaining;
02047         remaining -= count;
02048         Range::const_iterator stop = iter;
02049         stop += count;
02050         Range tmp;
02051         tmp.merge( iter, stop );
02052         iter = stop;
02053         assert( tmp.size() == (unsigned)count );
02054 
02055         rval = iFace->tag_get_by_ptr( tag_data.tag_id, tmp, junk, size_buffer );
02056         CHK_MB_ERR_0( rval );
02057 
02058         // Calculate end indices
02059         dbgOut.print( 3, " writing var-len tag offset chunk.\n" );
02060         track.record_io( offset, count );
02061         for( size_t i = 0; i < count; ++i )
02062         {
02063             data_offset += size_buffer[i];
02064             idx_buffer[i] = data_offset;
02065         }
02066 
02067         // Write
02068         mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, count, MHDF_INDEX_TYPE, idx_buffer, writeProp, &status );
02069         CHK_MHDF_ERR_0( status );
02070 
02071         offset += count;
02072         --num_writes;
02073     }  // while (remaining)
02074 
02075     // Do empty writes if necessary for parallel collective IO
02076     if( collectiveIO )
02077     {
02078         while( num_writes-- )
02079         {
02080             assert( writeProp != H5P_DEFAULT );
02081             dbgOut.print( 3, " writing empty sparse tag entity chunk.\n" );
02082             mhdf_writeSparseTagIndicesWithOpt( idx_table, offset, 0, id_type, 0, writeProp, &status );
02083             CHK_MHDF_ERR_0( status );
02084         }
02085     }
02086 
02087     track.all_reduce();
02088     return MB_SUCCESS;
02089 }
02090 
ErrorCode WriteHDF5::write_var_len_data( const TagDesc& tag_data, const Range& range, hid_t table, size_t table_size,
                                         bool handle_tag, hid_t hdf_type, int type_size, const char* name )
{
    // Write the concatenated values of a variable-length tag in buffer-sized
    // chunks.  A single entity's value list may be larger than the buffer,
    // so any portion that does not fit is carried over via prev_data /
    // prev_len and written at the start of the next chunk.  When handle_tag
    // is true, values are entity handles and are converted to file IDs.
    ErrorCode rval;
    mhdf_Status status;

    CHECK_OPEN_HANDLES;
    assert( !handle_tag || sizeof( EntityHandle ) == type_size );

    std::string tname( name ? name : "<UNKNOWN TAG?>" );
    tname += " - Values";
    IODebugTrack track( debugTrack, tname, table_size );

    // Number of values (not bytes) the buffer can hold.
    const size_t buffer_size = bufferSize / type_size;

    // Upper bound on write calls; with collective IO every process must
    // issue the global maximum number of calls.
    size_t num_writes = ( table_size + buffer_size - 1 ) / buffer_size;
    if( collectiveIO )
    {
        assert( tag_data.max_num_vals > 0 );
        num_writes = ( tag_data.max_num_vals + buffer_size - 1 ) / buffer_size;
    }

    unsigned char* buffer      = (unsigned char*)dataBuffer;
    const void* prev_data      = 0;  // Data left over from prev iteration
    size_t prev_len            = 0;
    Range::const_iterator iter = range.begin();
    long offset                = tag_data.var_data_offset;
    while( prev_data || iter != range.end() )
    {
        size_t count = 0;
        // First flush any carry-over from the previous iteration.
        if( prev_data )
        {
            size_t len;
            const void* ptr = prev_data;
            if( prev_len <= buffer_size )
            {
                // Carry-over fits entirely in this chunk.
                len       = prev_len;
                prev_data = 0;
                prev_len  = 0;
            }
            else
            {
                // Carry-over still exceeds the buffer: fill the whole chunk
                // and carry the remainder forward again.
                len       = buffer_size;
                prev_data = ( (const char*)prev_data ) + buffer_size * type_size;
                prev_len -= buffer_size;
            }

            if( handle_tag )
                convert_handle_tag( (const EntityHandle*)ptr, (EntityHandle*)buffer, len );
            else
                memcpy( buffer, ptr, len * type_size );
            count = len;
        }

        // Then pack as many whole/partial entity value lists as fit.
        for( ; count < buffer_size && iter != range.end(); ++iter )
        {
            int len;
            const void* ptr;
            rval = iFace->tag_get_by_ptr( tag_data.tag_id, &*iter, 1, &ptr, &len );
            CHK_MB_ERR_0( rval );
            if( len + count > buffer_size )
            {
                // Value list overflows the chunk: write what fits now and
                // carry the tail to the next iteration.
                prev_len  = len + count - buffer_size;
                len       = buffer_size - count;
                prev_data = ( (const char*)ptr ) + len * type_size;
            }

            if( handle_tag )
                convert_handle_tag( (const EntityHandle*)ptr, ( (EntityHandle*)buffer ) + count, len );
            else
                memcpy( buffer + count * type_size, ptr, len * type_size );
            count += len;
        }

        track.record_io( offset, count );
        mhdf_writeTagValuesWithOpt( table, offset, count, hdf_type, buffer, writeProp, &status );
        offset+=count;
        CHK_MHDF_ERR_0( status );
        --num_writes;
    }

    // Do empty writes if necessary for parallel collective IO
    if( collectiveIO )
    {
        while( num_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.print( 3, " writing empty var-len tag data chunk.\n" );
            mhdf_writeTagValuesWithOpt( table, 0, 0, hdf_type, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
        }
    }

    track.all_reduce();
    return MB_SUCCESS;
}
02187 
02188 ErrorCode WriteHDF5::write_var_len_tag( const TagDesc& tag_data, const std::string& name, DataType mb_data_type,
02189                                         hid_t hdf_type, int type_size )
02190 {
02191     ErrorCode rval;
02192     mhdf_Status status;
02193     hid_t tables[3];
02194     long table_size;
02195     long data_table_size;
02196 
02197     CHECK_OPEN_HANDLES;
02198 
02199     // Get entities for which to write tag values
02200     Range range;
02201     rval = get_sparse_tagged_entities( tag_data, range );
02202 
02203     // Open tables to write info
02204     mhdf_openSparseTagData( filePtr, name.c_str(), &table_size, &data_table_size, tables, &status );
02205     CHK_MHDF_ERR_0( status );
02206     assert( range.size() + tag_data.sparse_offset <= (unsigned long)table_size );
02207 
02208     // Write IDs for tagged entities
02209     subState.start( "writing ids for var-len tag: ", name.c_str() );
02210     rval = write_sparse_ids( tag_data, range, tables[0], table_size, name.c_str() );
02211     subState.end( rval );
02212     CHK_MB_ERR_2( rval, tables, status );
02213     mhdf_closeData( filePtr, tables[0], &status );
02214     CHK_MHDF_ERR_2( status, tables + 1 );
02215 
02216     // Write offsets for tagged entities
02217     subState.start( "writing indices for var-len tag: ", name.c_str() );
02218     rval = write_var_len_indices( tag_data, range, tables[2], table_size, type_size, name.c_str() );
02219     subState.end( rval );
02220     CHK_MB_ERR_1( rval, tables[1], status );
02221     mhdf_closeData( filePtr, tables[2], &status );
02222     CHK_MHDF_ERR_1( status, tables[1] );
02223 
02224     // Write the actual tag data
02225     subState.start( "writing values for var-len tag: ", name.c_str() );
02226     rval = write_var_len_data( tag_data, range, tables[1], data_table_size, mb_data_type == MB_TYPE_HANDLE, hdf_type,
02227                                type_size, name.c_str() );
02228     subState.end( rval );
02229     CHK_MB_ERR_0( rval );
02230     mhdf_closeData( filePtr, tables[1], &status );
02231     CHK_MHDF_ERR_0( status );
02232 
02233     return MB_SUCCESS;
02234 }
02235 
02236 ErrorCode WriteHDF5::write_dense_tag( const TagDesc& tag_data, const ExportSet& elem_data, const std::string& name,
02237                                       DataType mb_data_type, hid_t value_type, int value_type_size )
02238 {
02239     CHECK_OPEN_HANDLES;
02240 
02241     // Open tables to write info
02242     mhdf_Status status;
02243     long table_size;
02244     hid_t table = mhdf_openDenseTagData( filePtr, name.c_str(), elem_data.name(), &table_size, &status );
02245     CHK_MHDF_ERR_0( status );
02246     assert( elem_data.range.size() + elem_data.offset <= (unsigned long)table_size );
02247 
02248     IODebugTrack track( debugTrack, name + " " + elem_data.name() + " Data", table_size );
02249     ErrorCode rval = write_tag_values( tag_data.tag_id, table, elem_data.offset, elem_data.range, mb_data_type,
02250                                        value_type, value_type_size, elem_data.max_num_ents, track );
02251     CHK_MB_ERR_0( rval );
02252     mhdf_closeData( filePtr, table, &status );
02253     CHK_MHDF_ERR_0( status );
02254 
02255     return MB_SUCCESS;
02256 }
02257 
/** Write tag values for the entities in \a range_in into \a data_table,
 *  starting at row \a offset_in, in buffer-sized chunks.
 *
 *  \param tag_id          MOAB tag whose values are written.
 *  \param data_table      Open HDF5 dataset to write into.
 *  \param offset_in       First row of the table to write.
 *  \param range_in        Entities whose tag values are written.
 *  \param mb_data_type    MOAB data type; handle-valued data is converted
 *                         to file ids before writing.
 *  \param value_type      HDF5 type of each value.
 *  \param value_type_size Size in bytes of one value (determines chunking).
 *  \param max_num_ents    Global max entity count over all procs; when
 *                         non-zero it fixes the number of collective write
 *                         calls every processor must make.
 *  \param track           Debug tracker recording written table regions.
 */
ErrorCode WriteHDF5::write_tag_values( Tag tag_id, hid_t data_table, unsigned long offset_in, const Range& range_in,
                                       DataType mb_data_type, hid_t value_type, int value_type_size,
                                       unsigned long max_num_ents, IODebugTrack& track )
{
    mhdf_Status status;

    CHECK_OPEN_HANDLES;

    // Set up data buffer for writing tag values.
    // chunk_size is how many values fit in the shared write buffer.
    size_t chunk_size = bufferSize / value_type_size;
    assert( chunk_size > 0 );
    char* tag_buffer = (char*)dataBuffer;

    // Write the tag values.
    // num_writes counts the write calls we will issue; when max_num_ents is
    // set (parallel collective IO) it is computed from the global maximum so
    // that every processor makes the same number of calls.
    size_t remaining           = range_in.size();
    size_t offset              = offset_in;
    Range::const_iterator iter = range_in.begin();
    long num_writes            = ( remaining + chunk_size - 1 ) / chunk_size;
    if( max_num_ents )
    {
        assert( max_num_ents >= remaining );
        num_writes = ( max_num_ents + chunk_size - 1 ) / chunk_size;
    }
    while( remaining )
    {
        (void)VALGRIND_MAKE_MEM_UNDEFINED( dataBuffer, bufferSize );

        // Write "chunk_size" blocks of data
        long count = (unsigned long)remaining > chunk_size ? chunk_size : remaining;
        remaining -= count;
        memset( tag_buffer, 0, count * value_type_size );
        // Carve the next 'count' entities off the front of the range.
        Range::const_iterator stop = iter;
        stop += count;
        Range range;
        range.merge( iter, stop );
        iter = stop;
        assert( range.size() == (unsigned)count );

        ErrorCode rval = iFace->tag_get_data( tag_id, range, tag_buffer );
        CHK_MB_ERR_0( rval );

        // Convert EntityHandles to file ids
        if( mb_data_type == MB_TYPE_HANDLE )
            convert_handle_tag( reinterpret_cast< EntityHandle* >( tag_buffer ),
                                count * value_type_size / sizeof( EntityHandle ) );

        // Write the data
        dbgOut.print( 2, " writing tag value chunk.\n" );
        track.record_io( offset, count );
        assert( value_type > 0 );
        mhdf_writeTagValuesWithOpt( data_table, offset, count, value_type, tag_buffer, writeProp, &status );
        CHK_MHDF_ERR_0( status );

        offset += count;
        --num_writes;
    }  // while (remaining)

    // Do empty writes if necessary for parallel collective IO: processors
    // with less data must still participate in every collective call.
    if( collectiveIO )
    {
        while( num_writes-- )
        {
            assert( writeProp != H5P_DEFAULT );
            dbgOut.print( 2, " writing empty tag value chunk.\n" );
            assert( value_type > 0 );
            mhdf_writeTagValuesWithOpt( data_table, offset, 0, value_type, 0, writeProp, &status );
            CHK_MHDF_ERR_0( status );
        }
    }

    track.all_reduce();
    return MB_SUCCESS;
}
02331 
02332 ErrorCode WriteHDF5::write_qa( const std::vector< std::string >& list )
02333 {
02334     const char* app  = "MOAB";
02335     const char* vers = MOAB_VERSION;
02336     char date_str[64];
02337     char time_str[64];
02338 
02339     CHECK_OPEN_HANDLES;
02340 
02341     std::vector< const char* > strs( list.size() ? list.size() : 4 );
02342     if( list.size() == 0 )
02343     {
02344         time_t t = time( NULL );
02345         tm* lt   = localtime( &t );
02346 #ifdef WIN32
02347         strftime( date_str, sizeof( date_str ), "%m/%d/%y", lt );  // VS 2008 does not support %D
02348         strftime( time_str, sizeof( time_str ), "%H:%M:%S", lt );  // VS 2008 does not support %T
02349 #else
02350         strftime( date_str, sizeof( date_str ), "%D", lt );
02351         strftime( time_str, sizeof( time_str ), "%T", lt );
02352 #endif
02353 
02354         strs[0] = app;
02355         strs[1] = vers;
02356         strs[2] = date_str;
02357         strs[3] = time_str;
02358     }
02359     else
02360     {
02361         for( unsigned int i = 0; i < list.size(); ++i )
02362             strs[i] = list[i].c_str();
02363     }
02364 
02365     mhdf_Status status;
02366     dbgOut.print( 2, " writing QA history.\n" );
02367     mhdf_writeHistory( filePtr, &strs[0], strs.size(), &status );
02368     CHK_MHDF_ERR_0( status );
02369 
02370     return MB_SUCCESS;
02371 }
02372 
02373 /*
02374 ErrorCode WriteHDF5::register_known_tag_types(Interface* iface)
02375 {
02376   hid_t int4, double16;
02377   hsize_t dim[1];
02378   int error = 0;
02379   ErrorCode rval;
02380 
02381   dim[0] = 4;
02382   int4 = H5Tarray_create(H5T_NATIVE_INT, 1, dim, NULL);
02383 
02384   dim[0] = 16;
02385   double16 = H5Tarray_create(H5T_NATIVE_DOUBLE, 1, dim, NULL);
02386 
02387   if (int4 < 0 || double16 < 0)
02388     error = 1;
02389 
02390   struct { const char* name; hid_t type; } list[] = {
02391     { GLOBAL_ID_TAG_NAME, H5T_NATIVE_INT } ,
02392     { MATERIAL_SET_TAG_NAME, H5T_NATIVE_INT },
02393     { DIRICHLET_SET_TAG_NAME, H5T_NATIVE_INT },
02394     { NEUMANN_SET_TAG_NAME, H5T_NATIVE_INT },
02395     { HAS_MID_NODES_TAG_NAME, int4 },
02396     { GEOM_DIMENSION_TAG_NAME, H5T_NATIVE_INT },
02397     { MESH_TRANSFORM_TAG_NAME, double16 },
02398     { 0, 0 } };
02399 
02400   for (int i = 0; list[i].name; ++i) {
02401     if (list[i].type < 1) {
02402       ++error;
02403       continue;
02404     }
02405 
02406     Tag handle;
02407 
02408     std::string name("__hdf5_tag_type_");
02409     name += list[i].name;
02410 
02411     rval = iface->tag_get_handle(name.c_str(), handle);
02412     if (MB_TAG_NOT_FOUND == rval) {
02413       rval = iface->tag_create(name.c_str(), sizeof(hid_t), MB_TAG_SPARSE, handle, NULL);
02414       if (MB_SUCCESS != rval) {
02415         ++error;
02416         continue;
02417       }
02418 
02419       hid_t copy_id = H5Tcopy(list[i].type);
02420       const EntityHandle mesh = 0;
02421       rval = iface->tag_set_data(handle, &mesh, 1, &copy_id);
02422       if (MB_SUCCESS != rval) {
02423         ++error;
02424         continue;
02425       }
02426     }
02427   }
02428 
02429   H5Tclose(int4);
02430   H5Tclose(double16);
02431   return error ? MB_FAILURE : MB_SUCCESS;
02432 }
02433 */
02434 
02435 ErrorCode WriteHDF5::gather_tags( const Tag* user_tag_list, int num_tags )
02436 {
02437     ErrorCode result;
02438     std::vector< Tag > tag_list;
02439     std::vector< Tag >::iterator t_itor;
02440     Range range;
02441 
02442     // Get list of Tags to write
02443     result = writeUtil->get_tag_list( tag_list, user_tag_list, num_tags );
02444     CHK_MB_ERR_0( result );
02445 
02446     // Get list of tags
02447     for( t_itor = tag_list.begin(); t_itor != tag_list.end(); ++t_itor )
02448     {
02449         // Add tag to export list
02450         TagDesc tag_data;
02451         tag_data.write_sparse    = false;
02452         tag_data.tag_id          = *t_itor;
02453         tag_data.sparse_offset   = 0;
02454         tag_data.var_data_offset = 0;
02455         tag_data.max_num_ents    = 0;
02456         tag_data.max_num_vals    = 0;
02457         tagList.push_back( tag_data );
02458     }
02459 
02460     return MB_SUCCESS;
02461 }
02462 
02463 // If we support parallel, then this function will have been
02464 // overridden with an alternate version in WriteHDF5Parallel
02465 // that supports parallel I/O.  If we're here
02466 // then MOAB was not built with support for parallel HDF5 I/O.
ErrorCode WriteHDF5::parallel_create_file( const char* /* filename */, bool /* overwrite */,
                                           const std::vector< std::string >& /* qa_records */,
                                           const FileOptions& /* opts */, const Tag* /* tag_list */, int /* num_tags */,
                                           int /* dimension */, double* /* times */ )
{
    // Serial-only build: WriteHDF5Parallel overrides this method when MOAB
    // is built with parallel HDF5 support, so reaching this body means that
    // support was not compiled in.  Always fails with MB_NOT_IMPLEMENTED.
    MB_SET_ERR( MB_NOT_IMPLEMENTED, "WriteHDF5 does not support parallel writing" );
}
02474 
/** Create the HDF5 file and all of its tables for a serial write.
 *
 *  Lays out the entire file structure (node coordinates, element
 *  connectivity, set tables, adjacency tables, and tag tables) and assigns
 *  file ids to all entities before any bulk data is written.  The order of
 *  table creation matters: sets must receive ids before adjacency tables
 *  are counted, because entities may be adjacent to sets.
 *
 *  \param filename       Path of the file to create.
 *  \param overwrite      If true, replace any existing file.
 *  \param qa_records     QA strings passed to write_qa().
 *  \param user_tag_list  Optional explicit list of tags to write.
 *  \param num_user_tags  Length of \a user_tag_list.
 *  \param dimension      Geometric dimension of node coordinates.
 */
ErrorCode WriteHDF5::serial_create_file( const char* filename, bool overwrite,
                                         const std::vector< std::string >& qa_records, const Tag* user_tag_list,
                                         int num_user_tags, int dimension )
{
    long first_id;
    mhdf_Status status;
    hid_t handle;
    std::list< ExportSet >::iterator ex_itor;
    ErrorCode rval;

    topState.start( "creating file" );

    // Element type names for all types between vertex and entity set
    // (exclusive); unused slots remain NULL.
    const char* type_names[MBMAXTYPE];
    memset( type_names, 0, MBMAXTYPE * sizeof( char* ) );
    for( EntityType i = MBEDGE; i < MBENTITYSET; ++i )
        type_names[i] = CN::EntityTypeName( i );

    // Create the file
    filePtr = mhdf_createFile( filename, overwrite, type_names, MBMAXTYPE, id_type, &status );
    CHK_MHDF_ERR_0( status );
    assert( !!filePtr );

    rval = write_qa( qa_records );
    CHK_MB_ERR_0( rval );

    // Create node table and assign file ids to all nodes, starting at the
    // first id the file format hands back.
    if( nodeSet.range.size() )
    {
        nodeSet.total_num_ents = nodeSet.range.size();
        handle = mhdf_createNodeCoords( filePtr, dimension, nodeSet.total_num_ents, &first_id, &status );
        CHK_MHDF_ERR_0( status );
        mhdf_closeData( filePtr, handle, &status );
        CHK_MHDF_ERR_0( status );
        nodeSet.first_id = (wid_t)first_id;
        rval             = assign_ids( nodeSet.range, nodeSet.first_id );
        CHK_MB_ERR_0( rval );
    }
    else
    {
        // No nodes to write: mark with a sentinel first id.
        nodeSet.first_id = std::numeric_limits< wid_t >::max();
    }
    nodeSet.offset = 0;

    // Create element tables and assign file ids per export set.
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        ex_itor->total_num_ents = ex_itor->range.size();
        rval                    = create_elem_table( *ex_itor, ex_itor->total_num_ents, first_id );
        CHK_MB_ERR_0( rval );

        ex_itor->first_id = (wid_t)first_id;
        ex_itor->offset   = 0;
        rval              = assign_ids( ex_itor->range, ex_itor->first_id );
        CHK_MB_ERR_0( rval );
    }
    // Create set tables
    writeSets = !setSet.range.empty();
    if( writeSets )
    {
        long contents_len, children_len, parents_len;

        setSet.total_num_ents = setSet.range.size();
        setSet.max_num_ents   = setSet.total_num_ents;
        rval                  = create_set_meta( setSet.total_num_ents, first_id );
        CHK_MB_ERR_0( rval );

        setSet.first_id = (wid_t)first_id;
        rval            = assign_ids( setSet.range, setSet.first_id );
        CHK_MB_ERR_0( rval );

        // Total lengths of the three flat set-data tables.
        rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
        CHK_MB_ERR_0( rval );

        rval = create_set_tables( contents_len, children_len, parents_len );
        CHK_MB_ERR_0( rval );

        // Serial write: this process starts at offset 0 in every table.
        setSet.offset     = 0;
        setContentsOffset = 0;
        setChildrenOffset = 0;
        setParentsOffset  = 0;
        writeSetContents  = !!contents_len;
        writeSetChildren  = !!children_len;
        writeSetParents   = !!parents_len;

        maxNumSetContents = contents_len;
        maxNumSetChildren = children_len;
        maxNumSetParents  = parents_len;
    }  // if (!setSet.range.empty())

    // Create adjacency table after set table, because sets do not have yet an id
    // some entities are adjacent to sets (exodus?)
    // Create node adjacency table
    wid_t num_adjacencies;
#ifdef MB_H5M_WRITE_NODE_ADJACENCIES
    rval = count_adjacencies( nodeSet.range, num_adjacencies );
    CHK_MB_ERR_0( rval );
    nodeSet.adj_offset   = 0;
    nodeSet.max_num_adjs = num_adjacencies;
    if( num_adjacencies > 0 )
    {
        handle = mhdf_createAdjacency( filePtr, mhdf_node_type_handle(), num_adjacencies, &status );
        CHK_MHDF_ERR_0( status );
        mhdf_closeData( filePtr, handle, &status );
    }
#endif

    // Create element adjacency tables
    for( ex_itor = exportList.begin(); ex_itor != exportList.end(); ++ex_itor )
    {
        rval = count_adjacencies( ex_itor->range, num_adjacencies );
        CHK_MB_ERR_0( rval );

        ex_itor->adj_offset   = 0;
        ex_itor->max_num_adjs = num_adjacencies;
        if( num_adjacencies > 0 )
        {
            handle = mhdf_createAdjacency( filePtr, ex_itor->name(), num_adjacencies, &status );
            CHK_MHDF_ERR_0( status );
            mhdf_closeData( filePtr, handle, &status );
        }
    }

    dbgOut.tprint( 1, "Gathering Tags\n" );

    rval = gather_tags( user_tag_list, num_user_tags );
    CHK_MB_ERR_0( rval );

    // Create the tags and tag data tables
    std::list< TagDesc >::iterator tag_iter = tagList.begin();
    for( ; tag_iter != tagList.end(); ++tag_iter )
    {
        // As we haven't yet added any ExportSets for which to write
        // dense tag data to the TagDesc struct pointed to by
        // tag_iter, this call will initially return all tagged entities
        // in the set of entities to be written.
        Range range;
        rval = get_sparse_tagged_entities( *tag_iter, range );
        CHK_MB_ERR_0( rval );

        int s;
        bool var_len = ( MB_VARIABLE_DATA_LENGTH == iFace->tag_get_length( tag_iter->tag_id, s ) );

        // Determine which ExportSets we want to write dense
        // data for. We never write dense data for variable-length
        // tag data.
        if( !var_len && writeTagDense )
        {
            // Check if we want to write this tag in dense format even if not
            // all of the entities have a tag value.  The criterion of this
            // is that the tag be dense, have a default value, and have at
            // least 2/3 of the entities tagged.
            bool prefer_dense = false;
            TagType type;
            rval = iFace->tag_get_type( tag_iter->tag_id, type );
            CHK_MB_ERR_0( rval );
            if( MB_TAG_DENSE == type )
            {
                const void* defval = 0;
                rval               = iFace->tag_get_default_value( tag_iter->tag_id, defval, s );
                if( MB_SUCCESS == rval ) prefer_dense = true;
            }

            // Entities written densely are removed from 'range' so they are
            // not also written in sparse format.
            if( check_dense_format_tag( nodeSet, range, prefer_dense ) )
            {
                range -= nodeSet.range;
                tag_iter->dense_list.push_back( nodeSet );
            }

            std::list< ExportSet >::const_iterator ex = exportList.begin();
            for( ; ex != exportList.end(); ++ex )
            {
                if( check_dense_format_tag( *ex, range, prefer_dense ) )
                {
                    range -= ex->range;
                    tag_iter->dense_list.push_back( *ex );
                }
            }

            if( check_dense_format_tag( setSet, range, prefer_dense ) )
            {
                range -= setSet.range;
                tag_iter->dense_list.push_back( setSet );
            }
        }

        // Anything left over is written sparsely.
        tag_iter->write_sparse = !range.empty();

        unsigned long var_len_total = 0;
        if( var_len )
        {
            rval = get_tag_data_length( *tag_iter, range, var_len_total );
            CHK_MB_ERR_0( rval );
        }

        rval = create_tag( *tag_iter, range.size(), var_len_total );
        CHK_MB_ERR_0( rval );
    }  // for (tags)

    topState.end();
    return MB_SUCCESS;
}
02676 
02677 bool WriteHDF5::check_dense_format_tag( const ExportSet& ents, const Range& all_tagged, bool prefer_dense )
02678 {
02679     // If there are no tagged entities, then don't write anything
02680     if( ents.range.empty() ) return false;
02681 
02682     // If all of the entities are tagged, then write in dense format
02683     if( all_tagged.contains( ents.range ) ) return true;
02684 
02685     // Unless asked for more lenient choice of dense format, return false
02686     if( !prefer_dense ) return false;
02687 
02688     // If we're being lenient about choosing dense format, then
02689     // return true if at least 2/3 of the entities are tagged.
02690     Range xsect = intersect( setSet.range, all_tagged );
02691     if( 3 * xsect.size() >= 2 * setSet.range.size() ) return true;
02692 
02693     return false;
02694 }
02695 
02696 ErrorCode WriteHDF5::count_adjacencies( const Range& set, wid_t& result )
02697 {
02698     ErrorCode rval;
02699     std::vector< wid_t > adj_list;
02700     Range::const_iterator iter      = set.begin();
02701     const Range::const_iterator end = set.end();
02702     result                          = 0;
02703     for( ; iter != end; ++iter )
02704     {
02705         adj_list.clear();
02706         rval = get_adjacencies( *iter, adj_list );
02707         CHK_MB_ERR_0( rval );
02708 
02709         if( adj_list.size() > 0 ) result += 2 + adj_list.size();
02710     }
02711 
02712     return MB_SUCCESS;
02713 }
02714 
02715 ErrorCode WriteHDF5::create_elem_table( const ExportSet& block, long num_entities, long& first_id_out )
02716 {
02717     mhdf_Status status;
02718     hid_t handle;
02719 
02720     CHECK_OPEN_HANDLES;
02721 
02722     mhdf_addElement( filePtr, block.name(), block.type, &status );
02723     CHK_MHDF_ERR_0( status );
02724 
02725     handle = mhdf_createConnectivity( filePtr, block.name(), block.num_nodes, num_entities, &first_id_out, &status );
02726     CHK_MHDF_ERR_0( status );
02727     mhdf_closeData( filePtr, handle, &status );
02728     CHK_MHDF_ERR_0( status );
02729 
02730     return MB_SUCCESS;
02731 }
02732 
/** Compute the total lengths of the three flat set-data tables (contents,
 *  children, parents) for the sets in \a sets.
 *
 *  Sets present in specialSets (sorted by handle) use their precomputed id
 *  lists; all other sets are measured via get_set_info, with unordered sets
 *  possibly compressed into blocked (range) form when that is smaller.
 *
 *  \param sets                 Sets to be written.
 *  \param contents_length_out  Total number of content values.
 *  \param children_length_out  Total number of child values.
 *  \param parents_length_out   Total number of parent values.
 */
ErrorCode WriteHDF5::count_set_size( const Range& sets, long& contents_length_out, long& children_length_out,
                                     long& parents_length_out )
{
    ErrorCode rval;
    Range set_contents;
    long contents_length_set, children_length_set, parents_length_set;
    unsigned long flags;
    std::vector< wid_t > set_contents_ids;
    // specialSets is kept sorted by handle, so it can be walked in step
    // with the (also sorted) Range of sets.
    std::vector< SpecialSetData >::const_iterator si = specialSets.begin();

    contents_length_out = 0;
    children_length_out = 0;
    parents_length_out  = 0;

    for( Range::const_iterator iter = sets.begin(); iter != sets.end(); ++iter )
    {
        // Advance the special-set cursor to the current handle.
        while( si != specialSets.end() && si->setHandle < *iter )
            ++si;

        // Special sets carry precomputed id lists; use their sizes directly.
        if( si != specialSets.end() && si->setHandle == *iter )
        {
            contents_length_out += si->contentIds.size();
            children_length_out += si->childIds.size();
            parents_length_out += si->parentIds.size();
            ++si;
            continue;
        }

        rval = get_set_info( *iter, contents_length_set, children_length_set, parents_length_set, flags );
        CHK_MB_ERR_0( rval );

        // Check if can and should compress as ranges
        if( !( flags & MESHSET_ORDERED ) && contents_length_set )
        {
            set_contents.clear();
            rval = iFace->get_entities_by_handle( *iter, set_contents, false );
            CHK_MB_ERR_0( rval );

            bool blocked_list;
            rval = range_to_blocked_list( set_contents, set_contents_ids, blocked_list );
            CHK_MB_ERR_0( rval );

            if( blocked_list )
            {
                // Blocked lists are (start, count) pairs, hence even length.
                assert( set_contents_ids.size() % 2 == 0 );
                contents_length_set = set_contents_ids.size();
            }
        }

        contents_length_out += contents_length_set;
        children_length_out += children_length_set;
        parents_length_out += parents_length_set;
    }

    return MB_SUCCESS;
}
02789 
02790 ErrorCode WriteHDF5::create_set_meta( long num_sets, long& first_id_out )
02791 {
02792     hid_t handle;
02793     mhdf_Status status;
02794 
02795     CHECK_OPEN_HANDLES;
02796 
02797     handle = mhdf_createSetMeta( filePtr, num_sets, &first_id_out, &status );
02798     CHK_MHDF_ERR_0( status );
02799     mhdf_closeData( filePtr, handle, &status );
02800 
02801     return MB_SUCCESS;
02802 }
02803 
02804 WriteHDF5::SpecialSetData* WriteHDF5::find_set_data( EntityHandle h )
02805 {
02806     SpecialSetData tmp;
02807     tmp.setHandle = h;
02808     std::vector< SpecialSetData >::iterator i;
02809     i = std::lower_bound( specialSets.begin(), specialSets.end(), tmp, SpecSetLess() );
02810     return ( i == specialSets.end() || i->setHandle != h ) ? 0 : &*i;
02811 }
02812 
02813 ErrorCode WriteHDF5::create_set_tables( long num_set_contents, long num_set_children, long num_set_parents )
02814 {
02815     hid_t handle;
02816     mhdf_Status status;
02817 
02818     CHECK_OPEN_HANDLES;
02819 
02820     if( num_set_contents > 0 )
02821     {
02822         handle = mhdf_createSetData( filePtr, num_set_contents, &status );
02823         CHK_MHDF_ERR_0( status );
02824         mhdf_closeData( filePtr, handle, &status );
02825     }
02826 
02827     if( num_set_children > 0 )
02828     {
02829         handle = mhdf_createSetChildren( filePtr, num_set_children, &status );
02830         CHK_MHDF_ERR_0( status );
02831         mhdf_closeData( filePtr, handle, &status );
02832     }
02833 
02834     if( num_set_parents > 0 )
02835     {
02836         handle = mhdf_createSetParents( filePtr, num_set_parents, &status );
02837         CHK_MHDF_ERR_0( status );
02838         mhdf_closeData( filePtr, handle, &status );
02839     }
02840 
02841     return MB_SUCCESS;
02842 }
02843 
/** Determine how a MOAB tag is represented in the file.
 *
 *  \param tag           Tag to query.
 *  \param moab_type     MOAB data type of the tag.
 *  \param num_bytes     Total bytes per tag value, or MB_VARIABLE_LENGTH.
 *  \param type_size     Size in bytes of one element of the value.
 *  \param array_length  Elements per value, or MB_VARIABLE_LENGTH.
 *  \param file_type     mhdf file-type category for the tag.
 *  \param hdf_type      HDF5 type to write with.  The returned id is
 *                       always owned by the caller (it is copied or
 *                       created here) and must be closed with H5Tclose.
 */
ErrorCode WriteHDF5::get_tag_size( Tag tag, DataType& moab_type, int& num_bytes, int& type_size, int& array_length,
                                   mhdf_TagDataType& file_type, hid_t& hdf_type )
{
    ErrorCode rval;
    Tag type_handle;
    std::string tag_name, tag_type_name;

    CHECK_OPEN_HANDLES;

    // We return NULL for hdf_type if it can be determined from
    // the file_type.  The only case where it is non-zero is
    // if the user specified a specific type via a mesh tag.
    hdf_type            = (hid_t)0;
    // Tracks whether hdf_type currently refers to an id we created (and
    // would therefore need to close if it is replaced below).
    bool close_hdf_type = false;

    rval = iFace->tag_get_data_type( tag, moab_type );
    CHK_MB_ERR_0( rval );
    rval = iFace->tag_get_length( tag, array_length );
    if( MB_VARIABLE_DATA_LENGTH == rval ) { array_length = MB_VARIABLE_LENGTH; }
    else if( MB_SUCCESS != rval )
        return error( rval );
    rval = iFace->tag_get_bytes( tag, num_bytes );
    if( MB_VARIABLE_DATA_LENGTH == rval )
        num_bytes = MB_VARIABLE_LENGTH;
    else if( MB_SUCCESS != rval )
        return error( rval );

    // Map the MOAB data type to an mhdf category and a native HDF5 type.
    switch( moab_type )
    {
        case MB_TYPE_INTEGER:
            type_size      = sizeof( int );
            file_type      = mhdf_INTEGER;
            hdf_type       = H5T_NATIVE_INT;
            close_hdf_type = false;
            break;
        case MB_TYPE_DOUBLE:
            type_size      = sizeof( double );
            file_type      = mhdf_FLOAT;
            hdf_type       = H5T_NATIVE_DOUBLE;
            close_hdf_type = false;
            break;
        case MB_TYPE_BIT:
            type_size = sizeof( bool );
            file_type = mhdf_BITFIELD;
            assert( array_length <= 8 );
            // Bit tags are packed into a single byte with only
            // 'array_length' significant bits.
            hdf_type = H5Tcopy( H5T_NATIVE_B8 );
            H5Tset_precision( hdf_type, array_length );
            close_hdf_type = true;
            break;
        case MB_TYPE_HANDLE:
            type_size      = sizeof( EntityHandle );
            file_type      = mhdf_ENTITY_ID;
            hdf_type       = id_type;
            close_hdf_type = false;
            break;
        case MB_TYPE_OPAQUE:
            file_type = mhdf_OPAQUE;
            // An application may have attached a mesh tag named
            // "__hdf5_tag_type_<name>" holding an explicit HDF5 type id
            // to use for this tag; look for it.
            rval      = iFace->tag_get_name( tag, tag_name );
            CHK_MB_ERR_0( rval );
            tag_type_name = "__hdf5_tag_type_";
            tag_type_name += tag_name;
            rval = iFace->tag_get_handle( tag_type_name.c_str(), 0, MB_TYPE_OPAQUE, type_handle, MB_TAG_ANY );
            if( MB_TAG_NOT_FOUND == rval )
            {
                // No user-specified type: write as raw opaque bytes.
                if( num_bytes == MB_VARIABLE_LENGTH )
                    type_size = 1;
                else
                    type_size = num_bytes;
                hdf_type       = H5Tcreate( H5T_OPAQUE, type_size );
                close_hdf_type = true;
            }
            else if( MB_SUCCESS == rval )
            {
                // Read the stored hid_t and sanity-check it against the
                // tag's byte count.
                int hsize;
                rval = iFace->tag_get_bytes( type_handle, hsize );
                if( hsize != sizeof( hid_t ) ) return error( MB_FAILURE );

                const EntityHandle root = 0;
                rval                    = iFace->tag_get_data( type_handle, &root, 1, &hdf_type );
                if( rval != MB_SUCCESS ) return error( rval );

                type_size = H5Tget_size( hdf_type );
                if( type_size != num_bytes ) return error( MB_FAILURE );

                close_hdf_type = false;
            }
            else
                return error( rval );
            // For opaque tags num_bytes/array_length are reinterpreted:
            // the whole value is one "element" of the opaque type.
            num_bytes    = array_length;
            array_length = ( num_bytes == MB_VARIABLE_LENGTH ) ? MB_VARIABLE_LENGTH : 1;
            break;
        default:
            break;
    }

    assert( num_bytes == MB_VARIABLE_LENGTH || ( moab_type == MB_TYPE_BIT && num_bytes == 1 ) ||
            array_length * type_size == num_bytes );

    // Ensure the returned hdf_type is always caller-owned: copy shared
    // native types, or wrap fixed-length arrays in an HDF5 array type.
    if( num_bytes == MB_VARIABLE_LENGTH )
    {
        array_length = MB_VARIABLE_LENGTH;
        if( !close_hdf_type )
        {
            hdf_type = H5Tcopy( hdf_type );
            // close_hdf_type = true;
        }
    }
    else if( array_length > 1 && moab_type != MB_TYPE_BIT )
    {
        hsize_t len = array_length;
#if defined( H5Tarray_create_vers ) && ( H5Tarray_create_vers > 1 )
        hid_t temp_id = H5Tarray_create2( hdf_type, 1, &len );
#else
        hid_t temp_id = H5Tarray_create( hdf_type, 1, &len, NULL );
#endif
        if( close_hdf_type ) H5Tclose( hdf_type );
        hdf_type = temp_id;
    }
    else if( !close_hdf_type )
    {
        hdf_type = H5Tcopy( hdf_type );
        // close_hdf_type = true;
    }

    return MB_SUCCESS;
}
02970 
02971 ErrorCode WriteHDF5::get_tag_data_length( const TagDesc& tag_info, const Range& range, unsigned long& result )
02972 {
02973     ErrorCode rval;
02974     result = 0;
02975 
02976     // Split buffer into two pieces, one for pointers and one for sizes
02977     size_t step, remaining;
02978     step                    = bufferSize / ( sizeof( int ) + sizeof( void* ) );
02979     const void** ptr_buffer = reinterpret_cast< const void** >( dataBuffer );
02980     int* size_buffer        = reinterpret_cast< int* >( ptr_buffer + step );
02981     Range subrange;
02982     Range::const_iterator iter = range.begin();
02983     for( remaining = range.size(); remaining >= step; remaining -= step )
02984     {
02985         // Get subset of range containing 'count' entities
02986         Range::const_iterator end = iter;
02987         end += step;
02988         subrange.clear();
02989         subrange.merge( iter, end );
02990         iter = end;
02991         // Get tag sizes for entities
02992         rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
02993         if( MB_SUCCESS != rval ) return error( rval );
02994         // Sum lengths
02995         for( size_t i = 0; i < step; ++i )
02996             result += size_buffer[i];
02997     }
02998     // Process remaining
02999     subrange.clear();
03000     subrange.merge( iter, range.end() );
03001     assert( subrange.size() == remaining );
03002     rval = iFace->tag_get_by_ptr( tag_info.tag_id, subrange, ptr_buffer, size_buffer );
03003     if( MB_SUCCESS != rval ) return error( rval );
03004     for( size_t i = 0; i < remaining; ++i )
03005         result += size_buffer[i];
03006 
03007     return MB_SUCCESS;
03008 }
03009 
03010 ErrorCode WriteHDF5::create_tag( const TagDesc& tag_data, unsigned long num_sparse_entities,
03011                                  unsigned long data_table_size )
03012 {
03013     TagType mb_storage;
03014     DataType mb_type;
03015     mhdf_TagDataType mhdf_type;
03016     int tag_bytes, type_size, num_vals, storage;
03017     hid_t hdf_type = (hid_t)0;
03018     hid_t handles[3];
03019     std::string tag_name;
03020     ErrorCode rval;
03021     mhdf_Status status;
03022 
03023     CHECK_OPEN_HANDLES;
03024 
03025     // Get tag properties
03026     rval = iFace->tag_get_type( tag_data.tag_id, mb_storage );
03027     CHK_MB_ERR_0( rval );
03028     switch( mb_storage )
03029     {
03030         case MB_TAG_DENSE:
03031             storage = mhdf_DENSE_TYPE;
03032             break;
03033         case MB_TAG_SPARSE:
03034             storage = mhdf_SPARSE_TYPE;
03035             break;
03036         case MB_TAG_BIT:
03037             storage = mhdf_BIT_TYPE;
03038             break;
03039         case MB_TAG_MESH:
03040             storage = mhdf_MESH_TYPE;
03041             break;
03042         default:
03043             return error( MB_FAILURE );
03044     }
03045     rval = iFace->tag_get_name( tag_data.tag_id, tag_name );
03046     CHK_MB_ERR_0( rval );
03047     rval = get_tag_size( tag_data.tag_id, mb_type, tag_bytes, type_size, num_vals, mhdf_type, hdf_type );
03048     CHK_MB_ERR_0( rval );
03049 
03050     // Get default value
03051     const void *def_value, *mesh_value;
03052     int def_val_len, mesh_val_len;
03053     rval = iFace->tag_get_default_value( tag_data.tag_id, def_value, def_val_len );
03054     if( MB_ENTITY_NOT_FOUND == rval )
03055     {
03056         def_value   = 0;
03057         def_val_len = 0;
03058     }
03059     else if( MB_SUCCESS != rval )
03060     {
03061         H5Tclose( hdf_type );
03062         return error( rval );
03063     }
03064 
03065     // Get mesh value
03066     unsigned char byte;
03067     const EntityHandle root = 0;
03068     if( mb_storage == MB_TAG_BIT )
03069     {
03070         rval         = iFace->tag_get_data( tag_data.tag_id, &root, 1, &byte );
03071         mesh_value   = &byte;
03072         mesh_val_len = 1;
03073     }
03074     else
03075     {
03076         rval = iFace->tag_get_by_ptr( tag_data.tag_id, &root, 1, &mesh_value, &mesh_val_len );
03077     }
03078     if( MB_TAG_NOT_FOUND == rval )
03079     {
03080         mesh_value   = 0;
03081         mesh_val_len = 0;
03082     }
03083     else if( MB_SUCCESS != rval )
03084     {
03085         H5Tclose( hdf_type );
03086         return error( rval );
03087     }
03088 
03089     // For handle-type tags, need to convert from handles to file ids
03090     if( MB_TYPE_HANDLE == mb_type )
03091     {
03092         // Make sure there's room in the buffer for both
03093         assert( ( def_val_len + mesh_val_len ) * sizeof( long ) < (size_t)bufferSize );
03094 
03095         // Convert default value
03096         if( def_value )
03097         {
03098             memcpy( dataBuffer, def_value, def_val_len * sizeof( EntityHandle ) );
03099             convert_handle_tag( reinterpret_cast< EntityHandle* >( dataBuffer ), def_val_len );
03100             def_value = dataBuffer;
03101         }
03102 
03103         // Convert mesh value
03104         if( mesh_value )
03105         {
03106             EntityHandle* ptr = reinterpret_cast< EntityHandle* >( dataBuffer ) + def_val_len;
03107             memcpy( ptr, mesh_value, mesh_val_len * sizeof( EntityHandle ) );
03108             if( convert_handle_tag( ptr, mesh_val_len ) )
03109                 mesh_value = ptr;
03110             else
03111                 mesh_value = 0;
03112         }
03113     }
03114 
03115     if( MB_VARIABLE_LENGTH != tag_bytes )
03116     {
03117         // Write the tag description to the file
03118         mhdf_createTag( filePtr, tag_name.c_str(), mhdf_type, num_vals, storage, def_value, mesh_value, hdf_type,
03119                         mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
03120         CHK_MHDF_ERR_0( status );
03121         H5Tclose( hdf_type );
03122 
03123         // Create empty table for tag data
03124         if( num_sparse_entities )
03125         {
03126             mhdf_createSparseTagData( filePtr, tag_name.c_str(), num_sparse_entities, handles, &status );
03127             CHK_MHDF_ERR_0( status );
03128             mhdf_closeData( filePtr, handles[0], &status );
03129             mhdf_closeData( filePtr, handles[1], &status );
03130         }
03131 
03132         for( size_t i = 0; i < tag_data.dense_list.size(); ++i )
03133         {
03134             const ExportSet* ex = find( tag_data.dense_list[i] );
03135             assert( 0 != ex );
03136             handles[0] = mhdf_createDenseTagData( filePtr, tag_name.c_str(), ex->name(), ex->total_num_ents, &status );
03137             CHK_MHDF_ERR_0( status );
03138             mhdf_closeData( filePtr, handles[0], &status );
03139         }
03140     }
03141     else
03142     {
03143         mhdf_createVarLenTag( filePtr, tag_name.c_str(), mhdf_type, storage, def_value, def_val_len, mesh_value,
03144                               mesh_val_len, hdf_type, mb_type == MB_TYPE_HANDLE ? id_type : 0, &status );
03145         CHK_MHDF_ERR_0( status );
03146         H5Tclose( hdf_type );
03147 
03148         // Create empty table for tag data
03149         if( num_sparse_entities )
03150         {
03151             mhdf_createVarLenTagData( filePtr, tag_name.c_str(), num_sparse_entities, data_table_size, handles,
03152                                       &status );
03153             CHK_MHDF_ERR_0( status );
03154             mhdf_closeData( filePtr, handles[0], &status );
03155             mhdf_closeData( filePtr, handles[1], &status );
03156             mhdf_closeData( filePtr, handles[2], &status );
03157         }
03158     }
03159 
03160     return MB_SUCCESS;
03161 }
03162 
03163 ErrorCode WriteHDF5::get_num_sparse_tagged_entities( const TagDesc& tag, size_t& count )
03164 {
03165     Range tmp;
03166     ErrorCode rval = get_sparse_tagged_entities( tag, tmp );
03167     count          = tmp.size();
03168     return rval;
03169 }
03170 
03171 ErrorCode WriteHDF5::get_sparse_tagged_entities( const TagDesc& tag, Range& results )
03172 {
03173     results.clear();
03174     if( !tag.have_dense( setSet ) ) results.merge( setSet.range );
03175     std::list< ExportSet >::reverse_iterator e;
03176     for( e = exportList.rbegin(); e != exportList.rend(); ++e )
03177     {
03178         if( !tag.have_dense( *e ) ) results.merge( e->range );
03179     }
03180     if( !tag.have_dense( nodeSet ) ) results.merge( nodeSet.range );
03181     if( results.empty() ) return MB_SUCCESS;
03182 
03183     return iFace->get_entities_by_type_and_tag( 0, MBMAXTYPE, &tag.tag_id, 0, 1, results, Interface::INTERSECT );
03184 }
03185 
03186 void WriteHDF5::get_write_entities( Range& range )
03187 {
03188     range.clear();
03189     range.merge( setSet.range );
03190     std::list< ExportSet >::reverse_iterator e;
03191     for( e = exportList.rbegin(); e != exportList.rend(); ++e )
03192         range.merge( e->range );
03193     range.merge( nodeSet.range );
03194 }
03195 
03196 void WriteHDF5::print_id_map() const
03197 {
03198     print_id_map( std::cout, "" );
03199 }
03200 
03201 void WriteHDF5::print_id_map( std::ostream& s, const char* pfx ) const
03202 {
03203     RangeMap< EntityHandle, wid_t >::const_iterator i;
03204     for( i = idMap.begin(); i != idMap.end(); ++i )
03205     {
03206         const char* n1 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin ) );
03207         EntityID id    = ID_FROM_HANDLE( i->begin );
03208         if( 1 == i->count ) { s << pfx << n1 << " " << id << " -> " << i->value << std::endl; }
03209         else
03210         {
03211             const char* n2 = CN::EntityTypeName( TYPE_FROM_HANDLE( i->begin + i->count - 1 ) );
03212             if( n1 == n2 )
03213             {
03214                 s << pfx << n1 << " " << id << "-" << id + i->count - 1 << " -> " << i->value << "-"
03215                   << i->value + i->count - 1 << std::endl;
03216             }
03217             else
03218             {
03219                 s << pfx << n1 << " " << id << "-" << n1 << " " << ID_FROM_HANDLE( i->begin + i->count - 1 ) << " -> "
03220                   << i->value << "-" << i->value + i->count - 1 << std::endl;
03221             }
03222         }
03223     }
03224 }
03225 
03226 void WriteHDF5::print_times( const double* t ) const
03227 {
03228     std::cout << "WriteHDF5:           " << t[TOTAL_TIME] << std::endl
03229               << "  gather mesh:       " << t[GATHER_TIME] << std::endl
03230               << "  create file:       " << t[CREATE_TIME] << std::endl
03231               << "    create nodes:    " << t[CREATE_NODE_TIME] << std::endl
03232               << "    negotiate types: " << t[NEGOTIATE_TYPES_TIME] << std::endl
03233               << "    create elem:     " << t[CREATE_ELEM_TIME] << std::endl
03234               << "    file id exch:    " << t[FILEID_EXCHANGE_TIME] << std::endl
03235               << "    create adj:      " << t[CREATE_ADJ_TIME] << std::endl
03236               << "    create set:      " << t[CREATE_SET_TIME] << std::endl
03237               << "      shared ids:    " << t[SHARED_SET_IDS] << std::endl
03238               << "      shared data:   " << t[SHARED_SET_CONTENTS] << std::endl
03239               << "      set offsets:   " << t[SET_OFFSET_TIME] << std::endl
03240               << "    create tags:     " << t[CREATE_TAG_TIME] << std::endl
03241               << "  coordinates:       " << t[COORD_TIME] << std::endl
03242               << "  connectivity:      " << t[CONN_TIME] << std::endl
03243               << "  sets:              " << t[SET_TIME] << std::endl
03244               << "    set descrip:     " << t[SET_META] << std::endl
03245               << "    set content:     " << t[SET_CONTENT] << std::endl
03246               << "    set parent:      " << t[SET_PARENT] << std::endl
03247               << "    set child:       " << t[SET_CHILD] << std::endl
03248               << "  adjacencies:       " << t[ADJ_TIME] << std::endl
03249               << "  tags:              " << t[TAG_TIME] << std::endl
03250               << "    dense data:      " << t[DENSE_TAG_TIME] << std::endl
03251               << "    sparse data:     " << t[SPARSE_TAG_TIME] << std::endl
03252               << "    var-len data:    " << t[VARLEN_TAG_TIME] << std::endl;
03253 }
03254 
03255 }  // namespace moab
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines