/* MOAB: Mesh Oriented datABase (version 5.3.1) */
00001 /** 00002 * MOAB, a Mesh-Oriented datABase, is a software component for creating, 00003 * storing and accessing finite element mesh data. 00004 * 00005 * Copyright 2004 Sandia Corporation. Under the terms of Contract 00006 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government 00007 * retains certain rights in this software. 00008 * 00009 * This library is free software; you can redistribute it and/or 00010 * modify it under the terms of the GNU Lesser General Public 00011 * License as published by the Free Software Foundation; either 00012 * version 2.1 of the License, or (at your option) any later version. 00013 * 00014 */ 00015 00016 #include <H5Tpublic.h> 00017 #include <H5Dpublic.h> 00018 #include <H5Gpublic.h> 00019 #include <H5Spublic.h> 00020 #include <H5Ppublic.h> 00021 #include "mhdf.h" 00022 #include "util.h" 00023 #include "file-handle.h" 00024 #include "status.h" 00025 #include "names-and-paths.h" 00026 00027 int mhdf_haveSets( mhdf_FileHandle file, int* have_data, int* have_child, int* have_parent, mhdf_Status* status ) 00028 { 00029 FileHandle* file_ptr = (FileHandle*)file; 00030 hid_t root_id, set_id; 00031 int result; 00032 API_BEGIN; 00033 00034 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00035 00036 #if defined( H5Gopen_vers ) && H5Gopen_vers > 1 00037 root_id = H5Gopen2( file_ptr->hdf_handle, ROOT_GROUP, H5P_DEFAULT ); 00038 #else 00039 root_id = H5Gopen( file_ptr->hdf_handle, ROOT_GROUP ); 00040 #endif 00041 if( root_id < 0 ) 00042 { 00043 mhdf_setFail( status, "H5Gopen( \"%s\" ) failed.", ROOT_GROUP ); 00044 return -1; 00045 } 00046 00047 result = mhdf_is_in_group( root_id, SET_GROUP_NAME, status ); 00048 if( result < 1 ) 00049 { 00050 H5Gclose( root_id ); 00051 return result; 00052 } 00053 00054 #if defined( H5Gopen_vers ) && H5Gopen_vers > 1 00055 set_id = H5Gopen2( root_id, SET_GROUP_NAME, H5P_DEFAULT ); 00056 #else 00057 set_id = H5Gopen( root_id, SET_GROUP_NAME ); 00058 #endif 00059 H5Gclose( root_id ); 
00060 if( set_id < 0 ) 00061 { 00062 mhdf_setFail( status, "H5Gopen( \"%s\" ) failed.", SET_GROUP ); 00063 return -1; 00064 } 00065 00066 result = mhdf_is_in_group( set_id, SET_META_NAME, status ); 00067 if( result < 0 ) 00068 { 00069 H5Gclose( set_id ); 00070 return result; 00071 } 00072 00073 if( have_data ) 00074 { 00075 *have_data = mhdf_is_in_group( set_id, SET_DATA_NAME, status ); 00076 if( *have_data < 0 ) 00077 { 00078 H5Gclose( set_id ); 00079 return *have_data; 00080 } 00081 } 00082 00083 if( have_child ) 00084 { 00085 *have_child = mhdf_is_in_group( set_id, SET_CHILD_NAME, status ); 00086 if( *have_child < 0 ) 00087 { 00088 H5Gclose( set_id ); 00089 return *have_child; 00090 } 00091 } 00092 00093 if( have_parent ) 00094 { 00095 *have_parent = mhdf_is_in_group( set_id, SET_PARENT_NAME, status ); 00096 if( *have_parent < 0 ) 00097 { 00098 H5Gclose( set_id ); 00099 return *have_parent; 00100 } 00101 } 00102 00103 mhdf_setOkay( status ); 00104 H5Gclose( set_id ); 00105 API_END; 00106 return result; 00107 } 00108 00109 hid_t mhdf_createSetMeta( mhdf_FileHandle file, long num_sets, long* first_id_out, mhdf_Status* status ) 00110 { 00111 FileHandle* file_ptr = (FileHandle*)file; 00112 hid_t table_id; 00113 hsize_t dims[ 2 ]; 00114 long first_id; 00115 API_BEGIN; 00116 00117 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00118 00119 dims[ 0 ] = (hsize_t)num_sets; 00120 dims[ 1 ] = (hsize_t)4; 00121 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_META_PATH, MHDF_INDEX_TYPE, 2, dims, status ); 00122 if( table_id < 0 ) return -1; 00123 00124 first_id = file_ptr->max_id + 1; 00125 if( !mhdf_create_scalar_attrib( table_id, START_ID_ATTRIB, H5T_NATIVE_LONG, &first_id, status ) ) 00126 { 00127 H5Dclose( table_id ); 00128 return -1; 00129 } 00130 00131 *first_id_out = first_id; 00132 file_ptr->max_id += num_sets; 00133 if( !mhdf_write_max_id( file_ptr, status ) ) 00134 { 00135 H5Dclose( table_id ); 00136 return -1; 00137 } 00138 
file_ptr->open_handle_count++; 00139 mhdf_setOkay( status ); 00140 00141 API_END_H( 1 ); 00142 return table_id; 00143 } 00144 00145 hid_t mhdf_openSetMeta( mhdf_FileHandle file, long* num_sets, long* first_id_out, mhdf_Status* status ) 00146 { 00147 FileHandle* file_ptr = (FileHandle*)file; 00148 hid_t table_id; 00149 hsize_t dims[ 2 ]; 00150 API_BEGIN; 00151 00152 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00153 00154 table_id = mhdf_open_table2( file_ptr->hdf_handle, SET_META_PATH, 2, dims, first_id_out, status ); 00155 if( table_id < 0 ) return -1; 00156 00157 /* If dims[1] == 3, then old format of table. 00158 * Deal with it in mhdf_readSetMeta and mhdf_writeSetMeta 00159 */ 00160 if( dims[ 1 ] != 4 && dims[ 1 ] != 3 ) 00161 { 00162 mhdf_setFail( status, "Invalid format for meshset table.\n" ); 00163 H5Dclose( table_id ); 00164 return -1; 00165 } 00166 00167 *num_sets = dims[ 0 ]; 00168 file_ptr->open_handle_count++; 00169 mhdf_setOkay( status ); 00170 API_END_H( 1 ); 00171 return table_id; 00172 } 00173 00174 hid_t mhdf_openSetMetaSimple( mhdf_FileHandle file, mhdf_Status* status ) 00175 { 00176 FileHandle* file_ptr = (FileHandle*)file; 00177 hid_t table_id; 00178 API_BEGIN; 00179 00180 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00181 00182 table_id = mhdf_open_table_simple( file_ptr->hdf_handle, SET_META_PATH, status ); 00183 if( table_id < 0 ) return -1; 00184 00185 file_ptr->open_handle_count++; 00186 mhdf_setOkay( status ); 00187 API_END_H( 1 ); 00188 return table_id; 00189 } 00190 00191 static int mhdf_readwriteSetMeta( hid_t table_id, int read, long offset, long count, hid_t type, void* data, hid_t prop, 00192 mhdf_Status* status ) 00193 { 00194 hid_t slab_id, sslab_id, smem_id, mem_id; 00195 hsize_t offsets[ 2 ], counts[ 2 ], mcounts[ 2 ], moffsets[ 2 ] = { 0, 0 }; 00196 herr_t rval = 0; 00197 int dims, i; 00198 const int fill_val = -1; 00199 const hsize_t one = 1; 00200 00201 mcounts[ 0 ] = count; 00202 mcounts[ 1 ] = 
4; 00203 if( offset < 0 || count < 0 ) 00204 { 00205 mhdf_setFail( status, 00206 "Invalid input for %s: " 00207 "offset = %ld, count = %ld\n", 00208 read ? "read" : "write", offset, count ); 00209 return 0; 00210 } 00211 00212 slab_id = H5Dget_space( table_id ); 00213 if( slab_id < 0 ) 00214 { 00215 mhdf_setFail( status, "Internal error calling H5Dget_space." ); 00216 return 0; 00217 } 00218 00219 dims = H5Sget_simple_extent_ndims( slab_id ); 00220 if( dims != 2 ) 00221 { 00222 H5Sclose( slab_id ); 00223 mhdf_setFail( status, "Internal error: unexpected dataset rank: %d.", dims ); 00224 return 0; 00225 } 00226 00227 dims = H5Sget_simple_extent_dims( slab_id, counts, NULL ); 00228 if( dims < 0 ) 00229 { 00230 H5Sclose( slab_id ); 00231 mhdf_setFail( status, "Internal error calling H5Sget_simple_extend_dims." ); 00232 return 0; 00233 } 00234 00235 if( (unsigned long)( offset + count ) > counts[ 0 ] ) 00236 { 00237 H5Sclose( slab_id ); 00238 mhdf_setFail( status, "Requested %s of rows %ld to %ld of a %ld row table.\n", read ? "read" : "write", offset, 00239 offset + count - 1, (long)counts[ dims - 1 ] ); 00240 return 0; 00241 } 00242 counts[ 0 ] = (hsize_t)count; 00243 offsets[ 0 ] = (hsize_t)offset; 00244 00245 if( count ) 00246 mem_id = H5Screate_simple( dims, mcounts, NULL ); 00247 else 00248 { /* special case for 'NULL' read during collective parallel IO */ 00249 mem_id = H5Screate_simple( 1, &one, NULL ); 00250 if( mem_id && 0 > H5Sselect_none( mem_id ) ) 00251 { 00252 H5Sclose( mem_id ); 00253 mem_id = -1; 00254 } 00255 } 00256 if( mem_id < 0 ) 00257 { 00258 mhdf_setFail( status, "Internal error calling H5Screate_simple." 
); 00259 return 0; 00260 } 00261 00262 /* Normal table: 4 columns */ 00263 if( counts[ 1 ] == 4 ) 00264 { 00265 offsets[ 1 ] = 0; 00266 if( count ) 00267 rval = H5Sselect_hyperslab( slab_id, H5S_SELECT_SET, offsets, NULL, counts, NULL ); 00268 else /* special case for 'NULL' read during collective parallel IO */ 00269 rval = H5Sselect_none( slab_id ); 00270 if( rval < 0 ) 00271 { 00272 H5Sclose( mem_id ); 00273 H5Sclose( slab_id ); 00274 mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." ); 00275 return 0; 00276 } 00277 00278 if( read ) 00279 rval = H5Dread( table_id, type, mem_id, slab_id, prop, data ); 00280 else 00281 rval = H5Dwrite( table_id, type, mem_id, slab_id, prop, data ); 00282 } 00283 /* Old table: 3 columns, no parent link counts */ 00284 else if( counts[ 1 ] == 3 ) 00285 { 00286 rval = 0; 00287 for( i = 0; i < 3 && rval >= 0; ++i ) 00288 { 00289 smem_id = H5Scopy( mem_id ); 00290 sslab_id = H5Scopy( slab_id ); 00291 if( smem_id < 0 || sslab_id < 0 ) 00292 { 00293 if( smem_id >= 0 ) H5Sclose( smem_id ); 00294 mhdf_setFail( status, "Internal error calling H5Scopy." ); 00295 return 0; 00296 } 00297 00298 counts[ 1 ] = 1; 00299 offsets[ 1 ] = i; 00300 if( count ) 00301 rval = H5Sselect_hyperslab( sslab_id, H5S_SELECT_SET, offsets, NULL, counts, NULL ); 00302 else 00303 rval = H5Sselect_none( sslab_id ); 00304 if( rval < 0 ) 00305 { 00306 H5Sclose( slab_id ); 00307 H5Sclose( mem_id ); 00308 mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." ); 00309 return 0; 00310 } 00311 00312 mcounts[ 1 ] = 1; 00313 moffsets[ 1 ] = ( i == 2 ) ? 3 : i; 00314 rval = H5Sselect_hyperslab( smem_id, H5S_SELECT_SET, moffsets, NULL, mcounts, NULL ); 00315 if( rval < 0 ) 00316 { 00317 H5Sclose( sslab_id ); 00318 H5Sclose( slab_id ); 00319 H5Sclose( mem_id ); 00320 mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." 
); 00321 return 0; 00322 } 00323 00324 if( read ) 00325 rval = H5Dread( table_id, type, smem_id, sslab_id, prop, data ); 00326 else 00327 rval = H5Dwrite( table_id, type, smem_id, sslab_id, prop, data ); 00328 00329 H5Sclose( sslab_id ); 00330 H5Sclose( smem_id ); 00331 } 00332 00333 if( read && rval >= 0 ) 00334 { 00335 mcounts[ 1 ] = 1; 00336 moffsets[ 1 ] = 2; 00337 H5Sselect_hyperslab( mem_id, H5S_SELECT_SET, moffsets, NULL, mcounts, NULL ); 00338 rval = H5Dfill( &fill_val, H5T_NATIVE_INT, data, type, mem_id ); 00339 } 00340 } 00341 else 00342 { 00343 H5Sclose( mem_id ); 00344 H5Sclose( slab_id ); 00345 mhdf_setFail( status, "Invalid dimension for meshset metadata table." ); 00346 return 0; 00347 } 00348 00349 H5Sclose( slab_id ); 00350 H5Sclose( mem_id ); 00351 if( rval < 0 ) 00352 { 00353 mhdf_setFail( status, "Internal error calling H5D%s.", read ? "read" : "write" ); 00354 return 0; 00355 } 00356 00357 mhdf_setOkay( status ); 00358 return 1; 00359 } 00360 00361 void mhdf_readSetMeta( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status ) 00362 { 00363 API_BEGIN; 00364 mhdf_readwriteSetMeta( table_id, 1, offset, count, type, data, H5P_DEFAULT, status ); 00365 API_END; 00366 } 00367 void mhdf_readSetMetaWithOpt( hid_t table_id, long offset, long count, hid_t type, void* data, hid_t prop, 00368 mhdf_Status* status ) 00369 { 00370 API_BEGIN; 00371 mhdf_readwriteSetMeta( table_id, 1, offset, count, type, data, prop, status ); 00372 API_END; 00373 } 00374 00375 void mhdf_writeSetMeta( hid_t table_id, long offset, long count, hid_t type, const void* data, mhdf_Status* status ) 00376 { 00377 API_BEGIN; 00378 mhdf_readwriteSetMeta( table_id, 0, offset, count, type, (void*)data, H5P_DEFAULT, status ); 00379 API_END; 00380 } 00381 void mhdf_writeSetMetaWithOpt( hid_t table_id, long offset, long count, hid_t type, const void* data, hid_t prop, 00382 mhdf_Status* status ) 00383 { 00384 API_BEGIN; 00385 mhdf_readwriteSetMeta( table_id, 0, 
offset, count, type, (void*)data, prop, status ); 00386 API_END; 00387 } 00388 00389 enum SetMetaCol 00390 { 00391 CONTENT = 0, 00392 CHILDREN = 1, 00393 PARENTS = 2, 00394 FLAGS = 3 00395 }; 00396 00397 static int mhdf_readSetMetaColumn( hid_t table_id, enum SetMetaCol column, long offset, long count, hid_t type, 00398 void* data, hid_t prop, mhdf_Status* status ) 00399 { 00400 hid_t slab_id, mem_id; 00401 hsize_t offsets[ 2 ], counts[ 2 ], mcount = count; 00402 herr_t rval = 0; 00403 int dims; 00404 const int fill_val = -1; 00405 00406 if( offset < 0 || count < 0 ) 00407 { 00408 mhdf_setFail( status, 00409 "Invalid input for reading set description column: " 00410 "offset = %ld, count = %ld\n", 00411 offset, count ); 00412 return 0; 00413 } 00414 00415 /* Get dimensions of table, and check against requested count and offset */ 00416 00417 slab_id = H5Dget_space( table_id ); 00418 if( slab_id < 0 ) 00419 { 00420 mhdf_setFail( status, "Internal error calling H5Dget_space." ); 00421 return 0; 00422 } 00423 00424 dims = H5Sget_simple_extent_ndims( slab_id ); 00425 if( dims != 2 ) 00426 { 00427 H5Sclose( slab_id ); 00428 mhdf_setFail( status, "Internal error: unexpected dataset rank: %d.", dims ); 00429 return 0; 00430 } 00431 00432 dims = H5Sget_simple_extent_dims( slab_id, counts, NULL ); 00433 if( dims < 0 ) 00434 { 00435 H5Sclose( slab_id ); 00436 mhdf_setFail( status, "Internal error calling H5Sget_simple_extend_dims." 
); 00437 return 0; 00438 } 00439 00440 if( (unsigned long)( offset + count ) > counts[ 0 ] ) 00441 { 00442 H5Sclose( slab_id ); 00443 mhdf_setFail( status, "Requested read of rows %ld to %ld of a %ld row table.\n", offset, offset + count - 1, 00444 (long)counts[ 0 ] ); 00445 return 0; 00446 } 00447 00448 /* Create a slab definition for the block of memory we're reading into */ 00449 00450 mem_id = H5Screate_simple( 1, &mcount, NULL ); 00451 if( mem_id < 0 ) 00452 { 00453 H5Sclose( slab_id ); 00454 mhdf_setFail( status, "Internal error calling H5Screate_simple." ); 00455 return 0; 00456 } 00457 00458 /* Old, 3-column table. 00459 * New table is {contents, children, parents, flags} 00460 * Old table is {contents, children, flags} 00461 * If asking for parents, just return zeros. 00462 * If asking for flags, fix column value. 00463 */ 00464 offsets[ 1 ] = column; 00465 if( counts[ 1 ] == 3 ) 00466 { 00467 if( column == PARENTS ) 00468 { 00469 rval = H5Dfill( &fill_val, H5T_NATIVE_INT, data, type, mem_id ); 00470 H5Sclose( mem_id ); 00471 H5Sclose( slab_id ); 00472 if( rval < 0 ) 00473 { 00474 mhdf_setFail( status, "Internal error calling H5Dfill" ); 00475 return 0; 00476 } 00477 else 00478 { 00479 mhdf_setOkay( status ); 00480 return 1; 00481 } 00482 } 00483 else if( column == FLAGS ) 00484 --offsets[ 1 ]; 00485 } 00486 else if( counts[ 1 ] != 4 ) 00487 { 00488 H5Sclose( mem_id ); 00489 H5Sclose( slab_id ); 00490 mhdf_setFail( status, "Invalid dimension for meshset metadata table." ); 00491 return 0; 00492 } 00493 00494 /* Create a slab defintion for the portion of the table we want to read. */ 00495 00496 /* offsets[1] was initialized in the above block of code. 
*/ 00497 offsets[ 0 ] = (hsize_t)offset; 00498 counts[ 0 ] = (hsize_t)count; 00499 counts[ 1 ] = 1; /* one column */ 00500 if( count ) 00501 rval = H5Sselect_hyperslab( slab_id, H5S_SELECT_SET, offsets, NULL, counts, NULL ); 00502 else 00503 rval = H5Sselect_none( slab_id ); 00504 if( rval < 0 ) 00505 { 00506 H5Sclose( mem_id ); 00507 H5Sclose( slab_id ); 00508 mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." ); 00509 return 0; 00510 } 00511 00512 /* Read the data */ 00513 00514 rval = H5Dread( table_id, type, mem_id, slab_id, prop, data ); 00515 H5Sclose( mem_id ); 00516 H5Sclose( slab_id ); 00517 if( rval < 0 ) 00518 { 00519 mhdf_setFail( status, "Internal error calling H5Dread." ); 00520 return 0; 00521 } 00522 00523 mhdf_setOkay( status ); 00524 return 1; 00525 } 00526 00527 void mhdf_readSetFlags( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status ) 00528 { 00529 API_BEGIN; 00530 mhdf_readSetMetaColumn( table_id, FLAGS, offset, count, type, data, H5P_DEFAULT, status ); 00531 API_END; 00532 } 00533 void mhdf_readSetFlagsWithOpt( hid_t table_id, long offset, long count, hid_t type, void* data, hid_t prop, 00534 mhdf_Status* status ) 00535 { 00536 API_BEGIN; 00537 mhdf_readSetMetaColumn( table_id, FLAGS, offset, count, type, data, prop, status ); 00538 API_END; 00539 } 00540 00541 void mhdf_readSetContentEndIndices( hid_t table_id, long offset, long count, hid_t type, void* data, 00542 mhdf_Status* status ) 00543 { 00544 API_BEGIN; 00545 mhdf_readSetMetaColumn( table_id, CONTENT, offset, count, type, data, H5P_DEFAULT, status ); 00546 API_END; 00547 } 00548 void mhdf_readSetContentEndIndicesWithOpt( hid_t table_id, long offset, long count, hid_t type, void* data, hid_t prop, 00549 mhdf_Status* status ) 00550 { 00551 API_BEGIN; 00552 mhdf_readSetMetaColumn( table_id, CONTENT, offset, count, type, data, prop, status ); 00553 API_END; 00554 } 00555 00556 void mhdf_readSetChildEndIndices( hid_t table_id, long 
offset, long count, hid_t type, void* data, mhdf_Status* status ) 00557 { 00558 API_BEGIN; 00559 mhdf_readSetMetaColumn( table_id, CHILDREN, offset, count, type, data, H5P_DEFAULT, status ); 00560 API_END; 00561 } 00562 void mhdf_readSetChildEndIndicesWithOpt( hid_t table_id, long offset, long count, hid_t type, void* data, hid_t prop, 00563 mhdf_Status* status ) 00564 { 00565 API_BEGIN; 00566 mhdf_readSetMetaColumn( table_id, CHILDREN, offset, count, type, data, prop, status ); 00567 API_END; 00568 } 00569 00570 void mhdf_readSetParentEndIndices( hid_t table_id, long offset, long count, hid_t type, void* data, 00571 mhdf_Status* status ) 00572 { 00573 API_BEGIN; 00574 mhdf_readSetMetaColumn( table_id, PARENTS, offset, count, type, data, H5P_DEFAULT, status ); 00575 API_END; 00576 } 00577 void mhdf_readSetParentEndIndicesWithOpt( hid_t table_id, long offset, long count, hid_t type, void* data, hid_t prop, 00578 mhdf_Status* status ) 00579 { 00580 API_BEGIN; 00581 mhdf_readSetMetaColumn( table_id, PARENTS, offset, count, type, data, prop, status ); 00582 API_END; 00583 } 00584 00585 hid_t mhdf_createSetData( mhdf_FileHandle file_handle, long data_list_size, mhdf_Status* status ) 00586 { 00587 FileHandle* file_ptr; 00588 hid_t table_id; 00589 hsize_t dim = (hsize_t)data_list_size; 00590 API_BEGIN; 00591 00592 file_ptr = (FileHandle*)( file_handle ); 00593 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00594 00595 if( data_list_size < 1 ) 00596 { 00597 mhdf_setFail( status, "Invalid argument.\n" ); 00598 return -1; 00599 } 00600 00601 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_DATA_PATH, file_ptr->id_type, 1, &dim, status ); 00602 00603 API_END_H( 1 ); 00604 return table_id; 00605 } 00606 00607 hid_t mhdf_openSetData( mhdf_FileHandle file_handle, long* data_list_size_out, mhdf_Status* status ) 00608 { 00609 FileHandle* file_ptr; 00610 hid_t table_id; 00611 hsize_t dim; 00612 API_BEGIN; 00613 00614 file_ptr = (FileHandle*)( file_handle ); 
00615 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00616 00617 if( !data_list_size_out ) 00618 { 00619 mhdf_setFail( status, "Invalid argument.\n" ); 00620 return -1; 00621 } 00622 00623 table_id = mhdf_open_table( file_ptr->hdf_handle, SET_DATA_PATH, 1, &dim, status ); 00624 00625 *data_list_size_out = (long)dim; 00626 API_END_H( 1 ); 00627 return table_id; 00628 } 00629 00630 void mhdf_writeSetData( hid_t table_id, long offset, long count, hid_t type, const void* data, mhdf_Status* status ) 00631 { 00632 API_BEGIN; 00633 mhdf_write_data( table_id, offset, count, type, data, H5P_DEFAULT, status ); 00634 API_END; 00635 } 00636 void mhdf_writeSetDataWithOpt( hid_t table_id, long offset, long count, hid_t type, const void* data, hid_t prop, 00637 mhdf_Status* status ) 00638 { 00639 API_BEGIN; 00640 mhdf_write_data( table_id, offset, count, type, data, prop, status ); 00641 API_END; 00642 } 00643 00644 void mhdf_readSetData( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status ) 00645 { 00646 API_BEGIN; 00647 mhdf_read_data( table_id, offset, count, type, data, H5P_DEFAULT, status ); 00648 API_END; 00649 } 00650 void mhdf_readSetDataWithOpt( hid_t table_id, long offset, long count, hid_t type, void* data, hid_t prop, 00651 mhdf_Status* status ) 00652 { 00653 API_BEGIN; 00654 mhdf_read_data( table_id, offset, count, type, data, prop, status ); 00655 API_END; 00656 } 00657 00658 hid_t mhdf_createSetChildren( mhdf_FileHandle file_handle, long child_list_size, mhdf_Status* status ) 00659 { 00660 FileHandle* file_ptr; 00661 hid_t table_id; 00662 hsize_t dim = (hsize_t)child_list_size; 00663 API_BEGIN; 00664 00665 file_ptr = (FileHandle*)( file_handle ); 00666 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00667 00668 if( child_list_size < 1 ) 00669 { 00670 mhdf_setFail( status, "Invalid argument.\n" ); 00671 return -1; 00672 } 00673 00674 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_CHILD_PATH, 
file_ptr->id_type, 1, &dim, status ); 00675 00676 API_END_H( 1 ); 00677 return table_id; 00678 } 00679 00680 hid_t mhdf_openSetChildren( mhdf_FileHandle file_handle, long* child_list_size, mhdf_Status* status ) 00681 { 00682 FileHandle* file_ptr; 00683 hid_t table_id; 00684 hsize_t dim; 00685 API_BEGIN; 00686 00687 file_ptr = (FileHandle*)( file_handle ); 00688 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00689 00690 if( !child_list_size ) 00691 { 00692 mhdf_setFail( status, "Invalid argument.\n" ); 00693 return -1; 00694 } 00695 00696 table_id = mhdf_open_table( file_ptr->hdf_handle, SET_CHILD_PATH, 1, &dim, status ); 00697 00698 *child_list_size = (long)dim; 00699 API_END_H( 1 ); 00700 return table_id; 00701 } 00702 00703 hid_t mhdf_createSetParents( mhdf_FileHandle file_handle, long parent_list_size, mhdf_Status* status ) 00704 { 00705 FileHandle* file_ptr; 00706 hid_t table_id; 00707 hsize_t dim = (hsize_t)parent_list_size; 00708 API_BEGIN; 00709 00710 file_ptr = (FileHandle*)( file_handle ); 00711 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00712 00713 if( parent_list_size < 1 ) 00714 { 00715 mhdf_setFail( status, "Invalid argument.\n" ); 00716 return -1; 00717 } 00718 00719 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_PARENT_PATH, file_ptr->id_type, 1, &dim, status ); 00720 00721 API_END_H( 1 ); 00722 return table_id; 00723 } 00724 00725 hid_t mhdf_openSetParents( mhdf_FileHandle file_handle, long* parent_list_size, mhdf_Status* status ) 00726 { 00727 FileHandle* file_ptr; 00728 hid_t table_id; 00729 hsize_t dim; 00730 API_BEGIN; 00731 00732 file_ptr = (FileHandle*)( file_handle ); 00733 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1; 00734 00735 if( !parent_list_size ) 00736 { 00737 mhdf_setFail( status, "Invalid argument.\n" ); 00738 return -1; 00739 } 00740 00741 table_id = mhdf_open_table( file_ptr->hdf_handle, SET_PARENT_PATH, 1, &dim, status ); 00742 00743 *parent_list_size = (long)dim; 00744 
API_END_H( 1 ); 00745 return table_id; 00746 } 00747 00748 void mhdf_writeSetParentsChildren( hid_t table_id, long offset, long count, hid_t type, const void* data, 00749 mhdf_Status* status ) 00750 { 00751 API_BEGIN; 00752 mhdf_write_data( table_id, offset, count, type, data, H5P_DEFAULT, status ); 00753 API_END; 00754 } 00755 void mhdf_writeSetParentsChildrenWithOpt( hid_t table_id, long offset, long count, hid_t type, const void* data, 00756 hid_t prop, mhdf_Status* status ) 00757 { 00758 API_BEGIN; 00759 mhdf_write_data( table_id, offset, count, type, data, prop, status ); 00760 API_END; 00761 } 00762 00763 void mhdf_readSetParentsChildren( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status ) 00764 { 00765 API_BEGIN; 00766 mhdf_read_data( table_id, offset, count, type, data, H5P_DEFAULT, status ); 00767 API_END; 00768 } 00769 void mhdf_readSetParentsChildrenWithOpt( hid_t table_id, long offset, long count, hid_t type, void* data, hid_t prop, 00770 mhdf_Status* status ) 00771 { 00772 API_BEGIN; 00773 mhdf_read_data( table_id, offset, count, type, data, prop, status ); 00774 API_END; 00775 }