MOAB — Mesh Oriented datABase (version 5.4.1):
an array-based unstructured mesh datastructure.
00001 /**
00002 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
00003 * storing and accessing finite element mesh data.
00004 *
00005 * Copyright 2004 Sandia Corporation. Under the terms of Contract
00006 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
00007 * retains certain rights in this software.
00008 *
00009 * This library is free software; you can redistribute it and/or
00010 * modify it under the terms of the GNU Lesser General Public
00011 * License as published by the Free Software Foundation; either
00012 * version 2.1 of the License, or (at your option) any later version.
00013 *
00014 */
00015
#include <H5Tpublic.h>
#include <H5Gpublic.h>
#include <H5Dpublic.h>
#include <H5Spublic.h>
#include <H5Ppublic.h>
#include "mhdf.h"
#include "util.h"
#include "file-handle.h"
#include "status.h"
#include "names-and-paths.h"
00026
/** Test whether the file contains meshset data.
 *
 * Returns the result of checking for the set metadata table inside the
 * set group: 1 if present, 0 if absent, negative on error.  Optionally
 * also reports whether the optional contents, child-link, and
 * parent-link tables exist.
 *
 * \param file        Opaque mhdf file handle (actually a FileHandle*).
 * \param have_data   If non-NULL, receives presence flag for the set-contents table.
 * \param have_child  If non-NULL, receives presence flag for the child-link table.
 * \param have_parent If non-NULL, receives presence flag for the parent-link table.
 * \param status      Output status; set to okay only when all checks succeed.
 */
int mhdf_haveSets( mhdf_FileHandle file, int* have_data, int* have_child, int* have_parent, mhdf_Status* status )
{
    FileHandle* file_ptr = (FileHandle*)file;
    hid_t root_id, set_id;
    int result;
    API_BEGIN;

    if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;

    /* H5Gopen_vers selects between the HDF5 1.6 and 1.8+ group-open APIs. */
#if defined( H5Gopen_vers ) && H5Gopen_vers > 1
    root_id = H5Gopen2( file_ptr->hdf_handle, ROOT_GROUP, H5P_DEFAULT );
#else
    root_id = H5Gopen( file_ptr->hdf_handle, ROOT_GROUP );
#endif
    if( root_id < 0 )
    {
        mhdf_setFail( status, "H5Gopen( \"%s\" ) failed.", ROOT_GROUP );
        return -1;
    }

    /* No set group at all: propagate absence (0) or error (<0) as-is. */
    result = mhdf_is_in_group( root_id, SET_GROUP_NAME, status );
    if( result < 1 )
    {
        H5Gclose( root_id );
        return result;
    }

#if defined( H5Gopen_vers ) && H5Gopen_vers > 1
    set_id = H5Gopen2( root_id, SET_GROUP_NAME, H5P_DEFAULT );
#else
    set_id = H5Gopen( root_id, SET_GROUP_NAME );
#endif
    H5Gclose( root_id ); /* root group no longer needed once set group is open */
    if( set_id < 0 )
    {
        mhdf_setFail( status, "H5Gopen( \"%s\" ) failed.", SET_GROUP );
        return -1;
    }

    /* This result (presence of the set metadata table) is the return value. */
    result = mhdf_is_in_group( set_id, SET_META_NAME, status );
    if( result < 0 )
    {
        H5Gclose( set_id );
        return result;
    }

    if( have_data )
    {
        *have_data = mhdf_is_in_group( set_id, SET_DATA_NAME, status );
        if( *have_data < 0 )
        {
            H5Gclose( set_id );
            return *have_data;
        }
    }

    if( have_child )
    {
        *have_child = mhdf_is_in_group( set_id, SET_CHILD_NAME, status );
        if( *have_child < 0 )
        {
            H5Gclose( set_id );
            return *have_child;
        }
    }

    if( have_parent )
    {
        *have_parent = mhdf_is_in_group( set_id, SET_PARENT_NAME, status );
        if( *have_parent < 0 )
        {
            H5Gclose( set_id );
            return *have_parent;
        }
    }

    mhdf_setOkay( status );
    H5Gclose( set_id );
    API_END;
    return result;
}
00108
00109 hid_t mhdf_createSetMeta( mhdf_FileHandle file, long num_sets, long* first_id_out, mhdf_Status* status )
00110 {
00111 FileHandle* file_ptr = (FileHandle*)file;
00112 hid_t table_id;
00113 hsize_t dims[2];
00114 long first_id;
00115 API_BEGIN;
00116
00117 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00118
00119 dims[0] = (hsize_t)num_sets;
00120 dims[1] = (hsize_t)4;
00121 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_META_PATH, MHDF_INDEX_TYPE, 2, dims, status );
00122 if( table_id < 0 ) return -1;
00123
00124 first_id = file_ptr->max_id + 1;
00125 if( !mhdf_create_scalar_attrib( table_id, START_ID_ATTRIB, H5T_NATIVE_LONG, &first_id, status ) )
00126 {
00127 H5Dclose( table_id );
00128 return -1;
00129 }
00130
00131 *first_id_out = first_id;
00132 file_ptr->max_id += num_sets;
00133 if( !mhdf_write_max_id( file_ptr, status ) )
00134 {
00135 H5Dclose( table_id );
00136 return -1;
00137 }
00138 file_ptr->open_handle_count++;
00139 mhdf_setOkay( status );
00140
00141 API_END_H( 1 );
00142 return table_id;
00143 }
00144
00145 hid_t mhdf_openSetMeta( mhdf_FileHandle file, long* num_sets, long* first_id_out, mhdf_Status* status )
00146 {
00147 FileHandle* file_ptr = (FileHandle*)file;
00148 hid_t table_id;
00149 hsize_t dims[2];
00150 API_BEGIN;
00151
00152 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00153
00154 table_id = mhdf_open_table2( file_ptr->hdf_handle, SET_META_PATH, 2, dims, first_id_out, status );
00155 if( table_id < 0 ) return -1;
00156
00157 /* If dims[1] == 3, then old format of table.
00158 * Deal with it in mhdf_readSetMeta and mhdf_writeSetMeta
00159 */
00160 if( dims[1] != 4 && dims[1] != 3 )
00161 {
00162 mhdf_setFail( status, "Invalid format for meshset table.\n" );
00163 H5Dclose( table_id );
00164 return -1;
00165 }
00166
00167 *num_sets = dims[0];
00168 file_ptr->open_handle_count++;
00169 mhdf_setOkay( status );
00170 API_END_H( 1 );
00171 return table_id;
00172 }
00173
00174 hid_t mhdf_openSetMetaSimple( mhdf_FileHandle file, mhdf_Status* status )
00175 {
00176 FileHandle* file_ptr = (FileHandle*)file;
00177 hid_t table_id;
00178 API_BEGIN;
00179
00180 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00181
00182 table_id = mhdf_open_table_simple( file_ptr->hdf_handle, SET_META_PATH, status );
00183 if( table_id < 0 ) return -1;
00184
00185 file_ptr->open_handle_count++;
00186 mhdf_setOkay( status );
00187 API_END_H( 1 );
00188 return table_id;
00189 }
00190
00191 static int mhdf_readwriteSetMeta( hid_t table_id,
00192 int read,
00193 long offset,
00194 long count,
00195 hid_t type,
00196 void* data,
00197 hid_t prop,
00198 mhdf_Status* status )
00199 {
00200 hid_t slab_id, sslab_id, smem_id, mem_id;
00201 hsize_t offsets[2], counts[2], mcounts[2], moffsets[2] = { 0, 0 };
00202 herr_t rval = 0;
00203 int dims, i;
00204 const int fill_val = -1;
00205 const hsize_t one = 1;
00206
00207 mcounts[0] = count;
00208 mcounts[1] = 4;
00209 if( offset < 0 || count < 0 )
00210 {
00211 mhdf_setFail( status,
00212 "Invalid input for %s: "
00213 "offset = %ld, count = %ld\n",
00214 read ? "read" : "write", offset, count );
00215 return 0;
00216 }
00217
00218 slab_id = H5Dget_space( table_id );
00219 if( slab_id < 0 )
00220 {
00221 mhdf_setFail( status, "Internal error calling H5Dget_space." );
00222 return 0;
00223 }
00224
00225 dims = H5Sget_simple_extent_ndims( slab_id );
00226 if( dims != 2 )
00227 {
00228 H5Sclose( slab_id );
00229 mhdf_setFail( status, "Internal error: unexpected dataset rank: %d.", dims );
00230 return 0;
00231 }
00232
00233 dims = H5Sget_simple_extent_dims( slab_id, counts, NULL );
00234 if( dims < 0 )
00235 {
00236 H5Sclose( slab_id );
00237 mhdf_setFail( status, "Internal error calling H5Sget_simple_extend_dims." );
00238 return 0;
00239 }
00240
00241 if( (unsigned long)( offset + count ) > counts[0] )
00242 {
00243 H5Sclose( slab_id );
00244 mhdf_setFail( status, "Requested %s of rows %ld to %ld of a %ld row table.\n", read ? "read" : "write", offset,
00245 offset + count - 1, (long)counts[dims - 1] );
00246 return 0;
00247 }
00248 counts[0] = (hsize_t)count;
00249 offsets[0] = (hsize_t)offset;
00250
00251 if( count )
00252 mem_id = H5Screate_simple( dims, mcounts, NULL );
00253 else
00254 { /* special case for 'NULL' read during collective parallel IO */
00255 mem_id = H5Screate_simple( 1, &one, NULL );
00256 if( mem_id && 0 > H5Sselect_none( mem_id ) )
00257 {
00258 H5Sclose( mem_id );
00259 mem_id = -1;
00260 }
00261 }
00262 if( mem_id < 0 )
00263 {
00264 mhdf_setFail( status, "Internal error calling H5Screate_simple." );
00265 return 0;
00266 }
00267
00268 /* Normal table: 4 columns */
00269 if( counts[1] == 4 )
00270 {
00271 offsets[1] = 0;
00272 if( count )
00273 rval = H5Sselect_hyperslab( slab_id, H5S_SELECT_SET, offsets, NULL, counts, NULL );
00274 else /* special case for 'NULL' read during collective parallel IO */
00275 rval = H5Sselect_none( slab_id );
00276 if( rval < 0 )
00277 {
00278 H5Sclose( mem_id );
00279 H5Sclose( slab_id );
00280 mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." );
00281 return 0;
00282 }
00283
00284 if( read )
00285 rval = H5Dread( table_id, type, mem_id, slab_id, prop, data );
00286 else
00287 rval = H5Dwrite( table_id, type, mem_id, slab_id, prop, data );
00288 }
00289 /* Old table: 3 columns, no parent link counts */
00290 else if( counts[1] == 3 )
00291 {
00292 rval = 0;
00293 for( i = 0; i < 3 && rval >= 0; ++i )
00294 {
00295 smem_id = H5Scopy( mem_id );
00296 sslab_id = H5Scopy( slab_id );
00297 if( smem_id < 0 || sslab_id < 0 )
00298 {
00299 if( smem_id >= 0 ) H5Sclose( smem_id );
00300 mhdf_setFail( status, "Internal error calling H5Scopy." );
00301 return 0;
00302 }
00303
00304 counts[1] = 1;
00305 offsets[1] = i;
00306 if( count )
00307 rval = H5Sselect_hyperslab( sslab_id, H5S_SELECT_SET, offsets, NULL, counts, NULL );
00308 else
00309 rval = H5Sselect_none( sslab_id );
00310 if( rval < 0 )
00311 {
00312 H5Sclose( slab_id );
00313 H5Sclose( mem_id );
00314 mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." );
00315 return 0;
00316 }
00317
00318 mcounts[1] = 1;
00319 moffsets[1] = ( i == 2 ) ? 3 : i;
00320 rval = H5Sselect_hyperslab( smem_id, H5S_SELECT_SET, moffsets, NULL, mcounts, NULL );
00321 if( rval < 0 )
00322 {
00323 H5Sclose( sslab_id );
00324 H5Sclose( slab_id );
00325 H5Sclose( mem_id );
00326 mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." );
00327 return 0;
00328 }
00329
00330 if( read )
00331 rval = H5Dread( table_id, type, smem_id, sslab_id, prop, data );
00332 else
00333 rval = H5Dwrite( table_id, type, smem_id, sslab_id, prop, data );
00334
00335 H5Sclose( sslab_id );
00336 H5Sclose( smem_id );
00337 }
00338
00339 if( read && rval >= 0 )
00340 {
00341 mcounts[1] = 1;
00342 moffsets[1] = 2;
00343 H5Sselect_hyperslab( mem_id, H5S_SELECT_SET, moffsets, NULL, mcounts, NULL );
00344 rval = H5Dfill( &fill_val, H5T_NATIVE_INT, data, type, mem_id );
00345 }
00346 }
00347 else
00348 {
00349 H5Sclose( mem_id );
00350 H5Sclose( slab_id );
00351 mhdf_setFail( status, "Invalid dimension for meshset metadata table." );
00352 return 0;
00353 }
00354
00355 H5Sclose( slab_id );
00356 H5Sclose( mem_id );
00357 if( rval < 0 )
00358 {
00359 mhdf_setFail( status, "Internal error calling H5D%s.", read ? "read" : "write" );
00360 return 0;
00361 }
00362
00363 mhdf_setOkay( status );
00364 return 1;
00365 }
00366
/** Read rows [offset, offset+count) of the set metadata table using
 *  default (H5P_DEFAULT) transfer properties. */
void mhdf_readSetMeta( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readwriteSetMeta( table_id, 1, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Read rows [offset, offset+count) of the set metadata table using the
 *  caller-supplied HDF5 transfer property list 'prop'. */
void mhdf_readSetMetaWithOpt( hid_t table_id,
                              long offset,
                              long count,
                              hid_t type,
                              void* data,
                              hid_t prop,
                              mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readwriteSetMeta( table_id, 1, offset, count, type, data, prop, status );
    API_END;
}
00385
/** Write rows [offset, offset+count) of the set metadata table using
 *  default (H5P_DEFAULT) transfer properties. */
void mhdf_writeSetMeta( hid_t table_id, long offset, long count, hid_t type, const void* data, mhdf_Status* status )
{
    API_BEGIN;
    /* Cast away const: the shared helper takes void* because it also reads. */
    mhdf_readwriteSetMeta( table_id, 0, offset, count, type, (void*)data, H5P_DEFAULT, status );
    API_END;
}
/** Write rows [offset, offset+count) of the set metadata table using the
 *  caller-supplied HDF5 transfer property list 'prop'. */
void mhdf_writeSetMetaWithOpt( hid_t table_id,
                               long offset,
                               long count,
                               hid_t type,
                               const void* data,
                               hid_t prop,
                               mhdf_Status* status )
{
    API_BEGIN;
    /* Cast away const: the shared helper takes void* because it also reads. */
    mhdf_readwriteSetMeta( table_id, 0, offset, count, type, (void*)data, prop, status );
    API_END;
}
00404
/* Column indices within the (new-format, 4-column) set metadata table.
 * Old-format files have only three columns: {contents, children, flags}. */
enum SetMetaCol
{
    CONTENT = 0,  /* end index into the set-contents table */
    CHILDREN = 1, /* end index into the child-link table */
    PARENTS = 2,  /* end index into the parent-link table (absent in old format) */
    FLAGS = 3     /* set flag values */
};
00412
/* Read one column of the set metadata table for rows
 * [offset, offset+count) into the contiguous buffer 'data'.
 *
 * Handles the old 3-column file format transparently: a request for the
 * PARENTS column is satisfied by filling the buffer with -1, and a
 * request for FLAGS is redirected to file column 2.
 *
 * \param table_id  Open dataset handle for the set metadata table.
 * \param column    Which logical column to read (enum SetMetaCol).
 * \param offset    First row to read (>= 0).
 * \param count     Number of rows (>= 0; 0 selects nothing, for
 *                  collective parallel I/O).
 * \param type      Memory datatype of 'data'.
 * \param data      Buffer of at least 'count' elements of 'type'.
 * \param prop      HDF5 data-transfer property list.
 * \param status    Output status.
 * \return 1 on success, 0 on failure.
 */
static int mhdf_readSetMetaColumn( hid_t table_id,
                                   enum SetMetaCol column,
                                   long offset,
                                   long count,
                                   hid_t type,
                                   void* data,
                                   hid_t prop,
                                   mhdf_Status* status )
{
    hid_t slab_id, mem_id;
    hsize_t offsets[2], counts[2], mcount = count;
    herr_t rval = 0;
    int dims;
    const int fill_val = -1;

    if( offset < 0 || count < 0 )
    {
        mhdf_setFail( status,
                      "Invalid input for reading set description column: "
                      "offset = %ld, count = %ld\n",
                      offset, count );
        return 0;
    }

    /* Get dimensions of table, and check against requested count and offset */

    slab_id = H5Dget_space( table_id );
    if( slab_id < 0 )
    {
        mhdf_setFail( status, "Internal error calling H5Dget_space." );
        return 0;
    }

    dims = H5Sget_simple_extent_ndims( slab_id );
    if( dims != 2 )
    {
        H5Sclose( slab_id );
        mhdf_setFail( status, "Internal error: unexpected dataset rank: %d.", dims );
        return 0;
    }

    dims = H5Sget_simple_extent_dims( slab_id, counts, NULL );
    if( dims < 0 )
    {
        H5Sclose( slab_id );
        mhdf_setFail( status, "Internal error calling H5Sget_simple_extend_dims." );
        return 0;
    }

    if( (unsigned long)( offset + count ) > counts[0] )
    {
        H5Sclose( slab_id );
        mhdf_setFail( status, "Requested read of rows %ld to %ld of a %ld row table.\n", offset, offset + count - 1,
                      (long)counts[0] );
        return 0;
    }

    /* Create a slab definition for the block of memory we're reading into */

    mem_id = H5Screate_simple( 1, &mcount, NULL );
    if( mem_id < 0 )
    {
        H5Sclose( slab_id );
        mhdf_setFail( status, "Internal error calling H5Screate_simple." );
        return 0;
    }

    /* Old, 3-column table.
     * New table is {contents, children, parents, flags}
     * Old table is {contents, children, flags}
     * If asking for parents, just return zeros.
     * If asking for flags, fix column value.
     */
    offsets[1] = column;
    if( counts[1] == 3 )
    {
        if( column == PARENTS )
        {
            /* No parent data in the file: synthesize -1 for every row. */
            rval = H5Dfill( &fill_val, H5T_NATIVE_INT, data, type, mem_id );
            H5Sclose( mem_id );
            H5Sclose( slab_id );
            if( rval < 0 )
            {
                mhdf_setFail( status, "Internal error calling H5Dfill" );
                return 0;
            }
            else
            {
                mhdf_setOkay( status );
                return 1;
            }
        }
        else if( column == FLAGS )
            --offsets[1]; /* flags live in file column 2 in the old format */
    }
    else if( counts[1] != 4 )
    {
        H5Sclose( mem_id );
        H5Sclose( slab_id );
        mhdf_setFail( status, "Invalid dimension for meshset metadata table." );
        return 0;
    }

    /* Create a slab defintion for the portion of the table we want to read. */

    /* offsets[1] was initialized in the above block of code. */
    offsets[0] = (hsize_t)offset;
    counts[0] = (hsize_t)count;
    counts[1] = 1; /* one column */
    if( count )
        rval = H5Sselect_hyperslab( slab_id, H5S_SELECT_SET, offsets, NULL, counts, NULL );
    else
        rval = H5Sselect_none( slab_id );
    if( rval < 0 )
    {
        H5Sclose( mem_id );
        H5Sclose( slab_id );
        mhdf_setFail( status, "Internal error calling H5Sselect_hyperslab." );
        return 0;
    }

    /* Read the data */

    rval = H5Dread( table_id, type, mem_id, slab_id, prop, data );
    H5Sclose( mem_id );
    H5Sclose( slab_id );
    if( rval < 0 )
    {
        mhdf_setFail( status, "Internal error calling H5Dread." );
        return 0;
    }

    mhdf_setOkay( status );
    return 1;
}
00548
/** Read the flags column of the set metadata table for rows
 *  [offset, offset+count) with default transfer properties. */
void mhdf_readSetFlags( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, FLAGS, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Read the flags column of the set metadata table for rows
 *  [offset, offset+count) using transfer property list 'prop'. */
void mhdf_readSetFlagsWithOpt( hid_t table_id,
                               long offset,
                               long count,
                               hid_t type,
                               void* data,
                               hid_t prop,
                               mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, FLAGS, offset, count, type, data, prop, status );
    API_END;
}
00567
/** Read the content-end-index column of the set metadata table for rows
 *  [offset, offset+count) with default transfer properties. */
void mhdf_readSetContentEndIndices( hid_t table_id,
                                    long offset,
                                    long count,
                                    hid_t type,
                                    void* data,
                                    mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, CONTENT, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Read the content-end-index column of the set metadata table for rows
 *  [offset, offset+count) using transfer property list 'prop'. */
void mhdf_readSetContentEndIndicesWithOpt( hid_t table_id,
                                           long offset,
                                           long count,
                                           hid_t type,
                                           void* data,
                                           hid_t prop,
                                           mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, CONTENT, offset, count, type, data, prop, status );
    API_END;
}
00591
/** Read the child-end-index column of the set metadata table for rows
 *  [offset, offset+count) with default transfer properties. */
void mhdf_readSetChildEndIndices( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, CHILDREN, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Read the child-end-index column of the set metadata table for rows
 *  [offset, offset+count) using transfer property list 'prop'. */
void mhdf_readSetChildEndIndicesWithOpt( hid_t table_id,
                                         long offset,
                                         long count,
                                         hid_t type,
                                         void* data,
                                         hid_t prop,
                                         mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, CHILDREN, offset, count, type, data, prop, status );
    API_END;
}
00610
/** Read the parent-end-index column of the set metadata table for rows
 *  [offset, offset+count) with default transfer properties.  For
 *  old-format (3-column) files the helper fills the output with -1. */
void mhdf_readSetParentEndIndices( hid_t table_id,
                                   long offset,
                                   long count,
                                   hid_t type,
                                   void* data,
                                   mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, PARENTS, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Read the parent-end-index column of the set metadata table for rows
 *  [offset, offset+count) using transfer property list 'prop'.  For
 *  old-format (3-column) files the helper fills the output with -1. */
void mhdf_readSetParentEndIndicesWithOpt( hid_t table_id,
                                          long offset,
                                          long count,
                                          hid_t type,
                                          void* data,
                                          hid_t prop,
                                          mhdf_Status* status )
{
    API_BEGIN;
    mhdf_readSetMetaColumn( table_id, PARENTS, offset, count, type, data, prop, status );
    API_END;
}
00634
00635 hid_t mhdf_createSetData( mhdf_FileHandle file_handle, long data_list_size, mhdf_Status* status )
00636 {
00637 FileHandle* file_ptr;
00638 hid_t table_id;
00639 hsize_t dim = (hsize_t)data_list_size;
00640 API_BEGIN;
00641
00642 file_ptr = (FileHandle*)( file_handle );
00643 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00644
00645 if( data_list_size < 1 )
00646 {
00647 mhdf_setFail( status, "Invalid argument.\n" );
00648 return -1;
00649 }
00650
00651 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_DATA_PATH, file_ptr->id_type, 1, &dim, status );
00652
00653 API_END_H( 1 );
00654 return table_id;
00655 }
00656
00657 hid_t mhdf_openSetData( mhdf_FileHandle file_handle, long* data_list_size_out, mhdf_Status* status )
00658 {
00659 FileHandle* file_ptr;
00660 hid_t table_id;
00661 hsize_t dim;
00662 API_BEGIN;
00663
00664 file_ptr = (FileHandle*)( file_handle );
00665 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00666
00667 if( !data_list_size_out )
00668 {
00669 mhdf_setFail( status, "Invalid argument.\n" );
00670 return -1;
00671 }
00672
00673 table_id = mhdf_open_table( file_ptr->hdf_handle, SET_DATA_PATH, 1, &dim, status );
00674
00675 *data_list_size_out = (long)dim;
00676 API_END_H( 1 );
00677 return table_id;
00678 }
00679
/** Write entries [offset, offset+count) of the set-contents table using
 *  default transfer properties. */
void mhdf_writeSetData( hid_t table_id, long offset, long count, hid_t type, const void* data, mhdf_Status* status )
{
    API_BEGIN;
    mhdf_write_data( table_id, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Write entries [offset, offset+count) of the set-contents table using
 *  transfer property list 'prop'. */
void mhdf_writeSetDataWithOpt( hid_t table_id,
                               long offset,
                               long count,
                               hid_t type,
                               const void* data,
                               hid_t prop,
                               mhdf_Status* status )
{
    API_BEGIN;
    mhdf_write_data( table_id, offset, count, type, data, prop, status );
    API_END;
}
00698
/** Read entries [offset, offset+count) of the set-contents table using
 *  default transfer properties. */
void mhdf_readSetData( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status )
{
    API_BEGIN;
    mhdf_read_data( table_id, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Read entries [offset, offset+count) of the set-contents table using
 *  transfer property list 'prop'. */
void mhdf_readSetDataWithOpt( hid_t table_id,
                              long offset,
                              long count,
                              hid_t type,
                              void* data,
                              hid_t prop,
                              mhdf_Status* status )
{
    API_BEGIN;
    mhdf_read_data( table_id, offset, count, type, data, prop, status );
    API_END;
}
00717
00718 hid_t mhdf_createSetChildren( mhdf_FileHandle file_handle, long child_list_size, mhdf_Status* status )
00719 {
00720 FileHandle* file_ptr;
00721 hid_t table_id;
00722 hsize_t dim = (hsize_t)child_list_size;
00723 API_BEGIN;
00724
00725 file_ptr = (FileHandle*)( file_handle );
00726 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00727
00728 if( child_list_size < 1 )
00729 {
00730 mhdf_setFail( status, "Invalid argument.\n" );
00731 return -1;
00732 }
00733
00734 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_CHILD_PATH, file_ptr->id_type, 1, &dim, status );
00735
00736 API_END_H( 1 );
00737 return table_id;
00738 }
00739
00740 hid_t mhdf_openSetChildren( mhdf_FileHandle file_handle, long* child_list_size, mhdf_Status* status )
00741 {
00742 FileHandle* file_ptr;
00743 hid_t table_id;
00744 hsize_t dim;
00745 API_BEGIN;
00746
00747 file_ptr = (FileHandle*)( file_handle );
00748 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00749
00750 if( !child_list_size )
00751 {
00752 mhdf_setFail( status, "Invalid argument.\n" );
00753 return -1;
00754 }
00755
00756 table_id = mhdf_open_table( file_ptr->hdf_handle, SET_CHILD_PATH, 1, &dim, status );
00757
00758 *child_list_size = (long)dim;
00759 API_END_H( 1 );
00760 return table_id;
00761 }
00762
00763 hid_t mhdf_createSetParents( mhdf_FileHandle file_handle, long parent_list_size, mhdf_Status* status )
00764 {
00765 FileHandle* file_ptr;
00766 hid_t table_id;
00767 hsize_t dim = (hsize_t)parent_list_size;
00768 API_BEGIN;
00769
00770 file_ptr = (FileHandle*)( file_handle );
00771 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00772
00773 if( parent_list_size < 1 )
00774 {
00775 mhdf_setFail( status, "Invalid argument.\n" );
00776 return -1;
00777 }
00778
00779 table_id = mhdf_create_table( file_ptr->hdf_handle, SET_PARENT_PATH, file_ptr->id_type, 1, &dim, status );
00780
00781 API_END_H( 1 );
00782 return table_id;
00783 }
00784
00785 hid_t mhdf_openSetParents( mhdf_FileHandle file_handle, long* parent_list_size, mhdf_Status* status )
00786 {
00787 FileHandle* file_ptr;
00788 hid_t table_id;
00789 hsize_t dim;
00790 API_BEGIN;
00791
00792 file_ptr = (FileHandle*)( file_handle );
00793 if( !mhdf_check_valid_file( file_ptr, status ) ) return -1;
00794
00795 if( !parent_list_size )
00796 {
00797 mhdf_setFail( status, "Invalid argument.\n" );
00798 return -1;
00799 }
00800
00801 table_id = mhdf_open_table( file_ptr->hdf_handle, SET_PARENT_PATH, 1, &dim, status );
00802
00803 *parent_list_size = (long)dim;
00804 API_END_H( 1 );
00805 return table_id;
00806 }
00807
/** Write entries [offset, offset+count) of a parent- or child-link table
 *  using default transfer properties. */
void mhdf_writeSetParentsChildren( hid_t table_id,
                                   long offset,
                                   long count,
                                   hid_t type,
                                   const void* data,
                                   mhdf_Status* status )
{
    API_BEGIN;
    mhdf_write_data( table_id, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Write entries [offset, offset+count) of a parent- or child-link table
 *  using transfer property list 'prop'. */
void mhdf_writeSetParentsChildrenWithOpt( hid_t table_id,
                                          long offset,
                                          long count,
                                          hid_t type,
                                          const void* data,
                                          hid_t prop,
                                          mhdf_Status* status )
{
    API_BEGIN;
    mhdf_write_data( table_id, offset, count, type, data, prop, status );
    API_END;
}
00831
/** Read entries [offset, offset+count) of a parent- or child-link table
 *  using default transfer properties. */
void mhdf_readSetParentsChildren( hid_t table_id, long offset, long count, hid_t type, void* data, mhdf_Status* status )
{
    API_BEGIN;
    mhdf_read_data( table_id, offset, count, type, data, H5P_DEFAULT, status );
    API_END;
}
/** Read entries [offset, offset+count) of a parent- or child-link table
 *  using transfer property list 'prop'. */
void mhdf_readSetParentsChildrenWithOpt( hid_t table_id,
                                         long offset,
                                         long count,
                                         hid_t type,
                                         void* data,
                                         hid_t prop,
                                         mhdf_Status* status )
{
    API_BEGIN;
    mhdf_read_data( table_id, offset, count, type, data, prop, status );
    API_END;
}