MOAB: Mesh Oriented datABase
(version 5.2.1)
#include "moab/Range.hpp"
#include "TestUtil.hpp"

#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/Skinner.hpp"
#include "MBTagConventions.hpp"
#include "moab/CN.hpp"
#include "MBParallelConventions.h"

#include <iostream>
#include <sstream>
#include <algorithm>
#include <cstring>  // strcmp
#include <cstdlib>  // atoi
#include <cmath>    // sqrt
#include "moab_mpi.h"
#include <unistd.h>
#include <float.h>
#include <stdio.h>
#include <ctime>

using namespace moab;

#ifdef MESHDIR
const char* InputFile = STRINGIFY( MESHDIR ) "/ptest.cub";
const char* InputMix = STRINGIFY( MESHDIR ) "/io/mix.h5m";
const char* InputOneSide = STRINGIFY( MESHDIR ) "/io/oneside.h5m";
#else
#error Specify MESHDIR to compile test
#endif

void load_and_partition( Interface& moab, const char* filename, bool print_debug = false );

void save_and_load_on_root( Interface& moab, const char* tmp_filename );

void check_identical_mesh( Interface& moab1, Interface& moab2 );

void test_write_elements();
void test_write_shared_sets();
void test_var_length_parallel();

void test_read_elements_common( bool by_rank, int intervals, bool print_time, const char* extra_opts = 0 );

int ReadIntervals = 0;
void test_read_elements()
{
    test_read_elements_common( false, ReadIntervals, false );
}
void test_read_elements_by_rank()
{
    test_read_elements_common( true, ReadIntervals, false );
}
void test_bcast_summary()
{
    test_read_elements_common( false, ReadIntervals, false, "BCAST_SUMMARY=yes" );
}
void test_read_summary()
{
    test_read_elements_common( false, ReadIntervals, false, "BCAST_SUMMARY=no" );
}
void test_read_time();

void test_read_tags();
void test_read_global_tags();
void test_read_sets_common( const char* extra_opts = 0 );
void test_read_sets()
{
    test_read_sets_common();
}
void test_read_sets_bcast_dups()
{
    test_read_sets_common( "BCAST_DUPLICATE_READS=yes" );
}
void test_read_sets_read_dups()
{
    test_read_sets_common( "BCAST_DUPLICATE_READS=no" );
}
void test_read_bc_sets();

void test_write_different_element_types();
void test_write_different_tags();
void test_write_polygons();
void test_write_unbalanced();
void test_write_dense_tags();
void test_read_non_adjs_side();

const char PARTITION_TAG[] = "PARTITION";

bool KeepTmpFiles = false;
bool PauseOnStart = false;
bool HyperslabAppend = false;
const int DefaultReadIntervals = 2;
int ReadDebugLevel = 0;
int WriteDebugLevel = 0;
int ReadBlocks = 1;

enum Mode
{
    READ_PART,
    READ_DELETE,
    BCAST_DELETE
};
const Mode DEFAULT_MODE = READ_PART;
const bool DEFAULT_BY_RANK = false;

std::string get_read_options( bool by_rank = DEFAULT_BY_RANK, Mode mode = DEFAULT_MODE, const char* extra_opts = 0 );
std::string get_read_options( const char* extra_opts )
{
    return get_read_options( DEFAULT_BY_RANK, DEFAULT_MODE, extra_opts );
}
std::string get_read_options( bool by_rank, const char* extra_opts )
{
    return get_read_options( by_rank, DEFAULT_MODE, extra_opts );
}

std::string get_read_options( bool by_rank, Mode mode, const char* extra_opts )
{
    int numproc;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );

    // do parallel read unless only one processor
    std::ostringstream str;
    if( numproc > 1 )
    {
        str << "PARALLEL=";
        switch( mode )
        {
            case READ_PART:
                str << "READ_PART";
                break;
            case READ_DELETE:
                str << "READ_DELETE";
                break;
            case BCAST_DELETE:
                str << "BCAST_DELETE";
                break;
        }
        str << ";PARTITION=" << PARTITION_TAG << ";";
        if( by_rank ) str << "PARTITION_BY_RANK;";
    }

    if( extra_opts ) str << extra_opts << ";";

    if( ReadDebugLevel ) str << "DEBUG_IO=" << ReadDebugLevel << ";";

    if( HyperslabAppend ) str << "HYPERSLAB_APPEND;HYPERSLAB_SELECT_LIMIT=2147483647";

    return str.str();
}

int main( int argc, char* argv[] )
{
    int err = MPI_Init( &argc, &argv );
    CHECK( !err );

    for( int i = 1; i < argc; ++i )
    {
        if( !strcmp( argv[i], "-k" ) )
            KeepTmpFiles = true;
        else if( !strcmp( argv[i], "-p" ) )
            PauseOnStart = true;
        else if( !strcmp( argv[i], "-R" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadIntervals = atoi( argv[i] );
            CHECK( ReadIntervals > 0 );
        }
        else if( !strcmp( argv[i], "-b" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadBlocks = atoi( argv[i] );
            CHECK( ReadBlocks > 0 );
        }
        else if( !strcmp( argv[i], "-r" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadDebugLevel = atoi( argv[i] );
            CHECK( ReadDebugLevel > 0 );
        }
        else if( !strcmp( argv[i], "-w" ) )
        {
            ++i;
            CHECK( i < argc );
            WriteDebugLevel = atoi( argv[i] );
            CHECK( WriteDebugLevel > 0 );
        }
        else if( !strcmp( argv[i], "-A" ) )
            HyperslabAppend = true;
        else
        {
            std::cerr << "Usage: " << argv[0]
                      << " [-k] [-p] [-R <intervals>] [-r <level>] [-w <level>] [-b <blocks>] [-A]" << std::endl;
            return 1;
        }
    }

    if( PauseOnStart )
    {
        int rank;
        MPI_Comm_rank( MPI_COMM_WORLD, &rank );
        printf( "Rank %2d PID %lu\n", rank, (unsigned long)getpid() );
        sleep( 30 );
    }

    int result = 0;
    if( ReadIntervals ) { result = RUN_TEST( test_read_time ); }
    else
    {
        ReadIntervals = DefaultReadIntervals;
        result += RUN_TEST( test_write_elements );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_shared_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_var_length_parallel );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_elements );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_elements_by_rank );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_global_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets_bcast_dups );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets_read_dups );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_bc_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_different_element_types );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_different_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_polygons );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_unbalanced );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_dense_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_non_adjs_side );
        MPI_Barrier( MPI_COMM_WORLD );
    }

    MPI_Finalize();
    return result;
}

/* Assume geometric topology sets correspond to interface sets
 * in that the entities contained inclusively in a geometric
 * topology set must be shared by the same set of processors.
 * As we partition based on geometric volumes, this assumption
 * should always be true. Print for each geometric topology set
 * the list of processors it is shared with.
 */
void print_partitioned_entities( Interface& moab, bool list_non_shared = false )
{
    ErrorCode rval;
    int size, rank;
    std::vector< int > ent_procs( MAX_SHARING_PROCS ), tmp_ent_procs( MAX_SHARING_PROCS );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    // expect shared entities to correspond to geometric sets

    // get tags for parallel data
    Tag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
    rval = moab.tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedp_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER, sharedps_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedh_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE, sharedhs_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatus_tag );CHECK_ERR( rval );

    // for each geometric entity, check which processor we are sharing
    // entities with
    Tag geom_tag, id_tag;
    rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHECK_ERR( rval );
    id_tag = moab.globalId_tag();
    const char* topo_names_s[] = { "Vertex", "Curve", "Surface", "Volume" };
    // const char* topo_names_p[] = { "Vertices", "Curves", "Surfaces", "Volumes" };
    std::ostringstream buffer;  // buffer output in an attempt to prevent lines from different
                                // processors being mixed up.
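    // loop over the four geometric dimensions: 0 = Vertex, 1 = Curve, 2 = Surface, 3 = Volume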
    for( int t = 0; t < 4; ++t )
    {
        Range geom;
        int dim = t;
        const void* ptr = &dim;
        rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &geom_tag, &ptr, 1, geom );CHECK_ERR( rval );

        // for each geometric entity of dimension 't'
        for( Range::const_iterator i = geom.begin(); i != geom.end(); ++i )
        {
            EntityHandle set = *i;
            int id;
            rval = moab.tag_get_data( id_tag, &set, 1, &id );CHECK_ERR( rval );

            buffer.clear();

            // get entities contained in this set but not its children
            Range entities, tmp_entities, children, diff;
            rval = moab.get_entities_by_handle( set, entities );CHECK_ERR( rval );
            rval = moab.get_child_meshsets( set, children );CHECK_ERR( rval );
            for( Range::const_iterator j = children.begin(); j != children.end(); ++j )
            {
                tmp_entities.clear();
                rval = moab.get_entities_by_handle( *j, tmp_entities );CHECK_ERR( rval );
                diff = subtract( entities, tmp_entities );
                entities.swap( diff );
            }

            // for each entity, check owning processors
            std::vector< char > status_flags( entities.size(), 0 );
            std::vector< int > shared_procs( entities.size(), 0 );
            rval = moab.tag_get_data( pstatus_tag, entities, &status_flags[0] );
            if( MB_TAG_NOT_FOUND == rval )
            {
                rval = MB_SUCCESS;  // keep default values of zero (not shared)
            }
            CHECK_ERR( rval );
            unsigned num_shared = 0, num_owned = 0;
            for( size_t j = 0; j < status_flags.size(); ++j )
            {
                num_shared += !!( status_flags[j] & PSTATUS_SHARED );
                num_owned += !( status_flags[j] & PSTATUS_NOT_OWNED );
            }

            if( !num_shared )
            {
                if( list_non_shared )
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "not shared" << std::endl;
            }
            else if( num_shared != entities.size() )
            {
                buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                       << "ERROR: " << num_shared << " of " << entities.size() << " entities marked as 'shared'"
                       << std::endl;
            }
            else if( num_owned && num_owned != entities.size() )
            {
                buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                       << "ERROR: " << num_owned << " of " << entities.size() << " entities owned by this processor"
                       << std::endl;
            }
            else
            {
                rval = moab.tag_get_data( sharedp_tag, entities, &shared_procs[0] );CHECK_ERR( rval );
                int proc = shared_procs[0];
                bool all_match = true;
                for( size_t j = 1; j < shared_procs.size(); ++j )
                    if( shared_procs[j] != proc ) all_match = false;
                if( !all_match )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "ERROR: processor IDs do not match!" << std::endl;
                }
                else if( proc != -1 )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "shared with processor " << proc;
                    if( num_owned ) buffer << " (owned by this processor)";
                    buffer << std::endl;
                }
                else if( entities.empty() )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "ERROR: no entities!" << std::endl;
                }
                else
                {
                    Range::const_iterator j = entities.begin();
                    rval = moab.tag_get_data( sharedps_tag, &*j, 1, &ent_procs[0] );CHECK_ERR( rval );
                    for( ++j; j != entities.end(); ++j )
                    {
                        rval = moab.tag_get_data( sharedps_tag, &*j, 1, &tmp_ent_procs[0] );CHECK_ERR( rval );
                        if( ent_procs != tmp_ent_procs ) all_match = false;
                    }
                    if( !all_match )
                    {
                        buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                               << "ERROR: processor IDs do not match!" << std::endl;
                    }
                    else
                    {
                        buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                               << "processors ";
                        for( int k = 0; k < MAX_SHARING_PROCS; ++k )
                            if( ent_procs[k] != -1 ) buffer << ent_procs[k] << ", ";
                        if( num_owned ) buffer << " (owned by this processor)";
                        buffer << std::endl;
                    }
                }
            }
        }
    }
    for( int i = 0; i < size; ++i )
    {
        MPI_Barrier( MPI_COMM_WORLD );
        if( i == rank )
        {
            std::cout << buffer.str();
            std::cout.flush();
        }
    }
}

void load_and_partition( Interface& moab, const char* filename, bool print )
{
    ErrorCode rval;

    rval = moab.load_file( filename, 0,
                           "PARALLEL=READ_DELETE;"
                           "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
                           "PARTITION_DISTRIBUTE;"
                           "PARALLEL_RESOLVE_SHARED_ENTS" );

    if( print ) print_partitioned_entities( moab );

    CHECK_ERR( rval );
}

void save_and_load_on_root( Interface& moab, const char* tmp_filename )
{
    ErrorCode rval;
    int procnum;
    MPI_Comm_rank( MPI_COMM_WORLD, &procnum );

    const char* opt = "PARALLEL=WRITE_PART";
    std::string str;
    if( WriteDebugLevel )
    {
        std::ostringstream s;
        s << opt << ";DEBUG_IO=" << WriteDebugLevel;
        str = s.str();
        opt = str.c_str();
    }
    rval = moab.write_file( tmp_filename, 0, opt );
    if( MB_SUCCESS != rval )
    {
        std::cerr << "Parallel write failed on processor " << procnum << std::endl;
        if( procnum == 0 && !KeepTmpFiles ) remove( tmp_filename );CHECK_ERR( rval );
    }

    if( procnum == 0 && KeepTmpFiles ) std::cout << "Wrote file: \"" << tmp_filename << "\"\n";

    // All created pcomm objects should be retrieved (with the pcomm tag) and
    // deleted at this time. Otherwise, the memory used by them will be leaked
    // as the pcomm tag will be deleted by moab.delete_mesh() below.
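    // (ParallelComm::get_all_pcomm fills pc_list with every ParallelComm instance
    // attached to this Interface, so each one can be deleted explicitly.)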
    std::vector< ParallelComm* > pc_list;
    ParallelComm::get_all_pcomm( &moab, pc_list );
    for( std::vector< ParallelComm* >::iterator vit = pc_list.begin(); vit != pc_list.end(); ++vit )
        delete *vit;

    moab.delete_mesh();
    std::vector< Tag > tags;
    rval = moab.tag_get_tags( tags );CHECK_ERR( rval );
    for( size_t i = 0; i < tags.size(); ++i )
    {
        rval = moab.tag_delete( tags[i] );CHECK_ERR( rval );
    }

    if( procnum == 0 )
    {
        rval = moab.load_file( tmp_filename );
        if( !KeepTmpFiles ) remove( tmp_filename );CHECK_ERR( rval );
    }
}

void count_owned_entities( Interface& moab, int counts[MBENTITYSET] )
{
    ErrorCode rval;
    ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
    CHECK( 0 != pcomm );
    std::fill( counts, counts + MBENTITYSET, 0u );

    for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
    {
        Range range;
        rval = moab.get_entities_by_type( 0, t, range );CHECK_ERR( rval );
        if( !range.empty() ) rval = pcomm->filter_pstatus( range, PSTATUS_NOT_OWNED, PSTATUS_NOT );CHECK_ERR( rval );
        counts[t] = range.size();
    }
}

void check_identical_mesh( Interface& mb1, Interface& mb2 )
{
    ErrorCode rval;
    std::map< EntityHandle, EntityHandle > entmap;

    // match vertices by coordinate
    Range r1, r2;
    Range::iterator i1, i2;
    rval = mb1.get_entities_by_type( 0, MBVERTEX, r1 );CHECK_ERR( rval );
    rval = mb2.get_entities_by_type( 0, MBVERTEX, r2 );CHECK_ERR( rval );
    CHECK_EQUAL( r1.size(), r2.size() );
    for( i1 = r1.begin(); i1 != r1.end(); ++i1 )
    {
        double coords1[3];
        rval = mb1.get_coords( &*i1, 1, coords1 );CHECK_ERR( rval );
        for( i2 = r2.begin(); i2 != r2.end(); ++i2 )
        {
            double coords2[3];
            rval = mb2.get_coords( &*i2, 1, coords2 );CHECK_ERR( rval );
            coords2[0] -= coords1[0];
            coords2[1] -= coords1[1];
            coords2[2] -= coords1[2];
            double lensqr = coords2[0] * coords2[0] + coords2[1] * coords2[1] + coords2[2] * coords2[2];
            if( lensqr < 1e-12 ) break;
        }
        CHECK( i2 != r2.end() );
        entmap[*i2] = *i1;
        r2.erase( i2 );
    }

    // match element connectivity
    std::vector< EntityHandle > conn1, conn2;
    for( EntityType t = MBEDGE; t < MBENTITYSET; ++t )
    {
        r1.clear();
        rval = mb1.get_entities_by_type( 0, t, r1 );CHECK_ERR( rval );
        r2.clear();
        rval = mb2.get_entities_by_type( 0, t, r2 );CHECK_ERR( rval );
        CHECK_EQUAL( r1.size(), r2.size() );

        for( i1 = r1.begin(); i1 != r1.end(); ++i1 )
        {
            conn1.clear();
            rval = mb1.get_connectivity( &*i1, 1, conn1 );CHECK_ERR( rval );
            for( i2 = r2.begin(); i2 != r2.end(); ++i2 )
            {
                conn2.clear();
                rval = mb2.get_connectivity( &*i2, 1, conn2 );CHECK_ERR( rval );
                if( conn1.size() != conn2.size() ) continue;
                for( std::vector< EntityHandle >::iterator j = conn2.begin(); j != conn2.end(); ++j )
                    *j = entmap[*j];
                if( conn1 == conn2 ) break;
            }

            CHECK( i2 != r2.end() );
            entmap[*i2] = *i1;
            r2.erase( i2 );
        }
    }
}

void test_write_elements()
{
    int proc_counts[MBENTITYSET], all_counts[MBENTITYSET], file_counts[MBENTITYSET];
    int err, rank;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );

    // load and partition a .cub file
    Core moab_instance;
    Interface& moab = moab_instance;
    load_and_partition( moab, InputFile, false );

    // count number of owned entities of each type and sum over all procs
    count_owned_entities( moab, proc_counts );
    std::fill( all_counts, all_counts + MBENTITYSET, 0u );
    err = MPI_Allreduce( proc_counts, all_counts, MBENTITYSET, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    CHECK( !err );

    // do parallel write and on root proc do serial read of written file
    save_and_load_on_root( moab, "test_write_elements.h5m" );
    if( rank == 0 )
    {
        for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
        {
            rval = moab.get_number_entities_by_type( 0, t, file_counts[t] );CHECK_ERR( rval );
        }
    }

    // check that sum of owned entities equals number of entities from serial read

    err = MPI_Bcast( file_counts, MBENTITYSET, MPI_INT, 0, MPI_COMM_WORLD );
    CHECK( !err );

    bool all_equal = true;
    for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
        if( file_counts[t] != all_counts[t] ) all_equal = false;

    if( rank == 0 && !all_equal )
    {
        std::cerr << "Type\tPartnd\tWritten" << std::endl;
        for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
            std::cerr << CN::EntityTypeName( t ) << '\t' << all_counts[t] << '\t' << file_counts[t] << std::endl;
    }

    CHECK( all_equal );

    // on root processor, do serial read of original .cub file and compare

    if( rank == 0 )
    {
        Core moab2;
        rval = moab2.load_file( InputFile );CHECK_ERR( rval );
        check_identical_mesh( moab, moab2 );
    }
}

bool check_sets_sizes( Interface& mb1, EntityHandle set1, Interface& mb2, EntityHandle set2 )
{
    ErrorCode rval;
    bool result = true;
    for( EntityType t = MBVERTEX; t < MBMAXTYPE; ++t )
    {
        int count1, count2;
        rval = mb1.get_number_entities_by_type( set1, t, count1 );CHECK_ERR( rval );
        rval = mb2.get_number_entities_by_type( set2, t, count2 );CHECK_ERR( rval );
        if( count1 != count2 )
        {
            std::cerr << "Sets differ in number of " << CN::EntityTypeName( t ) << " : " << count1 << " vs. " << count2
                      << std::endl;
            result = false;
        }
    }
    return result;
}

void test_write_shared_sets()
{
    int err, rank, size;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );
    err = MPI_Comm_size( MPI_COMM_WORLD, &size );
    CHECK( !err );

    Core moab_instance;
    Interface& moab = moab_instance;
    load_and_partition( moab, InputFile );
    save_and_load_on_root( moab, "test_write_shared_sets.h5m" );

    if( rank != 0 ) return;

    Core moab2_instance;
    Interface& moab2 = moab2_instance;
    rval = moab2.load_file( InputFile );CHECK_ERR( rval );

    Tag mattag1, mattag2;
    rval = moab.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag1 );CHECK_ERR( rval );
    rval = moab2.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag2 );CHECK_ERR( rval );

    Range matsets;
    rval = moab2.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag2, 0, 1, matsets );CHECK_ERR( rval );
    for( Range::iterator i = matsets.begin(); i != matsets.end(); ++i )
    {
        int block_id;
        rval = moab2.tag_get_data( mattag2, &*i, 1, &block_id );CHECK_ERR( rval );

        Range tmpents;
        void* tagdata[] = { &block_id };
        rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag1, tagdata, 1, tmpents );
        if( tmpents.size() != 1 ) std::cerr << tmpents.size() << " sets with material set id " << block_id << std::endl;
        CHECK_EQUAL( (int)tmpents.size(), 1 );

        CHECK( check_sets_sizes( moab2, *i, moab, tmpents.front() ) );
    }
}

void test_var_length_parallel()
{
    Range::const_iterator i;
    ErrorCode rval;
    Core moab;
    Interface& mb = moab;
    Range verts;
    Tag vartag;
    const char* filename = "var-len-para.h5m";
    const char* tagname = "ParVar";

    // If this tag doesn't exist, writer will fail
    Tag junk_tag;
    mb.tag_get_handle( PARALLEL_GID_TAG_NAME, 1, MB_TYPE_INTEGER, junk_tag, MB_TAG_DENSE | MB_TAG_EXCL );

    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    // Create N+1 vertices on each processor, where N is the rank
    std::vector< double > coords( 3 * rank + 3, (double)rank );
    rval = mb.create_vertices( &coords[0], rank + 1, verts );CHECK_ERR( rval );

    // Create a var-len tag
    rval = mb.tag_get_handle( tagname, 0, MB_TYPE_INTEGER, vartag, MB_TAG_DENSE | MB_TAG_VARLEN | MB_TAG_EXCL );CHECK_ERR( rval );

    // Write data on each vertex:
    // { n, rank, rank+1, ..., rank+n-1 } where n >= 1
    std::vector< int > data;
    rval = MB_SUCCESS;
    for( i = verts.begin(); i != verts.end(); ++i )
    {
        EntityHandle h = *i;
        const int n = h % 7 + 1;
        data.resize( n + 1 );
        data[0] = n;
        for( int j = 0; j < n; ++j )
            data[j + 1] = rank + j;
        const int s = ( n + 1 );
        const void* ptrarr[] = { &data[0] };
        ErrorCode tmperr = mb.tag_set_by_ptr( vartag, &h, 1, ptrarr, &s );
        if( MB_SUCCESS != tmperr ) rval = tmperr;
    }
    CHECK_ERR( rval );

    // Write file
    const char* opt = "PARALLEL=WRITE_PART";
    std::string str;
    if( WriteDebugLevel )
    {
        std::ostringstream s;
        s << opt << ";DEBUG_IO=" << WriteDebugLevel;
        str = s.str();
        opt = str.c_str();
    }
    rval = mb.write_file( filename, "MOAB", opt );CHECK_ERR( rval );

    // Read file. We only reset and re-read the file on the
    // root processor. All other processors keep the pre-write
    // mesh. This allows many of the tests to be run on all
    // processors. Running the tests on the pre-write mesh on
    // non-root processors allows us to verify that any problems
    // are due to the file API rather than some other bug.
    ErrorCode rval2 = rval = MB_SUCCESS;
    if( !rank )
    {
        moab.~Core();
        new( &moab ) Core;
        rval = mb.load_mesh( filename );
        if( !KeepTmpFiles ) remove( filename );
        rval2 = mb.tag_get_handle( tagname, 0, MB_TYPE_INTEGER, vartag );
    }
    CHECK_ERR( rval );CHECK_ERR( rval2 );

    // Check that tag is correct
    int tag_size;
    rval = mb.tag_get_length( vartag, tag_size );
    CHECK_EQUAL( MB_VARIABLE_DATA_LENGTH, rval );
    TagType storage;
    rval = mb.tag_get_type( vartag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );
    DataType type;
    rval = mb.tag_get_data_type( vartag, type );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TYPE_INTEGER, type );

    // get vertices
    verts.clear();
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );

    // Check consistency of tag data on each vertex
    // and count the number of vertices for each rank.
    std::vector< int > vtx_counts( numproc, 0 );
    for( i = verts.begin(); i != verts.end(); ++i )
    {
        EntityHandle h = *i;
        int size = -1;
        const void* ptrarr[1] = { 0 };
        rval = mb.tag_get_by_ptr( vartag, &h, 1, ptrarr, &size );CHECK_ERR( rval );
        const int* tag_data = reinterpret_cast< const int* >( ptrarr[0] );
        CHECK( size >= 2 );
        CHECK( NULL != tag_data );
        CHECK_EQUAL( size - 1, tag_data[0] );
        CHECK( tag_data[1] >= 0 && tag_data[1] < numproc );
        ++vtx_counts[tag_data[1]];
        for( int j = 1; j < size - 1; ++j )
            CHECK_EQUAL( tag_data[1] + j, tag_data[1 + j] );
    }

    // Check number of vertices for each rank
    for( int j = 0; j < numproc; ++j )
    {
        // Only root should have data for other processors.
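        // (Each non-root rank kept only the rank+1 vertices it created before the write.)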
        if( rank == 0 || rank == j )
            CHECK_EQUAL( j + 1, vtx_counts[j] );
        else
            CHECK_EQUAL( 0, vtx_counts[j] );
    }
}

// create a row of cubes of mesh
void create_input_file( const char* file_name, int intervals, int num_cpu, int blocks_per_cpu = 1,
                        const char* ijk_vert_tag_name = 0, const char* ij_set_tag_name = 0,
                        const char* global_tag_name = 0, const int* global_mesh_value = 0,
                        const int* global_default_value = 0, bool create_bcsets = false )
{
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    Tag ijk_vert_tag = 0, ij_set_tag = 0, global_tag = 0;
    if( ijk_vert_tag_name )
    {
        rval = mb.tag_get_handle( ijk_vert_tag_name, 3, MB_TYPE_INTEGER, ijk_vert_tag, MB_TAG_EXCL | MB_TAG_DENSE );CHECK_ERR( rval );
    }
    if( ij_set_tag_name )
    {
        rval = mb.tag_get_handle( ij_set_tag_name, 2, MB_TYPE_INTEGER, ij_set_tag, MB_TAG_SPARSE | MB_TAG_EXCL );CHECK_ERR( rval );
    }
    if( global_tag_name )
    {
        rval = mb.tag_get_handle( global_tag_name, 1, MB_TYPE_INTEGER, global_tag, MB_TAG_DENSE | MB_TAG_EXCL,
                                  global_default_value );CHECK_ERR( rval );
        if( global_mesh_value )
        {
            EntityHandle root = 0;
            rval = mb.tag_set_data( global_tag, &root, 1, global_mesh_value );CHECK_ERR( rval );
        }
    }

    const int num_blk = num_cpu * blocks_per_cpu;
    int iv = intervals + 1, ii = num_blk * intervals + 1;
    std::vector< EntityHandle > verts( iv * iv * ii );
    int idx = 0;
    for( int i = 0; i < ii; ++i )
    {
        for( int j = 0; j < iv; ++j )
        {
            int start = idx;
            for( int k = 0; k < iv; ++k )
            {
                const double coords[3] = { static_cast< double >( i ), static_cast< double >( j ),
                                           static_cast< double >( k ) };
                rval = mb.create_vertex( coords, verts[idx] );CHECK_ERR( rval );
                if( ijk_vert_tag )
                {
                    int vals[] = { i, j, k };
                    rval = mb.tag_set_data( ijk_vert_tag, &verts[idx], 1, vals );CHECK_ERR( rval );
                }
                ++idx;
            }

            if( ij_set_tag )
            {
                EntityHandle set;
                rval = mb.create_meshset( MESHSET_SET, set );CHECK_ERR( rval );
                rval = mb.add_entities( set, &verts[start], idx - start );CHECK_ERR( rval );
                int vals[] = { i, j };
                rval = mb.tag_set_data( ij_set_tag, &set, 1, vals );CHECK_ERR( rval );
            }
        }
    }

    const int eb = intervals * intervals * intervals;
    std::vector< EntityHandle > elems( num_blk * eb );
    idx = 0;
    for( int c = 0; c < num_blk; ++c )
    {
        for( int i = c * intervals; i < ( c + 1 ) * intervals; ++i )
        {
            for( int j = 0; j < intervals; ++j )
            {
                for( int k = 0; k < intervals; ++k )
                {
                    EntityHandle conn[8] = { verts[iv * ( iv * i + j ) + k],
                                             verts[iv * ( iv * ( i + 1 ) + j ) + k],
                                             verts[iv * ( iv * ( i + 1 ) + j + 1 ) + k],
                                             verts[iv * ( iv * i + j + 1 ) + k],
                                             verts[iv * ( iv * i + j ) + k + 1],
                                             verts[iv * ( iv * ( i + 1 ) + j ) + k + 1],
                                             verts[iv * ( iv * ( i + 1 ) + j + 1 ) + k + 1],
                                             verts[iv * ( iv * i + j + 1 ) + k + 1] };

                    rval = mb.create_element( MBHEX, conn, 8, elems[idx++] );CHECK_ERR( rval );
                }
            }
        }
    }

    Tag part_tag;
    rval = mb.tag_get_handle( PARTITION_TAG, 1, MB_TYPE_INTEGER, part_tag, MB_TAG_SPARSE | MB_TAG_EXCL );CHECK_ERR( rval );

    std::vector< EntityHandle > parts( num_cpu );
    for( int i = 0; i < num_cpu; ++i )
    {
        rval = mb.create_meshset( MESHSET_SET, parts[i] );CHECK_ERR( rval );
        for( int j = 0; j < blocks_per_cpu; ++j )
        {
            rval = mb.add_entities( parts[i], &elems[( num_cpu * j + i ) * eb], eb );CHECK_ERR( rval );
        }
        rval = mb.tag_set_data( part_tag, &parts[i], 1, &i );CHECK_ERR( rval );
    }

    if( create_bcsets )
    {
        // neumann set
        Range skin_ents;
        rval = Skinner( &mb ).find_skin( 0, &elems[0], elems.size(), false, skin_ents );CHECK_ERR( rval );
        EntityHandle bcset;
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        Tag bcset_tag;
        rval = mb.tag_get_handle( NEUMANN_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        int dum = 100;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, skin_ents );CHECK_ERR( rval );

        // dirichlet set
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        rval = mb.tag_get_handle( DIRICHLET_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        dum = 200;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        Range nodes;
        rval = mb.get_adjacencies( skin_ents, 0, false, nodes, Interface::UNION );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, nodes );CHECK_ERR( rval );

        // material set
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        rval = mb.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        dum = 300;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, &elems[0], elems.size() );CHECK_ERR( rval );
    }

    rval = mb.write_file( file_name, "MOAB" );CHECK_ERR( rval );
}

void test_read_elements_common( bool by_rank, int intervals, bool /* print_time */, const char* extra_opts )
{
    const char* file_name = by_rank ? "test_read_rank.h5m" : "test_read.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, intervals, numproc );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options( by_rank, extra_opts );
    rval = mb.load_file( file_name, 0, opt.c_str() );

    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag part_tag;
    rval = mb.tag_get_handle( PARTITION_TAG, 1, MB_TYPE_INTEGER, part_tag );CHECK_ERR( rval );

    Range parts;
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &part_tag, 0, 1, parts );CHECK_ERR( rval );
    CHECK_EQUAL( 1, (int)parts.size() );
    EntityHandle part = parts.front();
    int id;
    rval = mb.tag_get_data( part_tag, &part, 1, &id );CHECK_ERR( rval );
    if( by_rank ) { CHECK_EQUAL( rank, id ); }

    // check that all of the elements in the mesh are in the part
    int npart, nall;
    rval = mb.get_number_entities_by_dimension( part, 3, npart );CHECK_ERR( rval );
    rval = mb.get_number_entities_by_dimension( 0, 3, nall );CHECK_ERR( rval );
    CHECK_EQUAL( npart, nall );

    // check that we have the correct vertices
    const double x_min = intervals * rank;
    const double x_max = intervals * ( rank + 1 );
    Range verts;
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );
    std::vector< double > coords( verts.size() );
    rval = mb.get_coords( verts, &coords[0], 0, 0 );CHECK_ERR( rval );
    const double act_x_min = *std::min_element( coords.begin(), coords.end() );
    const double act_x_max = *std::max_element( coords.begin(), coords.end() );
    CHECK_REAL_EQUAL( x_min, act_x_min, DBL_EPSILON );
    CHECK_REAL_EQUAL( x_max, act_x_max, DBL_EPSILON );
}

void test_read_time()
{
    const char file_name[] = "read_time.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, ReadIntervals, numproc, ReadBlocks );
    MPI_Barrier( MPI_COMM_WORLD );

    // wall and CPU time for true parallel read,
    // for read-and-delete, and for broadcast-and-delete
    double times[6];
    clock_t tmp_t;

    // Time true parallel read
    Core moab;
    Interface& mb = moab;
    times[0] = MPI_Wtime();
    tmp_t = clock();
    std::string opt = get_read_options( true, READ_PART );
    rval = mb.load_file( file_name, 0, opt.c_str() );CHECK_ERR( rval );
    times[0] = MPI_Wtime() - times[0];
    times[1] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb.delete_mesh();

    // Time read and delete
    Core moab2;
    Interface& mb2 = moab2;
    std::string opt2 = get_read_options( true, READ_DELETE );
    times[2] = MPI_Wtime();
    tmp_t = clock();
    rval = mb2.load_file( file_name, 0, opt2.c_str() );CHECK_ERR( rval );
    times[2] = MPI_Wtime() - times[2];
    times[3] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb2.delete_mesh();

    // Time broadcast and delete
    Core moab3;
    Interface& mb3 = moab3;
    std::string opt3 = get_read_options( true, BCAST_DELETE );
    times[4] = MPI_Wtime();
    tmp_t = clock();
    rval = mb3.load_file( file_name, 0, opt3.c_str() );CHECK_ERR( rval );
    times[4] = MPI_Wtime() - times[4];
    times[5] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb3.delete_mesh();

    double max_times[6] = { 0, 0, 0, 0, 0, 0 }, sum_times[6] = { 0, 0, 0, 0, 0, 0 };
    MPI_Reduce( times, max_times, 6, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
    MPI_Reduce( times, sum_times, 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
    MPI_Barrier( MPI_COMM_WORLD );
    if( 0 == rank )
    {
        printf( "%12s %12s %12s %12s\n", "", "READ_PART", "READ_DELETE", "BCAST_DELETE" );
        printf( "%12s %12g %12g %12g\n", "Max Wall", max_times[0], max_times[2], max_times[4] );
        printf( "%12s %12g %12g %12g\n", "Total Wall", sum_times[0], sum_times[2], sum_times[4] );
        printf( "%12s %12g %12g %12g\n", "Max CPU", max_times[1], max_times[3], max_times[5] );
        printf( "%12s %12g %12g %12g\n", "Total CPU", sum_times[1], sum_times[3], sum_times[5] );
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
}

void test_read_tags()
{
    const char tag_name[] = "test_tag_xx";
    const char file_name[] = "test_read_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, DefaultReadIntervals, numproc, 1, tag_name );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 3, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );

    Range verts, tagged;
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );
    rval = mb.get_entities_by_type_and_tag( 0, MBVERTEX, &tag, 0, 1, tagged );CHECK_ERR( rval );
    CHECK_EQUAL( verts, tagged );

    for( Range::iterator i = verts.begin(); i != verts.end(); ++i )
    {
        double coords[3];
        rval = mb.get_coords( &*i, 1, coords );CHECK_ERR( rval );
        int ijk[3];
        rval = mb.tag_get_data( tag, &*i, 1, ijk );CHECK_ERR( rval );

        CHECK( ijk[0] >= DefaultReadIntervals * rank );
        CHECK( ijk[0] <= DefaultReadIntervals * ( rank + 1 ) );
        CHECK( ijk[1] >= 0 );
        CHECK( ijk[1] <= DefaultReadIntervals );
        CHECK( ijk[2] >= 0 );
        CHECK( ijk[2] <= DefaultReadIntervals );

        CHECK_REAL_EQUAL( coords[0], (double)ijk[0], 1e-100 );
        CHECK_REAL_EQUAL( coords[1], (double)ijk[1], 1e-100 );
        CHECK_REAL_EQUAL( coords[2], (double)ijk[2], 1e-100 );
    }
}

void test_read_global_tags()
{
    const char tag_name[] = "test_tag_g";
    const char file_name[] = "test_read_global_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;
    const int def_val = 0xdeadcad;
    const int global_val = -11;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, 1, numproc, 1, 0, 0, tag_name, &global_val, &def_val );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 1, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );

    int mesh_def_val, mesh_gbl_val;
    rval = mb.tag_get_default_value( tag, &mesh_def_val );CHECK_ERR( rval );
    CHECK_EQUAL( def_val, mesh_def_val );
    EntityHandle root = 0;
    rval = mb.tag_get_data( tag, &root, 1, &mesh_gbl_val );CHECK_ERR( rval );
    CHECK_EQUAL( global_val, mesh_gbl_val );
}

void test_read_sets_common( const char* extra_opts )
{
    const char tag_name[] = "test_tag_s";
    const char file_name[] = "test_read_sets.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, DefaultReadIntervals, numproc, 1, 0, tag_name );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options( extra_opts );
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 2, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_SPARSE, storage );

    const int iv = DefaultReadIntervals + 1;
    Range sets;
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, sets );CHECK_ERR( rval );
    CHECK_EQUAL( ( iv * iv ), (int)sets.size() );

    for( Range::iterator i = sets.begin(); i != sets.end(); ++i )
    {
        int ij[2];
        rval = mb.tag_get_data( tag, &*i, 1, &ij );CHECK_ERR( rval );

        CHECK( ij[0] >= DefaultReadIntervals * rank );
        CHECK( ij[0] <= DefaultReadIntervals * ( rank + 1 ) );
        CHECK( ij[1] >= 0 );
        CHECK( ij[1] <= DefaultReadIntervals );

        Range contents;
        rval = mb.get_entities_by_handle( *i, contents );CHECK_ERR( rval );
        CHECK( contents.all_of_type( MBVERTEX ) );
        CHECK_EQUAL( iv, (int)contents.size() );

        for( Range::iterator v = contents.begin(); v != contents.end(); ++v )
        {
            double coords[3];
            rval = mb.get_coords( &*v, 1, coords );CHECK_ERR( rval );
            CHECK_REAL_EQUAL( coords[0], (double)ij[0], 1e-100 );
            CHECK_REAL_EQUAL( coords[1], (double)ij[1], 1e-100 );
        }
    }
}

void test_read_bc_sets()
{
    // const char tag_name[] = "test_tag_s";
    const char file_name[] = "test_read_sets.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank )
        create_input_file( file_name, DefaultReadIntervals, numproc, 1, NULL, NULL, NULL, NULL, NULL, true );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    int num_ents[3], global_num_ents[3] = { 0, 0, 0 };
    Range sets, contents;
    const char* names[] = { NEUMANN_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, MATERIAL_SET_TAG_NAME };
    int vints = DefaultReadIntervals + 1;
    int expected_num_ents[] = { ( numproc * 4 + 2 ) * DefaultReadIntervals * DefaultReadIntervals,
                                ( ( numproc * 4 + 2 ) * ( vints - 2 ) * ( vints - 2 ) + 12 * numproc * ( vints - 2 ) +
                                  8 * numproc ),
                                numproc * DefaultReadIntervals * DefaultReadIntervals * DefaultReadIntervals };

    for( int i = 0; i < 3; i++ )
    {
        rval = mb.tag_get_handle( names[i], 1, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );
        rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, sets );CHECK_ERR( rval );
        CHECK_EQUAL( 1, (int)sets.size() );
        rval = mb.get_entities_by_handle( *sets.begin(), contents, true );CHECK_ERR( rval );
        num_ents[i] = contents.size();
        sets.clear();
        contents.clear();
    }

    MPI_Reduce( num_ents, global_num_ents, 3, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
    if( 0 == rank )
    {
        // std::cout << "Global:" << global_num_ents[0] << " " << global_num_ents[1] << " "
        //           << global_num_ents[2] << " " << std::endl;
        // std::cout << "Expected:" << expected_num_ents[0] << " " << expected_num_ents[1] << " "
        //           << expected_num_ents[2] << " " << std::endl;

        for( int i = 0; i < 3; i++ )
        {
            CHECK_EQUAL( global_num_ents[i], expected_num_ents[i] );
        }
    }
}

void test_write_different_element_types()
{
    const char file_name[] = "test_write_different_element_types.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    const EntityType topos[] = { MBEDGE, MBEDGE, MBQUAD, MBTRI, MBQUAD, MBTRI,
                                 MBTET, MBHEX, MBPRISM, MBPYRAMID, MBHEX, MBHEX };
    const int verts[] = { 2, 3, 4, 3, 9, 6, 4, 8, 6, 5, 20, 27 };
    const int ntypes = sizeof( topos ) / sizeof( topos[0] );

    const EntityType mtype = topos[rank % ntypes];
    const int nvert = verts[rank % ntypes];
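    // each rank contributes exactly one element; its type and node count are
    // picked from the tables above via rank % ntypes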
    std::vector< EntityHandle > conn( nvert );
    for( int i = 0; i < nvert; ++i )
    {
        const double coords[] = { static_cast< double >( rank ), static_cast< double >( i ), 0 };
        rval = mb.create_vertex( coords, conn[i] );CHECK_ERR( rval );
    }
    EntityHandle handle;
    rval = mb.create_element( mtype, &conn[0], nvert, handle );CHECK_ERR( rval );

    save_and_load_on_root( mb, file_name );

    if( rank ) return;

    for( int i = 0; i < ntypes; ++i )
    {
        const int num_exp = numproc / ntypes + ( i < ( numproc % ntypes ) ? 1 : 0 );
        Range ents;
        rval = mb.get_entities_by_type( 0, topos[i], ents );CHECK_ERR( rval );
        int num = 0;
        for( Range::iterator e = ents.begin(); e != ents.end(); ++e )
        {
            const EntityHandle* junk;
            int len;
            rval = mb.get_connectivity( *e, junk, len, false );
            if( len == verts[i] ) ++num;
        }

        CHECK_EQUAL( num_exp, num );
    }
}

Tag get_tag( Interface& mb, int rank, bool create )
{
    DataType type = ( DataType )( rank % ( MB_MAX_DATA_TYPE + 1 ) );
    TagType storage = ( type == MB_TYPE_BIT ) ? MB_TAG_BIT : ( rank % 2 ) ? MB_TAG_DENSE : MB_TAG_SPARSE;
    int len = rank % 3 + 1;
    TagType cbit = create ? MB_TAG_EXCL : (TagType)0;
    TagType vbit = rank % 4 == 1 && storage != MB_TAG_BIT ? MB_TAG_VARLEN : (TagType)0;
    std::ostringstream name;
    name << "TestTag" << rank;
    const void* defval = 0;
    const int defint[] = { static_cast< int >( rank ), static_cast< int >( rank / 2 ), static_cast< int >( rank + 1 ),
                           static_cast< int >( rank - 1 ) };
    const double defreal[] = { 0.1 * rank, 1.0 / rank, static_cast< double >( -rank ), static_cast< double >( rank ) };
    const int defhandle[] = { 0, 0, 0, 0 };
    const unsigned char defbit = 0x1;
    const char defopq[] = "Jason";
    if( rank % 4 < 2 )
    {
        switch( type )
        {
            case MB_TYPE_BIT:
                defval = &defbit;
                break;
            case MB_TYPE_INTEGER:
                defval = defint;
                break;
            case MB_TYPE_DOUBLE:
                defval = defreal;
                break;
            case MB_TYPE_HANDLE:
                defval = defhandle;
                break;
            case MB_TYPE_OPAQUE:
                defval = defopq;
                break;
        }
    }

    Tag result;
    ErrorCode rval = mb.tag_get_handle( name.str().c_str(), len, type, result, storage | cbit | vbit, defval );CHECK_ERR( rval );
    return result;
}

void test_write_different_tags()
{
    const char file_name[] = "test_write_different_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;

    const int min_tags = 8;
    get_tag( mb, rank, true );
    for( int idx = rank + numproc; idx < min_tags; idx += numproc )
        get_tag( mb, idx, true );

    save_and_load_on_root( mb, file_name );

    if( 0 == rank )
    {
        for( int i = 0; i < std::max( min_tags, numproc ); ++i )
            get_tag( mb, i, false );
    }
}

void test_write_polygons()
{
    const char file_name[] = "test_write_polygons.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // create a polygon on each process
    const double r = 0.70710678118654757;
    const double points[8][3] = {
        { 1, 0, static_cast< double >( rank ) },
        { static_cast< double >( r ), static_cast< double >( r ), static_cast< double >( rank ) },
        { 0, 1, static_cast< double >( rank ) },
        { static_cast< double >( -r ), static_cast< double >( r ), static_cast< double >( rank ) },
        { -1, 0, static_cast< double >( rank ) },
        { static_cast< double >( -r ), static_cast< double >( -r ), static_cast< double >( rank ) },
        { 0, -1, static_cast< double >( rank ) },
        { static_cast< double >( r ), static_cast< double >( -r ), static_cast< double >( rank ) }
    };
    const int nvtx = rank % 4 + 5;
    std::vector< EntityHandle > conn( nvtx );
    for( int i = 0; i < nvtx; ++i )
    {
        rval = mb.create_vertex( points[i], conn[i] );CHECK_ERR( rval );
    }

    EntityHandle h;
    rval = mb.create_element( MBPOLYGON, &conn[0], nvtx, h );CHECK_ERR( rval );

    save_and_load_on_root( mb, file_name );

    if( rank != 0 ) return;

    // check results on root process

    // determine which polygon was created by which proc by
    // looking at the z-coordinate of the vertices
    Range range;
    rval = mb.get_entities_by_type( 0, MBPOLYGON, range );CHECK_ERR( rval );
    std::vector< EntityHandle > poly( numproc, 0 );
    CHECK_EQUAL( numproc, (int)range.size() );
    for( Range::iterator it = range.begin(); it != range.end(); ++it )
    {
        const EntityHandle* conn_arr;
        int len;
        rval = mb.get_connectivity( *it, conn_arr, len );CHECK_ERR( rval );
        double coords[3];
        rval = mb.get_coords( conn_arr, 1, coords );CHECK_ERR( rval );
        int proc = (int)( coords[2] );
        CHECK_EQUAL( (EntityHandle)0, poly[proc] );
        poly[proc] = *it;
    }

    // check that each poly has the expected number of vertices
    for( int i = 0; i < numproc; ++i )
    {
        const EntityHandle* conn_arr;
        int len;
        rval = mb.get_connectivity( poly[i], conn_arr, len );CHECK_ERR( rval );
        CHECK_EQUAL( i % 4 + 5, len );
    }
}

// Some processes have no mesh to write
void test_write_unbalanced()
{
    const char file_name[] = "test_write_unbalanced.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;
    Tag idtag = mb.globalId_tag();

    // create a shared set
    const int two = 2;
    Range entities, sets;

    EntityHandle set;
    rval = mb.create_meshset( MESHSET_SET, set );CHECK_ERR( rval );
    rval = mb.tag_set_data( idtag, &set, 1, &two );CHECK_ERR( rval );
    sets.insert( set );

    // create a quad on every odd processor
    if( rank % 2 )
    {
        const double coords[4][3] = { { static_cast< double >( rank ), 0, 0 },
                                      { static_cast< double >( rank + 2 ), 0, 0 },
                                      { static_cast< double >( rank + 2 ), 2, 0 },
                                      { static_cast< double >( rank ), 2, 0 } };
        EntityHandle conn[4], quad;
        for( int i = 0; i < 4; ++i )
            mb.create_vertex( coords[i], conn[i] );
        mb.create_element( MBQUAD, conn, 4, quad );

        const int ids[4] = { rank, rank + 2, rank + 3, rank + 1 };
        rval = mb.tag_set_data( idtag, conn, 4, ids );CHECK_ERR( rval );

        rval = mb.add_entities( set, &quad, 1 );CHECK_ERR( rval );

        entities.insert( quad );
    }

    // set up sharing data
    ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
    if( 0 == pcomm ) pcomm = new ParallelComm( &mb, MPI_COMM_WORLD );
    rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, NULL, &idtag );CHECK_ERR( rval );
    rval = pcomm->resolve_shared_sets( sets, idtag );CHECK_ERR( rval );

    // do parallel write and serial load
    save_and_load_on_root( mb, file_name );

    if( rank != 0 ) return;

    // check that we got what we expected
    Range quads, verts;
    rval = mb.get_entities_by_type( 0, MBQUAD, quads );CHECK_ERR( rval );
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );

    const size_t nquads = numproc / 2;
    const size_t nverts = nquads ? 2 + 2 * nquads : 0;
    CHECK_EQUAL( nquads, quads.size() );
    CHECK_EQUAL( nverts, verts.size() );

    rval = mb.tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, idtag );CHECK_ERR( rval );
    sets.clear();
    const void* vals[] = { &two };
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &idtag, vals, 1, sets );CHECK_ERR( rval );
    CHECK_EQUAL( (size_t)1, sets.size() );

    entities.clear();
    rval = mb.get_entities_by_handle( sets.front(), entities );CHECK_ERR( rval );
    CHECK_EQUAL( nquads, entities.size() );
    CHECK_EQUAL( quads, entities );
}

// this test will load a file that has 2 partitions (mix.h5m),
// one partition with triangles, and one with triangles and quads;
// a dense tag created on this mesh should be dense in the file
//
void test_write_dense_tags()
{
    int err, rank, size;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );
    err = MPI_Comm_size( MPI_COMM_WORLD, &size );
    CHECK( !err );

    Core moab;
    rval = moab.load_file( InputMix, 0,
                           "PARALLEL=READ_PART;"
                           "PARTITION=PARALLEL_PARTITION;"
                           "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );

    // create a dense tag on all elements, then write the output in parallel;
    // load the file again, and test the type of the tag
    Range elems;
    moab.get_entities_by_dimension( 0, 2, elems );

    const char* tagname = "element_tag";
    const double defVal = 0.;
    Tag fieldTag;
    rval = moab.tag_get_handle( tagname, 1, MB_TYPE_DOUBLE, fieldTag, MB_TAG_DENSE | MB_TAG_CREAT, &defVal );CHECK_ERR( rval );

    int numElems = (int)elems.size();

    for( int i = 0; i < numElems; i++ )
    {
        EntityHandle elem = elems[i];
        double xyz[3];
        moab.get_coords( &elem, 1, xyz );
        // some value
        double fieldValue = sqrt( xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2] );
        moab.tag_set_data( fieldTag, &elem, 1, &fieldValue );
    }

    // write the file in parallel
    rval = moab.write_file( "newfile.h5m", 0, "PARALLEL=WRITE_PART" );CHECK_ERR( rval );

    // now read the new file, in a new instance, and test the tag type

    Core moab2;
    rval = moab2.load_file( "newfile.h5m", 0,
                            "PARALLEL=READ_PART;"
                            "PARTITION=PARALLEL_PARTITION;"
                            "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );

    // find the element tag
    Tag found_tag;
    rval = moab2.tag_get_handle( tagname, 1, MB_TYPE_DOUBLE, found_tag );CHECK_ERR( rval );
    TagType tagt;
    rval = moab2.tag_get_type( found_tag, tagt );CHECK_ERR( rval );
    CHECK( tagt == MB_TAG_DENSE );
}

// this test will load a file that has 2 partitions (oneside.h5m)
// and one side set that is adjacent to one part only
//
void test_read_non_adjs_side()
{
    int err, rank, size;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );
    err = MPI_Comm_size( MPI_COMM_WORLD, &size );
    CHECK( !err );

    Core moab;
    rval = moab.load_file( InputOneSide, 0,
                           "PARALLEL=READ_PART;"
                           "PARTITION=PARALLEL_PARTITION;"
                           "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );

    return;
}
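/* Example invocation (illustrative; the binary name and process count are
 * assumptions, not part of this file):
 *
 *     mpiexec -np 4 ./parallel_hdf5_test             # run the full suite
 *     mpiexec -np 4 ./parallel_hdf5_test -R 16 -b 2  # time READ_PART vs.
 *                                                    # READ_DELETE vs. BCAST_DELETE
 *
 * Flags match the usage message in main(): -k keeps temporary files, -p pauses
 * on start, -r/-w set read/write DEBUG_IO levels, and -A enables HYPERSLAB_APPEND.
 */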