MOAB: Mesh Oriented datABase  (version 5.4.1)
parallel_hdf5_test.cpp
#include "moab/Range.hpp"
#include "TestUtil.hpp"

#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/Skinner.hpp"
#include "MBTagConventions.hpp"
#include "moab/CN.hpp"
#include "MBParallelConventions.h"

#include <iostream>
#include <sstream>
#include <algorithm>
#include <map>
#include <vector>
#include <cstring>
#include <cstdlib>
#include "moab_mpi.h"
#include <unistd.h>
#include <cfloat>
#include <cstdio>
#include <ctime>

using namespace moab;

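// MESHDIR points at the test mesh directory configured by the build system;
// the STRINGIFY macro converts its value to a string literal.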
#ifdef MESHDIR
const char* InputFile    = STRINGIFY( MESHDIR ) "/ptest.cub";
const char* InputMix     = STRINGIFY( MESHDIR ) "/io/mix.h5m";
const char* InputOneSide = STRINGIFY( MESHDIR ) "/io/oneside.h5m";
#else
#error Specify MESHDIR to compile test
#endif

void load_and_partition( Interface& moab, const char* filename, bool print_debug = false );

void save_and_load_on_root( Interface& moab, const char* tmp_filename );

void check_identical_mesh( Interface& moab1, Interface& moab2 );

void test_write_elements();
void test_write_shared_sets();
void test_var_length_parallel();

void test_read_elements_common( bool by_rank, int intervals, bool print_time, const char* extra_opts = 0 );

int ReadIntervals = 0;
void test_read_elements()
{
    test_read_elements_common( false, ReadIntervals, false );
}
void test_read_elements_by_rank()
{
    test_read_elements_common( true, ReadIntervals, false );
}
void test_bcast_summary()
{
    test_read_elements_common( false, ReadIntervals, false, "BCAST_SUMMARY=yes" );
}
void test_read_summary()
{
    test_read_elements_common( false, ReadIntervals, false, "BCAST_SUMMARY=no" );
}
void test_read_time();

void test_read_tags();
void test_read_global_tags();
void test_read_sets_common( const char* extra_opts = 0 );
void test_read_sets()
{
    test_read_sets_common();
}
void test_read_sets_bcast_dups()
{
    test_read_sets_common( "BCAST_DUPLICATE_READS=yes" );
}
void test_read_sets_read_dups()
{
    test_read_sets_common( "BCAST_DUPLICATE_READS=no" );
}
void test_read_bc_sets();

void test_write_different_element_types();
void test_write_different_tags();
void test_write_polygons();
void test_write_unbalanced();
void test_write_dense_tags();
void test_read_non_adjs_side();

const char PARTITION_TAG[] = "PARTITION";

bool KeepTmpFiles              = false;  // -k: keep temporary mesh files
bool PauseOnStart              = false;  // -p: print PID and sleep 30 s at startup (e.g. to attach a debugger)
bool HyperslabAppend           = false;  // -A: enable HYPERSLAB_APPEND when reading
const int DefaultReadIntervals = 2;
int ReadDebugLevel             = 0;  // -r <level>: DEBUG_IO level for reads
int WriteDebugLevel            = 0;  // -w <level>: DEBUG_IO level for writes
int ReadBlocks                 = 1;  // -b <blocks>: blocks per CPU in the read-timing test

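// Parallel read strategies exercised below: READ_PART does a true parallel
// read of each rank's partition; READ_DELETE has every rank read the whole
// file and delete what it does not own; BCAST_DELETE reads on the root,
// broadcasts, and then deletes non-owned entities.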
enum Mode
{
    READ_PART,
    READ_DELETE,
    BCAST_DELETE
};
const Mode DEFAULT_MODE    = READ_PART;
const bool DEFAULT_BY_RANK = false;

std::string get_read_options( bool by_rank = DEFAULT_BY_RANK, Mode mode = DEFAULT_MODE, const char* extra_opts = 0 );
std::string get_read_options( const char* extra_opts )
{
    return get_read_options( DEFAULT_BY_RANK, DEFAULT_MODE, extra_opts );
}
std::string get_read_options( bool by_rank, const char* extra_opts )
{
    return get_read_options( by_rank, DEFAULT_MODE, extra_opts );
}

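// Build the file-reader option string. With more than one process and default
// arguments this produces, for example, "PARALLEL=READ_PART;PARTITION=PARTITION;"
// with any extra options, DEBUG_IO level, and hyperslab settings appended.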
std::string get_read_options( bool by_rank, Mode mode, const char* extra_opts )
{
    int numproc;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );

    // do parallel read unless only one processor
    std::ostringstream str;
    if( numproc > 1 )
    {
        str << "PARALLEL=";
        switch( mode )
        {
            case READ_PART:
                str << "READ_PART";
                break;
            case READ_DELETE:
                str << "READ_DELETE";
                break;
            case BCAST_DELETE:
                str << "BCAST_DELETE";
                break;
        }
        str << ";PARTITION=" << PARTITION_TAG << ";";
        if( by_rank ) str << "PARTITION_BY_RANK;";
    }

    if( extra_opts ) str << extra_opts << ";";

    if( ReadDebugLevel ) str << "DEBUG_IO=" << ReadDebugLevel << ";";

    if( HyperslabAppend ) str << "HYPERSLAB_APPEND;HYPERSLAB_SELECT_LIMIT=2147483647";

    return str.str();
}

int main( int argc, char* argv[] )
{
    int err = MPI_Init( &argc, &argv );
    CHECK( !err );

    for( int i = 1; i < argc; ++i )
    {
        if( !strcmp( argv[i], "-k" ) )
            KeepTmpFiles = true;
        else if( !strcmp( argv[i], "-p" ) )
            PauseOnStart = true;
        else if( !strcmp( argv[i], "-R" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadIntervals = atoi( argv[i] );
            CHECK( ReadIntervals > 0 );
        }
        else if( !strcmp( argv[i], "-b" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadBlocks = atoi( argv[i] );
            CHECK( ReadBlocks > 0 );
        }
        else if( !strcmp( argv[i], "-r" ) )
        {
            ++i;
            CHECK( i < argc );
            ReadDebugLevel = atoi( argv[i] );
            CHECK( ReadDebugLevel > 0 );
        }
        else if( !strcmp( argv[i], "-w" ) )
        {
            ++i;
            CHECK( i < argc );
            WriteDebugLevel = atoi( argv[i] );
            CHECK( WriteDebugLevel > 0 );
        }
        else if( !strcmp( argv[i], "-A" ) )
            HyperslabAppend = true;
        else
        {
            std::cerr << "Usage: " << argv[0]
                      << " [-k] [-p] [-R <intervals>] [-r <level>] [-w <level>] [-b <blocks>] [-A]" << std::endl;
            return 1;
        }
    }

    if( PauseOnStart )
    {
        int rank;
        MPI_Comm_rank( MPI_COMM_WORLD, &rank );
        printf( "Rank %2d PID %lu\n", rank, (unsigned long)getpid() );
        sleep( 30 );
    }

    int result = 0;
    if( ReadIntervals )
    {
        result = RUN_TEST( test_read_time );
    }
    else
    {
        ReadIntervals = DefaultReadIntervals;
        result += RUN_TEST( test_write_elements );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_shared_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_var_length_parallel );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_elements );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_elements_by_rank );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_global_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets_bcast_dups );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_sets_read_dups );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_bc_sets );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_different_element_types );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_different_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_polygons );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_unbalanced );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_write_dense_tags );
        MPI_Barrier( MPI_COMM_WORLD );
        result += RUN_TEST( test_read_non_adjs_side );
        MPI_Barrier( MPI_COMM_WORLD );
    }

    MPI_Finalize();
    return result;
}

/* Assume geometric topology sets correspond to interface sets
 * in that the entities contained inclusively in a geometric
 * topology set must be shared by the same set of processors.
 * As we partition based on geometric volumes, this assumption
 * should always be true.  Print for each geometric topology set
 * the list of processors it is shared with.
 */
void print_partitioned_entities( Interface& moab, bool list_non_shared = false )
{
    ErrorCode rval;
    int size, rank;
    std::vector< int > ent_procs( MAX_SHARING_PROCS ), tmp_ent_procs( MAX_SHARING_PROCS );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    // expect shared entities to correspond to geometric sets

    // get tags for parallel data
    Tag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
    rval = moab.tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedp_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER, sharedps_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedh_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE, sharedhs_tag );CHECK_ERR( rval );
    rval = moab.tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatus_tag );CHECK_ERR( rval );

    // for each geometric entity, check which processor we are sharing
    // entities with
    Tag geom_tag, id_tag;
    rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHECK_ERR( rval );
    id_tag                     = moab.globalId_tag();
    const char* topo_names_s[] = { "Vertex", "Curve", "Surface", "Volume" };
    //  const char* topo_names_p[] = { "Vertices", "Curves", "Surfaces", "Volumes" };
    std::ostringstream buffer;  // buffer output in an attempt to prevent lines from different
                                // processors being mixed up.
    for( int t = 0; t < 4; ++t )
    {
        Range geom;
        int dim         = t;
        const void* ptr = &dim;
        rval            = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &geom_tag, &ptr, 1, geom );CHECK_ERR( rval );

        // for each geometric entity of dimension 't'
        for( Range::const_iterator i = geom.begin(); i != geom.end(); ++i )
        {
            EntityHandle set = *i;
            int id;
            rval = moab.tag_get_data( id_tag, &set, 1, &id );CHECK_ERR( rval );

            buffer.clear();

            // get entities contained in this set but not its children
            Range entities, tmp_entities, children, diff;
            rval = moab.get_entities_by_handle( set, entities );CHECK_ERR( rval );
            rval = moab.get_child_meshsets( set, children );CHECK_ERR( rval );
            for( Range::const_iterator j = children.begin(); j != children.end(); ++j )
            {
                tmp_entities.clear();
                rval = moab.get_entities_by_handle( *j, tmp_entities );CHECK_ERR( rval );
                diff = subtract( entities, tmp_entities );
                entities.swap( diff );
            }

            // for each entity, check owning processors
            std::vector< char > status_flags( entities.size(), 0 );
            std::vector< int > shared_procs( entities.size(), 0 );
            rval = moab.tag_get_data( pstatus_tag, entities, &status_flags[0] );
            // if the tag was never set, keep the default values of zero (not shared)
            if( MB_TAG_NOT_FOUND == rval ) rval = MB_SUCCESS;
            CHECK_ERR( rval );
            unsigned num_shared = 0, num_owned = 0;
            for( size_t j = 0; j < status_flags.size(); ++j )
            {
                num_shared += !!( status_flags[j] & PSTATUS_SHARED );
                num_owned += !( status_flags[j] & PSTATUS_NOT_OWNED );
            }

            if( !num_shared )
            {
                if( list_non_shared )
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "not shared" << std::endl;
            }
            else if( num_shared != entities.size() )
            {
                buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                       << "ERROR: " << num_shared << " of " << entities.size() << " entities marked as 'shared'"
                       << std::endl;
            }
            else if( num_owned && num_owned != entities.size() )
            {
                buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                       << "ERROR: " << num_owned << " of " << entities.size() << " entities owned by this processor"
                       << std::endl;
            }
            else
            {
                rval = moab.tag_get_data( sharedp_tag, entities, &shared_procs[0] );CHECK_ERR( rval );
                int proc       = shared_procs[0];
                bool all_match = true;
                for( size_t j = 1; j < shared_procs.size(); ++j )
                    if( shared_procs[j] != proc ) all_match = false;
                if( !all_match )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "ERROR: processor IDs do not match!" << std::endl;
                }
                else if( proc != -1 )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "shared with processor " << proc;
                    if( num_owned ) buffer << " (owned by this processor)";
                    buffer << std::endl;
                }
                else if( entities.empty() )
                {
                    buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                           << "ERROR: no entities!" << std::endl;
                }
                else
                {
                    Range::const_iterator j = entities.begin();
                    rval                    = moab.tag_get_data( sharedps_tag, &*j, 1, &ent_procs[0] );CHECK_ERR( rval );
                    for( ++j; j != entities.end(); ++j )
                    {
                        rval = moab.tag_get_data( sharedps_tag, &*j, 1, &tmp_ent_procs[0] );CHECK_ERR( rval );
                        if( ent_procs != tmp_ent_procs ) all_match = false;
                    }
                    if( !all_match )
                    {
                        buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                               << "ERROR: processor IDs do not match!" << std::endl;
                    }
                    else
                    {
                        buffer << rank << ":\t" << topo_names_s[t] << " " << id << ":\t"
                               << "processors ";
                        for( int k = 0; k < MAX_SHARING_PROCS; ++k )
                            if( ent_procs[k] != -1 ) buffer << ent_procs[k] << ", ";
                        if( num_owned ) buffer << " (owned by this processor)";
                        buffer << std::endl;
                    }
                }
            }
        }
    }
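    // Print each rank's buffered output in turn. The barriers make interleaved
    // output unlikely, but MPI does not guarantee ordered stdout, so this is
    // best-effort.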
    for( int i = 0; i < size; ++i )
    {
        MPI_Barrier( MPI_COMM_WORLD );
        if( i == rank )
        {
            std::cout << buffer.str();
            std::cout.flush();
        }
    }
}

void load_and_partition( Interface& moab, const char* filename, bool print )
{
    ErrorCode rval;

    rval = moab.load_file( filename, 0,
                           "PARALLEL=READ_DELETE;"
                           "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
                           "PARTITION_DISTRIBUTE;"
                           "PARALLEL_RESOLVE_SHARED_ENTS" );

    if( print ) print_partitioned_entities( moab );

    CHECK_ERR( rval );
}

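// Write the mesh in parallel, then destroy all mesh data, tags, and
// ParallelComm instances and re-read the file serially on the root process so
// its contents can be checked against the original.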
void save_and_load_on_root( Interface& moab, const char* tmp_filename )
{
    ErrorCode rval;
    int procnum;
    MPI_Comm_rank( MPI_COMM_WORLD, &procnum );

    const char* opt = "PARALLEL=WRITE_PART";
    std::string str;
    if( WriteDebugLevel )
    {
        std::ostringstream s;
        s << opt << ";DEBUG_IO=" << WriteDebugLevel;
        str = s.str();
        opt = str.c_str();
    }
    rval = moab.write_file( tmp_filename, 0, opt );
    if( MB_SUCCESS != rval )
    {
        std::cerr << "Parallel write failed on processor " << procnum << std::endl;
        if( procnum == 0 && !KeepTmpFiles ) remove( tmp_filename );CHECK_ERR( rval );
    }

    if( procnum == 0 && KeepTmpFiles ) std::cout << "Wrote file: \"" << tmp_filename << "\"\n";

    // All created pcomm objects should be retrieved (with the pcomm tag) and
    // deleted at this time. Otherwise, the memory used by them will be leaked
    // as the pcomm tag will be deleted by moab.delete_mesh() below.
    std::vector< ParallelComm* > pc_list;
    ParallelComm::get_all_pcomm( &moab, pc_list );
    for( std::vector< ParallelComm* >::iterator vit = pc_list.begin(); vit != pc_list.end(); ++vit )
        delete *vit;

    moab.delete_mesh();
    std::vector< Tag > tags;
    rval = moab.tag_get_tags( tags );CHECK_ERR( rval );
    for( size_t i = 0; i < tags.size(); ++i )
    {
        rval = moab.tag_delete( tags[i] );CHECK_ERR( rval );
    }

    if( procnum == 0 )
    {
        rval = moab.load_file( tmp_filename );
        if( !KeepTmpFiles ) remove( tmp_filename );CHECK_ERR( rval );
    }
}

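// Count the entities of each type that are owned by this process; entities
// that are shared but not owned are filtered out via PSTATUS_NOT_OWNED.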
void count_owned_entities( Interface& moab, int counts[MBENTITYSET] )
{
    ErrorCode rval;
    ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
    CHECK( 0 != pcomm );
    std::fill( counts, counts + MBENTITYSET, 0u );

    for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
    {
        Range range;
        rval = moab.get_entities_by_type( 0, t, range );CHECK_ERR( rval );
        if( !range.empty() ) rval = pcomm->filter_pstatus( range, PSTATUS_NOT_OWNED, PSTATUS_NOT );CHECK_ERR( rval );
        counts[t] = range.size();
    }
}

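// Verify that two meshes are identical: vertices are matched by coordinates
// (squared distance below 1e-12) and elements by their mapped connectivity.
// The search is quadratic, so this is intended for small test meshes only.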
void check_identical_mesh( Interface& mb1, Interface& mb2 )
{
    ErrorCode rval;
    std::map< EntityHandle, EntityHandle > entmap;

    // match vertices by coordinate
    Range r1, r2;
    Range::iterator i1, i2;
    rval = mb1.get_entities_by_type( 0, MBVERTEX, r1 );CHECK_ERR( rval );
    rval = mb2.get_entities_by_type( 0, MBVERTEX, r2 );CHECK_ERR( rval );
    CHECK_EQUAL( r1.size(), r2.size() );
    for( i1 = r1.begin(); i1 != r1.end(); ++i1 )
    {
        double coords1[3];
        rval = mb1.get_coords( &*i1, 1, coords1 );CHECK_ERR( rval );
        for( i2 = r2.begin(); i2 != r2.end(); ++i2 )
        {
            double coords2[3];
            rval = mb2.get_coords( &*i2, 1, coords2 );CHECK_ERR( rval );
            coords2[0] -= coords1[0];
            coords2[1] -= coords1[1];
            coords2[2] -= coords1[2];
            double lensqr = coords2[0] * coords2[0] + coords2[1] * coords2[1] + coords2[2] * coords2[2];
            if( lensqr < 1e-12 ) break;
        }
        CHECK( i2 != r2.end() );
        entmap[*i2] = *i1;
        r2.erase( i2 );
    }

    // match element connectivity
    std::vector< EntityHandle > conn1, conn2;
    for( EntityType t = MBEDGE; t < MBENTITYSET; ++t )
    {
        r1.clear();
        rval = mb1.get_entities_by_type( 0, t, r1 );CHECK_ERR( rval );
        r2.clear();
        rval = mb2.get_entities_by_type( 0, t, r2 );CHECK_ERR( rval );
        CHECK_EQUAL( r1.size(), r2.size() );

        for( i1 = r1.begin(); i1 != r1.end(); ++i1 )
        {
            conn1.clear();
            rval = mb1.get_connectivity( &*i1, 1, conn1 );CHECK_ERR( rval );
            for( i2 = r2.begin(); i2 != r2.end(); ++i2 )
            {
                conn2.clear();
                rval = mb2.get_connectivity( &*i2, 1, conn2 );CHECK_ERR( rval );
                if( conn1.size() != conn2.size() ) continue;
                for( std::vector< EntityHandle >::iterator j = conn2.begin(); j != conn2.end(); ++j )
                    *j = entmap[*j];
                if( conn1 == conn2 ) break;
            }

            CHECK( i2 != r2.end() );
            entmap[*i2] = *i1;
            r2.erase( i2 );
        }
    }
}

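// Partition and write the .cub input in parallel, then verify on the root that
// the per-type entity counts of the written file match the sum of owned
// entities across ranks and that the mesh is identical to a serial read.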
void test_write_elements()
{
    int proc_counts[MBENTITYSET], all_counts[MBENTITYSET], file_counts[MBENTITYSET];
    int err, rank;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );

    // load and partition a .cub file
    Core moab_instance;
    Interface& moab = moab_instance;
    load_and_partition( moab, InputFile, false );

    // count number of owned entities of each type and sum over all procs
    count_owned_entities( moab, proc_counts );
    std::fill( all_counts, all_counts + MBENTITYSET, 0u );
    err = MPI_Allreduce( proc_counts, all_counts, MBENTITYSET, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    CHECK( !err );

    // do parallel write and on root proc do serial read of written file
    save_and_load_on_root( moab, "test_write_elements.h5m" );
    if( rank == 0 )
    {
        for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
        {
            rval = moab.get_number_entities_by_type( 0, t, file_counts[t] );CHECK_ERR( rval );
        }
    }

    // check that sum of owned entities equals number of entities from serial read

    err = MPI_Bcast( file_counts, MBENTITYSET, MPI_INT, 0, MPI_COMM_WORLD );
    CHECK( !err );

    bool all_equal = true;
    for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
        if( file_counts[t] != all_counts[t] ) all_equal = false;

    if( rank == 0 && !all_equal )
    {
        std::cerr << "Type\tPartnd\tWritten" << std::endl;
        for( EntityType t = MBVERTEX; t < MBENTITYSET; ++t )
            std::cerr << CN::EntityTypeName( t ) << '\t' << all_counts[t] << '\t' << file_counts[t] << std::endl;
    }

    CHECK( all_equal );

    // on root processor, do serial read of original .cub file and compare

    if( rank == 0 )
    {
        Core moab2;
        rval = moab2.load_file( InputFile );CHECK_ERR( rval );
        check_identical_mesh( moab, moab2 );
    }
}

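// Compare the per-type entity counts of two sets; report any mismatch and
// return false if the sets differ.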
bool check_sets_sizes( Interface& mb1, EntityHandle set1, Interface& mb2, EntityHandle set2 )
{
    ErrorCode rval;
    bool result = true;
    for( EntityType t = MBVERTEX; t < MBMAXTYPE; ++t )
    {
        int count1, count2;
        rval = mb1.get_number_entities_by_type( set1, t, count1 );CHECK_ERR( rval );
        rval = mb2.get_number_entities_by_type( set2, t, count2 );CHECK_ERR( rval );
        if( count1 != count2 )
        {
            std::cerr << "Sets differ in number of " << CN::EntityTypeName( t ) << " : " << count1 << " vs. " << count2
                      << std::endl;
            result = false;
        }
    }
    return result;
}

void test_write_shared_sets()
{
    int err, rank, size;
    ErrorCode rval;
    err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    CHECK( !err );
    err = MPI_Comm_size( MPI_COMM_WORLD, &size );
    CHECK( !err );

    Core moab_instance;
    Interface& moab = moab_instance;
    load_and_partition( moab, InputFile );
    save_and_load_on_root( moab, "test_write_shared_sets.h5m" );

    if( rank != 0 ) return;

    Core moab2_instance;
    Interface& moab2 = moab2_instance;
    rval             = moab2.load_file( InputFile );CHECK_ERR( rval );

    Tag mattag1, mattag2;
    rval = moab.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag1 );CHECK_ERR( rval );
    rval = moab2.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, mattag2 );CHECK_ERR( rval );

    Range matsets;
    rval = moab2.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag2, 0, 1, matsets );CHECK_ERR( rval );
    for( Range::iterator i = matsets.begin(); i != matsets.end(); ++i )
    {
        int block_id;
        rval = moab2.tag_get_data( mattag2, &*i, 1, &block_id );CHECK_ERR( rval );

        Range tmpents;
        void* tagdata[] = { &block_id };
        rval            = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag1, tagdata, 1, tmpents );
        if( tmpents.size() != 1 ) std::cerr << tmpents.size() << " sets with material set id " << block_id << std::endl;
        CHECK_EQUAL( (int)tmpents.size(), 1 );

        CHECK( check_sets_sizes( moab2, *i, moab, tmpents.front() ) );
    }
}

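// Round-trip a variable-length dense tag: each vertex stores the list
// { n, rank, rank+1, ..., rank+n-1 }, which is checked after the parallel
// write and the serial re-read on the root.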
void test_var_length_parallel()
{
    Range::const_iterator i;
    ErrorCode rval;
    Core moab;
    Interface& mb = moab;
    Range verts;
    Tag vartag;
    const char* filename = "var-len-para.h5m";
    const char* tagname  = "ParVar";

    // If this tag doesn't exist, writer will fail
    Tag junk_tag;
    mb.tag_get_handle( PARALLEL_GID_TAG_NAME, 1, MB_TYPE_INTEGER, junk_tag, MB_TAG_DENSE | MB_TAG_EXCL );

    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    // Create N+1 vertices on each processor, where N is the rank
    std::vector< double > coords( 3 * rank + 3, (double)rank );
    rval = mb.create_vertices( &coords[0], rank + 1, verts );CHECK_ERR( rval );

    // Create a var-len tag
    rval = mb.tag_get_handle( tagname, 0, MB_TYPE_INTEGER, vartag, MB_TAG_DENSE | MB_TAG_VARLEN | MB_TAG_EXCL );CHECK_ERR( rval );

    // Write data on each vertex:
    // { n, rank, rank+1, ..., rank+n-1 } where n >= 1
    std::vector< int > data;
    rval = MB_SUCCESS;
    for( i = verts.begin(); i != verts.end(); ++i )
    {
        EntityHandle h = *i;
        const int n    = h % 7 + 1;
        data.resize( n + 1 );
        data[0] = n;
        for( int j = 0; j < n; ++j )
            data[j + 1] = rank + j;
        const int s          = ( n + 1 );
        const void* ptrarr[] = { &data[0] };
        ErrorCode tmperr     = mb.tag_set_by_ptr( vartag, &h, 1, ptrarr, &s );
        if( MB_SUCCESS != tmperr ) rval = tmperr;
    }
    CHECK_ERR( rval );

    // Write file
    const char* opt = "PARALLEL=WRITE_PART";
    std::string str;
    if( WriteDebugLevel )
    {
        std::ostringstream s;
        s << opt << ";DEBUG_IO=" << WriteDebugLevel;
        str = s.str();
        opt = str.c_str();
    }
    rval = mb.write_file( filename, "MOAB", opt );CHECK_ERR( rval );

    // Read file.  We only reset and re-read the file on the
    // root processor.  All other processors keep the pre-write
    // mesh.  This allows many of the tests to be run on all
    // processors.  Running the tests on the pre-write mesh on
    // non-root processors allows us to verify that any problems
    // are due to the file API rather than some other bug.
    ErrorCode rval2 = rval = MB_SUCCESS;
    if( !rank )
    {
        // destroy and re-construct the Core instance in place to discard all mesh data
        moab.~Core();
        new( &moab ) Core;
        rval = mb.load_mesh( filename );
        if( !KeepTmpFiles ) remove( filename );
        rval2 = mb.tag_get_handle( tagname, 0, MB_TYPE_INTEGER, vartag );
    }
    CHECK_ERR( rval );CHECK_ERR( rval2 );

    // Check that tag is correct
    int tag_size;
    rval = mb.tag_get_length( vartag, tag_size );
    CHECK_EQUAL( MB_VARIABLE_DATA_LENGTH, rval );
    TagType storage;
    rval = mb.tag_get_type( vartag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );
    DataType type;
    rval = mb.tag_get_data_type( vartag, type );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TYPE_INTEGER, type );

    // get vertices
    verts.clear();
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );

    // Check consistency of tag data on each vertex
    // and count the number of vertices for each rank.
    std::vector< int > vtx_counts( numproc, 0 );
    for( i = verts.begin(); i != verts.end(); ++i )
    {
        EntityHandle h        = *i;
        int size              = -1;
        const void* ptrarr[1] = { 0 };
        rval                  = mb.tag_get_by_ptr( vartag, &h, 1, ptrarr, &size );CHECK_ERR( rval );
        const int* tag_data = reinterpret_cast< const int* >( ptrarr[0] );
        CHECK( size >= 2 );
        CHECK( NULL != tag_data );
        CHECK_EQUAL( size - 1, tag_data[0] );
        CHECK( tag_data[1] >= 0 && tag_data[1] < numproc );
        ++vtx_counts[tag_data[1]];
        for( int j = 1; j < size - 1; ++j )
            CHECK_EQUAL( tag_data[1] + j, tag_data[1 + j] );
    }

    // Check number of vertices for each rank
    for( int j = 0; j < numproc; ++j )
    {
        // Only root should have data for other processors.
        if( rank == 0 || rank == j )
            CHECK_EQUAL( j + 1, vtx_counts[j] );
        else
            CHECK_EQUAL( 0, vtx_counts[j] );
    }
}

// Create a row of cubes of hex mesh: num_cpu * blocks_per_cpu blocks of
// intervals^3 hexes laid end-to-end along the x axis. Block num_cpu*j + i is
// assigned to partition i, so each CPU owns blocks_per_cpu blocks.
void create_input_file( const char* file_name,
                        int intervals,
                        int num_cpu,
                        int blocks_per_cpu              = 1,
                        const char* ijk_vert_tag_name   = 0,
                        const char* ij_set_tag_name     = 0,
                        const char* global_tag_name     = 0,
                        const int* global_mesh_value    = 0,
                        const int* global_default_value = 0,
                        bool create_bcsets              = false )
{
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    Tag ijk_vert_tag = 0, ij_set_tag = 0, global_tag = 0;
    if( ijk_vert_tag_name )
    {
        rval = mb.tag_get_handle( ijk_vert_tag_name, 3, MB_TYPE_INTEGER, ijk_vert_tag, MB_TAG_EXCL | MB_TAG_DENSE );CHECK_ERR( rval );
    }
    if( ij_set_tag_name )
    {
        rval = mb.tag_get_handle( ij_set_tag_name, 2, MB_TYPE_INTEGER, ij_set_tag, MB_TAG_SPARSE | MB_TAG_EXCL );CHECK_ERR( rval );
    }
    if( global_tag_name )
    {
        rval = mb.tag_get_handle( global_tag_name, 1, MB_TYPE_INTEGER, global_tag, MB_TAG_DENSE | MB_TAG_EXCL,
                                  global_default_value );CHECK_ERR( rval );
        if( global_mesh_value )
        {
            EntityHandle root = 0;
            rval              = mb.tag_set_data( global_tag, &root, 1, global_mesh_value );CHECK_ERR( rval );
        }
    }

    const int num_blk = num_cpu * blocks_per_cpu;
    int iv = intervals + 1, ii = num_blk * intervals + 1;
    std::vector< EntityHandle > verts( iv * iv * ii );
    int idx = 0;
    for( int i = 0; i < ii; ++i )
    {
        for( int j = 0; j < iv; ++j )
        {
            int start = idx;
            for( int k = 0; k < iv; ++k )
            {
                const double coords[3] = { static_cast< double >( i ), static_cast< double >( j ),
                                           static_cast< double >( k ) };
                rval                   = mb.create_vertex( coords, verts[idx] );CHECK_ERR( rval );
                if( ijk_vert_tag )
                {
                    int vals[] = { i, j, k };
                    rval       = mb.tag_set_data( ijk_vert_tag, &verts[idx], 1, vals );CHECK_ERR( rval );
                }
                ++idx;
            }

            if( ij_set_tag )
            {
                EntityHandle set;
                rval = mb.create_meshset( MESHSET_SET, set );CHECK_ERR( rval );
                rval = mb.add_entities( set, &verts[start], idx - start );CHECK_ERR( rval );
                int vals[] = { i, j };
                rval       = mb.tag_set_data( ij_set_tag, &set, 1, vals );CHECK_ERR( rval );
            }
        }
    }

    const int eb = intervals * intervals * intervals;
    std::vector< EntityHandle > elems( num_blk * eb );
    idx = 0;
    for( int c = 0; c < num_blk; ++c )
    {
        for( int i = c * intervals; i < ( c + 1 ) * intervals; ++i )
        {
            for( int j = 0; j < intervals; ++j )
            {
                for( int k = 0; k < intervals; ++k )
                {
                    EntityHandle conn[8] = { verts[iv * ( iv * i + j ) + k],
                                             verts[iv * ( iv * ( i + 1 ) + j ) + k],
                                             verts[iv * ( iv * ( i + 1 ) + j + 1 ) + k],
                                             verts[iv * ( iv * i + j + 1 ) + k],
                                             verts[iv * ( iv * i + j ) + k + 1],
                                             verts[iv * ( iv * ( i + 1 ) + j ) + k + 1],
                                             verts[iv * ( iv * ( i + 1 ) + j + 1 ) + k + 1],
                                             verts[iv * ( iv * i + j + 1 ) + k + 1] };

                    rval = mb.create_element( MBHEX, conn, 8, elems[idx++] );CHECK_ERR( rval );
                }
            }
        }
    }

    Tag part_tag;
    rval = mb.tag_get_handle( PARTITION_TAG, 1, MB_TYPE_INTEGER, part_tag, MB_TAG_SPARSE | MB_TAG_EXCL );CHECK_ERR( rval );

    std::vector< EntityHandle > parts( num_cpu );
    for( int i = 0; i < num_cpu; ++i )
    {
        rval = mb.create_meshset( MESHSET_SET, parts[i] );CHECK_ERR( rval );
        for( int j = 0; j < blocks_per_cpu; ++j )
        {
            rval = mb.add_entities( parts[i], &elems[( num_cpu * j + i ) * eb], eb );CHECK_ERR( rval );
        }
        rval = mb.tag_set_data( part_tag, &parts[i], 1, &i );CHECK_ERR( rval );
    }

    if( create_bcsets )
    {
        // neumann set
        Range skin_ents;
        rval = Skinner( &mb ).find_skin( 0, &elems[0], elems.size(), false, skin_ents );CHECK_ERR( rval );
        EntityHandle bcset;
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        Tag bcset_tag;
        rval = mb.tag_get_handle( NEUMANN_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        int dum = 100;
        rval    = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, skin_ents );CHECK_ERR( rval );

        // dirichlet set
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        rval = mb.tag_get_handle( DIRICHLET_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        dum  = 200;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        Range nodes;
        rval = mb.get_adjacencies( skin_ents, 0, false, nodes, Interface::UNION );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, nodes );CHECK_ERR( rval );

        // material set
        rval = mb.create_meshset( MESHSET_SET, bcset );CHECK_ERR( rval );
        rval = mb.tag_get_handle( MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, bcset_tag, MB_TAG_SPARSE | MB_TAG_CREAT );CHECK_ERR( rval );
        dum  = 300;
        rval = mb.tag_set_data( bcset_tag, &bcset, 1, &dum );CHECK_ERR( rval );
        rval = mb.add_entities( bcset, &elems[0], elems.size() );CHECK_ERR( rval );
    }

    rval = mb.write_file( file_name, "MOAB" );CHECK_ERR( rval );
}

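// Read the file written by create_input_file in parallel and verify that this
// rank received exactly one partition set, that every hex is in it, and that
// the vertex x-range matches the block assigned to this rank.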
void test_read_elements_common( bool by_rank, int intervals, bool /* print_time */, const char* extra_opts )
{
    const char* file_name = by_rank ? "test_read_rank.h5m" : "test_read.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, intervals, numproc );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options( by_rank, extra_opts );
    rval            = mb.load_file( file_name, 0, opt.c_str() );

    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag part_tag;
    rval = mb.tag_get_handle( PARTITION_TAG, 1, MB_TYPE_INTEGER, part_tag );CHECK_ERR( rval );

    Range parts;
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &part_tag, 0, 1, parts );CHECK_ERR( rval );
    CHECK_EQUAL( 1, (int)parts.size() );
    EntityHandle part = parts.front();
    int id;
    rval = mb.tag_get_data( part_tag, &part, 1, &id );CHECK_ERR( rval );
    if( by_rank )
    {
        CHECK_EQUAL( rank, id );
    }

    // check that all of the elements in the mesh are in the part
    int npart, nall;
    rval = mb.get_number_entities_by_dimension( part, 3, npart );CHECK_ERR( rval );
    rval = mb.get_number_entities_by_dimension( 0, 3, nall );CHECK_ERR( rval );
    CHECK_EQUAL( npart, nall );

    // check that we have the correct vertices
    const double x_min = intervals * rank;
    const double x_max = intervals * ( rank + 1 );
    Range verts;
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );
    std::vector< double > coords( verts.size() );
    rval = mb.get_coords( verts, &coords[0], 0, 0 );CHECK_ERR( rval );
    const double act_x_min = *std::min_element( coords.begin(), coords.end() );
    const double act_x_max = *std::max_element( coords.begin(), coords.end() );
    CHECK_REAL_EQUAL( x_min, act_x_min, DBL_EPSILON );
    CHECK_REAL_EQUAL( x_max, act_x_max, DBL_EPSILON );
}

void test_read_time()
{
    const char file_name[] = "read_time.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, ReadIntervals, numproc, ReadBlocks );
    MPI_Barrier( MPI_COMM_WORLD );

    // wall time and CPU time for each of READ_PART,
    // READ_DELETE, and BCAST_DELETE
    double times[6];
    clock_t tmp_t;

    // Time true parallel read
    Core moab;
    Interface& mb   = moab;
    times[0]        = MPI_Wtime();
    tmp_t           = clock();
    std::string opt = get_read_options( true, READ_PART );
    rval            = mb.load_file( file_name, 0, opt.c_str() );CHECK_ERR( rval );
    times[0] = MPI_Wtime() - times[0];
    times[1] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb.delete_mesh();

    // Time read and delete
    Core moab2;
    Interface& mb2   = moab2;
    std::string opt2 = get_read_options( true, READ_DELETE );
    times[2]         = MPI_Wtime();
    tmp_t            = clock();
    rval             = mb2.load_file( file_name, 0, opt2.c_str() );CHECK_ERR( rval );
    times[2] = MPI_Wtime() - times[2];
    times[3] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb2.delete_mesh();

    // Time broadcast and delete
    Core moab3;
    Interface& mb3   = moab3;
    std::string opt3 = get_read_options( true, BCAST_DELETE );
    times[4]         = MPI_Wtime();
    tmp_t            = clock();
    rval             = mb3.load_file( file_name, 0, opt3.c_str() );CHECK_ERR( rval );
    times[4] = MPI_Wtime() - times[4];
    times[5] = double( clock() - tmp_t ) / CLOCKS_PER_SEC;
    mb3.delete_mesh();

    double max_times[6] = { 0, 0, 0, 0, 0, 0 }, sum_times[6] = { 0, 0, 0, 0, 0, 0 };
    MPI_Reduce( times, max_times, 6, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
    MPI_Reduce( times, sum_times, 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
    MPI_Barrier( MPI_COMM_WORLD );
    if( 0 == rank )
    {
        printf( "%12s  %12s  %12s  %12s\n", "", "READ_PART", "READ_DELETE", "BCAST_DELETE" );
        printf( "%12s  %12g  %12g  %12g\n", "Max Wall", max_times[0], max_times[2], max_times[4] );
        printf( "%12s  %12g  %12g  %12g\n", "Total Wall", sum_times[0], sum_times[2], sum_times[4] );
        printf( "%12s  %12g  %12g  %12g\n", "Max CPU", max_times[1], max_times[3], max_times[5] );
        printf( "%12s  %12g  %12g  %12g\n", "Total CPU", sum_times[1], sum_times[3], sum_times[5] );
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );
}

void test_read_tags()
{
    const char tag_name[]  = "test_tag_xx";
    const char file_name[] = "test_read_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, DefaultReadIntervals, numproc, 1, tag_name );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval            = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 3, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );

    Range verts, tagged;
    rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );
    rval = mb.get_entities_by_type_and_tag( 0, MBVERTEX, &tag, 0, 1, tagged );CHECK_ERR( rval );
    CHECK_EQUAL( verts, tagged );

    for( Range::iterator i = verts.begin(); i != verts.end(); ++i )
    {
        double coords[3];
        rval = mb.get_coords( &*i, 1, coords );CHECK_ERR( rval );
        int ijk[3];
        rval = mb.tag_get_data( tag, &*i, 1, ijk );CHECK_ERR( rval );

        CHECK( ijk[0] >= DefaultReadIntervals * rank );
        CHECK( ijk[0] <= DefaultReadIntervals * ( rank + 1 ) );
        CHECK( ijk[1] >= 0 );
        CHECK( ijk[1] <= DefaultReadIntervals );
        CHECK( ijk[2] >= 0 );
        CHECK( ijk[2] <= DefaultReadIntervals );

        CHECK_REAL_EQUAL( coords[0], (double)ijk[0], 1e-100 );
        CHECK_REAL_EQUAL( coords[1], (double)ijk[1], 1e-100 );
        CHECK_REAL_EQUAL( coords[2], (double)ijk[2], 1e-100 );
    }
}

void test_read_global_tags()
{
    const char tag_name[]  = "test_tag_g";
    const char file_name[] = "test_read_global_tags.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;
    const int def_val    = 0xdeadcad;
    const int global_val = -11;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, 1, numproc, 1, 0, 0, tag_name, &global_val, &def_val );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval            = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 1, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_DENSE, storage );

    int mesh_def_val, mesh_gbl_val;
    rval = mb.tag_get_default_value( tag, &mesh_def_val );CHECK_ERR( rval );
    CHECK_EQUAL( def_val, mesh_def_val );
    EntityHandle root = 0;
    rval              = mb.tag_get_data( tag, &root, 1, &mesh_gbl_val );CHECK_ERR( rval );
    CHECK_EQUAL( global_val, mesh_gbl_val );
}

void test_read_sets_common( const char* extra_opts )
{
    const char tag_name[]  = "test_tag_s";
    const char file_name[] = "test_read_sets.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank ) create_input_file( file_name, DefaultReadIntervals, numproc, 1, 0, tag_name );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options( extra_opts );
    rval            = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    rval = mb.tag_get_handle( tag_name, 2, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );

    TagType storage;
    rval = mb.tag_get_type( tag, storage );CHECK_ERR( rval );
    CHECK_EQUAL( MB_TAG_SPARSE, storage );

    const int iv = DefaultReadIntervals + 1;
    Range sets;
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, sets );CHECK_ERR( rval );
    CHECK_EQUAL( ( iv * iv ), (int)sets.size() );

    for( Range::iterator i = sets.begin(); i != sets.end(); ++i )
    {
        int ij[2];
        rval = mb.tag_get_data( tag, &*i, 1, ij );CHECK_ERR( rval );

        CHECK( ij[0] >= DefaultReadIntervals * rank );
        CHECK( ij[0] <= DefaultReadIntervals * ( rank + 1 ) );
        CHECK( ij[1] >= 0 );
        CHECK( ij[1] <= DefaultReadIntervals );

        Range contents;
        rval = mb.get_entities_by_handle( *i, contents );CHECK_ERR( rval );
        CHECK( contents.all_of_type( MBVERTEX ) );
        CHECK_EQUAL( iv, (int)contents.size() );

        for( Range::iterator v = contents.begin(); v != contents.end(); ++v )
        {
            double coords[3];
            rval = mb.get_coords( &*v, 1, coords );CHECK_ERR( rval );
            CHECK_REAL_EQUAL( coords[0], (double)ij[0], 1e-100 );
            CHECK_REAL_EQUAL( coords[1], (double)ij[1], 1e-100 );
        }
    }
}

void test_read_bc_sets()
{
    // const char tag_name[] = "test_tag_s";
    const char file_name[] = "test_read_sets.h5m";
    int numproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &numproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    Core moab;
    Interface& mb = moab;
    ErrorCode rval;

    // if root processor, create hdf5 file for use in testing
    if( 0 == rank )
        create_input_file( file_name, DefaultReadIntervals, numproc, 1, NULL, NULL, NULL, NULL, NULL, true );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure root has completed writing the file

    // do parallel read unless only one processor
    std::string opt = get_read_options();
    rval            = mb.load_file( file_name, 0, opt.c_str() );
    MPI_Barrier( MPI_COMM_WORLD );  // make sure all procs complete before removing file
    if( 0 == rank && !KeepTmpFiles ) remove( file_name );CHECK_ERR( rval );

    Tag tag;
    int num_ents[3], global_num_ents[3] = { 0, 0, 0 };
    Range sets, contents;
    const char* names[]     = { NEUMANN_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, MATERIAL_SET_TAG_NAME };
    int vints               = DefaultReadIntervals + 1;
    int expected_num_ents[] = { ( numproc * 4 + 2 ) * DefaultReadIntervals * DefaultReadIntervals,
                                ( ( numproc * 4 + 2 ) * ( vints - 2 ) * ( vints - 2 ) + 12 * numproc * ( vints - 2 ) +
                                  8 * numproc ),
                                numproc * DefaultReadIntervals * DefaultReadIntervals * DefaultReadIntervals };

    for( int i = 0; i < 3; i++ )
    {
        rval = mb.tag_get_handle( names[i], 1, MB_TYPE_INTEGER, tag );CHECK_ERR( rval );
        rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, sets );CHECK_ERR( rval );
        CHECK_EQUAL( 1, (int)sets.size() );
        rval = mb.get_entities_by_handle( *sets.begin(), contents, true );CHECK_ERR( rval );
        num_ents[i] = contents.size();
        sets.clear();
        contents.clear();
    }

    MPI_Reduce( num_ents, global_num_ents, 3, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
    if( 0 == rank )
    {
        // std::cout << "Global: " << global_num_ents[0] << " " << global_num_ents[1] << " "
        //           << global_num_ents[2] << std::endl;
        // std::cout << "Expected: " << expected_num_ents[0] << " " << expected_num_ents[1] << " "
        //           << expected_num_ents[2] << std::endl;

        for( int i = 0; i < 3; i++ )
        {
            CHECK_EQUAL( global_num_ents[i], expected_num_ents[i] );
        }
    }
}

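// Each process creates a single element whose type and connectivity length
// depend on its rank; after the parallel write, the root verifies the file
// contains the expected number of each type/length combination.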
01257 void test_write_different_element_types()
01258 {
01259     const char file_name[] = "test_write_different_element_types.h5m";
01260     int numproc, rank;
01261     MPI_Comm_size( MPI_COMM_WORLD, &numproc );
01262     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01263     Core moab;
01264     Interface& mb = moab;
01265     ErrorCode rval;
01266 
01267     const EntityType topos[] = { MBEDGE, MBEDGE, MBQUAD,  MBTRI,     MBQUAD, MBTRI,
01268                                  MBTET,  MBHEX,  MBPRISM, MBPYRAMID, MBHEX,  MBHEX };
01269     const int verts[]        = { 2, 3, 4, 3, 9, 6, 4, 8, 6, 5, 20, 27 };
01270     const int ntypes         = sizeof( topos ) / sizeof( topos[0] );
01271 
01272     const EntityType mtype = topos[rank % ntypes];
01273     const int nvert        = verts[rank % ntypes];
01274     std::vector< EntityHandle > conn( nvert );
01275     for( int i = 0; i < nvert; ++i )
01276     {
01277         const double coords[] = { static_cast< double >( rank ), static_cast< double >( i ), 0 };
01278         rval                  = mb.create_vertex( coords, conn[i] );CHECK_ERR( rval );
01279     }
01280     EntityHandle handle;
01281     rval = mb.create_element( mtype, &conn[0], nvert, handle );CHECK_ERR( rval );
01282 
01283     save_and_load_on_root( mb, file_name );
01284 
01285     if( rank ) return;
01286 
01287     for( int i = 0; i < ntypes; ++i )
01288     {
01289         const int num_exp = numproc / ntypes + ( i < ( numproc % ntypes ) ? 1 : 0 );
01290         Range ents;
01291         rval = mb.get_entities_by_type( 0, topos[i], ents );CHECK_ERR( rval );
01292         int num = 0;
01293         for( Range::iterator e = ents.begin(); e != ents.end(); ++e )
01294         {
01295             const EntityHandle* junk;
01296             int len;
01297             rval = mb.get_connectivity( *e, junk, len, false );
01298             if( len == verts[i] ) ++num;
01299         }
01300 
01301         CHECK_EQUAL( num_exp, num );
01302     }
01303 }
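
// A small sketch for exposition (hypothetical helper, never called by the
// test driver): with the round-robin assignment above, flavor i is created by
// ranks i, i + ntypes, i + 2*ntypes, ..., so the root expects numproc / ntypes
// elements of each flavor plus one extra for the first numproc % ntypes
// flavors. The 'numproc' argument stands in for the MPI size.
static void print_expected_type_counts( int numproc )
{
    const int ntypes = 12;  // number of (type, vertex-count) flavors in the test above
    for( int i = 0; i < ntypes; ++i )
    {
        const int num_exp = numproc / ntypes + ( i < ( numproc % ntypes ) ? 1 : 0 );
        std::printf( "flavor %2d: %d element(s)\n", i, num_exp );
    }
}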
01304 
01305 Tag get_tag( Interface& mb, int rank, bool create )
01306 {
01307     DataType type   = (DataType)( rank % ( MB_MAX_DATA_TYPE + 1 ) );
01308     TagType storage = ( type == MB_TYPE_BIT ) ? MB_TAG_BIT : ( rank % 2 ) ? MB_TAG_DENSE : MB_TAG_SPARSE;
01309     int len         = rank % 3 + 1;
01310     TagType cbit    = create ? MB_TAG_EXCL : (TagType)0;
01311     TagType vbit    = rank % 4 == 1 && storage != MB_TAG_BIT ? MB_TAG_VARLEN : (TagType)0;
01312     std::ostringstream name;
01313     name << "TestTag" << rank;
01314     const void* defval = 0;
01315     const int defint[] = { static_cast< int >( rank ), static_cast< int >( rank / 2 ), static_cast< int >( rank + 1 ),
01316                            static_cast< int >( rank - 1 ) };
01317     const double defreal[] = { 0.1 * rank, 1.0 / rank, static_cast< double >( -rank ), static_cast< double >( rank ) };
01318     const int defhandle[]  = { 0, 0, 0, 0 };
01319     const unsigned char defbit = 0x1;
01320     const char defopq[]        = "Jason";
01321     if( rank % 4 < 2 )
01322     {
01323         switch( type )
01324         {
01325             case MB_TYPE_BIT:
01326                 defval = &defbit;
01327                 break;
01328             case MB_TYPE_INTEGER:
01329                 defval = defint;
01330                 break;
01331             case MB_TYPE_DOUBLE:
01332                 defval = defreal;
01333                 break;
01334             case MB_TYPE_HANDLE:
01335                 defval = defhandle;
01336                 break;
01337             case MB_TYPE_OPAQUE:
01338                 defval = defopq;
01339                 break;
01340         }
01341     }
01342 
01343     Tag result;
01344     ErrorCode rval = mb.tag_get_handle( name.str().c_str(), len, type, result, storage | cbit | vbit, defval );CHECK_ERR( rval );
01345     return result;
01346 }
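
// A sketch for exposition (hypothetical helper, never called by the test
// driver): tabulates the tag properties get_tag() derives from a rank,
// assuming MOAB's DataType ordering with MB_MAX_DATA_TYPE == MB_TYPE_HANDLE.
static void print_tag_properties( int max_rank )
{
    for( int rank = 0; rank < max_rank; ++rank )
    {
        const DataType type = (DataType)( rank % ( MB_MAX_DATA_TYPE + 1 ) );
        const bool bit      = ( type == MB_TYPE_BIT );
        const char* storage = bit ? "BIT" : ( rank % 2 ) ? "DENSE" : "SPARSE";
        const int len       = rank % 3 + 1;
        const bool varlen   = ( rank % 4 == 1 ) && !bit;
        const bool has_def  = ( rank % 4 < 2 );
        std::printf( "TestTag%d: type=%d storage=%s len=%d varlen=%d default=%d\n", rank, (int)type, storage, len,
                     (int)varlen, (int)has_def );
    }
}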
01347 
01348 void test_write_different_tags()
01349 {
01350     const char file_name[] = "test_write_different_tags.h5m";
01351     int numproc, rank;
01352     MPI_Comm_size( MPI_COMM_WORLD, &numproc );
01353     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01354     Core moab;
01355     Interface& mb = moab;
01356 
01357     const int min_tags = 8;
01358     get_tag( mb, rank, true );
01359     for( int idx = rank + numproc; idx < min_tags; idx += numproc )
01360         get_tag( mb, idx, true );
01361 
01362     save_and_load_on_root( mb, file_name );
01363 
01364     if( 0 == rank )
01365     {
01366         for( int i = 0; i < std::max( min_tags, numproc ); ++i )
01367             get_tag( mb, i, false );
01368     }
01369 }
01370 
01371 void test_write_polygons()
01372 {
01373     const char file_name[] = "test_write_polygons.h5m";
01374     int numproc, rank;
01375     MPI_Comm_size( MPI_COMM_WORLD, &numproc );
01376     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01377     Core moab;
01378     Interface& mb = moab;
01379     ErrorCode rval;
01380 
01381     // create a polygon on each process
01382     const double r            = 0.70710678118654757;
01383     const double points[8][3] = {
01384         { 1, 0, static_cast< double >( rank ) },
01385         { static_cast< double >( r ), static_cast< double >( r ), static_cast< double >( rank ) },
01386         { 0, 1, static_cast< double >( rank ) },
01387         { static_cast< double >( -r ), static_cast< double >( r ), static_cast< double >( rank ) },
01388         { -1, 0, static_cast< double >( rank ) },
01389         { static_cast< double >( -r ), static_cast< double >( -r ), static_cast< double >( rank ) },
01390         { 0, -1, static_cast< double >( rank ) },
01391         { static_cast< double >( r ), static_cast< double >( -r ), static_cast< double >( rank ) } };
01392     const int nvtx = rank % 4 + 5;
01393     std::vector< EntityHandle > conn( nvtx );
01394     for( int i = 0; i < nvtx; ++i )
01395     {
01396         rval = mb.create_vertex( points[i], conn[i] );CHECK_ERR( rval );
01397     }
01398 
01399     EntityHandle h;
01400     rval = mb.create_element( MBPOLYGON, &conn[0], nvtx, h );CHECK_ERR( rval );
01401 
01402     save_and_load_on_root( mb, file_name );
01403 
01404     if( rank != 0 ) return;
01405 
01406     // check results on root process
01407 
01408     // determine which polygon was created by which proc by
01409     // looking at the z-coordinate of the vertices
01410     Range range;
01411     rval = mb.get_entities_by_type( 0, MBPOLYGON, range );CHECK_ERR( rval );
01412     std::vector< EntityHandle > poly( numproc, 0 );
01413     CHECK_EQUAL( numproc, (int)range.size() );
01414     for( Range::iterator it = range.begin(); it != range.end(); ++it )
01415     {
01416         const EntityHandle* conn_arr;
01417         int len;
01418         rval = mb.get_connectivity( *it, conn_arr, len );CHECK_ERR( rval );
01419         double coords[3];
01420         rval = mb.get_coords( conn_arr, 1, coords );CHECK_ERR( rval );
01421         int proc = (int)( coords[2] );
01422         CHECK_EQUAL( (EntityHandle)0, poly[proc] );
01423         poly[proc] = *it;
01424     }
01425 
01426     // check that each poly has the expected number of vertices
01427     for( int i = 0; i < numproc; ++i )
01428     {
01429         const EntityHandle* conn_arr;
01430         int len;
01431         rval = mb.get_connectivity( poly[i], conn_arr, len );CHECK_ERR( rval );
01432         CHECK_EQUAL( i % 4 + 5, len );
01433     }
01434 }
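
// The owner-recovery trick above, factored out for exposition (hypothetical
// helper, never called by the test driver): every vertex of a polygon created
// on process p was given z == p, so reading one vertex coordinate identifies
// the creating rank after the serial reload on the root.
static int polygon_owner_from_z( Interface& mb, EntityHandle polygon )
{
    const EntityHandle* conn_arr;
    int len;
    ErrorCode rval = mb.get_connectivity( polygon, conn_arr, len );CHECK_ERR( rval );
    double coords[3];
    rval = mb.get_coords( conn_arr, 1, coords );CHECK_ERR( rval );
    return (int)coords[2];
}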
01435 
01436 // Some processes have no mesh to write
01437 void test_write_unbalanced()
01438 {
01439     const char file_name[] = "test_write_unbalanced.h5m";
01440     int numproc, rank;
01441     MPI_Comm_size( MPI_COMM_WORLD, &numproc );
01442     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01443     Core moab;
01444     Interface& mb = moab;
01445     ErrorCode rval;
01446     Tag idtag = mb.globalId_tag();
01447 
01448     // create a shared set
01449     const int two = 2;
01450     Range entities, sets;
01451 
01452     EntityHandle set;
01453     rval = mb.create_meshset( MESHSET_SET, set );CHECK_ERR( rval );
01454     rval = mb.tag_set_data( idtag, &set, 1, &two );CHECK_ERR( rval );
01455     sets.insert( set );
01456 
01457     // create a quad on every odd processor
01458     if( rank % 2 )
01459     {
01460         const double coords[4][3] = { { static_cast< double >( rank ), 0, 0 },
01461                                       { static_cast< double >( rank + 2 ), 0, 0 },
01462                                       { static_cast< double >( rank + 2 ), 2, 0 },
01463                                       { static_cast< double >( rank ), 2, 0 } };
01464         EntityHandle conn[4], quad;
01465         for( int i = 0; i < 4; ++i )
01466             CHECK_ERR( mb.create_vertex( coords[i], conn[i] ) );
01467         rval = mb.create_element( MBQUAD, conn, 4, quad );CHECK_ERR( rval );
01468 
01469         const int ids[4] = { rank, rank + 2, rank + 3, rank + 1 };
01470         rval             = mb.tag_set_data( idtag, conn, 4, ids );CHECK_ERR( rval );
01471 
01472         rval = mb.add_entities( set, &quad, 1 );CHECK_ERR( rval );
01473 
01474         entities.insert( quad );
01475     }
01476 
01477     // set up sharing data
01478     ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
01479     if( 0 == pcomm ) pcomm = new ParallelComm( &mb, MPI_COMM_WORLD );
01480     rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, NULL, &idtag );CHECK_ERR( rval );
01481     rval = pcomm->resolve_shared_sets( sets, idtag );CHECK_ERR( rval );
01482 
01483     // do parallel write and serial load
01484     save_and_load_on_root( mb, file_name );
01485 
01486     if( rank != 0 ) return;
01487 
01488     // check that we got what we expected
01489     Range quads, verts;
01490     rval = mb.get_entities_by_type( 0, MBQUAD, quads );CHECK_ERR( rval );
01491     rval = mb.get_entities_by_type( 0, MBVERTEX, verts );CHECK_ERR( rval );
01492 
01493     const size_t nquads = numproc / 2;
01494     const size_t nverts = nquads ? 2 + 2 * nquads : 0;
01495     CHECK_EQUAL( nquads, quads.size() );
01496     CHECK_EQUAL( nverts, verts.size() );
01497 
01498     rval = mb.tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, idtag );CHECK_ERR( rval );
01499     sets.clear();
01500     const void* vals[] = { &two };
01501     rval               = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &idtag, vals, 1, sets );CHECK_ERR( rval );
01502     CHECK_EQUAL( (size_t)1, sets.size() );
01503 
01504     entities.clear();
01505     rval = mb.get_entities_by_handle( sets.front(), entities );CHECK_ERR( rval );
01506     CHECK_EQUAL( nquads, entities.size() );
01507     CHECK_EQUAL( quads, entities );
01508 }
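
// A standalone sketch of the vertex-count arithmetic checked above
// (hypothetical helper, never called by the test driver): each odd rank
// contributes one quad spanning x in [rank, rank + 2], so consecutive quads
// share an edge and its two vertices, and nquads quads chain into
// 2 + 2 * nquads distinct vertices.
static void print_expected_unbalanced_counts()
{
    for( int numproc = 1; numproc <= 6; ++numproc )
    {
        const int nquads = numproc / 2;                  // one quad per odd rank
        const int nverts = nquads ? 2 + 2 * nquads : 0;  // shared-edge chain
        std::printf( "np=%d: quads=%d verts=%d\n", numproc, nquads, nverts );
    }
}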
01509 
01510 // This test loads a file that has 2 partitions (mix.h5m):
01511 // one part with only triangles, the other with triangles and quads.
01512 // A dense tag created on this mesh should be stored as dense in the file.
01513 //
01514 void test_write_dense_tags()
01515 {
01516     int err, rank, size;
01517     ErrorCode rval;
01518     err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01519     CHECK( !err );
01520     err = MPI_Comm_size( MPI_COMM_WORLD, &size );
01521     CHECK( !err );
01522 
01523     Core moab;
01524     rval = moab.load_file( InputMix, 0,
01525                            "PARALLEL=READ_PART;"
01526                            "PARTITION=PARALLEL_PARTITION;"
01527                            "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );
01528 
01529     // create a dense tag on all elements, then write the output in parallel;
01530     // load the file again and test the storage type of the tag
01531     Range elems;
01532     rval = moab.get_entities_by_dimension( 0, 2, elems );CHECK_ERR( rval );
01533 
01534     const char* tagname = "element_tag";
01535     const double defVal = 0.;
01536     Tag fieldTag;
01537     rval = moab.tag_get_handle( tagname, 1, MB_TYPE_DOUBLE, fieldTag, MB_TAG_DENSE | MB_TAG_CREAT, &defVal );CHECK_ERR( rval );
01538 
01539     int numElems = (int)elems.size();
01540 
01541     for( int i = 0; i < numElems; i++ )
01542     {
01543         EntityHandle elem = elems[i];
01544         double xyz[3];
01545         rval = moab.get_coords( &elem, 1, xyz );CHECK_ERR( rval );
01546         // tag each element with its distance from the origin
01547         double fieldValue = sqrt( xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2] );
01548         rval = moab.tag_set_data( fieldTag, &elem, 1, &fieldValue );CHECK_ERR( rval );
01549     }
01550 
01551     // write the file in parallel
01552     rval = moab.write_file( "newfile.h5m", 0, "PARALLEL=WRITE_PART" );CHECK_ERR( rval );
01553 
01554     // now read the new file, in a new instance, and test the tag type
01555 
01556     Core moab2;
01557     rval = moab2.load_file( "newfile.h5m", 0,
01558                             "PARALLEL=READ_PART;"
01559                             "PARTITION=PARALLEL_PARTITION;"
01560                             "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );
01561 
01562     // find the element tag
01563     Tag found_tag;
01564     rval = moab2.tag_get_handle( tagname, 1, MB_TYPE_DOUBLE, found_tag );CHECK_ERR( rval );
01565     TagType tagt;
01566     rval = moab2.tag_get_type( found_tag, tagt );CHECK_ERR( rval );
01567     CHECK( tagt == MB_TAG_DENSE );
01568 }
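
// A possible extension, sketched for exposition (hypothetical helper, never
// called by the test driver): beyond the storage type, one could spot-check
// that the tag values survived the round trip by recomputing each element's
// distance from the origin, exactly as when the tag was filled above.
static void check_dense_tag_values( Interface& mb, Tag tag )
{
    Range elems;
    ErrorCode rval = mb.get_entities_by_dimension( 0, 2, elems );CHECK_ERR( rval );
    for( Range::iterator it = elems.begin(); it != elems.end(); ++it )
    {
        EntityHandle e = *it;
        double val, xyz[3];
        rval = mb.tag_get_data( tag, &e, 1, &val );CHECK_ERR( rval );
        rval = mb.get_coords( &e, 1, xyz );CHECK_ERR( rval );
        CHECK_REAL_EQUAL( sqrt( xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2] ), val, 1.e-12 );
    }
}
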
01569 // This test loads a file that has 2 partitions (oneside.h5m)
01570 // and one side set that is adjacent to only one of the parts.
01571 //
01572 void test_read_non_adjs_side()
01573 {
01574     int err, rank, size;
01575     ErrorCode rval;
01576     err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01577     CHECK( !err );
01578     err = MPI_Comm_size( MPI_COMM_WORLD, &size );
01579     CHECK( !err );
01580 
01581     Core moab;
01582     rval = moab.load_file( InputOneSide, 0,
01583                            "PARALLEL=READ_PART;"
01584                            "PARTITION=PARALLEL_PARTITION;"
01585                            "PARALLEL_RESOLVE_SHARED_ENTS" );CHECK_ERR( rval );
01588 }
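
// The test above only verifies that the parallel load succeeds. A possible
// extension, sketched for exposition (hypothetical helper, never called by
// the test driver), would look for the side set locally; that the set carries
// the NEUMANN_SET tag is an assumption about the contents of oneside.h5m.
static void check_side_set_loaded( Interface& mb )
{
    Tag ntag;
    ErrorCode rval = mb.tag_get_handle( NEUMANN_SET_TAG_NAME, 1, MB_TYPE_INTEGER, ntag );CHECK_ERR( rval );
    Range nsets;
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &ntag, 0, 1, nsets );CHECK_ERR( rval );
    // only the part adjacent to the side is guaranteed to see the set locally
    CHECK( nsets.size() <= 1 );
}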