MOAB: Mesh Oriented datABase  (version 5.4.1)
parallel_unit_tests.cpp
00001 #include "moab/ParallelComm.hpp"
00002 #include "MBParallelConventions.h"
00003 #include "moab/ParCommGraph.hpp"
00004 #include "ReadParallel.hpp"
00005 #include "moab/FileOptions.hpp"
00006 #include "MBTagConventions.hpp"
00007 #include "moab/Core.hpp"
00008 #include "moab_mpi.h"
00009 #include "TestUtil.hpp"
00010 
00011 #include <iostream>
00012 #include <algorithm>
00013 #include <map>
00014 #include <sstream>
00015 #include <cassert>
#include <cstring>  // strcmp, memcpy
#include <cstdlib>  // atoi
00016 #if !defined( _MSC_VER ) && !defined( __MINGW32__ )
00017 #include <unistd.h>
00018 #endif
00019 
00020 using namespace moab;
00021 
00022 #define CHKERR( a )                                                                                   \
00023     do                                                                                                \
00024     {                                                                                                 \
00025         ErrorCode val = ( a );                                                                        \
00026         if( MB_SUCCESS != val )                                                                       \
00027         {                                                                                             \
00028             std::cerr << "Error code  " << val << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
00029             return val;                                                                               \
00030         }                                                                                             \
00031     } while( false )
00032 
00033 #define PCHECK( A ) \
00034     if( is_any_proc_error( !( A ) ) ) return report_error( __FILE__, __LINE__ )
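// Note: PCHECK is a collective check -- is_any_proc_error() performs an MPI_Allreduce, so
// every rank must reach the same PCHECK calls in the same order.  CHKERR, by contrast,
// returns locally as soon as an error is detected.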
00035 
00036 ErrorCode report_error( const char* file, int line )
00037 {
00038     std::cerr << "Failure at " << file << ':' << line << std::endl;
00039     return MB_FAILURE;
00040 }
00041 
00042 /**************************************************************************
00043                      Utility Method Declarations
00044  **************************************************************************/
00045 
00046 // Get processors an entity is shared with.
00047 ErrorCode get_sharing_processors( Interface& moab, EntityHandle entity, std::vector< int >& other_procs_out );
00048 
00049 // Create a parallel mesh
00050 //
00051 // Each processor will create four quads.
00052 // Groups of four quads will be arranged as follows:
00053 // +------+------+------+------+------+-----
00054 // |             |             |
00055 // |             |             |
00056 // +    Proc 0   +    Proc 2   +    Proc 4
00057 // |             |             |
00058 // |             |             |
00059 // +------+------+------+------+------+-----
00060 // |             |             |
00061 // |             |             |
00062 // +    Proc 1   +    Proc 3   +    Proc 5
00063 // |             |             |
00064 // |             |             |
00065 // +------+------+------+------+------+-----
00066 //
00067 // Vertices will be enumerated as follows:
00068 // 1------6-----11-----16-----21-----26-----
00069 // |             |             |
00070 // |             |             |
00071 // 2      7     12     17     22     27
00072 // |             |             |
00073 // |             |             |
00074 // 3------8-----13-----18-----23-----28-----
00075 // |             |             |
00076 // |             |             |
00077 // 4      9     14     19     24     29
00078 // |             |             |
00079 // |             |             |
00080 // 5-----10-----15-----20-----25-----30-----
00081 //
00082 // Element IDs will be [4*rank+1, 4*rank+4]
00083 //
00084 // If output_sets is non-null, it will be populated with 2 or 3
00085 // set handles.  A set handle is created for each group of four
00086 // adjacent processes, such that one is created across procs 0 to 3,
00087 // one is created for procs 2 to 5, etc.  An additional set is created
00088 // that spans all of the processes.  The set spanning all procs is
00089 // returned first.  The other two sets are returned in the order of the
00090 // largest contained process rank, where the last entry is zero if
00091 // there is only one additional set.
00092 ErrorCode parallel_create_mesh( Interface& mb,
00093                                 int output_vertx_ids[9],
00094                                 EntityHandle output_vertex_handles[9],
00095                                 Range& output_elements,
00096                                 EntityHandle output_sets[3] = 0 );
00097 
00098 // Test if is_my_error is non-zero on any processor in MPI_COMM_WORLD
00099 int is_any_proc_error( int is_my_error );
00100 
00101 /**************************************************************************
00102                            Test  Declarations
00103  **************************************************************************/
00104 
00105 // Check consistency of sharing data.  (E.g. compare sharing procs for
00106 // vertices to that of adjacent elements, compare sharing data for
00107 // interfaces with that of contained entities, etc.)
00108 ErrorCode test_elements_on_several_procs( const char* filename );
00109 // Test correct ghosting of elements
00110 ErrorCode test_ghost_elements_3_2_1( const char* filename );
00111 ErrorCode test_ghost_elements_3_2_2( const char* filename );
00112 ErrorCode test_ghost_elements_3_0_1( const char* filename );
00113 ErrorCode test_ghost_elements_2_0_1( const char* filename );
00114 // Test exchange of tag data on ghost elements
00115 ErrorCode test_ghost_tag_exchange( const char* filename );
00116 // Bug where exchange_tags fails if dense tag cannot be queried
00117 // for all ghost entities (e.g. no default value)
00118 ErrorCode regression_ghost_tag_exchange_no_default( const char* filename );
00119 // Test owners for interface entities
00120 ErrorCode test_interface_owners( const char* );
00121 // Test data for shared interface entities with one level of ghosting
00122 ErrorCode regression_owners_with_ghosting( const char* );
00123 // Verify all sharing data for vertices with one level of ghosting
00124 ErrorCode test_ghosted_entity_shared_data( const char* );
00125 // Test assignment of global IDs
00126 ErrorCode test_assign_global_ids( const char* );
00127 // Test shared sets
00128 ErrorCode test_shared_sets( const char* );
00129 // Test reduce_tags
00130 ErrorCode test_reduce_tags( const char* );
00131 // Test reduce_tags failures
00132 ErrorCode test_reduce_tag_failures( const char* );
00133 // Test reduce_tags with explicit destination tag
00134 ErrorCode test_reduce_tag_explicit_dest( const char* );
00135 // Test delete_entities
00136 ErrorCode test_delete_entities( const char* );
00137 // Test ghosting polyhedra
00138 ErrorCode test_ghost_polyhedra( const char* );
00139 // Test failed read with too few parts in partition
00140 ErrorCode test_too_few_parts( const char* );
00141 // Test broken sequences due to ghosting
00142 ErrorCode test_sequences_after_ghosting( const char* );
00143 // Test trivial partition in use by iMOAB
00144 void test_trivial_partition();
00145 
00146 /**************************************************************************
00147                               Main Method
00148  **************************************************************************/
00149 
00150 #define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )
00151 
00152 int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
00153 {
00154     ErrorCode result = ( *func )( file_name );
00155     int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
00156     int rank;
00157     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00158     if( rank == 0 )
00159     {
00160         if( is_err )
00161             std::cout << func_name << " : FAILED!!" << std::endl;
00162         else
00163             std::cout << func_name << " : success" << std::endl;
00164     }
00165 
00166     return is_err;
00167 }
00168 
00169 int main( int argc, char* argv[] )
00170 {
00171     int rank, size;
00172     MPI_Init( &argc, &argv );
00173     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00174     MPI_Comm_size( MPI_COMM_WORLD, &size );
00175 
00176     int pause_proc = -1;
00177     std::string filename;
00178     for( int i = 1; i < argc; ++i )
00179     {
00180         if( !strcmp( argv[i], "-p" ) )
00181         {
00182             ++i;
00183             assert( i < argc );
00184             pause_proc = atoi( argv[i] );
00185         }
00186         else if( !filename.size() )
00187         {
00188             filename = std::string( argv[i] );
00189         }
00190         else
00191         {
00192             std::cerr << "Invalid arg: \"" << argv[i] << '"' << std::endl
00193                       << "Usage: " << argv[0] << " [-p <rank>] [<filename>]" << std::endl;
00194             exit( 1 );
00195         }
00196     }
00197 
00198     if( !filename.size() )
00199     {
00200 #ifdef MOAB_HAVE_HDF5
00201         filename = TestDir + "unittest/64bricks_512hex.h5m";
00202 #else
00203         filename = TestDir + "unittest/64bricks_512hex.vtk";
00204 #endif
00205     }
00206     std::cout << "Loading " << filename << "..\n";
00207 #ifdef MOAB_HAVE_HDF5
00208     std::string filename2 = TestDir + "unittest/64bricks_1khex.h5m";
00209     std::string filename3 = TestDir + "unittest/twoPolyh.h5m";
00210     std::string filename4 = TestDir + "unittest/onepart.h5m";
00211 #endif
00212 
00213     if( pause_proc != -1 )
00214     {
00215 #if !defined( _MSC_VER ) && !defined( __MINGW32__ )
00216         std::cout << "Processor " << rank << " of " << size << " with PID " << getpid() << std::endl;
00217         std::cout.flush();
00218 #endif
00219         // loop forever on requested processor, giving the user time
00220         // to attach a debugger.  Once the debugger is attached, the user
00221         // can change 'pause'.  E.g. on gdb do "set var pause = 0"
00222         if( pause_proc == rank )
00223         {
00224             volatile int pause = 1;
00225             while( pause )
00226                 ;
00227         }
00228 
00229         MPI_Barrier( MPI_COMM_WORLD );
00230         std::cout << "Processor " << rank << " resuming" << std::endl;
00231     }
00232 
00233     int num_errors = 0;
00234 #ifdef MOAB_HAVE_HDF5
00235     num_errors += RUN_TEST_ARG2( test_elements_on_several_procs, filename.c_str() );
00236     num_errors += RUN_TEST_ARG2( test_ghost_elements_3_2_1, filename.c_str() );
00237     num_errors += RUN_TEST_ARG2( test_ghost_elements_3_2_2, filename.c_str() );
00238     num_errors += RUN_TEST_ARG2( test_ghost_elements_3_0_1, filename.c_str() );
00239     num_errors += RUN_TEST_ARG2( test_ghost_elements_2_0_1, filename.c_str() );
00240     num_errors += RUN_TEST_ARG2( test_ghost_tag_exchange, filename.c_str() );
00241     num_errors += RUN_TEST_ARG2( regression_ghost_tag_exchange_no_default, filename.c_str() );
00242     num_errors += RUN_TEST_ARG2( test_delete_entities, filename2.c_str() );
00243     num_errors += RUN_TEST_ARG2( test_sequences_after_ghosting, filename2.c_str() );
00244     if( 2 >= size )  // run this one only on one or 2 processors; the file has only 2 parts in partition
00245         num_errors += RUN_TEST_ARG2( test_ghost_polyhedra, filename3.c_str() );
00246     num_errors += RUN_TEST_ARG2( test_too_few_parts, filename4.c_str() );
00247 #endif
00248     num_errors += RUN_TEST_ARG2( test_assign_global_ids, 0 );
00249     num_errors += RUN_TEST_ARG2( test_shared_sets, 0 );
00250     num_errors += RUN_TEST_ARG2( test_reduce_tags, 0 );
00251     num_errors += RUN_TEST_ARG2( test_reduce_tag_failures, 0 );
00252     num_errors += RUN_TEST_ARG2( test_reduce_tag_explicit_dest, 0 );
00253     num_errors += RUN_TEST_ARG2( test_interface_owners, 0 );
00254     num_errors += RUN_TEST_ARG2( test_ghosted_entity_shared_data, 0 );
00255     num_errors += RUN_TEST_ARG2( regression_owners_with_ghosting, 0 );
00256     num_errors += RUN_TEST( test_trivial_partition );
00257     if( rank == 0 )
00258     {
00259         if( !num_errors )
00260             std::cout << "All tests passed" << std::endl;
00261         else
00262             std::cout << num_errors << " TESTS FAILED!" << std::endl;
00263     }
00264 
00265     MPI_Finalize();
00266     return num_errors;
00267 }
00268 
00269 /**************************************************************************
00270                      Utility Method Implementations
00271  **************************************************************************/
00272 
00273 ErrorCode get_sharing_processors( Interface& moab, EntityHandle entity, std::vector< int >& other_procs_out )
00274 {
00275     ErrorCode rval;
00276 
00277     // get tags for parallel data
00278     Tag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
00279     rval = moab.tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedp_tag );CHKERR( rval );
00280     rval = moab.tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER, sharedps_tag );CHKERR( rval );
00281     rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedh_tag );CHKERR( rval );
00282     rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE, sharedhs_tag );CHKERR( rval );
00283     rval = moab.tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatus_tag );CHKERR( rval );
00284 
00285     other_procs_out.clear();
00286     char status;
00287     rval = moab.tag_get_data( pstatus_tag, &entity, 1, &status );CHKERR( rval );
00288     if( !( status & PSTATUS_SHARED ) ) return MB_SUCCESS;
00289 
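    // When an entity is shared with exactly one other process, that rank is stored in the
    // single-valued SHARED_PROC tag; otherwise the SHARED_PROCS tag holds the list of sharing
    // ranks, padded with -1 (which is why the loop below stops at the first negative value).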
00290     int proc_id;
00291     rval = moab.tag_get_data( sharedp_tag, &entity, 1, &proc_id );CHKERR( rval );
00292     if( proc_id >= 0 )
00293     {
00294         other_procs_out.push_back( proc_id );
00295         return MB_SUCCESS;
00296     }
00297 
00298     int procs[MAX_SHARING_PROCS];
00299     rval = moab.tag_get_data( sharedps_tag, &entity, 1, procs );CHKERR( rval );
00300     for( int i = 0; i < MAX_SHARING_PROCS && procs[i] >= 0; ++i )
00301         other_procs_out.push_back( procs[i] );
00302     return MB_SUCCESS;
00303 }
00304 
00305 int is_any_proc_error( int is_my_error )
00306 {
00307     int result = 0;
00308     int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
00309     return err || result;
00310 }
00311 
00312 ErrorCode parallel_create_mesh( Interface& mb,
00313                                 int vtx_ids[9],
00314                                 EntityHandle vtx_handles[9],
00315                                 Range& range,
00316                                 EntityHandle* entity_sets )
00317 {
00318     // Each processor will create four quads.
00319     // Groups of four quads will be arranged as follows:
00320     // +------+------+------+------+------+-----
00321     // |             |             |
00322     // |             |             |
00323     // +    Proc 0   +    Proc 2   +    Proc 4
00324     // |             |             |
00325     // |             |             |
00326     // +------+------+------+------+------+-----
00327     // |             |             |
00328     // |             |             |
00329     // +    Proc 1   +    Proc 3   +    Proc 5
00330     // |             |             |
00331     // |             |             |
00332     // +------+------+------+------+------+-----
00333     //
00334     // Vertices will be enumerated as follows:
00335     // 1------6-----11-----16-----21-----26-----
00336     // |             |             |
00337     // |             |             |
00338     // 2      7     12     17     22     27
00339     // |             |             |
00340     // |             |             |
00341     // 3------8-----13-----18-----23-----28-----
00342     // |             |             |
00343     // |             |             |
00344     // 4      9     14     19     24     29
00345     // |             |             |
00346     // |             |             |
00347     // 5-----10-----15-----20-----25-----30-----
00348     //
00349     // Element IDs will be [4*rank+1, 4*rank+4]
00350 
00351     int size, rank;
00352     MPI_Comm_size( MPI_COMM_WORLD, &size );
00353     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00354 
00355     const int first_vtx_id = 10 * ( rank / 2 ) + 2 * ( rank % 2 ) + 1;
00356     const double x         = 2.0 * ( rank / 2 );
00357     const double y         = 2.0 * ( rank % 2 );
00358 
00359     // create vertices
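    // The offsets below adjust the IDs of the second and third vertex columns: a global
    // vertex column normally spans two process rows (5 IDs), but when the process count is
    // odd the last process column has no partner below it, so its vertex columns hold only
    // 3 IDs and the extra offset drops to 0.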
00360     const int idoff  = ( size % 2 && rank / 2 == size / 2 ) ? 0 : 2;
00361     const int idoff1 = rank ? 2 : idoff;
00362     const int idoff2 = idoff1 + idoff;
00363     const int ids[9] = { first_vtx_id,     first_vtx_id + 3 + idoff1, first_vtx_id + 6 + idoff2,
00364                          first_vtx_id + 1, first_vtx_id + 4 + idoff1, first_vtx_id + 7 + idoff2,
00365                          first_vtx_id + 2, first_vtx_id + 5 + idoff1, first_vtx_id + 8 + idoff2 };
00366     memcpy( vtx_ids, ids, sizeof( ids ) );
00367     const double coords[27] = { x, y,     0,     x + 1, y, 0,     x + 2, y,     0,     x, y + 1, 0,     x + 1, y + 1,
00368                                 0, x + 2, y + 1, 0,     x, y + 2, 0,     x + 1, y + 2, 0, x + 2, y + 2, 0 };
00369 
00370     ErrorCode rval;
00371     Tag id_tag;
00372 
00373     rval = mb.create_vertices( coords, 9, range );CHKERR( rval );
00374     assert( range.size() == 9 );
00375     std::copy( range.begin(), range.end(), vtx_handles );
00376     range.clear();
00377     id_tag = mb.globalId_tag();
00378     rval   = mb.tag_set_data( id_tag, vtx_handles, 9, &ids );CHKERR( rval );
00379 
00380     const EntityHandle conn[4][4] = { { vtx_handles[0], vtx_handles[3], vtx_handles[4], vtx_handles[1] },
00381                                       { vtx_handles[1], vtx_handles[4], vtx_handles[5], vtx_handles[2] },
00382                                       { vtx_handles[3], vtx_handles[6], vtx_handles[7], vtx_handles[4] },
00383                                       { vtx_handles[4], vtx_handles[7], vtx_handles[8], vtx_handles[5] } };
00384     for( int i = 0; i < 4; ++i )
00385     {
00386         const int id = 4 * rank + i + 1;
00387         EntityHandle h;
00388         rval = mb.create_element( MBQUAD, conn[i], 4, h );CHKERR( rval );
00389         range.insert( h );
00390         rval = mb.tag_set_data( id_tag, &h, 1, &id );CHKERR( rval );
00391     }
00392 
00393     if( !entity_sets ) return MB_SUCCESS;
00394 
00395     int set_ids[3] = { size + 1, rank / 2, rank / 2 + 1 };
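    // Set IDs: the first set (ID size+1) spans all processes; the remaining one or two sets
    // identify groups of four adjacent processes (two neighboring process columns), keyed by
    // column index, as described in the declaration comment for parallel_create_mesh.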
00396     int nsets      = 0;
00397     rval           = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
00398     rval = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
00399     entity_sets[nsets] = 0;
00400     if( rank < 2 )
00401     {  // if first (left-most) column
00402         set_ids[1] = set_ids[2];
00403     }
00404     else if( rank / 2 < ( size - 1 ) / 2 )
00405     {  // if not last (right-most) column
00406         rval = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
00407     }
00408     for( int i = 0; i < nsets; ++i )
00409     {
00410         rval = mb.add_entities( entity_sets[i], range );CHKERR( rval );
00411     }
00412     rval = mb.tag_set_data( id_tag, entity_sets, nsets, set_ids );CHKERR( rval );
00413 
00414     return MB_SUCCESS;
00415 }
00416 
00417 /**************************************************************************
00418                            Test  Implementations
00419  **************************************************************************/
00420 
00421 ErrorCode test_elements_on_several_procs( const char* filename )
00422 {
00423     Core mb_instance;
00424     Interface& moab = mb_instance;
00425     ErrorCode rval;
00426     const char* geom_names[] = { "vertex", "curve", "surface", "volume", "unknown" };
00427 
00428     rval = moab.load_file( filename, 0,
00429                            "PARALLEL=READ_DELETE;"
00430                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00431                            "PARTITION_DISTRIBUTE;"
00432                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00433                            "PARALLEL_SEQUENCE_FACTOR=1.4" );CHKERR( rval );
00434 
00435     // test contents of interface sets against sharedEnts structure in pcomm;
00436     int my_error        = 0;
00437     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00438     rval                = pcomm->check_all_shared_handles();
00439     if( MB_SUCCESS != rval )
00440     {
00441         my_error = 1;
00442         std::cerr << "check_all_shared_handles test failed on proc " << pcomm->proc_config().proc_rank() << std::endl;
00443     }
00444     PCHECK( !my_error );
00445 
00446     // check adjacencies just to make sure they're consistent
00447     rval = mb_instance.check_adjacencies();
00448     if( MB_SUCCESS != rval ) my_error = 1;
00449     PCHECK( !my_error );
00450 
00451     Tag geom_tag, id_tag;
00452     rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHKERR( rval );
00453     id_tag = moab.globalId_tag();
00454 
00455     // Because we partitioned based on geometric volume sets, then for each
00456     // lower-dimension geometric entity set all of the contained entities of
00457     // the same dimension must be shared by the same set of processors
00458     Range shared, invalid;
00459     for( int dim = 2; dim > 0; --dim )
00460     {
00461         Range geom_sets;
00462         const void* tagvals[] = { &dim };
00463         rval                  = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &geom_tag, tagvals, 1, geom_sets );CHKERR( rval );
00464 
00465         for( Range::iterator j = geom_sets.begin(); j != geom_sets.end(); ++j )
00466         {
00467             Range ents;
00468             rval = moab.get_entities_by_dimension( *j, dim, ents );CHKERR( rval );
00469             if( ents.empty() ) continue;
00470 
00471             // get a single sharing list to compare with
00472             Range::iterator k = ents.begin();
00473             std::vector< int > procs;
00474             rval = get_sharing_processors( moab, *k, procs );CHKERR( rval );
00475             std::sort( procs.begin(), procs.end() );
00476             if( procs.size() > 1 ) shared.merge( ents );
00477 
00478             // compare other elements
00479             for( ++k; k != ents.end(); ++k )
00480             {
00481                 std::vector< int > tmp_procs;
00482                 rval = get_sharing_processors( moab, *k, tmp_procs );CHKERR( rval );
00483                 std::sort( tmp_procs.begin(), tmp_procs.end() );
00484                 if( tmp_procs != procs ) invalid.insert( *j );
00485             }
00486 
00487             // compare interior vertices
00488             ents.clear();
00489             rval = moab.get_entities_by_dimension( *j, 0, ents );CHKERR( rval );
00490             for( k = ents.begin(); k != ents.end(); ++k )
00491             {
00492                 std::vector< int > tmp_procs;
00493                 rval = get_sharing_processors( moab, *k, tmp_procs );CHKERR( rval );
00494                 if( tmp_procs != procs ) invalid.insert( *j );
00495             }
00496         }
00497     }
00498 
00499     // Report any geometric sets for which sharing was inconsistent
00500     if( !invalid.empty() )
00501     {
00502         std::cerr << "Elements or vertices owned by a single geometric entity are "
00503                   << "not shared by the same set of processors for the "
00504                   << "following geometric entities on process " << pcomm->proc_config().proc_rank() << ": ";
00505         for( Range::iterator i = invalid.begin(); i != invalid.end(); ++i )
00506         {
00507             int dim;
00508             int id;
00509             rval = moab.tag_get_data( geom_tag, &*i, 1, &dim );
00510             if( MB_SUCCESS != rval ) dim = 4;
00511             rval = moab.tag_get_data( id_tag, &*i, 1, &id );
00512             if( MB_SUCCESS != rval ) id = -1;
00513             std::cerr << geom_names[dim] << " " << id << ", ";
00514         }
00515         std::cerr << std::endl;
00516 
00517         my_error = 1;
00518     }
00519     PCHECK( !my_error );
00520 
00521     // for each shared element, check that its vertices are shared
00522     // with at least the same processes
00523     for( Range::iterator i = shared.begin(); i != shared.end(); ++i )
00524     {
00525         std::vector< int > procs;
00526         rval = get_sharing_processors( moab, *i, procs );CHKERR( rval );
00527         std::sort( procs.begin(), procs.end() );
00528 
00529         std::vector< EntityHandle > tmp;
00530         const EntityHandle* conn;
00531         int len;
00532         rval = moab.get_connectivity( *i, conn, len, false, &tmp );CHKERR( rval );
00533         for( int j = 0; j < len; ++j )
00534         {
00535             std::vector< int > vprocs;
00536             rval = get_sharing_processors( moab, conn[j], vprocs );CHKERR( rval );
00537             std::sort( vprocs.begin(), vprocs.end() );
00538             std::vector< int > diff( std::max( procs.size(), vprocs.size() ) );
00539             std::vector< int >::iterator k =
00540                 std::set_difference( procs.begin(), procs.end(), vprocs.begin(), vprocs.end(), diff.begin() );
00541             if( k != diff.begin() )  // difference is not empty
00542                 invalid.insert( conn[j] );
00543         }
00544     }
00545 
00546     // Report any vertices with insufficient sharing
00547     if( !invalid.empty() )
00548     {
00549         std::cerr << "Vertices must be shared with at least the union of the processes "
00550                   << "sharing the elements containing the vertex.  This is NOT true for "
00551                   << "the following vertices on process " << pcomm->proc_config().proc_rank() << ": " << invalid
00552                   << std::endl;
00553 
00554         my_error = 1;
00555     }
00556     PCHECK( !my_error );
00557 
00558     return MB_SUCCESS;
00559 }
00560 
00561 ErrorCode get_ghost_entities( ParallelComm& pcomm, Range& ghost_ents )
00562 {
00563     Range all_ents;
00564     ErrorCode rval;
00565 
00566     rval = pcomm.get_moab()->get_entities_by_handle( 0, all_ents );CHKERR( rval );
00567     std::vector< unsigned char > flags( all_ents.size() );
00568     rval = pcomm.get_moab()->tag_get_data( pcomm.pstatus_tag(), all_ents, &flags[0] );CHKERR( rval );
00569 
00570     Range::iterator ins                            = ghost_ents.begin();
00571     std::vector< unsigned char >::const_iterator f = flags.begin();
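    // An entity is a ghost if it is not owned locally and is not an interface entity
    // (interface entities are shared between parts rather than ghosted).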
00572     for( Range::iterator i = all_ents.begin(); i != all_ents.end(); ++i, ++f )
00573         if( ( *f & PSTATUS_NOT_OWNED ) && !( *f & PSTATUS_INTERFACE ) ) ins = ghost_ents.insert( ins, *i, *i );
00574 
00575     return MB_SUCCESS;
00576 }
00577 
00578 ErrorCode get_ents_from_geometric_sets( Interface& moab,
00579                                         const Tag tags[2],
00580                                         int dimension,
00581                                         const std::vector< int >& ids,
00582                                         Range& results )
00583 {
00584     ErrorCode rval;
00585     for( size_t i = 0; i < ids.size(); ++i )
00586     {
00587         const void* tag_vals[2] = { &dimension, &ids[i] };
00588         Range sets;
00589         rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, tags, tag_vals, 2, sets );CHKERR( rval );
00590         for( Range::iterator j = sets.begin(); j != sets.end(); ++j )
00591         {
00592             Range ents;
00593             rval = moab.get_entities_by_dimension( *j, dimension, ents );CHKERR( rval );
00594             results.merge( ents );
00595         }
00596     }
00597     return MB_SUCCESS;
00598 }
00599 
00600 /**\brief Given entire file and IDs of geometric sets, get expected ghost entities
00601  *
00602  *\param moab               The entire mesh, loaded in serial
00603  *\param partition_geom_ids IDs of: geometric volumes owned by this proc and interface topology
00604  *\param ghost_entity_ids   output list
00605  */
00606 ErrorCode get_expected_ghosts( Interface& moab,
00607                                const std::vector< int > partition_geom_ids[4],
00608                                std::vector< int >& ghost_entity_ids,
00609                                int ghost_dimension,
00610                                int bridge_dimension,
00611                                int num_layers )
00612 {
00613     ErrorCode rval;
00614     Tag tags[2];
00615     rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tags[0] );CHKERR( rval );
00616     tags[1] = moab.globalId_tag();
00617 
00618     // get face entities defining interface
00619     Range skin;
00620     rval = get_ents_from_geometric_sets( moab, tags, 2, partition_geom_ids[2], skin );CHKERR( rval );
00621 
00622     // get adjacent entities
00623     Range iface_ghosts, iface_ents;
00624     if( bridge_dimension == 2 )
00625     {
00626         iface_ents = skin;
00627     }
00628     else
00629     {
00630         rval = moab.get_adjacencies( skin, bridge_dimension, true, iface_ents, Interface::UNION );CHKERR( rval );
00631     }
00632     for( int n = 0; n < num_layers; ++n )
00633     {
00634         iface_ghosts.clear();
00635         rval = moab.get_adjacencies( iface_ents, ghost_dimension, false, iface_ghosts, Interface::UNION );CHKERR( rval );
00636         iface_ents.clear();
00637         rval = moab.get_adjacencies( iface_ghosts, bridge_dimension, true, iface_ents, Interface::UNION );CHKERR( rval );
00638     }
00639 
00640     // get volume entities owned by this process
00641     Range ents;
00642     rval = get_ents_from_geometric_sets( moab, tags, 3, partition_geom_ids[3], ents );CHKERR( rval );
00643 
00644     // get entities of ghost_dimension owned by this process
00645     Range owned;
00646     if( ghost_dimension == 3 )
00647         owned = ents;
00648     else
00649     {
00650         rval = moab.get_adjacencies( ents, ghost_dimension, false, owned, Interface::UNION );CHKERR( rval );
00651     }
00652 
00653     // remove owned entities from ghost list
00654     Range ghosts = subtract( iface_ghosts, owned );
00655 
00656     // get ids
00657     ghost_entity_ids.resize( ghosts.size() );
00658     rval = moab.tag_get_data( tags[1], ghosts, &ghost_entity_ids[0] );CHKERR( rval );
00659     return MB_SUCCESS;
00660 }
00661 
00662 ErrorCode test_ghost_elements( const char* filename, int ghost_dimension, int bridge_dimension, int num_layers )
00663 {
00664     Core mb_instance;
00665     Interface& moab = mb_instance;
00666     ErrorCode rval;
00667 
00668     std::ostringstream file_opts;
00669     file_opts << "PARALLEL=READ_DELETE;"
00670               << "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00671               << "PARTITION_DISTRIBUTE;"
00672               << "PARALLEL_RESOLVE_SHARED_ENTS;"
00673               << "PARALLEL_GHOSTS=" << ghost_dimension << '.' << bridge_dimension << '.' << num_layers;
00674 
00675     rval = moab.load_file( filename, 0, file_opts.str().c_str() );CHKERR( rval );
00676     Tag geom_tag, id_tag;
00677     rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHKERR( rval );
00678     id_tag = moab.globalId_tag();
00679 
00680     // Get partition sets
00681     Range partition_geom[4];
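    // partition_geom[d] will hold the geometric sets of dimension d bounding this part:
    // [3] = owned volumes, [2] = interface surfaces, [1] = their child curves, [0] = vertices.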
00682     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00683     partition_geom[3]   = pcomm->partition_sets();
00684     PCHECK( !partition_geom[3].empty() );
00685 
00686     rval = pcomm->check_all_shared_handles();CHKERR( rval );
00687 
00688     // exchange id tags to allow comparison by id
00689     Range tmp_ents;
00690     rval = pcomm->get_shared_entities( -1, tmp_ents, -1, false, true );CHKERR( rval );
00691     rval = pcomm->exchange_tags( id_tag, tmp_ents );CHKERR( rval );
00692 
00693     // Get geometric surfaces
00694     Range surfs, tmp;
00695     for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
00696     {
00697         tmp.clear();
00698         rval = moab.get_child_meshsets( *i, tmp );CHKERR( rval );
00699         surfs.merge( tmp );
00700     }
00701 
00702     // Get the set of geometric surfaces that represent the skin
00703     // of the union of the parts for this processor.  As we partition
00704     // based on geometric volumes, the interface must be represented
00705     // by some subset of the surfaces and their child geometric topology.
00706 
00707     int error = 0;
00708     std::ostringstream error_msg;
00709     Range ents, iface_surfs, iface_curves, iface_vertices;
00710     for( Range::iterator i = surfs.begin(); !error && i != surfs.end(); ++i )
00711     {
00712         ents.clear();
00713         rval = moab.get_entities_by_dimension( *i, ghost_dimension - 1, ents );CHKERR( rval );
00714         if( ents.empty() ) continue;
00715 
00716         std::vector< int > procs, tmp_procs;
00717         Range::iterator j = ents.begin();
00718         rval              = get_sharing_processors( moab, *j, procs );CHKERR( rval );
00719         for( ++j; !error && j != ents.end(); ++j )
00720         {
00721             tmp_procs.clear();
00722             rval = get_sharing_processors( moab, *j, tmp_procs );CHKERR( rval );
00723             if( tmp_procs != procs )
00724             {
00725                 error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
00726                           << "\tNot all entities in geometric surface are shared with"
00727                           << " same processor." << std::endl;
00728                 error = 1;
00729                 break;
00730             }
00731         }
00732 
00733         if( error ) break;
00734 
00735         // if surface is not part of inter-proc interface, skip it.
00736         if( procs.empty() ) continue;
00737         if( procs.size() != 1 )
00738         {
00739             error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
00740                       << "\tSurface elements shared with " << procs.size() << " processors." << std::endl;
00741             error = 1;
00742             break;
00743         }
00744         int other_rank = procs[0];
00745         if( other_rank == (int)pcomm->proc_config().proc_rank() ) continue;
00746 
00747         partition_geom[2].insert( *i );
00748         ents.clear();
00749         rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
00750         partition_geom[1].merge( ents );
00751     }
00752 
00753     // Don't do global communication until outside
00754     // of loops.  Otherwise we will deadlock if not all
00755     // procs iterate the same number of times.
00756     if( is_any_proc_error( error ) )
00757     {
00758         std::cerr << error_msg.str();
00759         return MB_FAILURE;
00760     }
00761 
00762     for( Range::iterator i = partition_geom[1].begin(); i != partition_geom[1].end(); ++i )
00763     {
00764         ents.clear();
00765         rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
00766         partition_geom[0].merge( ents );
00767     }
00768 
00769     std::vector< int > partn_geom_ids[4];
00770     for( int dim = 0; dim <= 3; ++dim )
00771     {
00772         partn_geom_ids[dim].resize( partition_geom[dim].size() );
00773         rval = moab.tag_get_data( id_tag, partition_geom[dim], &( partn_geom_ids[dim][0] ) );CHKERR( rval );
00774     }
00775 
00776     // get the global IDs of the ghosted entities
00777     Range ghost_ents;
00778     rval = get_ghost_entities( *pcomm, ghost_ents );CHKERR( rval );
00779     std::pair< Range::iterator, Range::iterator > vtx = ghost_ents.equal_range( MBVERTEX );
00780     ghost_ents.erase( vtx.first, vtx.second );
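    // ghost vertices are dropped here; only entities of ghost_dimension are compared below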
00781     std::vector< int > actual_ghost_ent_ids( ghost_ents.size() );
00782     rval = moab.tag_get_data( id_tag, ghost_ents, &actual_ghost_ent_ids[0] );CHKERR( rval );
00783 
00784     // read file in serial
00785     Core moab2;
00786     rval = moab2.load_file( filename );
00787     PCHECK( MB_SUCCESS == rval );
00788 
00789     // get the global IDs of the entities we expect to be ghosted
00790     std::vector< int > expected_ghost_ent_ids;
00791     rval = get_expected_ghosts( moab2, partn_geom_ids, expected_ghost_ent_ids, ghost_dimension, bridge_dimension,
00792                                 num_layers );
00793     PCHECK( MB_SUCCESS == rval );
00794 
00795     // check that the correct entities were ghosted
00796     std::sort( actual_ghost_ent_ids.begin(), actual_ghost_ent_ids.end() );
00797     std::sort( expected_ghost_ent_ids.begin(), expected_ghost_ent_ids.end() );
00798     PCHECK( expected_ghost_ent_ids == actual_ghost_ent_ids );
00799 
00800     // check we only have the partitioned and ghosted entities
00801     // on this processor.
00802     Range myents;
00803     for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
00804     {
00805         ents.clear();
00806         rval = moab.get_entities_by_dimension( *i, 3, ents );CHKERR( rval );
00807         myents.merge( ents );
00808     }
00809     if( ghost_dimension != 3 )
00810     {
00811         ents.clear();
00812         rval = moab.get_adjacencies( myents, ghost_dimension, false, ents, Interface::UNION );
00813         PCHECK( MB_SUCCESS == rval );
00814         myents.swap( ents );
00815     }
00816     myents.merge( ghost_ents );
00817     ents.clear();
00818     rval = moab.get_entities_by_dimension( 0, ghost_dimension, ents );
00819     PCHECK( ents == myents );
00820 
00821     rval = pcomm->check_all_shared_handles();
00822     if( MB_SUCCESS != rval ) error = 1;
00823     PCHECK( !error );
00824 
00825     // done
00826     return MB_SUCCESS;
00827 }
00828 
00829 ErrorCode test_ghost_elements_3_2_1( const char* filename )
00830 {
00831     return test_ghost_elements( filename, 3, 2, 1 );
00832 }
00833 
00834 ErrorCode test_ghost_elements_3_2_2( const char* filename )
00835 {
00836     return test_ghost_elements( filename, 3, 2, 2 );
00837 }
00838 
00839 ErrorCode test_ghost_elements_3_0_1( const char* filename )
00840 {
00841     return test_ghost_elements( filename, 3, 0, 1 );
00842 }
00843 
00844 ErrorCode test_ghost_elements_2_0_1( const char* filename )
00845 {
00846     return test_ghost_elements( filename, 2, 0, 1 );
00847 }
00848 
00849 ErrorCode get_owner_handles( ParallelComm* pcomm, const Range& ents, EntityHandle* handle_arr )
00850 {
00851     size_t i = 0;
00852     int junk;
00853     for( Range::iterator j = ents.begin(); j != ents.end(); ++i, ++j )
00854     {
00855         ErrorCode rval = pcomm->get_owner_handle( *j, junk, handle_arr[i] );
00856         if( MB_SUCCESS != rval ) return rval;
00857     }
00858     return MB_SUCCESS;
00859 }
00860 
00861 ErrorCode test_ghost_tag_exchange( const char* filename )
00862 {
00863     Core mb_instance;
00864     Interface& moab = mb_instance;
00865     ErrorCode rval;
00866 
00867     rval = moab.load_file( filename, 0,
00868                            "PARALLEL=READ_DELETE;"
00869                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00870                            "PARTITION_DISTRIBUTE;"
00871                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00872                            "PARALLEL_GHOSTS=3.2.1;"
00873                            "PARALLEL_SEQUENCE_FACTOR=1.5" );CHKERR( rval );
00874 
00875     // Get ghost elements
00876     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00877     Range local, ghosts;
00878     rval = moab.get_entities_by_dimension( 0, 3, local );CHKERR( rval );
00879     Range::iterator i = local.begin();
00880     while( i != local.end() )
00881     {
00882         int rank;
00883         rval = pcomm->get_owner( *i, rank );CHKERR( rval );
00884         if( rank == (int)pcomm->proc_config().proc_rank() )
00885         {
00886             ++i;
00887         }
00888         else
00889         {
00890             ghosts.insert( *i );
00891             i = local.erase( i );
00892         }
00893     }
00894 
00895     // create a tag to exchange
00896     Tag dense_test_tag;
00897     EntityHandle defval = 0;
00898     // rval = moab.tag_get_handle( "TEST-TAG", sizeof(EntityHandle), MB_TAG_DENSE,
00899     //                         dense_test_tag, &defval ); CHKERR(rval);
00900     rval = moab.tag_get_handle( "TEST-TAG", 1, MB_TYPE_HANDLE, dense_test_tag, MB_TAG_DENSE | MB_TAG_EXCL, &defval );CHKERR( rval );
00901 
00902     // for all entities that I own, set tag to handle value
00903     std::vector< EntityHandle > handles( local.size() ), handles2;
00904     std::copy( local.begin(), local.end(), handles.begin() );
00905     rval = moab.tag_set_data( dense_test_tag, local, &handles[0] );CHKERR( rval );
00906 
00907     // exchange tag data
00908     Range tmp_range;
00909     rval = pcomm->exchange_tags( dense_test_tag, tmp_range );CHKERR( rval );
00910 
00911     // make sure local values are unchanged
00912     handles2.resize( local.size() );
00913     rval = moab.tag_get_data( dense_test_tag, local, &handles2[0] );CHKERR( rval );
00914     PCHECK( handles == handles2 );
00915 
00916     // compare values on ghost entities
00917     handles.resize( ghosts.size() );
00918     handles2.resize( ghosts.size() );
00919     rval = moab.tag_get_data( dense_test_tag, ghosts, &handles2[0] );CHKERR( rval );
00920     rval = get_owner_handles( pcomm, ghosts, &handles[0] );CHKERR( rval );
00921     PCHECK( handles == handles2 );
00922 
00923     // now do it all again for a second, integer-valued tag (created dense, despite the name)
00924     Tag sparse_test_tag;
00925     // rval = moab.tag_get_handle( "TEST-TAG-2", sizeof(int), MB_TAG_DENSE,
00926     //                         MB_TYPE_INTEGER, sparse_test_tag, 0 ); CHKERR(rval);
00927     rval = moab.tag_get_handle( "TEST-TAG-2", 1, MB_TYPE_INTEGER, sparse_test_tag, MB_TAG_DENSE | MB_TAG_EXCL );CHKERR( rval );
00928 
00929     // for all entities that I own, set tag to my rank
00930     std::vector< int > procs1( local.size(), pcomm->proc_config().proc_rank() );
00931     rval = moab.tag_set_data( sparse_test_tag, local, &procs1[0] );CHKERR( rval );
00932 
00933     // exchange tag data
00934     tmp_range.clear();
00935     rval = pcomm->exchange_tags( sparse_test_tag, tmp_range );
00936     PCHECK( MB_SUCCESS == rval );
00937 
00938     // make sure local values are unchanged
00939     std::vector< int > procs2( local.size() );
00940     rval = moab.tag_get_data( sparse_test_tag, local, &procs2[0] );CHKERR( rval );
00941     PCHECK( procs1 == procs2 );
00942 
00943     // compare values on ghost entities
00944     procs1.resize( ghosts.size() );
00945     procs2.resize( ghosts.size() );
00946     rval = moab.tag_get_data( sparse_test_tag, ghosts, &procs2[0] );CHKERR( rval );
00947     std::vector< int >::iterator j = procs1.begin();
00948     for( i = ghosts.begin(); i != ghosts.end(); ++i, ++j )
00949     {
00950         rval = pcomm->get_owner( *i, *j );CHKERR( rval );
00951     }
00952     PCHECK( procs1 == procs2 );
00953 
00954     return MB_SUCCESS;
00955 }
00956 
00957 ErrorCode regression_ghost_tag_exchange_no_default( const char* filename )
00958 {
00959     Core mb_instance;
00960     Interface& moab = mb_instance;
00961     ErrorCode rval;
00962 
00963 #ifdef MOAB_HAVE_HDF5
00964     rval = moab.load_file( filename, 0,
00965                            "PARALLEL=READ_DELETE;"
00966                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00967                            "PARTITION_DISTRIBUTE;"
00968                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00969                            "PARALLEL_GHOSTS=3.2.1" );
00970 #else
00971     rval = moab.load_file( filename, 0,
00972                            "PARALLEL=READ_BCAST;"
00973                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00974                            "PARTITION_DISTRIBUTE;"
00975                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00976                            "PARALLEL_GHOSTS=3.2.1" );
00977 #endif
00978     CHKERR( rval );
00979 
00980     // create a tag to exchange
00981     Tag dense_test_tag;
00982     // rval = moab.tag_get_handle( "TEST-TAG", sizeof(EntityHandle), MB_TAG_DENSE,
00983     //                         dense_test_tag, 0 ); CHKERR(rval);
00984     rval = moab.tag_get_handle( "TEST-TAG", 1, MB_TYPE_HANDLE, dense_test_tag, MB_TAG_DENSE | MB_TAG_EXCL );CHKERR( rval );
00985 
00986     // exchange tag data
00987     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00988     Range tmp_range;
00989     rval = pcomm->exchange_tags( dense_test_tag, tmp_range );
00990     PCHECK( MB_SUCCESS == rval );
00991 
00992     return MB_SUCCESS;
00993 }
00994 
00995 // Helper for exchange_sharing_data
00996 // Swap contents of buffer with specified processor.
00997 int MPI_swap( void* buffer, int num_val, MPI_Datatype val_type, int other_proc )
00998 {
00999     int err, rank, bytes;
01000     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01001     MPI_Type_size( val_type, &bytes );
01002     bytes *= num_val;
01003     std::vector< unsigned char > buffer2( bytes );
01004 
01005     for( int i = 0; i < 2; ++i )
01006     {
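        // The two ranks take the Send/Recv branches in opposite order (the higher rank sends
        // on the first pass, the lower rank on the second), so the blocking calls cannot deadlock.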
01007         if( i == ( rank < other_proc ) )
01008         {
01009             err = MPI_Send( buffer, num_val, val_type, other_proc, 0, MPI_COMM_WORLD );
01010             if( err ) return err;
01011         }
01012         else
01013         {
01014             MPI_Status status;
01015             err = MPI_Recv( &buffer2[0], num_val, val_type, other_proc, 0, MPI_COMM_WORLD, &status );
01016             if( err ) return err;
01017         }
01018     }
01019 
01020     memcpy( buffer, &buffer2[0], bytes );
01021     return 0;
01022 }
01023 
01024 int valid_ghosting_owners( int comm_size, const int* ids, const int* owners )
01025 {
01026     // for each vertex ID, build list of {rank,owner} tuples
01027     std::map< int, std::vector< int > > verts;
01028     for( int p = 0; p < comm_size; ++p )
01029     {
01030         for( int i = 0; i < 9; ++i )
01031         {  // nine verts per proc
01032             int idx = 9 * p + i;
01033             verts[ids[idx]].push_back( p );
01034             verts[ids[idx]].push_back( owners[idx] );
01035         }
01036     }
01037 
01038     // now check for each vertex that the owner from
01039     // each processor is the same
01040     bool print_desc = true;
01041     int error_count = 0;
01042     std::map< int, std::vector< int > >::iterator it;
01043     for( it = verts.begin(); it != verts.end(); ++it )
01044     {
01045         int id                   = it->first;
01046         std::vector< int >& list = it->second;
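        // 'list' holds (reporting rank, reported owner) pairs flattened as [r0,o0,r1,o1,...];
        // every reported owner must match the first entry.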
01047         bool all_same            = true;
01048         for( size_t i = 2; i < list.size(); i += 2 )
01049             if( list[i + 1] != list[1] ) all_same = false;
01050         if( all_same ) continue;
01051 
01052         ++error_count;
01053 
01054         if( print_desc )
01055         {
01056             print_desc = false;
01057             std::cerr << "ERROR at " __FILE__ ":" << __LINE__ << std::endl
01058                       << "  Processors have inconsistent ideas of vertex ownership:" << std::endl;
01059         }
01060 
01061         std::cerr << "  Vertex " << id << ": " << std::endl;
01062         for( size_t i = 0; i < list.size(); i += 2 )
01063             std::cerr << "    Proc " << list[i] << " thinks owner is " << list[i + 1] << std::endl;
01064     }
01065 
01066     return error_count;
01067 }
01068 
01069 ErrorCode test_interface_owners_common( int num_ghost_layers )
01070 {
01071     ErrorCode rval;
01072     Core moab_instance;
01073     Interface& mb = moab_instance;
01074     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01075 
01076     // build distributed quad mesh
01077     Range quads;
01078     EntityHandle verts[9];
01079     int ids[9];
01080     rval = parallel_create_mesh( mb, ids, verts, quads );
01081     PCHECK( MB_SUCCESS == rval );
01082     rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
01083     PCHECK( MB_SUCCESS == rval );
01084     if( num_ghost_layers )
01085     {
01086         rval = pcomm.exchange_ghost_cells( 2, 0, num_ghost_layers, 0, true );
01087         PCHECK( MB_SUCCESS == rval );
01088     }
01089 
01090     // get vertex owners
01091     int owner[9];
01092     for( int i = 0; i < 9; ++i )
01093     {
01094         rval = pcomm.get_owner( verts[i], owner[i] );
01095         if( MB_SUCCESS != rval ) break;
01096     }
01097     PCHECK( MB_SUCCESS == rval );
01098 
01099     // exchange vertex owners amongst all processors
01100     int rank, size, ierr;
01101     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01102     MPI_Comm_size( MPI_COMM_WORLD, &size );
01103     std::vector< int > all_ids( 9 * size ), all_owner( 9 * size );
01104     ierr = MPI_Gather( ids, 9, MPI_INT, &all_ids[0], 9, MPI_INT, 0, MPI_COMM_WORLD );
01105     if( ierr ) return MB_FAILURE;
01106     ierr = MPI_Gather( owner, 9, MPI_INT, &all_owner[0], 9, MPI_INT, 0, MPI_COMM_WORLD );
01107     if( ierr ) return MB_FAILURE;
01108 
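    // Only the root rank has the gathered data, so it alone runs the check; the result is
    // broadcast so that every rank returns the same pass/fail status.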
01109     int errors = rank ? 0 : valid_ghosting_owners( size, &all_ids[0], &all_owner[0] );
01110     MPI_Bcast( &errors, 1, MPI_INT, 0, MPI_COMM_WORLD );
01111     return errors ? MB_FAILURE : MB_SUCCESS;
01112 }
01113 
01114 // Common implementation for both:
01115 //   test_interface_owners
01116 //   regression_owners_with_ghosting
01117 ErrorCode test_interface_owners( const char* )
01118 {
01119     return test_interface_owners_common( 0 );
01120 }
01121 
01122 ErrorCode regression_owners_with_ghosting( const char* )
01123 {
01124     return test_interface_owners_common( 1 );
01125 }
01126 
01127 struct VtxData
01128 {
01129     std::vector< int > procs;
01130     std::vector< int > owners;
01131     std::vector< EntityHandle > handles;
01132 };
01133 
01134 ErrorCode test_ghosted_entity_shared_data( const char* )
01135 {
01136     ErrorCode rval;
01137     Core moab_instance;
01138     Interface& mb = moab_instance;
01139     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01140 
01141     int rank, size;
01142     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01143     MPI_Comm_size( MPI_COMM_WORLD, &size );
01144 
01145     // build distributed quad mesh
01146     Range quads;
01147     EntityHandle verts[9];
01148     int ids[9];
01149     rval = parallel_create_mesh( mb, ids, verts, quads );
01150     PCHECK( MB_SUCCESS == rval );
01151     rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
01152     PCHECK( MB_SUCCESS == rval );
01153     rval = pcomm.exchange_ghost_cells( 2, 1, 1, 0, true );
01154     PCHECK( MB_SUCCESS == rval );
01155 
01156     rval = pcomm.check_all_shared_handles();
01157     PCHECK( MB_SUCCESS == rval );
01158 
01159     return MB_SUCCESS;
01160 }
01161 
01162 ErrorCode check_consistent_ids( Interface& mb,
01163                                 const EntityHandle* entities,
01164                                 const int* orig_ids,
01165                                 int num_ents,
01166                                 const char* singular_name,
01167                                 const char* plural_name )
01168 {
01169     ErrorCode rval;
01170     int rank, size, ierr;
01171     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01172     MPI_Comm_size( MPI_COMM_WORLD, &size );
01173 
01174     Tag id_tag = mb.globalId_tag();
01175     std::vector< int > new_ids( num_ents );
01176     rval = mb.tag_get_data( id_tag, entities, num_ents, &new_ids[0] );CHKERR( rval );
01177     // This test is wrong.  a) The caller can select a start ID so there's
01178     // no guarantee that the IDs will be in any specific range and b) There
01179     // is no reason to expect the global number of entities to be num_ents*size
01180     // if there are any shared entities. J.Kraftcheck 2010-12-22
01181     // rval = MB_SUCCESS;
01182     // for (int i = 0; i < num_ents; ++i)
01183     //  if (new_ids[i] < 0 || new_ids[i] >= num_ents*size) {
01184     //    std::cerr << "ID out of bounds on proc " << rank
01185     //              << " : " << new_ids[i] << " not in [0," << num_ents*size-1
01186     //              << "]" << std::endl;
01187     //    rval = MB_FAILURE;
01188     //  }
01189     // if (MB_SUCCESS != rval)
01190     //  return rval;
01191 
01192     // Gather up all data on root proc for consistency check
01193     std::vector< int > all_orig_ids( num_ents * size ), all_new_ids( num_ents * size );
01194     ierr = MPI_Gather( (void*)orig_ids, num_ents, MPI_INT, &all_orig_ids[0], num_ents, MPI_INT, 0, MPI_COMM_WORLD );
01195     if( ierr ) return MB_FAILURE;
01196     ierr = MPI_Gather( &new_ids[0], num_ents, MPI_INT, &all_new_ids[0], num_ents, MPI_INT, 0, MPI_COMM_WORLD );
01197     if( ierr ) return MB_FAILURE;
01198 
01199     // build a local map from original ID to new ID and use it
01200     // to check for consistency between all procs
01201     rval = MB_SUCCESS;
01203     if( 0 == rank )
01204     {
01205         // check for two processors having different global ID for same entity
01206         std::map< int, int > idmap;  // index by original ID and contains new ID
01207         std::map< int, int > owner;  // index by original ID and contains owning rank
01208         for( int i = 0; i < num_ents * size; ++i )
01209         {
01210             std::map< int, int >::iterator it = idmap.find( all_orig_ids[i] );
01211             if( it == idmap.end() )
01212             {
01213                 idmap[all_orig_ids[i]] = all_new_ids[i];
01214                 owner[all_orig_ids[i]] = i / num_ents;
01215             }
01216             else if( it->second != all_new_ids[i] )
01217             {
01218                 std::cerr << "Inconsistent " << singular_name << " IDs between processors " << owner[all_orig_ids[i]]
01219                           << " and " << i / num_ents << " : " << it->second << " and " << all_new_ids[i]
01220                           << " respectively." << std::endl;
01221                 rval = MB_FAILURE;
01222             }
01223         }
01224         // check for two processors having same global ID for different entities
01225         idmap.clear();
01226         owner.clear();
01227         for( int i = 0; i < num_ents * size; ++i )
01228         {
01229             std::map< int, int >::iterator it = idmap.find( all_new_ids[i] );
01230             if( it == idmap.end() )
01231             {
01232                 idmap[all_new_ids[i]] = all_orig_ids[i];
01233                 owner[all_new_ids[i]] = i / num_ents;
01234             }
01235             else if( it->second != all_orig_ids[i] )
01236             {
01237                 std::cerr << "ID " << all_new_ids[i] << " assigned to different " << plural_name << " on processors "
01238                           << owner[all_new_ids[i]] << " and " << i / num_ents << std::endl;
01239                 rval = MB_FAILURE;
01240             }
01241         }
01242     }
01243     return rval;
01244 }
01245 
01246 ErrorCode test_assign_global_ids( const char* )
01247 {
01248     ErrorCode rval;
01249     Core moab_instance;
01250     Interface& mb = moab_instance;
01251     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01252 
01253     int rank, size;
01254     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01255     MPI_Comm_size( MPI_COMM_WORLD, &size );
01256 
01257     // build distributed quad mesh
01258     Range quad_range;
01259     EntityHandle verts[9];
01260     int vert_ids[9];
01261     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01262     PCHECK( MB_SUCCESS == rval );
01263     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01264     PCHECK( MB_SUCCESS == rval );
01265 
01266     // get global ids for quads
01267     Tag id_tag = mb.globalId_tag();
01268     assert( 4u == quad_range.size() );
01269     EntityHandle quads[4];
01270     std::copy( quad_range.begin(), quad_range.end(), quads );
01271     int quad_ids[4];
01272     rval = mb.tag_get_data( id_tag, quads, 4, quad_ids );CHKERR( rval );
01273 
01274     // clear GLOBAL_ID tag
01275     int zero[9] = { 0 };
01276     rval        = mb.tag_set_data( id_tag, verts, 9, zero );CHKERR( rval );
01277     rval = mb.tag_set_data( id_tag, quads, 4, zero );CHKERR( rval );
01278 
01279     // assign new global IDs
01280     rval = pcomm.assign_global_ids( 0, 2 );
01281     PCHECK( MB_SUCCESS == rval );
01282 
01283     rval = check_consistent_ids( mb, verts, vert_ids, 9, "vertex", "vertices" );
01284     PCHECK( MB_SUCCESS == rval );
01285     rval = check_consistent_ids( mb, quads, quad_ids, 4, "quad", "quads" );
01286     PCHECK( MB_SUCCESS == rval );
01287     return rval;
01288 }
01289 
01290 ErrorCode test_shared_sets( const char* )
01291 {
01292     ErrorCode rval;
01293     Core moab_instance;
01294     Interface& mb = moab_instance;
01295     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01296 
01297     int rank_i, size_i;
01298     MPI_Comm_rank( MPI_COMM_WORLD, &rank_i );
01299     MPI_Comm_size( MPI_COMM_WORLD, &size_i );
01300     const unsigned rank  = rank_i;
01301     const unsigned nproc = size_i;
01302 
01303     // build distributed quad mesh
01304     Range quads, sets;
01305     EntityHandle verts[9], set_arr[3];
01306     int ids[9];
01307     rval = parallel_create_mesh( mb, ids, verts, quads, set_arr );
01308     PCHECK( MB_SUCCESS == rval );
01309     rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
01310     PCHECK( MB_SUCCESS == rval );
01311     sets.insert( set_arr[0] );
01312     sets.insert( set_arr[1] );
01313     if( set_arr[2] )
01314     {
01315         sets.insert( set_arr[2] );
01316     }
01317     else
01318     {
01319         set_arr[2] = set_arr[1];
01320     }
01321 
01322     Tag id_tag = mb.globalId_tag();
01323     rval       = pcomm.resolve_shared_sets( sets, id_tag );
01324     PCHECK( MB_SUCCESS == rval );
01325 
01326     // check that set data is consistent
01327     ErrorCode ok = MB_SUCCESS;
01328     for( size_t i = 0; i < 3; ++i )
01329     {
01330         unsigned owner;
01331         EntityHandle owner_handle, local;
01332         rval = pcomm.get_entityset_owner( set_arr[i], owner, &owner_handle );
01333         if( MB_SUCCESS != rval )
01334             ok = rval;
01335         else if( owner == rank )
01336         {
01337             if( owner_handle != set_arr[i] )
01338             {
01339                 std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid remote handle for owned set"
01340                           << std::endl;
01341                 ok = MB_FAILURE;
01342             }
01343         }
01344         else if( MB_SUCCESS != pcomm.get_entityset_local_handle( owner, owner_handle, local ) )
01345             ok = MB_FAILURE;
01346         else if( local != set_arr[i] )
01347         {
01348             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid local handle for remote data"
01349                       << std::endl;
01350             ok = MB_FAILURE;
01351         }
01352     }
01353     PCHECK( MB_SUCCESS == ok );
01354 
01355     const unsigned col = rank / 2;  // which column this proc is in
01356     const unsigned colrank =
01357         2 * col;  // rank of the first of the two procs in that column (rank if even, rank-1 if odd); see the worked example below
01358     unsigned mins[3] = { 0, 0, 0 };
01359     unsigned maxs[3] = { nproc - 1, 0, 0 };
01360     if( rank < 2 )
01361     {  // if in first (left-most) column, then
01362         mins[1] = mins[2] = 0;
01363         maxs[1] = maxs[2] = std::min( 3u, nproc - 1 );
01364     }
01365     else if( col == ( nproc - 1 ) / 2 )
01366     {  // else if last (right-most) column, then
01367         mins[1] = mins[2] = colrank - 2;
01368         maxs[1] = maxs[2] = std::min( colrank + 1, nproc - 1 );
01369     }
01370     else
01371     {  // if inside column, then
01372         mins[1] = colrank - 2;
01373         mins[2] = colrank;
01374         maxs[1] = std::min( colrank + 1, nproc - 1 );
01375         maxs[2] = std::min( colrank + 3, nproc - 1 );
01376     }
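
    // Worked example of the ranges computed above (values follow directly from the branches),
    // assuming nproc = 6 and rank = 3: col = 1, colrank = 2, interior-column case, giving
    //   mins = { 0, 0, 2 } and maxs = { 5, 3, 5 };
    // excluding this rank itself, the sharing lists checked below are then expected to be
    //   set 0: { 0, 1, 2, 4, 5 },  set 1: { 0, 1, 2 },  set 2: { 2, 4, 5 }.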
01377 
01378     // check expected sharing lists
01379     // set i is expected to be shared among all procs in the range [ mins[i], maxs[i] ]
01380     std::vector< unsigned > expected, list;
01381     for( size_t i = 0; i < 3; ++i )
01382     {
01383         expected.clear();
01384         for( unsigned r = mins[i]; r <= std::min( maxs[i], nproc - 1 ); ++r )
01385             if( r != rank ) expected.push_back( r );
01386         list.clear();
01387         rval = pcomm.get_entityset_procs( set_arr[i], list );
01388         if( MB_SUCCESS != rval )
01389             ok = rval;
01390         else
01391         {
01392             std::sort( list.begin(), list.end() );
01393             if( expected != list )
01394             {
01395                 std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " incorrect sharing list for entity set"
01396                           << std::endl;
01397                 ok = MB_FAILURE;
01398             }
01399         }
01400     }
01401     PCHECK( MB_SUCCESS == ok );
01402 
01403     // check consistent owners
01404     unsigned send_list[6], set_owners[3];
01405     std::vector< unsigned > recv_list( 6 * nproc );
01406     for( size_t i = 0; i < 3; ++i )
01407     {
01408         mb.tag_get_data( id_tag, set_arr + i, 1, &send_list[2 * i] );
01409         pcomm.get_entityset_owner( set_arr[i], set_owners[i] );
01410         send_list[2 * i + 1] = set_owners[i];
01411     }
01412     MPI_Allgather( send_list, 6, MPI_UNSIGNED, &recv_list[0], 6, MPI_UNSIGNED, MPI_COMM_WORLD );
01413     std::map< unsigned, unsigned > owners;
01414     for( unsigned i = 0; i < 6 * nproc; i += 2 )
01415     {
01416         unsigned id    = recv_list[i];
01417         unsigned owner = recv_list[i + 1];
01418         if( owners.find( id ) == owners.end() )
01419             owners[id] = owner;
01420         else if( owners[id] != owner )
01421         {
01422             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " mismatched owners (" << owners[id]
01423                       << " and " << owner << ") for set with ID " << id << std::endl;
01424             ok = MB_FAILURE;
01425         }
01426     }
01427     PCHECK( MB_SUCCESS == ok );
01428 
01429     // test PComm::get_entityset_owners
01430     std::vector< unsigned > act_owners, exp_owners;
01431     for( size_t i = 0; i < 3; ++i )
01432     {
01433         exp_owners.push_back( set_owners[i] );
01434     }
01435     ok = pcomm.get_entityset_owners( act_owners );
01436     PCHECK( MB_SUCCESS == ok );
01437     // PCHECK(std::find(act_owners.begin(),act_owners.end(),rank) == act_owners.end());
01438     std::sort( act_owners.begin(), act_owners.end() );
01439     std::sort( exp_owners.begin(), exp_owners.end() );
01440     exp_owners.erase( std::unique( exp_owners.begin(), exp_owners.end() ), exp_owners.end() );
01441     PCHECK( exp_owners == act_owners );
01442 
01443     // test PComm::get_owned_sets
01444     ok = MB_SUCCESS;
01445     for( unsigned i = 0; i < nproc; ++i )
01446     {
01447         sets.clear();
01448         if( MB_SUCCESS != pcomm.get_owned_sets( i, sets ) )
01449         {
01450             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
01451                       << " failed to get shared set list for sets owned by rank " << set_owners[i] << std::endl;
01452             continue;
01453         }
01454 
01455         Range expected_range;
01456         for( size_t j = 0; j < 3; ++j )
01457             if( set_owners[j] == i ) expected_range.insert( set_arr[j] );
01458 
01459         if( expected_range != sets )
01460         {
01461             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
01462                       << " has incorrect shared set list for sets owned by rank " << set_owners[i] << std::endl
01463                       << "Expected: " << expected_range << std::endl
01464                       << "Actual: " << sets << std::endl;
01465             ok = MB_FAILURE;
01466         }
01467     }
01468     PCHECK( MB_SUCCESS == ok );
01469 
01470     return MB_SUCCESS;
01471 }
01472 
01473 template < typename T >
01474 ErrorCode check_shared_ents( ParallelComm& pcomm, Tag tagh, T fact, MPI_Op mpi_op )
01475 {
01476     // get the shared entities
01477     Range shared_ents;
01478     ErrorCode rval = pcomm.get_shared_entities( -1, shared_ents );CHKERR( rval );
01479 
01480     std::vector< int > shprocs( MAX_SHARING_PROCS );
01481     std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );
01482     std::vector< T > dum_vals( shared_ents.size() );
01483     int np;
01484     unsigned char pstatus;
01485 
01486     rval = pcomm.get_moab()->tag_get_data( tagh, shared_ents, &dum_vals[0] );CHKERR( rval );
01487 
01488     // for each shared entity, check the tag value against the expected reduction of 'fact' over its np sharing procs
01489     Range::iterator rit;
01490     int i = 0;
01491     for( rit = shared_ents.begin(); rit != shared_ents.end(); ++rit, i++ )
01492     {
01493         rval = pcomm.get_sharing_data( *rit, &shprocs[0], &shhandles[0], pstatus, np );CHKERR( rval );
01494         if( 1 == np && shprocs[0] != (int)pcomm.proc_config().proc_rank() ) np++;
01495         bool with_root = std::find( &shprocs[0], &shprocs[np], 0 ) - &shprocs[0] != np || !pcomm.rank();
01496         if( mpi_op == MPI_SUM )
01497         {
01498             if( dum_vals[i] != fact * np ) return MB_FAILURE;
01499         }
01500         else if( mpi_op == MPI_PROD )
01501         {
01502             if( dum_vals[i] != pow( fact, np ) ) return MB_FAILURE;
01503         }
01504         else if( mpi_op == MPI_MAX )
01505         {
01506             if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
01507         }
01508         else if( mpi_op == MPI_MIN )
01509         {
01510             if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
01511         }
01512         else
01513             return MB_FAILURE;
01514     }
01515 
01516     return MB_SUCCESS;
01517 }
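
// Expected values verified above, e.g. for fact = 2 on an entity shared by np = 3 procs:
//   MPI_SUM -> 2 * 3 = 6,  MPI_PROD -> 2^3 = 8.
// For MPI_MAX / MPI_MIN the check applies only where rank 0 participates (with_root); in the
// callers below, rank 0 is the only rank that sets a non-default value, so the reduced value
// there must equal fact itself.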
01518 
01519 template < typename T >
01520 ErrorCode test_reduce_tags( const char*, DataType tp )
01521 {
01522     ErrorCode rval;
01523     Core moab_instance;
01524     Interface& mb = moab_instance;
01525     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01526 
01527     int rank, size;
01528     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01529     MPI_Comm_size( MPI_COMM_WORLD, &size );
01530 
01531     // build distributed quad mesh
01532     Range quad_range;
01533     EntityHandle verts[9];
01534     int vert_ids[9];
01535     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01536     PCHECK( MB_SUCCESS == rval );
01537     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01538     PCHECK( MB_SUCCESS == rval );
01539 
01540     Tag test_tag;
01541     T def_val = 2;
01542     Range dum_range;
01543     std::vector< T > dum_vals;
01544 
01545     // T, MPI_SUM
01546     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01547     rval = pcomm.reduce_tags( test_tag, MPI_SUM, dum_range );CHKERR( rval );
01548     rval = check_shared_ents( pcomm, test_tag, (T)2, MPI_SUM );CHKERR( rval );
01549     rval = mb.tag_delete( test_tag );
01550 
01551     // T, MPI_PROD
01552     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01553     rval = pcomm.reduce_tags( test_tag, MPI_PROD, dum_range );CHKERR( rval );
01554     rval = check_shared_ents( pcomm, test_tag, (T)2, MPI_PROD );CHKERR( rval );
01555     rval = mb.tag_delete( test_tag );
01556 
01557     // T, MPI_MAX
01558     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01559     // get the shared entities and, on rank 0, set a different (larger) value on them
01560     rval = pcomm.get_shared_entities( -1, dum_range, -1, false, false );CHKERR( rval );
01561     if( pcomm.rank() == 0 )
01562     {
01563         dum_vals.resize( dum_range.size(), (T)3 );
01564         rval = mb.tag_set_data( test_tag, dum_range, &dum_vals[0] );CHKERR( rval );
01565     }
01566     rval = pcomm.reduce_tags( test_tag, MPI_MAX, dum_range );CHKERR( rval );
01567     rval = check_shared_ents( pcomm, test_tag, (T)3, MPI_MAX );CHKERR( rval );
01568     rval = mb.tag_delete( test_tag );
01569 
01570     // T, MPI_MIN
01571     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01572     // on rank 0, set a different (smaller) value on the same entities
01573     if( pcomm.rank() == 0 )
01574     {
01575         std::fill( dum_vals.begin(), dum_vals.end(), (T)-1 );
01576         rval = mb.tag_set_data( test_tag, dum_range, &dum_vals[0] );CHKERR( rval );
01577     }
01578     rval = pcomm.reduce_tags( test_tag, MPI_MIN, dum_range );CHKERR( rval );
01579     rval = check_shared_ents( pcomm, test_tag, (T)-1, MPI_MIN );CHKERR( rval );
01580     rval = mb.tag_delete( test_tag );
01581 
01582     return MB_SUCCESS;
01583 }
01584 
01585 ErrorCode test_reduce_tag_failures( const char* )
01586 {
01587     // test things that should fail
01588     ErrorCode rval;
01589     Core moab_instance;
01590     Interface& mb = moab_instance;
01591     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01592 
01593     int rank, size;
01594     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01595     MPI_Comm_size( MPI_COMM_WORLD, &size );
01596 
01597     // build distributed quad mesh
01598     Range quad_range;
01599     EntityHandle verts[9];
01600     int vert_ids[9];
01601     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01602     PCHECK( MB_SUCCESS == rval );
01603     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01604     PCHECK( MB_SUCCESS == rval );
01605 
01606     Tag test_tag;
01607     Range dum_range;
01608     int def_int;
01609     double def_dbl;
01610 
01611     // explicit destination tag of different type
01612     Tag test_dest;
01613     std::vector< Tag > src_tags, dest_tags;
01614     rval = mb.tag_get_handle( "test_tag", 1, MB_TYPE_INTEGER, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_int );CHKERR( rval );
01615     src_tags.push_back( test_tag );
01616     rval = mb.tag_get_handle( "dtest_tag", 1, MB_TYPE_DOUBLE, test_dest, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
01617     dest_tags.push_back( test_dest );
01618     rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_MIN, dum_range );
01619     PCHECK( rval == MB_FAILURE );
01620     rval = mb.tag_delete( test_dest );CHKERR( rval );
01621     rval = mb.tag_delete( test_tag );CHKERR( rval );
01622 
01623     // tag w/ no default value
01624     rval = mb.tag_get_handle( "test_tag", 1, MB_TYPE_INTEGER, test_tag, MB_TAG_DENSE | MB_TAG_CREAT );CHKERR( rval );
01625     rval = pcomm.reduce_tags( test_tag, MPI_MIN, dum_range );
01626     PCHECK( rval == MB_ENTITY_NOT_FOUND );
01627     rval = mb.tag_delete( test_tag );
01628 
01629     return MB_SUCCESS;
01630 }
01631 
01632 ErrorCode test_reduce_tags( const char* )
01633 {
01634     ErrorCode rval = MB_SUCCESS, tmp_rval;
01635 
01636     tmp_rval = test_reduce_tags< int >( "test_reduce_tags (int)", MB_TYPE_INTEGER );
01637     if( MB_SUCCESS != tmp_rval )
01638     {
01639         std::cout << "test_reduce_tags failed for int data type." << std::endl;
01640         rval = tmp_rval;
01641     }
01642 
01643     tmp_rval = test_reduce_tags< double >( "test_reduce_tags (dbl)", MB_TYPE_DOUBLE );
01644     if( MB_SUCCESS != tmp_rval )
01645     {
01646         std::cout << "test_reduce_tags failed for double data type." << std::endl;
01647         rval = tmp_rval;
01648     }
01649 
01650     return rval;
01651 }
01652 
01653 ErrorCode test_reduce_tag_explicit_dest( const char* )
01654 {
01655     // test reduce_tags with an explicit destination tag
01656     ErrorCode rval;
01657     Core moab_instance;
01658     Interface& mb = moab_instance;
01659     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01660 
01661     int rank, size;
01662     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01663     MPI_Comm_size( MPI_COMM_WORLD, &size );
01664 
01665     // build distributed quad mesh
01666     Range quad_range;
01667     EntityHandle verts[9];
01668     int vert_ids[9];
01669     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01670     PCHECK( MB_SUCCESS == rval );
01671     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01672     PCHECK( MB_SUCCESS == rval );
01673 
01674     Tag src_tag, dest_tag;
01675     Range dum_range;
01676     double def_dbl = 5.0;
01677 
01678     // src tag has one value, pre-existing dest tag has another; should get MPI_Op of src values
01679     std::vector< Tag > src_tags, dest_tags;
01680     // create the tags
01681     rval = mb.tag_get_handle( "src_tag", 1, MB_TYPE_DOUBLE, src_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
01682     src_tags.push_back( src_tag );
01683     rval = mb.tag_get_handle( "dest_tag", 1, MB_TYPE_DOUBLE, dest_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
01684     dest_tags.push_back( dest_tag );
01685     // set values for src tag to one value, dest tag to another
01686     rval = pcomm.get_shared_entities( -1, dum_range, -1, false, false );CHKERR( rval );
01687     std::vector< double > tag_vals( dum_range.size(), 1.0 );
01688     rval = mb.tag_set_data( src_tag, dum_range, &tag_vals[0] );CHKERR( rval );
01689     std::fill( tag_vals.begin(), tag_vals.end(), 2.0 );
01690     rval = mb.tag_set_data( dest_tag, dum_range, &tag_vals[0] );CHKERR( rval );
01691     // do the reduce
01692     rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_SUM, dum_range );CHKERR( rval );
01693     // check the reduced value: with src = 1.0 on every proc, MPI_SUM should yield 1.0 * np on each shared entity
01694     rval = check_shared_ents< double >( pcomm, dest_tag, (double)1.0, MPI_SUM );CHKERR( rval );
01695 
01696     rval = mb.tag_delete( src_tag );CHKERR( rval );
01697     rval = mb.tag_delete( dest_tag );CHKERR( rval );
01698 
01699     return MB_SUCCESS;
01700 }
01701 
01702 ErrorCode test_delete_entities( const char* filename )
01703 {
01704     Core mb_instance;
01705     Interface& moab = mb_instance;
01706     ErrorCode rval;
01707 
01708     rval = moab.load_file( filename, 0,
01709                            "PARALLEL=READ_PART;"
01710                            "PARTITION=PARALLEL_PARTITION;"
01711                            "PARALLEL_RESOLVE_SHARED_ENTS" );CHKERR( rval );
01712 
01713     // Get ghost elements
01714     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
01715     // get some edges and faces, and delete some
01716     Range local;
01717     rval = moab.get_entities_by_dimension( 0, 1, local );CHKERR( rval );
01718     rval = moab.get_entities_by_dimension( 0, 2, local );CHKERR( rval );  // appends to the same range
01719 
01720     Range local2;  // extract about half of those
01721     std::copy( local.begin(), local.begin() + local.size() / 2, range_inserter( local2 ) );
01722 
01723     // delete local 2
01724     rval = pcomm->delete_entities( local2 );CHKERR( rval );
01725 
01726     for( Range::iterator it = local2.begin(); it != local2.end(); ++it )
01727     {
01728         if( mb_instance.is_valid( *it ) ) return MB_FAILURE;
01729     }
01730     const char* opt = "PARALLEL=WRITE_PART";
01731     rval            = moab.write_file( "tmpx.h5m", 0, opt );CHKERR( rval );
01732     if( pcomm->proc_config().proc_rank() == 0 ) remove( "tmpx.h5m" );
01733 
01734     return MB_SUCCESS;
01735 }
01736 
01737 ErrorCode test_ghost_polyhedra( const char* filename )
01738 {
01739     Core mb_instance;
01740     Interface& moab = mb_instance;
01741     ErrorCode rval;
01742 
01743     rval = moab.load_file( filename, 0,
01744                            "PARALLEL=READ_PART;"
01745                            "PARTITION=PARALLEL_PARTITION;"
01746                            "PARALLEL_RESOLVE_SHARED_ENTS;"
01747                            "PARALLEL_GHOSTS=3.0.1" );CHKERR( rval );
01748 
01749     return MB_SUCCESS;
01750 }
01751 ErrorCode test_too_few_parts( const char* filename )
01752 {
01753     Core mb_instance;
01754     Interface& moab = mb_instance;
01755     ErrorCode rval;
01756 
01757     rval = moab.load_file( filename, 0,
01758                            "PARALLEL=READ_PART;"
01759                            "PARTITION=PARALLEL_PARTITION;"
01760                            "PARALLEL_RESOLVE_SHARED_ENTS;" );
01761     // if(rval==MB_SUCCESS)
01762     // return MB_FAILURE;
01763 
01764     return rval;
01765 }
01766 
01767 ErrorCode test_sequences_after_ghosting( const char* filename )
01768 {
01769     Core mb_instance;
01770     Interface& moab = mb_instance;
01771     ErrorCode rval;
01772 
01773     rval = moab.load_file( filename, 0,
01774                            "PARALLEL=READ_PART;"
01775                            "PARTITION=PARALLEL_PARTITION;"
01776                            "PARALLEL_RESOLVE_SHARED_ENTS;"
01777                            "PARALLEL_GHOSTS=3.2.1;"
01778                            "PARALLEL_SEQUENCE_FACTOR=1.5" );CHKERR( rval );
01779 
01780     // get all elements of dimension 3, and check they are on one sequence, with connect_iterate
01781     Range elems;
01782     rval = moab.get_entities_by_dimension( 0, 3, elems );CHKERR( rval );
01783     if( elems.psize() != 1 )
01784     {
01785         std::cout << " elems.psize() = " << elems.psize() << "\n";
01786         return MB_FAILURE;
01787     }
01788     // we want only one sequence
01789     int count, vpere;
01790     EntityHandle* conn_ptr;
01791     rval = moab.connect_iterate( elems.begin(), elems.end(), conn_ptr, vpere, count );CHKERR( rval );
01792 
01793     if( count != (int)elems.size() )
01794     {
01795         std::cout << " more than one sequence:  elems.size() = " << elems.size() << "  count:" << count << "\n";
01796         return MB_FAILURE;
01797     }
01798     // check also global id tag, which is dense
01799     Tag id_tag          = moab.globalId_tag();
01800     void* globalid_data = NULL;
01801     rval                = moab.tag_iterate( id_tag, elems.begin(), elems.end(), count, globalid_data );CHKERR( rval );
01802     if( count != (int)elems.size() )
01803     {
01804         std::cout << " more than one tag sequence:  elems.size() = " << elems.size() << "  count:" << count << "\n";
01805         return MB_FAILURE;
01806     }
01807 
01808     // repeat the tests for vertex sequences
01809     // get all vertices (dimension 0), and check they are on one sequence, with coords_iterate
01810     Range verts;
01811     rval = moab.get_entities_by_dimension( 0, 0, verts );CHKERR( rval );
01812     if( verts.psize() != 1 )
01813     {
01814         std::cout << " verts.psize() = " << verts.psize() << "\n";
01815         return MB_FAILURE;
01816     }
01817     //
01818     double *x_ptr, *y_ptr, *z_ptr;
01819     rval = moab.coords_iterate( verts.begin(), verts.end(), x_ptr, y_ptr, z_ptr, count );CHKERR( rval );
01820 
01821     if( count != (int)verts.size() )
01822     {
01823         std::cout << " more than one sequence:  verts.size() = " << verts.size() << "  count:" << count << "\n";
01824         return MB_FAILURE;
01825     }
01826 
01827     rval = moab.tag_iterate( id_tag, verts.begin(), verts.end(), count, globalid_data );CHKERR( rval );
01828     if( count != (int)verts.size() )
01829     {
01830         std::cout << " more than one tag sequence:  verts.size() = " << verts.size() << "  count:" << count << "\n";
01831         return MB_FAILURE;
01832     }
01833     return MB_SUCCESS;
01834 }
01835 
01836 // test trivial partition as used by iMOAB send/receive mesh methods
01837 // it is hooked to ParCommGraph, but it does not really depend on anything from MOAB;
01838 // the methods tested here are just utilities
01839 
01840 void test_trivial_partition()
01841 {
01842     // nothing is modified inside moab
01843     // by these methods;
01844     // to instantiate a ParCommGraph, we just need 2 groups;
01845     // the groups may overlap, but that case is not tested yet
01846 
01847     // create 2 groups; one over tasks 0 and 1, one over task 1 only
01848     MPI_Group worldg;
01849     MPI_Comm duplicate;
01850     MPI_Comm_dup( MPI_COMM_WORLD, &duplicate );
01851     MPI_Comm_group( duplicate, &worldg );
01852 
01853     // this is usually run on 2 processes
01854 
01855     int rank[2] = { 0, 1 };
01856     MPI_Group gr1, gr2;
01857     MPI_Group_incl( worldg, 2, rank, &gr1 );      // will contain 2 ranks, 0 and 1
01858     MPI_Group_incl( worldg, 1, &rank[1], &gr2 );  // will contain 1 rank only (1)
01859 
01860     int comp1 = 10, comp2 = 12;
01861     ParCommGraph* pgr = new ParCommGraph( duplicate, gr1, gr2, comp1, comp2 );
01862 
01863     std::map< int, Range > ranges_to_send;
01864     std::vector< int > number_elems_per_part;
01865     number_elems_per_part.push_back( 6 );
01866     number_elems_per_part.push_back( 10 );
01867 
01868     std::cout << " send sizes ";
01869     for( int k = 0; k < (int)number_elems_per_part.size(); k++ )
01870     {
01871         std::cout << " " << number_elems_per_part[k];
01872     }
01873     std::cout << "\n";
01874 
01875     std::cout << "\n";
01876     pgr->compute_trivial_partition( number_elems_per_part );
01877 
01878     Range verts( 10, 20 );
01879     pgr->split_owned_range( 0, verts );
01880 
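    // Note: ranges_to_send is declared above but never populated in this test, so the loop
    // below prints nothing; presumably it is kept only to illustrate the intended reporting.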
01881     for( std::map< int, Range >::iterator it = ranges_to_send.begin(); it != ranges_to_send.end(); ++it )
01882     {
01883         Range& ran = it->second;
01884         std::cout << " receiver " << it->first << " receive range: [" << ran[0] << ", " << ran[ran.size() - 1]
01885                   << "] \n";
01886     }
01887 
01888     delete pgr;
01889 
01890     MPI_Group_free( &worldg );
01891     MPI_Group_free( &gr1 );
01892     MPI_Group_free( &gr2 );
01893     MPI_Comm_free( &duplicate );
01894 }