MOAB: Mesh Oriented datABase  (version 5.2.1)
parallel_unit_tests.cpp
00001 #include "moab/ParallelComm.hpp"
00002 #include "MBParallelConventions.h"
00003 #include "moab/ParCommGraph.hpp"
00004 #include "ReadParallel.hpp"
00005 #include "moab/FileOptions.hpp"
00006 #include "MBTagConventions.hpp"
00007 #include "moab/Core.hpp"
00008 #include "moab_mpi.h"
00009 #include "TestUtil.hpp"
00010 
00011 #include <iostream>
00012 #include <algorithm>
00013 #include <map>
00014 #include <sstream>
00015 #include <assert.h>
00016 #if !defined( _MSC_VER ) && !defined( __MINGW32__ )
00017 #include <unistd.h>
00018 #endif
00019 
00020 using namespace moab;
00021 
00022 #define CHKERR( a )                                                                                   \
00023     do                                                                                                \
00024     {                                                                                                 \
00025         ErrorCode val = ( a );                                                                        \
00026         if( MB_SUCCESS != val )                                                                       \
00027         {                                                                                             \
00028             std::cerr << "Error code  " << val << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
00029             return val;                                                                               \
00030         }                                                                                             \
00031     } while( false )
00032 
00033 #define PCHECK( A ) \
00034     if( is_any_proc_error( !( A ) ) ) return report_error( __FILE__, __LINE__ )
00035 
00036 ErrorCode report_error( const char* file, int line )
00037 {
00038     std::cerr << "Failure at " << file << ':' << line << std::endl;
00039     return MB_FAILURE;
00040 }
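// Illustrative use of the two error macros above (a sketch mirroring the tests below):
//
//     ErrorCode rval = moab.load_file( filename );CHKERR( rval );  // local check: return on this rank only
//     PCHECK( expected == actual );                                // collective check: all ranks agree on pass/fail
//
// CHKERR returns from the enclosing function on the failing rank alone, while PCHECK
// goes through is_any_proc_error() and therefore must be reached by every rank.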
00041 
00042 /**************************************************************************
00043                      Utility Method Declarations
00044  **************************************************************************/
00045 
00046 // Get processors an entity is shared with.
00047 ErrorCode get_sharing_processors( Interface& moab, EntityHandle entity, std::vector< int >& other_procs_out );
00048 
00049 // Create a parallel mesh
00050 //
00051 // Each processor will create four quads.
00052 // Groups of four quads will be arranged as follows:
00053 // +------+------+------+------+------+-----
00054 // |             |             |
00055 // |             |             |
00056 // +    Proc 0   +    Proc 2   +    Proc 4
00057 // |             |             |
00058 // |             |             |
00059 // +------+------+------+------+------+-----
00060 // |             |             |
00061 // |             |             |
00062 // +    Proc 1   +    Proc 3   +    Proc 5
00063 // |             |             |
00064 // |             |             |
00065 // +------+------+------+------+------+-----
00066 //
00067 // Vertices will be enumerated as follows:
00068 // 1------6-----11-----16-----21-----26-----
00069 // |             |             |
00070 // |             |             |
00071 // 2      7     12     17     22     27
00072 // |             |             |
00073 // |             |             |
00074 // 3------8-----13-----18-----23-----28-----
00075 // |             |             |
00076 // |             |             |
00077 // 4      9     14     19     24     29
00078 // |             |             |
00079 // |             |             |
00080 // 5-----10-----15-----20-----25-----30-----
00081 //
00082 // Element IDs will be 4*rank+1 .. 4*rank+4
00083 //
00084 // If output_sets is non-null, it will be populated with 2 or 3
00085 // set handles.  A set handle is created for each group of four
00086 // adjacent processes, such that one is created across procs 0 to 3,
00087 // one is created for procs 2 to 5, etc.  An additional set is created
00088 // that spans all of the processes.  The set spanning all procs is
00089 // returned first.  The other two sets are returned in the order of the
00090 // largest contained process rank, where the last entry is zero if
00091 // there is only one additional set.
00092 ErrorCode parallel_create_mesh( Interface& mb, int output_vertex_ids[9], EntityHandle output_vertex_handles[9],
00093                                 Range& output_elements, EntityHandle output_sets[3] = 0 );
00094 
00095 // Test if is_my_error is non-zero on any processor in MPI_COMM_WORLD
00096 int is_any_proc_error( int is_my_error );
00097 
00098 /**************************************************************************
00099                            Test  Declarations
00100  **************************************************************************/
00101 
00102 // Check consistency of sharing data.  (E.g. compare sharing procs for
00103 // vertices to that of adjacent elements, compare sharing data for
00104 // interfaces with that of contained entities, etc.)
00105 ErrorCode test_elements_on_several_procs( const char* filename );
00106 // Test correct ghosting of elements
00107 ErrorCode test_ghost_elements_3_2_1( const char* filename );
00108 ErrorCode test_ghost_elements_3_2_2( const char* filename );
00109 ErrorCode test_ghost_elements_3_0_1( const char* filename );
00110 ErrorCode test_ghost_elements_2_0_1( const char* filename );
00111 // Test exchange of tag data on ghost elements
00112 ErrorCode test_ghost_tag_exchange( const char* filename );
00113 // Regression test for a bug where exchange_tags failed if a dense tag could not
00114 // be queried for all ghost entities (e.g. a tag with no default value)
00115 ErrorCode regression_ghost_tag_exchange_no_default( const char* filename );
00116 // Test owners for interface entities
00117 ErrorCode test_interface_owners( const char* );
00118 // Test data for shared interface entities with one level of ghosting
00119 ErrorCode regression_owners_with_ghosting( const char* );
00120 // Verify all sharing data for vertices with one level of ghosting
00121 ErrorCode test_ghosted_entity_shared_data( const char* );
00122 // Test assignment of global IDs
00123 ErrorCode test_assign_global_ids( const char* );
00124 // Test shared sets
00125 ErrorCode test_shared_sets( const char* );
00126 // Test reduce_tags
00127 ErrorCode test_reduce_tags( const char* );
00128 // Test reduce_tags failures
00129 ErrorCode test_reduce_tag_failures( const char* );
00130 // Test reduce_tags with explicit destination tag
00131 ErrorCode test_reduce_tag_explicit_dest( const char* );
00132 // Test delete_entities
00133 ErrorCode test_delete_entities( const char* );
00134 // Test ghosting polyhedra
00135 ErrorCode test_ghost_polyhedra( const char* );
00136 // Test failed read with too few parts in partition
00137 ErrorCode test_too_few_parts( const char* );
00138 // Test broken sequences due to ghosting
00139 ErrorCode test_sequences_after_ghosting( const char* );
00140 // Test trivial partition in use by iMOAB
00141 void test_trivial_partition();
00142 
00143 /**************************************************************************
00144                               Main Method
00145  **************************************************************************/
00146 
00147 #define RUN_TEST_ARG2( A, B ) run_test( &A, #A, B )
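// For example, RUN_TEST_ARG2( test_shared_sets, 0 ) expands to
//     run_test( &test_shared_sets, "test_shared_sets", 0 );
// so each test is reported under its own function name.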
00148 
00149 int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
00150 {
00151     ErrorCode result = ( *func )( file_name );
00152     int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
00153     int rank;
00154     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00155     if( rank == 0 )
00156     {
00157         if( is_err )
00158             std::cout << func_name << " : FAILED!!" << std::endl;
00159         else
00160             std::cout << func_name << " : success" << std::endl;
00161     }
00162 
00163     return is_err;
00164 }
00165 
00166 int main( int argc, char* argv[] )
00167 {
00168     int rank, size;
00169     MPI_Init( &argc, &argv );
00170     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00171     MPI_Comm_size( MPI_COMM_WORLD, &size );
00172 
00173     int pause_proc = -1;
00174     std::string filename;
00175     for( int i = 1; i < argc; ++i )
00176     {
00177         if( !strcmp( argv[i], "-p" ) )
00178         {
00179             ++i;
00180             assert( i < argc );
00181             pause_proc = atoi( argv[i] );
00182         }
00183         else if( !filename.size() )
00184         {
00185             filename = std::string( argv[i] );
00186         }
00187         else
00188         {
00189             std::cerr << "Invalid arg: \"" << argv[i] << '"' << std::endl
00190                       << "Usage: " << argv[0] << " [-p <rank>] [<filename>]" << std::endl;
00191             exit( 1 );
00192         }
00193     }
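    // Illustrative invocation (the binary and mesh names are examples only):
    //     mpiexec -np 4 ./parallel_unit_tests -p 1 64bricks_512hex.h5m
    // runs the suite on four ranks and pauses rank 1 so a debugger can be attached.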
00194 
00195     if( !filename.size() )
00196     {
00197 #ifdef MOAB_HAVE_HDF5
00198         filename = TestDir + "/64bricks_512hex.h5m";
00199 #else
00200         filename = TestDir + "/64bricks_512hex.vtk";
00201 #endif
00202     }
00203     std::cout << "Loading " << filename << "..\n";
00204 #ifdef MOAB_HAVE_HDF5
00205     std::string filename2 = TestDir + "/64bricks_1khex.h5m";
00206     std::string filename3 = TestDir + "/twoPolyh.h5m";
00207     std::string filename4 = TestDir + "/onepart.h5m";
00208 #endif
00209 
00210     if( pause_proc != -1 )
00211     {
00212 #if !defined( _MSC_VER ) && !defined( __MINGW32__ )
00213         std::cout << "Processor " << rank << " of " << size << " with PID " << getpid() << std::endl;
00214         std::cout.flush();
00215 #endif
00216         // loop forever on requested processor, giving the user time
00217         // to attach a debugger.  Once the debugger is attached, the user
00218         // can change 'pause'.  E.g. in gdb do "set var pause = 0"
00219         if( pause_proc == rank )
00220         {
00221             volatile int pause = 1;
00222             while( pause )
00223                 ;
00224         }
00225 
00226         MPI_Barrier( MPI_COMM_WORLD );
00227         std::cout << "Processor " << rank << " resuming" << std::endl;
00228     }
00229 
00230     int num_errors = 0;
00231 #ifdef MOAB_HAVE_HDF5
00232     num_errors += RUN_TEST_ARG2( test_elements_on_several_procs, filename.c_str() );
00233     num_errors += RUN_TEST_ARG2( test_ghost_elements_3_2_1, filename.c_str() );
00234     num_errors += RUN_TEST_ARG2( test_ghost_elements_3_2_2, filename.c_str() );
00235     num_errors += RUN_TEST_ARG2( test_ghost_elements_3_0_1, filename.c_str() );
00236     num_errors += RUN_TEST_ARG2( test_ghost_elements_2_0_1, filename.c_str() );
00237     num_errors += RUN_TEST_ARG2( test_ghost_tag_exchange, filename.c_str() );
00238     num_errors += RUN_TEST_ARG2( regression_ghost_tag_exchange_no_default, filename.c_str() );
00239     num_errors += RUN_TEST_ARG2( test_delete_entities, filename2.c_str() );
00240     num_errors += RUN_TEST_ARG2( test_sequences_after_ghosting, filename2.c_str() );
00241     if( 2 >= size )  // run this test only on one or two processors; the file has only two parts in its partition
00242         num_errors += RUN_TEST_ARG2( test_ghost_polyhedra, filename3.c_str() );
00243     num_errors += RUN_TEST_ARG2( test_too_few_parts, filename4.c_str() );
00244 #endif
00245     num_errors += RUN_TEST_ARG2( test_assign_global_ids, 0 );
00246     num_errors += RUN_TEST_ARG2( test_shared_sets, 0 );
00247     num_errors += RUN_TEST_ARG2( test_reduce_tags, 0 );
00248     num_errors += RUN_TEST_ARG2( test_reduce_tag_failures, 0 );
00249     num_errors += RUN_TEST_ARG2( test_reduce_tag_explicit_dest, 0 );
00250     num_errors += RUN_TEST_ARG2( test_interface_owners, 0 );
00251     num_errors += RUN_TEST_ARG2( test_ghosted_entity_shared_data, 0 );
00252     num_errors += RUN_TEST_ARG2( regression_owners_with_ghosting, 0 );
00253     num_errors += RUN_TEST( test_trivial_partition );
00254     if( rank == 0 )
00255     {
00256         if( !num_errors )
00257             std::cout << "All tests passed" << std::endl;
00258         else
00259             std::cout << num_errors << " TESTS FAILED!" << std::endl;
00260     }
00261 
00262     MPI_Finalize();
00263     return num_errors;
00264 }
00265 
00266 /**************************************************************************
00267                      Utility Method Implementations
00268  **************************************************************************/
00269 
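// Note on the tag layout assumed below: when an entity is shared with exactly one
// other process, that rank is stored in the single-valued PARALLEL_SHARED_PROC tag;
// otherwise that tag is negative and the full list lives in the PARALLEL_SHARED_PROCS
// tag, terminated by a negative entry, which is what the final loop relies on.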
00270 ErrorCode get_sharing_processors( Interface& moab, EntityHandle entity, std::vector< int >& other_procs_out )
00271 {
00272     ErrorCode rval;
00273 
00274     // get tags for parallel data
00275     Tag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
00276     rval = moab.tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedp_tag );CHKERR( rval );
00277     rval = moab.tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER, sharedps_tag );CHKERR( rval );
00278     rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedh_tag );CHKERR( rval );
00279     rval = moab.tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE, sharedhs_tag );CHKERR( rval );
00280     rval = moab.tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatus_tag );CHKERR( rval );
00281 
00282     other_procs_out.clear();
00283     char status;
00284     rval = moab.tag_get_data( pstatus_tag, &entity, 1, &status );CHKERR( rval );
00285     if( !( status & PSTATUS_SHARED ) ) return MB_SUCCESS;
00286 
00287     int proc_id;
00288     rval = moab.tag_get_data( sharedp_tag, &entity, 1, &proc_id );CHKERR( rval );
00289     if( proc_id >= 0 )
00290     {
00291         other_procs_out.push_back( proc_id );
00292         return MB_SUCCESS;
00293     }
00294 
00295     int procs[MAX_SHARING_PROCS];
00296     rval = moab.tag_get_data( sharedps_tag, &entity, 1, procs );CHKERR( rval );
00297     for( int i = 0; i < MAX_SHARING_PROCS && procs[i] >= 0; ++i )
00298         other_procs_out.push_back( procs[i] );
00299     return MB_SUCCESS;
00300 }
00301 
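// Because this reduces with MPI_MAX over MPI_COMM_WORLD, every rank receives the
// same answer and every rank must call it; this is what makes PCHECK collective.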
00302 int is_any_proc_error( int is_my_error )
00303 {
00304     int result = 0;
00305     int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
00306     return err || result;
00307 }
00308 
00309 ErrorCode parallel_create_mesh( Interface& mb, int vtx_ids[9], EntityHandle vtx_handles[9], Range& range,
00310                                 EntityHandle* entity_sets )
00311 {
00312     // Each processor will create four quads.
00313     // Groups of four quads will be arranged as follows:
00314     // +------+------+------+------+------+-----
00315     // |             |             |
00316     // |             |             |
00317     // +    Proc 0   +    Proc 2   +    Proc 4
00318     // |             |             |
00319     // |             |             |
00320     // +------+------+------+------+------+-----
00321     // |             |             |
00322     // |             |             |
00323     // +    Proc 1   +    Proc 3   +    Proc 5
00324     // |             |             |
00325     // |             |             |
00326     // +------+------+------+------+------+-----
00327     //
00328     // Vertices will be enumerated as follows:
00329     // 1------6-----11-----16-----21-----26-----
00330     // |             |             |
00331     // |             |             |
00332     // 2      7     12     17     22     27
00333     // |             |             |
00334     // |             |             |
00335     // 3------8-----13-----18-----23-----28-----
00336     // |             |             |
00337     // |             |             |
00338     // 4      9     14     19     24     29
00339     // |             |             |
00340     // |             |             |
00341     // 5-----10-----15-----20-----25-----30-----
00342     //
00343     // Element IDs will be 4*rank+1 .. 4*rank+4
00344 
00345     int size, rank;
00346     MPI_Comm_size( MPI_COMM_WORLD, &size );
00347     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00348 
00349     const int first_vtx_id = 10 * ( rank / 2 ) + 2 * ( rank % 2 ) + 1;
00350     const double x         = 2.0 * ( rank / 2 );
00351     const double y         = 2.0 * ( rank % 2 );
00352 
00353     // create vertices
00354     const int idoff  = ( size % 2 && rank / 2 == size / 2 ) ? 0 : 2;
00355     const int idoff1 = rank ? 2 : idoff;
00356     const int idoff2 = idoff1 + idoff;
00357     const int ids[9] = { first_vtx_id,     first_vtx_id + 3 + idoff1, first_vtx_id + 6 + idoff2,
00358                          first_vtx_id + 1, first_vtx_id + 4 + idoff1, first_vtx_id + 7 + idoff2,
00359                          first_vtx_id + 2, first_vtx_id + 5 + idoff1, first_vtx_id + 8 + idoff2 };
00360     memcpy( vtx_ids, ids, sizeof( ids ) );
00361     const double coords[27] = { x, y,     0,     x + 1, y, 0,     x + 2, y,     0,     x, y + 1, 0,     x + 1, y + 1,
00362                                 0, x + 2, y + 1, 0,     x, y + 2, 0,     x + 1, y + 2, 0, x + 2, y + 2, 0 };
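    // Worked example: with more than one process, rank 0 gets first_vtx_id = 1 and
    // offsets idoff1 = 2, idoff2 = 4, so ids = {1,6,11, 2,7,12, 3,8,13} with
    // coordinates (0,0),(1,0),(2,0), (0,1),(1,1),(2,1), (0,2),(1,2),(2,2) (z = 0),
    // matching the enumeration sketched above.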
00363 
00364     ErrorCode rval;
00365     Tag id_tag;
00366 
00367     rval = mb.create_vertices( coords, 9, range );CHKERR( rval );
00368     assert( range.size() == 9 );
00369     std::copy( range.begin(), range.end(), vtx_handles );
00370     range.clear();
00371     id_tag = mb.globalId_tag();
00372     rval   = mb.tag_set_data( id_tag, vtx_handles, 9, &ids );CHKERR( rval );
00373 
00374     const EntityHandle conn[4][4] = { { vtx_handles[0], vtx_handles[3], vtx_handles[4], vtx_handles[1] },
00375                                       { vtx_handles[1], vtx_handles[4], vtx_handles[5], vtx_handles[2] },
00376                                       { vtx_handles[3], vtx_handles[6], vtx_handles[7], vtx_handles[4] },
00377                                       { vtx_handles[4], vtx_handles[7], vtx_handles[8], vtx_handles[5] } };
00378     for( int i = 0; i < 4; ++i )
00379     {
00380         const int id = 4 * rank + i + 1;
00381         EntityHandle h;
00382         rval = mb.create_element( MBQUAD, conn[i], 4, h );CHKERR( rval );
00383         range.insert( h );
00384         rval = mb.tag_set_data( id_tag, &h, 1, &id );CHKERR( rval );
00385     }
00386 
00387     if( !entity_sets ) return MB_SUCCESS;
00388 
00389     int set_ids[3] = { size + 1, rank / 2, rank / 2 + 1 };
00390     int nsets      = 0;
00391     rval           = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
00392     rval = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
00393     entity_sets[nsets] = 0;
00394     if( rank < 2 )
00395     {  // if first (left-most) column
00396         set_ids[1] = set_ids[2];
00397     }
00398     else if( rank / 2 < ( size - 1 ) / 2 )
00399     {  // if not last (right-most) column
00400         rval = mb.create_meshset( MESHSET_SET, entity_sets[nsets++] );CHKERR( rval );
00401     }
00402     for( int i = 0; i < nsets; ++i )
00403     {
00404         rval = mb.add_entities( entity_sets[i], range );CHKERR( rval );
00405     }
00406     rval = mb.tag_set_data( id_tag, entity_sets, nsets, set_ids );CHKERR( rval );
00407 
00408     return MB_SUCCESS;
00409 }
00410 
00411 /**************************************************************************
00412                            Test  Implementations
00413  **************************************************************************/
00414 
00415 ErrorCode test_elements_on_several_procs( const char* filename )
00416 {
00417     Core mb_instance;
00418     Interface& moab = mb_instance;
00419     ErrorCode rval;
00420     const char* geom_names[] = { "vertex", "curve", "surface", "volume", "unknown" };
00421 
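    // Rough summary of the read options used here (see MOAB's parallel read
    // documentation for the authoritative definitions): READ_DELETE has every
    // process read the whole file and then delete entities outside its partition;
    // PARTITION=GEOM_DIMENSION with PARTITION_VAL=3 uses the sets tagged
    // GEOM_DIMENSION == 3 (geometric volumes) as partition sets;
    // PARTITION_DISTRIBUTE spreads those sets across the processes; and
    // PARALLEL_RESOLVE_SHARED_ENTS resolves which entities are shared between
    // neighboring processes.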
00422     rval = moab.load_file( filename, 0,
00423                            "PARALLEL=READ_DELETE;"
00424                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00425                            "PARTITION_DISTRIBUTE;"
00426                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00427                            "PARALLEL_SEQUENCE_FACTOR=1.4" );CHKERR( rval );
00428 
00429     // test contents of interface sets against sharedEnts structure in pcomm;
00430     int my_error        = 0;
00431     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00432     rval                = pcomm->check_all_shared_handles();
00433     if( MB_SUCCESS != rval )
00434     {
00435         my_error = 1;
00436         std::cerr << "check_all_shared_handles test failed on proc " << pcomm->proc_config().proc_rank() << std::endl;
00437     }
00438     PCHECK( !my_error );
00439 
00440     // check adjacencies just to make sure they're consistent
00441     rval = mb_instance.check_adjacencies();
00442     if( MB_SUCCESS != rval ) my_error = 1;
00443     PCHECK( !my_error );
00444 
00445     Tag geom_tag, id_tag;
00446     rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHKERR( rval );
00447     id_tag = moab.globalId_tag();
00448 
00449     // Because we partitioned based on geometric volume sets, then for each
00450     // lower-dimension geometric entity set all of the contained entities of
00451     // the same dimension must be shared by the same set of processors
00452     Range shared, invalid;
00453     for( int dim = 2; dim > 0; --dim )
00454     {
00455         Range geom_sets;
00456         const void* tagvals[] = { &dim };
00457         rval                  = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &geom_tag, tagvals, 1, geom_sets );CHKERR( rval );
00458 
00459         for( Range::iterator j = geom_sets.begin(); j != geom_sets.end(); ++j )
00460         {
00461             Range ents;
00462             rval = moab.get_entities_by_dimension( *j, dim, ents );CHKERR( rval );
00463             if( ents.empty() ) continue;
00464 
00465             // get a single sharing list to compare with
00466             Range::iterator k = ents.begin();
00467             std::vector< int > procs;
00468             rval = get_sharing_processors( moab, *k, procs );CHKERR( rval );
00469             std::sort( procs.begin(), procs.end() );
00470             if( procs.size() > 1 ) shared.merge( ents );
00471 
00472             // compare other elements
00473             for( ++k; k != ents.end(); ++k )
00474             {
00475                 std::vector< int > tmp_procs;
00476                 rval = get_sharing_processors( moab, *k, tmp_procs );CHKERR( rval );
00477                 std::sort( tmp_procs.begin(), tmp_procs.end() );
00478                 if( tmp_procs != procs ) invalid.insert( *j );
00479             }
00480 
00481             // compare interior vertices
00482             ents.clear();
00483             rval = moab.get_entities_by_dimension( *j, 0, ents );CHKERR( rval );
00484             for( k = ents.begin(); k != ents.end(); ++k )
00485             {
00486                 std::vector< int > tmp_procs;
00487                 rval = get_sharing_processors( moab, *k, tmp_procs );CHKERR( rval );
00488                 if( tmp_procs != procs ) invalid.insert( *j );
00489             }
00490         }
00491     }
00492 
00493     // Report any geometric sets for which sharing was inconsistent
00494     if( !invalid.empty() )
00495     {
00496         std::cerr << "Elements or vertices owned by a single geometric entity are "
00497                   << "not shared by the same set of processors for the "
00498                   << "following geometric entities on process " << pcomm->proc_config().proc_rank() << ": ";
00499         for( Range::iterator i = invalid.begin(); i != invalid.end(); ++i )
00500         {
00501             int dim;
00502             int id;
00503             rval = moab.tag_get_data( geom_tag, &*i, 1, &dim );
00504             if( MB_SUCCESS != rval ) dim = 4;
00505             rval = moab.tag_get_data( id_tag, &*i, 1, &id );
00506             if( MB_SUCCESS != rval ) id = -1;
00507             std::cerr << geom_names[dim] << " " << id << ", ";
00508         }
00509         std::cerr << std::endl;
00510 
00511         my_error = 1;
00512     }
00513     PCHECK( !my_error );
00514 
00515     // for each shared element, check that its vertices are shared
00516     // with at least the same processes
00517     for( Range::iterator i = shared.begin(); i != shared.end(); ++i )
00518     {
00519         std::vector< int > procs;
00520         rval = get_sharing_processors( moab, *i, procs );CHKERR( rval );
00521         std::sort( procs.begin(), procs.end() );
00522 
00523         std::vector< EntityHandle > tmp;
00524         const EntityHandle* conn;
00525         int len;
00526         rval = moab.get_connectivity( *i, conn, len, false, &tmp );CHKERR( rval );
00527         for( int j = 0; j < len; ++j )
00528         {
00529             std::vector< int > vprocs;
00530             rval = get_sharing_processors( moab, conn[j], vprocs );CHKERR( rval );
00531             std::sort( vprocs.begin(), vprocs.end() );
00532             std::vector< int > diff( std::max( procs.size(), vprocs.size() ) );
00533             std::vector< int >::iterator k =
00534                 std::set_difference( procs.begin(), procs.end(), vprocs.begin(), vprocs.end(), diff.begin() );
00535             if( k != diff.begin() )  // difference is not empty
00536                 invalid.insert( conn[j] );
00537         }
00538     }
00539 
00540     // Report any vertices with insufficient sharing
00541     if( !invalid.empty() )
00542     {
00543         std::cerr << "Vertices must be shared with at least the union of the processes "
00544                   << "sharing the elements containing the vertex.  This is NOT true for "
00545                   << "the following vertices on process " << pcomm->proc_config().proc_rank() << ": " << invalid
00546                   << std::endl;
00547 
00548         my_error = 1;
00549     }
00550     PCHECK( !my_error );
00551 
00552     return MB_SUCCESS;
00553 }
00554 
00555 ErrorCode get_ghost_entities( ParallelComm& pcomm, Range& ghost_ents )
00556 {
00557     Range all_ents;
00558     ErrorCode rval;
00559 
00560     rval = pcomm.get_moab()->get_entities_by_handle( 0, all_ents );CHKERR( rval );
00561     std::vector< unsigned char > flags( all_ents.size() );
00562     rval = pcomm.get_moab()->tag_get_data( pcomm.pstatus_tag(), all_ents, &flags[0] );CHKERR( rval );
00563 
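    // An entity counts as a ghost here when its PSTATUS flags say it is not owned
    // locally and it is not an interface entity; shared interface entities are
    // deliberately excluded from the ghost list.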
00564     Range::iterator ins                            = ghost_ents.begin();
00565     std::vector< unsigned char >::const_iterator f = flags.begin();
00566     for( Range::iterator i = all_ents.begin(); i != all_ents.end(); ++i, ++f )
00567         if( ( *f & PSTATUS_NOT_OWNED ) && !( *f & PSTATUS_INTERFACE ) ) ins = ghost_ents.insert( ins, *i, *i );
00568 
00569     return MB_SUCCESS;
00570 }
00571 
00572 ErrorCode get_ents_from_geometric_sets( Interface& moab, const Tag tags[2], int dimension,
00573                                         const std::vector< int >& ids, Range& results )
00574 {
00575     ErrorCode rval;
00576     for( size_t i = 0; i < ids.size(); ++i )
00577     {
00578         const void* tag_vals[2] = { &dimension, &ids[i] };
00579         Range sets;
00580         rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, tags, tag_vals, 2, sets );CHKERR( rval );
00581         for( Range::iterator j = sets.begin(); j != sets.end(); ++j )
00582         {
00583             Range ents;
00584             rval = moab.get_entities_by_dimension( *j, dimension, ents );CHKERR( rval );
00585             results.merge( ents );
00586         }
00587     }
00588     return MB_SUCCESS;
00589 }
00590 
00591 /**\brief Given entire file and IDs of geometric sets, get expected ghost entities
00592  *
00593  *\param moab               The entire mesh, loaded in serial
00594  *\param partition_geom_ids IDs of: geometric volumes owned by this proc and interface topology
00595  *\param ghost_entity_ids   output list
00596  */
00597 ErrorCode get_expected_ghosts( Interface& moab, const std::vector< int > partition_geom_ids[4],
00598                                std::vector< int >& ghost_entity_ids, int ghost_dimension, int bridge_dimension,
00599                                int num_layers )
00600 {
00601     ErrorCode rval;
00602     Tag tags[2];
00603     rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tags[0] );CHKERR( rval );
00604     tags[1] = moab.globalId_tag();
00605 
00606     // get face entities defining interface
00607     Range skin;
00608     rval = get_ents_from_geometric_sets( moab, tags, 2, partition_geom_ids[2], skin );CHKERR( rval );
00609 
00610     // get adjacent entities
00611     Range iface_ghosts, iface_ents;
00612     if( bridge_dimension == 2 ) { iface_ents = skin; }
00613     else
00614     {
00615         rval = moab.get_adjacencies( skin, bridge_dimension, true, iface_ents, Interface::UNION );CHKERR( rval );
00616     }
00617     for( int n = 0; n < num_layers; ++n )
00618     {
00619         iface_ghosts.clear();
00620         rval = moab.get_adjacencies( iface_ents, ghost_dimension, false, iface_ghosts, Interface::UNION );CHKERR( rval );
00621         iface_ents.clear();
00622         rval = moab.get_adjacencies( iface_ghosts, bridge_dimension, true, iface_ents, Interface::UNION );CHKERR( rval );
00623     }
00624 
00625     // get volume entities owned by this process
00626     Range ents;
00627     rval = get_ents_from_geometric_sets( moab, tags, 3, partition_geom_ids[3], ents );CHKERR( rval );
00628 
00629     // get entities of ghost_dimension owned by this process
00630     Range owned;
00631     if( ghost_dimension == 3 )
00632         owned = ents;
00633     else
00634     {
00635         rval = moab.get_adjacencies( ents, ghost_dimension, false, owned, Interface::UNION );CHKERR( rval );
00636     }
00637 
00638     // remove owned entities from ghost list
00639     Range ghosts = subtract( iface_ghosts, owned );
00640 
00641     // get ids
00642     ghost_entity_ids.resize( ghosts.size() );
00643     rval = moab.tag_get_data( tags[1], ghosts, &ghost_entity_ids[0] );CHKERR( rval );
00644     return MB_SUCCESS;
00645 }
00646 
00647 ErrorCode test_ghost_elements( const char* filename, int ghost_dimension, int bridge_dimension, int num_layers )
00648 {
00649     Core mb_instance;
00650     Interface& moab = mb_instance;
00651     ErrorCode rval;
00652 
00653     std::ostringstream file_opts;
00654     file_opts << "PARALLEL=READ_DELETE;"
00655               << "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00656               << "PARTITION_DISTRIBUTE;"
00657               << "PARALLEL_RESOLVE_SHARED_ENTS;"
00658               << "PARALLEL_GHOSTS=" << ghost_dimension << '.' << bridge_dimension << '.' << num_layers;
00659 
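    // PARALLEL_GHOSTS=<ghost_dim>.<bridge_dim>.<num_layers>: ghost entities of
    // dimension ghost_dim that touch an owned entity through a bridge entity of
    // dimension bridge_dim, repeated for num_layers layers (e.g. "3.2.1" requests
    // one layer of 3-D elements across shared faces).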
00660     rval = moab.load_file( filename, 0, file_opts.str().c_str() );CHKERR( rval );
00661     Tag geom_tag, id_tag;
00662     rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, geom_tag );CHKERR( rval );
00663     id_tag = moab.globalId_tag();
00664 
00665     // Get partition sets
00666     Range partition_geom[4];
00667     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00668     partition_geom[3]   = pcomm->partition_sets();
00669     PCHECK( !partition_geom[3].empty() );
00670 
00671     rval = pcomm->check_all_shared_handles();CHKERR( rval );
00672 
00673     // exchange id tags to allow comparison by id
00674     Range tmp_ents;
00675     rval = pcomm->get_shared_entities( -1, tmp_ents, -1, false, true );CHKERR( rval );
00676     rval = pcomm->exchange_tags( id_tag, tmp_ents );CHKERR( rval );
00677 
00678     // Get geometric surfaces
00679     Range surfs, tmp;
00680     for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
00681     {
00682         tmp.clear();
00683         rval = moab.get_child_meshsets( *i, tmp );CHKERR( rval );
00684         surfs.merge( tmp );
00685     }
00686 
00687     // Get the set of geometric surfaces that represent the skin
00688     // of the union of the parts for this processor.  As we partition
00689     // based on geometric volumes, the interface must be represented
00690     // by some subset of the surfaces and their child geometric topology.
00691 
00692     int error = 0;
00693     std::ostringstream error_msg;
00694     Range ents, iface_surfs, iface_curves, iface_vertices;
00695     for( Range::iterator i = surfs.begin(); !error && i != surfs.end(); ++i )
00696     {
00697         ents.clear();
00698         rval = moab.get_entities_by_dimension( *i, ghost_dimension - 1, ents );CHKERR( rval );
00699         if( ents.empty() ) continue;
00700 
00701         std::vector< int > procs, tmp_procs;
00702         Range::iterator j = ents.begin();
00703         rval              = get_sharing_processors( moab, *j, procs );CHKERR( rval );
00704         for( ++j; !error && j != ents.end(); ++j )
00705         {
00706             tmp_procs.clear();
00707             rval = get_sharing_processors( moab, *j, tmp_procs );CHKERR( rval );
00708             if( tmp_procs != procs )
00709             {
00710                 error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
00711                           << "\tNot all entities in geometric surface are shared with"
00712                           << " same processor." << std::endl;
00713                 error = 1;
00714                 break;
00715             }
00716         }
00717 
00718         if( error ) break;
00719 
00720         // if surface is not part of inter-proc interface, skip it.
00721         if( procs.empty() ) continue;
00722         if( procs.size() != 1 )
00723         {
00724             error_msg << "Failure at " << __FILE__ << ':' << __LINE__ << std::endl
00725                       << "\tSurface elements shared with " << procs.size() << " processors." << std::endl;
00726             error = 1;
00727             break;
00728         }
00729         int other_rank = procs[0];
00730         if( other_rank == (int)pcomm->proc_config().proc_rank() ) continue;
00731 
00732         partition_geom[2].insert( *i );
00733         ents.clear();
00734         rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
00735         partition_geom[1].merge( ents );
00736     }
00737 
00738     // Don't do global communication until outside
00739     // of loops.  Otherwise we will deadlock if not all
00740     // procs iterate the same number of times.
00741     if( is_any_proc_error( error ) )
00742     {
00743         std::cerr << error_msg.str();
00744         return MB_FAILURE;
00745     }
00746 
00747     for( Range::iterator i = partition_geom[1].begin(); i != partition_geom[1].end(); ++i )
00748     {
00749         ents.clear();
00750         rval = moab.get_child_meshsets( *i, ents );CHKERR( rval );
00751         partition_geom[0].merge( ents );
00752     }
00753 
00754     std::vector< int > partn_geom_ids[4];
00755     for( int dim = 0; dim <= 3; ++dim )
00756     {
00757         partn_geom_ids[dim].resize( partition_geom[dim].size() );
00758         rval = moab.tag_get_data( id_tag, partition_geom[dim], &( partn_geom_ids[dim][0] ) );CHKERR( rval );
00759     }
00760 
00761     // get the global IDs of the ghosted entities
00762     Range ghost_ents;
00763     rval = get_ghost_entities( *pcomm, ghost_ents );CHKERR( rval );
00764     std::pair< Range::iterator, Range::iterator > vtx = ghost_ents.equal_range( MBVERTEX );
00765     ghost_ents.erase( vtx.first, vtx.second );
00766     std::vector< int > actual_ghost_ent_ids( ghost_ents.size() );
00767     rval = moab.tag_get_data( id_tag, ghost_ents, &actual_ghost_ent_ids[0] );CHKERR( rval );
00768 
00769     // read file in serial
00770     Core moab2;
00771     rval = moab2.load_file( filename );
00772     PCHECK( MB_SUCCESS == rval );
00773 
00774     // get the global IDs of the entities we expect to be ghosted
00775     std::vector< int > expected_ghost_ent_ids;
00776     rval = get_expected_ghosts( moab2, partn_geom_ids, expected_ghost_ent_ids, ghost_dimension, bridge_dimension,
00777                                 num_layers );
00778     PCHECK( MB_SUCCESS == rval );
00779 
00780     // check that the correct entities were ghosted
00781     std::sort( actual_ghost_ent_ids.begin(), actual_ghost_ent_ids.end() );
00782     std::sort( expected_ghost_ent_ids.begin(), expected_ghost_ent_ids.end() );
00783     PCHECK( expected_ghost_ent_ids == actual_ghost_ent_ids );
00784 
00785     // check we only have the partitioned and ghosted entities
00786     // on this processor.
00787     Range myents;
00788     for( Range::iterator i = partition_geom[3].begin(); i != partition_geom[3].end(); ++i )
00789     {
00790         ents.clear();
00791         rval = moab.get_entities_by_dimension( *i, 3, ents );CHKERR( rval );
00792         myents.merge( ents );
00793     }
00794     if( ghost_dimension != 3 )
00795     {
00796         ents.clear();
00797         rval = moab.get_adjacencies( myents, ghost_dimension, false, ents, Interface::UNION );
00798         PCHECK( MB_SUCCESS == rval );
00799         myents.swap( ents );
00800     }
00801     myents.merge( ghost_ents );
00802     ents.clear();
00803     rval = moab.get_entities_by_dimension( 0, ghost_dimension, ents );
00804     PCHECK( ents == myents );
00805 
00806     rval = pcomm->check_all_shared_handles();
00807     if( MB_SUCCESS != rval ) error = 1;
00808     PCHECK( !error );
00809 
00810     // done
00811     return MB_SUCCESS;
00812 }
00813 
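// The wrappers below encode their ghosting parameters in the name:
// test_ghost_elements_<ghost_dim>_<bridge_dim>_<num_layers>.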
00814 ErrorCode test_ghost_elements_3_2_1( const char* filename )
00815 {
00816     return test_ghost_elements( filename, 3, 2, 1 );
00817 }
00818 
00819 ErrorCode test_ghost_elements_3_2_2( const char* filename )
00820 {
00821     return test_ghost_elements( filename, 3, 2, 2 );
00822 }
00823 
00824 ErrorCode test_ghost_elements_3_0_1( const char* filename )
00825 {
00826     return test_ghost_elements( filename, 3, 0, 1 );
00827 }
00828 
00829 ErrorCode test_ghost_elements_2_0_1( const char* filename )
00830 {
00831     return test_ghost_elements( filename, 2, 0, 1 );
00832 }
00833 
00834 ErrorCode get_owner_handles( ParallelComm* pcomm, const Range& ents, EntityHandle* handle_arr )
00835 {
00836     size_t i = 0;
00837     int junk;
00838     for( Range::iterator j = ents.begin(); j != ents.end(); ++i, ++j )
00839     {
00840         ErrorCode rval = pcomm->get_owner_handle( *j, junk, handle_arr[i] );
00841         if( MB_SUCCESS != rval ) return rval;
00842     }
00843     return MB_SUCCESS;
00844 }
00845 
00846 ErrorCode test_ghost_tag_exchange( const char* filename )
00847 {
00848     Core mb_instance;
00849     Interface& moab = mb_instance;
00850     ErrorCode rval;
00851 
00852     rval = moab.load_file( filename, 0,
00853                            "PARALLEL=READ_DELETE;"
00854                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00855                            "PARTITION_DISTRIBUTE;"
00856                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00857                            "PARALLEL_GHOSTS=3.2.1;"
00858                            "PARALLEL_SEQUENCE_FACTOR=1.5" );CHKERR( rval );
00859 
00860     // Get ghost elements
00861     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00862     Range local, ghosts;
00863     rval = moab.get_entities_by_dimension( 0, 3, local );CHKERR( rval );
00864     Range::iterator i = local.begin();
00865     while( i != local.end() )
00866     {
00867         int rank;
00868         rval = pcomm->get_owner( *i, rank );CHKERR( rval );
00869         if( rank == (int)pcomm->proc_config().proc_rank() ) { ++i; }
00870         else
00871         {
00872             ghosts.insert( *i );
00873             i = local.erase( i );
00874         }
00875     }
00876 
00877     // create a tag to exchange
00878     Tag dense_test_tag;
00879     EntityHandle defval = 0;
00880     // rval = moab.tag_get_handle( "TEST-TAG", sizeof(EntityHandle), MB_TAG_DENSE,
00881     //                         dense_test_tag, &defval ); CHKERR(rval);
00882     rval = moab.tag_get_handle( "TEST-TAG", 1, MB_TYPE_HANDLE, dense_test_tag, MB_TAG_DENSE | MB_TAG_EXCL, &defval );CHKERR( rval );
00883 
00884     // for all entities that I own, set tag to handle value
00885     std::vector< EntityHandle > handles( local.size() ), handles2;
00886     std::copy( local.begin(), local.end(), handles.begin() );
00887     rval = moab.tag_set_data( dense_test_tag, local, &handles[0] );CHKERR( rval );
00888 
00889     // exchange tag data
00890     Range tmp_range;
00891     rval = pcomm->exchange_tags( dense_test_tag, tmp_range );CHKERR( rval );
00892 
00893     // make sure local values are unchanged
00894     handles2.resize( local.size() );
00895     rval = moab.tag_get_data( dense_test_tag, local, &handles2[0] );CHKERR( rval );
00896     PCHECK( handles == handles2 );
00897 
00898     // compare values on ghost entities
00899     handles.resize( ghosts.size() );
00900     handles2.resize( ghosts.size() );
00901     rval = moab.tag_get_data( dense_test_tag, ghosts, &handles2[0] );CHKERR( rval );
00902     rval = get_owner_handles( pcomm, ghosts, &handles[0] );CHKERR( rval );
00903     PCHECK( handles == handles2 );
00904 
00905     // now do it all again for a sparse tag
00906     Tag sparse_test_tag;
00907     // rval = moab.tag_get_handle( "TEST-TAG-2", sizeof(int), MB_TAG_DENSE,
00908     //                         MB_TYPE_INTEGER, sparse_test_tag, 0 ); CHKERR(rval);
00909     rval = moab.tag_get_handle( "TEST-TAG-2", 1, MB_TYPE_INTEGER, sparse_test_tag, MB_TAG_DENSE | MB_TAG_EXCL );CHKERR( rval );
00910 
00911     // for all entities that I own, set tag to my rank
00912     std::vector< int > procs1( local.size(), pcomm->proc_config().proc_rank() );
00913     rval = moab.tag_set_data( sparse_test_tag, local, &procs1[0] );CHKERR( rval );
00914 
00915     // exchange tag data
00916     tmp_range.clear();
00917     rval = pcomm->exchange_tags( sparse_test_tag, tmp_range );
00918     PCHECK( MB_SUCCESS == rval );
00919 
00920     // make sure local values are unchanged
00921     std::vector< int > procs2( local.size() );
00922     rval = moab.tag_get_data( sparse_test_tag, local, &procs2[0] );CHKERR( rval );
00923     PCHECK( procs1 == procs2 );
00924 
00925     // compare values on ghost entities
00926     procs1.resize( ghosts.size() );
00927     procs2.resize( ghosts.size() );
00928     rval = moab.tag_get_data( sparse_test_tag, ghosts, &procs2[0] );CHKERR( rval );
00929     std::vector< int >::iterator j = procs1.begin();
00930     for( i = ghosts.begin(); i != ghosts.end(); ++i, ++j )
00931     {
00932         rval = pcomm->get_owner( *i, *j );CHKERR( rval );
00933     }
00934     PCHECK( procs1 == procs2 );
00935 
00936     return MB_SUCCESS;
00937 }
00938 
00939 ErrorCode regression_ghost_tag_exchange_no_default( const char* filename )
00940 {
00941     Core mb_instance;
00942     Interface& moab = mb_instance;
00943     ErrorCode rval;
00944 
00945 #ifdef MOAB_HAVE_HDF5
00946     rval = moab.load_file( filename, 0,
00947                            "PARALLEL=READ_DELETE;"
00948                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00949                            "PARTITION_DISTRIBUTE;"
00950                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00951                            "PARALLEL_GHOSTS=3.2.1" );
00952 #else
00953     rval = moab.load_file( filename, 0,
00954                            "PARALLEL=READ_BCAST;"
00955                            "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
00956                            "PARTITION_DISTRIBUTE;"
00957                            "PARALLEL_RESOLVE_SHARED_ENTS;"
00958                            "PARALLEL_GHOSTS=3.2.1" );
00959 #endif
00960     CHKERR( rval );
00961 
00962     // create a tag to exchange
00963     Tag dense_test_tag;
00964     // rval = moab.tag_get_handle( "TEST-TAG", sizeof(EntityHandle), MB_TAG_DENSE,
00965     //                         dense_test_tag, 0 ); CHKERR(rval);
00966     rval = moab.tag_get_handle( "TEST-TAG", 1, MB_TYPE_HANDLE, dense_test_tag, MB_TAG_DENSE | MB_TAG_EXCL );CHKERR( rval );
00967 
00968     // exchange tag data
00969     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
00970     Range tmp_range;
00971     rval = pcomm->exchange_tags( dense_test_tag, tmp_range );
00972     PCHECK( MB_SUCCESS == rval );
00973 
00974     return MB_SUCCESS;
00975 }
00976 
00977 // Helper for exchange_sharing_data
00978 // Swap contents of the buffer with the specified processor.
00979 int MPI_swap( void* buffer, int num_val, MPI_Datatype val_type, int other_proc )
00980 {
00981     int err, rank, bytes;
00982     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00983     MPI_Type_size( val_type, &bytes );
00984     bytes *= num_val;
00985     std::vector< unsigned char > buffer2( bytes );
00986 
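    // Two passes: in one this rank sends, in the other it receives.  The test
    // i == (rank < other_proc) makes the two partners take those roles in opposite
    // orders, so the blocking MPI_Send/MPI_Recv calls pair up instead of both ranks
    // trying to send (or receive) at the same time.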
00987     for( int i = 0; i < 2; ++i )
00988     {
00989         if( i == ( rank < other_proc ) )
00990         {
00991             err = MPI_Send( buffer, num_val, val_type, other_proc, 0, MPI_COMM_WORLD );
00992             if( err ) return err;
00993         }
00994         else
00995         {
00996             MPI_Status status;
00997             err = MPI_Recv( &buffer2[0], num_val, val_type, other_proc, 0, MPI_COMM_WORLD, &status );
00998             if( err ) return err;
00999         }
01000     }
01001 
01002     memcpy( buffer, &buffer2[0], bytes );
01003     return 0;
01004 }
01005 
01006 int valid_ghosting_owners( int comm_size, const int* ids, const int* owners )
01007 {
01008     // for each vertex ID, build list of {rank,owner} tuples
01009     std::map< int, std::vector< int > > verts;
01010     for( int p = 0; p < comm_size; ++p )
01011     {
01012         for( int i = 0; i < 9; ++i )
01013         {  // nine verts per proc
01014             int idx = 9 * p + i;
01015             verts[ids[idx]].push_back( p );
01016             verts[ids[idx]].push_back( owners[idx] );
01017         }
01018     }
01019 
01020     // now check for each vertex that the owner from
01021     // each processor is the same
01022     bool print_desc = true;
01023     int error_count = 0;
01024     std::map< int, std::vector< int > >::iterator it;
01025     for( it = verts.begin(); it != verts.end(); ++it )
01026     {
01027         int id                   = it->first;
01028         std::vector< int >& list = it->second;
01029         bool all_same            = true;
01030         for( size_t i = 2; i < list.size(); i += 2 )
01031             if( list[i + 1] != list[1] ) all_same = false;
01032         if( all_same ) continue;
01033 
01034         ++error_count;
01035 
01036         if( print_desc )
01037         {
01038             print_desc = false;
01039             std::cerr << "ERROR at " __FILE__ ":" << __LINE__ << std::endl
01040                       << "  Processors have inconsistent ideas of vertex ownership:" << std::endl;
01041         }
01042 
01043         std::cerr << "  Vertex " << id << ": " << std::endl;
01044         for( size_t i = 0; i < list.size(); i += 2 )
01045             std::cerr << "    Proc " << list[i] << " thinks owner is " << list[i + 1] << std::endl;
01046     }
01047 
01048     return error_count;
01049 }
01050 
01051 ErrorCode test_interface_owners_common( int num_ghost_layers )
01052 {
01053     ErrorCode rval;
01054     Core moab_instance;
01055     Interface& mb = moab_instance;
01056     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01057 
01058     // build distributed quad mesh
01059     Range quads;
01060     EntityHandle verts[9];
01061     int ids[9];
01062     rval = parallel_create_mesh( mb, ids, verts, quads );
01063     PCHECK( MB_SUCCESS == rval );
01064     rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
01065     PCHECK( MB_SUCCESS == rval );
01066     if( num_ghost_layers )
01067     {
01068         rval = pcomm.exchange_ghost_cells( 2, 0, num_ghost_layers, 0, true );
01069         PCHECK( MB_SUCCESS == rval );
01070     }
01071 
01072     // get vertex owners
01073     int owner[9];
01074     for( int i = 0; i < 9; ++i )
01075     {
01076         rval = pcomm.get_owner( verts[i], owner[i] );
01077         if( MB_SUCCESS != rval ) break;
01078     }
01079     PCHECK( MB_SUCCESS == rval );
01080 
01081     // exchange vertex owners amongst all processors
01082     int rank, size, ierr;
01083     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01084     MPI_Comm_size( MPI_COMM_WORLD, &size );
01085     std::vector< int > all_ids( 9 * size ), all_owner( 9 * size );
01086     ierr = MPI_Gather( ids, 9, MPI_INT, &all_ids[0], 9, MPI_INT, 0, MPI_COMM_WORLD );
01087     if( ierr ) return MB_FAILURE;
01088     ierr = MPI_Gather( owner, 9, MPI_INT, &all_owner[0], 9, MPI_INT, 0, MPI_COMM_WORLD );
01089     if( ierr ) return MB_FAILURE;
01090 
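    // Only the root rank runs the consistency check; broadcasting the error count
    // ensures every rank returns the same pass/fail result.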
01091     int errors = rank ? 0 : valid_ghosting_owners( size, &all_ids[0], &all_owner[0] );
01092     MPI_Bcast( &errors, 1, MPI_INT, 0, MPI_COMM_WORLD );
01093     return errors ? MB_FAILURE : MB_SUCCESS;
01094 }
01095 
01096 // Common implementation for both:
01097 //   test_interface_owners
01098 //   regression_owners_with_ghosting
01099 ErrorCode test_interface_owners( const char* )
01100 {
01101     return test_interface_owners_common( 0 );
01102 }
01103 
01104 ErrorCode regression_owners_with_ghosting( const char* )
01105 {
01106     return test_interface_owners_common( 1 );
01107 }
01108 
01109 struct VtxData
01110 {
01111     std::vector< int > procs;
01112     std::vector< int > owners;
01113     std::vector< EntityHandle > handles;
01114 };
01115 
01116 ErrorCode test_ghosted_entity_shared_data( const char* )
01117 {
01118     ErrorCode rval;
01119     Core moab_instance;
01120     Interface& mb = moab_instance;
01121     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01122 
01123     int rank, size;
01124     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01125     MPI_Comm_size( MPI_COMM_WORLD, &size );
01126 
01127     // build distributed quad mesh
01128     Range quads;
01129     EntityHandle verts[9];
01130     int ids[9];
01131     rval = parallel_create_mesh( mb, ids, verts, quads );
01132     PCHECK( MB_SUCCESS == rval );
01133     rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
01134     PCHECK( MB_SUCCESS == rval );
01135     rval = pcomm.exchange_ghost_cells( 2, 1, 1, 0, true );
01136     PCHECK( MB_SUCCESS == rval );
01137 
01138     rval = pcomm.check_all_shared_handles();
01139     PCHECK( MB_SUCCESS == rval );
01140 
01141     return MB_SUCCESS;
01142 }
01143 
01144 ErrorCode check_consistent_ids( Interface& mb, const EntityHandle* entities, const int* orig_ids, int num_ents,
01145                                 const char* singular_name, const char* plural_name )
01146 {
01147     ErrorCode rval;
01148     int rank, size, ierr;
01149     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01150     MPI_Comm_size( MPI_COMM_WORLD, &size );
01151 
01152     Tag id_tag = mb.globalId_tag();
01153     std::vector< int > new_ids( num_ents );
01154     rval = mb.tag_get_data( id_tag, entities, num_ents, &new_ids[0] );CHKERR( rval );
01155     // This test is wrong.  a) The caller can select a start ID so there's
01156     // no guarantee that the IDs will be in any specific range and b) There
01157     // is no reason to expect the global number of entities to be num_ents*size
01158     // if there are any shared entities. J.Kraftcheck 2010-12-22
01159     // rval = MB_SUCCESS;
01160     // for (int i = 0; i < num_ents; ++i)
01161     //  if (new_ids[i] < 0 || new_ids[i] >= num_ents*size) {
01162     //    std::cerr << "ID out of bounds on proc " << rank
01163     //              << " : " << new_ids[i] << " not in [0," << num_ents*size-1
01164     //              << "]" << std::endl;
01165     //    rval = MB_FAILURE;
01166     //  }
01167     // if (MB_SUCCESS != rval)
01168     //  return rval;
01169 
01170     // Gather up all data on root proc for consistency check
01171     std::vector< int > all_orig_ids( num_ents * size ), all_new_ids( num_ents * size );
01172     ierr = MPI_Gather( (void*)orig_ids, num_ents, MPI_INT, &all_orig_ids[0], num_ents, MPI_INT, 0, MPI_COMM_WORLD );
01173     if( ierr ) return MB_FAILURE;
01174     ierr = MPI_Gather( &new_ids[0], num_ents, MPI_INT, &all_new_ids[0], num_ents, MPI_INT, 0, MPI_COMM_WORLD );
01175     if( ierr ) return MB_FAILURE;
01176 
01177     // build a local map from original ID to new ID and use it
01178     // to check for consistency between all procs
01179     rval = MB_SUCCESS;
01181     if( 0 == rank )
01182     {
01183         // check for two processors having different global ID for same entity
01184         std::map< int, int > idmap;  // index by original ID and contains new ID
01185         std::map< int, int > owner;  // index by original ID and contains owning rank
01186         for( int i = 0; i < num_ents * size; ++i )
01187         {
01188             std::map< int, int >::iterator it = idmap.find( all_orig_ids[i] );
01189             if( it == idmap.end() )
01190             {
01191                 idmap[all_orig_ids[i]] = all_new_ids[i];
01192                 owner[all_orig_ids[i]] = i / num_ents;
01193             }
01194             else if( it->second != all_new_ids[i] )
01195             {
01196                 std::cerr << "Inconsistent " << singular_name << " IDs between processors " << owner[all_orig_ids[i]]
01197                           << " and " << i / num_ents << " : " << it->second << " and " << all_new_ids[i]
01198                           << " respectively." << std::endl;
01199                 rval = MB_FAILURE;
01200             }
01201         }
01202         // check for two processors having same global ID for different entities
01203         idmap.clear();
01204         owner.clear();
01205         for( int i = 0; i < num_ents * size; ++i )
01206         {
01207             std::map< int, int >::iterator it = idmap.find( all_new_ids[i] );
01208             if( it == idmap.end() )
01209             {
01210                 idmap[all_new_ids[i]] = all_orig_ids[i];
01211                 owner[all_new_ids[i]] = i / num_ents;
01212             }
01213             else if( it->second != all_orig_ids[i] )
01214             {
01215                 std::cerr << "ID " << all_new_ids[i] << " assigned to different " << plural_name << " on processors "
01216                           << owner[all_new_ids[i]] << " and " << i / num_ents << std::endl;
01217                 rval = MB_FAILURE;
01218             }
01219         }
01220     }
01221     return rval;
01222 }
01223 
01224 ErrorCode test_assign_global_ids( const char* )
01225 {
01226     ErrorCode rval;
01227     Core moab_instance;
01228     Interface& mb = moab_instance;
01229     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01230 
01231     int rank, size;
01232     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01233     MPI_Comm_size( MPI_COMM_WORLD, &size );
01234 
01235     // build distributed quad mesh
01236     Range quad_range;
01237     EntityHandle verts[9];
01238     int vert_ids[9];
01239     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01240     PCHECK( MB_SUCCESS == rval );
01241     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01242     PCHECK( MB_SUCCESS == rval );
01243 
01244     // get global ids for quads
01245     Tag id_tag = mb.globalId_tag();
01246     assert( 4u == quad_range.size() );
01247     EntityHandle quads[4];
01248     std::copy( quad_range.begin(), quad_range.end(), quads );
01249     int quad_ids[4];
01250     rval = mb.tag_get_data( id_tag, quads, 4, quad_ids );CHKERR( rval );
01251 
01252     // clear GLOBAL_ID tag
01253     int zero[9] = { 0 };
01254     rval        = mb.tag_set_data( id_tag, verts, 9, zero );CHKERR( rval );
01255     rval = mb.tag_set_data( id_tag, quads, 4, zero );CHKERR( rval );
01256 
01257     // assign new global IDs
01258     rval = pcomm.assign_global_ids( 0, 2 );
01259     PCHECK( MB_SUCCESS == rval );
01260 
01261     rval = check_consistent_ids( mb, verts, vert_ids, 9, "vertex", "vertices" );
01262     PCHECK( MB_SUCCESS == rval );
01263     rval = check_consistent_ids( mb, quads, quad_ids, 4, "quad", "quads" );
01264     PCHECK( MB_SUCCESS == rval );
01265     return rval;
01266 }
01267 
01268 ErrorCode test_shared_sets( const char* )
01269 {
01270     ErrorCode rval;
01271     Core moab_instance;
01272     Interface& mb = moab_instance;
01273     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01274 
01275     int rank_i, size_i;
01276     MPI_Comm_rank( MPI_COMM_WORLD, &rank_i );
01277     MPI_Comm_size( MPI_COMM_WORLD, &size_i );
01278     const unsigned rank  = rank_i;
01279     const unsigned nproc = size_i;
01280 
01281     // build distributed quad mesh
01282     Range quads, sets;
01283     EntityHandle verts[9], set_arr[3];
01284     int ids[9];
01285     rval = parallel_create_mesh( mb, ids, verts, quads, set_arr );
01286     PCHECK( MB_SUCCESS == rval );
01287     rval = pcomm.resolve_shared_ents( 0, quads, 2, 1 );
01288     PCHECK( MB_SUCCESS == rval );
01289     sets.insert( set_arr[0] );
01290     sets.insert( set_arr[1] );
01291     if( set_arr[2] ) { sets.insert( set_arr[2] ); }
01292     else
01293     {
01294         set_arr[2] = set_arr[1];
01295     }
01296 
01297     Tag id_tag = mb.globalId_tag();
01298     rval       = pcomm.resolve_shared_sets( sets, id_tag );
01299     PCHECK( MB_SUCCESS == rval );
01300 
01301     // check that set data is consistent
01302     ErrorCode ok = MB_SUCCESS;
01303     for( size_t i = 0; i < 3; ++i )
01304     {
01305         unsigned owner;
01306         EntityHandle owner_handle, local;
01307         rval = pcomm.get_entityset_owner( set_arr[i], owner, &owner_handle );
01308         if( MB_SUCCESS != rval )
01309             ok = rval;
01310         else if( owner == rank )
01311         {
01312             if( owner_handle != set_arr[i] )
01313             {
01314                 std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid remote handle for owned set"
01315                           << std::endl;
01316                 ok = MB_FAILURE;
01317             }
01318         }
01319         else if( MB_SUCCESS != pcomm.get_entityset_local_handle( owner, owner_handle, local ) )
01320             ok = MB_FAILURE;
01321         else if( local != set_arr[i] )
01322         {
01323             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " invalid local handle for remote data"
01324                       << std::endl;
01325             ok = MB_FAILURE;
01326         }
01327     }
01328     PCHECK( MB_SUCCESS == ok );
01329 
01330     const unsigned col = rank / 2;  // which column is proc in
01331     const unsigned colrank =
01332         2 * col;  // rank of first of two procs in col (rank if rank is even, rank-1 if rank is odd)
01333     unsigned mins[3] = { 0, 0, 0 };
01334     unsigned maxs[3] = { nproc - 1, 0, 0 };
01335     if( rank < 2 )
01336     {  // if in first (left-most) column, then
01337         mins[1] = mins[2] = 0;
01338         maxs[1] = maxs[2] = std::min( 3u, nproc - 1 );
01339     }
01340     else if( col == ( nproc - 1 ) / 2 )
01341     {  // else if last (right-most) column, then
01342         mins[1] = mins[2] = colrank - 2;
01343         maxs[1] = maxs[2] = std::min( colrank + 1, nproc - 1 );
01344     }
01345     else
01346     {  // if inside column, then
01347         mins[1] = colrank - 2;
01348         mins[2] = colrank;
01349         maxs[1] = std::min( colrank + 1, nproc - 1 );
01350         maxs[2] = std::min( colrank + 3, nproc - 1 );
01351     }
01352 
01353     // check expected sharing lists
01354     // set i is shared among all procs in the range [ mins[i], maxs[i] ]
01355     std::vector< unsigned > expected, list;
01356     for( size_t i = 0; i < 3; ++i )
01357     {
01358         expected.clear();
01359         for( unsigned r = mins[i]; r <= std::min( maxs[i], nproc - 1 ); ++r )
01360             if( r != rank ) expected.push_back( r );
01361         list.clear();
01362         rval = pcomm.get_entityset_procs( set_arr[i], list );
01363         if( MB_SUCCESS != rval )
01364             ok = rval;
01365         else
01366         {
01367             std::sort( list.begin(), list.end() );
01368             if( expected != list )
01369             {
01370                 std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " incorrect sharing list for entity set"
01371                           << std::endl;
01372                 ok = MB_FAILURE;
01373             }
01374         }
01375     }
01376     PCHECK( MB_SUCCESS == ok );
01377 
01378     // check consistent owners
01379     unsigned send_list[6], set_owners[3];
01380     std::vector< unsigned > recv_list( 6 * nproc );
01381     for( size_t i = 0; i < 3; ++i )
01382     {
01383         mb.tag_get_data( id_tag, set_arr + i, 1, &send_list[2 * i] );
01384         pcomm.get_entityset_owner( set_arr[i], set_owners[i] );
01385         send_list[2 * i + 1] = set_owners[i];
01386     }
01387     MPI_Allgather( send_list, 6, MPI_UNSIGNED, &recv_list[0], 6, MPI_UNSIGNED, MPI_COMM_WORLD );
01388     std::map< unsigned, unsigned > owners;
01389     for( unsigned i = 0; i < 6 * nproc; i += 2 )
01390     {
01391         unsigned id    = recv_list[i];
01392         unsigned owner = recv_list[i + 1];
01393         if( owners.find( id ) == owners.end() )
01394             owners[id] = owner;
01395         else if( owners[id] != owner )
01396         {
01397             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank << " mismatched owners (" << owners[id]
01398                       << " and " << owner << ") for set with ID " << id << std::endl;
01399             ok = MB_FAILURE;
01400         }
01401     }
01402     PCHECK( MB_SUCCESS == ok );
01403 
01404     // test PComm::get_entityset_owners
01405     std::vector< unsigned > act_owners, exp_owners;
01406     for( size_t i = 0; i < 3; ++i )
01407     {
01408         exp_owners.push_back( set_owners[i] );
01409     }
01410     ok = pcomm.get_entityset_owners( act_owners );
01411     PCHECK( MB_SUCCESS == ok );
01412     // PCHECK(std::find(act_owners.begin(),act_owners.end(),rank) == act_owners.end());
01413     std::sort( act_owners.begin(), act_owners.end() );
01414     std::sort( exp_owners.begin(), exp_owners.end() );
01415     exp_owners.erase( std::unique( exp_owners.begin(), exp_owners.end() ), exp_owners.end() );
01416     PCHECK( exp_owners == act_owners );
01417 
01418     // test PComm::get_owned_sets
01419     ok = MB_SUCCESS;
01420     for( unsigned i = 0; i < nproc; ++i )
01421     {
01422         sets.clear();
01423         if( MB_SUCCESS != pcomm.get_owned_sets( i, sets ) )
01424         {
01425             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
01426                       << " failed to get shared set list for sets owned by rank " << i << std::endl;
01427             ok = MB_FAILURE; continue;
01428         }
01429 
01430         Range expected_range;
01431         for( size_t j = 0; j < 3; ++j )
01432             if( set_owners[j] == i ) expected_range.insert( set_arr[j] );
01433 
01434         if( expected_range != sets )
01435         {
01436             std::cerr << __FILE__ << ":" << __LINE__ << " rank " << rank
01437                       << " has incorrect shared set list for sets owned by rank " << i << std::endl
01438                       << "Expected: " << expected_range << std::endl
01439                       << "Actual: " << sets << std::endl;
01440             ok = MB_FAILURE;
01441         }
01442     }
01443     PCHECK( MB_SUCCESS == ok );
01444 
01445     return MB_SUCCESS;
01446 }
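
// Illustrative sketch (hypothetical helper, not part of the original tests;
// only calls exercised above are used): once resolve_shared_sets() has run,
// any shared set can report its owning rank and the other ranks sharing it.
static ErrorCode report_set_sharing( ParallelComm& pcomm, EntityHandle set )
{
    unsigned owner;
    ErrorCode rval = pcomm.get_entityset_owner( set, owner );CHKERR( rval );
    std::vector< unsigned > procs;
    rval = pcomm.get_entityset_procs( set, procs );CHKERR( rval );
    std::cout << "set owned by rank " << owner << ", shared with " << procs.size() << " other ranks" << std::endl;
    return MB_SUCCESS;
}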
01447 
01448 template < typename T >
01449 ErrorCode check_shared_ents( ParallelComm& pcomm, Tag tagh, T fact, MPI_Op mpi_op )
01450 {
01451     // get the shared entities
01452     Range shared_ents;
01453     ErrorCode rval = pcomm.get_shared_entities( -1, shared_ents );CHKERR( rval );
01454 
01455     std::vector< int > shprocs( MAX_SHARING_PROCS );
01456     std::vector< EntityHandle > shhandles( MAX_SHARING_PROCS );
01457     std::vector< T > dum_vals( shared_ents.size() );
01458     int np;
01459     unsigned char pstatus;
01460 
01461     rval = pcomm.get_moab()->tag_get_data( tagh, shared_ents, &dum_vals[0] );CHKERR( rval );
01462 
01463     // for each shared entity, compare the reduced tag value against the expected combination of fact over its np sharing procs (see the sketch after this function)
01464     Range::iterator rit;
01465     int i = 0;
01466     for( rit = shared_ents.begin(); rit != shared_ents.end(); ++rit, i++ )
01467     {
01468         rval = pcomm.get_sharing_data( *rit, &shprocs[0], &shhandles[0], pstatus, np );CHKERR( rval );
01469         if( 1 == np && shprocs[0] != (int)pcomm.proc_config().proc_rank() ) np++;
01470         bool with_root = std::find( &shprocs[0], &shprocs[np], 0 ) - &shprocs[0] != np || !pcomm.rank();
01471         if( mpi_op == MPI_SUM )
01472         {
01473             if( dum_vals[i] != fact * np ) return MB_FAILURE;
01474         }
01475         else if( mpi_op == MPI_PROD )
01476         {
01477             if( dum_vals[i] != pow( fact, np ) ) return MB_FAILURE;
01478         }
01479         else if( mpi_op == MPI_MAX )
01480         {
01481             if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
01482         }
01483         else if( mpi_op == MPI_MIN )
01484         {
01485             if( with_root && dum_vals[i] != fact ) return MB_FAILURE;
01486         }
01487         else
01488             return MB_FAILURE;
01489     }
01490 
01491     return MB_SUCCESS;
01492 }
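
// Illustrative sketch (hypothetical helper, not used by the tests): the
// expected value checked above, written out per reduction operation.  Every
// sharing proc contributes the same value 'fact' (for MAX/MIN the tests below
// arrange for rank 0 to hold the extremal value 'fact'), so:
template < typename T >
static T expected_reduced_value( T fact, int np, MPI_Op op )
{
    if( op == MPI_SUM ) return fact * np;                    // sum of np identical contributions
    if( op == MPI_PROD ) return (T)pow( (double)fact, np );  // product of np identical contributions
    return fact;  // MPI_MAX / MPI_MIN: the extremal contribution itself
}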
01493 
01494 template < typename T >
01495 ErrorCode test_reduce_tags( const char*, DataType tp )
01496 {
01497     ErrorCode rval;
01498     Core moab_instance;
01499     Interface& mb = moab_instance;
01500     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01501 
01502     int rank, size;
01503     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01504     MPI_Comm_size( MPI_COMM_WORLD, &size );
01505 
01506     // build distributed quad mesh
01507     Range quad_range;
01508     EntityHandle verts[9];
01509     int vert_ids[9];
01510     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01511     PCHECK( MB_SUCCESS == rval );
01512     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01513     PCHECK( MB_SUCCESS == rval );
01514 
01515     Tag test_tag;
01516     T def_val = 2;
01517     Range dum_range;
01518     std::vector< T > dum_vals;
01519 
01520     // T, MPI_SUM
01521     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01522     rval = pcomm.reduce_tags( test_tag, MPI_SUM, dum_range );CHKERR( rval );
01523     rval = check_shared_ents( pcomm, test_tag, (T)2, MPI_SUM );CHKERR( rval );
01524     rval = mb.tag_delete( test_tag );CHKERR( rval );
01525 
01526     // T, MPI_PROD
01527     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01528     rval = pcomm.reduce_tags( test_tag, MPI_PROD, dum_range );CHKERR( rval );
01529     rval = check_shared_ents( pcomm, test_tag, (T)2, MPI_PROD );CHKERR( rval );
01530     rval = mb.tag_delete( test_tag );CHKERR( rval );
01531 
01532     // T, MPI_MAX
01533     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01534     // get owned entities and set a different value on them
01535     rval = pcomm.get_shared_entities( -1, dum_range, -1, false, false );CHKERR( rval );
01536     if( pcomm.rank() == 0 )
01537     {
01538         dum_vals.resize( dum_range.size(), (T)3 );
01539         rval = mb.tag_set_data( test_tag, dum_range, &dum_vals[0] );CHKERR( rval );
01540     }
01541     rval = pcomm.reduce_tags( test_tag, MPI_MAX, dum_range );CHKERR( rval );
01542     rval = check_shared_ents( pcomm, test_tag, (T)3, MPI_MAX );CHKERR( rval );
01543     rval = mb.tag_delete( test_tag );CHKERR( rval );
01544 
01545     // T, MPI_MIN
01546     rval = mb.tag_get_handle( "test_tag", 1, tp, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );CHKERR( rval );
01547     // get owned entities and set a different value on them
01548     if( pcomm.rank() == 0 )
01549     {
01550         std::fill( dum_vals.begin(), dum_vals.end(), (T)-1 );
01551         rval = mb.tag_set_data( test_tag, dum_range, &dum_vals[0] );CHKERR( rval );
01552     }
01553     rval = pcomm.reduce_tags( test_tag, MPI_MIN, dum_range );CHKERR( rval );
01554     rval = check_shared_ents( pcomm, test_tag, (T)-1, MPI_MIN );CHKERR( rval );
01555     rval = mb.tag_delete( test_tag );CHKERR( rval );
01556 
01557     return MB_SUCCESS;
01558 }
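
// Illustrative sketch (hypothetical helper, mirroring the pattern of the
// template above; the tag name "my_tag" is arbitrary): a minimal SUM reduction
// over shared entities for a dense double tag with a default value.
static ErrorCode minimal_reduce_example( Interface& mb, ParallelComm& pcomm )
{
    Tag t;
    double def     = 1.0;
    ErrorCode rval = mb.tag_get_handle( "my_tag", 1, MB_TYPE_DOUBLE, t, MB_TAG_DENSE | MB_TAG_CREAT, &def );CHKERR( rval );
    Range no_ents;  // passed empty, exactly as in the SUM/PROD cases above
    rval = pcomm.reduce_tags( t, MPI_SUM, no_ents );CHKERR( rval );
    return mb.tag_delete( t );
}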
01559 
01560 ErrorCode test_reduce_tag_failures( const char* )
01561 {
01562     // test things that should fail
01563     ErrorCode rval;
01564     Core moab_instance;
01565     Interface& mb = moab_instance;
01566     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01567 
01568     int rank, size;
01569     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01570     MPI_Comm_size( MPI_COMM_WORLD, &size );
01571 
01572     // build distributed quad mesh
01573     Range quad_range;
01574     EntityHandle verts[9];
01575     int vert_ids[9];
01576     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01577     PCHECK( MB_SUCCESS == rval );
01578     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01579     PCHECK( MB_SUCCESS == rval );
01580 
01581     Tag test_tag;
01582     Range dum_range;
01583     int def_int;
01584     double def_dbl;
01585 
01586     // explicit destination tag of different type
01587     Tag test_dest;
01588     std::vector< Tag > src_tags, dest_tags;
01589     rval = mb.tag_get_handle( "test_tag", 1, MB_TYPE_INTEGER, test_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_int );CHKERR( rval );
01590     src_tags.push_back( test_tag );
01591     rval = mb.tag_get_handle( "dtest_tag", 1, MB_TYPE_DOUBLE, test_dest, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
01592     dest_tags.push_back( test_dest );
01593     rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_MIN, dum_range );
01594     PCHECK( rval == MB_FAILURE );
01595     rval = mb.tag_delete( test_dest );CHKERR( rval );
01596     rval = mb.tag_delete( test_tag );CHKERR( rval );
01597 
01598     // tag w/ no default value
01599     rval = mb.tag_get_handle( "test_tag", 1, MB_TYPE_INTEGER, test_tag, MB_TAG_DENSE | MB_TAG_CREAT );CHKERR( rval );
01600     rval = pcomm.reduce_tags( test_tag, MPI_MIN, dum_range );
01601     PCHECK( rval == MB_ENTITY_NOT_FOUND );
01602     rval = mb.tag_delete( test_tag );CHKERR( rval );
01603 
01604     return MB_SUCCESS;
01605 }
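
// Note on the two failure cases above: reducing into an explicit destination
// tag of a different data type is expected to fail with MB_FAILURE, and
// reducing a tag that has no default value is expected to fail with
// MB_ENTITY_NOT_FOUND.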
01606 
01607 ErrorCode test_reduce_tags( const char* )
01608 {
01609     ErrorCode rval = MB_SUCCESS, tmp_rval;
01610 
01611     tmp_rval = test_reduce_tags< int >( "test_reduce_tags (int)", MB_TYPE_INTEGER );
01612     if( MB_SUCCESS != tmp_rval )
01613     {
01614         std::cout << "test_reduce_tags failed for int data type." << std::endl;
01615         rval = tmp_rval;
01616     }
01617 
01618     tmp_rval = test_reduce_tags< double >( "test_reduce_tags (dbl)", MB_TYPE_DOUBLE );
01619     if( MB_SUCCESS != tmp_rval )
01620     {
01621         std::cout << "test_reduce_tags failed for double data type." << std::endl;
01622         rval = tmp_rval;
01623     }
01624 
01625     return rval;
01626 }
01627 
01628 ErrorCode test_reduce_tag_explicit_dest( const char* )
01629 {
01630     // test tag_reduce stuff with explicit destination tag
01631     ErrorCode rval;
01632     Core moab_instance;
01633     Interface& mb = moab_instance;
01634     ParallelComm pcomm( &mb, MPI_COMM_WORLD );
01635 
01636     int rank, size;
01637     MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01638     MPI_Comm_size( MPI_COMM_WORLD, &size );
01639 
01640     // build distributed quad mesh
01641     Range quad_range;
01642     EntityHandle verts[9];
01643     int vert_ids[9];
01644     rval = parallel_create_mesh( mb, vert_ids, verts, quad_range );
01645     PCHECK( MB_SUCCESS == rval );
01646     rval = pcomm.resolve_shared_ents( 0, quad_range, 2, 1 );
01647     PCHECK( MB_SUCCESS == rval );
01648 
01649     Tag src_tag, dest_tag;
01650     Range dum_range;
01651     double def_dbl = 5.0;
01652 
01653     // src tag has one value, pre-existing dest tag has another; should get MPI_Op of src values
01654     std::vector< Tag > src_tags, dest_tags;
01655     // create the tags
01656     rval = mb.tag_get_handle( "src_tag", 1, MB_TYPE_DOUBLE, src_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
01657     src_tags.push_back( src_tag );
01658     rval = mb.tag_get_handle( "dest_tag", 1, MB_TYPE_DOUBLE, dest_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_dbl );CHKERR( rval );
01659     dest_tags.push_back( dest_tag );
01660     // set values for src tag to one value, dest tag to another
01661     rval = pcomm.get_shared_entities( -1, dum_range, -1, false, false );CHKERR( rval );
01662     std::vector< double > tag_vals( dum_range.size(), 1.0 );
01663     rval = mb.tag_set_data( src_tag, dum_range, &tag_vals[0] );CHKERR( rval );
01664     std::fill( tag_vals.begin(), tag_vals.end(), 2.0 );
01665     rval = mb.tag_set_data( dest_tag, dum_range, &tag_vals[0] );CHKERR( rval );
01666     // do the reduce
01667     rval = pcomm.reduce_tags( src_tags, dest_tags, MPI_SUM, dum_range );CHKERR( rval );
01668     // check the reduced value
01669     rval = check_shared_ents< double >( pcomm, dest_tag, (double)1.0, MPI_SUM );CHKERR( rval );
01670 
01671     rval = mb.tag_delete( src_tag );CHKERR( rval );
01672     rval = mb.tag_delete( dest_tag );CHKERR( rval );
01673 
01674     return MB_SUCCESS;
01675 }
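
// Note on the check above: with an explicit destination, reduce_tags()
// combines the *source* values (1.0 on every proc) and writes the result into
// dest_tag, replacing the pre-existing 2.0 values; check_shared_ents()
// therefore expects 1.0 times the number of sharing procs under MPI_SUM.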
01676 
01677 ErrorCode test_delete_entities( const char* filename )
01678 {
01679     Core mb_instance;
01680     Interface& moab = mb_instance;
01681     ErrorCode rval;
01682 
01683     rval = moab.load_file( filename, 0,
01684                            "PARALLEL=READ_PART;"
01685                            "PARTITION=PARALLEL_PARTITION;"
01686                            "PARALLEL_RESOLVE_SHARED_ENTS" );CHKERR( rval );
01687 
01688     // Get ghost elements
01689     ParallelComm* pcomm = ParallelComm::get_pcomm( &moab, 0 );
01690     // get some edges and faces, and delete some
01691     Range local;
01692     rval = moab.get_entities_by_dimension( 0, 1, local );CHKERR( rval );
01693     rval = moab.get_entities_by_dimension( 0, 2, local );CHKERR( rval );  // it is appending to range
01694 
01695     Range local2;  // extract about half of those
01696     std::copy( local.begin(), local.begin() + local.size() / 2, range_inserter( local2 ) );
01697 
01698     // delete local 2
01699     rval = pcomm->delete_entities( local2 );CHKERR( rval );
01700 
01701     for( Range::iterator it = local2.begin(); it != local2.end(); ++it )
01702     {
01703         if( mb_instance.is_valid( *it ) ) return MB_FAILURE;
01704     }
01705     const char* opt = "PARALLEL=WRITE_PART";
01706     rval            = moab.write_file( "tmpx.h5m", 0, opt );CHKERR( rval );
01707     if( pcomm->proc_config().proc_rank() == 0 ) remove( "tmpx.h5m" );
01708 
01709     return MB_SUCCESS;
01710 }
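
// Note on the read options used above (summarized from MOAB's parallel read
// conventions): PARALLEL=READ_PART makes each rank read only its own part,
// PARTITION=PARALLEL_PARTITION selects the partition sets by that tag name,
// and PARALLEL_RESOLVE_SHARED_ENTS resolves shared interface entities after
// the read.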
01711 
01712 ErrorCode test_ghost_polyhedra( const char* filename )
01713 {
01714     Core mb_instance;
01715     Interface& moab = mb_instance;
01716     ErrorCode rval;
01717 
01718     rval = moab.load_file( filename, 0,
01719                            "PARALLEL=READ_PART;"
01720                            "PARTITION=PARALLEL_PARTITION;"
01721                            "PARALLEL_RESOLVE_SHARED_ENTS;"
01722                            "PARALLEL_GHOSTS=3.0.1" );CHKERR( rval );
01723 
01724     return MB_SUCCESS;
01725 }
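
// Note on the ghost option above (MOAB's PARALLEL_GHOSTS format is
// <ghost dim>.<bridge dim>.<layers>): 3.0.1 requests one layer of 3D ghost
// elements bridged through vertices; the sequence test below uses 3.2.1,
// i.e. one layer bridged through faces.
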
01726 ErrorCode test_too_few_parts( const char* filename )
01727 {
01728     Core mb_instance;
01729     Interface& moab = mb_instance;
01730     ErrorCode rval;
01731 
01732     rval = moab.load_file( filename, 0,
01733                            "PARALLEL=READ_PART;"
01734                            "PARTITION=PARALLEL_PARTITION;"
01735                            "PARALLEL_RESOLVE_SHARED_ENTS;" );
01736     // if(rval==MB_SUCCESS)
01737     // return MB_FAILURE;
01738 
01739     return rval;
01740 }
01741 
01742 ErrorCode test_sequences_after_ghosting( const char* filename )
01743 {
01744     Core mb_instance;
01745     Interface& moab = mb_instance;
01746     ErrorCode rval;
01747 
01748     rval = moab.load_file( filename, 0,
01749                            "PARALLEL=READ_PART;"
01750                            "PARTITION=PARALLEL_PARTITION;"
01751                            "PARALLEL_RESOLVE_SHARED_ENTS;"
01752                            "PARALLEL_GHOSTS=3.2.1;"
01753                            "PARALLEL_SEQUENCE_FACTOR=1.5" );CHKERR( rval );
01754 
01755     // get all elements of dimension 3, and check they are on one sequence, with connect_iterate
01756     Range elems;
01757     rval = moab.get_entities_by_dimension( 0, 3, elems );CHKERR( rval );
01758     if( elems.psize() != 1 )
01759     {
01760         std::cout << " elems.psize() = " << elems.psize() << "\n";
01761         return MB_FAILURE;
01762     }
01763     // we want only one sequence
01764     int count, vpere;
01765     EntityHandle* conn_ptr;
01766     rval = moab.connect_iterate( elems.begin(), elems.end(), conn_ptr, vpere, count );CHKERR( rval );
01767 
01768     if( count != (int)elems.size() )
01769     {
01770         std::cout << " more than one sequence:  elems.size() = " << elems.size() << "  count:" << count << "\n";
01771         return MB_FAILURE;
01772     }
01773     // check also global id tag, which is dense
01774     Tag id_tag          = moab.globalId_tag();
01775     void* globalid_data = NULL;
01776     rval                = moab.tag_iterate( id_tag, elems.begin(), elems.end(), count, globalid_data );CHKERR( rval );
01777     if( count != (int)elems.size() )
01778     {
01779         std::cout << " more than one tag sequence:  elems.size() = " << elems.size() << "  count:" << count << "\n";
01780         return MB_FAILURE;
01781     }
01782 
01783     // repeat the tests for vertex sequences
01784     // get all elements of dimension 3, and check they are on one sequence, with coords_iterate
01785     Range verts;
01786     rval = moab.get_entities_by_dimension( 0, 0, verts );CHKERR( rval );
01787     if( verts.psize() != 1 )
01788     {
01789         std::cout << " verts.psize() = " << verts.psize() << "\n";
01790         return MB_FAILURE;
01791     }
01792     //
01793     double *x_ptr, *y_ptr, *z_ptr;
01794     rval = moab.coords_iterate( verts.begin(), verts.end(), x_ptr, y_ptr, z_ptr, count );CHKERR( rval );
01795 
01796     if( count != (int)verts.size() )
01797     {
01798         std::cout << " more than one sequence:  verts.size() = " << verts.size() << "  count:" << count << "\n";
01799         return MB_FAILURE;
01800     }
01801 
01802     rval = moab.tag_iterate( id_tag, verts.begin(), verts.end(), count, globalid_data );CHKERR( rval );
01803     if( count != (int)verts.size() )
01804     {
01805         std::cout << " more than one tag sequence:  verts.size() = " << verts.size() << "  count:" << count << "\n";
01806         return MB_FAILURE;
01807     }
01808     return MB_SUCCESS;
01809 }
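
// Note on the checks above: PARALLEL_SEQUENCE_FACTOR=1.5 over-allocates the
// entity sequences created during the read so that the ghost elements and
// vertices received afterwards can still land in a single sequence; that is
// what the psize() == 1 checks and the connect_iterate / coords_iterate /
// tag_iterate count comparisons verify.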
01810 
01811 // test trivial partition as used by iMOAB send/receive mesh methods
01812 // it is hooked to ParCommGraph, but it is really not dependent on anything from MOAB
01813 // these methods that are tested are just utilities
01814 
01815 void test_trivial_partition()
01816 {
01817     // nothing is modified inside moab
01818     // by these methods;
01819     // to instantiate a ParCommGraph, we just need 2 MPI groups;
01820     // they may overlap, but we do not test that case yet
01821 
01822     // create 2 groups; one over task 0 and 1, one over 2
01823     MPI_Group worldg;
01824     MPI_Comm duplicate;
01825     MPI_Comm_dup( MPI_COMM_WORLD, &duplicate );
01826     MPI_Comm_group( duplicate, &worldg );
01827 
01828     // this is usually run on 2 processes
01829 
01830     int rank[2] = { 0, 1 };
01831     MPI_Group gr1, gr2;
01832     MPI_Group_incl( worldg, 2, rank, &gr1 );      // will contain 2 ranks, 0 and 1
01833     MPI_Group_incl( worldg, 1, &rank[1], &gr2 );  // will contain 1 rank only (1)
01834 
01835     int comp1 = 10, comp2 = 12;
01836     ParCommGraph* pgr = new ParCommGraph( duplicate, gr1, gr2, comp1, comp2 );
01837 
01838     std::map< int, Range > ranges_to_send;
01839     std::vector< int > number_elems_per_part;
01840     number_elems_per_part.push_back( 6 );
01841     number_elems_per_part.push_back( 10 );
01842 
01843     std::cout << " send sizes ";
01844     for( int k = 0; k < (int)number_elems_per_part.size(); k++ )
01845     {
01846         std::cout << " " << number_elems_per_part[k];
01847     }
01848     std::cout << "\n";
01849 
01850     std::cout << "\n";
01851     pgr->compute_trivial_partition( number_elems_per_part );
01852 
01853     Range verts( 10, 20 );
01854     pgr->split_owned_range( 0, verts );
01855 
01856     for( std::map< int, Range >::iterator it = ranges_to_send.begin(); it != ranges_to_send.end(); it++ )
01857     {
01858         Range& ran = it->second;
01859         std::cout << " receiver " << it->first << " receive range: [" << ran[0] << ", " << ran[ran.size() - 1]
01860                   << "] \n";
01861     }
01862 
01863     delete( pgr );
01864 
01865     MPI_Group_free( &worldg );
01866     MPI_Group_free( &gr1 );
01867     MPI_Group_free( &gr2 );
01868     MPI_Comm_free( &duplicate );
01869 }