MOAB: Mesh Oriented datABase  (version 5.4.1)
HelloParMOAB.cpp File Reference
#include "moab/Core.hpp"
#include "MBParallelConventions.h"
#include <iostream>


Functions

int main (int argc, char **argv)

Variables

string test_file_name = string( MESH_DIR ) + string( "/64bricks_512hex_256part.h5m" )

Function Documentation

int main( int argc, char** argv )

Definition at line 23 of file HelloParMOAB.cpp.

References moab::Range::clear(), ErrorCode, moab::ParallelComm::exchange_ghost_cells(), moab::ParallelComm::filter_pstatus(), moab::ParallelComm::get_shared_entities(), moab::Interface::load_file(), mb, MB_CHK_ERR, MPI_COMM_WORLD, moab::Range::num_of_dimension(), moab::ProcConfig::proc_comm(), moab::ParallelComm::proc_config(), moab::ProcConfig::proc_rank(), moab::ProcConfig::proc_size(), PSTATUS_NOT, PSTATUS_NOT_OWNED, rank, and test_file_name.

{
#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );

    string options;

    // Need option handling here for input filename
    if( argc > 1 )
    {
        // User has input a mesh file
        test_file_name = argv[1];
    }

    int nbComms = 1;
    if( argc > 2 ) nbComms = atoi( argv[2] );

    options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";

    // Get MOAB instance
    Interface* mb = new( std::nothrow ) Core;
    if( NULL == mb ) return 1;

    MPI_Comm comm;
    int global_rank, global_size;
    MPI_Comm_rank( MPI_COMM_WORLD, &global_rank );
    MPI_Comm_rank( MPI_COMM_WORLD, &global_size );

    int color = global_rank % nbComms;  // A different color for each group of ranks
    if( nbComms > 1 )
    {
        // Split the communicator into nbComms groups
        MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &comm );
    }
    else
        comm = MPI_COMM_WORLD;
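    // When nbComms > 1, each sub-communicator group performs its own independent
    // parallel read of the mesh file below.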

    // Get the ParallelComm instance
    ParallelComm* pcomm = new ParallelComm( mb, comm );
    int nprocs          = pcomm->proc_config().proc_size();
    int rank            = pcomm->proc_config().proc_rank();
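    // Note that nprocs and rank are relative to comm (possibly a sub-communicator),
    // not to MPI_COMM_WORLD.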
#ifndef NDEBUG
    MPI_Comm rcomm = pcomm->proc_config().proc_comm();
    assert( rcomm == comm );
#endif
    if( 0 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    if( 1 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    MPI_Barrier( MPI_COMM_WORLD );

    if( 0 == global_rank )
        cout << "Reading file " << test_file_name << "\n with options: " << options << "\n on " << nprocs
             << " processors on " << nbComms << " communicator(s)\n";

    // Read the file with the specified options
    ErrorCode rval = mb->load_file( test_file_name.c_str(), 0, options.c_str() );MB_CHK_ERR( rval );

    Range shared_ents;
    // Get entities shared with all other processors
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );

    // Filter the shared entities with NOT of NOT_OWNED, which keeps only the owned ones
    Range owned_entities;
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );
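    // Here -1 means the filter is not restricted to entities shared with a particular
    // processor, and the filtered result is returned in owned_entities.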

    int nums[4] = { 0 };  // To store the number of owned entities of each dimension
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );
    vector< int > rbuf( nprocs * 4, 0 );
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
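    // On the root of comm, rbuf now holds 4 counts per rank, in rank order:
    // vertices, edges, faces, elements.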
    // Print the stats gathered:
    if( 0 == global_rank )
    {
        for( int i = 0; i < nprocs; i++ )
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
    }

    // Now exchange 1 layer of ghost elements, using vertices as bridge
    // (we could have done this as part of reading process, using the PARALLEL_GHOSTS read option)
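    // (Read-time equivalent, as an illustration: append an option such as
    // ";PARALLEL_GHOSTS=3.0.1" -- ghost dim 3, bridge dim 0, 1 layer -- to the read
    // options above; see the MOAB parallel I/O documentation for the exact syntax.)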
    rval = pcomm->exchange_ghost_cells( 3,  // int ghost_dim
                                        0,  // int bridge_dim
                                        1,  // int num_layers
                                        0,  // int addl_ents
                                        true );MB_CHK_ERR( rval );  // bool store_remote_handles

    // Repeat the reports, after ghost exchange
    shared_ents.clear();
    owned_entities.clear();
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );

    // Find out how many shared entities of each dimension are owned on this processor
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );

    // Gather the statistics on processor 0
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
    if( 0 == global_rank )
    {
        cout << " \n\n After exchanging one ghost layer: \n";
        for( int i = 0; i < nprocs; i++ )
        {
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
        }
    }

    delete mb;

    MPI_Finalize();
#else
    std::cout << " compile with MPI and hdf5 for this example to work\n";

#endif
    return 0;
}
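
Typical usage, assuming a standard MPI launcher and an MPI- and HDF5-enabled build of MOAB (launcher name and path are illustrative):

    mpiexec -np 4 ./HelloParMOAB [mesh_file] [nb_comms]

mesh_file defaults to 64bricks_512hex_256part.h5m under MESH_DIR, and nb_comms, the number of sub-communicators, defaults to 1.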

Variable Documentation

string test_file_name = string( MESH_DIR ) + string( "/64bricks_512hex_256part.h5m" )

Definition at line 21 of file HelloParMOAB.cpp.
