Mesh Oriented datABase (version 5.4.1)
Array-based unstructured mesh datastructure
/** @example HelloParMOAB.cpp \n
 * \brief Read mesh into MOAB and resolve/exchange/report shared and ghosted entities \n
 * To run: mpiexec -np 4 HelloParMOAB [filename]\n
 *
 * It also shows how to load the mesh independently on multiple
 * communicators (the second argument gives the number of communicators):
 *
 * mpiexec -np 8 HelloParMOAB [filename] [nbComms]
 */

#include "moab/Core.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#endif
#include "MBParallelConventions.h"
#include <iostream>
#include <cassert>  // for the assert() call below

using namespace moab;
using namespace std;

string test_file_name = string( MESH_DIR ) + string( "/64bricks_512hex_256part.h5m" );

int main( int argc, char** argv )
{
#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );

    string options;

    // Handle the optional input file name argument
    if( argc > 1 )
    {
        // User has input a mesh file
        test_file_name = argv[1];
    }

    int nbComms = 1;
    if( argc > 2 ) nbComms = atoi( argv[2] );

    options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
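    // Read-option summary (see the MOAB User's Guide for the full list):
    //   PARALLEL=READ_PART            : each rank reads only the parts assigned to it
    //   PARTITION=PARALLEL_PARTITION  : sets tagged PARALLEL_PARTITION define the parts
    //   PARALLEL_RESOLVE_SHARED_ENTS  : resolve ownership of entities shared across part boundaries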

    // Get MOAB instance
    Interface* mb = new( std::nothrow ) Core;
    if( NULL == mb ) return 1;

    MPI_Comm comm;
    int global_rank, global_size;
    MPI_Comm_rank( MPI_COMM_WORLD, &global_rank );
    MPI_Comm_size( MPI_COMM_WORLD, &global_size );

    int color = global_rank % nbComms;  // Assign each rank a color, one color per communicator group
    if( nbComms > 1 )
    {
        // Split the communicator into ngroups = nbComms
        MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &comm );
    }
    else
        comm = MPI_COMM_WORLD;
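    // When nbComms > 1, each color group loads the mesh independently on its own
    // sub-communicator, so shared entities are resolved only among the ranks of that group.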

    // Get the ParallelComm instance
    ParallelComm* pcomm = new ParallelComm( mb, comm );
    int nprocs = pcomm->proc_config().proc_size();
    int rank = pcomm->proc_config().proc_rank();
#ifndef NDEBUG
    MPI_Comm rcomm = pcomm->proc_config().proc_comm();
    assert( rcomm == comm );
#endif
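    // Note: ParallelComm binds the MOAB instance to the communicator passed in,
    // so nprocs and rank here refer to the (possibly split) communicator, not
    // necessarily to MPI_COMM_WORLD.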
    if( 0 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    if( 1 == global_rank )
        cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs
             << " processors\n";

    MPI_Barrier( MPI_COMM_WORLD );

    if( 0 == global_rank )
        cout << "Reading file " << test_file_name << "\n with options: " << options << "\n on " << nprocs
             << " processors on " << nbComms << " communicator(s)\n";

    // Read the file with the specified options
    ErrorCode rval = mb->load_file( test_file_name.c_str(), 0, options.c_str() );MB_CHK_ERR( rval );

    Range shared_ents;
    // Get entities shared with all other processors
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );

    // Filter the shared entities by pstatus: NOT not-owned, i.e. keep only the entities this rank owns
    Range owned_entities;
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );

    int nums[4] = { 0 };  // to store the number of owned entities per dimension
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );
    vector< int > rbuf( nprocs * 4, 0 );
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
    // Print the stats gathered:
    if( 0 == global_rank )
    {
        for( int i = 0; i < nprocs; i++ )
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
    }

    // Now exchange 1 layer of ghost elements, using vertices as the bridge
    // (we could have done this as part of the read, using the PARALLEL_GHOSTS read option)
    rval = pcomm->exchange_ghost_cells( 3,  // int ghost_dim
                                        0,  // int bridge_dim
                                        1,  // int num_layers
                                        0,  // int addl_ents
                                        true );MB_CHK_ERR( rval );  // bool store_remote_handles
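
    // A sketch of the read-time alternative mentioned above (not executed here; see the
    // MOAB User's Guide for the exact option syntax). PARALLEL_GHOSTS=3.0.1 requests the
    // same ghosting (3D elements, vertex bridge, 1 layer) during load_file:
    //   options += ";PARALLEL_GHOSTS=3.0.1";
    //   rval = mb->load_file( test_file_name.c_str(), 0, options.c_str() );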

    // Repeat the reports, after ghost exchange
    shared_ents.clear();
    owned_entities.clear();
    rval = pcomm->get_shared_entities( -1, shared_ents );MB_CHK_ERR( rval );
    rval = pcomm->filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities );MB_CHK_ERR( rval );

    // Find out how many shared entities of each dimension are owned on this processor
    for( int i = 0; i < 4; i++ )
        nums[i] = (int)owned_entities.num_of_dimension( i );

    // Gather the statistics on processor 0
    MPI_Gather( nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm );
    if( 0 == global_rank )
    {
        cout << " \n\n After exchanging one ghost layer: \n";
        for( int i = 0; i < nprocs; i++ )
        {
            cout << " Shared, owned entities on proc " << i << ": " << rbuf[4 * i] << " verts, " << rbuf[4 * i + 1]
                 << " edges, " << rbuf[4 * i + 2] << " faces, " << rbuf[4 * i + 3] << " elements" << endl;
        }
    }

    delete mb;

    MPI_Finalize();
#else
    std::cout << " This example requires a MOAB build with MPI (and HDF5 to read .h5m files)\n";
#endif
    return 0;
}
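
A minimal way to build and run the example against an installed MOAB (the paths and the MESH_DIR value below are illustrative; MESH_DIR must be supplied as a preprocessor define because the code uses it for the default mesh path):

    mpicxx HelloParMOAB.cpp -o HelloParMOAB \
        -DMESH_DIR='"/path/to/MeshFiles/unittest"' \
        -I${MOAB_DIR}/include -L${MOAB_DIR}/lib -lMOAB
    mpiexec -np 4 ./HelloParMOAB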