ghost_thin_layers.cpp
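// Parallel test for ghost exchange across thin (one-element-wide) partitions:
// each test reads a 4-quad strip partitioned into 4 parts, requests ghost
// layers, and writes the resulting local database from every rank.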
#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "TestUtil.hpp"
#include "MBTagConventions.hpp"
#include <iostream>
#include <sstream>

// a file with 4 quads in a line, partitioned into 4 parts
std::string filename = TestDir + "unittest/io/ln4.h5m";

using namespace moab;

void test_correct_ghost()
{
    int nproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &nproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    // Get MOAB instance
    Interface* mb = new( std::nothrow ) Core;

    ErrorCode rval = MB_SUCCESS;

    // Get the ParallelComm instance
    ParallelComm* pcomm = new ParallelComm( mb, MPI_COMM_WORLD );

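    // Read options: READ_PART reads only this rank's part of the partitioned file,
    // PARALLEL_RESOLVE_SHARED_ENTS resolves entities shared across part boundaries,
    // and PARALLEL_GHOSTS=2.0.1 requests 1 layer of 2-dimensional ghost cells
    // bridged through vertices (ghost dim . bridge dim . number of layers).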
    char read_opts[] = "PARALLEL=READ_PART;PARALLEL_RESOLVE_SHARED_ENTS;PARTITION=PARALLEL_"
                       "PARTITION;PARALLEL_GHOSTS=2.0.1";
    rval             = mb->load_file( filename.c_str(), 0, read_opts );CHECK_ERR( rval );

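    // With 3 or more ranks the one-element-thick parts can leave the ghost/sharing
    // information incomplete; correct_thin_ghost_layers() repairs that sharing data
    // before additional ghost layers are requested below.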
    if( nproc >= 3 )
    {
        rval = pcomm->correct_thin_ghost_layers();CHECK_ERR( rval );
    }

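    // Request 2 layers of 2-dimensional ghost cells, bridged through vertices
    // (ghost_dim=2, bridge_dim=0, num_layers=2, addl_ents=0).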
    rval = pcomm->exchange_ghost_cells( 2, 0, 2, 0, true );CHECK_ERR( rval );  // true to store remote handles

    // write the database in serial, one file per rank
    std::ostringstream outfile;
    outfile << "testReadThin_n" << nproc << "." << rank << ".h5m";

    rval = mb->write_file( outfile.str().c_str() );  // each rank writes its whole local database
    CHECK_ERR( rval );
    delete mb;
}

void test_read_with_thin_ghost_layer()
{
    int nproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &nproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    moab::Core* mb = new moab::Core();

    ErrorCode rval = MB_SUCCESS;
    // Get the ParallelComm instance
    ParallelComm* pcomm = new ParallelComm( mb, MPI_COMM_WORLD );

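    // Same read options as above, plus PARALLEL_THIN_GHOST_LAYER, which lets the
    // parallel reader handle the thin-layer correction itself (compare with the
    // explicit correct_thin_ghost_layers() call in test_correct_ghost above).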
    char read_opts[] = "PARALLEL=READ_PART;PARALLEL_RESOLVE_SHARED_ENTS;PARTITION=PARALLEL_"
                       "PARTITION;PARALLEL_GHOSTS=2.0.1;PARALLEL_THIN_GHOST_LAYER;";
    rval             = mb->load_file( filename.c_str(), 0, read_opts );CHECK_ERR( rval );

    rval = pcomm->exchange_ghost_cells( 2, 0, 2, 0, true );CHECK_ERR( rval );  // true to store remote handles

    // write the database in serial, one file per rank
    std::ostringstream outfile;
    outfile << "testReadGhost_n" << nproc << "." << rank << ".h5m";

    rval = mb->write_file( outfile.str().c_str() );  // each rank writes its whole local database
    CHECK_ERR( rval );
    delete mb;
}

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    int nproc, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &nproc );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( nproc <= 3 )
    {
        std::cout << "Launch this test on at least 4 processes.\n";
        MPI_Finalize();
        return 0;
    }

    int result = 0;
    if( argc >= 2 ) filename = argv[1];  // to be able to test other files too

    result += RUN_TEST( test_read_with_thin_ghost_layer );
    result += RUN_TEST( test_correct_ghost );

    MPI_Finalize();
    return result;
}