Mesh Oriented datABase  (version 5.4.1)
Array-based unstructured mesh datastructure
CoupleMGen.cpp
Go to the documentation of this file.
00001 /*
00002  * Driver to test coupling online, without using IO hdf5 files
00003  * Will instantiate 2 different meshes, that cover the same domain; reports in the end the L
00004  * infinity norm of a field
00005  *
00006  * will report time to build the meshes, instantiate coupler, locate points and interpolate
00007  * M and K are options for number of parts in x and z directions
00008  *
00009  * partitions are ordered lexicographically, (MxNxK)
00010  *
00011  * it needs to be  np = MxNxK
00012  *
00013  * if M==K, then the partitions are perfectly aligned
00014  *
00015  * the second mesh  is ordered (KxNxM), so if you want them to not be perfectly aligned, make M and
00016  * K different
00017  *
00018  * for example, run with
00019  * M = 16, N=K=1, will verify slabs
00020  *
00021  * -b controls the number of elements in each partition
00022  *
00023  * example
00024  *
00025  *  mpiexec -np 16 CoupleMGen -K 4 -N 4
00026  *
00027  *  Right now, building this driver requires MOAB to be installed; building the coupler
00028  * without an installed MOAB is harder (you would need to
00029  *    add ../tools/mbcoupler, etc., to the include and library paths)
00030  *
00031  *
00032  */
00033 // MOAB includes
00034 #include "moab/ParallelComm.hpp"
00035 #include "MBParallelConventions.h"
00036 #include "moab/Core.hpp"
00037 #include "mbcoupler/Coupler.hpp"
00038 #include "moab_mpi.h"
00039 #include "mbcoupler/ElemUtil.hpp"
00040 #include "moab/MeshGeneration.hpp"
00041 #include "moab/ProgOptions.hpp"
00042 
00043 using namespace moab;
00044 using std::string;
00045 
/// Smooth analytic scalar field sampled at mesh vertices; the source mesh is
/// tagged with its values and the target mesh compares interpolated values
/// against it to compute the max (L-infinity) error.
double physField( double x, double y, double z )
{
    return sin( M_PI * x ) * cos( M_PI * y ) * sin( M_PI * z );
}
00053 
00054 int main( int argc, char* argv[] )
00055 {
00056     int proc_id = 0, size = 1;
00057 
00058     MPI_Init( &argc, &argv );
00059     MPI_Comm_rank( MPI_COMM_WORLD, &proc_id );
00060     MPI_Comm_size( MPI_COMM_WORLD, &size );
00061 
00062     Core mcore;
00063     Interface* mb = &mcore;
00064     EntityHandle fileset1, fileset2;  // for 2 different meshes
00065     MeshGeneration::BrickOpts opts;
00066     // default options
00067     opts.A = opts.B = opts.C = 1;
00068     opts.M = opts.N = opts.K = 1;
00069     opts.blockSize           = 4;
00070     opts.xsize = opts.ysize = opts.zsize = 1.;
00071     opts.ui                              = CartVect( 1., 0, 0. );
00072     opts.uj                              = CartVect( 0., 1., 0. );
00073     opts.uk                              = CartVect( 0., 0., 1. );
00074     opts.newMergeMethod = opts.quadratic = opts.keep_skins = opts.tetra = false;
00075     opts.adjEnts = opts.parmerge = false;
00076     opts.GL                      = 0;
00077 
00078     ProgOptions popts;
00079 
00080     popts.addOpt< int >( string( "blockSize,b" ), string( "Block size of mesh (default=4)" ), &opts.blockSize );
00081     popts.addOpt< int >( string( "xproc,M" ), string( "Number of processors in x dir (default=1)" ), &opts.M );
00082     popts.addOpt< int >( string( "yproc,N" ), string( "Number of processors in y dir (default=1)" ), &opts.N );
00083     popts.addOpt< int >( string( "zproc,K" ), string( "Number of processors in z dir (default=1)" ), &opts.K );
00084 
00085     popts.addOpt< int >( string( "xblocks,A" ), string( "Number of blocks on a task in x dir (default=2)" ), &opts.A );
00086     popts.addOpt< int >( string( "yblocks,B" ), string( "Number of blocks on a task in y dir (default=2)" ), &opts.B );
00087     popts.addOpt< int >( string( "zblocks,C" ), string( "Number of blocks on a task in x dir (default=2)" ), &opts.C );
00088 
00089     popts.addOpt< double >( string( "xsize,x" ), string( "Total size in x direction (default=1.)" ), &opts.xsize );
00090     popts.addOpt< double >( string( "ysize,y" ), string( "Total size in y direction (default=1.)" ), &opts.ysize );
00091     popts.addOpt< double >( string( "zsize,z" ), string( "Total size in z direction (default=1.)" ), &opts.zsize );
00092 
00093     popts.addOpt< void >( "newMerge,w", "use new merging method", &opts.newMergeMethod );
00094 
00095     popts.addOpt< void >( "quadratic,q", "use hex 27 elements", &opts.quadratic );
00096 
00097     popts.addOpt< void >( "keep_skins,k", "keep skins with shared entities", &opts.keep_skins );
00098 
00099     popts.addOpt< void >( "tetrahedrons,t", "generate tetrahedrons", &opts.tetra );
00100 
00101     popts.addOpt< void >( "faces_edges,f", "create all faces and edges", &opts.adjEnts );
00102 
00103     popts.addOpt< int >( string( "ghost_layers,g" ), string( "Number of ghost layers (default=0)" ), &opts.GL );
00104 
00105     popts.addOpt< void >( "parallel_merge,p", "use parallel mesh merge, not vertex ID based merge", &opts.parmerge );
00106 
00107     Coupler::Method method = Coupler::LINEAR_FE;
00108 
00109     double toler = 1.e-6;
00110     popts.addOpt< double >( string( "eps,e" ), string( "tolerance for coupling, used in locating points" ), &toler );
00111 
00112     bool writeMeshes = false;
00113     popts.addOpt< void >( "print,p", "write meshes", &writeMeshes );
00114 
00115     popts.parseCommandLine( argc, argv );
00116 
00117     double start_time = MPI_Wtime();
00118 
00119     ErrorCode rval = mb->create_meshset( MESHSET_SET, fileset1 );MB_CHK_ERR( rval );
00120     rval = mb->create_meshset( MESHSET_SET, fileset2 );MB_CHK_ERR( rval );
00121 
00122     ParallelComm* pc1     = new ParallelComm( mb, MPI_COMM_WORLD );
00123     MeshGeneration* mgen1 = new MeshGeneration( mb, pc1, fileset1 );
00124 
00125     rval = mgen1->BrickInstance( opts );MB_CHK_ERR( rval );  // this will generate first mesh on fileset1
00126 
00127     double instance_time = MPI_Wtime();
00128     double current       = instance_time;
00129     if( !proc_id ) std::cout << " instantiate first mesh " << instance_time - start_time << "\n";
00130     // set an interpolation tag on source mesh, from phys field
00131     std::string interpTag( "interp_tag" );
00132     Tag tag;
00133     rval = mb->tag_get_handle( interpTag.c_str(), 1, MB_TYPE_DOUBLE, tag, MB_TAG_CREAT | MB_TAG_DENSE );MB_CHK_ERR( rval );
00134 
00135     Range src_elems;
00136     rval = pc1->get_part_entities( src_elems, 3 );MB_CHK_ERR( rval );
00137     Range src_verts;
00138     rval = mb->get_connectivity( src_elems, src_verts );MB_CHK_ERR( rval );
00139     for( Range::iterator vit = src_verts.begin(); vit != src_verts.end(); ++vit )
00140     {
00141         EntityHandle vert = *vit;  //?
00142 
00143         double vertPos[3];
00144         mb->get_coords( &vert, 1, vertPos );
00145 
00146         double fieldValue = physField( vertPos[0], vertPos[1], vertPos[2] );
00147 
00148         rval = mb->tag_set_data( tag, &vert, 1, &fieldValue );MB_CHK_ERR( rval );
00149     }
00150 
00151     double setTag_time = MPI_Wtime();
00152     if( !proc_id ) std::cout << " set tag " << setTag_time - current;
00153     current = instance_time;
00154     // change some options, so it is a different mesh
00155     int tmp1   = opts.K;
00156     opts.K     = opts.M;
00157     opts.M     = tmp1;  // swap (opts.K, opts.M)
00158     opts.tetra = !opts.tetra;
00159     opts.blockSize++;
00160 
00161     ParallelComm* pc2     = new ParallelComm( mb, MPI_COMM_WORLD );
00162     MeshGeneration* mgen2 = new MeshGeneration( mb, pc2, fileset2 );
00163 
00164     rval = mgen2->BrickInstance( opts );MB_CHK_ERR( rval );  // this will generate second mesh on fileset2
00165 
00166     double instance_second = MPI_Wtime();
00167     if( !proc_id ) std::cout << " instance second mesh" << instance_second - current << "\n";
00168     current = instance_second;
00169 
00170     // test the sets are fine
00171     if( writeMeshes )
00172     {
00173         rval = mb->write_file( "mesh1.h5m", 0, ";;PARALLEL=WRITE_PART;CPUTIME;PARALLEL_COMM=0;", &fileset1, 1 );MB_CHK_SET_ERR( rval, "Can't write in parallel mesh 1" );
00174         rval = mb->write_file( "mesh2.h5m", 0, ";;PARALLEL=WRITE_PART;CPUTIME;PARALLEL_COMM=1;", &fileset2, 1 );MB_CHK_SET_ERR( rval, "Can't write in parallel mesh 1" );
00175         double write_files = MPI_Wtime();
00176         if( !proc_id ) std::cout << " write files " << write_files - current << "\n";
00177         current = write_files;
00178     }
00179 
00180     // Instantiate a coupler, which also initializes the tree
00181     Coupler mbc( mb, pc1, src_elems, 0 );
00182 
00183     double instancecoupler = MPI_Wtime();
00184     if( !proc_id ) std::cout << " instance coupler " << instancecoupler - current << "\n";
00185     current = instancecoupler;
00186 
00187     // Get points from the target mesh to interpolate
00188     // We have to treat differently the case when the target is a spectral mesh
00189     // In that case, the points of interest are the GL points, not the vertex nodes
00190     std::vector< double > vpos;  // This will have the positions we are interested in
00191     int numPointsOfInterest = 0;
00192 
00193     Range targ_elems;
00194     Range targ_verts;
00195 
00196     // First get all vertices adj to partition entities in target mesh
00197     rval = pc2->get_part_entities( targ_elems, 3 );MB_CHK_ERR( rval );
00198 
00199     rval = mb->get_adjacencies( targ_elems, 0, false, targ_verts, Interface::UNION );MB_CHK_ERR( rval );
00200     Range tmp_verts;
00201     // Then get non-owned verts and subtract
00202     rval = pc2->get_pstatus_entities( 0, PSTATUS_NOT_OWNED, tmp_verts );MB_CHK_ERR( rval );
00203     targ_verts = subtract( targ_verts, tmp_verts );
00204     // get position of these entities; these are the target points
00205     numPointsOfInterest = (int)targ_verts.size();
00206     vpos.resize( 3 * targ_verts.size() );
00207     rval = mb->get_coords( targ_verts, &vpos[0] );MB_CHK_ERR( rval );
00208     // Locate those points in the source mesh
00209     // std::cout<<"rank "<< proc_id<< " points of interest: " << numPointsOfInterest << "\n";
00210     rval = mbc.locate_points( &vpos[0], numPointsOfInterest, 0, toler );MB_CHK_ERR( rval );
00211 
00212     double locatetime = MPI_Wtime();
00213     if( !proc_id ) std::cout << " locate points: " << locatetime - current << "\n";
00214     current = locatetime;
00215 
00216     // Now interpolate tag onto target points
00217     std::vector< double > field( numPointsOfInterest );
00218 
00219     rval = mbc.interpolate( method, interpTag, &field[0] );MB_CHK_ERR( rval );
00220 
00221     // compare with the actual phys field
00222     double err_max = 0;
00223     for( int i = 0; i < numPointsOfInterest; i++ )
00224     {
00225         double trval = physField( vpos[3 * i], vpos[3 * i + 1], vpos[3 * i + 2] );
00226         double err2  = fabs( trval - field[i] );
00227         if( err2 > err_max ) err_max = err2;
00228     }
00229 
00230     double interpolateTime = MPI_Wtime();
00231     if( !proc_id ) std::cout << " interpolate points: " << interpolateTime - current << "\n";
00232     current = interpolateTime;
00233 
00234     double gerr;
00235     MPI_Allreduce( &err_max, &gerr, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
00236     if( 0 == proc_id ) std::cout << "max err  " << gerr << "\n";
00237 
00238     delete mgen1;
00239     delete mgen2;
00240 
00241     MPI_Finalize();
00242 
00243     return 0;
00244 }
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines