// MOAB: Mesh Oriented datABase (version 5.4.1)
// Array-based unstructured mesh datastructure
00001 /** @example GenLargeMesh.cpp \n
00002 * \brief Create a large structured mesh, partitioned \n
00003 *
00004 * It shows how to create a mesh on the fly, on multiple processors
00005 * Each processor will create its version of a block mesh, partitioned
00006 * as AxBxC blocks. Each block will be with blockSize^3 hexahedrons, and
00007 * will get a different PARALLEL_PARTITION tag
00008 *
00009 * The number of tasks will be MxNxK, and it must match the mpi size
00010 * Each task p will generate its mesh at location (m,n,k), and it is
00011 * lexicographic ordering: rank = m + n * M + k * M * N
00012 *
00013 * By default M=1, N=1, K=1, so by default it should be launched on 1 proc
00014 * By default, blockSize is 4, and A=2, B=2, C=2, so each task will generate locally
00015 * blockSize^3 x A x B x C hexahedrons (value = 64x8 = 512 hexas, in 8 partitions)
00016  * (if -t, multiply by 6 for total number of cells/tets)
00017 * The total number of partitions will be A*B*C*M*N*K (default 8)
00018 *
00019 * Each part in partition will get a proper tag, and the numbering is in
00020 * lexicographic ordering; x direction varies first, then y, then z.
00021 * The same principle is used for global id numbering of the nodes and cells.
00022 * (x varies first)
00023 *
00024 * The vertices will get a proper global id, which will be used to resolve the
00025 * shared entities
00026 * The output will be written in parallel, and we will try sizes as big as we can
00027 * (up to a billion vertices, because we use int for global ids)
00028 *
00029 * Within each partition, the hexas entity handles will be contiguous, and also the
00030 * vertices; The global id will be determined easily, for a vertex, but the entity
00031 * handle space will be more interesting to handle, within a partition (we want
00032 * contiguous handles within a partition). After merging the vertices, some fragmentation
00033 * will occur in the vertices handle space, within each partition.
00034 *
00035 * To run: ./GenLargeMesh
00036 *
00037 * When launched on more procs, you have to make sure
00038 * num procs = M*N*K
00039 *
00040 * So you can launch with
00041 * mpiexec -np 8 ./GenLargeMesh -M 2 -N 2 -K 2
00042 *
00043 * We also added -q option; it works now only for hexa mesh, it will generate
00044 * quadratic hex27 elements
00045 *
00046  * -t option will generate tetrahedrons instead of hexahedra. Each hexahedron is
00047 * decomposed into 6 tetrahedrons.
00048 *
00049 * -f option will also generate all edges and faces in the model.
00050 * -w will use a newer merging method locally. Merging is necessary to merge
00051 * vertices on the local task, and the new method does not use a searching tree,
00052 * but rather the global id set on the vertices in a consistent manner
00053 *
00054 * -d and -i options can be used to add some artificial tags on the model;
00055 * you can have multiple -d and -i options; -i will set an integer
00056  * tag with name tag_name on the vertices; -d <tag_name2> will generate
00057 * double tags on cells (3d elements). You can have multiple tags, like
00058 * -i tag1 -i tag2 -i tag3 -d tag4
00059 *
00060 * -x, -y, -z options will control the geometric dimensions of the final mesh, in
00061 * x, y and z directions.
00062 *
00063 * -o controls the name of the output file; it needs to have extension h5m,
00064 * because the file is written in parallel.
00065 *
00066 * -k will keep the edges and faces that are generated as part of resolving shared entities
00067 * (by default these edges and faces are removed); when -f option is used, the -k option is
00068 * enabled too (so no faces and edges are deleted)
00069 *
00070 */
00071
#include "moab/Core.hpp"
#include "moab/ProgOptions.hpp"
#include "moab/MeshGeneration.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#endif

// NOTE(review): the doxygen extraction dropped the header names from the three
// bare #include lines; these are the standard headers the code below uses
// (cout/endl, clock/CLOCKS_PER_SEC, string, vector) — confirm against the
// original source.
#include <iostream>
#include <ctime>
#include <string>
#include <vector>
00084 using namespace moab;
00085 using namespace std;
00086
00087 int main( int argc, char** argv )
00088 {
00089
00090 bool nosave = false;
00091
00092 #ifdef MOAB_HAVE_MPI
00093 MPI_Init( &argc, &argv );
00094 #endif
00095 MeshGeneration::BrickOpts bopts;
00096 // default options
00097 bopts.A = bopts.B = bopts.C = 2;
00098 bopts.M = bopts.N = bopts.K = 1;
00099 bopts.blockSize = 4;
00100 bopts.xsize = bopts.ysize = bopts.zsize = 1.;
00101 bopts.ui = CartVect( 1., 0, 0. );
00102 bopts.uj = CartVect( 0., 1., 0. );
00103 bopts.uk = CartVect( 0., 0., 1. );
00104 bopts.newMergeMethod = bopts.quadratic = bopts.keep_skins = bopts.tetra = false;
00105 bopts.adjEnts = bopts.parmerge = false;
00106 bopts.GL = 0;
00107
00108 ProgOptions opts;
00109
00110 opts.addOpt< int >( string( "blockSize,b" ), string( "Block size of mesh (default=4)" ), &bopts.blockSize );
00111 opts.addOpt< int >( string( "xproc,M" ), string( "Number of processors in x dir (default=1)" ), &bopts.M );
00112 opts.addOpt< int >( string( "yproc,N" ), string( "Number of processors in y dir (default=1)" ), &bopts.N );
00113 opts.addOpt< int >( string( "zproc,K" ), string( "Number of processors in z dir (default=1)" ), &bopts.K );
00114
00115 opts.addOpt< int >( string( "xblocks,A" ), string( "Number of blocks on a task in x dir (default=2)" ), &bopts.A );
00116 opts.addOpt< int >( string( "yblocks,B" ), string( "Number of blocks on a task in y dir (default=2)" ), &bopts.B );
00117 opts.addOpt< int >( string( "zblocks,C" ), string( "Number of blocks on a task in x dir (default=2)" ), &bopts.C );
00118
00119 opts.addOpt< double >( string( "xsize,x" ), string( "Total size in x direction (default=1.)" ), &bopts.xsize );
00120 opts.addOpt< double >( string( "ysize,y" ), string( "Total size in y direction (default=1.)" ), &bopts.ysize );
00121 opts.addOpt< double >( string( "zsize,z" ), string( "Total size in z direction (default=1.)" ), &bopts.zsize );
00122
00123 opts.addOpt< void >( "newMerge,w", "use new merging method", &bopts.newMergeMethod );
00124
00125 opts.addOpt< void >( "quadratic,q", "use hex 27 elements", &bopts.quadratic );
00126
00127 opts.addOpt< void >( "keep_skins,k", "keep skins with shared entities", &bopts.keep_skins );
00128
00129 opts.addOpt< void >( "tetrahedrons,t", "generate tetrahedrons", &bopts.tetra );
00130
00131 opts.addOpt< void >( "faces_edges,f", "create all faces and edges", &bopts.adjEnts );
00132
00133 opts.addOpt< int >( string( "ghost_layers,g" ), string( "Number of ghost layers (default=0)" ), &bopts.GL );
00134
00135 vector< string > intTagNames;
00136 string firstIntTag;
00137 opts.addOpt< string >( "int_tag_vert,i", "add integer tag on vertices", &firstIntTag );
00138
00139 vector< string > doubleTagNames;
00140 string firstDoubleTag;
00141 opts.addOpt< string >( "double_tag_cell,d", "add double tag on cells", &firstDoubleTag );
00142
00143 string outFileName = "GenLargeMesh.h5m";
00144 opts.addOpt< string >( "outFile,o", "Specify the output file name string (default GenLargeMesh.h5m)",
00145 &outFileName );
00146
00147 #ifdef MOAB_HAVE_HDF5_PARALLEL
00148 bool readb = false;
00149 opts.addOpt< void >( "readback,r", "read back the generated mesh", &readb );
00150
00151 bool readAndGhost = false;
00152 opts.addOpt< void >( "readAndGhost,G", "read back the generated mesh and ghost one layer", &readAndGhost );
00153 #endif
00154
00155 opts.addOpt< void >( "parallel_merge,p", "use parallel mesh merge, not vertex ID based merge", &bopts.parmerge );
00156
00157 opts.addOpt< void >( "no_save,n", "do not save the file", &nosave );
00158
00159 opts.parseCommandLine( argc, argv );
00160
00161 opts.getOptAllArgs( "int_tag_vert,i", intTagNames );
00162 opts.getOptAllArgs( "double_tag_cell,d", doubleTagNames );
00163
00164 if( bopts.adjEnts ) bopts.keep_skins = true; // Do not delete anything
00165
00166 Interface* mb = new( std::nothrow ) Core;
00167 if( NULL == mb )
00168 {
00169 #ifdef MOAB_HAVE_MPI
00170 MPI_Finalize();
00171 #endif
00172 return 1;
00173 }
00174
00175 int rank = 0, size = 1;
00176
00177 #ifdef MOAB_HAVE_MPI
00178 MPI_Comm_rank( MPI_COMM_WORLD, &rank );
00179 MPI_Comm_size( MPI_COMM_WORLD, &size );
00180 #endif
00181
00182 EntityHandle fileset;
00183 ErrorCode rval = mb->create_meshset( MESHSET_SET, fileset );MB_CHK_ERR( rval );
00184 #ifdef MOAB_HAVE_MPI
00185 ParallelComm* pc = new ParallelComm( mb, MPI_COMM_WORLD );
00186 MeshGeneration* mgen = new MeshGeneration( mb, pc, fileset );
00187 #else
00188 MeshGeneration* mgen = new MeshGeneration( mb, 0, fileset );
00189 #endif
00190
00191 clock_t tt = clock();
00192
00193 rval = mgen->BrickInstance( bopts );MB_CHK_ERR( rval );
00194
00195 Range all3dcells;
00196 rval = mb->get_entities_by_dimension( fileset, 3, all3dcells );MB_CHK_ERR( rval );
00197
00198 if( 0 == rank )
00199 {
00200 cout << "generate local mesh: " << ( clock() - tt ) / (double)CLOCKS_PER_SEC << " seconds" << endl;
00201 tt = clock();
00202 cout << "number of elements on rank 0: " << all3dcells.size() << endl;
00203 cout << "Total number of elements " << all3dcells.size() * size << endl;
00204 cout << "Element type: " << ( bopts.tetra ? "MBTET" : "MBHEX" )
00205 << " order:" << ( bopts.quadratic ? "quadratic" : "linear" ) << endl;
00206 }
00207 Range verts;
00208 rval = mb->get_entities_by_dimension( 0, 0, verts );MB_CHK_SET_ERR( rval, "Can't get all vertices" );
00209
00210 if( !nosave )
00211 {
00212 #ifdef MOAB_HAVE_HDF5_PARALLEL
00213 rval = mb->write_file( outFileName.c_str(), 0, ";;PARALLEL=WRITE_PART;CPUTIME;", &fileset, 1 );MB_CHK_SET_ERR( rval, "Can't write in parallel" );
00214 #else
00215 // should be a vtk file, actually, maybe make sure of that
00216 rval = mb->write_file( outFileName.c_str(), 0, "", &fileset, 1 );MB_CHK_SET_ERR( rval, "Can't write in serial" );
00217 #endif
00218 if( 0 == rank )
00219 {
00220 cout << "write file " << outFileName << " in " << ( clock() - tt ) / (double)CLOCKS_PER_SEC << " seconds"
00221 << endl;
00222 tt = clock();
00223 }
00224 }
00225 // delete the mesh that we already have in-memory
00226 size_t nLocalVerts = verts.size();
00227 size_t nLocalCells = all3dcells.size();
00228
00229 mb->delete_mesh();
00230
00231 #ifdef MOAB_HAVE_HDF5_PARALLEL
00232 if( !nosave && readb )
00233 {
00234 // now recreate a core instance and load the file we just wrote out to verify
00235 Core mb2;
00236 std::string read_opts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_"
00237 "SHARED_ENTS;CPUTIME;" );
00238 if( readAndGhost ) read_opts += "PARALLEL_GHOSTS=3.0.1;";
00239 rval = mb2.load_file( outFileName.c_str(), 0, read_opts.c_str() );MB_CHK_SET_ERR( rval, "Can't read in parallel" );
00240 if( 0 == rank )
00241 {
00242 cout << "read back file " << outFileName << " with options: \n"
00243 << read_opts << " in " << ( clock() - tt ) / (double)CLOCKS_PER_SEC << " seconds" << endl;
00244 tt = clock();
00245 }
00246 moab::Range nverts, ncells;
00247 rval = mb2.get_entities_by_dimension( 0, 0, nverts );MB_CHK_SET_ERR( rval, "Can't get all vertices" );
00248 rval = mb2.get_entities_by_dimension( 0, 3, ncells );MB_CHK_SET_ERR( rval, "Can't get all 3d cells elements" );
00249
00250 if( readAndGhost && size > 1 )
00251 {
00252 // filter out the ghost nodes and elements, for comparison with original mesh
00253 // first get the parallel comm
00254 ParallelComm* pcomm2 = ParallelComm::get_pcomm( &mb2, 0 );
00255 if( NULL == pcomm2 ) MB_SET_ERR( MB_FAILURE, "can't get parallel comm." );
00256 rval = pcomm2->filter_pstatus( nverts, PSTATUS_GHOST, PSTATUS_NOT );MB_CHK_SET_ERR( rval, "Can't filter ghost vertices" );
00257 rval = pcomm2->filter_pstatus( ncells, PSTATUS_GHOST, PSTATUS_NOT );MB_CHK_SET_ERR( rval, "Can't filter ghost cells" );
00258 }
00259 if( nverts.size() != nLocalVerts && ncells.size() != nLocalCells )
00260 {
00261 MB_SET_ERR( MB_FAILURE, "Reading back the output file led to inconsistent number of entities." );
00262 }
00263
00264 // delete the mesh that we already have in-memory
00265 mb2.delete_mesh();
00266 }
00267 #endif
00268
00269 #ifdef MOAB_HAVE_MPI
00270 MPI_Finalize();
00271 #endif
00272 return 0;
00273 }