Mesh Oriented datABase
(version 5.4.1)
Array-based unstructured mesh datastructure
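The listing below is the source of a MOAB mesh quality checker built on the Verdict library. It loads a mesh file (reading in parallel when compiled with MPI and run on more than one rank), forces creation of the faces and edges adjacent to the 3D cells, and then, for each entity type, computes every quality measure Verdict supports for that type and prints the global minimum and maximum of each. When given a second argument (serial runs only), it also writes one CSV line of quality values per entity.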
#include <iostream>
#include <cstdlib>
#include <vector>
#include <map>
#include <string>
#include <cstdio>
#include <iomanip>
#include <fstream>
#include "moab/MOABConfig.h"
#ifdef MOAB_HAVE_MPI
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#endif
#if !defined( _MSC_VER ) && !defined( __MINGW32__ )
#include <termios.h>
#include <sys/ioctl.h>
#endif
#include <cmath>
#include <cassert>
#include <cfloat>

#include "moab/Core.hpp"
#include "moab/Range.hpp"
#include "MBTagConventions.hpp"
#include "moab/Interface.hpp"

#include "moab/verdict/VerdictWrapper.hpp"

using namespace moab;

static void print_usage( const char* name, std::ostream& stream )
{
    stream << "Usage: " << name << " <input_file> [<output_file>]" << std::endl;
}

int main( int argc, char* argv[] )
{
    int proc_id = 0, size = 1;
#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &proc_id );
    MPI_Comm_size( MPI_COMM_WORLD, &size );
#endif
    if( argc < 2 )
    {
        // every rank must bail out when no input file is given, not just the
        // root; only the root prints the usage message
        if( 0 == proc_id ) print_usage( argv[0], std::cerr );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }
    Core mb;
    std::string read_options;
    if( size > 1 ) read_options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";

    if( size > 1 && argc > 2 )
    {
        std::cerr << " cannot use verbose option in parallel \n";
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }

    char* out_file = NULL;
    std::ofstream ofile;

    if( argc == 3 )
    {
        std::cout << " write verbose output to a CSV file " << argv[2] << "\n";
        out_file = argv[2];
        ofile.open( out_file );
    }
    if( MB_SUCCESS != mb.load_file( argv[1], 0, read_options.c_str() ) )
    {
        fprintf( stderr, "Error reading file: %s\n", argv[1] );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }

    VerdictWrapper vw( &mb );
    vw.set_size( 1. );  // for relative size measures; maybe it should be user input
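    // Inventory phase: collect everything in the root set, then create the
    // missing faces and edges adjacent to the 3D cells (third argument of
    // get_adjacencies set to true) so 1D and 2D entities are rated as well.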
    Range entities;
    ErrorCode rval = mb.get_entities_by_handle( 0, entities );
    if( MB_SUCCESS != rval )
    {
        fprintf( stderr, "Error getting entities from file %s\n", argv[1] );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }
    // get all edges and faces, force them to be created
    Range allfaces, alledges;
    Range cells = entities.subset_by_dimension( 3 );
    rval        = mb.get_adjacencies( cells, 2, true, allfaces, Interface::UNION );
    if( MB_SUCCESS != rval )
    {
        fprintf( stderr, "Error getting all faces\n" );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }

    rval = mb.get_adjacencies( allfaces, 1, true, alledges, Interface::UNION );
    if( MB_SUCCESS != rval )
    {
        fprintf( stderr, "Error getting all edges\n" );
#ifdef MOAB_HAVE_MPI
        MPI_Finalize();
#endif
        return 1;
    }
    entities.merge( allfaces );
    entities.merge( alledges );
    // walk the entity types from high dimension to low; types with no Verdict
    // quality measure are skipped
    for( EntityType et = MBENTITYSET; et >= MBEDGE; et-- )
    {
        int num_qualities = vw.num_qualities( et );
        if( !num_qualities ) continue;
        Range owned = entities.subset_by_type( et );
        std::map< QualityType, double > qualities, minq, maxq;
        int ne_local  = (int)owned.size();
        int ne_global = ne_local;

#ifdef MOAB_HAVE_MPI
        int mpi_err;
        if( size > 1 )
        {
            // filter the entities not owned, so we do not process them more than once
            ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
            Range current       = owned;
            owned.clear();
            rval = pcomm->filter_pstatus( current, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned );
            if( rval != MB_SUCCESS )
            {
                MPI_Finalize();
                return 1;
            }
            ne_local = (int)owned.size();
            mpi_err  = MPI_Reduce( &ne_local, &ne_global, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
            if( mpi_err )
            {
                MPI_Finalize();
                return 1;
            }
        }
#endif
        if( ne_global > 0 )
        {
            if( ne_local > 0 )
            {
                // rate the first owned entity to discover which qualities apply,
                // then seed the running min/max maps with its values
                Range::iterator it = owned.begin();
                rval               = vw.all_quality_measures( *it, qualities );
                if( MB_SUCCESS != rval )
                {
                    fprintf( stderr, "Error getting quality for entity type %d with id %ld \n", et,
                             (long)mb.id_from_handle( *it ) );
#ifdef MOAB_HAVE_MPI
                    MPI_Finalize();
#endif
                    return 1;
                }
                if( ofile.is_open() )
                {
                    // write first a header for this entity type, then the first values, separated by
                    // commas
                    ofile << " There are " << ne_local << " entities of type " << vw.entity_type_name( et ) << " with "
                          << qualities.size() << " qualities:\n"
                          << " Entity id ";
                    for( std::map< QualityType, double >::iterator qit = qualities.begin(); qit != qualities.end();
                         ++qit )
                    {
                        ofile << ", " << vw.quality_name( qit->first );
                    }
                    ofile << "\n";
                    ofile << mb.id_from_handle( *it );
                    for( std::map< QualityType, double >::iterator qit = qualities.begin(); qit != qualities.end();
                         ++qit )
                    {
                        ofile << ", " << qit->second;
                    }
                    ofile << "\n";
                }
                minq = qualities;
                maxq = qualities;
                ++it;
                for( ; it != owned.end(); ++it )
                {
                    rval = vw.all_quality_measures( *it, qualities );
                    if( MB_SUCCESS != rval )
                    {
                        fprintf( stderr, "Error getting quality for entity type %d with id %ld \n", et,
                                 (long)mb.id_from_handle( *it ) );
#ifdef MOAB_HAVE_MPI
                        MPI_Finalize();
#endif
                        return 1;
                    }
                    if( ofile.is_open() )
                    {
                        ofile << mb.id_from_handle( *it );
                        for( std::map< QualityType, double >::iterator qit = qualities.begin(); qit != qualities.end();
                             ++qit )
                        {
                            ofile << ", " << qit->second;
                        }
                        ofile << "\n";
                    }
                    std::map< QualityType, double >::iterator minit = minq.begin();
                    std::map< QualityType, double >::iterator maxit = maxq.begin();
                    for( std::map< QualityType, double >::iterator mit = qualities.begin(); mit != qualities.end();
                         ++mit, ++minit, ++maxit )
                    {
                        if( mit->second > maxit->second ) maxit->second = mit->second;
                        if( mit->second < minit->second ) minit->second = mit->second;
                    }
                }
            }
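            // Reporting phase: print a header for this entity type, then
            // reduce the per-rank extremes to global min/max on the root rank;
            // a serial build simply copies the local values.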
\n", et, 00196 (long)mb.id_from_handle( *it ) ); 00197 #ifdef MOAB_HAVE_MPI 00198 MPI_Finalize(); 00199 #endif 00200 return 1; 00201 } 00202 if( ofile.is_open() ) 00203 { 00204 ofile << mb.id_from_handle( *it ); 00205 for( std::map< QualityType, double >::iterator qit = qualities.begin(); qit != qualities.end(); 00206 ++qit ) 00207 { 00208 ofile << ", " << qit->second; 00209 } 00210 ofile << "\n"; 00211 } 00212 std::map< QualityType, double >::iterator minit = minq.begin(); 00213 std::map< QualityType, double >::iterator maxit = maxq.begin(); 00214 for( std::map< QualityType, double >::iterator mit = qualities.begin(); mit != qualities.end(); 00215 ++mit, ++minit, ++maxit ) 00216 { 00217 if( mit->second > maxit->second ) maxit->second = mit->second; 00218 if( mit->second < minit->second ) minit->second = mit->second; 00219 } 00220 } 00221 } 00222 if( 0 == proc_id ) 00223 { 00224 std::cout << " \n\n " << ne_global << " entities of type " << vw.entity_type_name( et ) << "\n"; 00225 std::cout << std::setw( 30 ) << "Quality Name" << std::setw( 15 ) << " MIN" << std::setw( 15 ) 00226 << " MAX" 00227 << "\n"; 00228 } 00229 00230 QualityType quality_type = MB_EDGE_RATIO; 00231 for( int i = 0; i < num_qualities; i++, quality_type = (QualityType)( quality_type + 1 ) ) 00232 { 00233 while( !( vw.possible_quality( et, quality_type ) ) && quality_type < MB_QUALITY_COUNT ) 00234 quality_type = (QualityType)( quality_type + 1 ); // will get them in order 00235 const char* name_q = vw.quality_name( quality_type ); 00236 double local_min, global_min; 00237 double local_max, global_max; 00238 if( ne_local > 0 ) 00239 { 00240 local_min = minq[quality_type]; 00241 local_max = maxq[quality_type]; 00242 } 00243 else 00244 { 00245 local_min = 1.e38; // so this task has no entities of this type 00246 local_max = -1.e38; // it can get here only in parallel 00247 } 00248 #ifdef MOAB_HAVE_MPI 00249 mpi_err = MPI_Reduce( &local_min, &global_min, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD ); 00250 if( mpi_err ) 00251 { 00252 MPI_Finalize(); 00253 return 1; 00254 } 00255 mpi_err = MPI_Reduce( &local_max, &global_max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD ); 00256 if( mpi_err ) 00257 { 00258 MPI_Finalize(); 00259 return 1; 00260 } 00261 #else 00262 global_min = local_min; 00263 global_max = local_max; 00264 #endif 00265 if( 0 == proc_id ) 00266 { 00267 std::cout << std::setw( 30 ) << name_q << std::setw( 15 ) << global_min << std::setw( 15 ) 00268 << global_max << "\n"; 00269 } 00270 } 00271 } 00272 } // etype 00273 if( ofile.is_open() ) 00274 { 00275 ofile.close(); 00276 } 00277 #ifdef MOAB_HAVE_MPI 00278 MPI_Finalize(); 00279 #endif 00280 return 0; 00281 }