MOAB: Mesh Oriented datABase
(version 5.3.1)
/** test of ParallelComm functionality
 *
 * To run:
 *
 * mpirun -np <#procs> mbparallelcomm_test
 *
 */

#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"
#include "MBTagConventions.hpp"
#include "moab/Core.hpp"
#include "ScdVertexData.hpp"
#include "StructuredElementSeq.hpp"
#include "SequenceManager.hpp"
#include "moab/Error.hpp"
#include "moab_mpi.h"
#include <iostream>
#include <sstream>
#include <cassert>
#include <cstring>  // strcmp
#include <cstdlib>  // strtol

#define REALTFI 1

const bool debug = false;

using namespace moab;

#define ERROR( a, b )                        \
    {                                        \
        std::cerr << ( a ) << std::endl;     \
        return b;                            \
    }

#define PRINT_LAST_ERROR                                   \
    {                                                      \
        std::string last_error;                            \
        result = mbImpl->get_last_error( last_error );     \
        if( last_error.empty() )                           \
            std::cerr << "(none)" << std::endl;            \
        else                                               \
            std::cerr << last_error << std::endl;          \
    }

#define RRA( a )                       \
    if( MB_SUCCESS != result )         \
    {                                  \
        std::cerr << ( a );            \
        return result;                 \
    }

ErrorCode create_linear_mesh( Interface* mbImpl, int N, int M, int& nshared );

ErrorCode create_scd_mesh( Interface* mbImpl, int IJK, int& nshared );

ErrorCode read_file( Interface* mbImpl, std::vector< std::string >& filenames, const char* tag_name, int tag_val,
                     int distrib, int parallel_option, int resolve_shared, int with_ghosts, int use_mpio,
                     bool print_parallel );

ErrorCode test_packing( Interface* mbImpl, const char* filename );

ErrorCode report_nsets( Interface* mbImpl );

ErrorCode report_iface_ents( Interface* mbImpl, std::vector< ParallelComm* >& pcs );

void print_usage( const char* );

int main( int argc, char** argv )
{
    // need to init MPI first, to tell how many procs and rank
    MPI_Init( &argc, &argv );

    int nprocs, rank;
    MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    // start time
    double stime = 0, rtime = 0, dtime = 0, ltime = 0;
    if( 0 == rank ) stime = MPI_Wtime();

    // create MOAB instance based on that
    Interface* mbImpl = new Core;
    if( NULL == mbImpl ) return 1;

    ErrorCode result = MB_SUCCESS;

    // each interior proc has a vector of N+M vertices, sharing
    // M vertices each with lower- and upper-rank processors, except
    // procs on the end

    // get N, M from command line
    if( argc < 3 )
    {
        if( 0 == rank ) print_usage( argv[0] );
        MPI_Finalize();
        return 1;
    }

    int npos = 1, tag_val, distrib, with_ghosts = 1, resolve_shared = 1, use_mpio = 0;
    bool print_parallel = false;
    const char* tag_name;
    std::vector< std::string > filenames;
    int parallel_option = 0;
    int num_files;

    if( !strcmp( argv[npos], "-p" ) ) print_parallel = true;

    while( npos != argc )
    {
        ErrorCode tmp_result;
        int this_opt = strtol( argv[npos++], NULL, 0 );
        switch( this_opt )
        {
            case 0:
            case -1:
            case -2:
            case -3:
                parallel_option = this_opt;
                continue;

            case 3:
                // read a file in parallel from the filename on the command line
                tag_name = "MATERIAL_SET";
                tag_val = -1;
                num_files = strtol( argv[npos++], NULL, 0 );
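                // The remaining arguments for option 3 are, in order:
                // <# files> <file_names...> [tag_name] [tag_val] [distribute]
                // [resolve_shared] [with_ghosts] [use_mpio]; any value not given
                // keeps the default set above (see print_usage below).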
                if( 0 == num_files )
                {
                    if( 0 == rank ) print_usage( argv[0] );
                    MPI_Finalize();
                    return 1;
                }
                while( num_files-- && npos < argc )
                    filenames.push_back( std::string( argv[npos++] ) );
                if( npos < argc ) tag_name = argv[npos++];
                if( npos < argc ) tag_val = strtol( argv[npos++], NULL, 0 );
                if( npos < argc )
                    distrib = strtol( argv[npos++], NULL, 0 );
                else
                    distrib = 1;
                if( npos < argc ) resolve_shared = strtol( argv[npos++], NULL, 0 );
                if( npos < argc ) with_ghosts = strtol( argv[npos++], NULL, 0 );
                if( npos < argc ) use_mpio = strtol( argv[npos++], NULL, 0 );

                tmp_result = read_file( mbImpl, filenames, tag_name, tag_val, distrib, parallel_option,
                                        resolve_shared, with_ghosts, use_mpio, print_parallel );
                if( MB_SUCCESS != tmp_result )
                {
                    result = tmp_result;
                    std::cerr << "Couldn't read mesh; error message:" << std::endl;
                    PRINT_LAST_ERROR;
                    MPI_Abort( MPI_COMM_WORLD, result );
                }
                break;

            case 4:
                filenames.push_back( argv[npos++] );
                tmp_result = test_packing( mbImpl, filenames[0].c_str() );
                if( MB_SUCCESS != tmp_result )
                {
                    result = tmp_result;
                    std::cerr << "Packing test failed; error message:" << std::endl;
                    PRINT_LAST_ERROR
                }
                break;

            case 5:
                // read a file in parallel from the filename on the command line
                tag_name = "MATERIAL_SET";
                distrib = 1;
                tag_val = -1;
                with_ghosts = 0;
                resolve_shared = 1;
                while( npos < argc )
                    filenames.push_back( std::string( argv[npos++] ) );
                tmp_result = read_file( mbImpl, filenames, tag_name, tag_val, distrib, parallel_option,
                                        resolve_shared, with_ghosts, use_mpio, print_parallel );
                if( MB_SUCCESS != tmp_result )
                {
                    result = tmp_result;
                    std::cerr << "Couldn't read mesh; error message:" << std::endl;
                    PRINT_LAST_ERROR;
                    MPI_Abort( MPI_COMM_WORLD, result );
                }
                break;

            default:
                std::cerr << "Unrecognized option \"" << this_opt << "\"; skipping." << std::endl;
                tmp_result = MB_FAILURE;
        }

        if( 0 == rank ) rtime = MPI_Wtime();
    }

    if( 0 == rank ) dtime = MPI_Wtime();

    result = mbImpl->delete_mesh();
    if( MB_SUCCESS != result )
    {
        std::cerr << "Couldn't delete mesh on rank " << rank << "; error message: " << std::endl;
        PRINT_LAST_ERROR;
    }
    if( 0 == rank ) ltime = MPI_Wtime();

    if( MB_SUCCESS == result ) std::cerr << "Proc " << rank << ": Success." << std::endl;

    if( 0 == rank )
        std::cout << "Times: " << dtime - stime << " " << rtime - stime << " " << ltime - dtime
                  << " (total/read/delete)" << std::endl;

    MPI_Finalize();

    delete mbImpl;

    return ( MB_SUCCESS == result ? 0 : 1 );
}

void print_usage( const char* command )
{
where:" << std::endl 00218 << " readpar_option = 0 (BCAST_DELETE) (default), -1 (READ_DELETE), " << std::endl 00219 << " -2 (READ_PARALLEL), -3 (BCAST)" << std::endl 00220 << "opt input" << std::endl 00221 << "=== =====" << std::endl 00222 << " 1 <linear_ints> <shared_verts> " << std::endl 00223 << " 2 <n_ints> " << std::endl 00224 << " 3* <# files> <file_names...> [<tag_name>=\"MATERIAL_SET\" [tag_val] " 00225 "[distribute=1] [resolve_shared=1] [with_ghosts=1] [use_mpio=0]" 00226 << std::endl 00227 << " 4 <file_name> " << std::endl 00228 << "*Note: if opt 3 is used, it must be the last one." << std::endl; 00229 } 00230 00231 ErrorCode report_nsets( Interface* mbImpl ) 00232 { 00233 // get and report various numbers... 00234 int rank; 00235 MPI_Comm_rank( MPI_COMM_WORLD, &rank ); 00236 00237 Range matsets, geomsets, parsets; 00238 int nsets; 00239 Tag mtag = 0, gtag = 0, ptag = 0, gidtag; 00240 ErrorCode result = mbImpl->tag_get_handle( "MATERIAL_SET", 1, MB_TYPE_INTEGER, mtag ); 00241 if( MB_SUCCESS != result ) 00242 { 00243 std::cerr << "Couldn't get MATERIAL_SET tag." << std::endl; 00244 return result; 00245 } 00246 result = mbImpl->tag_get_handle( "GEOM_DIMENSION", 1, MB_TYPE_INTEGER, gtag ); 00247 if( MB_SUCCESS != result ) 00248 { 00249 std::cerr << "Couldn't get MATERIAL_SET tag." << std::endl; 00250 return result; 00251 } 00252 result = mbImpl->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, ptag ); 00253 if( MB_SUCCESS != result ) 00254 { 00255 std::cerr << "Couldn't PARALLEL_PARTITION tag." << std::endl; 00256 return result; 00257 } 00258 result = mbImpl->tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, gidtag ); 00259 if( MB_SUCCESS != result ) 00260 { 00261 std::cerr << "Couldn't get GLOBAL_ID tag." << std::endl; 00262 return result; 00263 } 00264 result = mbImpl->get_number_entities_by_type( 0, MBENTITYSET, nsets ); 00265 if( MB_SUCCESS != result ) 00266 { 00267 std::cerr << "Couldn't get number entities by type." << std::endl; 00268 return result; 00269 } 00270 std::cout << "Proc " << rank << ": Total of " << nsets << " entity sets." 

#define PRINTSETS( a, b, c, p )                                                                              \
    if( a )                                                                                                  \
    {                                                                                                        \
        result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &( a ), p, 1, b );                    \
        if( !( b ).empty() )                                                                                 \
        {                                                                                                    \
            std::vector< int > ids( ( b ).size() );                                                          \
            result = mbImpl->tag_get_data( gidtag, b, &ids[0] );                                             \
            if( MB_SUCCESS == result )                                                                       \
            {                                                                                                \
                std::cout << "Proc " << rank << ": " << ( c ) << " (total " << ( b ).size()                  \
                          << "): " << ids[0];                                                                \
                for( unsigned int i = 1; i < ( b ).size(); i++ )                                             \
                    std::cout << ", " << ids[i];                                                             \
                std::cout << std::endl;                                                                      \
            }                                                                                                \
        }                                                                                                    \
    }

    PRINTSETS( mtag, matsets, "material sets", NULL );

    int tval = 3;
    void* pval = &tval;

    PRINTSETS( gtag, geomsets, "geom sets (vols)", &pval );
    tval = 2;
    geomsets.clear();
    PRINTSETS( gtag, geomsets, "geom sets (surfs)", &pval );
    tval = 1;
    geomsets.clear();
    PRINTSETS( gtag, geomsets, "geom sets (curves)", &pval );
    tval = 0;
    geomsets.clear();
    PRINTSETS( gtag, geomsets, "geom sets (verts)", &pval );

    PRINTSETS( ptag, parsets, "partition sets", NULL );

    if( debug )
    {
        // list info on all ent sets, reuse parsets
        parsets.clear();
        result = mbImpl->get_entities_by_type( 0, MBENTITYSET, parsets );
        if( MB_SUCCESS == result )
        {
            std::cout << "Total sets (by range): " << parsets.size() << "; sets: " << std::endl;
            parsets.print( " " );
            mbImpl->list_entities( parsets );
        }
    }

    return MB_SUCCESS;
}

ErrorCode read_file( Interface* mbImpl, std::vector< std::string >& filenames, const char* tag_name, int tag_val,
                     int distrib, int parallel_option, int resolve_shared, int with_ghosts, int use_mpio,
                     bool print_parallel )
{
    std::ostringstream options;
    switch( parallel_option )
    {
        case 0:
            options << "PARALLEL=BCAST_DELETE;PARTITION=" << tag_name;
            break;
        case -1:
            options << "PARALLEL=READ_DELETE;PARTITION=" << tag_name;
            break;
        case -2:
            options << "PARALLEL=READ_PART;PARTITION=" << tag_name;
            break;
        case -3:
            options << "PARALLEL=BCAST;PARTITION=" << tag_name;
            break;
        default:
            return MB_FAILURE;
    }

    if( -1 != tag_val ) options << ";PARTITION_VAL=" << tag_val;

    if( 1 == distrib ) options << ";PARTITION_DISTRIBUTE";

    if( 1 == resolve_shared ) options << ";PARALLEL_RESOLVE_SHARED_ENTS";

    if( 1 == with_ghosts ) options << ";PARALLEL_GHOSTS=3.0.1";

    if( 1 == use_mpio ) options << ";USE_MPIO";

    options << ";CPUTIME";

    if( print_parallel ) options << ";PRINT_PARALLEL";

    std::vector< ParallelComm* > pcs( filenames.size() );
    ErrorCode result = MB_FAILURE;

    if( 1 < filenames.size() )
    {
        for( unsigned int i = 0; i < filenames.size(); i++ )
        {
            pcs[i] = new ParallelComm( mbImpl, MPI_COMM_WORLD );
            int index = pcs[i]->get_id();
            std::ostringstream newopts;
            newopts << options.str();
            newopts << ";PARALLEL_COMM=" << index;
            result = mbImpl->load_file( filenames[i].c_str(), 0, newopts.str().c_str() );

            if( MB_SUCCESS != result ) PRINT_LAST_ERROR;

            if( MB_SUCCESS != result )
            {
                MPI_Abort( MPI_COMM_WORLD, result );
                break;
            }

            // exchange tag
            Range tmp_range;
            result = pcs[i]->exchange_tags( "GLOBAL_ID", tmp_range );
            if( MB_SUCCESS != result )
            {
                std::cerr << "Tag exchange didn't work." << std::endl;
                break;
            }
        }
    }
    else
    {
        result = mbImpl->load_file( filenames[0].c_str(), 0, options.str().c_str() );
        RRA( "Failed to load file." );
        pcs[0] = ParallelComm::get_pcomm( mbImpl, 0 );
        assert( pcs[0] );
    }

    if( MB_SUCCESS == result ) report_iface_ents( mbImpl, pcs );

    return result;
}

ErrorCode test_packing( Interface* mbImpl, const char* filename )
{
    // read the mesh
    EntityHandle file_set;
    ErrorCode result = mbImpl->create_meshset( MESHSET_SET, file_set );
    RRA( "create_meshset failed." );

    result = mbImpl->load_file( filename, &file_set, NULL );
    if( MB_SUCCESS != result )
    {
        std::cerr << "Reading file failed; message:" << std::endl;
        PRINT_LAST_ERROR;
        return result;
    }

    // get the entities in the file set and pack a buffer with them
    Range ents, whole_range;
    std::vector< EntityHandle > new_ents;
    result = mbImpl->get_entities_by_handle( file_set, ents );
    RRA( "Getting 3d ents failed." );

    ents.insert( file_set );

    ParallelComm* pcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );

    ParallelComm::Buffer buff;
    result = pcomm->pack_buffer( ents, false, true, false, -1, &buff );
    RRA( "Packing buffer count (non-stored handles) failed." );

    std::vector< std::vector< EntityHandle > > L1hloc, L1hrem;
    std::vector< std::vector< int > > L1p;
    std::vector< EntityHandle > L2hloc, L2hrem;
    std::vector< unsigned int > L2p;

    buff.reset_ptr();
    result = pcomm->unpack_buffer( buff.buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );
    RRA( "Unpacking buffer (non-stored handles) failed." );

    return MB_SUCCESS;
}

ErrorCode report_iface_ents( Interface* mbImpl, std::vector< ParallelComm* >& pcs )
{
    Range iface_ents[6];
    ErrorCode result = MB_SUCCESS, tmp_result;

    // now figure out which vertices are shared
    Range part_ents, part_verts;
    for( unsigned int p = 0; p < pcs.size(); p++ )
    {
        // get entities owned by this partition
        for( Range::iterator rit = pcs[p]->partition_sets().begin(); rit != pcs[p]->partition_sets().end(); ++rit )
        {
            tmp_result = mbImpl->get_entities_by_dimension( *rit, 3, part_ents, true );
            if( MB_SUCCESS != tmp_result ) result = tmp_result;
        }

        for( int i = 0; i < 4; i++ )
        {
            tmp_result = pcs[p]->get_iface_entities( -1, i, iface_ents[i] );

            if( MB_SUCCESS != tmp_result )
            {
                std::cerr << "get_iface_entities returned error on proc " << pcs[p]->proc_config().proc_rank()
                          << "; message: " << std::endl;
                std::string last_error;
                result = mbImpl->get_last_error( last_error );
                if( last_error.empty() )
                    std::cerr << "(none)" << std::endl;
                else
                    std::cerr << last_error << std::endl;
                result = tmp_result;
            }
            if( 0 != i ) iface_ents[4].merge( iface_ents[i] );
        }
    }

    // get non-owned vertices
    result = pcs[0]->get_pstatus_entities( 0, PSTATUS_NOT_OWNED, part_verts );
    if( MB_SUCCESS != result )
    {
        std::cerr << "Couldn't get non-owned entities." << std::endl;
        return result;
    }
    int tot_verts;
    result = mbImpl->get_number_entities_by_dimension( 0, 0, tot_verts );
    if( MB_SUCCESS != result )
    {
        std::cerr << "Couldn't get number of vertices." << std::endl;
        return result;
    }
    tot_verts -= part_verts.size();

    // report # iface entities
    result = mbImpl->get_adjacencies( iface_ents[4], 0, false, iface_ents[5], Interface::UNION );

    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    std::cerr << "Proc " << rank << " iface entities: " << std::endl;
    for( int i = 0; i < 4; i++ )
        std::cerr << " " << iface_ents[i].size() << " " << i << "d iface entities." << std::endl;
    std::cerr << " (" << iface_ents[5].size() << " verts adj to other iface ents)" << std::endl;
    if( iface_ents[0].size() != iface_ents[5].size() )
        std::cerr << "WARNING: number of interface vertices doesn't agree with "
                  << "vertex adjacencies on interface entities." << std::endl;

    // report # regions owned by this proc
    std::cout << "Proc " << rank << " owns " << part_ents.size() << " 3d entities." << std::endl;

    // get total # regions over all procs
    int num_local[2], num_total[2];
    num_local[0] = tot_verts;
    num_local[1] = part_ents.size();

    int failure = MPI_Reduce( num_local, num_total, 2, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
    if( failure ) result = MB_FAILURE;

    if( 0 == rank )
    {
        std::cout << "Total # owned vertices = " << num_total[0] << std::endl;
        std::cout << "Total # owned regions = " << num_total[1] << std::endl;
    }

    return result;
}
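
For reference, below is a minimal, self-contained sketch of the parallel read pattern that read_file() above assembles when option 3 is used with all defaults (BCAST_DELETE, PARTITION=MATERIAL_SET, PARTITION_DISTRIBUTE, PARALLEL_RESOLVE_SHARED_ENTS, PARALLEL_GHOSTS=3.0.1, CPUTIME). It is not part of mbparallelcomm_test: the file name mesh.h5m is a placeholder, and it assumes MOAB was built with MPI support.

// Minimal sketch (assumption: MPI-enabled MOAB build; "mesh.h5m" is a placeholder).
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab_mpi.h"
#include <iostream>

int main( int argc, char** argv )
{
    MPI_Init( &argc, &argv );
    {
        moab::Core mb;

        // Same option string read_file() builds for parallel_option 0 with default
        // arguments: broadcast-and-delete partitioning on MATERIAL_SET, distribute
        // the partition sets across ranks, resolve shared entities, ghost 3d
        // elements (PARALLEL_GHOSTS=3.0.1), and time the read (CPUTIME).
        const char* opts =
            "PARALLEL=BCAST_DELETE;PARTITION=MATERIAL_SET;PARTITION_DISTRIBUTE;"
            "PARALLEL_RESOLVE_SHARED_ENTS;PARALLEL_GHOSTS=3.0.1;CPUTIME";

        moab::ErrorCode rval = mb.load_file( "mesh.h5m", 0, opts );
        if( moab::MB_SUCCESS != rval )
            std::cerr << "Parallel read failed." << std::endl;
        else
        {
            // The parallel reader attaches a ParallelComm to the MOAB instance;
            // index 0 retrieves it, as in the single-file branch of read_file() above.
            moab::ParallelComm* pcomm = moab::ParallelComm::get_pcomm( &mb, 0 );
            if( pcomm )
                std::cout << "Proc " << pcomm->proc_config().proc_rank()
                          << ": mesh loaded." << std::endl;
        }
    }
    MPI_Finalize();
    return 0;
}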