MOAB: Mesh Oriented datABase
(version 5.4.1)
|
00001 /** test of ParallelComm functionality 00002 * 00003 * To run: 00004 * 00005 * mpirun -np <#procs> mbparallelcomm_test 00006 * 00007 */ 00008 00009 #include "moab/ParallelComm.hpp" 00010 #include "MBParallelConventions.h" 00011 #include "MBTagConventions.hpp" 00012 #include "moab/Core.hpp" 00013 #include "ScdVertexData.hpp" 00014 #include "StructuredElementSeq.hpp" 00015 #include "SequenceManager.hpp" 00016 #include "moab/Error.hpp" 00017 #include "moab_mpi.h" 00018 #include <iostream> 00019 #include <sstream> 00020 #include <cassert> 00021 00022 #define REALTFI 1 00023 00024 const bool debug = false; 00025 00026 using namespace moab; 00027 00028 #define ERROR( a, b ) \ 00029 { \ 00030 std::cerr << ( a ) << std::endl; \ 00031 return b; \ 00032 } 00033 00034 #define PRINT_LAST_ERROR \ 00035 { \ 00036 std::string last_error; \ 00037 result = mbImpl->get_last_error( last_error ); \ 00038 if( last_error.empty() ) \ 00039 std::cerr << "(none)" << std::endl; \ 00040 else \ 00041 std::cerr << last_error << std::endl; \ 00042 } 00043 #define RRA( a ) \ 00044 if( MB_SUCCESS != result ) \ 00045 { \ 00046 std::cerr << ( a ); \ 00047 return result; \ 00048 } 00049 00050 ErrorCode create_linear_mesh( Interface* mbImpl, int N, int M, int& nshared ); 00051 00052 ErrorCode create_scd_mesh( Interface* mbImpl, int IJK, int& nshared ); 00053 00054 ErrorCode read_file( Interface* mbImpl, 00055 std::vector< std::string >& filenames, 00056 const char* tag_name, 00057 int tag_val, 00058 int distrib, 00059 int parallel_option, 00060 int resolve_shared, 00061 int with_ghosts, 00062 int use_mpio, 00063 bool print_parallel ); 00064 00065 ErrorCode test_packing( Interface* mbImpl, const char* filename ); 00066 00067 ErrorCode report_nsets( Interface* mbImpl ); 00068 00069 ErrorCode report_iface_ents( Interface* mbImpl, std::vector< ParallelComm* >& pcs ); 00070 00071 void print_usage( const char* ); 00072 00073 int main( int argc, char** argv ) 00074 { 00075 // need to init MPI first, to tell how many procs and rank 00076 MPI_Init( &argc, &argv ); 00077 00078 int nprocs, rank; 00079 MPI_Comm_size( MPI_COMM_WORLD, &nprocs ); 00080 MPI_Comm_rank( MPI_COMM_WORLD, &rank ); 00081 00082 // start time 00083 double stime = 0, rtime = 0, dtime = 0, ltime = 0; 00084 if( 0 == rank ) stime = MPI_Wtime(); 00085 00086 // create MOAB instance based on that 00087 Interface* mbImpl = new Core; 00088 if( NULL == mbImpl ) return 1; 00089 00090 ErrorCode result = MB_SUCCESS; 00091 00092 // each interior proc has a vector of N+M vertices, sharing 00093 // M vertices each with lower- and upper-rank processors, except 00094 // procs on the end 00095 00096 // get N, M from command line 00097 if( argc < 3 ) 00098 { 00099 if( 0 == rank ) print_usage( argv[0] ); 00100 MPI_Finalize(); 00101 return 1; 00102 } 00103 00104 int npos = 1, tag_val, distrib, with_ghosts = 1, resolve_shared = 1, use_mpio = 0; 00105 bool print_parallel = false; 00106 const char* tag_name; 00107 std::vector< std::string > filenames; 00108 int parallel_option = 0; 00109 int num_files; 00110 00111 if( !strcmp( argv[npos], "-p" ) ) print_parallel = true; 00112 00113 while( npos != argc ) 00114 { 00115 ErrorCode tmp_result; 00116 int this_opt = strtol( argv[npos++], NULL, 0 ); 00117 switch( this_opt ) 00118 { 00119 case 0: 00120 case -1: 00121 case -2: 00122 case -3: 00123 parallel_option = this_opt; 00124 continue; 00125 00126 case 3: 00127 // read a file in parallel from the filename on the command line 00128 tag_name = "MATERIAL_SET"; 00129 tag_val = -1; 00130 num_files = strtol( argv[npos++], NULL, 0 ); 00131 if( 0 == num_files ) 00132 { 00133 if( 0 == rank ) print_usage( argv[0] ); 00134 MPI_Finalize(); 00135 return 1; 00136 } 00137 while( num_files-- && npos < argc ) 00138 filenames.push_back( std::string( argv[npos++] ) ); 00139 if( npos < argc ) tag_name = argv[npos++]; 00140 if( npos < argc ) tag_val = strtol( argv[npos++], NULL, 0 ); 00141 if( npos < argc ) 00142 distrib = strtol( argv[npos++], NULL, 0 ); 00143 else 00144 distrib = 1; 00145 if( npos < argc ) resolve_shared = strtol( argv[npos++], NULL, 0 ); 00146 if( npos < argc ) with_ghosts = strtol( argv[npos++], NULL, 0 ); 00147 if( npos < argc ) use_mpio = strtol( argv[npos++], NULL, 0 ); 00148 00149 tmp_result = read_file( mbImpl, filenames, tag_name, tag_val, distrib, parallel_option, resolve_shared, 00150 with_ghosts, use_mpio, print_parallel ); 00151 if( MB_SUCCESS != tmp_result ) 00152 { 00153 result = tmp_result; 00154 std::cerr << "Couldn't read mesh; error message:" << std::endl; 00155 PRINT_LAST_ERROR; 00156 MPI_Abort( MPI_COMM_WORLD, result ); 00157 } 00158 break; 00159 00160 case 4: 00161 filenames.push_back( argv[npos++] ); 00162 tmp_result = test_packing( mbImpl, filenames[0].c_str() ); 00163 if( MB_SUCCESS != tmp_result ) 00164 { 00165 result = tmp_result; 00166 std::cerr << "Packing test failed; error message:" << std::endl; 00167 PRINT_LAST_ERROR 00168 } 00169 break; 00170 00171 case 5: 00172 // read a file in parallel from the filename on the command line 00173 tag_name = "MATERIAL_SET"; 00174 distrib = 1; 00175 tag_val = -1; 00176 with_ghosts = 0; 00177 resolve_shared = 1; 00178 while( npos < argc ) 00179 filenames.push_back( std::string( argv[npos++] ) ); 00180 tmp_result = read_file( mbImpl, filenames, tag_name, tag_val, distrib, parallel_option, resolve_shared, 00181 with_ghosts, use_mpio, print_parallel ); 00182 if( MB_SUCCESS != tmp_result ) 00183 { 00184 result = tmp_result; 00185 std::cerr << "Couldn't read mesh; error message:" << std::endl; 00186 PRINT_LAST_ERROR; 00187 MPI_Abort( MPI_COMM_WORLD, result ); 00188 } 00189 break; 00190 00191 default: 00192 std::cerr << "Unrecognized option \"" << this_opt << "\"; skipping." << std::endl; 00193 tmp_result = MB_FAILURE; 00194 } 00195 00196 if( 0 == rank ) rtime = MPI_Wtime(); 00197 } 00198 00199 if( 0 == rank ) dtime = MPI_Wtime(); 00200 00201 result = mbImpl->delete_mesh(); 00202 if( MB_SUCCESS != result ) 00203 { 00204 std::cerr << "Couldn't delete mesh on rank " << rank << "; error message: " << std::endl; 00205 PRINT_LAST_ERROR; 00206 } 00207 if( 0 == rank ) ltime = MPI_Wtime(); 00208 00209 if( MB_SUCCESS == result ) std::cerr << "Proc " << rank << ": Success." << std::endl; 00210 00211 if( 0 == rank ) 00212 std::cout << "Times: " << dtime - stime << " " << rtime - stime << " " << ltime - dtime 00213 << " (total/read/delete)" << std::endl; 00214 00215 MPI_Finalize(); 00216 00217 delete mbImpl; 00218 00219 return ( MB_SUCCESS == result ? 0 : 1 ); 00220 } 00221 00222 void print_usage( const char* command ) 00223 { 00224 std::cerr << "Usage: " << command << " [readpar_option] <opt> <input> [...] where:" << std::endl 00225 << " readpar_option = 0 (BCAST_DELETE) (default), -1 (READ_DELETE), " << std::endl 00226 << " -2 (READ_PARALLEL), -3 (BCAST)" << std::endl 00227 << "opt input" << std::endl 00228 << "=== =====" << std::endl 00229 << " 1 <linear_ints> <shared_verts> " << std::endl 00230 << " 2 <n_ints> " << std::endl 00231 << " 3* <# files> <file_names...> [<tag_name>=\"MATERIAL_SET\" [tag_val] " 00232 "[distribute=1] [resolve_shared=1] [with_ghosts=1] [use_mpio=0]" 00233 << std::endl 00234 << " 4 <file_name> " << std::endl 00235 << "*Note: if opt 3 is used, it must be the last one." << std::endl; 00236 } 00237 00238 ErrorCode report_nsets( Interface* mbImpl ) 00239 { 00240 // get and report various numbers... 00241 int rank; 00242 MPI_Comm_rank( MPI_COMM_WORLD, &rank ); 00243 00244 Range matsets, geomsets, parsets; 00245 int nsets; 00246 Tag mtag = 0, gtag = 0, ptag = 0, gidtag; 00247 ErrorCode result = mbImpl->tag_get_handle( "MATERIAL_SET", 1, MB_TYPE_INTEGER, mtag ); 00248 if( MB_SUCCESS != result ) 00249 { 00250 std::cerr << "Couldn't get MATERIAL_SET tag." << std::endl; 00251 return result; 00252 } 00253 result = mbImpl->tag_get_handle( "GEOM_DIMENSION", 1, MB_TYPE_INTEGER, gtag ); 00254 if( MB_SUCCESS != result ) 00255 { 00256 std::cerr << "Couldn't get MATERIAL_SET tag." << std::endl; 00257 return result; 00258 } 00259 result = mbImpl->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, ptag ); 00260 if( MB_SUCCESS != result ) 00261 { 00262 std::cerr << "Couldn't PARALLEL_PARTITION tag." << std::endl; 00263 return result; 00264 } 00265 result = mbImpl->tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, gidtag ); 00266 if( MB_SUCCESS != result ) 00267 { 00268 std::cerr << "Couldn't get GLOBAL_ID tag." << std::endl; 00269 return result; 00270 } 00271 result = mbImpl->get_number_entities_by_type( 0, MBENTITYSET, nsets ); 00272 if( MB_SUCCESS != result ) 00273 { 00274 std::cerr << "Couldn't get number entities by type." << std::endl; 00275 return result; 00276 } 00277 std::cout << "Proc " << rank << ": Total of " << nsets << " entity sets." << std::endl; 00278 00279 #define PRINTSETS( a, b, c, p ) \ 00280 if( a ) \ 00281 { \ 00282 result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &( a ), p, 1, b ); \ 00283 if( !( b ).empty() ) \ 00284 { \ 00285 std::vector< int > ids( ( b ).size() ); \ 00286 result = mbImpl->tag_get_data( gidtag, b, &ids[0] ); \ 00287 if( MB_SUCCESS == result ) \ 00288 { \ 00289 std::cout << "Proc " << rank << ": " << ( c ) << " (total " << ( b ).size() << "): " << ids[0]; \ 00290 for( unsigned int i = 1; i < ( b ).size(); i++ ) \ 00291 std::cout << ", " << ids[i]; \ 00292 std::cout << std::endl; \ 00293 } \ 00294 } \ 00295 } 00296 00297 PRINTSETS( mtag, matsets, "material sets", NULL ); 00298 00299 int tval = 3; 00300 void* pval = &tval; 00301 00302 PRINTSETS( gtag, geomsets, "geom sets (vols)", &pval ); 00303 tval = 2; 00304 geomsets.clear(); 00305 PRINTSETS( gtag, geomsets, "geom sets (surfs)", &pval ); 00306 tval = 1; 00307 geomsets.clear(); 00308 PRINTSETS( gtag, geomsets, "geom sets (curves)", &pval ); 00309 tval = 0; 00310 geomsets.clear(); 00311 PRINTSETS( gtag, geomsets, "geom sets (verts)", &pval ); 00312 00313 PRINTSETS( ptag, parsets, "partition sets", NULL ); 00314 00315 if( debug ) 00316 { 00317 // list info on all ent sets, reuse parsets 00318 parsets.clear(); 00319 result = mbImpl->get_entities_by_type( 0, MBENTITYSET, parsets ); 00320 if( MB_SUCCESS == result ) 00321 { 00322 std::cout << "Total sets (by range): " << parsets.size() << "; sets: " << std::endl; 00323 parsets.print( " " ); 00324 mbImpl->list_entities( parsets ); 00325 } 00326 } 00327 00328 return MB_SUCCESS; 00329 } 00330 00331 ErrorCode read_file( Interface* mbImpl, 00332 std::vector< std::string >& filenames, 00333 const char* tag_name, 00334 int tag_val, 00335 int distrib, 00336 int parallel_option, 00337 int resolve_shared, 00338 int with_ghosts, 00339 int use_mpio, 00340 bool print_parallel ) 00341 { 00342 std::ostringstream options; 00343 switch( parallel_option ) 00344 { 00345 case 0: 00346 options << "PARALLEL=BCAST_DELETE;PARTITION=" << tag_name; 00347 break; 00348 case -1: 00349 options << "PARALLEL=READ_DELETE;PARTITION=" << tag_name; 00350 break; 00351 case -2: 00352 options << "PARALLEL=READ_PART;PARTITION=" << tag_name; 00353 break; 00354 case -3: 00355 options << "PARALLEL=BCAST;PARTITION=" << tag_name; 00356 break; 00357 default: 00358 return MB_FAILURE; 00359 } 00360 00361 if( -1 != tag_val ) options << ";PARTITION_VAL=" << tag_val; 00362 00363 if( 1 == distrib ) options << ";PARTITION_DISTRIBUTE"; 00364 00365 if( 1 == resolve_shared ) options << ";PARALLEL_RESOLVE_SHARED_ENTS"; 00366 00367 if( 1 == with_ghosts ) options << ";PARALLEL_GHOSTS=3.0.1"; 00368 00369 if( 1 == use_mpio ) options << ";USE_MPIO"; 00370 00371 options << ";CPUTIME"; 00372 00373 if( print_parallel ) options << ";PRINT_PARALLEL"; 00374 00375 std::vector< ParallelComm* > pcs( filenames.size() ); 00376 ErrorCode result = MB_FAILURE; 00377 00378 if( 1 < filenames.size() ) 00379 { 00380 for( unsigned int i = 0; i < filenames.size(); i++ ) 00381 { 00382 pcs[i] = new ParallelComm( mbImpl, MPI_COMM_WORLD ); 00383 int index = pcs[i]->get_id(); 00384 std::ostringstream newopts; 00385 newopts << options.str(); 00386 newopts << ";PARALLEL_COMM=" << index; 00387 result = mbImpl->load_file( filenames[i].c_str(), 0, newopts.str().c_str() ); 00388 00389 if( MB_SUCCESS != result ) PRINT_LAST_ERROR; 00390 00391 if( MB_SUCCESS != result ) 00392 { 00393 MPI_Abort( MPI_COMM_WORLD, result ); 00394 break; 00395 } 00396 00397 // exchange tag 00398 Range tmp_range; 00399 result = pcs[i]->exchange_tags( "GLOBAL_ID", tmp_range ); 00400 if( MB_SUCCESS != result ) 00401 { 00402 std::cerr << "Tag exchange didn't work." << std::endl; 00403 break; 00404 } 00405 } 00406 } 00407 else 00408 { 00409 result = mbImpl->load_file( filenames[0].c_str(), 0, options.str().c_str() ); 00410 RRA( "Failed to load file." ); 00411 pcs[0] = ParallelComm::get_pcomm( mbImpl, 0 ); 00412 assert( pcs[0] ); 00413 } 00414 00415 if( MB_SUCCESS == result ) report_iface_ents( mbImpl, pcs ); 00416 00417 return result; 00418 } 00419 00420 ErrorCode test_packing( Interface* mbImpl, const char* filename ) 00421 { 00422 // read the mesh 00423 EntityHandle file_set; 00424 ErrorCode result = mbImpl->create_meshset( MESHSET_SET, file_set ); 00425 RRA( "create_meshset failed." ); 00426 00427 result = mbImpl->load_file( filename, &file_set, NULL ); 00428 if( MB_SUCCESS != result ) 00429 { 00430 std::cerr << "Reading file failed; message:" << std::endl; 00431 PRINT_LAST_ERROR; 00432 return result; 00433 } 00434 00435 // get 3d entities and pack a buffer with them 00436 Range ents, whole_range; 00437 std::vector< EntityHandle > new_ents; 00438 result = mbImpl->get_entities_by_handle( file_set, ents ); 00439 RRA( "Getting 3d ents failed." ); 00440 00441 ents.insert( file_set ); 00442 00443 ParallelComm* pcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD ); 00444 00445 ParallelComm::Buffer buff; 00446 result = pcomm->pack_buffer( ents, false, true, false, -1, &buff ); 00447 RRA( "Packing buffer count (non-stored handles) failed." ); 00448 00449 std::vector< std::vector< EntityHandle > > L1hloc, L1hrem; 00450 std::vector< std::vector< int > > L1p; 00451 std::vector< EntityHandle > L2hloc, L2hrem; 00452 std::vector< unsigned int > L2p; 00453 00454 buff.reset_ptr(); 00455 result = pcomm->unpack_buffer( buff.buff_ptr, false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents ); 00456 RRA( "Unpacking buffer (non-stored handles) failed." ); 00457 00458 return MB_SUCCESS; 00459 } 00460 00461 ErrorCode report_iface_ents( Interface* mbImpl, std::vector< ParallelComm* >& pcs ) 00462 { 00463 Range iface_ents[6]; 00464 ErrorCode result = MB_SUCCESS, tmp_result; 00465 00466 // now figure out which vertices are shared 00467 Range part_ents, part_verts; 00468 for( unsigned int p = 0; p < pcs.size(); p++ ) 00469 { 00470 // get entities owned by this partition 00471 for( Range::iterator rit = pcs[p]->partition_sets().begin(); rit != pcs[p]->partition_sets().end(); ++rit ) 00472 { 00473 tmp_result = mbImpl->get_entities_by_dimension( *rit, 3, part_ents, true ); 00474 if( MB_SUCCESS != tmp_result ) result = tmp_result; 00475 } 00476 00477 for( int i = 0; i < 4; i++ ) 00478 { 00479 tmp_result = pcs[p]->get_iface_entities( -1, i, iface_ents[i] ); 00480 00481 if( MB_SUCCESS != tmp_result ) 00482 { 00483 std::cerr << "get_iface_entities returned error on proc " << pcs[p]->proc_config().proc_rank() 00484 << "; message: " << std::endl; 00485 std::string last_error; 00486 result = mbImpl->get_last_error( last_error ); 00487 if( last_error.empty() ) 00488 std::cerr << "(none)" << std::endl; 00489 else 00490 std::cerr << last_error << std::endl; 00491 result = tmp_result; 00492 } 00493 if( 0 != i ) iface_ents[4].merge( iface_ents[i] ); 00494 } 00495 } 00496 00497 // get non-owned vertices 00498 result = pcs[0]->get_pstatus_entities( 0, PSTATUS_NOT_OWNED, part_verts ); 00499 if( MB_SUCCESS != result ) 00500 { 00501 std::cerr << "Couldn't get non-owned entities." << std::endl; 00502 return result; 00503 } 00504 int tot_verts; 00505 result = mbImpl->get_number_entities_by_dimension( 0, 0, tot_verts ); 00506 if( MB_SUCCESS != result ) 00507 { 00508 std::cerr << "Couldn't get number of vertices." << std::endl; 00509 return result; 00510 } 00511 tot_verts -= part_verts.size(); 00512 00513 // report # iface entities 00514 result = mbImpl->get_adjacencies( iface_ents[4], 0, false, iface_ents[5], Interface::UNION ); 00515 00516 int rank; 00517 MPI_Comm_rank( MPI_COMM_WORLD, &rank ); 00518 00519 std::cerr << "Proc " << rank << " iface entities: " << std::endl; 00520 for( int i = 0; i < 4; i++ ) 00521 std::cerr << " " << iface_ents[i].size() << " " << i << "d iface entities." << std::endl; 00522 std::cerr << " (" << iface_ents[5].size() << " verts adj to other iface ents)" << std::endl; 00523 if( iface_ents[0].size() != iface_ents[5].size() ) 00524 std::cerr << "WARNING: number of interface vertices don't agree with " 00525 << "vertex adjacencies on interface entities." << std::endl; 00526 00527 // report # regions owned by this proc 00528 std::cout << "Proc " << rank << " owns " << part_ents.size() << " 3d entities." << std::endl; 00529 00530 // get total # regions over all procs 00531 int num_local[2], num_total[2]; 00532 num_local[0] = tot_verts; 00533 num_local[1] = part_ents.size(); 00534 00535 int failure = MPI_Reduce( num_local, num_total, 2, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD ); 00536 if( failure ) result = MB_FAILURE; 00537 00538 if( 0 == rank ) 00539 { 00540 std::cout << "Total # owned vertices = " << num_total[0] << std::endl; 00541 std::cout << "Total # owned regions = " << num_total[1] << std::endl; 00542 } 00543 00544 return result; 00545 }