Mesh Oriented datABase
(version 5.4.1)
Array-based unstructured mesh datastructure
ReadParallel.cpp
#include "ReadParallel.hpp"
#include "moab/Core.hpp"
#include "moab/ProcConfig.hpp"
#include "moab/FileOptions.hpp"
#include "moab/Error.hpp"
#include "moab/ReaderWriterSet.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include <iostream>
#include <iomanip>
#include <iterator>
#include <sstream>
#include <algorithm>
#include <cassert>

namespace moab
{

const bool debug = false;

const char* ReadParallel::ParallelActionsNames[] = { "PARALLEL READ",
                                                     "PARALLEL READ PART",
                                                     "PARALLEL BROADCAST",
                                                     "PARALLEL DELETE NONLOCAL",
                                                     "PARALLEL CHECK_GIDS_SERIAL",
                                                     "PARALLEL GET_FILESET_ENTS",
                                                     "PARALLEL RESOLVE_SHARED_ENTS",
                                                     "PARALLEL EXCHANGE_GHOSTS",
                                                     "PARALLEL RESOLVE_SHARED_SETS",
                                                     "PARALLEL_AUGMENT_SETS_WITH_GHOSTS",
                                                     "PARALLEL PRINT_PARALLEL",
                                                     "PARALLEL_CREATE_TRIVIAL_PARTITION",
                                                     "PARALLEL_CORRECT_THIN_GHOST_LAYERS" };

const char* ReadParallel::parallelOptsNames[] = { "NONE", "BCAST", "BCAST_DELETE", "READ_DELETE", "READ_PART", "", 0 };

ReadParallel::ReadParallel( Interface* impl, ParallelComm* pc )
    : mbImpl( impl ), myPcomm( pc ), myDebug( "ReadPara", std::cerr )
{
    if( !myPcomm )
    {
        myPcomm = ParallelComm::get_pcomm( mbImpl, 0 );
        if( NULL == myPcomm ) myPcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );
    }
    myDebug.set_rank( myPcomm->proc_config().proc_rank() );
    if( debug )  // For backwards compatibility, enable all debug output if constant is true
        myDebug.set_verbosity( 10 );

    impl->query_interface( mError );
}

ErrorCode ReadParallel::load_file( const char** file_names,
                                   const int num_files,
                                   const EntityHandle* file_set,
                                   const FileOptions& opts,
                                   const ReaderIface::SubsetList* subset_list,
                                   const Tag* file_id_tag )
{
    int tmpval;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_PIO", 1, tmpval ) )
    {
        myDebug.set_verbosity( tmpval );
        myPcomm->set_debug_verbosity( tmpval );
    }
    myDebug.tprint( 1, "Setting up...\n" );

    // Get parallel settings
    int parallel_mode;
    ErrorCode result = opts.match_option( "PARALLEL", parallelOptsNames, parallel_mode );
    if( MB_FAILURE == result )
    {
        MB_SET_ERR( MB_FAILURE, "Unexpected value for 'PARALLEL' option" );
    }
    else if( MB_ENTITY_NOT_FOUND == result )
    {
        parallel_mode = 0;
    }

    // Get partition setting
    bool distrib;
    std::string partition_tag_name;
    result = opts.get_option( "PARTITION", partition_tag_name );
    if( MB_ENTITY_NOT_FOUND == result )
    {
        distrib = false;
        partition_tag_name = "";
    }
    else
    {
        distrib = true;
        if( partition_tag_name.empty() ) partition_tag_name = PARALLEL_PARTITION_TAG_NAME;

        // Also get deprecated PARTITION_DISTRIBUTE option
        // so that higher-level code doesn't return an error
        // due to an unrecognized option
        opts.get_null_option( "PARTITION_DISTRIBUTE" );
    }

    // Get partition tag value(s), if any, and whether they're to be
    // distributed or assigned
    std::vector< int > partition_tag_vals;
    opts.get_ints_option( "PARTITION_VAL", partition_tag_vals );
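
    // A minimal usage sketch (illustrative; "mesh.h5m" is a placeholder file
    // name): the options parsed here normally arrive through Core::load_file
    // as a single option string, e.g.
    //
    //   moab::Core mb;
    //   moab::ErrorCode rval =
    //       mb.load_file( "mesh.h5m", 0,
    //                     "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;"
    //                     "PARALLEL_RESOLVE_SHARED_ENTS;PARALLEL_GHOSTS=3.0.1" );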

    // If the partition tag name is "TRIVIAL" and no such tag exists yet,
    // a trivial partition will be created during the read
    bool create_trivial_partition = false;
    if( partition_tag_name == std::string( "TRIVIAL" ) )
    {
        Tag ttag;  // See if the trivial tag exists
        result = mbImpl->tag_get_handle( partition_tag_name.c_str(), ttag );
        if( MB_TAG_NOT_FOUND == result ) create_trivial_partition = true;
    }

    // See if we need to report times
    bool cputime = false;
    result = opts.get_null_option( "CPUTIME" );
    if( MB_SUCCESS == result ) cputime = true;

    // See if we need to print parallel information
    bool print_parallel = false;
    result = opts.get_null_option( "PRINT_PARALLEL" );
    if( MB_SUCCESS == result ) print_parallel = true;

    // Get ghosting options
    std::string ghost_str;
    int bridge_dim, ghost_dim = -1, num_layers, addl_ents = 0;
    result = opts.get_str_option( "PARALLEL_GHOSTS", ghost_str );
    if( MB_TYPE_OUT_OF_RANGE == result )
    {
        ghost_dim = 3;
        bridge_dim = 0;
        num_layers = 1;
    }
    else if( MB_SUCCESS == result )
    {
        // Format is <ghost_dim>.<bridge_dim>.<num_layers>[.<addl_ents>]
        int num_fields =
            sscanf( ghost_str.c_str(), "%d.%d.%d.%d", &ghost_dim, &bridge_dim, &num_layers, &addl_ents );
        if( 3 > num_fields )
        {
            MB_SET_ERR( MB_FAILURE, "Didn't read at least 3 fields from PARALLEL_GHOSTS string" );
        }
    }

    // Get resolve_shared_ents option
    std::string shared_str;
    int resolve_dim = -2, shared_dim = -1;
    result = opts.get_str_option( "PARALLEL_RESOLVE_SHARED_ENTS", shared_str );
    if( MB_TYPE_OUT_OF_RANGE == result )
    {
        resolve_dim = -1;
        shared_dim = -1;
    }
    else if( MB_SUCCESS == result )
    {
        // Format is <resolve_dim>.<shared_dim>
        int num_fields = sscanf( shared_str.c_str(), "%d.%d", &resolve_dim, &shared_dim );
        if( 2 != num_fields )
        {
            MB_SET_ERR( MB_FAILURE, "Didn't read 2 fields from PARALLEL_RESOLVE_SHARED_ENTS string" );
        }
    }

    // Get skip augmenting with ghosts option
    bool skip_augment = false;
    result = opts.get_null_option( "SKIP_AUGMENT_WITH_GHOSTS" );
    if( MB_SUCCESS == result ) skip_augment = true;

    bool correct_thin_ghosts = false;
    result = opts.get_null_option( "PARALLEL_THIN_GHOST_LAYER" );
    if( MB_SUCCESS == result ) correct_thin_ghosts = true;

    // Get MPI IO processor rank
    int reader_rank;
    result = opts.get_int_option( "MPI_IO_RANK", reader_rank );
    if( MB_ENTITY_NOT_FOUND == result )
        reader_rank = 0;
    else if( MB_SUCCESS != result )
    {
        MB_SET_ERR( MB_FAILURE, "Unexpected value for 'MPI_IO_RANK' option" );
    }

    // Now that we've parsed all the parallel options, make an instruction queue
    std::vector< int > pa_vec;
    bool is_reader = ( reader_rank == (int)myPcomm->proc_config().proc_rank() );

    bool partition_by_rank = false;
    if( MB_SUCCESS == opts.get_null_option( "PARTITION_BY_RANK" ) )
    {
        partition_by_rank = true;
        if( !partition_tag_vals.empty() )
        {
            MB_SET_ERR( MB_FAILURE, "Cannot specify both PARTITION_VAL and PARTITION_BY_RANK" );
        }
    }

    double factor_seq;
    if( MB_SUCCESS == opts.get_real_option( "PARALLEL_SEQUENCE_FACTOR", factor_seq ) )
    {
        if( factor_seq < 1. ) MB_SET_ERR( MB_FAILURE, "Cannot have sequence factor less than 1." );
        mbImpl->set_sequence_multiplier( factor_seq );
    }
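
    // Build the instruction queue. Summary of the read modes handled below:
    //   BCAST:        the reader rank reads the file and broadcasts all entities
    //   BCAST_DELETE: as BCAST, then each rank deletes entities not in its parts
    //   READ_DELETE:  every rank reads the whole file, then deletes non-local parts
    //   READ_PART:    each rank reads only the parts assigned to it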

    switch( parallel_mode )
    {
        case POPT_BCAST:
            myDebug.print( 1, "Read mode is BCAST\n" );
            if( is_reader )
            {
                pa_vec.push_back( PA_READ );
                pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
                pa_vec.push_back( PA_GET_FILESET_ENTS );
            }
            pa_vec.push_back( PA_BROADCAST );
            if( !is_reader ) pa_vec.push_back( PA_GET_FILESET_ENTS );
            break;

        case POPT_BCAST_DELETE:
            myDebug.print( 1, "Read mode is BCAST_DELETE\n" );
            if( is_reader )
            {
                pa_vec.push_back( PA_READ );
                pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
                pa_vec.push_back( PA_GET_FILESET_ENTS );
                if( create_trivial_partition ) pa_vec.push_back( PA_CREATE_TRIVIAL_PARTITION );
            }
            pa_vec.push_back( PA_BROADCAST );
            if( !is_reader ) pa_vec.push_back( PA_GET_FILESET_ENTS );
            pa_vec.push_back( PA_DELETE_NONLOCAL );
            break;

        case POPT_DEFAULT:
        case POPT_READ_DELETE:
            myDebug.print( 1, "Read mode is READ_DELETE\n" );
            pa_vec.push_back( PA_READ );
            pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
            pa_vec.push_back( PA_GET_FILESET_ENTS );
            pa_vec.push_back( PA_DELETE_NONLOCAL );
            break;

        case POPT_READ_PART:
            myDebug.print( 1, "Read mode is READ_PART\n" );
            pa_vec.push_back( PA_READ_PART );
            break;

        default:
            MB_SET_ERR( MB_FAILURE, "Unexpected parallel read mode" );
    }

    if( -2 != resolve_dim ) pa_vec.push_back( PA_RESOLVE_SHARED_ENTS );

    if( -1 != ghost_dim ) pa_vec.push_back( PA_EXCHANGE_GHOSTS );

    if( -2 != resolve_dim )
    {
        pa_vec.push_back( PA_RESOLVE_SHARED_SETS );
        if( -1 != ghost_dim && !skip_augment ) pa_vec.push_back( PA_AUGMENT_SETS_WITH_GHOSTS );
        if( -1 != ghost_dim && correct_thin_ghosts ) pa_vec.push_back( PA_CORRECT_THIN_GHOSTS );
    }

    if( print_parallel ) pa_vec.push_back( PA_PRINT_PARALLEL );

    result = load_file( file_names, num_files, file_set, parallel_mode, partition_tag_name, partition_tag_vals,
                        distrib, partition_by_rank, pa_vec, opts, subset_list, file_id_tag, reader_rank, cputime,
                        resolve_dim, shared_dim, ghost_dim, bridge_dim, num_layers, addl_ents );MB_CHK_ERR( result );

    if( parallel_mode == POPT_BCAST_DELETE && !is_reader ) opts.mark_all_seen();

    return MB_SUCCESS;
}
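
// The overload below is the worker that executes the instruction queue
// assembled above: each PA_* action runs in order, and when CPUTIME is given
// the per-action wall times are max-reduced across ranks and printed on rank 0.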

ErrorCode ReadParallel::load_file( const char** file_names,
                                   const int num_files,
                                   const EntityHandle* file_set_ptr,
                                   int /*parallel_mode*/,
                                   std::string& partition_tag_name,
                                   std::vector< int >& partition_tag_vals,
                                   bool distrib,
                                   bool partition_by_rank,
                                   std::vector< int >& pa_vec,
                                   const FileOptions& opts,
                                   const ReaderIface::SubsetList* subset_list,
                                   const Tag* file_id_tag,
                                   const int reader_rank,
                                   const bool cputime,
                                   const int resolve_dim,
                                   const int shared_dim,
                                   const int ghost_dim,
                                   const int bridge_dim,
                                   const int num_layers,
                                   const int addl_ents )
{
    ErrorCode result = MB_SUCCESS;
    if( myPcomm == NULL ) myPcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );

    Range entities;
    Tag file_set_tag = 0;
    int other_sets = 0;
    ReaderWriterSet::iterator iter;
    Range other_file_sets, file_sets;
    Core* impl = dynamic_cast< Core* >( mbImpl );

    std::vector< double > act_times( pa_vec.size() + 1 );
    std::vector< int >::iterator vit;
    int i, j;
    act_times[0] = MPI_Wtime();

    // Make a new set for the parallel read
    EntityHandle file_set;
    if( !file_set_ptr || !( *file_set_ptr ) )
    {
        result = mbImpl->create_meshset( MESHSET_SET, file_set );MB_CHK_SET_ERR( result, "Trouble creating file set" );
    }
    else
        file_set = *file_set_ptr;

    bool i_read = false;
    Tag id_tag = 0;
    bool use_id_tag = false;
    Range ents;

    for( i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++ )
    {
        ErrorCode tmp_result = MB_SUCCESS;
        switch( *vit )
        {
            //==================
            case PA_READ:
                i_read = true;

                for( j = 0; j < num_files; j++ )
                {
                    myDebug.tprintf( 1, "Reading file: \"%s\"\n", file_names[j] );

                    EntityHandle new_file_set;
                    result = mbImpl->create_meshset( MESHSET_SET, new_file_set );MB_CHK_SET_ERR( result, "Trouble creating file set" );
                    tmp_result = impl->serial_load_file( file_names[j], &new_file_set, opts, subset_list, file_id_tag );
                    if( MB_SUCCESS != tmp_result ) break;

                    // Put the contents of each file set for the reader into the
                    // file set for the parallel read
                    assert( 0 != new_file_set );
                    Range all_ents;
                    tmp_result = mbImpl->get_entities_by_handle( new_file_set, all_ents );
                    if( MB_SUCCESS != tmp_result ) break;
                    all_ents.insert( new_file_set );
                    tmp_result = mbImpl->add_entities( file_set, all_ents );
                    if( MB_SUCCESS != tmp_result ) break;
                }
                if( MB_SUCCESS != tmp_result ) break;

                // Mark the file set for this parallel reader
                tmp_result = mbImpl->tag_get_handle( "__file_set", 1, MB_TYPE_INTEGER, file_set_tag,
                                                     MB_TAG_SPARSE | MB_TAG_CREAT );
                if( MB_SUCCESS != tmp_result ) break;

                tmp_result = mbImpl->tag_set_data( file_set_tag, &file_set, 1, &other_sets );
                break;
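
            //==================
            // READ_PART loads only this rank's parts by handing the reader a
            // SubsetList keyed on the partition tag: it matches the rank value
            // when PARTITION_BY_RANK is set, matches explicit PARTITION_VAL
            // values when given, and otherwise selects this rank's part number
            // out of myPcomm->size() parts.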
            case PA_READ_PART: {
                myDebug.tprintf( 1, "Reading file: \"%s\"\n", file_names[0] );

                i_read = true;
                if( num_files != 1 )
                {
                    MB_SET_ERR( MB_NOT_IMPLEMENTED, "Multiple file read not supported for READ_PART" );
                }

                // If we're going to resolve shared entities, then we need
                // to ask the file reader to populate a tag with unique ids
                // (typically file ids or indices)
                if( std::find( pa_vec.begin(), pa_vec.end(), PA_RESOLVE_SHARED_ENTS ) != pa_vec.end() )
                {
                    use_id_tag = true;
                    if( !file_id_tag )
                    {
                        // This tag is really used for resolving shared entities with crystal router.
                        // In the end, this is an identifier that gets converted to long.
                        // In the hdf5 file reader, we also convert from the hdf5 file id type to long.
                        tmp_result = mbImpl->tag_get_handle( "", sizeof( long ), MB_TYPE_OPAQUE, id_tag,
                                                             MB_TAG_DENSE | MB_TAG_CREAT );
                        if( MB_SUCCESS != tmp_result ) break;
                        file_id_tag = &id_tag;
                    }
                }

                ReaderIface::IDTag parts = { partition_tag_name.c_str(), 0, 0 };
                ReaderIface::SubsetList sl;
                sl.num_parts = 0;
                int rank = myPcomm->rank();
                if( partition_by_rank )
                {
                    assert( partition_tag_vals.empty() );
                    parts.tag_values = &rank;
                    parts.num_tag_values = 1;
                }
                else
                {
                    sl.num_parts = myPcomm->size();
                    sl.part_number = myPcomm->rank();
                    if( !partition_tag_vals.empty() )
                    {
                        parts.tag_values = &partition_tag_vals[0];
                        parts.num_tag_values = partition_tag_vals.size();
                    }
                }
                std::vector< ReaderIface::IDTag > subset;
                if( subset_list )
                {
                    std::vector< ReaderIface::IDTag > tmplist( subset_list->tag_list,
                                                               subset_list->tag_list + subset_list->tag_list_length );
                    tmplist.push_back( parts );
                    subset.swap( tmplist );
                    sl.tag_list = &subset[0];
                    sl.tag_list_length = subset.size();
                }
                else
                {
                    sl.tag_list = &parts;
                    sl.tag_list_length = 1;
                }
                tmp_result = impl->serial_load_file( *file_names, &file_set, opts, &sl, file_id_tag );
                if( MB_SUCCESS != tmp_result ) break;

                if( !partition_tag_name.empty() )
                {
                    Tag part_tag;
                    tmp_result = impl->tag_get_handle( partition_tag_name.c_str(), 1, MB_TYPE_INTEGER, part_tag );
                    if( MB_SUCCESS != tmp_result ) break;

                    tmp_result = impl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &part_tag, 0, 1,
                                                                     myPcomm->partition_sets() );
                }
            }
            break;

            //==================
            case PA_GET_FILESET_ENTS:
                myDebug.tprint( 1, "Getting fileset entities.\n" );

                // Get entities in the file set, and add the actual file set to them;
                // mark the file set to make sure any receiving procs know which it is
                tmp_result = mbImpl->get_entities_by_handle( file_set, entities );
                if( MB_SUCCESS != tmp_result )
                {
                    entities.clear();
                    break;
                }

                // Add the actual file set to the entities too
                entities.insert( file_set );
                break;
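            //==================
            // Create a trivial partition of the highest-dimension entities:
            // with E entities and P parts, each part gets E/P entities and the
            // first E%P parts get one extra (e.g. 10 entities over 4 parts
            // gives 3, 3, 2, 2).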
            case PA_CREATE_TRIVIAL_PARTITION: {
                myDebug.tprint( 1, "create trivial partition, for higher dim entities.\n" );
                // Get the highest-dimension entities present (3, 2, or 1)
                Range hi_dim_ents = entities.subset_by_dimension( 3 );
                if( hi_dim_ents.empty() ) hi_dim_ents = entities.subset_by_dimension( 2 );
                if( hi_dim_ents.empty() ) hi_dim_ents = entities.subset_by_dimension( 1 );
                if( hi_dim_ents.empty() ) MB_SET_ERR( MB_FAILURE, "there are no elements of dim 1-3" );

                size_t num_hi_ents = hi_dim_ents.size();
                unsigned int num_parts = myPcomm->size();

                // First create the trivial partition tag
                int dum_id = -1;
                Tag ttag;  // trivial tag
                tmp_result = mbImpl->tag_get_handle( partition_tag_name.c_str(), 1, MB_TYPE_INTEGER, ttag,
                                                     MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );MB_CHK_SET_ERR( tmp_result, "Can't create trivial partition tag" );

                // Compute the number of high-dimension entities on each part
                size_t nPartEnts = num_hi_ents / num_parts;

                // Number of extra entities after equal split over parts
                int iextra = num_hi_ents % num_parts;
                Range::iterator itr = hi_dim_ents.begin();
                for( int k = 0; k < (int)num_parts; k++ )
                {
                    // Create a mesh set, insert a subrange of entities
                    EntityHandle part_set;
                    tmp_result = mbImpl->create_meshset( MESHSET_SET, part_set );MB_CHK_SET_ERR( tmp_result, "Can't create part set" );
                    entities.insert( part_set );

                    tmp_result = mbImpl->tag_set_data( ttag, &part_set, 1, &k );MB_CHK_SET_ERR( tmp_result, "Can't set trivial partition tag" );
                    Range subrange;
                    size_t num_ents_in_part = nPartEnts;
                    if( k < iextra ) num_ents_in_part++;  // The first iextra parts get one extra entity
                    for( size_t i1 = 0; i1 < num_ents_in_part; i1++, itr++ )
                        subrange.insert( *itr );
                    tmp_result = mbImpl->add_entities( part_set, subrange );MB_CHK_SET_ERR( tmp_result, "Can't add entities to trivial part " << k );
                    myDebug.tprintf( 1, "create trivial part %d with %lu entities \n", k, num_ents_in_part );
                    tmp_result = mbImpl->add_entities( file_set, &part_set, 1 );MB_CHK_SET_ERR( tmp_result, "Can't add trivial part to file set " << k );
                }
            }

            break;
            //==================
            case PA_BROADCAST:
                // Do the actual broadcast; if single-processor, ignore error
                myDebug.tprint( 1, "Broadcasting mesh.\n" );

                if( myPcomm->proc_config().proc_size() > 1 )
                {
                    tmp_result = myPcomm->broadcast_entities( reader_rank, entities );
                    if( MB_SUCCESS != tmp_result ) break;
                }

                if( debug )
                {
                    std::cerr << "Bcast done; entities:" << std::endl;
                    mbImpl->list_entities( 0, 0 );
                }

                // Add the received entities to this fileset if I wasn't the reader
                if( !i_read && MB_SUCCESS == tmp_result ) tmp_result = mbImpl->add_entities( file_set, entities );

                break;

            //==================
            case PA_DELETE_NONLOCAL:
                myDebug.tprint( 1, "Deleting nonlocal entities.\n" );

                tmp_result = delete_nonlocal_entities( partition_tag_name, partition_tag_vals, distrib, file_set );
                if( debug )
                {
                    std::cerr << "Delete nonlocal done; entities:" << std::endl;
                    mbImpl->list_entities( 0, 0 );
                }

                if( MB_SUCCESS == tmp_result ) tmp_result = create_partition_sets( partition_tag_name, file_set );

                break;

            //==================
            case PA_CHECK_GIDS_SERIAL:
                myDebug.tprint( 1, "Checking global IDs.\n" );

                tmp_result = myPcomm->check_global_ids( file_set, 0, 1, true, false );
                break;
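
            //==================
            // Resolve shared entities: match entities on inter-processor
            // interfaces (via global IDs or the reader-populated file-id tag)
            // and record the sharing information on them; resolve_dim is the
            // dimension of entities to resolve, shared_dim the maximum
            // dimension of shared entities.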
            case PA_RESOLVE_SHARED_ENTS:
                myDebug.tprint( 1, "Resolving shared entities.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result =
                        myPcomm->resolve_shared_ents( file_set, resolve_dim, shared_dim, use_id_tag ? file_id_tag : 0 );
                if( MB_SUCCESS != tmp_result ) break;

#ifndef NDEBUG
                // Check number of owned vertices through pcomm's public interface
                tmp_result = mbImpl->get_entities_by_type( 0, MBVERTEX, ents );
                if( MB_SUCCESS == tmp_result )
                    tmp_result = myPcomm->filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );
                if( MB_SUCCESS == tmp_result )
                    myDebug.tprintf( 1, "Proc %u reports %lu owned vertices.\n", myPcomm->proc_config().proc_rank(),
                                     ents.size() );
#endif
                break;

            //==================
            case PA_EXCHANGE_GHOSTS:
                myDebug.tprint( 1, "Exchanging ghost entities.\n" );

                tmp_result = myPcomm->exchange_ghost_cells( ghost_dim, bridge_dim, num_layers, addl_ents, true, true,
                                                            &file_set );
                break;

            //==================
            case PA_RESOLVE_SHARED_SETS:
                myDebug.tprint( 1, "Resolving shared sets.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->resolve_shared_sets( file_set, use_id_tag ? file_id_tag : 0 );
                break;

            //==================
            case PA_AUGMENT_SETS_WITH_GHOSTS:
                myDebug.tprint( 1, "Augmenting sets with ghost entities.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->augment_default_sets_with_ghosts( file_set );
                break;

            //==================
            case PA_CORRECT_THIN_GHOSTS:
                myDebug.tprint( 1, "Correcting thin ghost layers.\n" );
                if( 2 >= myPcomm->size() )  // It is a problem only for multi-shared entities
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->correct_thin_ghost_layers();
                break;

            //==================
            case PA_PRINT_PARALLEL:
                myDebug.tprint( 1, "Printing parallel information.\n" );

                tmp_result = myPcomm->list_entities( 0, -1 );
                break;

            //==================
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected parallel action" );
        }  // switch (*vit)

        if( MB_SUCCESS != tmp_result )
        {
            MB_SET_ERR( MB_FAILURE, "Failed in step " << ParallelActionsNames[*vit] );
        }

        if( cputime ) act_times[i] = MPI_Wtime();
    }  // for (i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++)

    if( use_id_tag )
    {
        result = mbImpl->tag_delete( id_tag );MB_CHK_SET_ERR( result, "Trouble deleting id tag" );
    }
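
    // Convert the cumulative MPI_Wtime stamps into per-action durations, then
    // max-reduce across ranks so rank 0 reports the slowest rank's time for
    // each action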
    if( cputime )
    {
        for( i = pa_vec.size(); i > 0; i-- )
            act_times[i] -= act_times[i - 1];

        // Replace initial time with overall time
        act_times[0] = MPI_Wtime() - act_times[0];
        // Get the maximum over all procs
        if( 0 != myPcomm->proc_config().proc_rank() )
        {
            MPI_Reduce( &act_times[0], 0, pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
        }
        else
        {
#if( MPI_VERSION >= 2 )
            MPI_Reduce( MPI_IN_PLACE, &act_times[0], pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
#else
            // Note, extra comm-size allocation is required
            std::vector< double > act_times_tmp( pa_vec.size() + 1 );
            MPI_Reduce( &act_times[0], &act_times_tmp[0], pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
            act_times = act_times_tmp;  // Extra copy here too
#endif
            std::cout << "Parallel Read times: " << std::endl;
            for( i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++ )
                std::cout << " " << act_times[i] << " " << ParallelActionsNames[*vit] << std::endl;
            std::cout << " " << act_times[0] << " PARALLEL TOTAL" << std::endl;
        }
    }

    return MB_SUCCESS;
}

ErrorCode ReadParallel::delete_nonlocal_entities( std::string& ptag_name,
                                                  std::vector< int >& ptag_vals,
                                                  bool distribute,
                                                  EntityHandle file_set )
{
    Range partition_sets;

    Tag ptag;
    ErrorCode result = mbImpl->tag_get_handle( ptag_name.c_str(), 1, MB_TYPE_INTEGER, ptag );MB_CHK_SET_ERR( result, "Failed getting tag handle in delete_nonlocal_entities" );

    result = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &ptag, NULL, 1, myPcomm->partition_sets() );MB_CHK_SET_ERR( result, "Failed to get sets with partition-type tag" );

    int proc_sz = myPcomm->proc_config().proc_size();
    int proc_rk = myPcomm->proc_config().proc_rank();

    if( !ptag_vals.empty() )
    {
        // Values input; get sets with those values
        Range tmp_sets;
        std::vector< int > tag_vals( myPcomm->partition_sets().size() );
        result = mbImpl->tag_get_data( ptag, myPcomm->partition_sets(), &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to get tag data for partition vals tag" );
        for( std::vector< int >::iterator pit = tag_vals.begin(); pit != tag_vals.end(); ++pit )
        {
            std::vector< int >::iterator pit2 = std::find( ptag_vals.begin(), ptag_vals.end(), *pit );
            if( pit2 != ptag_vals.end() ) tmp_sets.insert( myPcomm->partition_sets()[pit - tag_vals.begin()] );
        }

        myPcomm->partition_sets().swap( tmp_sets );
    }

    if( distribute )
    {
        // For now, require that the number of partition sets be no smaller
        // than the number of procs
        if( myPcomm->partition_sets().size() < (unsigned int)proc_sz )
        {
            MB_SET_ERR( MB_FAILURE, "Too few parts; P = " << proc_rk << ", tag = " << ptag
                                                          << ", # sets = " << myPcomm->partition_sets().size() );
        }

        Range tmp_sets;
        // Distribute the partition sets: with S sets and P procs, each proc
        // gets S/P sets and the first S%P procs get one extra (e.g. 7 sets
        // over 3 procs gives ranks the sets [0-2], [3-4], [5-6])
        unsigned int num_sets = myPcomm->partition_sets().size() / proc_sz;
        unsigned int num_leftover = myPcomm->partition_sets().size() % proc_sz;
        int begin_set = 0;
        if( proc_rk < (int)num_leftover )
        {
            num_sets++;
            begin_set = num_sets * proc_rk;
        }
        else
            begin_set = proc_rk * num_sets + num_leftover;

        for( unsigned int i = 0; i < num_sets; i++ )
            tmp_sets.insert( myPcomm->partition_sets()[begin_set + i] );

        myPcomm->partition_sets().swap( tmp_sets );
    }

    myDebug.print( 1, "My partition sets: ", myPcomm->partition_sets() );

    result = delete_nonlocal_entities( file_set );MB_CHK_ERR( result );

    return MB_SUCCESS;
}
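
// create_partition_sets tags this rank's partition sets with a standard
// partition tag (PARALLEL_PARTITION by default) whose value is the owning
// rank, creating the tag if needed and reconciling any pre-existing tagged
// sets that disagree with the current partition.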

ErrorCode ReadParallel::create_partition_sets( std::string& ptag_name, EntityHandle file_set )
{
    int proc_rk = myPcomm->proc_config().proc_rank();
    ErrorCode result = MB_SUCCESS;

    Tag ptag;

    // Tag the partition sets with a standard tag name
    if( ptag_name.empty() ) ptag_name = PARALLEL_PARTITION_TAG_NAME;
    bool tag_created = false;
    result = mbImpl->tag_get_handle( ptag_name.c_str(), 1, MB_TYPE_INTEGER, ptag, MB_TAG_SPARSE | MB_TAG_CREAT, 0,
                                     &tag_created );MB_CHK_SET_ERR( result, "Trouble getting PARALLEL_PARTITION tag" );

    if( !tag_created )
    {
        // This tag already exists; better check to see that tagged sets
        // agree with this partition
        Range tagged_sets;
        int* proc_rk_ptr = &proc_rk;
        result = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &ptag, (const void* const*)&proc_rk_ptr,
                                                       1, tagged_sets );MB_CHK_SET_ERR( result, "Trouble getting tagged sets" );
        if( !tagged_sets.empty() && tagged_sets != myPcomm->partition_sets() )
        {
            result = mbImpl->tag_delete_data( ptag, tagged_sets );MB_CHK_SET_ERR( result, "Trouble deleting data of PARALLEL_PARTITION tag" );
        }
        else if( tagged_sets == myPcomm->partition_sets() )
            return MB_SUCCESS;
    }

    // If we get here, we need to assign the tag
    std::vector< int > values( myPcomm->partition_sets().size() );
    for( unsigned int i = 0; i < myPcomm->partition_sets().size(); i++ )
        values[i] = proc_rk;
    result = mbImpl->tag_set_data( ptag, myPcomm->partition_sets(), &values[0] );MB_CHK_SET_ERR( result, "Trouble setting data to PARALLEL_PARTITION tag" );

    return MB_SUCCESS;
}
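
// This overload keeps only the entities related to this rank's partition
// sets: gather everything those sets use (contained entities, connectivity,
// parent/child sets), subtract that from the file entities, remove the
// remainder from every kept set, and finally delete it (sets first, then
// other entities).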

ErrorCode ReadParallel::delete_nonlocal_entities( EntityHandle file_set )
{
    // Get partition entities and entities related to/used by those;
    // get ents in the partition
    ReadUtilIface* read_iface;
    mbImpl->query_interface( read_iface );
    Range partition_ents, all_sets;

    myDebug.tprint( 2, "Gathering related entities.\n" );

    ErrorCode result = read_iface->gather_related_ents( myPcomm->partition_sets(), partition_ents, &file_set );MB_CHK_SET_ERR( result, "Failure gathering related entities" );

    // Get pre-existing entities
    Range file_ents;
    result = mbImpl->get_entities_by_handle( file_set, file_ents );MB_CHK_SET_ERR( result, "Couldn't get pre-existing entities" );

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "File entities: ", file_ents );
    }

    // Get deletable entities by subtracting partition ents from file ents
    Range deletable_ents = subtract( file_ents, partition_ents );

    // Cache deletable vs. keepable sets
    Range deletable_sets = deletable_ents.subset_by_type( MBENTITYSET );
    Range keepable_sets = subtract( file_ents.subset_by_type( MBENTITYSET ), deletable_sets );

    myDebug.tprint( 2, "Removing deletable entities from keepable sets.\n" );

    // Remove deletable ents from all keepable sets
    for( Range::iterator rit = keepable_sets.begin(); rit != keepable_sets.end(); ++rit )
    {
        result = mbImpl->remove_entities( *rit, deletable_ents );MB_CHK_SET_ERR( result, "Failure removing deletable entities" );
    }
    result = mbImpl->remove_entities( file_set, deletable_ents );MB_CHK_SET_ERR( result, "Failure removing deletable entities" );

    myDebug.tprint( 2, "Deleting deletable entities.\n" );

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "Deletable sets: ", deletable_sets );
    }

    // Delete sets, then ents
    if( !deletable_sets.empty() )
    {
        result = mbImpl->delete_entities( deletable_sets );MB_CHK_SET_ERR( result, "Failure deleting sets in delete_nonlocal_entities" );
    }

    deletable_ents -= deletable_sets;

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "Deletable entities: ", deletable_ents );
    }

    if( !deletable_ents.empty() )
    {
        result = mbImpl->delete_entities( deletable_ents );MB_CHK_SET_ERR( result, "Failure deleting entities in delete_nonlocal_entities" );
    }

    return MB_SUCCESS;
}

}  // namespace moab