MOAB: Mesh Oriented datABase
(version 5.2.1)
ReadParallel.cpp
#include "ReadParallel.hpp"
#include "moab/Core.hpp"
#include "moab/ProcConfig.hpp"
#include "moab/FileOptions.hpp"
#include "moab/Error.hpp"
#include "moab/ReaderWriterSet.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include <iostream>
#include <iomanip>
#include <iterator>
#include <sstream>
#include <algorithm>
#include <assert.h>

namespace moab
{

const bool debug = false;

const char* ReadParallel::ParallelActionsNames[] = { "PARALLEL READ",
                                                     "PARALLEL READ PART",
                                                     "PARALLEL BROADCAST",
                                                     "PARALLEL DELETE NONLOCAL",
                                                     "PARALLEL CHECK_GIDS_SERIAL",
                                                     "PARALLEL GET_FILESET_ENTS",
                                                     "PARALLEL RESOLVE_SHARED_ENTS",
                                                     "PARALLEL EXCHANGE_GHOSTS",
                                                     "PARALLEL RESOLVE_SHARED_SETS",
                                                     "PARALLEL_AUGMENT_SETS_WITH_GHOSTS",
                                                     "PARALLEL PRINT_PARALLEL",
                                                     "PARALLEL_CREATE_TRIVIAL_PARTITION",
                                                     "PARALLEL_CORRECT_THIN_GHOST_LAYERS" };

const char* ReadParallel::parallelOptsNames[] = { "NONE", "BCAST", "BCAST_DELETE", "READ_DELETE", "READ_PART", "", 0 };

ReadParallel::ReadParallel( Interface* impl, ParallelComm* pc )
    : mbImpl( impl ), myPcomm( pc ), myDebug( "ReadPara", std::cerr )
{
    if( !myPcomm )
    {
        myPcomm = ParallelComm::get_pcomm( mbImpl, 0 );
        if( NULL == myPcomm ) myPcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );
    }
    myDebug.set_rank( myPcomm->proc_config().proc_rank() );
    if( debug )  // For backwards compatibility, enable all debug output if constant is true
        myDebug.set_verbosity( 10 );

    impl->query_interface( mError );
}

ErrorCode ReadParallel::load_file( const char** file_names, const int num_files, const EntityHandle* file_set,
                                   const FileOptions& opts, const ReaderIface::SubsetList* subset_list,
                                   const Tag* file_id_tag )
{
    int tmpval;
    if( MB_SUCCESS == opts.get_int_option( "DEBUG_PIO", 1, tmpval ) )
    {
        myDebug.set_verbosity( tmpval );
        myPcomm->set_debug_verbosity( tmpval );
    }
    myDebug.tprint( 1, "Setting up...\n" );

    // Get parallel settings
    int parallel_mode;
    ErrorCode result = opts.match_option( "PARALLEL", parallelOptsNames, parallel_mode );
    if( MB_FAILURE == result )
    {
        MB_SET_ERR( MB_FAILURE, "Unexpected value for 'PARALLEL' option" );
    }
    else if( MB_ENTITY_NOT_FOUND == result )
    {
        parallel_mode = 0;
    }

    // Get partition setting
    bool distrib;
    std::string partition_tag_name;
    result = opts.get_option( "PARTITION", partition_tag_name );
    if( MB_ENTITY_NOT_FOUND == result )
    {
        distrib            = false;
        partition_tag_name = "";
    }
    else
    {
        distrib = true;
        if( partition_tag_name.empty() ) partition_tag_name = PARALLEL_PARTITION_TAG_NAME;

        // Also get deprecated PARTITION_DISTRIBUTE option
        // so that higher-level code doesn't return an error
        // due to an unrecognized option
        opts.get_null_option( "PARTITION_DISTRIBUTE" );
    }

    // Get partition tag value(s), if any, and whether they're to be
    // distributed or assigned
    std::vector< int > partition_tag_vals;
    opts.get_ints_option( "PARTITION_VAL", partition_tag_vals );

    // See if the partition tag name is "TRIVIAL" and whether that tag already exists
    bool create_trivial_partition = false;
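    // (A "TRIVIAL" partition that is absent from the file is synthesized later by
    // the PA_CREATE_TRIVIAL_PARTITION action, which splits the highest-dimension
    // entities evenly across parts.)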
    if( partition_tag_name == std::string( "TRIVIAL" ) )
    {
        Tag ttag;  // see if the trivial tag exists
        result = mbImpl->tag_get_handle( partition_tag_name.c_str(), ttag );
        if( MB_TAG_NOT_FOUND == result ) create_trivial_partition = true;
    }

    // See if we need to report times
    bool cputime = false;
    result = opts.get_null_option( "CPUTIME" );
    if( MB_SUCCESS == result ) cputime = true;

    // See if we need to print parallel information
    bool print_parallel = false;
    result = opts.get_null_option( "PRINT_PARALLEL" );
    if( MB_SUCCESS == result ) print_parallel = true;

    // Get ghosting options
    std::string ghost_str;
    int bridge_dim, ghost_dim = -1, num_layers, addl_ents = 0;
    result = opts.get_str_option( "PARALLEL_GHOSTS", ghost_str );
    if( MB_TYPE_OUT_OF_RANGE == result )
    {
        ghost_dim  = 3;
        bridge_dim = 0;
        num_layers = 1;
    }
    else if( MB_SUCCESS == result )
    {
        int num_fields = sscanf( ghost_str.c_str(), "%d.%d.%d.%d", &ghost_dim, &bridge_dim, &num_layers, &addl_ents );
        if( 3 > num_fields )
        {
            MB_SET_ERR( MB_FAILURE, "Didn't read 3 fields from PARALLEL_GHOSTS string" );
        }
    }

    // Get resolve_shared_ents option
    std::string shared_str;
    int resolve_dim = -2, shared_dim = -1;
    result = opts.get_str_option( "PARALLEL_RESOLVE_SHARED_ENTS", shared_str );
    if( MB_TYPE_OUT_OF_RANGE == result )
    {
        resolve_dim = -1;
        shared_dim  = -1;
    }
    else if( MB_SUCCESS == result )
    {
        int num_fields = sscanf( shared_str.c_str(), "%d.%d", &resolve_dim, &shared_dim );
        if( 2 != num_fields )
        {
            MB_SET_ERR( MB_FAILURE, "Didn't read 2 fields from PARALLEL_RESOLVE_SHARED_ENTS string" );
        }
    }

    // Get skip augmenting with ghosts option
    bool skip_augment = false;
    result = opts.get_null_option( "SKIP_AUGMENT_WITH_GHOSTS" );
    if( MB_SUCCESS == result ) skip_augment = true;

    bool correct_thin_ghosts = false;
    result = opts.get_null_option( "PARALLEL_THIN_GHOST_LAYER" );
    if( MB_SUCCESS == result ) correct_thin_ghosts = true;

    // Get MPI IO processor rank
    int reader_rank;
    result = opts.get_int_option( "MPI_IO_RANK", reader_rank );
    if( MB_ENTITY_NOT_FOUND == result )
        reader_rank = 0;
    else if( MB_SUCCESS != result )
    {
        MB_SET_ERR( MB_FAILURE, "Unexpected value for 'MPI_IO_RANK' option" );
    }

    // Now that we've parsed all the parallel options, make an instruction queue
    std::vector< int > pa_vec;
    bool is_reader = ( reader_rank == (int)myPcomm->proc_config().proc_rank() );

    bool partition_by_rank = false;
    if( MB_SUCCESS == opts.get_null_option( "PARTITION_BY_RANK" ) )
    {
        partition_by_rank = true;
        if( !partition_tag_vals.empty() )
        {
            MB_SET_ERR( MB_FAILURE, "Cannot specify both PARTITION_VALS and PARTITION_BY_RANK" );
        }
    }

    double factor_seq;
    if( MB_SUCCESS == opts.get_real_option( "PARALLEL_SEQUENCE_FACTOR", factor_seq ) )
    {
        if( factor_seq < 1. ) MB_SET_ERR( MB_FAILURE, "cannot have sequence factor less than 1." );
        mbImpl->set_sequence_multiplier( factor_seq );
    }
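    // Translate the read mode into an ordered queue of parallel actions (pa_vec);
    // the queue is executed step by step in the load_file() overload below.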
    switch( parallel_mode )
    {
        case POPT_BCAST:
            myDebug.print( 1, "Read mode is BCAST\n" );
            if( is_reader )
            {
                pa_vec.push_back( PA_READ );
                pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
                pa_vec.push_back( PA_GET_FILESET_ENTS );
            }
            pa_vec.push_back( PA_BROADCAST );
            if( !is_reader ) pa_vec.push_back( PA_GET_FILESET_ENTS );
            break;

        case POPT_BCAST_DELETE:
            myDebug.print( 1, "Read mode is BCAST_DELETE\n" );
            if( is_reader )
            {
                pa_vec.push_back( PA_READ );
                pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
                pa_vec.push_back( PA_GET_FILESET_ENTS );
                if( create_trivial_partition ) pa_vec.push_back( PA_CREATE_TRIVIAL_PARTITION );
            }
            pa_vec.push_back( PA_BROADCAST );
            if( !is_reader ) pa_vec.push_back( PA_GET_FILESET_ENTS );
            pa_vec.push_back( PA_DELETE_NONLOCAL );
            break;

        case POPT_DEFAULT:
        case POPT_READ_DELETE:
            myDebug.print( 1, "Read mode is READ_DELETE\n" );
            pa_vec.push_back( PA_READ );
            pa_vec.push_back( PA_CHECK_GIDS_SERIAL );
            pa_vec.push_back( PA_GET_FILESET_ENTS );
            pa_vec.push_back( PA_DELETE_NONLOCAL );
            break;

        case POPT_READ_PART:
            myDebug.print( 1, "Read mode is READ_PART\n" );
            pa_vec.push_back( PA_READ_PART );
            break;

        default:
            MB_SET_ERR( MB_FAILURE, "Unexpected parallel read mode" );
    }

    if( -2 != resolve_dim ) pa_vec.push_back( PA_RESOLVE_SHARED_ENTS );

    if( -1 != ghost_dim ) pa_vec.push_back( PA_EXCHANGE_GHOSTS );

    if( -2 != resolve_dim )
    {
        pa_vec.push_back( PA_RESOLVE_SHARED_SETS );
        if( -1 != ghost_dim && !skip_augment ) pa_vec.push_back( PA_AUGMENT_SETS_WITH_GHOSTS );
        if( -1 != ghost_dim && correct_thin_ghosts ) pa_vec.push_back( PA_CORRECT_THIN_GHOSTS );
    }

    if( print_parallel ) pa_vec.push_back( PA_PRINT_PARALLEL );

    result = load_file( file_names, num_files, file_set, parallel_mode, partition_tag_name, partition_tag_vals,
                        distrib, partition_by_rank, pa_vec, opts, subset_list, file_id_tag, reader_rank, cputime,
                        resolve_dim, shared_dim, ghost_dim, bridge_dim, num_layers, addl_ents );MB_CHK_ERR( result );

    if( parallel_mode == POPT_BCAST_DELETE && !is_reader ) opts.mark_all_seen();

    return MB_SUCCESS;
}

ErrorCode ReadParallel::load_file( const char** file_names, const int num_files, const EntityHandle* file_set_ptr,
                                   int /*parallel_mode*/, std::string& partition_tag_name,
                                   std::vector< int >& partition_tag_vals, bool distrib, bool partition_by_rank,
                                   std::vector< int >& pa_vec, const FileOptions& opts,
                                   const ReaderIface::SubsetList* subset_list, const Tag* file_id_tag,
                                   const int reader_rank, const bool cputime, const int resolve_dim,
                                   const int shared_dim, const int ghost_dim, const int bridge_dim,
                                   const int num_layers, const int addl_ents )
{
    ErrorCode result = MB_SUCCESS;
    if( myPcomm == NULL ) myPcomm = new ParallelComm( mbImpl, MPI_COMM_WORLD );

    Range entities;
    Tag file_set_tag = 0;
    int other_sets   = 0;
    ReaderWriterSet::iterator iter;
    Range other_file_sets, file_sets;
    Core* impl = dynamic_cast< Core* >( mbImpl );

    std::vector< double > act_times( pa_vec.size() + 1 );
    std::vector< int >::iterator vit;
    int i, j;
    act_times[0] = MPI_Wtime();
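    // act_times[0] holds the start timestamp; when CPUTIME is set, each action
    // appends its completion time so per-step durations can be reported at the end.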
    // Make a new set for the parallel read
    EntityHandle file_set;
    if( !file_set_ptr || !( *file_set_ptr ) )
    {
        result = mbImpl->create_meshset( MESHSET_SET, file_set );MB_CHK_SET_ERR( result, "Trouble creating file set" );
    }
    else
        file_set = *file_set_ptr;

    bool i_read     = false;
    Tag id_tag      = 0;
    bool use_id_tag = false;
    Range ents;

    for( i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++ )
    {
        ErrorCode tmp_result = MB_SUCCESS;
        switch( *vit )
        {
            //==================
            case PA_READ:
                i_read = true;

                for( j = 0; j < num_files; j++ )
                {
                    myDebug.tprintf( 1, "Reading file: \"%s\"\n", file_names[j] );

                    EntityHandle new_file_set;
                    result = mbImpl->create_meshset( MESHSET_SET, new_file_set );MB_CHK_SET_ERR( result, "Trouble creating file set" );
                    tmp_result = impl->serial_load_file( file_names[j], &new_file_set, opts, subset_list, file_id_tag );
                    if( MB_SUCCESS != tmp_result ) break;

                    // Put the contents of each file set for the reader into the
                    // file set for the parallel read
                    assert( 0 != new_file_set );
                    Range all_ents;
                    tmp_result = mbImpl->get_entities_by_handle( new_file_set, all_ents );
                    if( MB_SUCCESS != tmp_result ) break;
                    all_ents.insert( new_file_set );
                    tmp_result = mbImpl->add_entities( file_set, all_ents );
                    if( MB_SUCCESS != tmp_result ) break;
                }
                if( MB_SUCCESS != tmp_result ) break;

                // Mark the file set for this parallel reader
                tmp_result = mbImpl->tag_get_handle( "__file_set", 1, MB_TYPE_INTEGER, file_set_tag,
                                                     MB_TAG_SPARSE | MB_TAG_CREAT );
                if( MB_SUCCESS != tmp_result ) break;

                tmp_result = mbImpl->tag_set_data( file_set_tag, &file_set, 1, &other_sets );
                break;

            //==================
            case PA_READ_PART: {
                myDebug.tprintf( 1, "Reading file: \"%s\"\n", file_names[0] );

                i_read = true;
                if( num_files != 1 )
                {
                    MB_SET_ERR( MB_NOT_IMPLEMENTED, "Multiple file read not supported for READ_PART" );
                }

                // If we're going to resolve shared entities, then we need
                // to ask the file reader to populate a tag with unique ids
                // (typically file ids/indices/whatever.)
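                // (The same tag is later passed to resolve_shared_ents /
                // resolve_shared_sets to match entities across processors.)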
                if( std::find( pa_vec.begin(), pa_vec.end(), PA_RESOLVE_SHARED_ENTS ) != pa_vec.end() )
                {
                    use_id_tag = true;
                    if( !file_id_tag )
                    {
                        // This tag is really used for resolving shared entities with crystal router
                        // In the end, this is an identifier that gets converted to long
                        // In the hdf5 file reader, we also convert from the hdf5 file id type to long
                        tmp_result = mbImpl->tag_get_handle( "", sizeof( long ), MB_TYPE_OPAQUE, id_tag,
                                                             MB_TAG_DENSE | MB_TAG_CREAT );
                        if( MB_SUCCESS != tmp_result ) break;
                        file_id_tag = &id_tag;
                    }
                }

                ReaderIface::IDTag parts = { partition_tag_name.c_str(), 0, 0 };
                ReaderIface::SubsetList sl;
                sl.num_parts = 0;
                int rank     = myPcomm->rank();
                if( partition_by_rank )
                {
                    assert( partition_tag_vals.empty() );
                    parts.tag_values     = &rank;
                    parts.num_tag_values = 1;
                }
                else
                {
                    sl.num_parts   = myPcomm->size();
                    sl.part_number = myPcomm->rank();
                    if( !partition_tag_vals.empty() )
                    {
                        parts.tag_values     = &partition_tag_vals[0];
                        parts.num_tag_values = partition_tag_vals.size();
                    }
                }
                std::vector< ReaderIface::IDTag > subset;
                if( subset_list )
                {
                    std::vector< ReaderIface::IDTag > tmplist( subset_list->tag_list,
                                                               subset_list->tag_list + subset_list->tag_list_length );
                    tmplist.push_back( parts );
                    subset.swap( tmplist );
                    sl.tag_list        = &subset[0];
                    sl.tag_list_length = subset.size();
                }
                else
                {
                    sl.tag_list        = &parts;
                    sl.tag_list_length = 1;
                }
                tmp_result = impl->serial_load_file( *file_names, &file_set, opts, &sl, file_id_tag );
                if( MB_SUCCESS != tmp_result ) break;

                if( !partition_tag_name.empty() )
                {
                    Tag part_tag;
                    tmp_result = impl->tag_get_handle( partition_tag_name.c_str(), 1, MB_TYPE_INTEGER, part_tag );
                    if( MB_SUCCESS != tmp_result ) break;

                    tmp_result = impl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &part_tag, 0, 1,
                                                                     myPcomm->partition_sets() );
                }
            }
            break;

            //==================
            case PA_GET_FILESET_ENTS:
                myDebug.tprint( 1, "Getting fileset entities.\n" );

                // Get entities in the file set, and add the actual file set to it;
                // mark the file set to make sure any receiving procs know which it is
                tmp_result = mbImpl->get_entities_by_handle( file_set, entities );
                if( MB_SUCCESS != tmp_result )
                {
                    entities.clear();
                    break;
                }

                // Add the actual file set to entities too
                entities.insert( file_set );
                break;

            //==================
            case PA_CREATE_TRIVIAL_PARTITION: {
                myDebug.tprint( 1, "create trivial partition, for higher dim entities.\n" );
                // Get the highest-dimension entities present (try 3, then 2, then 1)
                Range hi_dim_ents = entities.subset_by_dimension( 3 );
                if( hi_dim_ents.empty() ) hi_dim_ents = entities.subset_by_dimension( 2 );
                if( hi_dim_ents.empty() ) hi_dim_ents = entities.subset_by_dimension( 1 );
                if( hi_dim_ents.empty() ) MB_SET_ERR( MB_FAILURE, "there are no elements of dim 1-3" );

                size_t num_hi_ents     = hi_dim_ents.size();
                unsigned int num_parts = myPcomm->size();

                // Create the trivial partition tag first
                int dum_id = -1;
                Tag ttag;  // trivial tag
                tmp_result = mbImpl->tag_get_handle( partition_tag_name.c_str(), 1, MB_TYPE_INTEGER, ttag,
                                                     MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );MB_CHK_SET_ERR( tmp_result, "Can't create trivial partition tag" );
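                // Each part receives a contiguous subrange of hi_dim_ents; part sets
                // are tagged with their part number and added to the file set.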
                // Compute the number of high dim entities on each part
                size_t nPartEnts = num_hi_ents / num_parts;

                // Number of extra entities after equal split over parts
                int iextra          = num_hi_ents % num_parts;
                Range::iterator itr = hi_dim_ents.begin();
                for( int k = 0; k < (int)num_parts; k++ )
                {
                    // Create a mesh set, insert a subrange of entities
                    EntityHandle part_set;
                    tmp_result = mbImpl->create_meshset( MESHSET_SET, part_set );MB_CHK_SET_ERR( tmp_result, "Can't create part set" );
                    entities.insert( part_set );

                    tmp_result = mbImpl->tag_set_data( ttag, &part_set, 1, &k );MB_CHK_SET_ERR( tmp_result, "Can't set trivial partition tag" );
                    Range subrange;
                    size_t num_ents_in_part = nPartEnts;
                    // The first iextra parts each take one of the leftover entities
                    if( k < iextra ) num_ents_in_part++;
                    for( size_t i1 = 0; i1 < num_ents_in_part; i1++, itr++ )
                        subrange.insert( *itr );
                    tmp_result = mbImpl->add_entities( part_set, subrange );MB_CHK_SET_ERR( tmp_result, "Can't add entities to trivial part " << k );
                    myDebug.tprintf( 1, "create trivial part %d with %lu entities \n", k, num_ents_in_part );
                    tmp_result = mbImpl->add_entities( file_set, &part_set, 1 );MB_CHK_SET_ERR( tmp_result, "Can't add trivial part to file set " << k );
                }
            }
            break;

            //==================
            case PA_BROADCAST:
                // Do the actual broadcast; if single-processor, ignore error
                myDebug.tprint( 1, "Broadcasting mesh.\n" );

                if( myPcomm->proc_config().proc_size() > 1 )
                {
                    tmp_result = myPcomm->broadcast_entities( reader_rank, entities );
                    if( MB_SUCCESS != tmp_result ) break;
                }

                if( debug )
                {
                    std::cerr << "Bcast done; entities:" << std::endl;
                    mbImpl->list_entities( 0, 0 );
                }

                // Add the received entities to this fileset if I wasn't the reader
                if( !i_read && MB_SUCCESS == tmp_result ) tmp_result = mbImpl->add_entities( file_set, entities );

                break;

            //==================
            case PA_DELETE_NONLOCAL:
                myDebug.tprint( 1, "Deleting nonlocal entities.\n" );

                tmp_result = delete_nonlocal_entities( partition_tag_name, partition_tag_vals, distrib, file_set );
                if( debug )
                {
                    std::cerr << "Delete nonlocal done; entities:" << std::endl;
                    mbImpl->list_entities( 0, 0 );
                }

                if( MB_SUCCESS == tmp_result ) tmp_result = create_partition_sets( partition_tag_name, file_set );

                break;

            //==================
            case PA_CHECK_GIDS_SERIAL:
                myDebug.tprint( 1, "Checking global IDs.\n" );

                tmp_result = myPcomm->check_global_ids( file_set, 0, 1, true, false );
                break;
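            // The remaining actions operate on the in-memory mesh: resolve entity/set
            // sharing across processors, exchange ghosts, and patch up ghost layers.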
            //==================
            case PA_RESOLVE_SHARED_ENTS:
                myDebug.tprint( 1, "Resolving shared entities.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result =
                        myPcomm->resolve_shared_ents( file_set, resolve_dim, shared_dim, use_id_tag ? file_id_tag : 0 );
                if( MB_SUCCESS != tmp_result ) break;

#ifndef NDEBUG
                // Check number of owned vertices through pcomm's public interface
                tmp_result = mbImpl->get_entities_by_type( 0, MBVERTEX, ents );
                if( MB_SUCCESS == tmp_result )
                    tmp_result = myPcomm->filter_pstatus( ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );
                if( MB_SUCCESS == tmp_result )
                    myDebug.tprintf( 1, "Proc %u reports %lu owned vertices.\n", myPcomm->proc_config().proc_rank(),
                                     ents.size() );
#endif
                break;

            //==================
            case PA_EXCHANGE_GHOSTS:
                myDebug.tprint( 1, "Exchanging ghost entities.\n" );

                tmp_result = myPcomm->exchange_ghost_cells( ghost_dim, bridge_dim, num_layers, addl_ents, true, true,
                                                            &file_set );
                break;

            //==================
            case PA_RESOLVE_SHARED_SETS:
                myDebug.tprint( 1, "Resolving shared sets.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->resolve_shared_sets( file_set, use_id_tag ? file_id_tag : 0 );
                break;

            //==================
            case PA_AUGMENT_SETS_WITH_GHOSTS:
                myDebug.tprint( 1, "Augmenting sets with ghost entities.\n" );

                if( 1 == myPcomm->size() )
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->augment_default_sets_with_ghosts( file_set );
                break;

            //==================
            case PA_CORRECT_THIN_GHOSTS:
                myDebug.tprint( 1, "correcting thin ghost layers.\n" );
                if( 2 >= myPcomm->size() )  // it is a problem only for multi-shared entities
                    tmp_result = MB_SUCCESS;
                else
                    tmp_result = myPcomm->correct_thin_ghost_layers();
                break;

            //==================
            case PA_PRINT_PARALLEL:
                myDebug.tprint( 1, "Printing parallel information.\n" );

                tmp_result = myPcomm->list_entities( 0, -1 );
                break;

            //==================
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected parallel action" );
        }  // switch (*vit)

        if( MB_SUCCESS != tmp_result )
        {
            MB_SET_ERR( MB_FAILURE, "Failed in step " << ParallelActionsNames[*vit] );
        }

        if( cputime ) act_times[i] = MPI_Wtime();
    }  // for (i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++)

    if( use_id_tag )
    {
        result = mbImpl->tag_delete( id_tag );MB_CHK_SET_ERR( result, "Trouble deleting id tag" );
    }

    if( cputime )
    {
        for( i = pa_vec.size(); i > 0; i-- )
            act_times[i] -= act_times[i - 1];

        // Replace initial time with overall time
        act_times[0] = MPI_Wtime() - act_times[0];
        // Get the maximum over all procs
        if( 0 != myPcomm->proc_config().proc_rank() )
        {
            MPI_Reduce( &act_times[0], 0, pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
        }
        else
        {
#if( MPI_VERSION >= 2 )
            MPI_Reduce( MPI_IN_PLACE, &act_times[0], pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
#else
            // Note, extra comm-size allocation is required
            std::vector< double > act_times_tmp( pa_vec.size() + 1 );
            MPI_Reduce( &act_times[0], &act_times_tmp[0], pa_vec.size() + 1, MPI_DOUBLE, MPI_MAX, 0,
                        myPcomm->proc_config().proc_comm() );
            act_times = act_times_tmp;  // extra copy here too
#endif
            std::cout << "Parallel Read times: " << std::endl;
            for( i = 1, vit = pa_vec.begin(); vit != pa_vec.end(); ++vit, i++ )
                std::cout << "  " << act_times[i] << " " << ParallelActionsNames[*vit] << std::endl;
            std::cout << "  " << act_times[0] << " PARALLEL TOTAL" << std::endl;
        }
    }

    return MB_SUCCESS;
}
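// Usage sketch (an illustration, assuming a "mesh.h5m" file pre-partitioned with
// PARALLEL_PARTITION sets): a typical option string driving the READ_PART queue
// above, resolving shared entities and exchanging one layer of 3D ghosts:
//
//   moab::Core mb;
//   moab::ErrorCode rval =
//       mb.load_file( "mesh.h5m", 0,
//                     "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;"
//                     "PARALLEL_RESOLVE_SHARED_ENTS;PARALLEL_GHOSTS=3.0.1" );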
ErrorCode ReadParallel::delete_nonlocal_entities( std::string& ptag_name, std::vector< int >& ptag_vals,
                                                  bool distribute, EntityHandle file_set )
{
    Range partition_sets;

    Tag ptag;
    ErrorCode result = mbImpl->tag_get_handle( ptag_name.c_str(), 1, MB_TYPE_INTEGER, ptag );MB_CHK_SET_ERR( result, "Failed getting tag handle in delete_nonlocal_entities" );

    result = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &ptag, NULL, 1, myPcomm->partition_sets() );MB_CHK_SET_ERR( result, "Failed to get sets with partition-type tag" );

    int proc_sz = myPcomm->proc_config().proc_size();
    int proc_rk = myPcomm->proc_config().proc_rank();

    if( !ptag_vals.empty() )
    {
        // Values input; get sets with those values
        Range tmp_sets;
        std::vector< int > tag_vals( myPcomm->partition_sets().size() );
        result = mbImpl->tag_get_data( ptag, myPcomm->partition_sets(), &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to get tag data for partition vals tag" );
        for( std::vector< int >::iterator pit = tag_vals.begin(); pit != tag_vals.end(); ++pit )
        {
            std::vector< int >::iterator pit2 = std::find( ptag_vals.begin(), ptag_vals.end(), *pit );
            if( pit2 != ptag_vals.end() ) tmp_sets.insert( myPcomm->partition_sets()[pit - tag_vals.begin()] );
        }

        myPcomm->partition_sets().swap( tmp_sets );
    }

    if( distribute )
    {
        // For now, require that the number of partition sets be at least
        // the number of procs
        if( myPcomm->partition_sets().size() < (unsigned int)proc_sz )
        {
            MB_SET_ERR( MB_FAILURE, "Too few parts; P = " << proc_rk << ", tag = " << ptag
                                                          << ", # sets = " << myPcomm->partition_sets().size() );
        }

        Range tmp_sets;
        // Distribute the partition sets
        unsigned int num_sets     = myPcomm->partition_sets().size() / proc_sz;
        unsigned int num_leftover = myPcomm->partition_sets().size() % proc_sz;
        int begin_set             = 0;
        if( proc_rk < (int)num_leftover )
        {
            num_sets++;
            begin_set = num_sets * proc_rk;
        }
        else
            begin_set = proc_rk * num_sets + num_leftover;

        for( unsigned int i = 0; i < num_sets; i++ )
            tmp_sets.insert( myPcomm->partition_sets()[begin_set + i] );

        myPcomm->partition_sets().swap( tmp_sets );
    }

    myDebug.print( 1, "My partition sets: ", myPcomm->partition_sets() );

    result = delete_nonlocal_entities( file_set );MB_CHK_ERR( result );

    return MB_SUCCESS;
}
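// Tag each local partition set with this processor's rank, creating the standard
// PARALLEL_PARTITION tag if the file did not define one.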
ErrorCode ReadParallel::create_partition_sets( std::string& ptag_name, EntityHandle file_set )
{
    int proc_rk      = myPcomm->proc_config().proc_rank();
    ErrorCode result = MB_SUCCESS;

    Tag ptag;

    // Tag the partition sets with a standard tag name
    if( ptag_name.empty() ) ptag_name = PARALLEL_PARTITION_TAG_NAME;
    bool tag_created = false;
    result = mbImpl->tag_get_handle( ptag_name.c_str(), 1, MB_TYPE_INTEGER, ptag, MB_TAG_SPARSE | MB_TAG_CREAT, 0,
                                     &tag_created );MB_CHK_SET_ERR( result, "Trouble getting PARALLEL_PARTITION tag" );

    if( !tag_created )
    {
        // This tag already exists; better check to see that tagged sets
        // agree with this partition
        Range tagged_sets;
        int* proc_rk_ptr = &proc_rk;
        result = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &ptag, (const void* const*)&proc_rk_ptr,
                                                       1, tagged_sets );MB_CHK_SET_ERR( result, "Trouble getting tagged sets" );
        if( !tagged_sets.empty() && tagged_sets != myPcomm->partition_sets() )
        {
            result = mbImpl->tag_delete_data( ptag, tagged_sets );MB_CHK_SET_ERR( result, "Trouble deleting data of PARALLEL_PARTITION tag" );
        }
        else if( tagged_sets == myPcomm->partition_sets() )
            return MB_SUCCESS;
    }

    // If we get here, we need to assign the tag
    std::vector< int > values( myPcomm->partition_sets().size() );
    for( unsigned int i = 0; i < myPcomm->partition_sets().size(); i++ )
        values[i] = proc_rk;
    result = mbImpl->tag_set_data( ptag, myPcomm->partition_sets(), &values[0] );MB_CHK_SET_ERR( result, "Trouble setting data to PARALLEL_PARTITION tag" );

    return MB_SUCCESS;
}

ErrorCode ReadParallel::delete_nonlocal_entities( EntityHandle file_set )
{
    // Get partition entities and ents related to/used by those
    // (get ents in the partition)
    ReadUtilIface* read_iface;
    mbImpl->query_interface( read_iface );
    Range partition_ents, all_sets;

    myDebug.tprint( 2, "Gathering related entities.\n" );

    ErrorCode result = read_iface->gather_related_ents( myPcomm->partition_sets(), partition_ents, &file_set );MB_CHK_SET_ERR( result, "Failure gathering related entities" );

    // Get pre-existing entities
    Range file_ents;
    result = mbImpl->get_entities_by_handle( file_set, file_ents );MB_CHK_SET_ERR( result, "Couldn't get pre-existing entities" );

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "File entities: ", file_ents );
    }

    // Get deletable entities by subtracting partition ents from file ents
    Range deletable_ents = subtract( file_ents, partition_ents );
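    // Sets get special handling: surviving ("keepable") sets must first have the
    // deletable entities removed from their contents, so no set retains a handle
    // to an entity that is about to be deleted.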
    // Cache deletable vs. keepable sets
    Range deletable_sets = deletable_ents.subset_by_type( MBENTITYSET );
    Range keepable_sets  = subtract( file_ents.subset_by_type( MBENTITYSET ), deletable_sets );

    myDebug.tprint( 2, "Removing deletable entities from keepable sets.\n" );

    // Remove deletable ents from all keepable sets
    for( Range::iterator rit = keepable_sets.begin(); rit != keepable_sets.end(); ++rit )
    {
        result = mbImpl->remove_entities( *rit, deletable_ents );MB_CHK_SET_ERR( result, "Failure removing deletable entities" );
    }
    result = mbImpl->remove_entities( file_set, deletable_ents );MB_CHK_SET_ERR( result, "Failure removing deletable entities" );

    myDebug.tprint( 2, "Deleting deletable entities.\n" );

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "Deletable sets: ", deletable_sets );
    }

    // Delete sets, then ents
    if( !deletable_sets.empty() )
    {
        result = mbImpl->delete_entities( deletable_sets );MB_CHK_SET_ERR( result, "Failure deleting sets in delete_nonlocal_entities" );
    }

    deletable_ents -= deletable_sets;

    if( 0 == myPcomm->proc_config().proc_rank() )
    {
        myDebug.print( 2, "Deletable entities: ", deletable_ents );
    }

    if( !deletable_ents.empty() )
    {
        result = mbImpl->delete_entities( deletable_ents );MB_CHK_SET_ERR( result, "Failure deleting entities in delete_nonlocal_entities" );
    }

    return MB_SUCCESS;
}

}  // namespace moab