Mesh Oriented datABase (version 5.4.1): array-based unstructured mesh data structure
00001 #include "Coupler.hpp"
00002 #include "moab/ParallelComm.hpp"
00003 #include "moab/AdaptiveKDTree.hpp"
00004 #include "ElemUtil.hpp"
00005 #include "moab/CN.hpp"
00006 #include "moab/gs.hpp"
00007 #include "moab/TupleList.hpp"
00008 #include "moab/Error.hpp"
00009
00010 /* C++ includes */
00011 #include <iostream>
00012 #include <sstream>
00013 #include <algorithm>
00014 #include <vector>
00015 #include <cassert>
00016
00017 #define ERROR( a ) \
00018 { \
00019 if( MB_SUCCESS != err ) std::cerr << ( a ) << std::endl; \
00020 }
00021 #define ERRORR( a, b ) \
00022 { \
00023 if( MB_SUCCESS != ( b ) ) \
00024 { \
00025 std::cerr << ( a ) << std::endl; \
00026 return b; \
00027 } \
00028 }
00029 #define ERRORMPI( a, b ) \
00030 { \
00031 if( MPI_SUCCESS != ( b ) ) \
00032 { \
00033 std::cerr << ( a ) << std::endl; \
00034 return MB_FAILURE; \
00035 } \
00036 }
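/* Typical use of the macros above, as seen throughout this file (the calls shown here are
 * placeholders): ERRORR prints the message and returns the MOAB error code on failure,
 * while ERRORMPI does the same for an MPI return code and returns MB_FAILURE.
 *
 *   ErrorCode err = mbImpl->get_coords( verts, &coords[0] );
 *   ERRORR( "Failed to get coordinates.", err );
 *
 *   int ierr = MPI_Barrier( myPc->proc_config().proc_comm() );
 *   ERRORMPI( "MPI_Barrier failed.", ierr );
 */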
00037
00038 #define MASTER_PROC 0
00039
00040 namespace moab
00041 {
00042
00043 bool debug = false;
00044 int pack_tuples( TupleList* tl, void** ptr );
00045 void unpack_tuples( void* ptr, TupleList** tlp );
00046
00047 Coupler::Coupler( Interface* impl,
00048 ParallelComm* pc,
00049 Range& local_elems,
00050 int coupler_id,
00051 bool init_tree,
00052 int max_ent_dim )
00053 : mbImpl( impl ), myPc( pc ), myId( coupler_id ), numIts( 3 ), max_dim( max_ent_dim ), _ntot( 0 ),
00054 spherical( false )
00055 {
00056 assert( NULL != impl && ( pc || !local_elems.empty() ) );
00057
00058 // Keep track of the local points, at least for now
00059 myRange = local_elems;
00060 myTree = NULL;
00061
00062 // Now initialize the tree
00063 if( init_tree ) initialize_tree();
00064
00065 // Initialize tuple lists to indicate not initialized
00066 mappedPts = NULL;
00067 targetPts = NULL;
00068 _spectralSource = _spectralTarget = NULL;
00069 }
00070
00071 /* Destructor
00072 */
00073 Coupler::~Coupler()
00074 {
00075 // This will clear the cache
00076 delete (moab::Element::SpectralHex*)_spectralSource;
00077 delete (moab::Element::SpectralHex*)_spectralTarget;
00078 delete myTree;
00079 delete targetPts;
00080 delete mappedPts;
00081 }
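/* Minimal usage sketch for this class (the Interface* mb, ParallelComm* pcomm, the
 * source/target ranges and the tag name "temperature" are placeholders provided by the
 * application; they are not defined in this file):
 *
 *   Coupler mbc( mb, pcomm, source_elems, 0, true, 3 );           // builds the kd-tree
 *   mbc.locate_points( target_verts, 0.0, 1.0e-10, NULL, true );  // map the target points
 *   std::vector< double > vals( target_verts.size() );
 *   mbc.interpolate( Coupler::LINEAR_FE, "temperature", &vals[0], NULL, false );
 */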
00082
00083 ErrorCode Coupler::initialize_tree()
00084 {
00085 Range local_ents;
00086
00087 // Get entities on the local part
00088 ErrorCode result = MB_SUCCESS;
00089 if( myPc )
00090 {
00091 result = myPc->get_part_entities( local_ents, max_dim );
00092 if( local_ents.empty() )
00093 {
00094 max_dim--;
00095 result = myPc->get_part_entities( local_ents, max_dim ); // go one dimension lower
00096 // right now, this is used for spherical meshes only
00097 }
00098 }
00099 else
00100 local_ents = myRange;
00101 if( MB_SUCCESS != result || local_ents.empty() )
00102 {
00103 std::cout << "Problems getting source entities" << std::endl;
00104 return result;
00105 }
00106
00107 // Build the tree for local processor
00108 int max_per_leaf = 6;
00109 for( int i = 0; i < numIts; i++ )
00110 {
00111 std::ostringstream str;
00112 str << "PLANE_SET=0;"
00113 << "MAX_PER_LEAF=" << max_per_leaf << ";";
00114 if( spherical && !local_ents.empty() )
00115 {
00116 // get coordinates of one vertex, and use for radius computation
00117 EntityHandle elem = local_ents[0];
00118 const EntityHandle* conn;
00119 int numn = 0;
00120 mbImpl->get_connectivity( elem, conn, numn );
00121 CartVect pos0;
00122 mbImpl->get_coords( conn, 1, &( pos0[0] ) );
00123 double radius = pos0.length();
00124 str << "SPHERICAL=true;RADIUS=" << radius << ";";
00125 }
00126 FileOptions opts( str.str().c_str() );
00127 myTree = new AdaptiveKDTree( mbImpl );
00128 result = myTree->build_tree( local_ents, &localRoot, &opts );
00129 if( MB_SUCCESS != result )
00130 {
00131 std::cout << "Problems building tree";
00132 if( i < numIts - 1 )
00133 {
00134 delete myTree;
00135 max_per_leaf *= 2;
00136 std::cout << "; increasing elements/leaf to " << max_per_leaf << std::endl;
00137 }
00138 else
00139 {
00140 std::cout << "; exiting" << std::endl;
00141 return result;
00142 }
00143 }
00144 else
00145 break; // Get out of tree building
00146 }
00147
00148 // Get the bounding box for local tree
00149 if( myPc )
00150 allBoxes.resize( 6 * myPc->proc_config().proc_size() );
00151 else
00152 allBoxes.resize( 6 );
00153
00154 unsigned int my_rank = ( myPc ? myPc->proc_config().proc_rank() : 0 );
00155 BoundBox box;
00156 result = myTree->get_bounding_box( box, &localRoot );
00157 if( MB_SUCCESS != result ) return result;
00158 box.bMin.get( &allBoxes[6 * my_rank] );
00159 box.bMax.get( &allBoxes[6 * my_rank + 3] );
00160
00161 // Now communicate to get all boxes
00162 if( myPc )
00163 {
00164 int mpi_err;
00165 #if( MPI_VERSION >= 2 )
00166 // Use "in place" option
00167 mpi_err = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &allBoxes[0], 6, MPI_DOUBLE,
00168 myPc->proc_config().proc_comm() );
00169 #else
00170 {
00171 std::vector< double > allBoxes_tmp( 6 * myPc->proc_config().proc_size() );
00172 mpi_err = MPI_Allgather( &allBoxes[6 * my_rank], 6, MPI_DOUBLE, &allBoxes_tmp[0], 6, MPI_DOUBLE,
00173 myPc->proc_config().proc_comm() );
00174 allBoxes = allBoxes_tmp;
00175 }
00176 #endif
00177 if( MPI_SUCCESS != mpi_err ) return MB_FAILURE;
00178 }
00179
00180 /* std::ostringstream blah;
00181 for(int i = 0; i < allBoxes.size(); i++)
00182 blah << allBoxes[i] << " ";
00183 std::cout << blah.str() << "\n";*/
00184
00185 #ifdef VERBOSE
00186 double min[3] = { 0, 0, 0 }, max[3] = { 0, 0, 0 };
00187 unsigned int dep;
00188 myTree->get_info( localRoot, min, max, dep );
00189 std::cout << "Proc " << my_rank << ": box min/max, tree depth = (" << min[0] << "," << min[1] << "," << min[2]
00190 << "), (" << max[0] << "," << max[1] << "," << max[2] << "), " << dep << std::endl;
00191 #endif
00192
00193 return result;
00194 }
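/* For reference, the FileOptions string assembled in the loop above reads
 * "PLANE_SET=0;MAX_PER_LEAF=6;" on the first pass, with MAX_PER_LEAF doubled on every
 * retry, and "SPHERICAL=true;RADIUS=<r>;" appended for spherical meshes, where <r> is
 * the distance of the first source vertex from the origin.
 */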
00195
00196 ErrorCode Coupler::initialize_spectral_elements( EntityHandle rootSource,
00197 EntityHandle rootTarget,
00198 bool& specSou,
00199 bool& specTar )
00200 {
00201 /*void * _spectralSource;
00202 void * _spectralTarget;*/
00203
00204 moab::Range spectral_sets;
00205 moab::Tag sem_tag;
00206 int sem_dims[3];
00207 ErrorCode rval = mbImpl->tag_get_handle( "SEM_DIMS", 3, moab::MB_TYPE_INTEGER, sem_tag );
00208 if( moab::MB_SUCCESS != rval )
00209 {
00210 std::cout << "Can't find tag, no spectral set\n";
00211 return MB_SUCCESS; // Nothing to do, no spectral elements
00212 }
00213 rval = mbImpl->get_entities_by_type_and_tag( rootSource, moab::MBENTITYSET, &sem_tag, NULL, 1, spectral_sets );
00214 if( moab::MB_SUCCESS != rval || spectral_sets.empty() )
00215 std::cout << "Can't get sem set on source\n";
00216 else
00217 {
00218 moab::EntityHandle firstSemSet = spectral_sets[0];
00219 rval = mbImpl->tag_get_data( sem_tag, &firstSemSet, 1, (void*)sem_dims );
00220 if( moab::MB_SUCCESS != rval ) return MB_FAILURE;
00221
00222 if( sem_dims[0] != sem_dims[1] || sem_dims[0] != sem_dims[2] )
00223 {
00224 std::cout << " dimensions are different. bail out\n";
00225 return MB_FAILURE;
00226 }
00227 // Repeat for target sets
00228 spectral_sets.clear();
00229 // Now initialize a source spectral element !
00230 _spectralSource = new moab::Element::SpectralHex( sem_dims[0] );
00231 specSou = true;
00232 }
00233 // Repeat for target source
00234 rval = mbImpl->get_entities_by_type_and_tag( rootTarget, moab::MBENTITYSET, &sem_tag, NULL, 1, spectral_sets );
00235 if( moab::MB_SUCCESS != rval || spectral_sets.empty() )
00236 std::cout << "Can't get sem set on target\n";
00237 else
00238 {
00239 moab::EntityHandle firstSemSet = spectral_sets[0];
00240 rval = mbImpl->tag_get_data( sem_tag, &firstSemSet, 1, (void*)sem_dims );
00241 if( moab::MB_SUCCESS != rval ) return MB_FAILURE;
00242
00243 if( sem_dims[0] != sem_dims[1] || sem_dims[0] != sem_dims[2] )
00244 {
00245 std::cout << " dimensions are different. bail out\n";
00246 return MB_FAILURE;
00247 }
00248 // Repeat for target sets
00249 spectral_sets.clear();
00250 // Now initialize a target spectral element !
00251 _spectralTarget = new moab::Element::SpectralHex( sem_dims[0] );
00252 specTar = true;
00253 }
00254
00255 _ntot = sem_dims[0] * sem_dims[1] * sem_dims[2];
00256 rval = mbImpl->tag_get_handle( "SEM_X", _ntot, moab::MB_TYPE_DOUBLE, _xm1Tag );
00257 if( moab::MB_SUCCESS != rval )
00258 {
00259 std::cout << "Can't get xm1tag \n";
00260 return MB_FAILURE;
00261 }
00262 rval = mbImpl->tag_get_handle( "SEM_Y", _ntot, moab::MB_TYPE_DOUBLE, _ym1Tag );
00263 if( moab::MB_SUCCESS != rval )
00264 {
00265 std::cout << "Can't get ym1tag \n";
00266 return MB_FAILURE;
00267 }
00268 rval = mbImpl->tag_get_handle( "SEM_Z", _ntot, moab::MB_TYPE_DOUBLE, _zm1Tag );
00269 if( moab::MB_SUCCESS != rval )
00270 {
00271 std::cout << "Can't get zm1tag \n";
00272 return MB_FAILURE;
00273 }
00274
00275 return MB_SUCCESS;
00276 }
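/* Worked example for the spectral setup above: a SEM_DIMS tag value of {4, 4, 4} gives
 * _ntot = 4*4*4 = 64 GL points per element, so the SEM_X, SEM_Y and SEM_Z tags are each
 * expected to hold 64 doubles per spectral hex.
 */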
00277
00278 ErrorCode Coupler::locate_points( Range& targ_ents, double rel_eps, double abs_eps, TupleList* tl, bool store_local )
00279 {
00280 // Get locations
00281 std::vector< double > locs( 3 * targ_ents.size() );
00282 Range verts = targ_ents.subset_by_type( MBVERTEX );
00283 ErrorCode rval = mbImpl->get_coords( verts, &locs[0] );
00284 if( MB_SUCCESS != rval ) return rval;
00285 // Now get other ents; reuse verts
00286 unsigned int num_verts = verts.size();
00287 verts = subtract( targ_ents, verts );
00288 // Compute centroids
00289 std::vector< EntityHandle > dum_conn( CN::MAX_NODES_PER_ELEMENT );
00290 std::vector< double > dum_pos( 3 * CN::MAX_NODES_PER_ELEMENT );
00291 const EntityHandle* conn;
00292 int num_conn;
00293 double* coords = &locs[3 * num_verts];
00294 // Do this here instead of a function to allow reuse of dum_pos and dum_conn
00295 for( Range::const_iterator rit = verts.begin(); rit != verts.end(); ++rit )
00296 {
00297 rval = mbImpl->get_connectivity( *rit, conn, num_conn, false, &dum_conn );
00298 if( MB_SUCCESS != rval ) return rval;
00299 rval = mbImpl->get_coords( conn, num_conn, &dum_pos[0] );
00300 if( MB_SUCCESS != rval ) return rval;
00301 coords[0] = coords[1] = coords[2] = 0.0;
00302 for( int i = 0; i < num_conn; i++ )
00303 {
00304 coords[0] += dum_pos[3 * i];
00305 coords[1] += dum_pos[3 * i + 1];
00306 coords[2] += dum_pos[3 * i + 2];
00307 }
00308 coords[0] /= num_conn;
00309 coords[1] /= num_conn;
00310 coords[2] /= num_conn;
00311 coords += 3;
00312 }
00313
00314 if( store_local ) targetEnts = targ_ents;
00315
00316 return locate_points( &locs[0], targ_ents.size(), rel_eps, abs_eps, tl, store_local );
00317 }
00318
00319 ErrorCode Coupler::locate_points( double* xyz,
00320 unsigned int num_points,
00321 double rel_eps,
00322 double abs_eps,
00323 TupleList* tl,
00324 bool store_local )
00325 {
00326 assert( tl || store_local );
00327
00328 // target_pts: TL(to_proc, tgt_index, x, y, z): tuples sent to source mesh procs
00329 // representing pts to be located
00330 // source_pts: TL(from_proc, tgt_index, src_index): results of source mesh proc point
00331 // location, ready to send back to tgt procs; src_index of -1 indicates point
00332 // not located (arguably not useful...)
00333 TupleList target_pts;
00334 target_pts.initialize( 2, 0, 0, 3, num_points );
00335 target_pts.enableWriteAccess();
00336
00337 TupleList source_pts;
00338 mappedPts = new TupleList( 0, 0, 1, 3, target_pts.get_max() );
00339 mappedPts->enableWriteAccess();
00340
00341 source_pts.initialize( 3, 0, 0, 0, target_pts.get_max() );
00342 source_pts.enableWriteAccess();
00343
00344 mappedPts->set_n( 0 );
00345 source_pts.set_n( 0 );
00346 ErrorCode result;
00347
00348 unsigned int my_rank = ( myPc ? myPc->proc_config().proc_rank() : 0 );
00349 bool point_located;
00350
00351 // For each point, find box(es) containing the point,
00352 // appending results to tuple_list;
00353 // keep local points separately, in local_pts, which has pairs
00354 // of <local_index, mapped_index>, where mapped_index is the index
00355 // into the mappedPts tuple list
00356 for( unsigned int i = 0; i < 3 * num_points; i += 3 )
00357 {
00358
00359 std::vector< int > procs_to_send_to;
00360 for( unsigned int j = 0; j < ( myPc ? myPc->proc_config().proc_size() : 0 ); j++ )
00361 {
00362 // Test if point is in proc's box
00363 if( ( allBoxes[6 * j] <= xyz[i] + abs_eps ) && ( xyz[i] <= allBoxes[6 * j + 3] + abs_eps ) &&
00364 ( allBoxes[6 * j + 1] <= xyz[i + 1] + abs_eps ) && ( xyz[i + 1] <= allBoxes[6 * j + 4] + abs_eps ) &&
00365 ( allBoxes[6 * j + 2] <= xyz[i + 2] + abs_eps ) && ( xyz[i + 2] <= allBoxes[6 * j + 5] + abs_eps ) )
00366 {
00367 // If in this proc's box, will send to proc to test further
00368 procs_to_send_to.push_back( j );
00369 }
00370 }
00371 if( procs_to_send_to.empty() )
00372 {
00373 #ifdef VERBOSE
00374 std::cout << " point index " << i / 3 << ": " << xyz[i] << " " << xyz[i + 1] << " " << xyz[i + 2]
00375 << " not found in any box\n";
00376 #endif
00377 // try to find the closest box, and put it in that box, anyway
00378 double min_dist = 1.e+20;
00379 int index = -1;
00380 for( unsigned int j = 0; j < ( myPc ? myPc->proc_config().proc_size() : 0 ); j++ )
00381 {
00382 BoundBox box( &allBoxes[6 * j] ); // form back the box
00383 double distance = box.distance( &xyz[i] ); // will compute the distance in 3d, from the box
00384 if( distance < min_dist )
00385 {
00386 index = j;
00387 min_dist = distance;
00388 }
00389 }
00390 if( index == -1 )
00391 {
00392 // need to abort early, nothing we can do
00393 assert( false && "cannot locate any box for some points" );
00394 // need a better exit strategy
00395 }
00396 #ifdef VERBOSE
00397 std::cout << " point index " << i / 3 << " added to box for proc j:" << index << "\n";
00398 #endif
00399 procs_to_send_to.push_back( index ); // will send to just one proc, that has the closest box
00400 }
00401 // we finally decided to populate the tuple list for a list of processors
00402 for( size_t k = 0; k < procs_to_send_to.size(); k++ )
00403 {
00404 unsigned int j = procs_to_send_to[k];
00405 // Check size of tuple list, grow if we're at max
00406 if( target_pts.get_n() == target_pts.get_max() )
00407 target_pts.resize( std::max( 10.0, 1.5 * target_pts.get_max() ) );
00408
00409 target_pts.vi_wr[2 * target_pts.get_n()] = j;
00410 target_pts.vi_wr[2 * target_pts.get_n() + 1] = i / 3;
00411
00412 target_pts.vr_wr[3 * target_pts.get_n()] = xyz[i];
00413 target_pts.vr_wr[3 * target_pts.get_n() + 1] = xyz[i + 1];
00414 target_pts.vr_wr[3 * target_pts.get_n() + 2] = xyz[i + 2];
00415 target_pts.inc_n();
00416 }
00417
00418 } // end for (unsigned int i = 0; ..
00419
00420 int num_to_me = 0;
00421 for( unsigned int i = 0; i < target_pts.get_n(); i++ )
00422 if( target_pts.vi_rd[2 * i] == (int)my_rank ) num_to_me++;
00423 #ifdef VERBOSE
00424 printf( "rank: %u local points: %u, nb sent target pts: %u mappedPts: %u num to me: %d \n", my_rank, num_points,
00425 target_pts.get_n(), mappedPts->get_n(), num_to_me );
00426 #endif
00427 // Perform scatter/gather, to gather points to source mesh procs
00428 if( myPc )
00429 {
00430 ( myPc->proc_config().crystal_router() )->gs_transfer( 1, target_pts, 0 );
00431
00432 num_to_me = 0;
00433 for( unsigned int i = 0; i < target_pts.get_n(); i++ )
00434 {
00435 if( target_pts.vi_rd[2 * i] == (int)my_rank ) num_to_me++;
00436 }
00437 #ifdef VERBOSE
00438 printf( "rank: %u after first gs nb received_pts: %u; num_from_me = %d\n", my_rank, target_pts.get_n(),
00439 num_to_me );
00440 #endif
00441 // After scatter/gather:
00442 // target_pts.set_n(# points local proc has to map);
00443 // target_pts.vi_wr[2*i] = proc sending point i
00444 // target_pts.vi_wr[2*i + 1] = index of point i on sending proc
00445 // target_pts.vr_wr[3*i..3*i + 2] = xyz of point i
00446 //
00447 // Mapping builds the tuple list:
00448 // source_pts.set_n(target_pts.get_n())
00449 // source_pts.vi_wr[3*i] = target_pts.vi_wr[2*i] = sending proc
00450 // source_pts.vi_wr[3*i + 1] = index of point i on sending proc
00451 // source_pts.vi_wr[3*i + 2] = index of mapped point (-1 if not mapped)
00452 //
00453 // Also, mapping builds local tuple_list mappedPts:
00454 // mappedPts->set_n( # mapped points );
00455 // mappedPts->vul_wr[i] = local handle of mapped entity
00456 // mappedPts->vr_wr[3*i..3*i + 2] = natural coordinates in mapped entity
00457
00458 // Test target points against my elements
00459 for( unsigned i = 0; i < target_pts.get_n(); i++ )
00460 {
00461 result = test_local_box( target_pts.vr_wr + 3 * i, target_pts.vi_rd[2 * i], target_pts.vi_rd[2 * i + 1], i,
00462 point_located, rel_eps, abs_eps, &source_pts );
00463 if( MB_SUCCESS != result ) return result;
00464 }
00465
00466 // No longer need target_pts
00467 target_pts.reset();
00468 #ifdef VERBOSE
00469 printf( "rank: %u nb sent source pts: %u, mappedPts now: %u\n", my_rank, source_pts.get_n(),
00470 mappedPts->get_n() );
00471 #endif
00472 // Send target points back to target procs
00473 ( myPc->proc_config().crystal_router() )->gs_transfer( 1, source_pts, 0 );
00474
00475 #ifdef VERBOSE
00476 printf( "rank: %u nb received source pts: %u\n", my_rank, source_pts.get_n() );
00477 #endif
00478 }
00479
00480 // Store proc/index tuples in targetPts, and/or pass back to application;
00481 // the tuple this gets stored to looks like:
00482 // tl.set_n(# mapped points);
00483 // tl.vi_wr[3*i] = remote proc mapping point
00484 // tl.vi_wr[3*i + 1] = local index of mapped point
00485 // tl.vi_wr[3*i + 2] = remote index of mapped point
00486 //
00487 // Local index is mapped into either myRange, holding the handles of
00488 // local mapped entities, or myXyz, holding locations of mapped pts
00489
00490 // Store information about located points
00491 TupleList* tl_tmp;
00492 if( !store_local )
00493 tl_tmp = tl;
00494 else
00495 {
00496 targetPts = new TupleList();
00497 tl_tmp = targetPts;
00498 }
00499
00500 tl_tmp->initialize( 3, 0, 0, 0, num_points );
00501 tl_tmp->set_n( num_points ); // Automatically sets tl to write_enabled
00502 // Initialize so we know afterwards how many pts weren't located
00503 std::fill( tl_tmp->vi_wr, tl_tmp->vi_wr + 3 * num_points, -1 );
00504
00505 unsigned int local_pts = 0;
00506 for( unsigned int i = 0; i < source_pts.get_n(); i++ )
00507 {
00508 if( -1 != source_pts.vi_rd[3 * i + 2] )
00509 { // Why bother sending message saying "i don't have the point" if it gets discarded?
00510 int tgt_index = 3 * source_pts.vi_rd[3 * i + 1];
00511 // Prefer always entities that are local, from the source_pts
00512 // if a local entity was already found to contain the target point, skip
00513 // tl_tmp->vi_wr[tgt_index] is -1 initially, but it could already be set with
00514 // a remote processor
00515 if( tl_tmp->vi_wr[tgt_index] != (int)my_rank )
00516 {
00517 tl_tmp->vi_wr[tgt_index] = source_pts.vi_rd[3 * i];
00518 tl_tmp->vi_wr[tgt_index + 1] = source_pts.vi_rd[3 * i + 1];
00519 tl_tmp->vi_wr[tgt_index + 2] = source_pts.vi_rd[3 * i + 2];
00520 }
00521 }
00522 }
00523
00524 // Count missing points
00525 unsigned int missing_pts = 0;
00526 for( unsigned int i = 0; i < num_points; i++ )
00527 {
00528 if( tl_tmp->vi_rd[3 * i + 1] == -1 )
00529 {
00530 missing_pts++;
00531 #ifdef VERBOSE
00532 printf( "missing point at index i: %d -> %15.10f %15.10f %15.10f\n", i, xyz[3 * i], xyz[3 * i + 1],
00533 xyz[3 * i + 2] );
00534 #endif
00535 }
00536 else if( tl_tmp->vi_rd[3 * i] == (int)my_rank )
00537 local_pts++;
00538 }
00539 #ifdef VERBOSE
00540 printf( "rank: %u point location: wanted %u got %u locally, %u remote, missing %u\n", my_rank, num_points,
00541 local_pts, num_points - missing_pts - local_pts, missing_pts );
00542 #endif
00543 assert( 0 == missing_pts ); // Will likely break on curved geometries
00544
00545 // No longer need source_pts
00546 source_pts.reset();
00547
00548 // Copy into tl if passed in and storing locally
00549 if( tl && store_local )
00550 {
00551 tl->initialize( 3, 0, 0, 0, num_points );
00552 tl->enableWriteAccess();
00553 memcpy( tl->vi_wr, tl_tmp->vi_rd, 3 * tl_tmp->get_n() * sizeof( int ) );
00554 tl->set_n( tl_tmp->get_n() );
00555 tl->disableWriteAccess();
00556 }
00557
00558 tl_tmp->disableWriteAccess();
00559
00560 // Done
00561 return MB_SUCCESS;
00562 }
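/* Illustration of the stored result (values are hypothetical): if target point 7 on this
 * rank was mapped by rank 2 into entry 15 of rank 2's mappedPts list, then
 *   targetPts->vi_rd[3*7]     == 2   (remote proc that mapped the point)
 *   targetPts->vi_rd[3*7 + 1] == 7   (local index of the mapped point)
 *   targetPts->vi_rd[3*7 + 2] == 15  (index into the mapper's mappedPts list)
 * while a point that was never located keeps the initial -1 in all three slots.
 */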
00563
00564 ErrorCode Coupler::test_local_box( double* xyz,
00565 int from_proc,
00566 int remote_index,
00567 int /*index*/,
00568 bool& point_located,
00569 double rel_eps,
00570 double abs_eps,
00571 TupleList* tl )
00572 {
00573 std::vector< EntityHandle > entities;
00574 std::vector< CartVect > nat_coords;
00575 bool canWrite = false;
00576 if( tl )
00577 {
00578 canWrite = tl->get_writeEnabled();
00579 if( !canWrite )
00580 {
00581 tl->enableWriteAccess();
00582 canWrite = true;
00583 }
00584 }
00585
00586 if( rel_eps && !abs_eps )
00587 {
00588 // Relative epsilon given, translate to absolute epsilon using box dimensions
00589 BoundBox box;
00590 myTree->get_bounding_box( box, &localRoot );
00591 abs_eps = rel_eps * box.diagonal_length();
00592 }
00593
00594 ErrorCode result = nat_param( xyz, entities, nat_coords, abs_eps );
00595 if( MB_SUCCESS != result ) return result;
00596
00597 // If we didn't find any ents and we're looking locally, nothing more to do
00598 if( entities.empty() )
00599 {
00600 if( tl->get_n() == tl->get_max() ) tl->resize( std::max( 10.0, 1.5 * tl->get_max() ) );
00601
00602 tl->vi_wr[3 * tl->get_n()] = from_proc;
00603 tl->vi_wr[3 * tl->get_n() + 1] = remote_index;
00604 tl->vi_wr[3 * tl->get_n() + 2] = -1;
00605 tl->inc_n();
00606
00607 point_located = false;
00608 return MB_SUCCESS;
00609 }
00610
00611 // Grow if we know we'll exceed size
00612 if( mappedPts->get_n() + entities.size() >= mappedPts->get_max() )
00613 mappedPts->resize( std::max( 10.0, 1.5 * mappedPts->get_max() ) );
00614
00615 std::vector< EntityHandle >::iterator eit = entities.begin();
00616 std::vector< CartVect >::iterator ncit = nat_coords.begin();
00617
00618 mappedPts->enableWriteAccess();
00619 for( ; eit != entities.end(); ++eit, ++ncit )
00620 {
00621 // Store in tuple mappedPts
00622 mappedPts->vr_wr[3 * mappedPts->get_n()] = ( *ncit )[0];
00623 mappedPts->vr_wr[3 * mappedPts->get_n() + 1] = ( *ncit )[1];
00624 mappedPts->vr_wr[3 * mappedPts->get_n() + 2] = ( *ncit )[2];
00625 mappedPts->vul_wr[mappedPts->get_n()] = *eit;
00626 mappedPts->inc_n();
00627
00628 // Also store local point, mapped point indices
00629 if( tl->get_n() == tl->get_max() ) tl->resize( std::max( 10.0, 1.5 * tl->get_max() ) );
00630
00631 // Store in tuple source_pts
00632 tl->vi_wr[3 * tl->get_n()] = from_proc;
00633 tl->vi_wr[3 * tl->get_n() + 1] = remote_index;
00634 tl->vi_wr[3 * tl->get_n() + 2] = mappedPts->get_n() - 1;
00635 tl->inc_n();
00636 }
00637
00638 point_located = true;
00639
00640 if( tl && !canWrite ) tl->disableWriteAccess();
00641
00642 return MB_SUCCESS;
00643 }
00644
00645 ErrorCode Coupler::interpolate( Coupler::Method method,
00646 const std::string& interp_tag,
00647 double* interp_vals,
00648 TupleList* tl,
00649 bool normalize )
00650 {
00651 Tag tag;
00652 ErrorCode result;
00653 if( _spectralSource )
00654 {
00655 result = mbImpl->tag_get_handle( interp_tag.c_str(), _ntot, MB_TYPE_DOUBLE, tag );MB_CHK_SET_ERR( result, "Failed to get handle for interpolation tag \"" << interp_tag << "\"" );
00656 }
00657 else
00658 {
00659 result = mbImpl->tag_get_handle( interp_tag.c_str(), 1, MB_TYPE_DOUBLE, tag );MB_CHK_SET_ERR( result, "Failed to get handle for interpolation tag \"" << interp_tag << "\"" );
00660 }
00661
00662 return interpolate( method, tag, interp_vals, tl, normalize );
00663 }
00664
00665 ErrorCode Coupler::interpolate( Coupler::Method* methods,
00666 Tag* tags,
00667 int* points_per_method,
00668 int num_methods,
00669 double* interp_vals,
00670 TupleList* tl,
00671 bool /* normalize */ )
00672 {
00673 // if (!((LINEAR_FE == method) || (CONSTANT == method)))
00674 // return MB_FAILURE;
00675
00676 // remote pts first
00677 TupleList* tl_tmp = ( tl ? tl : targetPts );
00678
00679 ErrorCode result = MB_SUCCESS;
00680
00681 unsigned int pts_total = 0;
00682 for( int i = 0; i < num_methods; i++ )
00683 pts_total += points_per_method[i];
00684
00685 // If tl was passed in non-NULL, just have those points, otherwise have targetPts plus
00686 // locally mapped pts
00687 if( pts_total != tl_tmp->get_n() ) return MB_FAILURE;
00688
00689 TupleList tinterp;
00690 tinterp.initialize( 5, 0, 0, 1, tl_tmp->get_n() );
00691 int t = 0;
00692 tinterp.enableWriteAccess();
00693 for( int i = 0; i < num_methods; i++ )
00694 {
00695 for( int j = 0; j < points_per_method[i]; j++ )
00696 {
00697 tinterp.vi_wr[5 * t] = tl_tmp->vi_rd[3 * t];
00698 tinterp.vi_wr[5 * t + 1] = tl_tmp->vi_rd[3 * t + 1];
00699 tinterp.vi_wr[5 * t + 2] = tl_tmp->vi_rd[3 * t + 2];
00700 tinterp.vi_wr[5 * t + 3] = methods[i];
00701 tinterp.vi_wr[5 * t + 4] = i;
00702 tinterp.vr_wr[t] = 0.0;
00703 tinterp.inc_n();
00704 t++;
00705 }
00706 }
00707
00708 // Scatter/gather interpolation points
00709 if( myPc )
00710 {
00711 ( myPc->proc_config().crystal_router() )->gs_transfer( 1, tinterp, 0 );
00712
00713 // Perform interpolation on local source mesh; put results into
00714 // tinterp.vr_wr[i]
00715
00716 mappedPts->enableWriteAccess();
00717 for( unsigned int i = 0; i < tinterp.get_n(); i++ )
00718 {
00719 int mindex = tinterp.vi_rd[5 * i + 2];
00720 Method method = (Method)tinterp.vi_rd[5 * i + 3];
00721 Tag tag = tags[tinterp.vi_rd[5 * i + 4]];
00722
00723 result = MB_FAILURE;
00724 if( LINEAR_FE == method || QUADRATIC_FE == method || SPHERICAL == method )
00725 {
00726 result = interp_field( mappedPts->vul_rd[mindex], CartVect( mappedPts->vr_wr + 3 * mindex ), tag,
00727 tinterp.vr_wr[i] );
00728 }
00729 else if( CONSTANT == method )
00730 {
00731 result = constant_interp( mappedPts->vul_rd[mindex], tag, tinterp.vr_wr[i] );
00732 }
00733
00734 if( MB_SUCCESS != result ) return result;
00735 }
00736
00737 // Scatter/gather interpolation data
00738 myPc->proc_config().crystal_router()->gs_transfer( 1, tinterp, 0 );
00739 }
00740
00741 // Copy the interpolated field as a unit
00742 for( unsigned int i = 0; i < tinterp.get_n(); i++ )
00743 interp_vals[tinterp.vi_rd[5 * i + 1]] = tinterp.vr_rd[i];
00744
00745 // Done
00746 return MB_SUCCESS;
00747 }
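/* Layout of each tinterp tuple used above (5 ints + 1 real), as filled before the first
 * gs_transfer:
 *   vi[5*i]     = proc owning the mapped point (routing key for the transfer)
 *   vi[5*i + 1] = index of the point on this (target) proc
 *   vi[5*i + 2] = index into the owner's mappedPts list
 *   vi[5*i + 3] = interpolation method to apply
 *   vi[5*i + 4] = index into the tags array
 *   vr[i]       = interpolated value, filled by the owner and returned by the second
 *                 gs_transfer
 */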
00748
00749 ErrorCode Coupler::nat_param( double xyz[3],
00750 std::vector< EntityHandle >& entities,
00751 std::vector< CartVect >& nat_coords,
00752 double epsilon )
00753 {
00754 if( !myTree ) return MB_FAILURE;
00755
00756 AdaptiveKDTreeIter treeiter;
00757 ErrorCode result = myTree->get_tree_iterator( localRoot, treeiter );
00758 if( MB_SUCCESS != result )
00759 {
00760 std::cout << "Problems getting iterator" << std::endl;
00761 return result;
00762 }
00763
00764 EntityHandle closest_leaf;
00765 if( epsilon )
00766 {
00767 std::vector< double > dists;
00768 std::vector< EntityHandle > leaves;
00769
00770 // Two tolerances
00771 result = myTree->distance_search( xyz, epsilon, leaves,
00772 /*iter_tol*/ epsilon,
00773 /*inside_tol*/ 10 * epsilon, &dists, NULL, &localRoot );
00774 if( leaves.empty() )
00775 // Not found returns success here, with empty list, just like case with no epsilon
00776 return MB_SUCCESS;
00777 // Get closest leaf
00778 double min_dist = *dists.begin();
00779 closest_leaf = *leaves.begin();
00780 std::vector< EntityHandle >::iterator vit = leaves.begin() + 1;
00781 std::vector< double >::iterator dit = dists.begin() + 1;
00782 for( ; vit != leaves.end() && min_dist; ++vit, ++dit )
00783 {
00784 if( *dit < min_dist )
00785 {
00786 min_dist = *dit;
00787 closest_leaf = *vit;
00788 }
00789 }
00790 }
00791 else
00792 {
00793 result = myTree->point_search( xyz, treeiter, 1.0e-10, 1.0e-6, NULL, &localRoot );
00794 if( MB_ENTITY_NOT_FOUND == result ) // Point is outside of myTree's bounding box
00795 return MB_SUCCESS;
00796 else if( MB_SUCCESS != result )
00797 {
00798 std::cout << "Problems getting leaf \n";
00799 return result;
00800 }
00801 closest_leaf = treeiter.handle();
00802 }
00803
00804 // Find natural coordinates of point in element(s) in that leaf
00805 CartVect tmp_nat_coords;
00806 Range range_leaf;
00807 result = mbImpl->get_entities_by_dimension( closest_leaf, max_dim, range_leaf, false );
00808 if( MB_SUCCESS != result ) std::cout << "Problem getting leaf in a range" << std::endl;
00809
00810 // Loop over the range_leaf
00811 for( Range::iterator iter = range_leaf.begin(); iter != range_leaf.end(); ++iter )
00812 {
00813 // Test to find out in which entity the point is
00814 // Get the EntityType and create the appropriate Element::Map subtype
00815 // If spectral, do not need coordinates, just the GL points
00816 EntityType etype = mbImpl->type_from_handle( *iter );
00817 if( NULL != this->_spectralSource && MBHEX == etype )
00818 {
00819 EntityHandle eh = *iter;
00820 const double* xval;
00821 const double* yval;
00822 const double* zval;
00823 ErrorCode rval = mbImpl->tag_get_by_ptr( _xm1Tag, &eh, 1, (const void**)&xval );
00824 if( moab::MB_SUCCESS != rval )
00825 {
00826 std::cout << "Can't get xm1 values \n";
00827 return MB_FAILURE;
00828 }
00829 rval = mbImpl->tag_get_by_ptr( _ym1Tag, &eh, 1, (const void**)&yval );
00830 if( moab::MB_SUCCESS != rval )
00831 {
00832 std::cout << "Can't get ym1 values \n";
00833 return MB_FAILURE;
00834 }
00835 rval = mbImpl->tag_get_by_ptr( _zm1Tag, &eh, 1, (const void**)&zval );
00836 if( moab::MB_SUCCESS != rval )
00837 {
00838 std::cout << "Can't get zm1 values \n";
00839 return MB_FAILURE;
00840 }
00841 Element::SpectralHex* spcHex = (Element::SpectralHex*)_spectralSource;
00842
00843 spcHex->set_gl_points( (double*)xval, (double*)yval, (double*)zval );
00844 try
00845 {
00846 tmp_nat_coords = spcHex->ievaluate( CartVect( xyz ), epsilon ); // introduce
00847 bool inside = spcHex->inside_nat_space( CartVect( tmp_nat_coords ), epsilon );
00848 if( !inside )
00849 {
00850 #ifdef VERBOSE
00851 std::cout << "point " << xyz[0] << " " << xyz[1] << " " << xyz[2]
00852 << " is not converging inside hex " << mbImpl->id_from_handle( eh ) << "\n";
00853 #endif
00854 continue; // It is possible that the point is outside, so it will not converge
00855 }
00856 }
00857 catch( Element::Map::EvaluationError& )
00858 {
00859 continue;
00860 }
00861 }
00862 else
00863 {
00864 const EntityHandle* connect;
00865 int num_connect;
00866
00867 // Get connectivity
00868 result = mbImpl->get_connectivity( *iter, connect, num_connect, true );
00869 if( MB_SUCCESS != result ) return result;
00870
00871 // Get coordinates of the vertices
00872 std::vector< CartVect > coords_vert( num_connect );
00873 result = mbImpl->get_coords( connect, num_connect, &( coords_vert[0][0] ) );
00874 if( MB_SUCCESS != result )
00875 {
00876 std::cout << "Problems getting coordinates of vertices\n";
00877 return result;
00878 }
00879 CartVect pos( xyz );
00880 if( MBHEX == etype )
00881 {
00882 if( 8 == num_connect )
00883 {
00884 Element::LinearHex hexmap( coords_vert );
00885 if( !hexmap.inside_box( pos, epsilon ) ) continue;
00886 try
00887 {
00888 tmp_nat_coords = hexmap.ievaluate( pos, epsilon );
00889 bool inside = hexmap.inside_nat_space( tmp_nat_coords, epsilon );
00890 if( !inside ) continue;
00891 }
00892 catch( Element::Map::EvaluationError& )
00893 {
00894 continue;
00895 }
00896 }
00897 else if( 27 == num_connect )
00898 {
00899 Element::QuadraticHex hexmap( coords_vert );
00900 if( !hexmap.inside_box( pos, epsilon ) ) continue;
00901 try
00902 {
00903 tmp_nat_coords = hexmap.ievaluate( pos, epsilon );
00904 bool inside = hexmap.inside_nat_space( tmp_nat_coords, epsilon );
00905 if( !inside ) continue;
00906 }
00907 catch( Element::Map::EvaluationError& )
00908 {
00909 continue;
00910 }
00911 }
00912 else // TODO this case not treated yet, no interpolation
00913 continue;
00914 }
00915 else if( MBTET == etype )
00916 {
00917 Element::LinearTet tetmap( coords_vert );
00918 // This is just a linear solve; unless degenerate, will not except
00919 tmp_nat_coords = tetmap.ievaluate( pos );
00920 bool inside = tetmap.inside_nat_space( tmp_nat_coords, epsilon );
00921 if( !inside ) continue;
00922 }
00923 else if( MBQUAD == etype && spherical )
00924 {
00925 Element::SphericalQuad sphermap( coords_vert );
00926 /* skip box test, because it can filter out good elements with high curvature
00927 * if (!sphermap.inside_box(pos, epsilon))
00928 continue;*/
00929 try
00930 {
00931 tmp_nat_coords = sphermap.ievaluate( pos, epsilon );
00932 bool inside = sphermap.inside_nat_space( tmp_nat_coords, epsilon );
00933 if( !inside ) continue;
00934 }
00935 catch( Element::Map::EvaluationError& )
00936 {
00937 continue;
00938 }
00939 }
00940 else if( MBTRI == etype && spherical )
00941 {
00942 Element::SphericalTri sphermap( coords_vert );
00943 /* skip box test, because it can filter out good elements with high curvature
00944 * if (!sphermap.inside_box(pos, epsilon))
00945 continue;*/
00946 try
00947 {
00948 tmp_nat_coords = sphermap.ievaluate( pos, epsilon );
00949 bool inside = sphermap.inside_nat_space( tmp_nat_coords, epsilon );
00950 if( !inside ) continue;
00951 }
00952 catch( Element::Map::EvaluationError& )
00953 {
00954 continue;
00955 }
00956 }
00957
00958 else if( MBQUAD == etype )
00959 {
00960 Element::LinearQuad quadmap( coords_vert );
00961 if( !quadmap.inside_box( pos, epsilon ) ) continue;
00962 try
00963 {
00964 tmp_nat_coords = quadmap.ievaluate( pos, epsilon );
00965 bool inside = quadmap.inside_nat_space( tmp_nat_coords, epsilon );
00966 if( !inside ) continue;
00967 }
00968 catch( Element::Map::EvaluationError& )
00969 {
00970 continue;
00971 }
00972 if( !quadmap.inside_nat_space( tmp_nat_coords, epsilon ) ) continue;
00973 }
00974 /*
00975 else if (etype == MBTRI){
00976 Element::LinearTri trimap(coords_vert);
00977 if (!trimap.inside_box( pos, epsilon))
00978 continue;
00979 try {
00980 tmp_nat_coords = trimap.ievaluate(pos, epsilon);
00981 bool inside = trimap.inside_nat_space(tmp_nat_coords, epsilon);
00982 if (!inside) continue;
00983 }
00984 catch (Element::Map::EvaluationError) {
00985 continue;
00986 }
00987 if (!trimap.inside_nat_space(tmp_nat_coords, epsilon))
00988 continue;
00989 }
00990 */
00991 else if( etype == MBEDGE )
00992 {
00993 Element::LinearEdge edgemap( coords_vert );
00994 try
00995 {
00996 tmp_nat_coords = edgemap.ievaluate( CartVect( xyz ), epsilon );
00997 }
00998 catch( Element::Map::EvaluationError& )
00999 {
01000 continue;
01001 }
01002 if( !edgemap.inside_nat_space( tmp_nat_coords, epsilon ) ) continue;
01003 }
01004 else
01005 {
01006 std::cout << "Entity not Hex/Tet/Quad/Tri/Edge. Please verify." << std::endl;
01007 continue;
01008 }
01009 }
01010 // If we get here then we've found the coordinates.
01011 // Save them and the entity and return success.
01012 entities.push_back( *iter );
01013 nat_coords.push_back( tmp_nat_coords );
01014 return MB_SUCCESS;
01015 }
01016
01017 // Didn't find any elements containing the point
01018 return MB_SUCCESS;
01019 }
01020
01021 ErrorCode Coupler::interp_field( EntityHandle elem, CartVect nat_coord, Tag tag, double& field )
01022 {
01023 if( _spectralSource )
01024 {
01025 // Get tag values at the GL points for some field (Tag)
01026 const double* vx;
01027 ErrorCode rval = mbImpl->tag_get_by_ptr( tag, &elem, 1, (const void**)&vx );
01028 if( moab::MB_SUCCESS != rval )
01029 {
01030 std::cout << "Can't get field values for the tag \n";
01031 return MB_FAILURE;
01032 }
01033 Element::SpectralHex* spcHex = (Element::SpectralHex*)_spectralSource;
01034 field = spcHex->evaluate_scalar_field( nat_coord, vx );
01035 }
01036 else
01037 {
01038 double vfields[27]; // Will work for linear hex, quadratic hex or Tets
01039 moab::Element::Map* elemMap = NULL;
01040 int num_verts = 0;
01041 // Get the EntityType
01042 // Get the tag values at the vertices
01043 const EntityHandle* connect;
01044 int num_connect;
01045 ErrorCode result = mbImpl->get_connectivity( elem, connect, num_connect );
01046 if( MB_SUCCESS != result ) return result;
01047 EntityType etype = mbImpl->type_from_handle( elem );
01048 if( MBHEX == etype )
01049 {
01050 if( 8 == num_connect )
01051 {
01052 elemMap = new moab::Element::LinearHex();
01053 num_verts = 8;
01054 }
01055 else
01056 { /* (MBHEX == etype && 27 == num_connect) */
01057 elemMap = new moab::Element::QuadraticHex();
01058 num_verts = 27;
01059 }
01060 }
01061 else if( MBTET == etype )
01062 {
01063 elemMap = new moab::Element::LinearTet();
01064 num_verts = 4;
01065 }
01066 else if( MBQUAD == etype )
01067 {
01068 elemMap = new moab::Element::LinearQuad();
01069 num_verts = 4;
01070 }
01071 else if( MBTRI == etype )
01072 {
01073 elemMap = new moab::Element::LinearTri();
01074 num_verts = 3;
01075 }
01076 else
01077 return MB_FAILURE;
01078
01079 result = mbImpl->tag_get_data( tag, connect, std::min( num_verts, num_connect ), vfields );
01080 if( MB_SUCCESS != result )
01081 {
01082 delete elemMap;
01083 return result;
01084 }
01085
01086 // Function for the interpolation
01087 field = 0;
01088
01089 // Check the number of vertices
01090 assert( num_connect >= num_verts );
01091
01092 // Calculate the field
01093 try
01094 {
01095 field = elemMap->evaluate_scalar_field( nat_coord, vfields );
01096 }
01097 catch( moab::Element::Map::EvaluationError& )
01098 {
01099 delete elemMap;
01100 return MB_FAILURE;
01101 }
01102
01103 delete elemMap;
01104 }
01105
01106 return MB_SUCCESS;
01107 }
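/* For the 8-node hex branch above, evaluate_scalar_field() amounts to the standard
 * trilinear blend of the vertex values at the natural coordinates (xi, eta, zeta):
 *
 *   field = 1/8 * sum_k (1 + xi_k*xi)(1 + eta_k*eta)(1 + zeta_k*zeta) * vfields[k]
 *
 * with (xi_k, eta_k, zeta_k) the +/-1 corner coordinates of the reference hex.
 */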
01108
01109 // Simplest "interpolation" for element-based source fields. Set the value of the field
01110 // at the target point to that of the field in the source element it lies in.
01111 ErrorCode Coupler::constant_interp( EntityHandle elem, Tag tag, double& field )
01112 {
01113 double tempField;
01114
01115 // Get the tag values at the vertices
01116 ErrorCode result = mbImpl->tag_get_data( tag, &elem, 1, &tempField );
01117 if( MB_SUCCESS != result ) return result;
01118
01119 field = tempField;
01120
01121 return MB_SUCCESS;
01122 }
01123
01124 // Normalize a field over the entire mesh represented by the root_set.
01125 ErrorCode Coupler::normalize_mesh( EntityHandle root_set,
01126 const char* norm_tag,
01127 Coupler::IntegType integ_type,
01128 int num_integ_pts )
01129 {
01130 ErrorCode err;
01131
01132 // SLAVE START ****************************************************************
01133 // Search for entities based on tag_handles and tag_values
01134 std::vector< std::vector< EntityHandle > > entity_sets;
01135 std::vector< std::vector< EntityHandle > > entity_groups;
01136
01137 // put the root_set into entity_sets
01138 std::vector< EntityHandle > ent_set;
01139 ent_set.push_back( root_set );
01140 entity_sets.push_back( ent_set );
01141
01142 // get all entities from root_set and put into entity_groups
01143 std::vector< EntityHandle > entities;
01144 err = mbImpl->get_entities_by_handle( root_set, entities, true );ERRORR( "Failed to get entities in root_set.", err );
01145
01146 entity_groups.push_back( entities );
01147
01148 // Call do_normalization() to continue common normalization processing
01149 err = do_normalization( norm_tag, entity_sets, entity_groups, integ_type, num_integ_pts );ERRORR( "Failure in do_normalization().", err );
01150 // SLAVE END ****************************************************************
01151
01152 return err;
01153 }
01154
01155 // Normalize a field over the subset of entities identified by the tags and values passed
01156 ErrorCode Coupler::normalize_subset( EntityHandle root_set,
01157 const char* norm_tag,
01158 const char** tag_names,
01159 int num_tags,
01160 const char** tag_values,
01161 Coupler::IntegType integ_type,
01162 int num_integ_pts )
01163 {
01164 moab::ErrorCode err;
01165 std::vector< Tag > tag_handles;
01166
01167 // Lookup tag handles from tag names
01168 for( int t = 0; t < num_tags; t++ )
01169 {
01170 // get tag handle & size
01171 Tag th;
01172 err = mbImpl->tag_get_handle( tag_names[t], 1, moab::MB_TYPE_DOUBLE, th, moab::MB_TAG_ANY );ERRORR( "Failed to get tag handle.", err );
01173 tag_handles.push_back( th );
01174 }
01175
01176 return normalize_subset( root_set, norm_tag, &tag_handles[0], num_tags, tag_values, integ_type, num_integ_pts );
01177 }
01178
01179 ErrorCode Coupler::normalize_subset( EntityHandle root_set,
01180 const char* norm_tag,
01181 Tag* tag_handles,
01182 int num_tags,
01183 const char** tag_values,
01184 Coupler::IntegType integ_type,
01185 int num_integ_pts )
01186 {
01187 ErrorCode err;
01188
01189 // SLAVE START ****************************************************************
01190 // Search for entities based on tag_handles and tag_values
01191 std::vector< std::vector< EntityHandle > > entity_sets;
01192 std::vector< std::vector< EntityHandle > > entity_groups;
01193
01194 err = get_matching_entities( root_set, tag_handles, tag_values, num_tags, &entity_sets, &entity_groups );ERRORR( "Failed to get matching entities.", err );
01195
01196 // Call do_normalization() to continue common normalization processing
01197 err = do_normalization( norm_tag, entity_sets, entity_groups, integ_type, num_integ_pts );ERRORR( "Failure in do_normalization().", err );
01198 // SLAVE END ****************************************************************
01199
01200 return err;
01201 }
01202
01203 ErrorCode Coupler::do_normalization( const char* norm_tag,
01204 std::vector< std::vector< EntityHandle > >& entity_sets,
01205 std::vector< std::vector< EntityHandle > >& entity_groups,
01206 Coupler::IntegType integ_type,
01207 int num_integ_pts )
01208 {
01209 // SLAVE START ****************************************************************
01210 ErrorCode err;
01211 int ierr = 0;
01212
01213 // Setup data for parallel computing
01214 int nprocs, rank;
01215 ierr = MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
01216 ERRORMPI( "Getting number of procs failed.", ierr );
01217 ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01218 ERRORMPI( "Getting rank failed.", ierr );
01219
01220 // Get the integrated field value for each group(vector) of entities.
01221 // If no entities are in a group then a zero will be put in the list
01222 // of return values.
01223 unsigned int num_ent_grps = entity_groups.size();
01224 std::vector< double > integ_vals( num_ent_grps );
01225
01226 err = get_group_integ_vals( entity_groups, integ_vals, norm_tag, num_integ_pts, integ_type );ERRORR( "Failed to get integrated field values for groups in mesh.", err );
01227 // SLAVE END ****************************************************************
01228
01229 // SLAVE/MASTER START #########################################################
01230 // Send list of integrated values back to master proc. The ordering of the
01231 // values will match the ordering of the entity groups (i.e. vector of vectors)
01232 // sent from master to slaves earlier. The values for each entity group will
01233 // be summed during the transfer.
01234 std::vector< double > sum_integ_vals( num_ent_grps );
01235
01236 if( nprocs > 1 )
01237 {
01238 // If parallel then send the values back to the master.
01239 ierr = MPI_Reduce( &integ_vals[0], &sum_integ_vals[0], num_ent_grps, MPI_DOUBLE, MPI_SUM, MASTER_PROC,
01240 myPc->proc_config().proc_comm() );
01241 ERRORMPI( "Transfer and reduction of integrated values failed.", ierr );
01242 }
01243 else
01244 {
01245 // Otherwise just copy the vector
01246 sum_integ_vals = integ_vals;
01247 }
01248 // SLAVE/MASTER END #########################################################
01249
01250 // MASTER START ***************************************************************
01251 // Calculate the normalization factor for each group by taking the
01252 // inverse of each integrated field value. Put the normalization factor
01253 // for each group back into the list in the same order.
01254 for( unsigned int i = 0; i < num_ent_grps; i++ )
01255 {
01256 double val = sum_integ_vals[i];
01257 if( fabs( val ) > 1e-8 )
01258 sum_integ_vals[i] = 1.0 / val;
01259 else
01260 {
01261 sum_integ_vals[i] = 0.0; /* VSM: not sure what we should do here ? */
01262 /* commenting out error below since if integral(value)=0.0, then normalization
01263 is probably unnecessary to start with ? */
01264 /* ERRORR("Integrating an invalid field -- integral("<<norm_tag<<") = "<<val<<".", MB_FAILURE); */
01265 }
01266 }
01267 // MASTER END ***************************************************************
01268
01269 // MASTER/SLAVE START #########################################################
01270 // Broadcast the normalization factors back to all procs.
01271 if( nprocs > 1 )
01272 {
01273 // If parallel then broadcast the normalization factors to the procs.
01274 ierr = MPI_Bcast( &sum_integ_vals[0], num_ent_grps, MPI_DOUBLE, MASTER_PROC, myPc->proc_config().proc_comm() );
01275 ERRORMPI( "Broadcast of normalization factors failed.", ierr );
01276 }
01277 // MASTER/SLAVE END #########################################################
01278
01279 // SLAVE START ****************************************************************
01280 // Save the normalization factors to a new tag with name of norm_tag's value
01281 // and the string "_normF" appended. This new tag will be created on the entity
01282 // set that contains all of the entities from a group.
01283
01284 err = apply_group_norm_factor( entity_sets, sum_integ_vals, norm_tag, integ_type );ERRORR( "Failed to set the normalization factor for groups in mesh.", err );
01285 // SLAVE END ****************************************************************
01286
01287 return err;
01288 }
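/* Worked example for the factors computed above: if the integral of norm_tag over a
 * group of entities comes out as 2.5, the value stored (and later broadcast) for that
 * group is 1.0 / 2.5 = 0.4; a group whose integral is numerically zero gets a factor
 * of 0.0 instead.
 */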
01289
01290 // Functions supporting the subset normalization function
01291
01292 // Retrieve groups of entities matching tags and values if present
01293 ErrorCode Coupler::get_matching_entities( EntityHandle root_set,
01294 const char** tag_names,
01295 const char** tag_values,
01296 int num_tags,
01297 std::vector< std::vector< EntityHandle > >* entity_sets,
01298 std::vector< std::vector< EntityHandle > >* entity_groups )
01299 {
01300 ErrorCode err;
01301 std::vector< Tag > tag_handles;
01302
01303 for( int t = 0; t < num_tags; t++ )
01304 {
01305 // Get tag handle & size
01306 Tag th;
01307 err = mbImpl->tag_get_handle( tag_names[t], 1, moab::MB_TYPE_DOUBLE, th, moab::MB_TAG_ANY );ERRORR( "Failed to get tag handle.", err );
01308 tag_handles.push_back( th );
01309 }
01310
01311 return get_matching_entities( root_set, &tag_handles[0], tag_values, num_tags, entity_sets, entity_groups );
01312 }
01313
01314 // Retrieve groups of entities matching tags and values if present
01315 ErrorCode Coupler::get_matching_entities( EntityHandle root_set,
01316 Tag* tag_handles,
01317 const char** tag_values,
01318 int num_tags,
01319 std::vector< std::vector< EntityHandle > >* entity_sets,
01320 std::vector< std::vector< EntityHandle > >* entity_groups )
01321 {
01322 // SLAVE START ****************************************************************
01323
01324 // Setup data for parallel computing
01325 ErrorCode err;
01326 int ierr = 0;
01327 int nprocs, rank;
01328 ierr = MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
01329 ERRORMPI( "Getting number of procs failed.", ierr );
01330 ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
01331 ERRORMPI( "Getting rank failed.", ierr );
01332
01333 Range ent_sets;
01334 err =
01335 mbImpl->get_entities_by_type_and_tag( root_set, moab::MBENTITYSET, tag_handles, (const void* const*)tag_values,
01336 num_tags, ent_sets, Interface::INTERSECT, false );ERRORR( "Core::get_entities_by_type_and_tag failed.", err );
01337
01338 TupleList* tag_list = NULL;
01339 err = create_tuples( ent_sets, tag_handles, num_tags, &tag_list );ERRORR( "Failed to create tuples from entity sets.", err );
01340
01341 // Free up range
01342 ent_sets.clear();
01343 // SLAVE END ****************************************************************
01344
01345 // If we are running in a multi-proc session then send tuple list back to master
01346 // proc for consolidation. Otherwise just copy the pointer to the tuple_list.
01347 TupleList* cons_tuples;
01348 if( nprocs > 1 )
01349 {
01350 // SLAVE/MASTER START #########################################################
01351
01352 // Pack the tuple_list in a buffer.
01353 uint* tuple_buf;
01354 int tuple_buf_len;
01355 tuple_buf_len = pack_tuples( tag_list, (void**)&tuple_buf );
01356
01357 // Free tag_list here as its not used again if nprocs > 1
01358 tag_list->reset();
01359
01360 // Send back the buffer sizes to the master proc
01361 int* recv_cnts = (int*)malloc( nprocs * sizeof( int ) );
01362 int* offsets = (int*)malloc( nprocs * sizeof( int ) );
01363 uint* all_tuples_buf = NULL;
01364
01365 ierr = MPI_Gather( &tuple_buf_len, 1, MPI_INT, recv_cnts, 1, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm() );
01366 ERRORMPI( "Gathering buffer sizes failed.", ierr );
01367
01368 // Allocate a buffer large enough for all the data
01369 if( rank == MASTER_PROC )
01370 {
01371 int all_tuples_len = recv_cnts[0];
01372 offsets[0] = 0;
01373 for( int i = 1; i < nprocs; i++ )
01374 {
01375 offsets[i] = offsets[i - 1] + recv_cnts[i - 1];
01376 all_tuples_len += recv_cnts[i];
01377 }
01378
01379 all_tuples_buf = (uint*)malloc( all_tuples_len * sizeof( uint ) );
01380 }
01381
01382 // Send all buffers to the master proc for consolidation
01383 ierr = MPI_Gatherv( (void*)tuple_buf, tuple_buf_len, MPI_INT, (void*)all_tuples_buf, recv_cnts, offsets, MPI_INT,
01384 MASTER_PROC, myPc->proc_config().proc_comm() );
01385 ERRORMPI( "Gathering tuple_lists failed.", ierr );
01386 free( tuple_buf ); // malloc'd in pack_tuples
01387
01388 if( rank == MASTER_PROC )
01389 {
01390 // Unpack the tuple_list from the buffer.
01391 TupleList** tl_array = (TupleList**)malloc( nprocs * sizeof( TupleList* ) );
01392 for( int i = 0; i < nprocs; i++ )
01393 unpack_tuples( (void*)&all_tuples_buf[offsets[i]], &tl_array[i] );
01394
01395 // Free all_tuples_buf here as it is only allocated on the MASTER_PROC
01396 free( all_tuples_buf );
01397 // SLAVE/MASTER END #########################################################
01398
01399 // MASTER START ***************************************************************
01400 // Consolidate all tuple_lists into one tuple_list with no duplicates.
01401 err = consolidate_tuples( tl_array, nprocs, &cons_tuples );ERRORR( "Failed to consolidate tuples.", err );
01402
01403 for( int i = 0; i < nprocs; i++ )
01404 tl_array[i]->reset();
01405 free( tl_array );
01406 // MASTER END ***************************************************************
01407 }
01408
01409 // Free offsets and recv_cnts as they are allocated on all procs
01410 free( offsets );
01411 free( recv_cnts );
01412
01413 // MASTER/SLAVE START #########################################################
01414 // Broadcast condensed tuple list back to all procs.
01415 uint* ctl_buf;
01416 int ctl_buf_sz;
01417 if( rank == MASTER_PROC ) ctl_buf_sz = pack_tuples( cons_tuples, (void**)&ctl_buf );
01418
01419 // Send buffer size
01420 ierr = MPI_Bcast( &ctl_buf_sz, 1, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm() );
01421 ERRORMPI( "Broadcasting tuple_list size failed.", ierr );
01422
01423 // Allocate a buffer in the other procs
01424 if( rank != MASTER_PROC ) ctl_buf = (uint*)malloc( ctl_buf_sz * sizeof( uint ) );
01425
01426 ierr = MPI_Bcast( (void*)ctl_buf, ctl_buf_sz, MPI_INT, MASTER_PROC, myPc->proc_config().proc_comm() );
01427 ERRORMPI( "Broadcasting tuple_list failed.", ierr );
01428
01429 if( rank != MASTER_PROC ) unpack_tuples( ctl_buf, &cons_tuples );
01430 free( ctl_buf );
01431 // MASTER/SLAVE END #########################################################
01432 }
01433 else
01434 cons_tuples = tag_list;
01435
01436 // SLAVE START ****************************************************************
01437 // Loop over the tuple list getting the entities with the tags in the tuple_list entry
01438 uint mi, ml, mul, mr;
01439 cons_tuples->getTupleSize( mi, ml, mul, mr );
01440
01441 for( unsigned int i = 0; i < cons_tuples->get_n(); i++ )
01442 {
01443 // Get Entity Sets that match the tags and values.
01444
01445 // Convert the data in the tuple_list to an array of pointers to the data
01446 // in the tuple_list as that is what the iMesh API call is expecting.
01447 int** vals = (int**)malloc( mi * sizeof( int* ) );
01448 for( unsigned int j = 0; j < mi; j++ )
01449 vals[j] = (int*)&( cons_tuples->vi_rd[( i * mi ) + j] );
01450
01451 // Get entities recursively based on type and tag data
01452 err = mbImpl->get_entities_by_type_and_tag( root_set, moab::MBENTITYSET, tag_handles, (const void* const*)vals,
01453 mi, ent_sets, Interface::INTERSECT, false );ERRORR( "Core::get_entities_by_type_and_tag failed.", err );
01454 if( debug ) std::cout << "ent_sets_size=" << ent_sets.size() << std::endl;
01455
01456 // Free up the array of pointers
01457 free( vals );
01458
01459 // Loop over the entity sets and then free the memory for ent_sets.
01460 std::vector< EntityHandle > ent_set_hdls;
01461 std::vector< EntityHandle > ent_hdls;
01462 for( unsigned int j = 0; j < ent_sets.size(); j++ )
01463 {
01464 // Save the entity set
01465 ent_set_hdls.push_back( ent_sets[j] );
01466
01467 // Get all entities for the entity set
01468 Range ents;
01469
01470 /* VSM: do we need to filter out entity sets ? */
01471 err = mbImpl->get_entities_by_handle( ent_sets[j], ents, false );ERRORR( "Core::get_entities_by_handle failed.", err );
01472 if( debug ) std::cout << "ents_size=" << ents.size() << std::endl;
01473
01474 // Save all of the entities from the entity set and free the memory for ents.
01475 for( unsigned int k = 0; k < ents.size(); k++ )
01476 {
01477 ent_hdls.push_back( ents[k] );
01478 }
01479 ents.clear();
01480 if( debug ) std::cout << "ent_hdls.size=" << ent_hdls.size() << std::endl;
01481 }
01482
01483 // Free the entity set list for next tuple iteration.
01484 ent_sets.clear();
01485
01486 // Push ent_set_hdls onto entity_sets, ent_hdls onto entity_groups
01487 // and clear both ent_set_hdls and ent_hdls.
01488 entity_sets->push_back( ent_set_hdls );
01489 ent_set_hdls.clear();
01490 entity_groups->push_back( ent_hdls );
01491 ent_hdls.clear();
01492 if( debug )
01493 std::cout << "entity_sets->size=" << entity_sets->size()
01494 << ", entity_groups->size=" << entity_groups->size() << std::endl;
01495 }
01496
01497 cons_tuples->reset();
01498 // SLAVE END ****************************************************************
01499
01500 return err;
01501 }
01502
01503 // Return a tuple_list containing tag values for each Entity Set
01504 // The tuple_list will have a column for each tag and a row for each
01505 // Entity Set. It is assumed all of the tags are integer tags.
01506 ErrorCode Coupler::create_tuples( Range& ent_sets,
01507 const char** tag_names,
01508 unsigned int num_tags,
01509 TupleList** tuple_list )
01510 {
01511 ErrorCode err;
01512 std::vector< Tag > tag_handles;
01513
01514 for( unsigned int t = 0; t < num_tags; t++ )
01515 {
01516 // Get tag handle & size
01517 Tag th;
01518 err = mbImpl->tag_get_handle( tag_names[t], 1, moab::MB_TYPE_DOUBLE, th, moab::MB_TAG_ANY );ERRORR( "Failed to get tag handle.", err );
01519 tag_handles.push_back( th );
01520 }
01521
01522 return create_tuples( ent_sets, &tag_handles[0], num_tags, tuple_list );
01523 }
01524
01525 // Return a tuple_list containing tag values for each Entity Set
01526 // The tuple_list will have a column for each tag and a row for each
01527 // Entity Set. It is assumed all of the tags are integer tags.
01528 ErrorCode Coupler::create_tuples( Range& ent_sets, Tag* tag_handles, unsigned int num_tags, TupleList** tuples )
01529 {
01530 // ASSUMPTION: All tags are of type integer. This may need to be expanded in future.
01531 ErrorCode err;
01532
01533 // Allocate a tuple_list for the number of entity sets passed in
01534 TupleList* tag_tuples = new TupleList( num_tags, 0, 0, 0, (int)ent_sets.size() );
01535 // tag_tuples->initialize(num_tags, 0, 0, 0, num_sets);
01536 uint mi, ml, mul, mr;
01537 tag_tuples->getTupleSize( mi, ml, mul, mr );
01538 tag_tuples->enableWriteAccess();
01539
01540 if( mi == 0 ) ERRORR( "Failed to initialize tuple_list.", MB_FAILURE );
01541
01542 // Loop over the filtered entity sets retrieving each matching tag value one by one.
01543 int val;
01544 for( unsigned int i = 0; i < ent_sets.size(); i++ )
01545 {
01546 for( unsigned int j = 0; j < num_tags; j++ )
01547 {
01548 EntityHandle set_handle = ent_sets[i];
01549 err = mbImpl->tag_get_data( tag_handles[j], &set_handle, 1, &val );ERRORR( "Failed to get integer tag data.", err );
01550 tag_tuples->vi_wr[i * mi + j] = val;
01551 }
01552
01553 // If we get here there was no error so increment n in the tuple_list
01554 tag_tuples->inc_n();
01555 }
01556 tag_tuples->disableWriteAccess();
01557 *tuples = tag_tuples;
01558
01559 return MB_SUCCESS;
01560 }
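/* Shape of the list returned above (values are hypothetical): for 3 matching entity sets
 * and 2 tag handles, the TupleList has mi = 2 integers per tuple and n = 3 tuples, so the
 * value of tag j on set i is read back as tag_tuples->vi_rd[i * 2 + j].
 */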
01561
01562 // Consolidate tuple_lists into one list with no duplicates
01563 ErrorCode Coupler::consolidate_tuples( TupleList** all_tuples, unsigned int num_tuples, TupleList** unique_tuples )
01564 {
01565 int total_rcv_tuples = 0;
01566 int offset = 0, copysz = 0;
01567 unsigned num_tags = 0;
01568
01569 uint ml, mul, mr;
01570 uint* mi = (uint*)malloc( sizeof( uint ) * num_tuples );
01571
01572 for( unsigned int i = 0; i < num_tuples; i++ )
01573 {
01574 all_tuples[i]->getTupleSize( mi[i], ml, mul, mr );
01575 }
01576
01577 for( unsigned int i = 0; i < num_tuples; i++ )
01578 {
01579 if( all_tuples[i] != NULL )
01580 {
01581 // Only record the column (tag) count here; the tuple totals are summed once below.
01582 num_tags = mi[i];
01583 }
01584 }
01585 const unsigned int_size = sizeof( sint );
01586 const unsigned int_width = num_tags * int_size;
01587
01588 // Get the total size of all of the tuple_lists in all_tuples.
01589 for( unsigned int i = 0; i < num_tuples; i++ )
01590 {
01591 if( all_tuples[i] != NULL ) total_rcv_tuples += all_tuples[i]->get_n();
01592 }
01593
01594 // Copy the tuple_lists into a single tuple_list.
01595 TupleList* all_tuples_list = new TupleList( num_tags, 0, 0, 0, total_rcv_tuples );
01596 all_tuples_list->enableWriteAccess();
01597 // all_tuples_list->initialize(num_tags, 0, 0, 0, total_rcv_tuples);
01598 for( unsigned int i = 0; i < num_tuples; i++ )
01599 {
01600 if( all_tuples[i] != NULL )
01601 {
01602 copysz = all_tuples[i]->get_n() * int_width;
01603 memcpy( all_tuples_list->vi_wr + offset, all_tuples[i]->vi_rd, copysz );
01604 offset = offset + ( all_tuples[i]->get_n() * mi[i] );
01605 all_tuples_list->set_n( all_tuples_list->get_n() + all_tuples[i]->get_n() );
01606 }
01607 }
01608
01609 // Sort the new tuple_list. Use a radix type sort, starting with the last (or least significant)
01610 // tag column in the vi array and working towards the first (or most significant) tag column.
01611 TupleList::buffer sort_buffer;
01612 sort_buffer.buffer_init( 2 * total_rcv_tuples * int_width );
01613 for( int i = num_tags - 1; i >= 0; i-- )
01614 {
01615 all_tuples_list->sort( i, &sort_buffer );
01616 }
01617
01618 // Cycle through the sorted list eliminating duplicates.
01619 // Keep counters to the current end of the tuple_list (w/out dups) and the last tuple examined.
01620 unsigned int end_idx = 0, last_idx = 1;
01621 while( last_idx < all_tuples_list->get_n() )
01622 {
01623 if( memcmp( all_tuples_list->vi_rd + ( end_idx * num_tags ), all_tuples_list->vi_rd + ( last_idx * num_tags ),
01624 int_width ) == 0 )
01625 {
01626 // Values equal - skip
01627 last_idx += 1;
01628 }
01629 else
01630 {
01631 // Values different - copy
01632 // Move up the end index
01633 end_idx += 1;
01634 memcpy( all_tuples_list->vi_wr + ( end_idx * num_tags ), all_tuples_list->vi_rd + ( last_idx * num_tags ),
01635 int_width );
01636 last_idx += 1;
01637 }
01638 }
01639 // Update the count in all_tuples_list
01640 all_tuples_list->set_n( end_idx + 1 );
01641
01642 // Resize the tuple_list
01643 all_tuples_list->resize( all_tuples_list->get_n() );
01644
01645 // Set the output parameter and free the scratch array of per-list column counts
01646 *unique_tuples = all_tuples_list;
01647 free( mi );
01648 return MB_SUCCESS;
01649 }
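// A minimal sketch of consolidate_tuples(): two single-column integer lists with
// one overlapping value are merged into a single sorted list with duplicates
// removed. The literal values are arbitrary; only the calling pattern matters,
// and consolidate_tuples() is assumed to be callable from this context.
static ErrorCode example_consolidate( Coupler& coupler )
{
    TupleList list_a( 1, 0, 0, 0, 2 ), list_b( 1, 0, 0, 0, 2 );
    list_a.enableWriteAccess();
    list_a.vi_wr[0] = 10; list_a.inc_n();
    list_a.vi_wr[1] = 20; list_a.inc_n();
    list_b.enableWriteAccess();
    list_b.vi_wr[0] = 20; list_b.inc_n();  // duplicate of list_a's second row
    list_b.vi_wr[1] = 30; list_b.inc_n();

    TupleList* inputs[2] = { &list_a, &list_b };
    TupleList* unique    = NULL;
    ErrorCode rval = coupler.consolidate_tuples( inputs, 2, &unique );
    if( MB_SUCCESS != rval ) return rval;
    std::cout << "unique rows: " << unique->get_n() << std::endl;  // expect 3 (10, 20, 30)
    delete unique;
    return MB_SUCCESS;
}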
01650
01651 // Calculate integrated field values for groups of entities
01652 ErrorCode Coupler::get_group_integ_vals( std::vector< std::vector< EntityHandle > >& groups,
01653 std::vector< double >& integ_vals,
01654 const char* norm_tag,
01655 int /*num_integ_vals*/,
01656 Coupler::IntegType integ_type )
01657 {
01658 ErrorCode err;
01659
01660 std::vector< std::vector< EntityHandle > >::iterator iter_i;
01661 std::vector< EntityHandle >::iterator iter_j;
01662 double grp_intrgr_val, intgr_val;
01663
01664 // Get the tag handle for norm_tag
01665 Tag norm_hdl;
01666 err =
01667 mbImpl->tag_get_handle( norm_tag, 1, moab::MB_TYPE_DOUBLE, norm_hdl, moab::MB_TAG_SPARSE | moab::MB_TAG_CREAT );ERRORR( "Failed to get norm_tag handle.", err );
01668
01669 // Check size of integ_vals vector
01670 if( integ_vals.size() != groups.size() ) integ_vals.resize( groups.size() );
01671
01672 // Loop over the groups(vectors) of entities
01673 unsigned int i;
01674 for( i = 0, iter_i = groups.begin(); iter_i != groups.end(); i++, ++iter_i )
01675 {
01676 grp_intrgr_val = 0;
01677
01678 // Loop over all the entities in the group, integrating
01679 // the field_fn over the entity in iter_j
01680 for( iter_j = ( *iter_i ).begin(); iter_j != ( *iter_i ).end(); ++iter_j )
01681 {
01682 EntityHandle ehandle = ( *iter_j );
01683
01684 // Check that the entity in iter_j is of the same dimension as the
01685 // integ_type we are performing
01686 EntityType j_type;
01687 j_type = mbImpl->type_from_handle( ehandle );if( MBMAXTYPE == j_type ) ERRORR( "Failed to get entity type.", MB_FAILURE );
01688 // Skip any entities in the group that are not of the type being considered
01689 if( ( integ_type == VOLUME ) && ( j_type < MBTET || j_type >= MBENTITYSET ) ) continue;
01690
01691 intgr_val = 0;
01692
01693 // Retrieve the vertices from the element
01694 const EntityHandle* verts = NULL;
01695 int connectivity_size = 0;
01696
01697 err = mbImpl->get_connectivity( ehandle, verts, connectivity_size, false );ERRORR( "Failed to get vertices from entity.", err );
01698
01699 // Get the vertex coordinates and the field values at the vertices.
01700 double* coords = (double*)malloc( sizeof( double ) * ( 3 * connectivity_size ) );
01701 /* TODO: VSM: check if this works for lower dimensions also without problems */
01702 /* if (3 == geom_dim) */
01703 err = mbImpl->get_coords( verts, connectivity_size, coords );ERRORR( "Failed to get vertex coordinates.", err );
01704
01705 /* allocate the field data array */
01706 double* vfield = (double*)malloc( sizeof( double ) * ( connectivity_size ) );
01707 err = mbImpl->tag_get_data( norm_hdl, verts, connectivity_size, vfield );
01708 if( MB_SUCCESS != err )
01709 {
01710 free( coords );
01711 free( vfield );
01712 }
01713 ERRORR( "Failed to get norm_tag data on vertices.", err );
01714 // Get coordinates of all corner vertices (in normal order) and
01715 // put them in an array of CartVect.
01716 std::vector< CartVect > vertices( connectivity_size );
01717
01718 // Put the vertices into a CartVect vector
01719 double* x = coords;
01720 for( int j = 0; j < connectivity_size; j++, x += 3 )
01721 {
01722 vertices[j] = CartVect( x );
01723 }
01724 free( coords );
01725
01726 moab::Element::Map* elemMap;
01727 if( j_type == MBHEX )
01728 {
01729 if( connectivity_size == 8 )
01730 elemMap = new moab::Element::LinearHex( vertices );
01731 else
01732 elemMap = new moab::Element::QuadraticHex( vertices );
01733 }
01734 else if( j_type == MBTET )
01735 {
01736 elemMap = new moab::Element::LinearTet( vertices );
01737 }
01738 else if( j_type == MBQUAD )
01739 {
01740 elemMap = new moab::Element::LinearQuad( vertices );
01741 }
01742 /*
01743 else if (j_type == MBTRI) {
01744 elemMap = new moab::Element::LinearTri(vertices);
01745 }
01746 */
01747 else if( j_type == MBEDGE )
01748 {
01749 elemMap = new moab::Element::LinearEdge( vertices );
01750 }
01751 else
01752 ERRORR( "Unknown topology type.", MB_UNSUPPORTED_OPERATION );
01753
01754 // Set the vertices in the Map and perform the integration
01755 try
01756 {
01757 /* VSM: Do we need this call ?? */
01758 // elemMap->set_vertices(vertices);
01759
01760 // Perform the actual integration over the element
01761 intgr_val = elemMap->integrate_scalar_field( vfield );
01762
01763 // Combine the result with those of the group
01764 grp_intrgr_val += intgr_val;
01765 }
01766 catch( moab::Element::Map::ArgError& )
01767 {
01768 std::cerr << "Failed to set vertices on Element::Map." << std::endl;
01769 }
01770 catch( moab::Element::Map::EvaluationError& )
01771 {
01772 std::cerr << "Failed to evaluate the scalar field integral on Element::Map." << std::endl;
01773 }
01774
01775 delete( elemMap );
01776 free( vfield );
01777 }
01778
01779 // Set the group integrated value in the vector
01780 integ_vals[i] = grp_intrgr_val;
01781 }
01782
01783 return err;
01784 }
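// One plausible way to drive get_group_integ_vals(): each inner vector holds the
// elements of one normalization group and the output is one integral per group.
// "vertex_field" is a hypothetical vertex tag name, and VOLUME restricts the
// integration to 3D elements, as in the loop above.
static ErrorCode example_group_integrals( Coupler& coupler, Range& elems )
{
    std::vector< std::vector< EntityHandle > > groups( 1 );
    groups[0].assign( elems.begin(), elems.end() );  // a single group containing every element

    std::vector< double > integrals;
    ErrorCode rval = coupler.get_group_integ_vals( groups, integrals, "vertex_field", 1, Coupler::VOLUME );
    if( MB_SUCCESS != rval ) return rval;
    std::cout << "group 0 integral = " << integrals[0] << std::endl;
    return MB_SUCCESS;
}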
01785
01786 // Apply a normalization factor to group of entities
01787 ErrorCode Coupler::apply_group_norm_factor( std::vector< std::vector< EntityHandle > >& entity_sets,
01788 std::vector< double >& norm_factors,
01789 const char* norm_tag,
01790 Coupler::IntegType /*integ_type*/ )
01791 {
01792 ErrorCode err;
01793
01794 // Construct the new tag for the normalization factor from the norm_tag name
01795 // and "_normf".
01796 int norm_tag_len = strlen( norm_tag );
01797 const char* normf_appd = "_normf";
01798 int normf_appd_len = strlen( normf_appd );
01799
01800 char* normf_tag = (char*)malloc( norm_tag_len + normf_appd_len + 1 );
01801 char* tmp_ptr = normf_tag;
01802
01803 memcpy( tmp_ptr, norm_tag, norm_tag_len );
01804 tmp_ptr += norm_tag_len;
01805 memcpy( tmp_ptr, normf_appd, normf_appd_len );
01806 tmp_ptr += normf_appd_len;
01807 *tmp_ptr = '\0';
01808
01809 Tag normf_hdl;
01810 // Check to see if the tag exists. If not then create it and get the handle.
01811 err = mbImpl->tag_get_handle( normf_tag, 1, moab::MB_TYPE_DOUBLE, normf_hdl,
01812 moab::MB_TAG_SPARSE | moab::MB_TAG_CREAT );ERRORR( "Failed to create normalization factor tag.", err );
01813 if( normf_hdl == NULL )
01814 {
01815 std::string msg( "Failed to create normalization factor tag named '" );
01816 msg += std::string( normf_tag ) + std::string( "'" );ERRORR( msg.c_str(), MB_FAILURE );
01817 }
01818 free( normf_tag );
01819
01820 std::vector< std::vector< EntityHandle > >::iterator iter_i;
01821 std::vector< EntityHandle >::iterator iter_j;
01822 std::vector< double >::iterator iter_f;
01823 double grp_norm_factor = 0.0;
01824
01825 // Loop over the entity sets
01826 for( iter_i = entity_sets.begin(), iter_f = norm_factors.begin();
01827 ( iter_i != entity_sets.end() ) && ( iter_f != norm_factors.end() ); ++iter_i, ++iter_f )
01828 {
01829 grp_norm_factor = *iter_f;
01830
01831 // Loop over all the entity sets in iter_i and set the
01832 // new normf_tag with the norm factor value on each
01833 for( iter_j = ( *iter_i ).begin(); iter_j != ( *iter_i ).end(); ++iter_j )
01834 {
01835 EntityHandle entset = *iter_j;
01836
01837 std::cout << "Coupler: applying normalization for entity set=" << entset
01838 << ", normalization_factor=" << grp_norm_factor << std::endl;
01839
01840 err = mbImpl->tag_set_data( normf_hdl, &entset, 1, &grp_norm_factor );ERRORR( "Failed to set normalization factor on entity set.", err );
01841 }
01842 }
01843
01844 return MB_SUCCESS;
01845 }
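// Sketch of the usual pairing with get_group_integ_vals(): turn each group
// integral into a multiplicative factor (here simply 1/integral, guarding
// against zero) and stamp it onto every set of the group through the
// "<norm_tag>_normf" tag created above. `set_groups` is assumed to mirror the
// grouping used when the integrals were computed, and "vertex_field" is again
// a placeholder tag name.
static ErrorCode example_apply_norm( Coupler& coupler,
                                     std::vector< std::vector< EntityHandle > >& set_groups,
                                     std::vector< double >& integrals )
{
    std::vector< double > factors( integrals.size(), 1.0 );
    for( size_t i = 0; i < integrals.size(); i++ )
        if( integrals[i] != 0.0 ) factors[i] = 1.0 / integrals[i];

    return coupler.apply_group_norm_factor( set_groups, factors, "vertex_field", Coupler::VOLUME );
}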
01846
01847 #define UINT_PER_X( X ) ( ( sizeof( X ) + sizeof( uint ) - 1 ) / sizeof( uint ) )
01848 #define UINT_PER_REAL UINT_PER_X( realType )
01849 #define UINT_PER_LONG UINT_PER_X( slong )
01850 #define UINT_PER_UNSIGNED UINT_PER_X( unsigned )
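// UINT_PER_X(X) is a ceiling division: the number of uint slots needed to hold
// one X. With a 4-byte uint and 8-byte slong/realType, for example, it gives
// (8 + 4 - 1) / 4 = 2, while UINT_PER_UNSIGNED gives (4 + 4 - 1) / 4 = 1 (the
// exact values depend on the platform). These counts size the buffers used by
// pack_tuples()/unpack_tuples() below.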
01851
01852 // Function for packing tuple_list. Returns number of uints copied into buffer.
01853 int pack_tuples( TupleList* tl, void** ptr )
01854 {
01855 uint mi, ml, mul, mr;
01856 tl->getTupleSize( mi, ml, mul, mr );
01857
01858 uint n = tl->get_n();
01859
01860 int sz_buf = 1 + 4 * UINT_PER_UNSIGNED +
01861 tl->get_n() * ( mi + ml * UINT_PER_LONG + mul * UINT_PER_LONG + mr * UINT_PER_REAL );
01862
01863 uint* buf = (uint*)malloc( sz_buf * sizeof( uint ) );
01864 *ptr = (void*)buf;
01865
01866 // Copy n
01867 memcpy( buf, &n, sizeof( uint ) ), buf += 1;
01868 // Copy mi
01869 memcpy( buf, &mi, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01870 // Copy ml
01871 memcpy( buf, &ml, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01872 // Copy mul
01873 memcpy( buf, &mul, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01874 // Copy mr
01875 memcpy( buf, &mr, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01876 // Copy the integer data (vi)
01877 memcpy( buf, tl->vi_rd, tl->get_n() * mi * sizeof( sint ) ), buf += tl->get_n() * mi;
01878 // Copy the long data (vl)
01879 memcpy( buf, tl->vl_rd, tl->get_n() * ml * sizeof( slong ) ), buf += tl->get_n() * ml * UINT_PER_LONG;
01880 // Copy the unsigned long data (vul)
01881 memcpy( buf, tl->vul_rd, tl->get_n() * mul * sizeof( ulong ) ), buf += tl->get_n() * mul * UINT_PER_LONG;
01882 // Copy the real data (vr)
01883 memcpy( buf, tl->vr_rd, tl->get_n() * mr * sizeof( realType ) ), buf += tl->get_n() * mr * UINT_PER_REAL;
01884
01885 return sz_buf;
01886 }
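// The packed buffer produced above is laid out, in uint units, as:
//   [ n : 1 ][ mi, ml, mul, mr : UINT_PER_UNSIGNED each ]
//   [ vi : n*mi ][ vl : n*ml*UINT_PER_LONG ][ vul : n*mul*UINT_PER_LONG ][ vr : n*mr*UINT_PER_REAL ]
// unpack_tuples() below reads the header and the four arrays back in exactly this order.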
01887
01888 // Function for unpacking a tuple_list previously packed by pack_tuples()
01889 void unpack_tuples( void* ptr, TupleList** tlp )
01890 {
01891 TupleList* tl = new TupleList();
01892 *tlp = tl;
01893
01894 uint nt;
01895 unsigned mit, mlt, mult, mrt;
01896 uint* buf = (uint*)ptr;
01897
01898 // Get n
01899 memcpy( &nt, buf, sizeof( uint ) ), buf += 1;
01900 // Get mi
01901 memcpy( &mit, buf, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01902 // Get ml
01903 memcpy( &mlt, buf, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01904 // Get mul
01905 memcpy( &mult, buf, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01906 // Get mr
01907 memcpy( &mrt, buf, sizeof( unsigned ) ), buf += UINT_PER_UNSIGNED;
01908
01909 // Initialize tl
01910 tl->initialize( mit, mlt, mult, mrt, nt );
01911 tl->enableWriteAccess();
01912 tl->set_n( nt );
01913
01914 uint mi, ml, mul, mr;
01915 tl->getTupleSize( mi, ml, mul, mr );
01916
01917 // Read back the integer data (vi)
01918 memcpy( tl->vi_wr, buf, tl->get_n() * mi * sizeof( sint ) ), buf += tl->get_n() * mi;
01919 // Read back the long data (vl)
01920 memcpy( tl->vl_wr, buf, tl->get_n() * ml * sizeof( slong ) ), buf += tl->get_n() * ml * UINT_PER_LONG;
01921 // Read back the unsigned long data (vul)
01922 memcpy( tl->vul_wr, buf, tl->get_n() * mul * sizeof( ulong ) ), buf += tl->get_n() * mul * UINT_PER_LONG;
01923 // Read back the real data (vr)
01924 memcpy( tl->vr_wr, buf, tl->get_n() * mr * sizeof( realType ) ), buf += tl->get_n() * mr * UINT_PER_REAL;
01925
01926 tl->disableWriteAccess();
01927 return;
01928 }
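// Within the coupler these helpers serialize a TupleList so its raw buffer can be
// shipped between ranks (e.g. via an MPI broadcast) and rebuilt on the other side.
// A minimal serial round trip, with no MPI involved, looks like this:
static void example_pack_unpack_roundtrip( TupleList* src )
{
    void* buf       = NULL;
    int sz_in_uints = pack_tuples( src, &buf );  // allocates and fills the buffer
    (void)sz_in_uints;                           // would be the message length when communicating
    TupleList* copy = NULL;
    unpack_tuples( buf, &copy );                 // copy now mirrors src
    assert( copy->get_n() == src->get_n() );
    free( buf );                                 // pack_tuples() malloc'd the buffer
    delete copy;                                 // unpack_tuples() new'd the list
}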
01929
01930 ErrorCode Coupler::get_gl_points_on_elements( Range& targ_elems, std::vector< double >& vpos, int& numPointsOfInterest )
01931 {
01932 numPointsOfInterest = targ_elems.size() * _ntot;
01933 vpos.resize( 3 * numPointsOfInterest );
01934 int ielem = 0;
01935 for( Range::iterator eit = targ_elems.begin(); eit != targ_elems.end(); ++eit, ielem += _ntot * 3 )
01936 {
01937 EntityHandle eh = *eit;
01938 const double* xval;
01939 const double* yval;
01940 const double* zval;
01941 ErrorCode rval = mbImpl->tag_get_by_ptr( _xm1Tag, &eh, 1, (const void**)&xval );
01942 if( moab::MB_SUCCESS != rval )
01943 {
01944 std::cout << "Can't get xm1 values \n";
01945 return MB_FAILURE;
01946 }
01947 rval = mbImpl->tag_get_by_ptr( _ym1Tag, &eh, 1, (const void**)&yval );
01948 if( moab::MB_SUCCESS != rval )
01949 {
01950 std::cout << "Can't get ym1 values \n";
01951 return MB_FAILURE;
01952 }
01953 rval = mbImpl->tag_get_by_ptr( _zm1Tag, &eh, 1, (const void**)&zval );
01954 if( moab::MB_SUCCESS != rval )
01955 {
01956 std::cout << "Can't get zm1 values \n";
01957 return MB_FAILURE;
01958 }
01959 // Now, in a stride, populate vpos
01960 for( int i = 0; i < _ntot; i++ )
01961 {
01962 vpos[ielem + 3 * i] = xval[i];
01963 vpos[ielem + 3 * i + 1] = yval[i];
01964 vpos[ielem + 3 * i + 2] = zval[i];
01965 }
01966 }
01967
01968 return MB_SUCCESS;
01969 }
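// Sketch of how the per-element point coordinates gathered above might feed the
// rest of the coupler: collect the points for some target elements, then hand
// them to locate_points() so they can later be interpolated. This assumes the
// spectral coordinate tags (xm1/ym1/zm1) have already been attached to the
// target elements and that _ntot has been set for the spectral order in use.
static ErrorCode example_locate_gl_points( Coupler& coupler, Range& targ_elems )
{
    std::vector< double > vpos;
    int num_points = 0;
    ErrorCode rval = coupler.get_gl_points_on_elements( targ_elems, vpos, num_points );
    if( MB_SUCCESS != rval ) return rval;
    if( 0 == num_points ) return MB_SUCCESS;  // nothing to locate

    // vpos holds interleaved xyz coordinates, 3 * num_points values in total.
    return coupler.locate_points( &vpos[0], num_points );
}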
01970
01971 } // namespace moab