Mesh Oriented datABase
(version 5.4.1)
Array-based unstructured mesh datastructure
NCHelperGCRM.cpp
#include "NCHelperGCRM.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/FileOptions.hpp"
#include "moab/SpectralMeshTool.hpp"
#include "MBTagConventions.hpp"

#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"
#endif

#include <cmath>

namespace moab
{

// GCRM cells are either pentagons or hexagons, and pentagons are always padded to hexagons
const int EDGES_PER_CELL = 6;

NCHelperGCRM::NCHelperGCRM( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
    : UcdNCHelper( readNC, fileId, opts, fileSet ), createGatherSet( false )
{
    // Ignore variables containing topological information
    ignoredVarNames.insert( "grid" );
    ignoredVarNames.insert( "cell_corners" );
    ignoredVarNames.insert( "cell_edges" );
    ignoredVarNames.insert( "edge_corners" );
    ignoredVarNames.insert( "cell_neighbors" );
}

bool NCHelperGCRM::can_read_file( ReadNC* readNC )
{
    std::vector< std::string >& dimNames = readNC->dimNames;

    // If dimension name "cells" exists then it should be the GCRM grid
    if( std::find( dimNames.begin(), dimNames.end(), std::string( "cells" ) ) != dimNames.end() ) return true;

    return false;
}

ErrorCode NCHelperGCRM::init_mesh_vals()
{
    std::vector< std::string >& dimNames = _readNC->dimNames;
    std::vector< int >& dimLens = _readNC->dimLens;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    ErrorCode rval;
    unsigned int idx;
    std::vector< std::string >::iterator vit;

    // Look for time dimension
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'Time' or 'time' dimension" );
    }
    tDim = idx;
    nTimeSteps = dimLens[idx];

    // Get number of cells
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "cells" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'cells' dimension" );
    }
    cDim = idx;
    nCells = dimLens[idx];

    // Get number of edges
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "edges" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'edges' dimension" );
    }
    eDim = idx;
    nEdges = dimLens[idx];

    // Get number of vertices
    vDim = -1;
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "corners" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'corners' dimension" );
    }
    vDim = idx;
    nVertices = dimLens[idx];

    // Get number of layers
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "layers" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' dimension" );
    }
    levDim = idx;
    nLevels = dimLens[idx];

    // Dimension indices for other optional levels
    std::vector< unsigned int > opt_lev_dims;

    // Get index of interface levels
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "interfaces" ) ) != dimNames.end() )
    {
        idx = vit - dimNames.begin();
        opt_lev_dims.push_back( idx );
    }

    std::map< std::string, ReadNC::VarData >::iterator vmit;

    // Store time coordinate values in tVals
    if( nTimeSteps > 0 )
    {
        if( ( vmit = varInfo.find( "time" ) ) != varInfo.end() )
        {
            rval = read_coordinate( "time", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'time' variable" );
        }
        else if( ( vmit = varInfo.find( "t" ) ) != varInfo.end() )
        {
            rval = read_coordinate( "t", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 't' variable" );
        }
        else
        {
            // If expected time variable is not available, set dummy time coordinate values to tVals
            for( int t = 0; t < nTimeSteps; t++ )
                tVals.push_back( (double)t );
        }
    }

    // For each variable, determine the entity location type and number of levels
    for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
    {
        ReadNC::VarData& vd = ( *vmit ).second;

        // Default entLoc is ENTLOCSET
        if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
        {
            if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCVERT;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCEDGE;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCFACE;
        }

        // Default numLev is 0
        if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
            vd.numLev = nLevels;
        else
        {
            // If layers dimension is not found, try other optional levels such as interfaces
            for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
            {
                if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
                {
                    vd.numLev = dimLens[opt_lev_dims[i]];
                    break;
                }
            }
        }
    }

    // Hack: create dummy variables for dimensions (like cells) with no corresponding coordinate
    // variables
    rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );

    return MB_SUCCESS;
}

// When the noMesh option is used on this read, the old ReadNC class instance for the last read can get
// out of scope (and be deleted). The old instance initialized some variables properly when the mesh was
// created, but they are now lost. The new instance (which will not create the mesh, because of the noMesh
// option) has to restore them based on the existing mesh from the last read.
ErrorCode NCHelperGCRM::check_existing_mesh()
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    bool& noMesh = _readNC->noMesh;

    if( noMesh )
    {
        ErrorCode rval;

        if( localGidVerts.empty() )
        {
            // Get all vertices from current file set (it is the input set in no_mesh scenario)
            Range local_verts;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );

            if( !local_verts.empty() )
            {
                std::vector< int > gids( local_verts.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );

                // Restore localGidVerts
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
                nLocalVertices = localGidVerts.size();
            }
        }

        if( localGidEdges.empty() )
        {
            // Get all edges from current file set (it is the input set in no_mesh scenario)
            Range local_edges;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );

            if( !local_edges.empty() )
            {
                std::vector< int > gids( local_edges.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );

                // Restore localGidEdges
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
                nLocalEdges = localGidEdges.size();
            }
        }

        if( localGidCells.empty() )
        {
            // Get all cells from current file set (it is the input set in no_mesh scenario)
            Range local_cells;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );

            if( !local_cells.empty() )
            {
                std::vector< int > gids( local_cells.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );

                // Restore localGidCells
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
                nLocalCells = localGidCells.size();
            }
        }
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_mesh( Range& faces )
{
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

#ifdef MOAB_HAVE_MPI
    int rank = 0;
    int procs = 1;
    bool& isParallel = _readNC->isParallel;
    ParallelComm* myPcomm = NULL;
    if( isParallel )
    {
        myPcomm = _readNC->myPcomm;
        rank = myPcomm->proc_config().proc_rank();
        procs = myPcomm->proc_config().proc_size();
    }

    // Need to know whether we'll be creating gather mesh
    if( rank == _readNC->gatherSetRank ) createGatherSet = true;

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;
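
        // The next few lines compute a trivial partition of the global cells over procs.
        // For example, with nCells = 10 and procs = 4 (and no partition shift):
        // floor( 10 / 4 ) = 2 cells per proc and iextra = 10 % 4 = 2, so ranks 0 and 1
        // each get one extra cell, giving 1-based cell ranges of 1-3, 4-6, 7-8 and 9-10.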
        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * nCells / procs ) );

        // The starting global cell index in the GCRM file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = nCells % procs;

        // Allocate extra cells over procs
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        ErrorCode rval = redistribute_local_cells( start_cell_idx, myPcomm );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        nLocalCells = nCells;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    nLocalCells = nCells;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // Read vertices on each local cell, to get localGidVerts and cell connectivity later
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    std::vector< int > vertices_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, " nLocalCells = %d\n", (int)nLocalCells );
    dbgOut.tprintf( 1, " vertices_on_local_cells.size() = %d\n", (int)vertices_on_local_cells.size() );
#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        dbgOut.tprintf( 1, " cell_corners starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, " cell_corners endh = %d\n", (int)endh );
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                           &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                         &( vertices_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif
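
    // For example, a pentagon whose raw cell_corners row reads {1, 2, 2, 3, 4, 5}
    // (a vertex repeated in the middle) is shifted by the loop below to {1, 2, 3, 4, 5, 5},
    // so every padded polygon repeats its last vertex instead.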
    // Correct vertices_on_local_cells array. Pentagons as hexagons should have
    // a connectivity like 123455 and not 122345
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        int* pvertex = &vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL];
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k
                break;
            }
        }
    }

    // GCRM is 0 based, convert vertex indices from 0 to 1 based
    for( std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++ )
        vertices_on_local_cells[idx] += 1;

    // Create local vertices
    EntityHandle start_vertex;
    ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for GCRM mesh" );

    // Create local edges (unless NO_EDGES read option is set)
    if( !noEdges )
    {
        rval = create_local_edges( start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local edges for GCRM mesh" );
    }

    // Create local cells, padded
    rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for GCRM mesh" );

    if( createGatherSet )
    {
        EntityHandle gather_set;
        rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );

        // Create gather set vertices
        EntityHandle start_gather_set_vertex;
        rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for GCRM mesh" );

        // Create gather set edges (unless NO_EDGES read option is set)
        if( !noEdges )
        {
            rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for GCRM mesh" );
        }

        // Create gather set cells with padding
        rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for GCRM mesh" );
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
                                                               std::vector< int >& tstep_nums )
{
    Interface*& mbImpl = _readNC->mbImpl;
    std::vector< int >& dimLens = _readNC->dimLens;
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    Range* range = NULL;

    // Get vertices
    Range verts;
    ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
    assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );

    // Get edges
    Range edges;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );

    // Get faces
    Range faces;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
    assert( "Should only have a single face subrange, since they were read in one shot" && faces.psize() == 1 );

#ifdef MOAB_HAVE_MPI
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rval = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
    }
    else
        facesOwned = faces;  // not running in parallel, but still with MPI
#else
    facesOwned = faces;
#endif

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Support non-set variables with 3 dimensions like (time, cells, layers), or
        // 2 dimensions like (time, cells)
        assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );

        // For a non-set variable, time should be the first dimension
        assert( tDim == vdatas[i].varDims[0] );

        // Set up readStarts and readCounts
        vdatas[i].readStarts.resize( 3 );
        vdatas[i].readCounts.resize( 3 );

        // First: Time
        vdatas[i].readStarts[0] = 0;  // This value is timestep dependent, will be set later
        vdatas[i].readCounts[0] = 1;

        // Next: nVertices / nCells / nEdges
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                // Vertices
                // Start from the first localGidVerts
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidVerts[0] - 1;
                vdatas[i].readCounts[1] = nLocalVertices;
                range = &verts;
                break;
            case ReadNC::ENTLOCFACE:
                // Faces
                // Start from the first localGidCells
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidCells[0] - 1;
                vdatas[i].readCounts[1] = nLocalCells;
                range = &facesOwned;
                break;
            case ReadNC::ENTLOCEDGE:
                // Edges
                // Start from the first localGidEdges
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidEdges[0] - 1;
                vdatas[i].readCounts[1] = nLocalEdges;
                range = &edges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        // Finally: layers or other optional levels, it is possible that there is no
        // level dimension (numLev is 0) for this non-set variable
        if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
        vdatas[i].readStarts[2] = 0;
        vdatas[i].readCounts[2] = vdatas[i].numLev;

        // Get variable size
        vdatas[i].sz = 1;
        for( std::size_t idx = 0; idx != 3; idx++ )
            vdatas[i].sz *= vdatas[i].readCounts[idx];

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );

            if( tstep_nums[t] >= dimLens[tDim] )
            {
                MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] );
            }

            // Get the tag to read into
            if( !vdatas[i].varTags[t] )
            {
                rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
            }

            // Get ptr to tag space
            assert( 1 == range->psize() );
            void* data;
            int count;
            rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
            assert( (unsigned)count == range->size() );
            vdatas[i].varDatas[t] = data;
        }
    }

    return rval;
}

#ifdef MOAB_HAVE_PNETCDF
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
                                                            std::vector< int >& tstep_nums )
{
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // We will synchronize all these reads with the other processors,
            // so the wait will be inside this double loop; is it too much?
            size_t nb_reads = pLocalGid->psize();
            std::vector< int > requests( nb_reads ), statuss( nb_reads );
            size_t idxReq = 0;

            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of a ucd mesh, and on multiple procs,
                    // we need to read as many times as there are subranges in the
                    // localGid range; basically, we have to give a different starting
                    // point for the data, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth = pair_iter->first;
                        EntityHandle endh = pair_iter->second;  // inclusive
                        vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
                        vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );

                        // Do a partial read, in each subrange
                        // wait outside this loop
                        success =
                            NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                        &( vdatas[i].readCounts[0] ),
                                                        &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in the double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
                    }
                    assert( ic == pLocalGid->psize() );

                    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
                    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );

                    void* data = vdatas[i].varDatas[t];
                    for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                        ( (double*)data )[idx] = tmpdoubledata[idx];

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
#else
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
                                                      std::vector< int >& tstep_nums )
{
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of a ucd mesh, and on multiple procs,
                    // we need to read as many times as there are subranges in the
                    // localGid range; basically, we have to give a different starting
                    // point for the data, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth = pair_iter->first;
                        EntityHandle endh = pair_iter->second;  // Inclusive
                        vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
                        vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );

                        success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                            &( vdatas[i].readCounts[0] ),
                                                            &( tmpdoubledata[indexInDoubleArray] ) );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in the double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
                    }
                    assert( ic == pLocalGid->psize() );

                    void* data = vdatas[i].varDatas[t];
                    for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                        ( (double*)data )[idx] = tmpdoubledata[idx];

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
#endif

#ifdef MOAB_HAVE_MPI
ErrorCode NCHelperGCRM::redistribute_local_cells( int start_cell_idx, ParallelComm* pco )
{
    // If possible, apply Zoltan partition
#ifdef MOAB_HAVE_ZOLTAN
    if( _readNC->partMethod == ScdParData::RCBZOLTAN )
    {
        // Read lat/lon coordinates of cell centers, then convert spherical to
        // Cartesian, and use them as input to Zoltan partition
        int xCellVarId;
        int success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &xCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
        std::vector< double > xCell( nLocalCells );
        NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
        NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
        success = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );

        // Read y coordinates of cell centers
        int yCellVarId;
        success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &yCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
        std::vector< double > yCell( nLocalCells );
        success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );

        // Convert lon/lat/rad to x/y/z
        std::vector< double > zCell( nLocalCells );
        double rad = 8000.0;  // This is just an approximate radius
        for( int i = 0; i < nLocalCells; i++ )
        {
            double cosphi = cos( xCell[i] );
            double zmult = sin( xCell[i] );
            double xmult = cosphi * cos( yCell[i] );
            double ymult = cosphi * sin( yCell[i] );
            xCell[i] = rad * xmult;
            yCell[i] = rad * ymult;
            zCell[i] = rad * zmult;
        }

        // Zoltan partition using RCB; maybe more studies would be good, as to which partition
        // is better
        Interface*& mbImpl = _readNC->mbImpl;
        DebugOutput& dbgOut = _readNC->dbgOut;
        ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, pco, false, 0, NULL );
        ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
        delete mbZTool;

        dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
        dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

        // This is important: local cells are now redistributed, so nLocalCells might be different!
        nLocalCells = localGidCells.size();

        return MB_SUCCESS;
    }
#endif

    // By default, apply trivial partition
    localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );

    return MB_SUCCESS;
}
#endif

ErrorCode NCHelperGCRM::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
                                               EntityHandle& start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell
    // connectivity later)
    std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
    std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
    std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
               range_inserter( localGidVerts ) );
    nLocalVertices = localGidVerts.size();

    dbgOut.tprintf( 1, " localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
    dbgOut.tprintf( 1, " localGidVerts.size() = %d\n", (int)localGidVerts.size() );

    // Create local vertices
    std::vector< double* > arrays;
    ErrorCode rval =
        _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
                                                 // Might have to create gather mesh later
                                                 ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    // Add local vertices to current file set
    Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );

    // Get ptr to GID memory for local vertices
    int count = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
    assert( count == nLocalVertices );
    int* gid_data = (int*)data;
    std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );

    // Duplicate GID data, which will be used to resolve sharing
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
        assert( count == nLocalVertices );
        int bytes_per_tag = 4;
        rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
        }
        else if( 8 == bytes_per_tag )
        {  // Should be a handle tag on 64 bit machine?
            long* handle_tag_data = (long*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidVerts.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif

    // Store lev values in levVals
    std::map< std::string, ReadNC::VarData >::iterator vmit;
    if( ( vmit = varInfo.find( "layers" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "layers", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'layers' variable" );
    }
    else if( ( vmit = varInfo.find( "interfaces" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "interfaces", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'interfaces' variable" );
    }
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' or 'interfaces' variable" );
    }

    // Decide whether down is positive
    char posval[10] = { 0 };
    int success = NCFUNC( get_att_text )( _fileId, ( *vmit ).second.varId, "positive", posval );
    if( 0 == success && !strncmp( posval, "down", 4 ) )
    {
        for( std::vector< double >::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit )
            ( *dvit ) *= -1.0;
    }

    // Read x coordinates for local vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Read y coordinates for local vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Convert lon/lat/rad to x/y/z
    double* zptr = arrays[2];
    double rad = 8000.0 + levVals[0];
    for( int i = 0; i < nLocalVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult = sin( yptr[i] );
        double xmult = cosphi * cos( xptr[i] );
        double ymult = cosphi * sin( xptr[i] );
        xptr[i] = rad * xmult;
        yptr[i] = rad * ymult;
        zptr[i] = rad * zmult;
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_local_edges( EntityHandle start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;

    // Read edges on each local cell, to get localGidEdges
    int edgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_edges", &edgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_edges" );

    std::vector< int > edges_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, " edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );

#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        dbgOut.tprintf( 1, " starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, " endh = %d\n", (int)endh );
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                           &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                         &( edges_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_edges data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // GCRM is 0 based, convert edge indices from 0 to 1 based
    for( std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++ )
        edges_on_local_cells[idx] += 1;

    // Collect local edges
    std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
    std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
    nLocalEdges = localGidEdges.size();

    dbgOut.tprintf( 1, " localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
    dbgOut.tprintf( 1, " localGidEdges.size() = %d\n", (int)localGidEdges.size() );

    // Create local edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_edges = NULL;
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
                                                     // Might have to create gather mesh later
                                                     ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );

    // Add local edges to current file set
    Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );

    // Get ptr to GID memory for edges
    int count = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
    assert( count == nLocalEdges );
    int* gid_data = (int*)data;
    std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );

    int verticesOnEdgeVarId;

    // Read vertices on each local edge, to get edge connectivity
    success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
    // Utilize the memory storage pointed by conn_arr_edges
    int* vertices_on_local_edges = (int*)conn_arr_edges;
#ifdef MOAB_HAVE_PNETCDF
    nb_reads = localGidEdges.psize();
    requests.resize( nb_reads );
    statuss.resize( nb_reads );
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                           &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                         &( vertices_on_local_edges[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * 2;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif
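
    // For illustration of the in-place conversion below: the netCDF read stored
    // 2 * nLocalEdges int values at the start of a buffer that actually holds
    // nLocalEdges * 2 EntityHandle slots. On typical 64-bit builds an EntityHandle
    // is wider than an int, so handle slot j overlaps int entries 2j and 2j+1;
    // walking backward guarantees each int is read before its bytes are overwritten.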
    // Populate connectivity data for local edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        // Note, indices stored in vertices_on_local_edges are 0 based
        int global_vert_idx = vertices_on_local_edges[edge_vert] + 1;  // Global vertex index, 1 based
        int local_vert_idx = localGidVerts.index( global_vert_idx );   // Local vertex index, 0 based
        assert( local_vert_idx != -1 );
        conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
                                                   EntityHandle start_vertex,
                                                   Range& faces )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;

    // Create cells
    EntityHandle start_element;
    EntityHandle* conn_arr_local_cells = NULL;
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nLocalCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
                                                     conn_arr_local_cells,
                                                     // Might have to create gather mesh later
                                                     ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
    faces.insert( start_element, start_element + nLocalCells - 1 );

    // Add local cells to current file set
    Range local_cells_range( start_element, start_element + nLocalCells - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );

    // Get ptr to GID memory for local cells
    int count = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
    assert( count == nLocalCells );
    int* gid_data = (int*)data;
    std::copy( localGidCells.begin(), localGidCells.end(), gid_data );

    // Set connectivity array with proper local vertices handles
    // vertices_on_local_cells array was already corrected to have
    // the last vertices repeated for pentagons, e.g. 122345 => 123455
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        for( int i = 0; i < EDGES_PER_CELL; i++ )
        {
            // Note, indices stored in vertices_on_local_cells are 1 based
            EntityHandle global_vert_idx =
                vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL + i];  // Global vertex index, 1 based
            int local_vert_idx = localGidVerts.index( global_vert_idx );       // Local vertex index, 0 based
            assert( local_vert_idx != -1 );
            conn_arr_local_cells[local_cell_idx * EDGES_PER_CELL + i] = start_vertex + local_vert_idx;
        }
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_gather_set_vertices( EntityHandle gather_set, EntityHandle& gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag = _readNC->mpFileIdTag;

    // Create gather set vertices
    std::vector< double* > arrays;
    // Don't need to specify allocation number here, because we know enough vertices were created
    // before
    ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );

    // Add vertices to the gather set
    Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );

    // Read x coordinates for gather set vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    NCDF_SIZE read_start = 0;
    NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
#endif

    // Read y coordinates for gather set vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
#endif

    // Convert lon/lat/rad to x/y/z
    double* zptr = arrays[2];
    double rad = 8000.0 + levVals[0];
    for( int i = 0; i < nVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult = sin( yptr[i] );
        double xmult = cosphi * cos( xptr[i] );
        double ymult = cosphi * sin( xptr[i] );
        xptr[i] = rad * xmult;
        yptr[i] = rad * ymult;
        zptr[i] = rad * zmult;
    }

    // Get ptr to GID memory for gather set vertices
    int count = 0;
    void* data = NULL;
    rval =
        mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
    assert( count == nVertices );
    int* gid_data = (int*)data;
    for( int j = 1; j <= nVertices; j++ )
        gid_data[j - 1] = j;

    // Set the file id tag too; it should be something bigger, not interfering with the global id
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
                                    data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
        assert( count == nVertices );
        int bytes_per_tag = 4;
        rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            for( int j = 1; j <= nVertices; j++ )
                gid_data[j - 1] = nVertices + j;  // Bigger than global id tag
        }
        else if( 8 == bytes_per_tag )
        {  // Should be a handle tag on 64 bit machine?
            long* handle_tag_data = (long*)data;
            for( int j = 1; j <= nVertices; j++ )
                handle_tag_data[j - 1] = nVertices + j;  // Bigger than global id tag
        }
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_gather_set_edges( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Create gather set edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_gather_set_edges = NULL;
    // Don't need to specify allocation number here, because we know enough edges were created
    // before
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );

    // Add edges to the gather set
    Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );

    // Read vertices on each edge
    int verticesOnEdgeVarId;
    int success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
    // Utilize the memory storage pointed by conn_arr_gather_set_edges
    int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
    NCDF_SIZE read_starts[2] = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nEdges ), 2 };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
#endif

    // Populate connectivity data for gather set edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        // Note, indices stored in vertices_on_gather_set_edges are 0 based
        int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert];  // Global vertex index, 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_padded_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Create gather set cells
    EntityHandle start_element;
    EntityHandle* conn_arr_gather_set_cells = NULL;
    // Don't need to specify allocation number here, because we know enough cells were created
    // before
    ErrorCode rval = _readNC->readMeshIface->get_element_connect( nCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
                                                                  conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );

    // Add cells to the gather set
    Range gather_set_cells_range( start_element, start_element + nCells - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );

    // Read vertices on each gather set cell (connectivity)
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    // Utilize the memory storage pointed by conn_arr_gather_set_cells
    int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
    NCDF_SIZE read_starts[2] = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
#endif

    // Correct gather set cell vertices array in the same way as local cell vertices array
    // Pentagons as hexagons should have a connectivity like 123455 and not 122345
    for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
    {
        int* pvertex = vertices_on_gather_set_cells + gather_set_cell_idx * EDGES_PER_CELL;
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k
                break;
            }
        }
    }

    // Populate connectivity data for gather set cells
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for( int cell_vert = nCells * EDGES_PER_CELL - 1; cell_vert >= 0; cell_vert-- )
    {
        // Note, indices stored in vertices_on_gather_set_cells are 0 based
        int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert];  // Global vertex index, 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}

}  // namespace moab
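
The helper above is normally exercised indirectly, through MOAB's generic file-loading interface rather than by instantiating NCHelperGCRM directly. The following is a minimal usage sketch under stated assumptions: it assumes a MOAB build with the NetCDF-based readers enabled, and the file name "gcrm_r3.nc", the variable name "vorticity", and the exact option string are illustrative placeholders rather than values mandated by this reader. The GCRM code path is selected automatically by NCHelperGCRM::can_read_file() whenever the file defines a "cells" dimension.

#include "moab/Core.hpp"
#include "moab/Range.hpp"
#include <iostream>

int main()
{
    moab::Core mb;

    // A file set to receive the entities created by the reader
    moab::EntityHandle file_set;
    moab::ErrorCode rval = mb.create_meshset( moab::MESHSET_SET, file_set );
    if( moab::MB_SUCCESS != rval ) return 1;

    // NO_EDGES corresponds to the noEdges flag used above (skip edge creation);
    // VARIABLE selects which netCDF variables are read into tags.
    rval = mb.load_file( "gcrm_r3.nc", &file_set, "NO_EDGES;VARIABLE=vorticity" );
    if( moab::MB_SUCCESS != rval ) return 1;

    // The GCRM cells are created as padded polygons of dimension 2
    moab::Range cells;
    rval = mb.get_entities_by_dimension( file_set, 2, cells );
    if( moab::MB_SUCCESS != rval ) return 1;

    std::cout << "Read " << cells.size() << " GCRM cells" << std::endl;
    return 0;
}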