/*
 * MOAB: Mesh Oriented datABase (version 5.4.1)
 * Array-based unstructured mesh data structure
 */
#include "NCHelperMPAS.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/FileOptions.hpp"
#include "moab/SpectralMeshTool.hpp"
#include "MBTagConventions.hpp"

#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"
#endif

#include <cmath>
00012
00013 namespace moab
00014 {
00015
00016 const int DEFAULT_MAX_EDGES_PER_CELL = 10;
00017
00018 NCHelperMPAS::NCHelperMPAS( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
00019 : UcdNCHelper( readNC, fileId, opts, fileSet ), maxEdgesPerCell( DEFAULT_MAX_EDGES_PER_CELL ), numCellGroups( 0 ),
00020 createGatherSet( false )
00021 {
00022 // Ignore variables containing topological information
00023 ignoredVarNames.insert( "nEdgesOnEdge" );
00024 ignoredVarNames.insert( "nEdgesOnCell" );
00025 ignoredVarNames.insert( "edgesOnVertex" );
00026 ignoredVarNames.insert( "cellsOnVertex" );
00027 ignoredVarNames.insert( "verticesOnEdge" );
00028 ignoredVarNames.insert( "edgesOnEdge" );
00029 ignoredVarNames.insert( "cellsOnEdge" );
00030 ignoredVarNames.insert( "verticesOnCell" );
00031 ignoredVarNames.insert( "edgesOnCell" );
00032 ignoredVarNames.insert( "cellsOnCell" );
00033 // ignore variables containing mesh related info, that are currently saved as variable tags on
00034 // file set ; if needed, these should be saved as dense tags, and read accordingly, in parallel
00035 ignoredVarNames.insert( "weightsOnEdge" );
00036 ignoredVarNames.insert( "angleEdge" );
00037 ignoredVarNames.insert( "areaCell" );
00038 ignoredVarNames.insert( "areaTriangle" );
00039 ignoredVarNames.insert( "dcEdge" );
00040 ignoredVarNames.insert( "dv1Edge" );
00041 ignoredVarNames.insert( "dv2Edge" );
00042 ignoredVarNames.insert( "fEdge" );
00043 ignoredVarNames.insert( "fVertex" );
00044 ignoredVarNames.insert( "h_s" );
00045 ignoredVarNames.insert( "kiteAreasOnVertex" );
00046 ignoredVarNames.insert( "latCell" );
00047 ignoredVarNames.insert( "latEdge" );
00048 ignoredVarNames.insert( "latVertex" );
00049 ignoredVarNames.insert( "lonCell" );
00050 ignoredVarNames.insert( "lonEdge" );
00051 ignoredVarNames.insert( "lonVertex" );
00052 ignoredVarNames.insert( "meshDensity" );
00053 ignoredVarNames.insert( "xCell" );
00054 ignoredVarNames.insert( "xEdge" );
00055 ignoredVarNames.insert( "xVertex" );
00056 ignoredVarNames.insert( "yCell" );
00057 ignoredVarNames.insert( "yEdge" );
00058 ignoredVarNames.insert( "yVertex" );
00059 ignoredVarNames.insert( "zCell" );
00060 ignoredVarNames.insert( "zEdge" );
00061 ignoredVarNames.insert( "zVertex" );
00062 // Ignore variables for index conversion
00063 ignoredVarNames.insert( "indexToVertexID" );
00064 ignoredVarNames.insert( "indexToEdgeID" );
00065 ignoredVarNames.insert( "indexToCellID" );
00066 }
00067
00068 bool NCHelperMPAS::can_read_file( ReadNC* readNC )
00069 {
00070 std::vector< std::string >& dimNames = readNC->dimNames;
00071
00072 // If dimension name "vertexDegree" exists then it should be the MPAS grid
00073 if( std::find( dimNames.begin(), dimNames.end(), std::string( "vertexDegree" ) ) != dimNames.end() ) return true;
00074
00075 return false;
00076 }
00077
// Initialize mesh metadata from the MPAS file header: dimension indices and
// sizes (time, cells, edges, vertices, vertical levels), time coordinate
// values, and, for every variable, its entity location type and level count.
// Returns MB_FAILURE if a required dimension is missing, MB_INVALID_SIZE if
// maxEdges exceeds the supported DEFAULT_MAX_EDGES_PER_CELL.
ErrorCode NCHelperMPAS::init_mesh_vals()
{
    std::vector< std::string >& dimNames = _readNC->dimNames;
    std::vector< int >& dimLens = _readNC->dimLens;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    ErrorCode rval;
    unsigned int idx;
    std::vector< std::string >::iterator vit;

    // Get max edges per cell reported in the MPAS file header
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "maxEdges" ) ) != dimNames.end() )
    {
        idx = vit - dimNames.begin();
        maxEdgesPerCell = dimLens[idx];
        // Connectivity buffers are sized for at most DEFAULT_MAX_EDGES_PER_CELL
        if( maxEdgesPerCell > DEFAULT_MAX_EDGES_PER_CELL )
        {
            MB_SET_ERR( MB_INVALID_SIZE,
                        "maxEdgesPerCell read from the MPAS file header has exceeded " << DEFAULT_MAX_EDGES_PER_CELL );
        }
    }

    // Look for time dimension (either "Time" or "time")
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'Time' or 'time' dimension" );
    }
    tDim = idx;
    nTimeSteps = dimLens[idx];

    // Get number of cells
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nCells" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'nCells' dimension" );
    }
    cDim = idx;
    nCells = dimLens[idx];

    // Get number of edges
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nEdges" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'nEdges' dimension" );
    }
    eDim = idx;
    nEdges = dimLens[idx];

    // Get number of vertices
    vDim = -1;
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertices" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'nVertices' dimension" );
    }
    vDim = idx;
    nVertices = dimLens[idx];

    // Get number of vertex levels
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevels" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        // NOTE(review): when nVertLevels is absent, idx still holds the value
        // found for nVertices above, so levDim/nLevels below fall back to that
        // stale index — presumably acceptable for mesh-only files; confirm.
        std::cerr << "Warning: dimension nVertLevels not found in header.\nThe file may contain "
                     "just the mesh"
                  << std::endl;
    }
    levDim = idx;
    nLevels = dimLens[idx];

    // Dimension indices for other optional levels
    std::vector< unsigned int > opt_lev_dims;

    // Get index of vertex levels P1
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP1" ) ) != dimNames.end() )
    {
        idx = vit - dimNames.begin();
        opt_lev_dims.push_back( idx );
    }

    // Get index of vertex levels P2
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP2" ) ) != dimNames.end() )
    {
        idx = vit - dimNames.begin();
        opt_lev_dims.push_back( idx );
    }

    // Get index of soil levels
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nSoilLevels" ) ) != dimNames.end() )
    {
        idx = vit - dimNames.begin();
        opt_lev_dims.push_back( idx );
    }

    std::map< std::string, ReadNC::VarData >::iterator vmit;

    // Store time coordinate values in tVals
    if( nTimeSteps > 0 )
    {
        // Note, two possible types for xtime variable: double(Time) or char(Time, StrLen)
        if( ( vmit = varInfo.find( "xtime" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
        {
            // If xtime variable is double type, read time coordinate values to tVals
            rval = read_coordinate( "xtime", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'xtime' variable" );
        }
        else
        {
            // If xtime variable does not exist, or it is string type, set dummy values to tVals
            for( int t = 0; t < nTimeSteps; t++ )
                tVals.push_back( (double)t );
        }
    }

    // For each variable, determine the entity location type and number of levels
    for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
    {
        ReadNC::VarData& vd = ( *vmit ).second;

        // Default entLoc is ENTLOCSET; only time-dependent variables get an
        // entity location based on which mesh dimension they vary over
        if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
        {
            if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCVERT;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCEDGE;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCFACE;
        }

        // Default numLev is 0
        if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
            vd.numLev = nLevels;
        else
        {
            // If nVertLevels dimension is not found, try other optional levels such as
            // nVertLevelsP1
            for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
            {
                if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
                {
                    vd.numLev = dimLens[opt_lev_dims[i]];
                    break;
                }
            }
        }

        // Hack: ignore variables with more than 3 dimensions, e.g. tracers(Time, nCells,
        // nVertLevels, nTracers)
        if( vd.varDims.size() > 3 ) ignoredVarNames.insert( vd.varName );
    }

    // Hack: create dummy variables for dimensions (like nCells) with no corresponding coordinate
    // variables
    rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );

    return MB_SUCCESS;
}
00242
00243 // When noMesh option is used on this read, the old ReadNC class instance for last read can get out
00244 // of scope (and deleted). The old instance initialized some variables properly when the mesh was
00245 // created, but they are now lost. The new instance (will not create the mesh with noMesh option)
00246 // has to restore them based on the existing mesh from last read
// When noMesh option is used on this read, the old ReadNC class instance for last read can get out
// of scope (and deleted). The old instance initialized some variables properly when the mesh was
// created, but they are now lost. The new instance (will not create the mesh with noMesh option)
// has to restore them based on the existing mesh from last read.
// Restores: numCellGroups (from the __NUM_CELL_GROUPS tag on the file set), and
// the local GLOBAL_ID ranges / counts for vertices, edges and cells (plus the
// cellHandleToGlobalID map when cells span multiple groups).
ErrorCode NCHelperMPAS::check_existing_mesh()
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    bool& noMesh = _readNC->noMesh;

    if( noMesh )
    {
        ErrorCode rval;

        // Restore numCellGroups
        if( 0 == numCellGroups )
        {
            Tag numCellGroupsTag;
            // NOTE(review): MB_CHK_SET_ERR returns on failure, so the
            // "if( MB_SUCCESS == rval )" guard below is always true here.
            rval = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag );MB_CHK_SET_ERR( rval, "Trouble getting __NUM_CELL_GROUPS tag" );
            if( MB_SUCCESS == rval ) rval = mbImpl->tag_get_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble getting data of __NUM_CELL_GROUPS tag" );
        }

        if( localGidVerts.empty() )
        {
            // Get all vertices from current file set (it is the input set in no_mesh scenario)
            Range local_verts;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );

            if( !local_verts.empty() )
            {
                std::vector< int > gids( local_verts.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );

                // Restore localGidVerts
                // reverse copy into the Range inserter -- presumably inserting
                // in descending order is faster here; TODO confirm
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
                nLocalVertices = localGidVerts.size();
            }
        }

        if( localGidEdges.empty() )
        {
            // Get all edges from current file set (it is the input set in no_mesh scenario)
            Range local_edges;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );

            if( !local_edges.empty() )
            {
                std::vector< int > gids( local_edges.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );

                // Restore localGidEdges
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
                nLocalEdges = localGidEdges.size();
            }
        }

        if( localGidCells.empty() )
        {
            // Get all cells from current file set (it is the input set in no_mesh scenario)
            Range local_cells;
            rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );

            if( !local_cells.empty() )
            {
                std::vector< int > gids( local_cells.size() );

                // !IMPORTANT : this has to be the GLOBAL_ID tag
                rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );

                // Restore localGidCells
                std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
                nLocalCells = localGidCells.size();

                if( numCellGroups > 1 )
                {
                    // Restore cellHandleToGlobalID map, needed later when cell
                    // tag data is scattered across non-contiguous face chunks
                    Range::const_iterator rit;
                    int i;
                    for( rit = local_cells.begin(), i = 0; rit != local_cells.end(); ++rit, i++ )
                        cellHandleToGlobalID[*rit] = gids[i];
                }
            }
        }
    }

    return MB_SUCCESS;
}
00334
// Create the local MPAS mesh: compute the trivial (optionally shifted and
// Zoltan-redistributed) cell partition, read per-cell edge counts and cell
// connectivity from the file, create local vertices, edges (unless NO_EDGES)
// and cells (padded or mixed), and optionally the gather-set mesh on the
// gather rank. The created faces are returned in 'faces'.
ErrorCode NCHelperMPAS::create_mesh( Range& faces )
{
    Interface*& mbImpl = _readNC->mbImpl;
    bool& noMixedElements = _readNC->noMixedElements;
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

#ifdef MOAB_HAVE_MPI
    int rank = 0;
    int procs = 1;
    bool& isParallel = _readNC->isParallel;
    ParallelComm* myPcomm = NULL;
    if( isParallel )
    {
        myPcomm = _readNC->myPcomm;
        rank = myPcomm->proc_config().proc_rank();
        procs = myPcomm->proc_config().proc_size();
    }

    // Need to know whether we'll be creating gather mesh
    if( rank == _readNC->gatherSetRank ) createGatherSet = true;

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * nCells / procs ) );

        // The starting global cell index in the MPAS file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = nCells % procs;

        // Allocate extra cells over procs: the first 'iextra' (shifted) ranks
        // each take one more cell, and later ranks shift their start accordingly
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        ErrorCode rval = redistribute_local_cells( start_cell_idx, myPcomm );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        nLocalCells = nCells;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    nLocalCells = nCells;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // Read number of edges on each local cell, to calculate actual maxEdgesPerCell
    int nEdgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
    std::vector< int > num_edges_on_local_cells( nLocalCells );
#ifdef MOAB_HAVE_PNETCDF
    // One nonblocking request per contiguous subrange of localGidCells
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        // Global IDs are 1-based, file offsets are 0-based
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
                                           &( num_edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
                                         &( num_edges_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Get local maxEdgesPerCell on this proc
    int local_max_edges_per_cell =
        *( std::max_element( num_edges_on_local_cells.begin(), num_edges_on_local_cells.end() ) );
    maxEdgesPerCell = local_max_edges_per_cell;

    // If parallel, do a MPI_Allreduce to get global maxEdgesPerCell across all procs,
    // so connectivity arrays are padded uniformly on all procs
#ifdef MOAB_HAVE_MPI
    if( procs >= 2 )
    {
        int global_max_edges_per_cell;
        ParallelComm*& myPcomm = _readNC->myPcomm;
        MPI_Allreduce( &local_max_edges_per_cell, &global_max_edges_per_cell, 1, MPI_INT, MPI_MAX,
                       myPcomm->proc_config().proc_comm() );
        assert( local_max_edges_per_cell <= global_max_edges_per_cell );
        maxEdgesPerCell = global_max_edges_per_cell;
        if( 0 == rank ) dbgOut.tprintf( 1, " global_max_edges_per_cell = %d\n", global_max_edges_per_cell );
    }
#endif

    // Read vertices on each local cell, to get localGidVerts and cell connectivity later
    int verticesOnCellVarId;
    success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
    std::vector< int > vertices_on_local_cells( nLocalCells * maxEdgesPerCell );
#ifdef MOAB_HAVE_PNETCDF
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( maxEdgesPerCell ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                           &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                         &( vertices_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Correct local cell vertices array, replace the padded vertices with the last vertices
    // in the corresponding cells; sometimes the padded vertices are 0, sometimes a large
    // vertex id. Make sure they are consistent to our padded option
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        int num_edges = num_edges_on_local_cells[local_cell_idx];
        int idx_in_local_vert_arr = local_cell_idx * maxEdgesPerCell;
        int last_vert_idx = vertices_on_local_cells[idx_in_local_vert_arr + num_edges - 1];
        for( int i = num_edges; i < maxEdgesPerCell; i++ )
            vertices_on_local_cells[idx_in_local_vert_arr + i] = last_vert_idx;
    }

    // Create local vertices
    EntityHandle start_vertex;
    ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for MPAS mesh" );

    // Create local edges (unless NO_EDGES read option is set)
    if( !noEdges )
    {
        rval = create_local_edges( start_vertex, num_edges_on_local_cells );MB_CHK_SET_ERR( rval, "Failed to create local edges for MPAS mesh" );
    }

    // Create local cells, either unpadded or padded
    if( noMixedElements )
    {
        rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for MPAS mesh" );
    }
    else
    {
        rval = create_local_cells( vertices_on_local_cells, num_edges_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create local cells for MPAS mesh" );
    }

    // Set tag for numCellGroups, so a later noMesh read can restore it
    // (see check_existing_mesh)
    Tag numCellGroupsTag = 0;
    rval = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag,
                                   MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating __NUM_CELL_GROUPS tag" );
    rval = mbImpl->tag_set_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble setting data to __NUM_CELL_GROUPS tag" );

    if( createGatherSet )
    {
        EntityHandle gather_set;
        rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );

        // Create gather set vertices
        EntityHandle start_gather_set_vertex;
        rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for MPAS mesh" );

        // Create gather set edges (unless NO_EDGES read option is set)
        if( !noEdges )
        {
            rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for MPAS mesh" );
        }

        // Create gather set cells, either unpadded or padded
        if( noMixedElements )
        {
            rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for MPAS mesh" );
        }
        else
        {
            rval = create_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set cells for MPAS mesh" );
        }
    }

    return MB_SUCCESS;
}
00557
// Prepare to read non-set (entity-located) variables: set per-variable
// readStarts/readCounts for the (Time, entity, level) hyperslab, compute the
// read size, and allocate/locate the tag storage for each requested timestep.
// Tag-space allocation is deferred (varDatas left NULL) for cell variables
// that span more than one contiguous chunk of faces (numCellGroups > 1).
ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
                                                               std::vector< int >& tstep_nums )
{
    Interface*& mbImpl = _readNC->mbImpl;
    std::vector< int >& dimLens = _readNC->dimLens;
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    Range* range = NULL;

    // Get vertices
    Range verts;
    ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
    assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );

    // Get edges
    Range edges;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );

    // Get faces
    Range faces;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
    // Note, for MPAS faces.psize() can be more than 1

#ifdef MOAB_HAVE_MPI
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        // Only owned faces get tag data; ghosts are filtered out
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rval = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
    }
    else
        facesOwned = faces;  // not running in parallel, but still with MPI
#else
    facesOwned = faces;
#endif

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Support non-set variables with 3 dimensions like (Time, nCells, nVertLevels), or
        // 2 dimensions like (Time, nCells)
        assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );

        // For a non-set variable, time should be the first dimension
        assert( tDim == vdatas[i].varDims[0] );

        // Set up readStarts and readCounts
        vdatas[i].readStarts.resize( 3 );
        vdatas[i].readCounts.resize( 3 );

        // First: Time
        vdatas[i].readStarts[0] = 0;  // This value is timestep dependent, will be set later
        vdatas[i].readCounts[0] = 1;

        // Next: nVertices / nCells / nEdges
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                // Vertices
                // Start from the first localGidVerts
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidVerts[0] - 1;
                vdatas[i].readCounts[1] = nLocalVertices;
                range = &verts;
                break;
            case ReadNC::ENTLOCFACE:
                // Faces
                // Start from the first localGidCells
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidCells[0] - 1;
                vdatas[i].readCounts[1] = nLocalCells;
                range = &facesOwned;
                break;
            case ReadNC::ENTLOCEDGE:
                // Edges
                // Start from the first localGidEdges
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidEdges[0] - 1;
                vdatas[i].readCounts[1] = nLocalEdges;
                range = &edges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        // Finally: nVertLevels or other optional levels, it is possible that there is no
        // level dimension (numLev is 0) for this non-set variable, e.g. (Time, nCells)
        if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
        vdatas[i].readStarts[2] = 0;
        vdatas[i].readCounts[2] = vdatas[i].numLev;

        // Get variable size (product of the per-dimension read counts)
        vdatas[i].sz = 1;
        for( std::size_t idx = 0; idx != 3; idx++ )
            vdatas[i].sz *= vdatas[i].readCounts[idx];

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );

            if( tstep_nums[t] >= dimLens[tDim] )
            {
                MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] );
            }

            // Get the tag to read into
            if( !vdatas[i].varTags[t] )
            {
                rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
            }

            // Get ptr to tag space
            if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
            {
                // For a cell variable that is NOT on one contiguous chunk of faces, defer its tag
                // space allocation
                vdatas[i].varDatas[t] = NULL;
            }
            else
            {
                assert( 1 == range->psize() );
                void* data;
                int count;
                rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
                assert( (unsigned)count == range->size() );
                vdatas[i].varDatas[t] = data;
            }
        }
    }

    return rval;
}
00693
00694 #ifdef MOAB_HAVE_PNETCDF
// PnetCDF variant: read the requested non-set variables into the tag space
// prepared by read_ucd_variables_to_nonset_allocate, using nonblocking reads
// (one request per contiguous GID subrange) followed by a wait_all per
// variable per timestep.
ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
                                                            std::vector< int >& tstep_nums )
{
    Interface*& mbImpl = _readNC->mbImpl;
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // We will synchronize all these reads with the other processors,
            // so the wait will be inside this double loop; is it too much?
            size_t nb_reads = pLocalGid->psize();
            std::vector< int > requests( nb_reads ), statuss( nb_reads );
            size_t idxReq = 0;

            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of ucd mesh, and on multiple proc,
                    // we need to read as many times as subranges we have in the
                    // localGid range;
                    // basically, we have to give a different point
                    // for data to start, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth = pair_iter->first;
                        EntityHandle endh = pair_iter->second;  // Inclusive
                        // GIDs are 1-based, file offsets 0-based
                        vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
                        vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );

                        // Do a partial read, in each subrange
                        // Wait outside this loop
                        success =
                            NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                        &( vdatas[i].readCounts[0] ),
                                                        &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
                    }
                    assert( ic == pLocalGid->psize() );

                    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
                    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );

                    if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
                    {
                        // For a cell variable that is NOT on one contiguous chunk of faces,
                        // allocate tag space for each cell group, and utilize cellHandleToGlobalID
                        // map to read tag data
                        Range::iterator iter = facesOwned.begin();
                        while( iter != facesOwned.end() )
                        {
                            int count;
                            void* ptr;
                            rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );

                            for( int j = 0; j < count; j++ )
                            {
                                int global_cell_idx =
                                    cellHandleToGlobalID[*( iter + j )];  // Global cell index, 1 based
                                int local_cell_idx =
                                    localGidCells.index( global_cell_idx );  // Local cell index, 0 based
                                assert( local_cell_idx != -1 );
                                for( int level = 0; level < vdatas[i].numLev; level++ )
                                    ( (double*)ptr )[j * vdatas[i].numLev + level] =
                                        tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
                            }

                            iter += count;
                        }
                    }
                    else
                    {
                        // Contiguous case: copy straight into the tag space
                        void* data = vdatas[i].varDatas[t];
                        for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                            ( (double*)data )[idx] = tmpdoubledata[idx];
                    }

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
00834 #else
// Serial / non-PnetCDF read path (the #else of the PnetCDF variant above):
// for each requested variable and timestep, read the values covering the locally
// owned entities (vertices, faces or edges) from the file and copy them into the
// tag storage allocated by read_ucd_variables_to_nonset_allocate().
// Reads are done per subrange of the local global-ID range, since the local IDs
// are generally not one contiguous chunk.
ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
                                                      std::vector< int >& tstep_nums )
{
    Interface*& mbImpl  = _readNC->mbImpl;
    bool& noEdges       = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    // Allocate tag space / data pointers for every requested variable and timestep
    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Select the local global-ID range that matches this variable's entity location
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        // Total number of values (entities * levels) to read for this variable
        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of ucd mesh, and on multiple proc,
                    // we need to read as many times as subranges we have in the
                    // localGid range;
                    // basically, we have to give a different point
                    // for data to start, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic                 = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth = pair_iter->first;
                        EntityHandle endh   = pair_iter->second;  // Inclusive
                        // Global IDs are 1-based; file offsets are 0-based
                        vdatas[i].readStarts[1] = (NCDF_SIZE)( starth - 1 );
                        vdatas[i].readCounts[1] = (NCDF_SIZE)( endh - starth + 1 );

                        // Blocking collective read of this subrange (no request/wait here,
                        // unlike the PnetCDF variant)
                        success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                            &( vdatas[i].readCounts[0] ),
                                                            &( tmpdoubledata[indexInDoubleArray] ) );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
                    }
                    assert( ic == pLocalGid->psize() );

                    if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
                    {
                        // For a cell variable that is NOT on one contiguous chunk of faces,
                        // allocate tag space for each cell group, and utilize cellHandleToGlobalID
                        // map to read tag data
                        Range::iterator iter = facesOwned.begin();
                        while( iter != facesOwned.end() )
                        {
                            int count;
                            void* ptr;
                            rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );

                            for( int j = 0; j < count; j++ )
                            {
                                int global_cell_idx =
                                    cellHandleToGlobalID[*( iter + j )];  // Global cell index, 1 based
                                int local_cell_idx =
                                    localGidCells.index( global_cell_idx );  // Local cell index, 0 based
                                assert( local_cell_idx != -1 );
                                // Copy all levels of this cell from the read buffer into tag memory
                                for( int level = 0; level < vdatas[i].numLev; level++ )
                                    ( (double*)ptr )[j * vdatas[i].numLev + level] =
                                        tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
                            }

                            iter += count;
                        }
                    }
                    else
                    {
                        // Contiguous case: copy the whole read buffer straight into tag memory
                        void* data = vdatas[i].varDatas[t];
                        for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                            ( (double*)data )[idx] = tmpdoubledata[idx];
                    }

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
00962 #endif
00963
00964 #ifdef MOAB_HAVE_MPI
00965 ErrorCode NCHelperMPAS::redistribute_local_cells( int start_cell_idx, ParallelComm * pco )
00966 {
00967 // If possible, apply Zoltan partition
00968 #ifdef MOAB_HAVE_ZOLTAN
00969 if( ScdParData::RCBZOLTAN == _readNC->partMethod )
00970 {
00971 // Read x coordinates of cell centers
00972 int xCellVarId;
00973 int success = NCFUNC( inq_varid )( _fileId, "xCell", &xCellVarId );
00974 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xCell" );
00975 std::vector< double > xCell( nLocalCells );
00976 NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
00977 NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
00978 success = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
00979 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xCell data" );
00980
00981 // Read y coordinates of cell centers
00982 int yCellVarId;
00983 success = NCFUNC( inq_varid )( _fileId, "yCell", &yCellVarId );
00984 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yCell" );
00985 std::vector< double > yCell( nLocalCells );
00986 success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
00987 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yCell data" );
00988
00989 // Read z coordinates of cell centers
00990 int zCellVarId;
00991 success = NCFUNC( inq_varid )( _fileId, "zCell", &zCellVarId );
00992 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zCell" );
00993 std::vector< double > zCell( nLocalCells );
00994 success = NCFUNCAG( _vara_double )( _fileId, zCellVarId, &read_start, &read_count, &zCell[0] );
00995 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zCell data" );
00996
00997 // Zoltan partition using RCB; maybe more studies would be good, as to which partition
00998 // is better
00999 Interface*& mbImpl = _readNC->mbImpl;
01000 DebugOutput& dbgOut = _readNC->dbgOut;
01001 ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, pco, false, 0, NULL );
01002 ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
01003 delete mbZTool;
01004
01005 dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
01006 dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
01007
01008 // This is important: local cells are now redistributed, so nLocalCells might be different!
01009 nLocalCells = localGidCells.size();
01010
01011 return MB_SUCCESS;
01012 }
01013 #endif
01014
01015 // By default, apply trivial partition
01016 localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
01017
01018 return MB_SUCCESS;
01019 }
01020 #endif
01021
// Create the MOAB vertices owned by this process and fill in their coordinates.
// The set of local vertex global IDs is derived from the vertices referenced by
// the local cells; coordinates (xVertex/yVertex/zVertex) are then read from the
// file in one partial read per contiguous subrange of global IDs.
// With PnetCDF, the subrange reads are posted as non-blocking requests and
// completed with a single wait_all per coordinate axis.
//
// @param vertices_on_local_cells  global vertex IDs (1-based) per local cell,
//                                 padded to maxEdgesPerCell entries per cell
// @param start_vertex             output: handle of the first created vertex
ErrorCode NCHelperMPAS::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
                                               EntityHandle& start_vertex )
{
    Interface*& mbImpl         = _readNC->mbImpl;
    Tag& mGlobalIdTag          = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag    = _readNC->mpFileIdTag;
    DebugOutput& dbgOut        = _readNC->dbgOut;

    // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell
    // connectivity later)
    std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
    std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
    // Insert in reverse order for Range efficiency; duplicates collapse in the Range
    std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
               range_inserter( localGidVerts ) );
    nLocalVertices = localGidVerts.size();

    dbgOut.tprintf( 1, "   localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
    dbgOut.tprintf( 1, "   localGidVerts.size() = %d\n", (int)localGidVerts.size() );

    // Create local vertices
    std::vector< double* > arrays;
    ErrorCode rval =
        _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
                                                 // Might have to create gather mesh later
                                                 ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    // Add local vertices to current file set
    Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );

    // Get ptr to GID memory for local vertices
    int count  = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
    assert( count == nLocalVertices );
    int* gid_data = (int*)data;
    std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );

    // Duplicate GID data, which will be used to resolve sharing
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
        assert( count == nLocalVertices );
        int bytes_per_tag = 4;
        rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
        }
        else if( 8 == bytes_per_tag )
        {  // Should be a handle tag on 64 bit machine?
            long* handle_tag_data = (long*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    // One non-blocking request per contiguous subrange of vertex global IDs
    size_t nb_reads = localGidVerts.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif

    // Read x coordinates for local vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        // Global IDs are 1-based; file offsets are 0-based
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Read y coordinates for local vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
#ifdef MOAB_HAVE_PNETCDF
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Read z coordinates for local vertices
    double* zptr = arrays[2];
    int zVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
#ifdef MOAB_HAVE_PNETCDF
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        NCDF_SIZE read_start = (NCDF_SIZE)( starth - 1 );
        NCDF_SIZE read_count = (NCDF_SIZE)( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    return MB_SUCCESS;
}
01193
// Create the MOAB edges owned by this process and set their connectivity.
// Reads edgesOnCell to collect the set of local edge global IDs, creates the
// edge entities, then reads verticesOnEdge to fill the 2-vertex connectivity
// of each edge (mapping global vertex IDs to local vertex handles).
//
// @param start_vertex             handle of the first local vertex (from
//                                 create_local_vertices); local handles are
//                                 start_vertex + local index
// @param num_edges_on_local_cells actual (unpadded) edge count per local cell
ErrorCode NCHelperMPAS::create_local_edges( EntityHandle start_vertex,
                                            const std::vector< int >& num_edges_on_local_cells )
{
    Interface*& mbImpl  = _readNC->mbImpl;
    Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;

    // Read edges on each local cell, to get localGidEdges
    int edgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "edgesOnCell", &edgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edgesOnCell" );

    std::vector< int > edges_on_local_cells( nLocalCells * maxEdgesPerCell );
    dbgOut.tprintf( 1, "   edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );

#ifdef MOAB_HAVE_PNETCDF
    // One non-blocking request per contiguous subrange of cell global IDs
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth       = pair_iter->first;
        EntityHandle endh         = pair_iter->second;
        // Global IDs are 1-based; file offsets are 0-based
        NCDF_SIZE read_starts[2]  = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2]  = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                      static_cast< NCDF_SIZE >( maxEdgesPerCell ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                           &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                         &( edges_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edgesOnCell data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Correct local cell edges array in the same way as local cell vertices array, replace the
    // padded edges with the last edges in the corresponding cells
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        int num_edges             = num_edges_on_local_cells[local_cell_idx];
        int idx_in_local_edge_arr = local_cell_idx * maxEdgesPerCell;
        int last_edge_idx         = edges_on_local_cells[idx_in_local_edge_arr + num_edges - 1];
        for( int i = num_edges; i < maxEdgesPerCell; i++ )
            edges_on_local_cells[idx_in_local_edge_arr + i] = last_edge_idx;
    }

    // Collect local edges
    std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
    // Insert in reverse order for Range efficiency; duplicates collapse in the Range
    std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
    nLocalEdges = localGidEdges.size();

    dbgOut.tprintf( 1, "   localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
    dbgOut.tprintf( 1, "   localGidEdges.size() = %d\n", (int)localGidEdges.size() );

    // Create local edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_edges = NULL;
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
                                                     // Might have to create gather mesh later
                                                     ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );

    // Add local edges to current file set
    Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );

    // Get ptr to GID memory for edges
    int count  = 0;
    void* data = NULL;
    rval = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
    assert( count == nLocalEdges );
    int* gid_data = (int*)data;
    std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );

    int verticesOnEdgeVarId;

    // Read vertices on each local edge, to get edge connectivity
    success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
    // Utilize the memory storage pointed by conn_arr_edges
    // (the int data occupies the first half of the EntityHandle buffer; it is
    // converted in place further below)
    int* vertices_on_local_edges = (int*)conn_arr_edges;
#ifdef MOAB_HAVE_PNETCDF
    nb_reads = localGidEdges.psize();
    requests.resize( nb_reads );
    statuss.resize( nb_reads );
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
         ++pair_iter )
    {
        EntityHandle starth      = pair_iter->first;
        EntityHandle endh        = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                           &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                         &( vertices_on_local_edges[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * 2;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Populate connectivity data for local edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        int global_vert_idx = vertices_on_local_edges[edge_vert];    // Global vertex index, 1 based
        int local_vert_idx  = localGidVerts.index( global_vert_idx );  // Local vertex index, 0 based
        assert( local_vert_idx != -1 );
        conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
    }

    return MB_SUCCESS;
}
01339
// Create the MOAB polygon cells owned by this process, grouping cells by their
// actual edge count (no padding): one MBPOLYGON sequence per distinct edge count.
// When more than one group exists, cellHandleToGlobalID is populated so that
// cell variables can later be read group by group.
//
// @param vertices_on_local_cells  global vertex IDs (1-based) per local cell,
//                                 padded to maxEdgesPerCell entries per cell
// @param num_edges_on_local_cells actual edge count per local cell
// @param start_vertex             handle of the first local vertex
// @param faces                    output: handles of all created cells
ErrorCode NCHelperMPAS::create_local_cells( const std::vector< int >& vertices_on_local_cells,
                                            const std::vector< int >& num_edges_on_local_cells,
                                            EntityHandle start_vertex,
                                            Range& faces )
{
    Interface*& mbImpl = _readNC->mbImpl;
    Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;

    // Divide local cells into groups based on the number of edges
    Range local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
    // Insert larger values before smaller ones to increase efficiency
    for( int i = nLocalCells - 1; i >= 0; i-- )
    {
        int num_edges = num_edges_on_local_cells[i];
        local_cells_with_n_edges[num_edges].insert( localGidCells[i] );  // Global cell index
    }

    // Record which edge counts actually occur (polygons have at least 3 edges)
    std::vector< int > num_edges_on_cell_groups;
    for( int i = 3; i <= maxEdgesPerCell; i++ )
    {
        if( local_cells_with_n_edges[i].size() > 0 ) num_edges_on_cell_groups.push_back( i );
    }
    numCellGroups = num_edges_on_cell_groups.size();

    EntityHandle* conn_arr_local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
    for( int i = 0; i < numCellGroups; i++ )
    {
        int num_edges_per_cell = num_edges_on_cell_groups[i];
        int num_group_cells    = (int)local_cells_with_n_edges[num_edges_per_cell].size();

        // Create local cells for each non-empty cell group
        EntityHandle start_element;
        ErrorCode rval = _readNC->readMeshIface->get_element_connect(
            num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
            conn_arr_local_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
        faces.insert( start_element, start_element + num_group_cells - 1 );

        // Add local cells to current file set
        Range local_cells_range( start_element, start_element + num_group_cells - 1 );
        rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );

        // Get ptr to gid memory for local cells
        int count  = 0;
        void* data = NULL;
        rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
        assert( count == num_group_cells );
        int* gid_data = (int*)data;
        std::copy( local_cells_with_n_edges[num_edges_per_cell].begin(),
                   local_cells_with_n_edges[num_edges_per_cell].end(), gid_data );

        // Set connectivity array with proper local vertices handles
        for( int j = 0; j < num_group_cells; j++ )
        {
            EntityHandle global_cell_idx =
                local_cells_with_n_edges[num_edges_per_cell][j];  // Global cell index, 1 based
            int local_cell_idx = localGidCells.index( global_cell_idx );  // Local cell index, 0 based
            assert( local_cell_idx != -1 );

            if( numCellGroups > 1 )
            {
                // Populate cellHandleToGlobalID map to read cell variables
                cellHandleToGlobalID[start_element + j] = global_cell_idx;
            }

            // Only the first num_edges_per_cell entries of the padded row are used
            for( int k = 0; k < num_edges_per_cell; k++ )
            {
                EntityHandle global_vert_idx =
                    vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + k];  // Global vertex index, 1 based
                int local_vert_idx = localGidVerts.index( global_vert_idx );  // Local vertex index, 0 based
                assert( local_vert_idx != -1 );
                conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
                    start_vertex + local_vert_idx;
            }
        }
    }

    return MB_SUCCESS;
}
01418
01419 ErrorCode NCHelperMPAS::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
01420 EntityHandle start_vertex,
01421 Range& faces )
01422 {
01423 Interface*& mbImpl = _readNC->mbImpl;
01424 Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
01425
01426 // Only one group of cells (each cell is represented by a polygon with maxEdgesPerCell edges)
01427 numCellGroups = 1;
01428
01429 // Create cells for this cell group
01430 EntityHandle start_element;
01431 EntityHandle* conn_arr_local_cells = NULL;
01432 ErrorCode rval =
01433 _readNC->readMeshIface->get_element_connect( nLocalCells, maxEdgesPerCell, MBPOLYGON, 0, start_element,
01434 conn_arr_local_cells,
01435 // Might have to create gather mesh later
01436 ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
01437 faces.insert( start_element, start_element + nLocalCells - 1 );
01438
01439 // Add local cells to current file set
01440 Range local_cells_range( start_element, start_element + nLocalCells - 1 );
01441 rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
01442
01443 // Get ptr to GID memory for local cells
01444 int count = 0;
01445 void* data = NULL;
01446 rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
01447 assert( count == nLocalCells );
01448 int* gid_data = (int*)data;
01449 std::copy( localGidCells.begin(), localGidCells.end(), gid_data );
01450
01451 // Set connectivity array with proper local vertices handles
01452 // vertices_on_local_cells array was already corrected to have the last vertices padded
01453 // no need for extra checks considering
01454 for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
01455 {
01456 for( int i = 0; i < maxEdgesPerCell; i++ )
01457 {
01458 EntityHandle global_vert_idx =
01459 vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + i]; // Global vertex index, 1 based
01460 int local_vert_idx = localGidVerts.index( global_vert_idx ); // Local vertex index, 0 based
01461 assert( local_vert_idx != -1 );
01462 conn_arr_local_cells[local_cell_idx * maxEdgesPerCell + i] = start_vertex + local_vert_idx;
01463 }
01464 }
01465
01466 return MB_SUCCESS;
01467 }
01468
01469 ErrorCode NCHelperMPAS::create_gather_set_vertices( EntityHandle gather_set, EntityHandle& gather_set_start_vertex )
01470 {
01471 Interface*& mbImpl = _readNC->mbImpl;
01472 Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
01473 const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
01474
01475 // Create gather set vertices
01476 std::vector< double* > arrays;
01477 // Don't need to specify allocation number here, because we know enough vertices were created
01478 // before
01479 ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );
01480
01481 // Add vertices to the gather set
01482 Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
01483 rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );
01484
01485 // Read x coordinates for gather set vertices
01486 double* xptr = arrays[0];
01487 int xVertexVarId;
01488 int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
01489 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
01490 NCDF_SIZE read_start = 0;
01491 NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
01492 #ifdef MOAB_HAVE_PNETCDF
01493 // Enter independent I/O mode, since this read is only for the gather processor
01494 success = NCFUNC( begin_indep_data )( _fileId );
01495 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01496 success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
01497 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
01498 success = NCFUNC( end_indep_data )( _fileId );
01499 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01500 #else
01501 success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
01502 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
01503 #endif
01504
01505 // Read y coordinates for gather set vertices
01506 double* yptr = arrays[1];
01507 int yVertexVarId;
01508 success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
01509 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
01510 #ifdef MOAB_HAVE_PNETCDF
01511 // Enter independent I/O mode, since this read is only for the gather processor
01512 success = NCFUNC( begin_indep_data )( _fileId );
01513 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01514 success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
01515 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
01516 success = NCFUNC( end_indep_data )( _fileId );
01517 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01518 #else
01519 success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
01520 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
01521 #endif
01522
01523 // Read z coordinates for gather set vertices
01524 double* zptr = arrays[2];
01525 int zVertexVarId;
01526 success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
01527 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
01528 #ifdef MOAB_HAVE_PNETCDF
01529 // Enter independent I/O mode, since this read is only for the gather processor
01530 success = NCFUNC( begin_indep_data )( _fileId );
01531 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01532 success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
01533 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
01534 success = NCFUNC( end_indep_data )( _fileId );
01535 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01536 #else
01537 success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
01538 if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
01539 #endif
01540
01541 // Get ptr to GID memory for gather set vertices
01542 int count = 0;
01543 void* data = NULL;
01544 rval =
01545 mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
01546 assert( count == nVertices );
01547 int* gid_data = (int*)data;
01548 for( int j = 1; j <= nVertices; j++ )
01549 gid_data[j - 1] = j;
01550
01551 // Set the file id tag too, it should be bigger something not interfering with global id
01552 if( mpFileIdTag )
01553 {
01554 rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
01555 data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
01556 assert( count == nVertices );
01557 int bytes_per_tag = 4;
01558 rval = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
01559 if( 4 == bytes_per_tag )
01560 {
01561 gid_data = (int*)data;
01562 for( int j = 1; j <= nVertices; j++ )
01563 gid_data[j - 1] = nVertices + j; // Bigger than global id tag
01564 }
01565 else if( 8 == bytes_per_tag )
01566 { // Should be a handle tag on 64 bit machine?
01567 long* handle_tag_data = (long*)data;
01568 for( int j = 1; j <= nVertices; j++ )
01569 handle_tag_data[j - 1] = nVertices + j; // Bigger than global id tag
01570 }
01571 }
01572
01573 return MB_SUCCESS;
01574 }
01575
// Create all MPAS edges (MBEDGE, 2 vertices each) on the gather processor and add them
// to the gather set. Edge connectivity is read from the file variable "verticesOnEdge"
// (1-based global vertex indices) and remapped to the gather set vertex handles, which
// are assumed to have been created contiguously starting at gather_set_start_vertex.
// Returns MB_SUCCESS, or an error code if any MOAB or NetCDF call fails.
ErrorCode NCHelperMPAS::create_gather_set_edges( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Create gather set edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_gather_set_edges = NULL;
    // Don't need to specify allocation number here, because we know enough edges were created
    // before
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );

    // Add edges to the gather set
    Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );

    // Read vertices on each edge
    int verticesOnEdgeVarId;
    int success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
    // Utilize the memory storage pointed by conn_arr_gather_set_edges: the nEdges*2 ints read
    // from the file occupy (at most) the first half of the EntityHandle connectivity buffer,
    // avoiding a separate temporary array
    int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
    NCDF_SIZE read_starts[2] = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nEdges ), 2 };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
#endif

    // Populate connectivity data for gather set edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick: since sizeof(EntityHandle) >= sizeof(int), writing handle
    // slot i never overwrites an int slot > i that has not been consumed yet
    for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert]; // Global vertex index, 1 based
        gather_set_vert_idx--;                                             // 1 based -> 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}
01628
// Create all MPAS cells (MBPOLYGON) on the gather processor, grouped by their actual
// number of edges (no padding), and add them to the gather set. Per-cell edge counts come
// from the file variable "nEdgesOnCell"; connectivity comes from "verticesOnCell" (1-based
// global vertex indices, padded to maxEdgesPerCell per cell in the file layout) and is
// remapped to the gather set vertex handles starting at gather_set_start_vertex.
// Returns MB_SUCCESS, or an error code if any MOAB or NetCDF call fails.
ErrorCode NCHelperMPAS::create_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Read number of edges on each gather set cell
    int nEdgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
    std::vector< int > num_edges_on_gather_set_cells( nCells );
    NCDF_SIZE read_start = 0;
    NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
#endif

    // Read vertices on each gather set cell (connectivity)
    int verticesOnCellVarId;
    success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
    std::vector< int > vertices_on_gather_set_cells( nCells * maxEdgesPerCell );
    NCDF_SIZE read_starts[2] = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                    &vertices_on_gather_set_cells[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                    &vertices_on_gather_set_cells[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
#endif

    // Divide gather set cells into groups based on the number of edges
    // Slot k of the array holds the (1-based) global cell ids of all cells with exactly k edges;
    // this mirrors how local cells are grouped elsewhere in this helper
    Range gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
    // Insert larger values before smaller values to increase efficiency (Range insertion at the
    // front of its internal list is cheaper)
    for( int i = nCells - 1; i >= 0; i-- )
    {
        int num_edges = num_edges_on_gather_set_cells[i];
        gather_set_cells_with_n_edges[num_edges].insert( i + 1 ); // 0 based -> 1 based
    }

    // Create gather set cells: one MOAB element sequence per polygon size (3..maxEdgesPerCell)
    EntityHandle* conn_arr_gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
    for( int num_edges_per_cell = 3; num_edges_per_cell <= maxEdgesPerCell; num_edges_per_cell++ )
    {
        int num_group_cells = gather_set_cells_with_n_edges[num_edges_per_cell].size();
        if( num_group_cells > 0 )
        {
            EntityHandle start_element;
            ErrorCode rval = _readNC->readMeshIface->get_element_connect(
                num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
                conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );

            // Add cells to the gather set
            Range gather_set_cells_range( start_element, start_element + num_group_cells - 1 );
            rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );

            for( int j = 0; j < num_group_cells; j++ )
            {
                int gather_set_cell_idx =
                    gather_set_cells_with_n_edges[num_edges_per_cell][j]; // Global cell index, 1 based
                gather_set_cell_idx--;                                    // 1 based -> 0 based

                // Copy only the first num_edges_per_cell vertices of the padded file row
                for( int k = 0; k < num_edges_per_cell; k++ )
                {
                    EntityHandle gather_set_vert_idx =
                        vertices_on_gather_set_cells[gather_set_cell_idx * maxEdgesPerCell +
                                                     k]; // Global vertex index, 1 based
                    gather_set_vert_idx--;               // 1 based -> 0 based

                    // Connectivity array is shifted by where the gather set vertices start
                    conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
                        gather_set_start_vertex + gather_set_vert_idx;
                }
            }
        }
    }

    return MB_SUCCESS;
}
01725
// Create all MPAS cells (MBPOLYGON) on the gather processor as uniformly padded polygons
// with maxEdgesPerCell vertices each, and add them to the gather set. Padded slots in each
// cell's connectivity are filled by repeating the cell's last real vertex, matching how
// local cells are padded elsewhere in this helper. Connectivity is read from the file
// variable "verticesOnCell" (1-based global vertex indices) and remapped to the gather set
// vertex handles starting at gather_set_start_vertex.
// Returns MB_SUCCESS, or an error code if any MOAB or NetCDF call fails.
ErrorCode NCHelperMPAS::create_padded_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Read number of edges on each gather set cell
    int nEdgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
    std::vector< int > num_edges_on_gather_set_cells( nCells );
    NCDF_SIZE read_start = 0;
    NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
#endif

    // Create gather set cells
    EntityHandle start_element;
    EntityHandle* conn_arr_gather_set_cells = NULL;
    // Don't need to specify allocation number here, because we know enough cells were created
    // before
    ErrorCode rval = _readNC->readMeshIface->get_element_connect( nCells, maxEdgesPerCell, MBPOLYGON, 0, start_element,
                                                                  conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );

    // Add cells to the gather set
    Range gather_set_cells_range( start_element, start_element + nCells - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );

    // Read vertices on each gather set cell (connectivity)
    int verticesOnCellVarId;
    success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
    // Utilize the memory storage pointed by conn_arr_gather_set_cells: the nCells*maxEdgesPerCell
    // ints read from the file occupy (at most) the first half of the EntityHandle connectivity
    // buffer, avoiding a separate temporary array
    int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
    NCDF_SIZE read_starts[2] = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
#endif

    // Correct gather set cell vertices array in the same way as local cell vertices array,
    // replace the padded vertices with the last vertices in the corresponding cells
    // (this must happen BEFORE the in-place int -> EntityHandle conversion below, while the
    // data is still a plain int array)
    for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
    {
        int num_edges = num_edges_on_gather_set_cells[gather_set_cell_idx];
        int idx_in_gather_set_vert_arr = gather_set_cell_idx * maxEdgesPerCell;
        int last_vert_idx = vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + num_edges - 1];
        for( int i = num_edges; i < maxEdgesPerCell; i++ )
            vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + i] = last_vert_idx;
    }

    // Populate connectivity data for gather set cells
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick: since sizeof(EntityHandle) >= sizeof(int), writing handle
    // slot i never overwrites an int slot > i that has not been consumed yet
    for( int cell_vert = nCells * maxEdgesPerCell - 1; cell_vert >= 0; cell_vert-- )
    {
        int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert]; // Global vertex index, 1 based
        gather_set_vert_idx--;                                             // 1 based -> 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}
01811
01812 } // namespace moab