MOAB: Mesh Oriented datABase (version 5.2.1)
NCHelperMPAS.cpp
00001 #include "NCHelperMPAS.hpp"
00002 #include "moab/ReadUtilIface.hpp"
00003 #include "moab/FileOptions.hpp"
00004 #include "moab/SpectralMeshTool.hpp"
00005 #include "MBTagConventions.hpp"
00006 
00007 #ifdef MOAB_HAVE_ZOLTAN
00008 #include "moab/ZoltanPartitioner.hpp"
00009 #endif
00010 
00011 #include <cmath>
00012 
00013 namespace moab
00014 {
00015 
00016 const int DEFAULT_MAX_EDGES_PER_CELL = 10;
00017 
00018 NCHelperMPAS::NCHelperMPAS( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
00019     : UcdNCHelper( readNC, fileId, opts, fileSet ), maxEdgesPerCell( DEFAULT_MAX_EDGES_PER_CELL ), numCellGroups( 0 ),
00020       createGatherSet( false )
00021 {
00022     // Ignore variables containing topological information
00023     ignoredVarNames.insert( "nEdgesOnEdge" );
00024     ignoredVarNames.insert( "nEdgesOnCell" );
00025     ignoredVarNames.insert( "edgesOnVertex" );
00026     ignoredVarNames.insert( "cellsOnVertex" );
00027     ignoredVarNames.insert( "verticesOnEdge" );
00028     ignoredVarNames.insert( "edgesOnEdge" );
00029     ignoredVarNames.insert( "cellsOnEdge" );
00030     ignoredVarNames.insert( "verticesOnCell" );
00031     ignoredVarNames.insert( "edgesOnCell" );
00032     ignoredVarNames.insert( "cellsOnCell" );
00033     // Ignore variables containing mesh-related info; they are currently saved as variable tags
00034     // on the file set. If needed, they should be saved as dense tags and read accordingly in parallel
00035     ignoredVarNames.insert( "weightsOnEdge" );
00036     ignoredVarNames.insert( "angleEdge" );
00037     ignoredVarNames.insert( "areaCell" );
00038     ignoredVarNames.insert( "areaTriangle" );
00039     ignoredVarNames.insert( "dcEdge" );
00040     ignoredVarNames.insert( "dv1Edge" );
00041     ignoredVarNames.insert( "dv2Edge" );
00042     ignoredVarNames.insert( "fEdge" );
00043     ignoredVarNames.insert( "fVertex" );
00044     ignoredVarNames.insert( "h_s" );
00045     ignoredVarNames.insert( "kiteAreasOnVertex" );
00046     ignoredVarNames.insert( "latCell" );
00047     ignoredVarNames.insert( "latEdge" );
00048     ignoredVarNames.insert( "latVertex" );
00049     ignoredVarNames.insert( "lonCell" );
00050     ignoredVarNames.insert( "lonEdge" );
00051     ignoredVarNames.insert( "lonVertex" );
00052     ignoredVarNames.insert( "meshDensity" );
00053     ignoredVarNames.insert( "xCell" );
00054     ignoredVarNames.insert( "xEdge" );
00055     ignoredVarNames.insert( "xVertex" );
00056     ignoredVarNames.insert( "yCell" );
00057     ignoredVarNames.insert( "yEdge" );
00058     ignoredVarNames.insert( "yVertex" );
00059     ignoredVarNames.insert( "zCell" );
00060     ignoredVarNames.insert( "zEdge" );
00061     ignoredVarNames.insert( "zVertex" );
00062     // Ignore variables for index conversion
00063     ignoredVarNames.insert( "indexToVertexID" );
00064     ignoredVarNames.insert( "indexToEdgeID" );
00065     ignoredVarNames.insert( "indexToCellID" );
00066 }
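      // A minimal usage sketch (illustrative only: the file name is assumed, while the option
      // strings follow the NO_EDGES / NO_MIXED_ELEMENTS / VARIABLE read options referenced in
      // this reader):
      //
      //   moab::Core mb;
      //   moab::EntityHandle fileSet;
      //   mb.create_meshset( moab::MESHSET_SET, fileSet );
      //   mb.load_file( "ocean.nc", &fileSet, "NO_EDGES;NO_MIXED_ELEMENTS;VARIABLE=salinity" );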
00067 
00068 bool NCHelperMPAS::can_read_file( ReadNC* readNC )
00069 {
00070     std::vector< std::string >& dimNames = readNC->dimNames;
00071 
00072     // If the dimension name "vertexDegree" exists, then this should be an MPAS grid file
00073     if( std::find( dimNames.begin(), dimNames.end(), std::string( "vertexDegree" ) ) != dimNames.end() ) return true;
00074 
00075     return false;
00076 }
00077 
00078 ErrorCode NCHelperMPAS::init_mesh_vals()
00079 {
00080     std::vector< std::string >& dimNames              = _readNC->dimNames;
00081     std::vector< int >& dimLens                       = _readNC->dimLens;
00082     std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
00083 
00084     ErrorCode rval;
00085     unsigned int idx;
00086     std::vector< std::string >::iterator vit;
00087 
00088     // Get max edges per cell reported in the MPAS file header
00089     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "maxEdges" ) ) != dimNames.end() )
00090     {
00091         idx             = vit - dimNames.begin();
00092         maxEdgesPerCell = dimLens[idx];
00093         if( maxEdgesPerCell > DEFAULT_MAX_EDGES_PER_CELL )
00094         {
00095             MB_SET_ERR( MB_INVALID_SIZE,
00096                         "maxEdgesPerCell read from the MPAS file header has exceeded " << DEFAULT_MAX_EDGES_PER_CELL );
00097         }
00098     }
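          // For reference: a typical quasi-uniform MPAS (SCVT) mesh reports maxEdges of about 6
          // or 7, well under the cap of DEFAULT_MAX_EDGES_PER_CELL = 10 enforced above (these
          // typical values are indicative, not taken from any particular file).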
00099 
00100     // Look for time dimension
00101     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
00102         idx = vit - dimNames.begin();
00103     else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
00104         idx = vit - dimNames.begin();
00105     else
00106     {
00107         MB_SET_ERR( MB_FAILURE, "Couldn't find 'Time' or 'time' dimension" );
00108     }
00109     tDim       = idx;
00110     nTimeSteps = dimLens[idx];
00111 
00112     // Get number of cells
00113     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nCells" ) ) != dimNames.end() )
00114         idx = vit - dimNames.begin();
00115     else
00116     {
00117         MB_SET_ERR( MB_FAILURE, "Couldn't find 'nCells' dimension" );
00118     }
00119     cDim   = idx;
00120     nCells = dimLens[idx];
00121 
00122     // Get number of edges
00123     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nEdges" ) ) != dimNames.end() )
00124         idx = vit - dimNames.begin();
00125     else
00126     {
00127         MB_SET_ERR( MB_FAILURE, "Couldn't find 'nEdges' dimension" );
00128     }
00129     eDim   = idx;
00130     nEdges = dimLens[idx];
00131 
00132     // Get number of vertices
00133     vDim = -1;
00134     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertices" ) ) != dimNames.end() )
00135         idx = vit - dimNames.begin();
00136     else
00137     {
00138         MB_SET_ERR( MB_FAILURE, "Couldn't find 'nVertices' dimension" );
00139     }
00140     vDim      = idx;
00141     nVertices = dimLens[idx];
00142 
00143     // Get number of vertex levels
00144     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevels" ) ) != dimNames.end() )
00145         idx = vit - dimNames.begin();
00146     else
00147     {
00148         std::cerr << "Warning: dimension nVertLevels not found in header.\nThe file may contain "
00149                      "just the mesh"
00150                   << std::endl;
00151     }
00152     levDim  = idx;
00153     nLevels = dimLens[idx];
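          // Note: when nVertLevels is absent, idx still holds the index found by the previous
          // dimension lookup above, so levDim and nLevels fall back to that dimension; for a
          // mesh-only file the nLevels value set here is therefore not meaningful.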
00154 
00155     // Dimension indices for other optional levels
00156     std::vector< unsigned int > opt_lev_dims;
00157 
00158     // Get index of vertex levels P1
00159     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP1" ) ) != dimNames.end() )
00160     {
00161         idx = vit - dimNames.begin();
00162         opt_lev_dims.push_back( idx );
00163     }
00164 
00165     // Get index of vertex levels P2
00166     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP2" ) ) != dimNames.end() )
00167     {
00168         idx = vit - dimNames.begin();
00169         opt_lev_dims.push_back( idx );
00170     }
00171 
00172     // Get index of soil levels
00173     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nSoilLevels" ) ) != dimNames.end() )
00174     {
00175         idx = vit - dimNames.begin();
00176         opt_lev_dims.push_back( idx );
00177     }
00178 
00179     std::map< std::string, ReadNC::VarData >::iterator vmit;
00180 
00181     // Store time coordinate values in tVals
00182     if( nTimeSteps > 0 )
00183     {
00184         // Note, two possible types for xtime variable: double(Time) or char(Time, StrLen)
00185         if( ( vmit = varInfo.find( "xtime" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
00186         {
00187             // If xtime variable is double type, read time coordinate values to tVals
00188             rval = read_coordinate( "xtime", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'xtime' variable" );
00189         }
00190         else
00191         {
00192             // If xtime variable does not exist, or it is string type, set dummy values to tVals
00193             for( int t = 0; t < nTimeSteps; t++ )
00194                 tVals.push_back( (double)t );
00195         }
00196     }
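          // For reference, the two header declarations distinguished above look like this
          // (illustrative CDL, not from a specific file):
          //   double xtime(Time) ;          -> values are read into tVals
          //   char   xtime(Time, StrLen) ;  -> dummy values 0, 1, ..., nTimeSteps - 1 are used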
00197 
00198     // For each variable, determine the entity location type and number of levels
00199     for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
00200     {
00201         ReadNC::VarData& vd = ( *vmit ).second;
00202 
00203         // Default entLoc is ENTLOCSET
00204         if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
00205         {
00206             if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
00207                 vd.entLoc = ReadNC::ENTLOCVERT;
00208             else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
00209                 vd.entLoc = ReadNC::ENTLOCEDGE;
00210             else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
00211                 vd.entLoc = ReadNC::ENTLOCFACE;
00212         }
00213 
00214         // Default numLev is 0
00215         if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
00216             vd.numLev = nLevels;
00217         else
00218         {
00219             // If nVertLevels dimension is not found, try other optional levels such as
00220             // nVertLevelsP1
00221             for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
00222             {
00223                 if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
00224                 {
00225                     vd.numLev = dimLens[opt_lev_dims[i]];
00226                     break;
00227                 }
00228             }
00229         }
00230 
00231         // Hack: ignore variables with more than 3 dimensions, e.g. tracers(Time, nCells,
00232         // nVertLevels, nTracers)
00233         if( vd.varDims.size() > 3 ) ignoredVarNames.insert( vd.varName );
00234     }
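          // Illustrative outcomes of the classification above (variable names are hypothetical):
          //   temperature(Time, nCells, nVertLevels)       -> entLoc = ENTLOCFACE, numLev = nLevels
          //   normalVelocity(Time, nEdges, nVertLevels)    -> entLoc = ENTLOCEDGE, numLev = nLevels
          //   ssh(Time, nCells)                            -> entLoc = ENTLOCFACE, numLev = 0 (promoted to 1 at read time)
          //   tracers(Time, nCells, nVertLevels, nTracers) -> ignored (more than 3 dimensions)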
00235 
00236     // Hack: create dummy variables for dimensions (like nCells) with no corresponding coordinate
00237     // variables
00238     rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );
00239 
00240     return MB_SUCCESS;
00241 }
00242 
00243 // When the noMesh option is used on this read, the old ReadNC class instance from the last read can
00244 // go out of scope (and be deleted). That instance initialized some variables properly when the mesh
00245 // was created, but those values are now lost. The new instance (which will not create the mesh, since
00246 // noMesh is set) has to restore them based on the existing mesh from the last read
00247 ErrorCode NCHelperMPAS::check_existing_mesh()
00248 {
00249     Interface*& mbImpl = _readNC->mbImpl;
00250     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
00251     bool& noMesh       = _readNC->noMesh;
00252 
00253     if( noMesh )
00254     {
00255         ErrorCode rval;
00256 
00257         // Restore numCellGroups
00258         if( 0 == numCellGroups )
00259         {
00260             Tag numCellGroupsTag;
00261             rval = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag );MB_CHK_SET_ERR( rval, "Trouble getting __NUM_CELL_GROUPS tag" );
00262             if( MB_SUCCESS == rval ) rval = mbImpl->tag_get_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble getting data of __NUM_CELL_GROUPS tag" );
00263         }
00264 
00265         if( localGidVerts.empty() )
00266         {
00267             // Get all vertices from current file set (it is the input set in no_mesh scenario)
00268             Range local_verts;
00269             rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );
00270 
00271             if( !local_verts.empty() )
00272             {
00273                 std::vector< int > gids( local_verts.size() );
00274 
00275                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00276                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );
00277 
00278                 // Restore localGidVerts
00279                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
00280                 nLocalVertices = localGidVerts.size();
00281             }
00282         }
00283 
00284         if( localGidEdges.empty() )
00285         {
00286             // Get all edges from current file set (it is the input set in no_mesh scenario)
00287             Range local_edges;
00288             rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );
00289 
00290             if( !local_edges.empty() )
00291             {
00292                 std::vector< int > gids( local_edges.size() );
00293 
00294                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00295                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );
00296 
00297                 // Restore localGidEdges
00298                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
00299                 nLocalEdges = localGidEdges.size();
00300             }
00301         }
00302 
00303         if( localGidCells.empty() )
00304         {
00305             // Get all cells from current file set (it is the input set in no_mesh scenario)
00306             Range local_cells;
00307             rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );
00308 
00309             if( !local_cells.empty() )
00310             {
00311                 std::vector< int > gids( local_cells.size() );
00312 
00313                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00314                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );
00315 
00316                 // Restore localGidCells
00317                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
00318                 nLocalCells = localGidCells.size();
00319 
00320                 if( numCellGroups > 1 )
00321                 {
00322                     // Restore cellHandleToGlobalID map
00323                     Range::const_iterator rit;
00324                     int i;
00325                     for( rit = local_cells.begin(), i = 0; rit != local_cells.end(); ++rit, i++ )
00326                         cellHandleToGlobalID[*rit] = gids[i];
00327                 }
00328             }
00329         }
00330     }
00331 
00332     return MB_SUCCESS;
00333 }
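      // A minimal sketch of the two-phase read this restore path supports (the NOMESH and
      // VARIABLE option strings are assumptions based on the NC reader's conventions; the
      // file name is made up):
      //
      //   moab::Core mb;
      //   moab::EntityHandle fileSet;
      //   mb.create_meshset( moab::MESHSET_SET, fileSet );
      //   // First read creates the mesh and reads one variable
      //   mb.load_file( "ocean.nc", &fileSet, "VARIABLE=temperature" );
      //   // Second read reuses the existing mesh; check_existing_mesh() restores the GID ranges
      //   mb.load_file( "ocean.nc", &fileSet, "NOMESH;VARIABLE=salinity" );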
00334 
00335 ErrorCode NCHelperMPAS::create_mesh( Range& faces )
00336 {
00337     Interface*& mbImpl    = _readNC->mbImpl;
00338     bool& noMixedElements = _readNC->noMixedElements;
00339     bool& noEdges         = _readNC->noEdges;
00340     DebugOutput& dbgOut   = _readNC->dbgOut;
00341 
00342 #ifdef MOAB_HAVE_MPI
00343     int rank         = 0;
00344     int procs        = 1;
00345     bool& isParallel = _readNC->isParallel;
00346     if( isParallel )
00347     {
00348         ParallelComm*& myPcomm = _readNC->myPcomm;
00349         rank                   = myPcomm->proc_config().proc_rank();
00350         procs                  = myPcomm->proc_config().proc_size();
00351     }
00352 
00353     // Need to know whether we'll be creating gather mesh
00354     if( rank == _readNC->gatherSetRank ) createGatherSet = true;
00355 
00356     if( procs >= 2 )
00357     {
00358         // Shift rank to obtain a rotated trivial partition
00359         int shifted_rank           = rank;
00360         int& trivialPartitionShift = _readNC->trivialPartitionShift;
00361         if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;
00362 
00363         // Compute the number of local cells on this proc
00364         nLocalCells = int( std::floor( 1.0 * nCells / procs ) );
00365 
00366         // The starting global cell index in the MPAS file for this proc
00367         int start_cell_idx = shifted_rank * nLocalCells;
00368 
00369         // Number of extra cells after equal split over procs
00370         int iextra = nCells % procs;
00371 
00372         // Allocate extra cells over procs
00373         if( shifted_rank < iextra ) nLocalCells++;
00374         start_cell_idx += std::min( shifted_rank, iextra );
00375 
00376         start_cell_idx++;  // 0 based -> 1 based
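              // Worked example of the trivial partition above (numbers are illustrative):
              //   nCells = 10, procs = 3, trivialPartitionShift = 0 gives iextra = 1 and
              //     rank 0: nLocalCells = 3 + 1 = 4, start_cell_idx = 0 + 0 + 1 = 1 -> cells 1..4
              //     rank 1: nLocalCells = 3,         start_cell_idx = 3 + 1 + 1 = 5 -> cells 5..7
              //     rank 2: nLocalCells = 3,         start_cell_idx = 6 + 1 + 1 = 8 -> cells 8..10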
00377 
00378         // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
00379         ErrorCode rval = redistribute_local_cells( start_cell_idx );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
00380     }
00381     else
00382     {
00383         nLocalCells = nCells;
00384         localGidCells.insert( 1, nLocalCells );
00385     }
00386 #else
00387     nLocalCells = nCells;
00388     localGidCells.insert( 1, nLocalCells );
00389 #endif
00390     dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
00391     dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
00392 
00393     // Read number of edges on each local cell, to calculate actual maxEdgesPerCell
00394     int nEdgesOnCellVarId;
00395     int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
00396     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
00397     std::vector< int > num_edges_on_local_cells( nLocalCells );
00398 #ifdef MOAB_HAVE_PNETCDF
00399     size_t nb_reads = localGidCells.psize();
00400     std::vector< int > requests( nb_reads );
00401     std::vector< int > statuss( nb_reads );
00402     size_t idxReq = 0;
00403 #endif
00404     size_t indexInArray = 0;
00405     for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
00406          ++pair_iter )
00407     {
00408         EntityHandle starth  = pair_iter->first;
00409         EntityHandle endh    = pair_iter->second;
00410         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
00411         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
00412 
00413         // Do a partial read in each subrange
00414 #ifdef MOAB_HAVE_PNETCDF
00415         success = NCFUNCREQG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
00416                                            &( num_edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
00417 #else
00418         success = NCFUNCAG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
00419                                          &( num_edges_on_local_cells[indexInArray] ) );
00420 #endif
00421         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data in a loop" );
00422 
00423         // Increment the index for next subrange
00424         indexInArray += ( endh - starth + 1 );
00425     }
00426 
00427 #ifdef MOAB_HAVE_PNETCDF
00428     // Wait outside the loop
00429     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
00430     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
00431 #endif
00432 
00433     // Get local maxEdgesPerCell on this proc
00434     int local_max_edges_per_cell =
00435         *( std::max_element( num_edges_on_local_cells.begin(), num_edges_on_local_cells.end() ) );
00436     maxEdgesPerCell = local_max_edges_per_cell;
00437 
00438     // If parallel, do a MPI_Allreduce to get global maxEdgesPerCell across all procs
00439 #ifdef MOAB_HAVE_MPI
00440     if( procs >= 2 )
00441     {
00442         int global_max_edges_per_cell;
00443         ParallelComm*& myPcomm = _readNC->myPcomm;
00444         MPI_Allreduce( &local_max_edges_per_cell, &global_max_edges_per_cell, 1, MPI_INT, MPI_MAX,
00445                        myPcomm->proc_config().proc_comm() );
00446         assert( local_max_edges_per_cell <= global_max_edges_per_cell );
00447         maxEdgesPerCell = global_max_edges_per_cell;
00448         if( 0 == rank ) dbgOut.tprintf( 1, "  global_max_edges_per_cell = %d\n", global_max_edges_per_cell );
00449     }
00450 #endif
00451 
00452     // Read vertices on each local cell, to get localGidVerts and cell connectivity later
00453     int verticesOnCellVarId;
00454     success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
00455     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
00456     std::vector< int > vertices_on_local_cells( nLocalCells * maxEdgesPerCell );
00457 #ifdef MOAB_HAVE_PNETCDF
00458     idxReq = 0;
00459 #endif
00460     indexInArray = 0;
00461     for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
00462          ++pair_iter )
00463     {
00464         EntityHandle starth      = pair_iter->first;
00465         EntityHandle endh        = pair_iter->second;
00466         NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
00467         NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
00468                                      static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
00469 
00470         // Do a partial read in each subrange
00471 #ifdef MOAB_HAVE_PNETCDF
00472         success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
00473                                            &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
00474 #else
00475         success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
00476                                          &( vertices_on_local_cells[indexInArray] ) );
00477 #endif
00478         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data in a loop" );
00479 
00480         // Increment the index for next subrange
00481         indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
00482     }
00483 
00484 #ifdef MOAB_HAVE_PNETCDF
00485     // Wait outside the loop
00486     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
00487     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
00488 #endif
00489 
00490     // Correct the local cell vertices array: replace the padded vertices with the last vertex
00491     // of the corresponding cell. Sometimes the padded entries are 0, sometimes a large
00492     // vertex id; make sure they are consistent with our padding convention
00493     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
00494     {
00495         int num_edges             = num_edges_on_local_cells[local_cell_idx];
00496         int idx_in_local_vert_arr = local_cell_idx * maxEdgesPerCell;
00497         int last_vert_idx         = vertices_on_local_cells[idx_in_local_vert_arr + num_edges - 1];
00498         for( int i = num_edges; i < maxEdgesPerCell; i++ )
00499             vertices_on_local_cells[idx_in_local_vert_arr + i] = last_vert_idx;
00500     }
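          // Example of the correction above: with maxEdgesPerCell = 7, a pentagonal cell whose row
          // was read as {12, 15, 9, 22, 7, 0, 0} (num_edges = 5) becomes {12, 15, 9, 22, 7, 7, 7},
          // i.e. the last real vertex fills the padded slots (vertex ids are illustrative).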
00501 
00502     // Create local vertices
00503     EntityHandle start_vertex;
00504     ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for MPAS mesh" );
00505 
00506     // Create local edges (unless NO_EDGES read option is set)
00507     if( !noEdges )
00508     {
00509         rval = create_local_edges( start_vertex, num_edges_on_local_cells );MB_CHK_SET_ERR( rval, "Failed to create local edges for MPAS mesh" );
00510     }
00511 
00512     // Create local cells, either unpadded or padded
00513     if( noMixedElements )
00514     {
00515         rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for MPAS mesh" );
00516     }
00517     else
00518     {
00519         rval = create_local_cells( vertices_on_local_cells, num_edges_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create local cells for MPAS mesh" );
00520     }
00521 
00522     // Set tag for numCellGroups
00523     Tag numCellGroupsTag = 0;
00524     rval                 = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag,
00525                                    MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating __NUM_CELL_GROUPS tag" );
00526     rval = mbImpl->tag_set_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble setting data to __NUM_CELL_GROUPS tag" );
00527 
00528     if( createGatherSet )
00529     {
00530         EntityHandle gather_set;
00531         rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );
00532 
00533         // Create gather set vertices
00534         EntityHandle start_gather_set_vertex;
00535         rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for MPAS mesh" );
00536 
00537         // Create gather set edges (unless NO_EDGES read option is set)
00538         if( !noEdges )
00539         {
00540             rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for MPAS mesh" );
00541         }
00542 
00543         // Create gather set cells, either unpadded or padded
00544         if( noMixedElements )
00545         {
00546             rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for MPAS mesh" );
00547         }
00548         else
00549         {
00550             rval = create_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set cells for MPAS mesh" );
00551         }
00552     }
00553 
00554     return MB_SUCCESS;
00555 }
00556 
00557 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
00558                                                                std::vector< int >& tstep_nums )
00559 {
00560     Interface*& mbImpl          = _readNC->mbImpl;
00561     std::vector< int >& dimLens = _readNC->dimLens;
00562     bool& noEdges               = _readNC->noEdges;
00563     DebugOutput& dbgOut         = _readNC->dbgOut;
00564 
00565     Range* range = NULL;
00566 
00567     // Get vertices
00568     Range verts;
00569     ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
00570     assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );
00571 
00572     // Get edges
00573     Range edges;
00574     rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );
00575 
00576     // Get faces
00577     Range faces;
00578     rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
00579     // Note, for MPAS faces.psize() can be more than 1
00580 
00581 #ifdef MOAB_HAVE_MPI
00582     bool& isParallel = _readNC->isParallel;
00583     if( isParallel )
00584     {
00585         ParallelComm*& myPcomm = _readNC->myPcomm;
00586         rval                   = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
00587     }
00588     else
00589         facesOwned = faces;  // not running in parallel, but still with MPI
00590 #else
00591     facesOwned = faces;
00592 #endif
00593 
00594     for( unsigned int i = 0; i < vdatas.size(); i++ )
00595     {
00596         // Skip edge variables, if specified by the read options
00597         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00598 
00599         // Support non-set variables with 3 dimensions like (Time, nCells, nVertLevels), or
00600         // 2 dimensions like (Time, nCells)
00601         assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );
00602 
00603         // For a non-set variable, time should be the first dimension
00604         assert( tDim == vdatas[i].varDims[0] );
00605 
00606         // Set up readStarts and readCounts
00607         vdatas[i].readStarts.resize( 3 );
00608         vdatas[i].readCounts.resize( 3 );
00609 
00610         // First: Time
00611         vdatas[i].readStarts[0] = 0;  // This value is timestep dependent, will be set later
00612         vdatas[i].readCounts[0] = 1;
00613 
00614         // Next: nVertices / nCells / nEdges
00615         switch( vdatas[i].entLoc )
00616         {
00617             case ReadNC::ENTLOCVERT:
00618                 // Vertices
00619                 // Start from the first localGidVerts
00620                 // Actually, this will be reset later on in a loop
00621                 vdatas[i].readStarts[1] = localGidVerts[0] - 1;
00622                 vdatas[i].readCounts[1] = nLocalVertices;
00623                 range                   = &verts;
00624                 break;
00625             case ReadNC::ENTLOCFACE:
00626                 // Faces
00627                 // Start from the first localGidCells
00628                 // Actually, this will be reset later on in a loop
00629                 vdatas[i].readStarts[1] = localGidCells[0] - 1;
00630                 vdatas[i].readCounts[1] = nLocalCells;
00631                 range                   = &facesOwned;
00632                 break;
00633             case ReadNC::ENTLOCEDGE:
00634                 // Edges
00635                 // Start from the first localGidEdges
00636                 // Actually, this will be reset later on in a loop
00637                 vdatas[i].readStarts[1] = localGidEdges[0] - 1;
00638                 vdatas[i].readCounts[1] = nLocalEdges;
00639                 range                   = &edges;
00640                 break;
00641             default:
00642                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00643         }
00644 
00645         // Finally: nVertLevels or other optional levels. It is possible that there is no
00646         // level dimension (numLev is 0) for this non-set variable, e.g. (Time, nCells)
00647         if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
00648         vdatas[i].readStarts[2] = 0;
00649         vdatas[i].readCounts[2] = vdatas[i].numLev;
00650 
00651         // Get variable size
00652         vdatas[i].sz = 1;
00653         for( std::size_t idx = 0; idx != 3; idx++ )
00654             vdatas[i].sz *= vdatas[i].readCounts[idx];
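              // Example of the resulting read shape (illustrative numbers): for a cell variable
              // with nLocalCells = 1000 and numLev = 40, readCounts = {1, 1000, 40} and
              // sz = 40000 values per timestep (readStarts[1]/readCounts[1] are reset per
              // subrange during the actual read).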
00655 
00656         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00657         {
00658             dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );
00659 
00660             if( tstep_nums[t] >= dimLens[tDim] )
00661             { MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] ); }
00662 
00663             // Get the tag to read into
00664             if( !vdatas[i].varTags[t] )
00665             {
00666                 rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
00667             }
00668 
00669             // Get ptr to tag space
00670             if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
00671             {
00672                 // For a cell variable that is NOT on one contiguous chunk of faces, defer its tag
00673                 // space allocation
00674                 vdatas[i].varDatas[t] = NULL;
00675             }
00676             else
00677             {
00678                 assert( 1 == range->psize() );
00679                 void* data;
00680                 int count;
00681                 rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
00682                 assert( (unsigned)count == range->size() );
00683                 vdatas[i].varDatas[t] = data;
00684             }
00685         }
00686     }
00687 
00688     return rval;
00689 }
00690 
00691 #ifdef MOAB_HAVE_PNETCDF
00692 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
00693                                                             std::vector< int >& tstep_nums )
00694 {
00695     Interface*& mbImpl  = _readNC->mbImpl;
00696     bool& noEdges       = _readNC->noEdges;
00697     DebugOutput& dbgOut = _readNC->dbgOut;
00698 
00699     ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
00700 
00701     // Finally, read into that space
00702     int success;
00703     Range* pLocalGid = NULL;
00704 
00705     for( unsigned int i = 0; i < vdatas.size(); i++ )
00706     {
00707         // Skip edge variables, if specified by the read options
00708         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00709 
00710         switch( vdatas[i].entLoc )
00711         {
00712             case ReadNC::ENTLOCVERT:
00713                 pLocalGid = &localGidVerts;
00714                 break;
00715             case ReadNC::ENTLOCFACE:
00716                 pLocalGid = &localGidCells;
00717                 break;
00718             case ReadNC::ENTLOCEDGE:
00719                 pLocalGid = &localGidEdges;
00720                 break;
00721             default:
00722                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00723         }
00724 
00725         std::size_t sz = vdatas[i].sz;
00726 
00727         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00728         {
00729             // We will synchronize all these reads with the other processors,
00730             // so the wait will be inside this double loop; is it too much?
00731             size_t nb_reads = pLocalGid->psize();
00732             std::vector< int > requests( nb_reads ), statuss( nb_reads );
00733             size_t idxReq = 0;
00734 
00735             // Set readStart for each timestep along time dimension
00736             vdatas[i].readStarts[0] = tstep_nums[t];
00737 
00738             switch( vdatas[i].varDataType )
00739             {
00740                 case NC_FLOAT:
00741                 case NC_DOUBLE: {
00742                     // Read float as double
00743                     std::vector< double > tmpdoubledata( sz );
00744 
00745                     // In the case of a ucd mesh on multiple procs,
00746                     // we need to read as many times as there are subranges in the
00747                     // localGid range;
00748                     // basically, we have to give a different starting offset
00749                     // into the data buffer for every subrange
00750                     size_t indexInDoubleArray = 0;
00751                     size_t ic                 = 0;
00752                     for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
00753                          ++pair_iter, ic++ )
00754                     {
00755                         EntityHandle starth     = pair_iter->first;
00756                         EntityHandle endh       = pair_iter->second;  // Inclusive
00757                         vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
00758                         vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );
00759 
00760                         // Do a partial read, in each subrange
00761                         // Wait outside this loop
00762                         success =
00763                             NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
00764                                                         &( vdatas[i].readCounts[0] ),
00765                                                         &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
00766                         if( success )
00767                             MB_SET_ERR( MB_FAILURE,
00768                                         "Failed to read double data in a loop for variable " << vdatas[i].varName );
00769                         // We need to increment the index in double array for the
00770                         // next subrange
00771                         indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
00772                     }
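                          // Offset example (illustrative): with pLocalGid = {1..100, 201..250} and
                          // numLev = 40, the first request fills tmpdoubledata[0..3999] and the
                          // second starts writing at indexInDoubleArray = 100 * 40 = 4000.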
00773                     assert( ic == pLocalGid->psize() );
00774 
00775                     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
00776                     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
00777 
00778                     if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
00779                     {
00780                         // For a cell variable that is NOT on one contiguous chunk of faces,
00781                         // allocate tag space for each cell group, and utilize cellHandleToGlobalID
00782                         // map to read tag data
00783                         Range::iterator iter = facesOwned.begin();
00784                         while( iter != facesOwned.end() )
00785                         {
00786                             int count;
00787                             void* ptr;
00788                             rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );
00789 
00790                             for( int j = 0; j < count; j++ )
00791                             {
00792                                 int global_cell_idx =
00793                                     cellHandleToGlobalID[*( iter + j )];  // Global cell index, 1 based
00794                                 int local_cell_idx =
00795                                     localGidCells.index( global_cell_idx );  // Local cell index, 0 based
00796                                 assert( local_cell_idx != -1 );
00797                                 for( int level = 0; level < vdatas[i].numLev; level++ )
00798                                     ( (double*)ptr )[j * vdatas[i].numLev + level] =
00799                                         tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
00800                             }
00801 
00802                             iter += count;
00803                         }
00804                     }
00805                     else
00806                     {
00807                         void* data = vdatas[i].varDatas[t];
00808                         for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
00809                             ( (double*)data )[idx] = tmpdoubledata[idx];
00810                     }
00811 
00812                     break;
00813                 }
00814                 default:
00815                     MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
00816             }
00817         }
00818     }
00819 
00820     // Debug output, if requested
00821     if( 1 == dbgOut.get_verbosity() )
00822     {
00823         dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
00824         for( unsigned int i = 1; i < vdatas.size(); i++ )
00825             dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
00826         dbgOut.tprintf( 1, "\n" );
00827     }
00828 
00829     return rval;
00830 }
00831 #else
00832 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
00833                                                       std::vector< int >& tstep_nums )
00834 {
00835     Interface*& mbImpl = _readNC->mbImpl;
00836     bool& noEdges = _readNC->noEdges;
00837     DebugOutput& dbgOut = _readNC->dbgOut;
00838 
00839     ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
00840 
00841     // Finally, read into that space
00842     int success;
00843     Range* pLocalGid = NULL;
00844 
00845     for( unsigned int i = 0; i < vdatas.size(); i++ )
00846     {
00847         // Skip edge variables, if specified by the read options
00848         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00849 
00850         switch( vdatas[i].entLoc )
00851         {
00852             case ReadNC::ENTLOCVERT:
00853                 pLocalGid = &localGidVerts;
00854                 break;
00855             case ReadNC::ENTLOCFACE:
00856                 pLocalGid = &localGidCells;
00857                 break;
00858             case ReadNC::ENTLOCEDGE:
00859                 pLocalGid = &localGidEdges;
00860                 break;
00861             default:
00862                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00863         }
00864 
00865         std::size_t sz = vdatas[i].sz;
00866 
00867         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00868         {
00869             // Set readStart for each timestep along time dimension
00870             vdatas[i].readStarts[0] = tstep_nums[t];
00871 
00872             switch( vdatas[i].varDataType )
00873             {
00874                 case NC_FLOAT:
00875                 case NC_DOUBLE: {
00876                     // Read float as double
00877                     std::vector< double > tmpdoubledata( sz );
00878 
00879                     // In the case of a ucd mesh on multiple procs,
00880                     // we need to read as many times as there are subranges in the
00881                     // localGid range;
00882                     // basically, we have to give a different starting offset
00883                     // into the data buffer for every subrange
00884                     size_t indexInDoubleArray = 0;
00885                     size_t ic = 0;
00886                     for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
00887                          ++pair_iter, ic++ )
00888                     {
00889                         EntityHandle starth = pair_iter->first;
00890                         EntityHandle endh = pair_iter->second;  // Inclusive
00891                         vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
00892                         vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );
00893 
00894                         success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
00895                                                             &( vdatas[i].readCounts[0] ),
00896                                                             &( tmpdoubledata[indexInDoubleArray] ) );
00897                         if( success )
00898                             MB_SET_ERR( MB_FAILURE,
00899                                         "Failed to read double data in a loop for variable " << vdatas[i].varName );
00900                         // We need to increment the index in double array for the
00901                         // next subrange
00902                         indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
00903                     }
00904                     assert( ic == pLocalGid->psize() );
00905 
00906                     if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
00907                     {
00908                         // For a cell variable that is NOT on one contiguous chunk of faces,
00909                         // allocate tag space for each cell group, and utilize cellHandleToGlobalID
00910                         // map to read tag data
00911                         Range::iterator iter = facesOwned.begin();
00912                         while( iter != facesOwned.end() )
00913                         {
00914                             int count;
00915                             void* ptr;
00916                             rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );
00917 
00918                             for( int j = 0; j < count; j++ )
00919                             {
00920                                 int global_cell_idx =
00921                                     cellHandleToGlobalID[*( iter + j )];  // Global cell index, 1 based
00922                                 int local_cell_idx =
00923                                     localGidCells.index( global_cell_idx );  // Local cell index, 0 based
00924                                 assert( local_cell_idx != -1 );
00925                                 for( int level = 0; level < vdatas[i].numLev; level++ )
00926                                     ( (double*)ptr )[j * vdatas[i].numLev + level] =
00927                                         tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
00928                             }
00929 
00930                             iter += count;
00931                         }
00932                     }
00933                     else
00934                     {
00935                         void* data = vdatas[i].varDatas[t];
00936                         for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
00937                             ( (double*)data )[idx] = tmpdoubledata[idx];
00938                     }
00939 
00940                     break;
00941                 }
00942                 default:
00943                     MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
00944             }
00945         }
00946     }
00947 
00948     // Debug output, if requested
00949     if( 1 == dbgOut.get_verbosity() )
00950     {
00951         dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
00952         for( unsigned int i = 1; i < vdatas.size(); i++ )
00953             dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
00954         dbgOut.tprintf( 1, "\n" );
00955     }
00956 
00957     return rval;
00958 }
00959 #endif
00960 
00961 #ifdef MOAB_HAVE_MPI
00962 ErrorCode NCHelperMPAS::redistribute_local_cells( int start_cell_idx )
00963 {
00964     // If possible, apply Zoltan partition
00965 #ifdef MOAB_HAVE_ZOLTAN
00966     if( ScdParData::RCBZOLTAN == _readNC->partMethod )
00967     {
00968         // Read x coordinates of cell centers
00969         int xCellVarId;
00970         int success = NCFUNC( inq_varid )( _fileId, "xCell", &xCellVarId );
00971         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xCell" );
00972         std::vector< double > xCell( nLocalCells );
00973         NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
00974         NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
00975         success              = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
00976         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xCell data" );
00977 
00978         // Read y coordinates of cell centers
00979         int yCellVarId;
00980         success = NCFUNC( inq_varid )( _fileId, "yCell", &yCellVarId );
00981         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yCell" );
00982         std::vector< double > yCell( nLocalCells );
00983         success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
00984         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yCell data" );
00985 
00986         // Read z coordinates of cell centers
00987         int zCellVarId;
00988         success = NCFUNC( inq_varid )( _fileId, "zCell", &zCellVarId );
00989         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zCell" );
00990         std::vector< double > zCell( nLocalCells );
00991         success = NCFUNCAG( _vara_double )( _fileId, zCellVarId, &read_start, &read_count, &zCell[0] );
00992         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zCell data" );
00993 
00994         // Zoltan partition using RCB; more study would be useful to determine which partition
00995         // method is better
00996         Interface*& mbImpl         = _readNC->mbImpl;
00997         DebugOutput& dbgOut        = _readNC->dbgOut;
00998         ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, false, 0, NULL );
00999         ErrorCode rval             = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
01000         delete mbZTool;
01001 
01002         dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
01003         dbgOut.tprintf( 1, "                           localGidCells.size() = %d\n", (int)localGidCells.size() );
01004 
01005         // This is important: local cells are now redistributed, so nLocalCells might be different!
01006         nLocalCells = localGidCells.size();
01007 
01008         return MB_SUCCESS;
01009     }
01010 #endif
01011 
01012     // By default, apply trivial partition
01013     localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
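          // e.g. with start_cell_idx = 5 and nLocalCells = 3, this inserts the contiguous GID range {5, 6, 7}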
01014 
01015     return MB_SUCCESS;
01016 }
01017 #endif
01018 
01019 ErrorCode NCHelperMPAS::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
01020                                                EntityHandle& start_vertex )
01021 {
01022     Interface*& mbImpl      = _readNC->mbImpl;
01023     Tag& mGlobalIdTag       = _readNC->mGlobalIdTag;
01024     const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
01025     DebugOutput& dbgOut     = _readNC->dbgOut;
01026 
01027     // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell
01028     // connectivity later)
01029     std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
01030     std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
01031     std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
01032                range_inserter( localGidVerts ) );
01033     nLocalVertices = localGidVerts.size();
01034 
01035     dbgOut.tprintf( 1, "   localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
01036     dbgOut.tprintf( 1, "   localGidVerts.size() = %d\n", (int)localGidVerts.size() );
01037 
01038     // Create local vertices
01039     std::vector< double* > arrays;
01040     ErrorCode rval =
01041         _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
01042                                                  // Might have to create gather mesh later
01043                                                  ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );
01044 
01045     // Add local vertices to current file set
01046     Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
01047     rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );
01048 
01049     // Get ptr to GID memory for local vertices
01050     int count  = 0;
01051     void* data = NULL;
01052     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
01053     assert( count == nLocalVertices );
01054     int* gid_data = (int*)data;
01055     std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
01056 
01057     // Duplicate GID data, which will be used to resolve sharing
01058     if( mpFileIdTag )
01059     {
01060         rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
01061         assert( count == nLocalVertices );
01062         int bytes_per_tag = 4;
01063         rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
01064         if( 4 == bytes_per_tag )
01065         {
01066             gid_data = (int*)data;
01067             std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
01068         }
01069         else if( 8 == bytes_per_tag )
01070         {  // Should be a handle tag on 64 bit machine?
01071             long* handle_tag_data = (long*)data;
01072             std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
01073         }
01074     }
01075 
01076 #ifdef MOAB_HAVE_PNETCDF
01077     size_t nb_reads = localGidVerts.psize();
01078     std::vector< int > requests( nb_reads );
01079     std::vector< int > statuss( nb_reads );
01080     size_t idxReq = 0;
01081 #endif
01082 
01083     // Read x coordinates for local vertices
01084     double* xptr = arrays[0];
01085     int xVertexVarId;
01086     int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
01087     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
01088     size_t indexInArray = 0;
01089     for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
01090          ++pair_iter )
01091     {
01092         EntityHandle starth  = pair_iter->first;
01093         EntityHandle endh    = pair_iter->second;
01094         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
01095         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
01096 
01097         // Do a partial read in each subrange
01098 #ifdef MOAB_HAVE_PNETCDF
01099         success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
01100                                               &requests[idxReq++] );
01101 #else
01102         success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
01103 #endif
01104         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data in a loop" );
01105 
01106         // Increment the index for next subrange
01107         indexInArray += ( endh - starth + 1 );
01108     }
01109 
01110 #ifdef MOAB_HAVE_PNETCDF
01111     // Wait outside the loop
01112     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01113     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01114 #endif
01115 
01116     // Read y coordinates for local vertices
01117     double* yptr = arrays[1];
01118     int yVertexVarId;
01119     success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
01120     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
01121 #ifdef MOAB_HAVE_PNETCDF
01122     idxReq = 0;
01123 #endif
01124     indexInArray = 0;
01125     for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
01126          ++pair_iter )
01127     {
01128         EntityHandle starth  = pair_iter->first;
01129         EntityHandle endh    = pair_iter->second;
01130         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
01131         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
01132 
01133         // Do a partial read in each subrange
01134 #ifdef MOAB_HAVE_PNETCDF
01135         success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
01136                                               &requests[idxReq++] );
01137 #else
01138         success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
01139 #endif
01140         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data in a loop" );
01141 
01142         // Increment the index for next subrange
01143         indexInArray += ( endh - starth + 1 );
01144     }
01145 
01146 #ifdef MOAB_HAVE_PNETCDF
01147     // Wait outside the loop
01148     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01149     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01150 #endif
01151 
01152     // Read z coordinates for local vertices
01153     double* zptr = arrays[2];
01154     int zVertexVarId;
01155     success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
01156     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
01157 #ifdef MOAB_HAVE_PNETCDF
01158     idxReq = 0;
01159 #endif
01160     indexInArray = 0;
01161     for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
01162          ++pair_iter )
01163     {
01164         EntityHandle starth  = pair_iter->first;
01165         EntityHandle endh    = pair_iter->second;
01166         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
01167         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
01168 
01169         // Do a partial read in each subrange
01170 #ifdef MOAB_HAVE_PNETCDF
01171         success = NCFUNCREQG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ),
01172                                               &requests[idxReq++] );
01173 #else
01174         success = NCFUNCAG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ) );
01175 #endif
01176         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data in a loop" );
01177 
01178         // Increment the index for next subrange
01179         indexInArray += ( endh - starth + 1 );
01180     }
01181 
01182 #ifdef MOAB_HAVE_PNETCDF
01183     // Wait outside the loop
01184     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01185     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01186 #endif
01187 
01188     return MB_SUCCESS;
01189 }
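// Note on the coordinate reads above: each of the x/y/z arrays is filled one contiguous
// global-id subrange at a time (one read per pair in localGidVerts). When MOAB is built with
// PnetCDF, every subrange read is posted as a non-blocking request (NCFUNCREQG) and all
// requests for a coordinate are completed by a single wait_all; without PnetCDF, each subrange
// is read with a blocking call (NCFUNCAG). A rough sketch of the pattern, with hypothetical
// names used purely for illustration:
//
//   for each [start, end] subrange of local global ids:
//       read(var, start - 1, end - start + 1, &buffer[offset]);  // non-blocking with PnetCDF
//       offset += end - start + 1;
//   wait_all();                                                   // PnetCDF builds only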
01190 
01191 ErrorCode NCHelperMPAS::create_local_edges( EntityHandle start_vertex,
01192                                             const std::vector< int >& num_edges_on_local_cells )
01193 {
01194     Interface*& mbImpl  = _readNC->mbImpl;
01195     Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
01196     DebugOutput& dbgOut = _readNC->dbgOut;
01197 
01198     // Read edges on each local cell, to get localGidEdges
01199     int edgesOnCellVarId;
01200     int success = NCFUNC( inq_varid )( _fileId, "edgesOnCell", &edgesOnCellVarId );
01201     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edgesOnCell" );
01202 
01203     std::vector< int > edges_on_local_cells( nLocalCells * maxEdgesPerCell );
01204     dbgOut.tprintf( 1, "   edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );
01205 
01206 #ifdef MOAB_HAVE_PNETCDF
01207     size_t nb_reads = localGidCells.psize();
01208     std::vector< int > requests( nb_reads );
01209     std::vector< int > statuss( nb_reads );
01210     size_t idxReq = 0;
01211 #endif
01212     size_t indexInArray = 0;
01213     for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
01214          ++pair_iter )
01215     {
01216         EntityHandle starth      = pair_iter->first;
01217         EntityHandle endh        = pair_iter->second;
01218         NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
01219         NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
01220                                      static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
01221 
01222         // Do a partial read in each subrange
01223 #ifdef MOAB_HAVE_PNETCDF
01224         success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
01225                                            &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
01226 #else
01227         success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
01228                                          &( edges_on_local_cells[indexInArray] ) );
01229 #endif
01230         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edgesOnCell data in a loop" );
01231 
01232         // Increment the index for next subrange
01233         indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
01234     }
01235 
01236 #ifdef MOAB_HAVE_PNETCDF
01237     // Wait outside the loop
01238     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01239     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01240 #endif
01241 
01242     // Correct the local cell edges array in the same way as the local cell vertices array:
01243     // replace the padded edge entries with the last edge of the corresponding cell
01244     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
01245     {
01246         int num_edges             = num_edges_on_local_cells[local_cell_idx];
01247         int idx_in_local_edge_arr = local_cell_idx * maxEdgesPerCell;
01248         int last_edge_idx         = edges_on_local_cells[idx_in_local_edge_arr + num_edges - 1];
01249         for( int i = num_edges; i < maxEdgesPerCell; i++ )
01250             edges_on_local_cells[idx_in_local_edge_arr + i] = last_edge_idx;
01251     }
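    // For illustration (values are hypothetical): with maxEdgesPerCell = 10, a pentagon whose
    // edgesOnCell row reads 3 7 8 12 15 followed by five padded slots becomes
    // 3 7 8 12 15 15 15 15 15 15, so the padding only repeats an edge that is already local.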
01252 
01253     // Collect local edges
01254     std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
01255     std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
01256     nLocalEdges = localGidEdges.size();
01257 
01258     dbgOut.tprintf( 1, "   localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
01259     dbgOut.tprintf( 1, "   localGidEdges.size() = %d\n", (int)localGidEdges.size() );
01260 
01261     // Create local edges
01262     EntityHandle start_edge;
01263     EntityHandle* conn_arr_edges = NULL;
01264     ErrorCode rval =
01265         _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
01266                                                      // Might have to create gather mesh later
01267                                                      ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );
01268 
01269     // Add local edges to current file set
01270     Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
01271     rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );
01272 
01273     // Get ptr to GID memory for edges
01274     int count  = 0;
01275     void* data = NULL;
01276     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
01277     assert( count == nLocalEdges );
01278     int* gid_data = (int*)data;
01279     std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );
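    // Since the local edges were just created as one contiguous sequence, tag_iterate is
    // expected to expose the global id storage for the whole range in a single block (checked
    // by the assert above), which is why the ids can be written with one std::copy.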
01280 
01281     int verticesOnEdgeVarId;
01282 
01283     // Read vertices on each local edge, to get edge connectivity
01284     success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
01285     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
01286     // Utilize the memory storage pointed by conn_arr_edges
01287     int* vertices_on_local_edges = (int*)conn_arr_edges;
01288 #ifdef MOAB_HAVE_PNETCDF
01289     nb_reads = localGidEdges.psize();
01290     requests.resize( nb_reads );
01291     statuss.resize( nb_reads );
01292     idxReq = 0;
01293 #endif
01294     indexInArray = 0;
01295     for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
01296          ++pair_iter )
01297     {
01298         EntityHandle starth      = pair_iter->first;
01299         EntityHandle endh        = pair_iter->second;
01300         NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
01301         NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };
01302 
01303         // Do a partial read in each subrange
01304 #ifdef MOAB_HAVE_PNETCDF
01305         success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
01306                                            &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
01307 #else
01308         success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
01309                                          &( vertices_on_local_edges[indexInArray] ) );
01310 #endif
01311         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data in a loop" );
01312 
01313         // Increment the index for next subrange
01314         indexInArray += ( endh - starth + 1 ) * 2;
01315     }
01316 
01317 #ifdef MOAB_HAVE_PNETCDF
01318     // Wait outside the loop
01319     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01320     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01321 #endif
01322 
01323     // Populate connectivity data for local edges
01324     // Convert in-place from int (stored in the first half) to EntityHandle
01325     // Reading backward is the trick
01326     for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
01327     {
01328         int global_vert_idx = vertices_on_local_edges[edge_vert];      // Global vertex index, 1 based
01329         int local_vert_idx  = localGidVerts.index( global_vert_idx );  // Local vertex index, 0 based
01330         assert( local_vert_idx != -1 );
01331         conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
01332     }
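    // Why backward: vertices_on_local_edges aliases conn_arr_edges, so on a typical 64-bit
    // build the 4-byte ints occupy the front half of the 8-byte EntityHandle buffer. Writing
    // conn_arr_edges[i] overwrites the ints at positions 2*i and 2*i + 1, which have already
    // been consumed when i is processed in descending order; a forward loop would clobber
    // int entries that have not been read yet.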
01333 
01334     return MB_SUCCESS;
01335 }
01336 
01337 ErrorCode NCHelperMPAS::create_local_cells( const std::vector< int >& vertices_on_local_cells,
01338                                             const std::vector< int >& num_edges_on_local_cells,
01339                                             EntityHandle start_vertex, Range& faces )
01340 {
01341     Interface*& mbImpl = _readNC->mbImpl;
01342     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
01343 
01344     // Divide local cells into groups based on the number of edges
01345     Range local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01346     // Insert larger values before smaller ones to increase efficiency
01347     for( int i = nLocalCells - 1; i >= 0; i-- )
01348     {
01349         int num_edges = num_edges_on_local_cells[i];
01350         local_cells_with_n_edges[num_edges].insert( localGidCells[i] );  // Global cell index
01351     }
01352 
01353     std::vector< int > num_edges_on_cell_groups;
01354     for( int i = 3; i <= maxEdgesPerCell; i++ )
01355     {
01356         if( local_cells_with_n_edges[i].size() > 0 ) num_edges_on_cell_groups.push_back( i );
01357     }
01358     numCellGroups = num_edges_on_cell_groups.size();
01359 
01360     EntityHandle* conn_arr_local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01361     for( int i = 0; i < numCellGroups; i++ )
01362     {
01363         int num_edges_per_cell = num_edges_on_cell_groups[i];
01364         int num_group_cells    = (int)local_cells_with_n_edges[num_edges_per_cell].size();
01365 
01366         // Create local cells for each non-empty cell group
01367         EntityHandle start_element;
01368         ErrorCode rval = _readNC->readMeshIface->get_element_connect(
01369             num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
01370             conn_arr_local_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
01371         faces.insert( start_element, start_element + num_group_cells - 1 );
01372 
01373         // Add local cells to current file set
01374         Range local_cells_range( start_element, start_element + num_group_cells - 1 );
01375         rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
01376 
01377         // Get ptr to gid memory for local cells
01378         int count  = 0;
01379         void* data = NULL;
01380         rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
01381         assert( count == num_group_cells );
01382         int* gid_data = (int*)data;
01383         std::copy( local_cells_with_n_edges[num_edges_per_cell].begin(),
01384                    local_cells_with_n_edges[num_edges_per_cell].end(), gid_data );
01385 
01386         // Set connectivity array with proper local vertices handles
01387         for( int j = 0; j < num_group_cells; j++ )
01388         {
01389             EntityHandle global_cell_idx =
01390                 local_cells_with_n_edges[num_edges_per_cell][j];          // Global cell index, 1 based
01391             int local_cell_idx = localGidCells.index( global_cell_idx );  // Local cell index, 0 based
01392             assert( local_cell_idx != -1 );
01393 
01394             if( numCellGroups > 1 )
01395             {
01396                 // Populate cellHandleToGlobalID map to read cell variables
01397                 cellHandleToGlobalID[start_element + j] = global_cell_idx;
01398             }
01399 
01400             for( int k = 0; k < num_edges_per_cell; k++ )
01401             {
01402                 EntityHandle global_vert_idx =
01403                     vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + k];  // Global vertex index, 1 based
01404                 int local_vert_idx = localGidVerts.index( global_vert_idx );        // Local vertex index, 0 based
01405                 assert( local_vert_idx != -1 );
01406                 conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
01407                     start_vertex + local_vert_idx;
01408             }
01409         }
01410     }
01411 
01412     return MB_SUCCESS;
01413 }
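// Note on create_local_cells above: local cells are bucketed by edge count and one MBPOLYGON
// sequence is allocated per non-empty bucket (e.g. all pentagons share one connectivity array,
// all hexagons another). When more than one bucket exists, cell handles are no longer ordered
// by global id across the whole set, which is why cellHandleToGlobalID is populated for the
// later reading of cell variables.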
01414 
01415 ErrorCode NCHelperMPAS::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
01416                                                    EntityHandle start_vertex, Range& faces )
01417 {
01418     Interface*& mbImpl = _readNC->mbImpl;
01419     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
01420 
01421     // Only one group of cells (each cell is represented by a polygon with maxEdgesPerCell edges)
01422     numCellGroups = 1;
01423 
01424     // Create cells for this cell group
01425     EntityHandle start_element;
01426     EntityHandle* conn_arr_local_cells = NULL;
01427     ErrorCode rval =
01428         _readNC->readMeshIface->get_element_connect( nLocalCells, maxEdgesPerCell, MBPOLYGON, 0, start_element,
01429                                                      conn_arr_local_cells,
01430                                                      // Might have to create gather mesh later
01431                                                      ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
01432     faces.insert( start_element, start_element + nLocalCells - 1 );
01433 
01434     // Add local cells to current file set
01435     Range local_cells_range( start_element, start_element + nLocalCells - 1 );
01436     rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
01437 
01438     // Get ptr to GID memory for local cells
01439     int count  = 0;
01440     void* data = NULL;
01441     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
01442     assert( count == nLocalCells );
01443     int* gid_data = (int*)data;
01444     std::copy( localGidCells.begin(), localGidCells.end(), gid_data );
01445 
01446     // Set connectivity array with proper local vertices handles
01447     // The vertices_on_local_cells array was already corrected to pad each cell with its last vertex,
01448     // so no extra checks are needed here
01449     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
01450     {
01451         for( int i = 0; i < maxEdgesPerCell; i++ )
01452         {
01453             EntityHandle global_vert_idx =
01454                 vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + i];  // Global vertex index, 1 based
01455             int local_vert_idx = localGidVerts.index( global_vert_idx );        // Local vertex index, 0 based
01456             assert( local_vert_idx != -1 );
01457             conn_arr_local_cells[local_cell_idx * maxEdgesPerCell + i] = start_vertex + local_vert_idx;
01458         }
01459     }
01460 
01461     return MB_SUCCESS;
01462 }
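// Note on create_padded_local_cells above: every cell is stored as a polygon with exactly
// maxEdgesPerCell vertices, repeating the last vertex for cells that have fewer edges. This
// keeps all local cells in a single group (numCellGroups = 1) at the cost of some duplicated
// connectivity entries.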
01463 
01464 ErrorCode NCHelperMPAS::create_gather_set_vertices( EntityHandle gather_set, EntityHandle& gather_set_start_vertex )
01465 {
01466     Interface*& mbImpl      = _readNC->mbImpl;
01467     Tag& mGlobalIdTag       = _readNC->mGlobalIdTag;
01468     const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
01469 
01470     // Create gather set vertices
01471     std::vector< double* > arrays;
01472     // Don't need to specify allocation number here, because we know enough vertices were created
01473     // before
01474     ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );
01475 
01476     // Add vertices to the gather set
01477     Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
01478     rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );
01479 
01480     // Read x coordinates for gather set vertices
01481     double* xptr = arrays[0];
01482     int xVertexVarId;
01483     int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
01484     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
01485     NCDF_SIZE read_start = 0;
01486     NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
01487 #ifdef MOAB_HAVE_PNETCDF
01488     // Enter independent I/O mode, since this read is only for the gather processor
01489     success = NCFUNC( begin_indep_data )( _fileId );
01490     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01491     success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
01492     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
01493     success = NCFUNC( end_indep_data )( _fileId );
01494     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01495 #else
01496     success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
01497     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
01498 #endif
01499 
01500     // Read y coordinates for gather set vertices
01501     double* yptr = arrays[1];
01502     int yVertexVarId;
01503     success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
01504     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
01505 #ifdef MOAB_HAVE_PNETCDF
01506     // Enter independent I/O mode, since this read is only for the gather processor
01507     success = NCFUNC( begin_indep_data )( _fileId );
01508     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01509     success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
01510     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
01511     success = NCFUNC( end_indep_data )( _fileId );
01512     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01513 #else
01514     success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
01515     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
01516 #endif
01517 
01518     // Read z coordinates for gather set vertices
01519     double* zptr = arrays[2];
01520     int zVertexVarId;
01521     success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
01522     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
01523 #ifdef MOAB_HAVE_PNETCDF
01524     // Enter independent I/O mode, since this read is only for the gather processor
01525     success = NCFUNC( begin_indep_data )( _fileId );
01526     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01527     success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
01528     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
01529     success = NCFUNC( end_indep_data )( _fileId );
01530     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01531 #else
01532     success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
01533     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
01534 #endif
01535 
01536     // Get ptr to GID memory for gather set vertices
01537     int count  = 0;
01538     void* data = NULL;
01539     rval =
01540         mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
01541     assert( count == nVertices );
01542     int* gid_data = (int*)data;
01543     for( int j = 1; j <= nVertices; j++ )
01544         gid_data[j - 1] = j;
01545 
01546     // Set the file id tag too; use values larger than nVertices so they do not interfere with the global ids
01547     if( mpFileIdTag )
01548     {
01549         rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
01550                                     data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
01551         assert( count == nVertices );
01552         int bytes_per_tag = 4;
01553         rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
01554         if( 4 == bytes_per_tag )
01555         {
01556             gid_data = (int*)data;
01557             for( int j = 1; j <= nVertices; j++ )
01558                 gid_data[j - 1] = nVertices + j;  // Bigger than global id tag
01559         }
01560         else if( 8 == bytes_per_tag )
01561         {  // Should be a handle tag on 64 bit machine?
01562             long* handle_tag_data = (long*)data;
01563             for( int j = 1; j <= nVertices; j++ )
01564                 handle_tag_data[j - 1] = nVertices + j;  // Bigger than global id tag
01565         }
01566     }
01567 
01568     return MB_SUCCESS;
01569 }
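// Note on the gather set reads above: the whole-mesh arrays are read only on the gather
// processor, so with PnetCDF each read is wrapped in begin_indep_data/end_indep_data to switch
// from collective to independent I/O for the duration of the call. The same pattern is used for
// the gather set edges and cells below.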
01570 
01571 ErrorCode NCHelperMPAS::create_gather_set_edges( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01572 {
01573     Interface*& mbImpl = _readNC->mbImpl;
01574 
01575     // Create gather set edges
01576     EntityHandle start_edge;
01577     EntityHandle* conn_arr_gather_set_edges = NULL;
01578     // Don't need to specify allocation number here, because we know enough edges were created
01579     // before
01580     ErrorCode rval =
01581         _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );
01582 
01583     // Add edges to the gather set
01584     Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
01585     rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );
01586 
01587     // Read vertices on each edge
01588     int verticesOnEdgeVarId;
01589     int success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
01590     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
01591     // Utilize the memory storage pointed by conn_arr_gather_set_edges
01592     int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
01593     NCDF_SIZE read_starts[2]          = { 0, 0 };
01594     NCDF_SIZE read_counts[2]          = { static_cast< NCDF_SIZE >( nEdges ), 2 };
01595 #ifdef MOAB_HAVE_PNETCDF
01596     // Enter independent I/O mode, since this read is only for the gather processor
01597     success = NCFUNC( begin_indep_data )( _fileId );
01598     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01599     success =
01600         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01601     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
01602     success = NCFUNC( end_indep_data )( _fileId );
01603     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01604 #else
01605     success =
01606         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01607     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
01608 #endif
01609 
01610     // Populate connectivity data for gather set edges
01611     // Convert in-place from int (stored in the first half) to EntityHandle
01612     // Reading backward is the trick
01613     for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
01614     {
01615         int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert];  // Global vertex index, 1 based
01616         gather_set_vert_idx--;                                              // 1 based -> 0 based
01617         // Connectivity array is shifted by where the gather set vertices start
01618         conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
01619     }
01620 
01621     return MB_SUCCESS;
01622 }
01623 
01624 ErrorCode NCHelperMPAS::create_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01625 {
01626     Interface*& mbImpl = _readNC->mbImpl;
01627 
01628     // Read number of edges on each gather set cell
01629     int nEdgesOnCellVarId;
01630     int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
01631     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
01632     std::vector< int > num_edges_on_gather_set_cells( nCells );
01633     NCDF_SIZE read_start = 0;
01634     NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
01635 #ifdef MOAB_HAVE_PNETCDF
01636     // Enter independent I/O mode, since this read is only for the gather processor
01637     success = NCFUNC( begin_indep_data )( _fileId );
01638     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01639     success =
01640         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01641     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01642     success = NCFUNC( end_indep_data )( _fileId );
01643     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01644 #else
01645     success =
01646         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01647     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01648 #endif
01649 
01650     // Read vertices on each gather set cell (connectivity)
01651     int verticesOnCellVarId;
01652     success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
01653     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
01654     std::vector< int > vertices_on_gather_set_cells( nCells * maxEdgesPerCell );
01655     NCDF_SIZE read_starts[2] = { 0, 0 };
01656     NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
01657 #ifdef MOAB_HAVE_PNETCDF
01658     // Enter independent I/O mode, since this read is only for the gather processor
01659     success = NCFUNC( begin_indep_data )( _fileId );
01660     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01661     success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
01662                                     &vertices_on_gather_set_cells[0] );
01663     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01664     success = NCFUNC( end_indep_data )( _fileId );
01665     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01666 #else
01667     success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
01668                                     &vertices_on_gather_set_cells[0] );
01669     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01670 #endif
01671 
01672     // Divide gather set cells into groups based on the number of edges
01673     Range gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01674     // Insert larger values before smaller values to increase efficiency
01675     for( int i = nCells - 1; i >= 0; i-- )
01676     {
01677         int num_edges = num_edges_on_gather_set_cells[i];
01678         gather_set_cells_with_n_edges[num_edges].insert( i + 1 );  // 0 based -> 1 based
01679     }
01680 
01681     // Create gather set cells
01682     EntityHandle* conn_arr_gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01683     for( int num_edges_per_cell = 3; num_edges_per_cell <= maxEdgesPerCell; num_edges_per_cell++ )
01684     {
01685         int num_group_cells = gather_set_cells_with_n_edges[num_edges_per_cell].size();
01686         if( num_group_cells > 0 )
01687         {
01688             EntityHandle start_element;
01689             ErrorCode rval = _readNC->readMeshIface->get_element_connect(
01690                 num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
01691                 conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );
01692 
01693             // Add cells to the gather set
01694             Range gather_set_cells_range( start_element, start_element + num_group_cells - 1 );
01695             rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );
01696 
01697             for( int j = 0; j < num_group_cells; j++ )
01698             {
01699                 int gather_set_cell_idx =
01700                     gather_set_cells_with_n_edges[num_edges_per_cell][j];  // Global cell index, 1 based
01701                 gather_set_cell_idx--;                                     // 1 based -> 0 based
01702 
01703                 for( int k = 0; k < num_edges_per_cell; k++ )
01704                 {
01705                     EntityHandle gather_set_vert_idx =
01706                         vertices_on_gather_set_cells[gather_set_cell_idx * maxEdgesPerCell +
01707                                                      k];  // Global vertex index, 1 based
01708                     gather_set_vert_idx--;                // 1 based -> 0 based
01709 
01710                     // Connectivity array is shifted by where the gather set vertices start
01711                     conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
01712                         gather_set_start_vertex + gather_set_vert_idx;
01713                 }
01714             }
01715         }
01716     }
01717 
01718     return MB_SUCCESS;
01719 }
01720 
01721 ErrorCode NCHelperMPAS::create_padded_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01722 {
01723     Interface*& mbImpl = _readNC->mbImpl;
01724 
01725     // Read number of edges on each gather set cell
01726     int nEdgesOnCellVarId;
01727     int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
01728     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
01729     std::vector< int > num_edges_on_gather_set_cells( nCells );
01730     NCDF_SIZE read_start = 0;
01731     NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
01732 #ifdef MOAB_HAVE_PNETCDF
01733     // Enter independent I/O mode, since this read is only for the gather processor
01734     success = NCFUNC( begin_indep_data )( _fileId );
01735     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01736     success =
01737         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01738     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01739     success = NCFUNC( end_indep_data )( _fileId );
01740     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01741 #else
01742     success =
01743         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01744     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01745 #endif
01746 
01747     // Create gather set cells
01748     EntityHandle start_element;
01749     EntityHandle* conn_arr_gather_set_cells = NULL;
01750     // Don't need to specify allocation number here, because we know enough cells were created
01751     // before
01752     ErrorCode rval = _readNC->readMeshIface->get_element_connect( nCells, maxEdgesPerCell, MBPOLYGON, 0, start_element,
01753                                                                   conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );
01754 
01755     // Add cells to the gather set
01756     Range gather_set_cells_range( start_element, start_element + nCells - 1 );
01757     rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );
01758 
01759     // Read vertices on each gather set cell (connectivity)
01760     int verticesOnCellVarId;
01761     success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
01762     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
01763     // Utilize the memory storage pointed by conn_arr_gather_set_cells
01764     int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
01765     NCDF_SIZE read_starts[2]          = { 0, 0 };
01766     NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
01767 #ifdef MOAB_HAVE_PNETCDF
01768     // Enter independent I/O mode, since this read is only for the gather processor
01769     success = NCFUNC( begin_indep_data )( _fileId );
01770     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01771     success =
01772         NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
01773     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01774     success = NCFUNC( end_indep_data )( _fileId );
01775     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01776 #else
01777     success =
01778         NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
01779     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01780 #endif
01781 
01782     // Correct the gather set cell vertices array in the same way as the local cell vertices array:
01783     // replace the padded vertex entries with the last vertex of the corresponding cell
01784     for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
01785     {
01786         int num_edges                  = num_edges_on_gather_set_cells[gather_set_cell_idx];
01787         int idx_in_gather_set_vert_arr = gather_set_cell_idx * maxEdgesPerCell;
01788         int last_vert_idx              = vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + num_edges - 1];
01789         for( int i = num_edges; i < maxEdgesPerCell; i++ )
01790             vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + i] = last_vert_idx;
01791     }
01792 
01793     // Populate connectivity data for gather set cells
01794     // Convert in-place from int (stored in the first half) to EntityHandle
01795     // Reading backward is the trick
01796     for( int cell_vert = nCells * maxEdgesPerCell - 1; cell_vert >= 0; cell_vert-- )
01797     {
01798         int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert];  // Global vertex index, 1 based
01799         gather_set_vert_idx--;                                              // 1 based -> 0 based
01800         // Connectivity array is shifted by where the gather set vertices start
01801         conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
01802     }
01803 
01804     return MB_SUCCESS;
01805 }
01806 
01807 }  // namespace moab