MOAB: Mesh Oriented datABase  (version 5.3.0)
NCHelperMPAS.cpp
00001 #include "NCHelperMPAS.hpp"
00002 #include "moab/ReadUtilIface.hpp"
00003 #include "moab/FileOptions.hpp"
00004 #include "moab/SpectralMeshTool.hpp"
00005 #include "MBTagConventions.hpp"
00006 
00007 #ifdef MOAB_HAVE_ZOLTAN
00008 #include "moab/ZoltanPartitioner.hpp"
00009 #endif
00010 
00011 #include <cmath>
00012 
00013 namespace moab
00014 {
00015 
00016 const int DEFAULT_MAX_EDGES_PER_CELL = 10;
00017 
00018 NCHelperMPAS::NCHelperMPAS( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
00019     : UcdNCHelper( readNC, fileId, opts, fileSet ), maxEdgesPerCell( DEFAULT_MAX_EDGES_PER_CELL ), numCellGroups( 0 ),
00020       createGatherSet( false )
00021 {
00022     // Ignore variables containing topological information
00023     ignoredVarNames.insert( "nEdgesOnEdge" );
00024     ignoredVarNames.insert( "nEdgesOnCell" );
00025     ignoredVarNames.insert( "edgesOnVertex" );
00026     ignoredVarNames.insert( "cellsOnVertex" );
00027     ignoredVarNames.insert( "verticesOnEdge" );
00028     ignoredVarNames.insert( "edgesOnEdge" );
00029     ignoredVarNames.insert( "cellsOnEdge" );
00030     ignoredVarNames.insert( "verticesOnCell" );
00031     ignoredVarNames.insert( "edgesOnCell" );
00032     ignoredVarNames.insert( "cellsOnCell" );
00033     // Ignore variables containing mesh-related info that is currently saved as variable tags on the
00034     // file set; if needed, these should be saved as dense tags and read accordingly, in parallel
00035     ignoredVarNames.insert( "weightsOnEdge" );
00036     ignoredVarNames.insert( "angleEdge" );
00037     ignoredVarNames.insert( "areaCell" );
00038     ignoredVarNames.insert( "areaTriangle" );
00039     ignoredVarNames.insert( "dcEdge" );
00040     ignoredVarNames.insert( "dv1Edge" );
00041     ignoredVarNames.insert( "dv2Edge" );
00042     ignoredVarNames.insert( "fEdge" );
00043     ignoredVarNames.insert( "fVertex" );
00044     ignoredVarNames.insert( "h_s" );
00045     ignoredVarNames.insert( "kiteAreasOnVertex" );
00046     ignoredVarNames.insert( "latCell" );
00047     ignoredVarNames.insert( "latEdge" );
00048     ignoredVarNames.insert( "latVertex" );
00049     ignoredVarNames.insert( "lonCell" );
00050     ignoredVarNames.insert( "lonEdge" );
00051     ignoredVarNames.insert( "lonVertex" );
00052     ignoredVarNames.insert( "meshDensity" );
00053     ignoredVarNames.insert( "xCell" );
00054     ignoredVarNames.insert( "xEdge" );
00055     ignoredVarNames.insert( "xVertex" );
00056     ignoredVarNames.insert( "yCell" );
00057     ignoredVarNames.insert( "yEdge" );
00058     ignoredVarNames.insert( "yVertex" );
00059     ignoredVarNames.insert( "zCell" );
00060     ignoredVarNames.insert( "zEdge" );
00061     ignoredVarNames.insert( "zVertex" );
00062     // Ignore variables for index conversion
00063     ignoredVarNames.insert( "indexToVertexID" );
00064     ignoredVarNames.insert( "indexToEdgeID" );
00065     ignoredVarNames.insert( "indexToCellID" );
00066 }
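// Note: the topology variables ignored above are not lost; create_mesh() and its helpers read
// them directly (e.g. verticesOnCell, edgesOnCell, nEdgesOnCell, and the xVertex/yVertex/zVertex
// coordinates) to build the MOAB entities, instead of treating them as field variables.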
00067 
00068 bool NCHelperMPAS::can_read_file( ReadNC* readNC )
00069 {
00070     std::vector< std::string >& dimNames = readNC->dimNames;
00071 
00072     // If the dimension name "vertexDegree" exists, then this should be an MPAS grid
00073     if( std::find( dimNames.begin(), dimNames.end(), std::string( "vertexDegree" ) ) != dimNames.end() ) return true;
00074 
00075     return false;
00076 }
00077 
00078 ErrorCode NCHelperMPAS::init_mesh_vals()
00079 {
00080     std::vector< std::string >& dimNames              = _readNC->dimNames;
00081     std::vector< int >& dimLens                       = _readNC->dimLens;
00082     std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
00083 
00084     ErrorCode rval;
00085     unsigned int idx;
00086     std::vector< std::string >::iterator vit;
00087 
00088     // Get max edges per cell reported in the MPAS file header
00089     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "maxEdges" ) ) != dimNames.end() )
00090     {
00091         idx             = vit - dimNames.begin();
00092         maxEdgesPerCell = dimLens[idx];
00093         if( maxEdgesPerCell > DEFAULT_MAX_EDGES_PER_CELL )
00094         {
00095             MB_SET_ERR( MB_INVALID_SIZE,
00096                         "maxEdgesPerCell read from the MPAS file header exceeds " << DEFAULT_MAX_EDGES_PER_CELL );
00097         }
00098     }
00099 
00100     // Look for time dimension
00101     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
00102         idx = vit - dimNames.begin();
00103     else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
00104         idx = vit - dimNames.begin();
00105     else
00106     {
00107         MB_SET_ERR( MB_FAILURE, "Couldn't find 'Time' or 'time' dimension" );
00108     }
00109     tDim       = idx;
00110     nTimeSteps = dimLens[idx];
00111 
00112     // Get number of cells
00113     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nCells" ) ) != dimNames.end() )
00114         idx = vit - dimNames.begin();
00115     else
00116     {
00117         MB_SET_ERR( MB_FAILURE, "Couldn't find 'nCells' dimension" );
00118     }
00119     cDim   = idx;
00120     nCells = dimLens[idx];
00121 
00122     // Get number of edges
00123     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nEdges" ) ) != dimNames.end() )
00124         idx = vit - dimNames.begin();
00125     else
00126     {
00127         MB_SET_ERR( MB_FAILURE, "Couldn't find 'nEdges' dimension" );
00128     }
00129     eDim   = idx;
00130     nEdges = dimLens[idx];
00131 
00132     // Get number of vertices
00133     vDim = -1;
00134     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertices" ) ) != dimNames.end() )
00135         idx = vit - dimNames.begin();
00136     else
00137     {
00138         MB_SET_ERR( MB_FAILURE, "Couldn't find 'nVertices' dimension" );
00139     }
00140     vDim      = idx;
00141     nVertices = dimLens[idx];
00142 
00143     // Get number of vertex levels
00144     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevels" ) ) != dimNames.end() )
00145         idx = vit - dimNames.begin();
00146     else
00147     {
00148         std::cerr << "Warning: dimension nVertLevels not found in header.\nThe file may contain "
00149                      "just the mesh"
00150                   << std::endl;
00151     }
00152     levDim  = idx;
00153     nLevels = dimLens[idx];
00154 
00155     // Dimension indices for other optional levels
00156     std::vector< unsigned int > opt_lev_dims;
00157 
00158     // Get index of vertex levels P1
00159     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP1" ) ) != dimNames.end() )
00160     {
00161         idx = vit - dimNames.begin();
00162         opt_lev_dims.push_back( idx );
00163     }
00164 
00165     // Get index of vertex levels P2
00166     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nVertLevelsP2" ) ) != dimNames.end() )
00167     {
00168         idx = vit - dimNames.begin();
00169         opt_lev_dims.push_back( idx );
00170     }
00171 
00172     // Get index of soil levels
00173     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "nSoilLevels" ) ) != dimNames.end() )
00174     {
00175         idx = vit - dimNames.begin();
00176         opt_lev_dims.push_back( idx );
00177     }
00178 
00179     std::map< std::string, ReadNC::VarData >::iterator vmit;
00180 
00181     // Store time coordinate values in tVals
00182     if( nTimeSteps > 0 )
00183     {
00184         // Note, two possible types for xtime variable: double(Time) or char(Time, StrLen)
00185         if( ( vmit = varInfo.find( "xtime" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
00186         {
00187             // If xtime variable is double type, read time coordinate values to tVals
00188             rval = read_coordinate( "xtime", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'xtime' variable" );
00189         }
00190         else
00191         {
00192             // If xtime variable does not exist, or it is string type, set dummy values to tVals
00193             for( int t = 0; t < nTimeSteps; t++ )
00194                 tVals.push_back( (double)t );
00195         }
00196     }
00197 
00198     // For each variable, determine the entity location type and number of levels
00199     for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
00200     {
00201         ReadNC::VarData& vd = ( *vmit ).second;
00202 
00203         // Default entLoc is ENTLOCSET
00204         if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
00205         {
00206             if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
00207                 vd.entLoc = ReadNC::ENTLOCVERT;
00208             else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
00209                 vd.entLoc = ReadNC::ENTLOCEDGE;
00210             else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
00211                 vd.entLoc = ReadNC::ENTLOCFACE;
00212         }
00213 
00214         // Default numLev is 0
00215         if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
00216             vd.numLev = nLevels;
00217         else
00218         {
00219             // If nVertLevels dimension is not found, try other optional levels such as
00220             // nVertLevelsP1
00221             for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
00222             {
00223                 if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
00224                 {
00225                     vd.numLev = dimLens[opt_lev_dims[i]];
00226                     break;
00227                 }
00228             }
00229         }
00230 
00231         // Hack: ignore variables with more than 3 dimensions, e.g. tracers(Time, nCells,
00232         // nVertLevels, nTracers)
00233         if( vd.varDims.size() > 3 ) ignoredVarNames.insert( vd.varName );
00234     }
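    // Illustrative example (hypothetical variable names): a field declared as
    // temperature(Time, nCells, nVertLevels) would get entLoc = ENTLOCFACE and numLev = nLevels,
    // while a field declared as tracers(Time, nCells, nVertLevels, nTracers) has 4 dimensions and
    // is added to ignoredVarNames by the hack above.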
00235 
00236     // Hack: create dummy variables for dimensions (like nCells) with no corresponding coordinate
00237     // variables
00238     rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );
00239 
00240     return MB_SUCCESS;
00241 }
00242 
00243 // When the noMesh option is used on this read, the old ReadNC class instance from the last read can go out
00244 // of scope (and be deleted). That old instance initialized some variables properly when the mesh was
00245 // created, but those values are now lost. The new instance (which will not create the mesh, because of the
00246 // noMesh option) has to restore them based on the existing mesh from the last read
00247 ErrorCode NCHelperMPAS::check_existing_mesh()
00248 {
00249     Interface*& mbImpl = _readNC->mbImpl;
00250     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
00251     bool& noMesh       = _readNC->noMesh;
00252 
00253     if( noMesh )
00254     {
00255         ErrorCode rval;
00256 
00257         // Restore numCellGroups
00258         if( 0 == numCellGroups )
00259         {
00260             Tag numCellGroupsTag;
00261             rval = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag );MB_CHK_SET_ERR( rval, "Trouble getting __NUM_CELL_GROUPS tag" );
00262             if( MB_SUCCESS == rval ) rval = mbImpl->tag_get_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble getting data of __NUM_CELL_GROUPS tag" );
00263         }
00264 
00265         if( localGidVerts.empty() )
00266         {
00267             // Get all vertices from current file set (it is the input set in no_mesh scenario)
00268             Range local_verts;
00269             rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );
00270 
00271             if( !local_verts.empty() )
00272             {
00273                 std::vector< int > gids( local_verts.size() );
00274 
00275                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00276                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );
00277 
00278                 // Restore localGidVerts
00279                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
00280                 nLocalVertices = localGidVerts.size();
00281             }
00282         }
00283 
00284         if( localGidEdges.empty() )
00285         {
00286             // Get all edges from current file set (it is the input set in no_mesh scenario)
00287             Range local_edges;
00288             rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );
00289 
00290             if( !local_edges.empty() )
00291             {
00292                 std::vector< int > gids( local_edges.size() );
00293 
00294                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00295                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );
00296 
00297                 // Restore localGidEdges
00298                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
00299                 nLocalEdges = localGidEdges.size();
00300             }
00301         }
00302 
00303         if( localGidCells.empty() )
00304         {
00305             // Get all cells from current file set (it is the input set in no_mesh scenario)
00306             Range local_cells;
00307             rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );
00308 
00309             if( !local_cells.empty() )
00310             {
00311                 std::vector< int > gids( local_cells.size() );
00312 
00313                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00314                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );
00315 
00316                 // Restore localGidCells
00317                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
00318                 nLocalCells = localGidCells.size();
00319 
00320                 if( numCellGroups > 1 )
00321                 {
00322                     // Restore cellHandleToGlobalID map
00323                     Range::const_iterator rit;
00324                     int i;
00325                     for( rit = local_cells.begin(), i = 0; rit != local_cells.end(); ++rit, i++ )
00326                         cellHandleToGlobalID[*rit] = gids[i];
00327                 }
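                // cellHandleToGlobalID is only needed when the local cells span more than one
                // handle group (numCellGroups > 1); the variable-reading code later uses it to
                // scatter values from the GID-ordered read buffer into the tag memory of each group.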
00328             }
00329         }
00330     }
00331 
00332     return MB_SUCCESS;
00333 }
00334 
00335 ErrorCode NCHelperMPAS::create_mesh( Range& faces )
00336 {
00337     Interface*& mbImpl    = _readNC->mbImpl;
00338     bool& noMixedElements = _readNC->noMixedElements;
00339     bool& noEdges         = _readNC->noEdges;
00340     DebugOutput& dbgOut   = _readNC->dbgOut;
00341 
00342 #ifdef MOAB_HAVE_MPI
00343     int rank         = 0;
00344     int procs        = 1;
00345     bool& isParallel = _readNC->isParallel;
00346     if( isParallel )
00347     {
00348         ParallelComm*& myPcomm = _readNC->myPcomm;
00349         rank                   = myPcomm->proc_config().proc_rank();
00350         procs                  = myPcomm->proc_config().proc_size();
00351     }
00352 
00353     // Need to know whether we'll be creating gather mesh
00354     if( rank == _readNC->gatherSetRank ) createGatherSet = true;
00355 
00356     if( procs >= 2 )
00357     {
00358         // Shift rank to obtain a rotated trivial partition
00359         int shifted_rank           = rank;
00360         int& trivialPartitionShift = _readNC->trivialPartitionShift;
00361         if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;
00362 
00363         // Compute the number of local cells on this proc
00364         nLocalCells = int( std::floor( 1.0 * nCells / procs ) );
00365 
00366         // The starting global cell index in the MPAS file for this proc
00367         int start_cell_idx = shifted_rank * nLocalCells;
00368 
00369         // Number of extra cells after equal split over procs
00370         int iextra = nCells % procs;
00371 
00372         // Allocate extra cells over procs
00373         if( shifted_rank < iextra ) nLocalCells++;
00374         start_cell_idx += std::min( shifted_rank, iextra );
00375 
00376         start_cell_idx++;  // 0 based -> 1 based
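        // Illustration (hypothetical numbers): with nCells = 10 and procs = 4, each proc starts
        // with floor(10/4) = 2 cells and iextra = 2; shifted ranks 0 and 1 each take one extra
        // cell, so the local counts are 3, 3, 2, 2 and the 1-based start indices are 1, 4, 7, 9.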
00377 
00378         // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
00379         ErrorCode rval = redistribute_local_cells( start_cell_idx );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
00380     }
00381     else
00382     {
00383         nLocalCells = nCells;
00384         localGidCells.insert( 1, nLocalCells );
00385     }
00386 #else
00387     nLocalCells = nCells;
00388     localGidCells.insert( 1, nLocalCells );
00389 #endif
00390     dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
00391     dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );
00392 
00393     // Read number of edges on each local cell, to calculate actual maxEdgesPerCell
00394     int nEdgesOnCellVarId;
00395     int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
00396     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
00397     std::vector< int > num_edges_on_local_cells( nLocalCells );
00398 #ifdef MOAB_HAVE_PNETCDF
00399     size_t nb_reads = localGidCells.psize();
00400     std::vector< int > requests( nb_reads );
00401     std::vector< int > statuss( nb_reads );
00402     size_t idxReq = 0;
00403 #endif
00404     size_t indexInArray = 0;
00405     for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
00406          ++pair_iter )
00407     {
00408         EntityHandle starth  = pair_iter->first;
00409         EntityHandle endh    = pair_iter->second;
00410         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
00411         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
00412 
00413         // Do a partial read in each subrange
00414 #ifdef MOAB_HAVE_PNETCDF
00415         success = NCFUNCREQG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
00416                                            &( num_edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
00417 #else
00418         success = NCFUNCAG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count,
00419                                          &( num_edges_on_local_cells[indexInArray] ) );
00420 #endif
00421         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data in a loop" );
00422 
00423         // Increment the index for next subrange
00424         indexInArray += ( endh - starth + 1 );
00425     }
00426 
00427 #ifdef MOAB_HAVE_PNETCDF
00428     // Wait outside the loop
00429     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
00430     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
00431 #endif
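    // Note on the read pattern used throughout this helper: a Range of global IDs (here
    // localGidCells) is traversed by contiguous (start, end) pairs; each pair maps to one
    // hyperslab read with read_start = start - 1 and read_count = end - start + 1, and with
    // PnetCDF the nonblocking requests issued per pair are all completed by the single wait_all above.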
00432 
00433     // Get local maxEdgesPerCell on this proc
00434     int local_max_edges_per_cell =
00435         *( std::max_element( num_edges_on_local_cells.begin(), num_edges_on_local_cells.end() ) );
00436     maxEdgesPerCell = local_max_edges_per_cell;
00437 
00438     // If parallel, do an MPI_Allreduce to get the global maxEdgesPerCell across all procs
00439 #ifdef MOAB_HAVE_MPI
00440     if( procs >= 2 )
00441     {
00442         int global_max_edges_per_cell;
00443         ParallelComm*& myPcomm = _readNC->myPcomm;
00444         MPI_Allreduce( &local_max_edges_per_cell, &global_max_edges_per_cell, 1, MPI_INT, MPI_MAX,
00445                        myPcomm->proc_config().proc_comm() );
00446         assert( local_max_edges_per_cell <= global_max_edges_per_cell );
00447         maxEdgesPerCell = global_max_edges_per_cell;
00448         if( 0 == rank ) dbgOut.tprintf( 1, "  global_max_edges_per_cell = %d\n", global_max_edges_per_cell );
00449     }
00450 #endif
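    // The global maximum matters because maxEdgesPerCell is used below as the per-cell stride of
    // the padded connectivity arrays (e.g. vertices_on_local_cells) and, with the noMixedElements
    // option, as the uniform polygon size; presumably it needs to be consistent across processors.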
00451 
00452     // Read vertices on each local cell, to get localGidVerts and cell connectivity later
00453     int verticesOnCellVarId;
00454     success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
00455     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
00456     std::vector< int > vertices_on_local_cells( nLocalCells * maxEdgesPerCell );
00457 #ifdef MOAB_HAVE_PNETCDF
00458     idxReq = 0;
00459 #endif
00460     indexInArray = 0;
00461     for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
00462          ++pair_iter )
00463     {
00464         EntityHandle starth      = pair_iter->first;
00465         EntityHandle endh        = pair_iter->second;
00466         NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
00467         NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
00468                                      static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
00469 
00470         // Do a partial read in each subrange
00471 #ifdef MOAB_HAVE_PNETCDF
00472         success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
00473                                            &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
00474 #else
00475         success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
00476                                          &( vertices_on_local_cells[indexInArray] ) );
00477 #endif
00478         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data in a loop" );
00479 
00480         // Increment the index for next subrange
00481         indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
00482     }
00483 
00484 #ifdef MOAB_HAVE_PNETCDF
00485     // Wait outside the loop
00486     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
00487     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
00488 #endif
00489 
00490     // Correct the local cell vertices array: replace the padded vertices with the last vertex
00491     // of the corresponding cell; sometimes the padded vertices are 0, sometimes a large
00492     // vertex id. Make sure they are consistent with our padded option
00493     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
00494     {
00495         int num_edges             = num_edges_on_local_cells[local_cell_idx];
00496         int idx_in_local_vert_arr = local_cell_idx * maxEdgesPerCell;
00497         int last_vert_idx         = vertices_on_local_cells[idx_in_local_vert_arr + num_edges - 1];
00498         for( int i = num_edges; i < maxEdgesPerCell; i++ )
00499             vertices_on_local_cells[idx_in_local_vert_arr + i] = last_vert_idx;
00500     }
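    // Illustration (hypothetical ids): with maxEdgesPerCell = 7, a pentagonal cell read as
    // [12, 34, 56, 78, 90, 0, 0] becomes [12, 34, 56, 78, 90, 90, 90], i.e. the padding entries
    // are overwritten with the cell's last real vertex.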
00501 
00502     // Create local vertices
00503     EntityHandle start_vertex;
00504     ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for MPAS mesh" );
00505 
00506     // Create local edges (unless NO_EDGES read option is set)
00507     if( !noEdges )
00508     {
00509         rval = create_local_edges( start_vertex, num_edges_on_local_cells );MB_CHK_SET_ERR( rval, "Failed to create local edges for MPAS mesh" );
00510     }
00511 
00512     // Create local cells, either unpadded or padded
00513     if( noMixedElements )
00514     {
00515         rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for MPAS mesh" );
00516     }
00517     else
00518     {
00519         rval = create_local_cells( vertices_on_local_cells, num_edges_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create local cells for MPAS mesh" );
00520     }
00521 
00522     // Set tag for numCellGroups
00523     Tag numCellGroupsTag = 0;
00524     rval                 = mbImpl->tag_get_handle( "__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag,
00525                                    MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( rval, "Trouble creating __NUM_CELL_GROUPS tag" );
00526     rval = mbImpl->tag_set_data( numCellGroupsTag, &_fileSet, 1, &numCellGroups );MB_CHK_SET_ERR( rval, "Trouble setting data to __NUM_CELL_GROUPS tag" );
00527 
00528     if( createGatherSet )
00529     {
00530         EntityHandle gather_set;
00531         rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );
00532 
00533         // Create gather set vertices
00534         EntityHandle start_gather_set_vertex;
00535         rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for MPAS mesh" );
00536 
00537         // Create gather set edges (unless NO_EDGES read option is set)
00538         if( !noEdges )
00539         {
00540             rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for MPAS mesh" );
00541         }
00542 
00543         // Create gather set cells, either unpadded or padded
00544         if( noMixedElements )
00545         {
00546             rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for MPAS mesh" );
00547         }
00548         else
00549         {
00550             rval = create_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set cells for MPAS mesh" );
00551         }
00552     }
00553 
00554     return MB_SUCCESS;
00555 }
00556 
00557 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
00558                                                                std::vector< int >& tstep_nums )
00559 {
00560     Interface*& mbImpl          = _readNC->mbImpl;
00561     std::vector< int >& dimLens = _readNC->dimLens;
00562     bool& noEdges               = _readNC->noEdges;
00563     DebugOutput& dbgOut         = _readNC->dbgOut;
00564 
00565     Range* range = NULL;
00566 
00567     // Get vertices
00568     Range verts;
00569     ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
00570     assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );
00571 
00572     // Get edges
00573     Range edges;
00574     rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );
00575 
00576     // Get faces
00577     Range faces;
00578     rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
00579     // Note: for MPAS, faces.psize() can be more than 1
00580 
00581 #ifdef MOAB_HAVE_MPI
00582     bool& isParallel = _readNC->isParallel;
00583     if( isParallel )
00584     {
00585         ParallelComm*& myPcomm = _readNC->myPcomm;
00586         rval                   = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
00587     }
00588     else
00589         facesOwned = faces;  // not running in parallel, but still with MPI
00590 #else
00591     facesOwned = faces;
00592 #endif
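    // From here on, cell data is read into tags on facesOwned, so in parallel each processor
    // writes values only for the faces it owns; shared-but-not-owned copies would presumably be
    // filled in by a later parallel exchange if the application needs them.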
00593 
00594     for( unsigned int i = 0; i < vdatas.size(); i++ )
00595     {
00596         // Skip edge variables, if specified by the read options
00597         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00598 
00599         // Support non-set variables with 3 dimensions like (Time, nCells, nVertLevels), or
00600         // 2 dimensions like (Time, nCells)
00601         assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );
00602 
00603         // For a non-set variable, time should be the first dimension
00604         assert( tDim == vdatas[i].varDims[0] );
00605 
00606         // Set up readStarts and readCounts
00607         vdatas[i].readStarts.resize( 3 );
00608         vdatas[i].readCounts.resize( 3 );
00609 
00610         // First: Time
00611         vdatas[i].readStarts[0] = 0;  // This value is timestep dependent, will be set later
00612         vdatas[i].readCounts[0] = 1;
00613 
00614         // Next: nVertices / nCells / nEdges
00615         switch( vdatas[i].entLoc )
00616         {
00617             case ReadNC::ENTLOCVERT:
00618                 // Vertices
00619                 // Start from the first localGidVerts
00620                 // Actually, this will be reset later on in a loop
00621                 vdatas[i].readStarts[1] = localGidVerts[0] - 1;
00622                 vdatas[i].readCounts[1] = nLocalVertices;
00623                 range                   = &verts;
00624                 break;
00625             case ReadNC::ENTLOCFACE:
00626                 // Faces
00627                 // Start from the first localGidCells
00628                 // Actually, this will be reset later on in a loop
00629                 vdatas[i].readStarts[1] = localGidCells[0] - 1;
00630                 vdatas[i].readCounts[1] = nLocalCells;
00631                 range                   = &facesOwned;
00632                 break;
00633             case ReadNC::ENTLOCEDGE:
00634                 // Edges
00635                 // Start from the first localGidEdges
00636                 // Actually, this will be reset later on in a loop
00637                 vdatas[i].readStarts[1] = localGidEdges[0] - 1;
00638                 vdatas[i].readCounts[1] = nLocalEdges;
00639                 range                   = &edges;
00640                 break;
00641             default:
00642                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00643         }
00644 
00645         // Finally: nVertLevels or other optional levels. It is possible that there is no
00646         // level dimension (numLev is 0) for this non-set variable, e.g. (Time, nCells)
00647         if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
00648         vdatas[i].readStarts[2] = 0;
00649         vdatas[i].readCounts[2] = vdatas[i].numLev;
00650 
00651         // Get variable size
00652         vdatas[i].sz = 1;
00653         for( std::size_t idx = 0; idx != 3; idx++ )
00654             vdatas[i].sz *= vdatas[i].readCounts[idx];
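        // Illustration (hypothetical sizes): for a cell variable with nLocalCells = 1000 and
        // numLev = 40, readCounts is {1, 1000, 40}, so sz = 40000 values are read per time step.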
00655 
00656         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00657         {
00658             dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );
00659 
00660             if( tstep_nums[t] >= dimLens[tDim] )
00661             {
00662                 MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] );
00663             }
00664 
00665             // Get the tag to read into
00666             if( !vdatas[i].varTags[t] )
00667             {
00668                 rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
00669             }
00670 
00671             // Get ptr to tag space
00672             if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
00673             {
00674                 // For a cell variable that is NOT on one contiguous chunk of faces, defer its tag
00675                 // space allocation
00676                 vdatas[i].varDatas[t] = NULL;
00677             }
00678             else
00679             {
00680                 assert( 1 == range->psize() );
00681                 void* data;
00682                 int count;
00683                 rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
00684                 assert( (unsigned)count == range->size() );
00685                 vdatas[i].varDatas[t] = data;
00686             }
00687         }
00688     }
00689 
00690     return rval;
00691 }
00692 
00693 #ifdef MOAB_HAVE_PNETCDF
00694 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
00695                                                             std::vector< int >& tstep_nums )
00696 {
00697     Interface*& mbImpl  = _readNC->mbImpl;
00698     bool& noEdges       = _readNC->noEdges;
00699     DebugOutput& dbgOut = _readNC->dbgOut;
00700 
00701     ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
00702 
00703     // Finally, read into that space
00704     int success;
00705     Range* pLocalGid = NULL;
00706 
00707     for( unsigned int i = 0; i < vdatas.size(); i++ )
00708     {
00709         // Skip edge variables, if specified by the read options
00710         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00711 
00712         switch( vdatas[i].entLoc )
00713         {
00714             case ReadNC::ENTLOCVERT:
00715                 pLocalGid = &localGidVerts;
00716                 break;
00717             case ReadNC::ENTLOCFACE:
00718                 pLocalGid = &localGidCells;
00719                 break;
00720             case ReadNC::ENTLOCEDGE:
00721                 pLocalGid = &localGidEdges;
00722                 break;
00723             default:
00724                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00725         }
00726 
00727         std::size_t sz = vdatas[i].sz;
00728 
00729         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00730         {
00731             // We will synchronize all these reads with the other processors,
00732             // so the wait will be inside this double loop; is it too much?
00733             size_t nb_reads = pLocalGid->psize();
00734             std::vector< int > requests( nb_reads ), statuss( nb_reads );
00735             size_t idxReq = 0;
00736 
00737             // Set readStart for each timestep along time dimension
00738             vdatas[i].readStarts[0] = tstep_nums[t];
00739 
00740             switch( vdatas[i].varDataType )
00741             {
00742                 case NC_FLOAT:
00743                 case NC_DOUBLE: {
00744                     // Read float as double
00745                     std::vector< double > tmpdoubledata( sz );
00746 
00747                     // In the case of a ucd mesh on multiple procs,
00748                     // we need to issue as many reads as there are
00749                     // subranges in the localGid range;
00750                     // basically, each subrange needs its own starting
00751                     // offset into the data buffer
00752                     size_t indexInDoubleArray = 0;
00753                     size_t ic                 = 0;
00754                     for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
00755                          ++pair_iter, ic++ )
00756                     {
00757                         EntityHandle starth     = pair_iter->first;
00758                         EntityHandle endh       = pair_iter->second;  // Inclusive
00759                         vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
00760                         vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );
00761 
00762                         // Do a partial read, in each subrange
00763                         // Wait outside this loop
00764                         success =
00765                             NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
00766                                                         &( vdatas[i].readCounts[0] ),
00767                                                         &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
00768                         if( success )
00769                             MB_SET_ERR( MB_FAILURE,
00770                                         "Failed to read double data in a loop for variable " << vdatas[i].varName );
00771                         // We need to increment the index in double array for the
00772                         // next subrange
00773                         indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
00774                     }
00775                     assert( ic == pLocalGid->psize() );
00776 
00777                     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
00778                     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
00779 
00780                     if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
00781                     {
00782                         // For a cell variable that is NOT on one contiguous chunk of faces,
00783                         // allocate tag space for each cell group, and utilize cellHandleToGlobalID
00784                         // map to read tag data
00785                         Range::iterator iter = facesOwned.begin();
00786                         while( iter != facesOwned.end() )
00787                         {
00788                             int count;
00789                             void* ptr;
00790                             rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );
00791 
00792                             for( int j = 0; j < count; j++ )
00793                             {
00794                                 int global_cell_idx =
00795                                     cellHandleToGlobalID[*( iter + j )];  // Global cell index, 1 based
00796                                 int local_cell_idx =
00797                                     localGidCells.index( global_cell_idx );  // Local cell index, 0 based
00798                                 assert( local_cell_idx != -1 );
00799                                 for( int level = 0; level < vdatas[i].numLev; level++ )
00800                                     ( (double*)ptr )[j * vdatas[i].numLev + level] =
00801                                         tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
00802                             }
00803 
00804                             iter += count;
00805                         }
00806                     }
00807                     else
00808                     {
00809                         void* data = vdatas[i].varDatas[t];
00810                         for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
00811                             ( (double*)data )[idx] = tmpdoubledata[idx];
00812                     }
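                    // In the numCellGroups > 1 branch above, the owned faces do not form one
                    // contiguous handle range, so the tag memory is filled group by group: each
                    // face handle is mapped back to its global cell id via cellHandleToGlobalID,
                    // then to its 0-based position in localGidCells, which is the row index into
                    // tmpdoubledata.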
00813 
00814                     break;
00815                 }
00816                 default:
00817                     MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
00818             }
00819         }
00820     }
00821 
00822     // Debug output, if requested
00823     if( 1 == dbgOut.get_verbosity() )
00824     {
00825         dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
00826         for( unsigned int i = 1; i < vdatas.size(); i++ )
00827             dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
00828         dbgOut.tprintf( 1, "\n" );
00829     }
00830 
00831     return rval;
00832 }
00833 #else
00834 ErrorCode NCHelperMPAS::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
00835                                                       std::vector< int >& tstep_nums )
00836 {
00837     Interface*& mbImpl = _readNC->mbImpl;
00838     bool& noEdges = _readNC->noEdges;
00839     DebugOutput& dbgOut = _readNC->dbgOut;
00840 
00841     ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
00842 
00843     // Finally, read into that space
00844     int success;
00845     Range* pLocalGid = NULL;
00846 
00847     for( unsigned int i = 0; i < vdatas.size(); i++ )
00848     {
00849         // Skip edge variables, if specified by the read options
00850         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00851 
00852         switch( vdatas[i].entLoc )
00853         {
00854             case ReadNC::ENTLOCVERT:
00855                 pLocalGid = &localGidVerts;
00856                 break;
00857             case ReadNC::ENTLOCFACE:
00858                 pLocalGid = &localGidCells;
00859                 break;
00860             case ReadNC::ENTLOCEDGE:
00861                 pLocalGid = &localGidEdges;
00862                 break;
00863             default:
00864                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00865         }
00866 
00867         std::size_t sz = vdatas[i].sz;
00868 
00869         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00870         {
00871             // Set readStart for each timestep along time dimension
00872             vdatas[i].readStarts[0] = tstep_nums[t];
00873 
00874             switch( vdatas[i].varDataType )
00875             {
00876                 case NC_FLOAT:
00877                 case NC_DOUBLE: {
00878                     // Read float as double
00879                     std::vector< double > tmpdoubledata( sz );
00880 
00881                     // In the case of a ucd mesh on multiple procs,
00882                     // we need to issue as many reads as there are
00883                     // subranges in the localGid range;
00884                     // basically, each subrange needs its own starting
00885                     // offset into the data buffer
00886                     size_t indexInDoubleArray = 0;
00887                     size_t ic = 0;
00888                     for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
00889                          ++pair_iter, ic++ )
00890                     {
00891                         EntityHandle starth = pair_iter->first;
00892                         EntityHandle endh = pair_iter->second;  // Inclusive
00893                         vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
00894                         vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );
00895 
00896                         success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
00897                                                             &( vdatas[i].readCounts[0] ),
00898                                                             &( tmpdoubledata[indexInDoubleArray] ) );
00899                         if( success )
00900                             MB_SET_ERR( MB_FAILURE,
00901                                         "Failed to read double data in a loop for variable " << vdatas[i].varName );
00902                         // We need to increment the index in double array for the
00903                         // next subrange
00904                         indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
00905                     }
00906                     assert( ic == pLocalGid->psize() );
00907 
00908                     if( vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1 )
00909                     {
00910                         // For a cell variable that is NOT on one contiguous chunk of faces,
00911                         // allocate tag space for each cell group, and utilize cellHandleToGlobalID
00912                         // map to read tag data
00913                         Range::iterator iter = facesOwned.begin();
00914                         while( iter != facesOwned.end() )
00915                         {
00916                             int count;
00917                             void* ptr;
00918                             rval = mbImpl->tag_iterate( vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr );MB_CHK_SET_ERR( rval, "Failed to iterate tag on owned faces" );
00919 
00920                             for( int j = 0; j < count; j++ )
00921                             {
00922                                 int global_cell_idx =
00923                                     cellHandleToGlobalID[*( iter + j )];  // Global cell index, 1 based
00924                                 int local_cell_idx =
00925                                     localGidCells.index( global_cell_idx );  // Local cell index, 0 based
00926                                 assert( local_cell_idx != -1 );
00927                                 for( int level = 0; level < vdatas[i].numLev; level++ )
00928                                     ( (double*)ptr )[j * vdatas[i].numLev + level] =
00929                                         tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
00930                             }
00931 
00932                             iter += count;
00933                         }
00934                     }
00935                     else
00936                     {
00937                         void* data = vdatas[i].varDatas[t];
00938                         for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
00939                             ( (double*)data )[idx] = tmpdoubledata[idx];
00940                     }
00941 
00942                     break;
00943                 }
00944                 default:
00945                     MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
00946             }
00947         }
00948     }
00949 
00950     // Debug output, if requested
00951     if( 1 == dbgOut.get_verbosity() )
00952     {
00953         dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
00954         for( unsigned int i = 1; i < vdatas.size(); i++ )
00955             dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
00956         dbgOut.tprintf( 1, "\n" );
00957     }
00958 
00959     return rval;
00960 }
00961 #endif
00962 
00963 #ifdef MOAB_HAVE_MPI
00964 ErrorCode NCHelperMPAS::redistribute_local_cells( int start_cell_idx )
00965 {
00966     // If possible, apply Zoltan partition
00967 #ifdef MOAB_HAVE_ZOLTAN
00968     if( ScdParData::RCBZOLTAN == _readNC->partMethod )
00969     {
00970         // Read x coordinates of cell centers
00971         int xCellVarId;
00972         int success = NCFUNC( inq_varid )( _fileId, "xCell", &xCellVarId );
00973         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xCell" );
00974         std::vector< double > xCell( nLocalCells );
00975         NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
00976         NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
00977         success              = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
00978         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xCell data" );
00979 
00980         // Read y coordinates of cell centers
00981         int yCellVarId;
00982         success = NCFUNC( inq_varid )( _fileId, "yCell", &yCellVarId );
00983         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yCell" );
00984         std::vector< double > yCell( nLocalCells );
00985         success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
00986         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yCell data" );
00987 
00988         // Read z coordinates of cell centers
00989         int zCellVarId;
00990         success = NCFUNC( inq_varid )( _fileId, "zCell", &zCellVarId );
00991         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zCell" );
00992         std::vector< double > zCell( nLocalCells );
00993         success = NCFUNCAG( _vara_double )( _fileId, zCellVarId, &read_start, &read_count, &zCell[0] );
00994         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zCell data" );
00995 
00996         // Zoltan partition using RCB; more study may be needed to determine which partition
00997         // method is better
00998         Interface*& mbImpl         = _readNC->mbImpl;
00999         DebugOutput& dbgOut        = _readNC->dbgOut;
01000         ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, false, 0, NULL );
01001         ErrorCode rval             = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
01002         delete mbZTool;
01003 
01004         dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
01005         dbgOut.tprintf( 1, "                           localGidCells.size() = %d\n", (int)localGidCells.size() );
01006 
01007         // This is important: local cells are now redistributed, so nLocalCells might be different!
01008         nLocalCells = localGidCells.size();
01009 
01010         return MB_SUCCESS;
01011     }
01012 #endif
01013 
01014     // By default, apply trivial partition
01015     localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
01016 
01017     return MB_SUCCESS;
01018 }
01019 #endif
01020 
01021 ErrorCode NCHelperMPAS::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
01022                                                EntityHandle& start_vertex )
01023 {
01024     Interface*& mbImpl      = _readNC->mbImpl;
01025     Tag& mGlobalIdTag       = _readNC->mGlobalIdTag;
01026     const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
01027     DebugOutput& dbgOut     = _readNC->dbgOut;
01028 
01029     // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell
01030     // connectivity later)
01031     std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
01032     std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
01033     std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
01034                range_inserter( localGidVerts ) );
01035     nLocalVertices = localGidVerts.size();
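    // Inserting the sorted ids through range_inserter collapses duplicates, so localGidVerts ends
    // up holding the unique global ids of all vertices referenced by the local cells (the reverse
    // iteration is apparently just an insertion-order optimization for Range).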
01036 
01037     dbgOut.tprintf( 1, "   localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
01038     dbgOut.tprintf( 1, "   localGidVerts.size() = %d\n", (int)localGidVerts.size() );
01039 
01040     // Create local vertices
01041     std::vector< double* > arrays;
01042     ErrorCode rval =
01043         _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
01044                                                  // Might have to create gather mesh later
01045                                                  ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );
01046 
01047     // Add local vertices to current file set
01048     Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
01049     rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );
01050 
01051     // Get ptr to GID memory for local vertices
01052     int count  = 0;
01053     void* data = NULL;
01054     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
01055     assert( count == nLocalVertices );
01056     int* gid_data = (int*)data;
01057     std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
01058 
01059     // Duplicate GID data, which will be used to resolve sharing
01060     if( mpFileIdTag )
01061     {
01062         rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
01063         assert( count == nLocalVertices );
01064         int bytes_per_tag = 4;
01065         rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
01066         if( 4 == bytes_per_tag )
01067         {
01068             gid_data = (int*)data;
01069             std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
01070         }
01071         else if( 8 == bytes_per_tag )
01072         {  // Should be a handle tag on 64 bit machine?
01073             long* handle_tag_data = (long*)data;
01074             std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
01075         }
01076     }
01077 
01078 #ifdef MOAB_HAVE_PNETCDF
01079     size_t nb_reads = localGidVerts.psize();
01080     std::vector< int > requests( nb_reads );
01081     std::vector< int > statuss( nb_reads );
01082     size_t idxReq = 0;
01083 #endif
01084 
01085     // Read x coordinates for local vertices
01086     double* xptr = arrays[0];
01087     int xVertexVarId;
01088     int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
01089     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
01090     size_t indexInArray = 0;
01091     for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
01092          ++pair_iter )
01093     {
01094         EntityHandle starth  = pair_iter->first;
01095         EntityHandle endh    = pair_iter->second;
01096         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
01097         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
01098 
01099         // Do a partial read in each subrange
01100 #ifdef MOAB_HAVE_PNETCDF
01101         success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
01102                                               &requests[idxReq++] );
01103 #else
01104         success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
01105 #endif
01106         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data in a loop" );
01107 
01108         // Increment the index for next subrange
01109         indexInArray += ( endh - starth + 1 );
01110     }
01111 
01112 #ifdef MOAB_HAVE_PNETCDF
01113     // Wait outside the loop
01114     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01115     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01116 #endif
01117 
01118     // Read y coordinates for local vertices
01119     double* yptr = arrays[1];
01120     int yVertexVarId;
01121     success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
01122     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
01123 #ifdef MOAB_HAVE_PNETCDF
01124     idxReq = 0;
01125 #endif
01126     indexInArray = 0;
01127     for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
01128          ++pair_iter )
01129     {
01130         EntityHandle starth  = pair_iter->first;
01131         EntityHandle endh    = pair_iter->second;
01132         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
01133         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
01134 
01135         // Do a partial read in each subrange
01136 #ifdef MOAB_HAVE_PNETCDF
01137         success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
01138                                               &requests[idxReq++] );
01139 #else
01140         success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
01141 #endif
01142         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data in a loop" );
01143 
01144         // Increment the index for next subrange
01145         indexInArray += ( endh - starth + 1 );
01146     }
01147 
01148 #ifdef MOAB_HAVE_PNETCDF
01149     // Wait outside the loop
01150     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01151     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01152 #endif
01153 
01154     // Read z coordinates for local vertices
01155     double* zptr = arrays[2];
01156     int zVertexVarId;
01157     success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
01158     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
01159 #ifdef MOAB_HAVE_PNETCDF
01160     idxReq = 0;
01161 #endif
01162     indexInArray = 0;
01163     for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
01164          ++pair_iter )
01165     {
01166         EntityHandle starth  = pair_iter->first;
01167         EntityHandle endh    = pair_iter->second;
01168         NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
01169         NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );
01170 
01171         // Do a partial read in each subrange
01172 #ifdef MOAB_HAVE_PNETCDF
01173         success = NCFUNCREQG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ),
01174                                               &requests[idxReq++] );
01175 #else
01176         success = NCFUNCAG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, &( zptr[indexInArray] ) );
01177 #endif
01178         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data in a loop" );
01179 
01180         // Increment the index for next subrange
01181         indexInArray += ( endh - starth + 1 );
01182     }
01183 
01184 #ifdef MOAB_HAVE_PNETCDF
01185     // Wait outside the loop
01186     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01187     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01188 #endif
01189 
01190     return MB_SUCCESS;
01191 }
01192 
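// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the original file):
// the coordinate loops above turn each contiguous run [starth, endh] of
// 1-based global ids into a (start, count) hyperslab and pack the values back
// to back in the destination buffer. The hypothetical helper below shows the
// same index arithmetic with plain containers, with a callback standing in
// for the NetCDF read call.
static void example_read_packed_subranges( const std::vector< std::pair< size_t, size_t > >& gid_runs,
                                           std::vector< double >& dest,
                                           void ( *read_run )( size_t start, size_t count, double* buf ) )
{
    size_t indexInArray = 0;  // where the next run lands in the packed buffer
    for( size_t r = 0; r < gid_runs.size(); r++ )
    {
        size_t starth = gid_runs[r].first;   // first 1-based id in this run
        size_t endh   = gid_runs[r].second;  // last 1-based id in this run
        size_t start  = starth - 1;          // file offsets are 0 based
        size_t count  = endh - starth + 1;   // number of contiguous values
        read_run( start, count, &dest[indexInArray] );  // partial read of one run
        indexInArray += count;  // the next run is packed right after this one
    }
}
// ---------------------------------------------------------------------------
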
01193 ErrorCode NCHelperMPAS::create_local_edges( EntityHandle start_vertex,
01194                                             const std::vector< int >& num_edges_on_local_cells )
01195 {
01196     Interface*& mbImpl  = _readNC->mbImpl;
01197     Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
01198     DebugOutput& dbgOut = _readNC->dbgOut;
01199 
01200     // Read edges on each local cell, to get localGidEdges
01201     int edgesOnCellVarId;
01202     int success = NCFUNC( inq_varid )( _fileId, "edgesOnCell", &edgesOnCellVarId );
01203     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edgesOnCell" );
01204 
01205     std::vector< int > edges_on_local_cells( nLocalCells * maxEdgesPerCell );
01206     dbgOut.tprintf( 1, "   edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );
01207 
01208 #ifdef MOAB_HAVE_PNETCDF
01209     size_t nb_reads = localGidCells.psize();
01210     std::vector< int > requests( nb_reads );
01211     std::vector< int > statuss( nb_reads );
01212     size_t idxReq = 0;
01213 #endif
01214     size_t indexInArray = 0;
01215     for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
01216          ++pair_iter )
01217     {
01218         EntityHandle starth      = pair_iter->first;
01219         EntityHandle endh        = pair_iter->second;
01220         NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
01221         NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
01222                                      static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
01223 
01224         // Do a partial read in each subrange
01225 #ifdef MOAB_HAVE_PNETCDF
01226         success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
01227                                            &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
01228 #else
01229         success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
01230                                          &( edges_on_local_cells[indexInArray] ) );
01231 #endif
01232         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edgesOnCell data in a loop" );
01233 
01234         // Increment the index for next subrange
01235         indexInArray += ( endh - starth + 1 ) * maxEdgesPerCell;
01236     }
01237 
01238 #ifdef MOAB_HAVE_PNETCDF
01239     // Wait outside the loop
01240     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01241     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01242 #endif
01243 
01244     // Correct the local cell edges array in the same way as the local cell vertices array:
01245     // replace the padded edges with the last edges in the corresponding cells
01246     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
01247     {
01248         int num_edges             = num_edges_on_local_cells[local_cell_idx];
01249         int idx_in_local_edge_arr = local_cell_idx * maxEdgesPerCell;
01250         int last_edge_idx         = edges_on_local_cells[idx_in_local_edge_arr + num_edges - 1];
01251         for( int i = num_edges; i < maxEdgesPerCell; i++ )
01252             edges_on_local_cells[idx_in_local_edge_arr + i] = last_edge_idx;
01253     }
01254 
01255     // Collect local edges
01256     std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
01257     std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
01258     nLocalEdges = localGidEdges.size();
01259 
01260     dbgOut.tprintf( 1, "   localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
01261     dbgOut.tprintf( 1, "   localGidEdges.size() = %d\n", (int)localGidEdges.size() );
01262 
01263     // Create local edges
01264     EntityHandle start_edge;
01265     EntityHandle* conn_arr_edges = NULL;
01266     ErrorCode rval =
01267         _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
01268                                                      // Might have to create gather mesh later
01269                                                      ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );
01270 
01271     // Add local edges to current file set
01272     Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
01273     rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );
01274 
01275     // Get ptr to GID memory for edges
01276     int count  = 0;
01277     void* data = NULL;
01278     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
01279     assert( count == nLocalEdges );
01280     int* gid_data = (int*)data;
01281     std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );
01282 
01283     int verticesOnEdgeVarId;
01284 
01285     // Read vertices on each local edge, to get edge connectivity
01286     success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
01287     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
01288     // Utilize the memory storage pointed by conn_arr_edges
01289     int* vertices_on_local_edges = (int*)conn_arr_edges;
01290 #ifdef MOAB_HAVE_PNETCDF
01291     nb_reads = localGidEdges.psize();
01292     requests.resize( nb_reads );
01293     statuss.resize( nb_reads );
01294     idxReq = 0;
01295 #endif
01296     indexInArray = 0;
01297     for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
01298          ++pair_iter )
01299     {
01300         EntityHandle starth      = pair_iter->first;
01301         EntityHandle endh        = pair_iter->second;
01302         NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
01303         NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };
01304 
01305         // Do a partial read in each subrange
01306 #ifdef MOAB_HAVE_PNETCDF
01307         success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
01308                                            &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
01309 #else
01310         success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
01311                                          &( vertices_on_local_edges[indexInArray] ) );
01312 #endif
01313         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data in a loop" );
01314 
01315         // Increment the index for next subrange
01316         indexInArray += ( endh - starth + 1 ) * 2;
01317     }
01318 
01319 #ifdef MOAB_HAVE_PNETCDF
01320     // Wait outside the loop
01321     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
01322     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
01323 #endif
01324 
01325     // Populate connectivity data for local edges
01326     // Convert in-place from int (stored in the first half) to EntityHandle
01327     // Reading backward is the trick
01328     for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
01329     {
01330         int global_vert_idx = vertices_on_local_edges[edge_vert];      // Global vertex index, 1 based
01331         int local_vert_idx  = localGidVerts.index( global_vert_idx );  // Local vertex index, 0 based
01332         assert( local_vert_idx != -1 );
01333         conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
01334     }
01335 
01336     return MB_SUCCESS;
01337 }
01338 
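// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the original file):
// in create_local_edges() above, the connectivity buffer is sized for 8-byte
// EntityHandle entries but is first filled with 4-byte int ids read from the
// file, so the ids occupy only the front of the buffer. Walking the entries
// from the back lets each id be read before its bytes are overwritten by the
// wider handle written into the same slot. A hypothetical standalone version,
// using unsigned long long in place of EntityHandle and the gather-set
// arithmetic (handle = start + id - 1); the local path maps ids through
// localGidVerts instead. Like the real code, it reinterprets the buffer as
// int storage.
static void example_widen_ids_in_place( unsigned long long* conn, int n_entries, unsigned long long start_handle )
{
    const int* ids = reinterpret_cast< const int* >( conn );  // ids packed at the front of the buffer
    for( int i = n_entries - 1; i >= 0; i-- )
    {
        int gid = ids[i];                      // read the 1-based id while its bytes are still intact
        conn[i] = start_handle + ( gid - 1 );  // overwrite slot i with the wider handle
    }
}
// ---------------------------------------------------------------------------
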
01339 ErrorCode NCHelperMPAS::create_local_cells( const std::vector< int >& vertices_on_local_cells,
01340                                             const std::vector< int >& num_edges_on_local_cells,
01341                                             EntityHandle start_vertex, Range& faces )
01342 {
01343     Interface*& mbImpl = _readNC->mbImpl;
01344     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
01345 
01346     // Divide local cells into groups based on the number of edges
01347     Range local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01348     // Insert larger values before smaller ones to increase efficiency
01349     for( int i = nLocalCells - 1; i >= 0; i-- )
01350     {
01351         int num_edges = num_edges_on_local_cells[i];
01352         local_cells_with_n_edges[num_edges].insert( localGidCells[i] );  // Global cell index
01353     }
01354 
01355     std::vector< int > num_edges_on_cell_groups;
01356     for( int i = 3; i <= maxEdgesPerCell; i++ )
01357     {
01358         if( local_cells_with_n_edges[i].size() > 0 ) num_edges_on_cell_groups.push_back( i );
01359     }
01360     numCellGroups = num_edges_on_cell_groups.size();
01361 
01362     EntityHandle* conn_arr_local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01363     for( int i = 0; i < numCellGroups; i++ )
01364     {
01365         int num_edges_per_cell = num_edges_on_cell_groups[i];
01366         int num_group_cells    = (int)local_cells_with_n_edges[num_edges_per_cell].size();
01367 
01368         // Create local cells for each non-empty cell group
01369         EntityHandle start_element;
01370         ErrorCode rval = _readNC->readMeshIface->get_element_connect(
01371             num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
01372             conn_arr_local_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
01373         faces.insert( start_element, start_element + num_group_cells - 1 );
01374 
01375         // Add local cells to current file set
01376         Range local_cells_range( start_element, start_element + num_group_cells - 1 );
01377         rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
01378 
01379         // Get ptr to gid memory for local cells
01380         int count  = 0;
01381         void* data = NULL;
01382         rval = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
01383         assert( count == num_group_cells );
01384         int* gid_data = (int*)data;
01385         std::copy( local_cells_with_n_edges[num_edges_per_cell].begin(),
01386                    local_cells_with_n_edges[num_edges_per_cell].end(), gid_data );
01387 
01388         // Set connectivity array with proper local vertex handles
01389         for( int j = 0; j < num_group_cells; j++ )
01390         {
01391             EntityHandle global_cell_idx =
01392                 local_cells_with_n_edges[num_edges_per_cell][j];          // Global cell index, 1 based
01393             int local_cell_idx = localGidCells.index( global_cell_idx );  // Local cell index, 0 based
01394             assert( local_cell_idx != -1 );
01395 
01396             if( numCellGroups > 1 )
01397             {
01398                 // Populate cellHandleToGlobalID map to read cell variables
01399                 cellHandleToGlobalID[start_element + j] = global_cell_idx;
01400             }
01401 
01402             for( int k = 0; k < num_edges_per_cell; k++ )
01403             {
01404                 EntityHandle global_vert_idx =
01405                     vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + k];  // Global vertex index, 1 based
01406                 int local_vert_idx = localGidVerts.index( global_vert_idx );        // Local vertex index, 0 based
01407                 assert( local_vert_idx != -1 );
01408                 conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
01409                     start_vertex + local_vert_idx;
01410             }
01411         }
01412     }
01413 
01414     return MB_SUCCESS;
01415 }
01416 
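// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the original file):
// create_local_cells() buckets cells by their edge count so each non-empty
// bucket can be allocated as one block of MBPOLYGON elements with a uniform
// number of vertices per element. A hypothetical version with plain
// containers, where bucket b holds the 0-based indices of all cells with b
// edges:
static void example_group_cells_by_edge_count( const std::vector< int >& num_edges_on_cells,
                                               int max_edges_per_cell,
                                               std::vector< std::vector< int > >& cells_with_n_edges )
{
    cells_with_n_edges.assign( max_edges_per_cell + 1, std::vector< int >() );
    for( int i = 0; i < (int)num_edges_on_cells.size(); i++ )
        cells_with_n_edges[num_edges_on_cells[i]].push_back( i );  // bucket index == edge count
}
// ---------------------------------------------------------------------------
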
01417 ErrorCode NCHelperMPAS::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
01418                                                    EntityHandle start_vertex, Range& faces )
01419 {
01420     Interface*& mbImpl = _readNC->mbImpl;
01421     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
01422 
01423     // Only one group of cells (each cell is represented by a polygon with maxEdgesPerCell edges)
01424     numCellGroups = 1;
01425 
01426     // Create cells for this cell group
01427     EntityHandle start_element;
01428     EntityHandle* conn_arr_local_cells = NULL;
01429     ErrorCode rval =
01430         _readNC->readMeshIface->get_element_connect( nLocalCells, maxEdgesPerCell, MBPOLYGON, 0, start_element,
01431                                                      conn_arr_local_cells,
01432                                                      // Might have to create gather mesh later
01433                                                      ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
01434     faces.insert( start_element, start_element + nLocalCells - 1 );
01435 
01436     // Add local cells to current file set
01437     Range local_cells_range( start_element, start_element + nLocalCells - 1 );
01438     rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
01439 
01440     // Get ptr to GID memory for local cells
01441     int count  = 0;
01442     void* data = NULL;
01443     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
01444     assert( count == nLocalCells );
01445     int* gid_data = (int*)data;
01446     std::copy( localGidCells.begin(), localGidCells.end(), gid_data );
01447 
01448     // Set connectivity array with proper local vertex handles
01449     // The vertices_on_local_cells array was already corrected to pad with the last vertices,
01450     // so no extra checks are needed here
01451     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
01452     {
01453         for( int i = 0; i < maxEdgesPerCell; i++ )
01454         {
01455             EntityHandle global_vert_idx =
01456                 vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + i];  // Global vertex index, 1 based
01457             int local_vert_idx = localGidVerts.index( global_vert_idx );        // Local vertex index, 0 based
01458             assert( local_vert_idx != -1 );
01459             conn_arr_local_cells[local_cell_idx * maxEdgesPerCell + i] = start_vertex + local_vert_idx;
01460         }
01461     }
01462 
01463     return MB_SUCCESS;
01464 }
01465 
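// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the original file):
// the padded scheme keeps a fixed-width row of maxEdgesPerCell entries per
// cell and repeats the cell's last valid vertex in the unused tail, which is
// the correction the comment in create_padded_local_cells() refers to. A
// hypothetical helper showing that correction with plain containers:
static void example_pad_row_with_last_vertex( std::vector< int >& verts_on_cells,
                                              const std::vector< int >& num_edges_on_cells,
                                              int max_edges_per_cell )
{
    for( size_t c = 0; c < num_edges_on_cells.size(); c++ )
    {
        int num_edges = num_edges_on_cells[c];
        size_t row    = c * (size_t)max_edges_per_cell;
        int last_vert = verts_on_cells[row + num_edges - 1];  // last valid vertex of this cell
        for( int i = num_edges; i < max_edges_per_cell; i++ )
            verts_on_cells[row + i] = last_vert;  // overwrite the padded tail
    }
}
// ---------------------------------------------------------------------------
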
01466 ErrorCode NCHelperMPAS::create_gather_set_vertices( EntityHandle gather_set, EntityHandle& gather_set_start_vertex )
01467 {
01468     Interface*& mbImpl      = _readNC->mbImpl;
01469     Tag& mGlobalIdTag       = _readNC->mGlobalIdTag;
01470     const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
01471 
01472     // Create gather set vertices
01473     std::vector< double* > arrays;
01474     // Don't need to specify allocation number here, because we know enough vertices were created
01475     // before
01476     ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );
01477 
01478     // Add vertices to the gather set
01479     Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
01480     rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );
01481 
01482     // Read x coordinates for gather set vertices
01483     double* xptr = arrays[0];
01484     int xVertexVarId;
01485     int success = NCFUNC( inq_varid )( _fileId, "xVertex", &xVertexVarId );
01486     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of xVertex" );
01487     NCDF_SIZE read_start = 0;
01488     NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
01489 #ifdef MOAB_HAVE_PNETCDF
01490     // Enter independent I/O mode, since this read is only for the gather processor
01491     success = NCFUNC( begin_indep_data )( _fileId );
01492     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01493     success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
01494     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
01495     success = NCFUNC( end_indep_data )( _fileId );
01496     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01497 #else
01498     success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
01499     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read xVertex data" );
01500 #endif
01501 
01502     // Read y coordinates for gather set vertices
01503     double* yptr = arrays[1];
01504     int yVertexVarId;
01505     success = NCFUNC( inq_varid )( _fileId, "yVertex", &yVertexVarId );
01506     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of yVertex" );
01507 #ifdef MOAB_HAVE_PNETCDF
01508     // Enter independent I/O mode, since this read is only for the gather processor
01509     success = NCFUNC( begin_indep_data )( _fileId );
01510     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01511     success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
01512     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
01513     success = NCFUNC( end_indep_data )( _fileId );
01514     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01515 #else
01516     success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
01517     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read yVertex data" );
01518 #endif
01519 
01520     // Read z coordinates for gather set vertices
01521     double* zptr = arrays[2];
01522     int zVertexVarId;
01523     success = NCFUNC( inq_varid )( _fileId, "zVertex", &zVertexVarId );
01524     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of zVertex" );
01525 #ifdef MOAB_HAVE_PNETCDF
01526     // Enter independent I/O mode, since this read is only for the gather processor
01527     success = NCFUNC( begin_indep_data )( _fileId );
01528     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01529     success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
01530     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
01531     success = NCFUNC( end_indep_data )( _fileId );
01532     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01533 #else
01534     success = NCFUNCG( _vara_double )( _fileId, zVertexVarId, &read_start, &read_count, zptr );
01535     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read zVertex data" );
01536 #endif
01537 
01538     // Get ptr to GID memory for gather set vertices
01539     int count  = 0;
01540     void* data = NULL;
01541     rval =
01542         mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
01543     assert( count == nVertices );
01544     int* gid_data = (int*)data;
01545     for( int j = 1; j <= nVertices; j++ )
01546         gid_data[j - 1] = j;
01547 
01548     // Set the file id tag too; make these values larger than the global ids so the two do not interfere
01549     if( mpFileIdTag )
01550     {
01551         rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
01552                                     data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
01553         assert( count == nVertices );
01554         int bytes_per_tag = 4;
01555         rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
01556         if( 4 == bytes_per_tag )
01557         {
01558             gid_data = (int*)data;
01559             for( int j = 1; j <= nVertices; j++ )
01560                 gid_data[j - 1] = nVertices + j;  // Bigger than global id tag
01561         }
01562         else if( 8 == bytes_per_tag )
01563         {  // Should be a handle tag on 64 bit machine?
01564             long* handle_tag_data = (long*)data;
01565             for( int j = 1; j <= nVertices; j++ )
01566                 handle_tag_data[j - 1] = nVertices + j;  // Bigger than global id tag
01567         }
01568     }
01569 
01570     return MB_SUCCESS;
01571 }
01572 
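// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the original file):
// the gather-set reads are issued by a single rank, so with PnetCDF they are
// bracketed by begin_indep_data / end_indep_data to leave collective data
// mode for the duration of the read. A hypothetical bracket using the PnetCDF
// C API directly (the reader itself goes through the NCFUNC* macros):
#ifdef MOAB_HAVE_PNETCDF
static int example_indep_read_double( int ncid, int varid, MPI_Offset start, MPI_Offset count, double* buf )
{
    int rc = ncmpi_begin_indep_data( ncid );  // leave collective data mode
    if( NC_NOERR != rc ) return rc;
    rc = ncmpi_get_vara_double( ncid, varid, &start, &count, buf );  // independent read on this rank only
    int rc_end = ncmpi_end_indep_data( ncid );  // return to collective data mode
    return ( NC_NOERR != rc ) ? rc : rc_end;
}
#endif
// ---------------------------------------------------------------------------
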
01573 ErrorCode NCHelperMPAS::create_gather_set_edges( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01574 {
01575     Interface*& mbImpl = _readNC->mbImpl;
01576 
01577     // Create gather set edges
01578     EntityHandle start_edge;
01579     EntityHandle* conn_arr_gather_set_edges = NULL;
01580     // Don't need to specify allocation number here, because we know enough edges were created
01581     // before
01582     ErrorCode rval =
01583         _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );
01584 
01585     // Add edges to the gather set
01586     Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
01587     rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );
01588 
01589     // Read vertices on each edge
01590     int verticesOnEdgeVarId;
01591     int success = NCFUNC( inq_varid )( _fileId, "verticesOnEdge", &verticesOnEdgeVarId );
01592     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnEdge" );
01593     // Utilize the memory storage pointed by conn_arr_gather_set_edges
01594     int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
01595     NCDF_SIZE read_starts[2]          = { 0, 0 };
01596     NCDF_SIZE read_counts[2]          = { static_cast< NCDF_SIZE >( nEdges ), 2 };
01597 #ifdef MOAB_HAVE_PNETCDF
01598     // Enter independent I/O mode, since this read is only for the gather processor
01599     success = NCFUNC( begin_indep_data )( _fileId );
01600     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01601     success =
01602         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01603     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
01604     success = NCFUNC( end_indep_data )( _fileId );
01605     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01606 #else
01607     success =
01608         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01609     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnEdge data" );
01610 #endif
01611 
01612     // Populate connectivity data for gather set edges
01613     // Convert in-place from int (stored in the first half) to EntityHandle
01614     // Iterating backward lets each int be read before its slot is overwritten by the wider EntityHandle
01615     for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
01616     {
01617         int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert];  // Global vertex index, 1 based
01618         gather_set_vert_idx--;                                              // 1 based -> 0 based
01619         // Connectivity array is shifted by where the gather set vertices start
01620         conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
01621     }
01622 
01623     return MB_SUCCESS;
01624 }
01625 
01626 ErrorCode NCHelperMPAS::create_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01627 {
01628     Interface*& mbImpl = _readNC->mbImpl;
01629 
01630     // Read number of edges on each gather set cell
01631     int nEdgesOnCellVarId;
01632     int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
01633     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
01634     std::vector< int > num_edges_on_gather_set_cells( nCells );
01635     NCDF_SIZE read_start = 0;
01636     NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
01637 #ifdef MOAB_HAVE_PNETCDF
01638     // Enter independent I/O mode, since this read is only for the gather processor
01639     success = NCFUNC( begin_indep_data )( _fileId );
01640     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01641     success =
01642         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01643     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01644     success = NCFUNC( end_indep_data )( _fileId );
01645     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01646 #else
01647     success =
01648         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01649     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01650 #endif
01651 
01652     // Read vertices on each gather set cell (connectivity)
01653     int verticesOnCellVarId;
01654     success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
01655     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
01656     std::vector< int > vertices_on_gather_set_cells( nCells * maxEdgesPerCell );
01657     NCDF_SIZE read_starts[2] = { 0, 0 };
01658     NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
01659 #ifdef MOAB_HAVE_PNETCDF
01660     // Enter independent I/O mode, since this read is only for the gather processor
01661     success = NCFUNC( begin_indep_data )( _fileId );
01662     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01663     success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
01664                                     &vertices_on_gather_set_cells[0] );
01665     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01666     success = NCFUNC( end_indep_data )( _fileId );
01667     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01668 #else
01669     success = NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
01670                                     &vertices_on_gather_set_cells[0] );
01671     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01672 #endif
01673 
01674     // Divide gather set cells into groups based on the number of edges
01675     Range gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01676     // Insert larger values before smaller values to increase efficiency
01677     for( int i = nCells - 1; i >= 0; i-- )
01678     {
01679         int num_edges = num_edges_on_gather_set_cells[i];
01680         gather_set_cells_with_n_edges[num_edges].insert( i + 1 );  // 0 based -> 1 based
01681     }
01682 
01683     // Create gather set cells
01684     EntityHandle* conn_arr_gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
01685     for( int num_edges_per_cell = 3; num_edges_per_cell <= maxEdgesPerCell; num_edges_per_cell++ )
01686     {
01687         int num_group_cells = gather_set_cells_with_n_edges[num_edges_per_cell].size();
01688         if( num_group_cells > 0 )
01689         {
01690             EntityHandle start_element;
01691             ErrorCode rval = _readNC->readMeshIface->get_element_connect(
01692                 num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
01693                 conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell], num_group_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );
01694 
01695             // Add cells to the gather set
01696             Range gather_set_cells_range( start_element, start_element + num_group_cells - 1 );
01697             rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );
01698 
01699             for( int j = 0; j < num_group_cells; j++ )
01700             {
01701                 int gather_set_cell_idx =
01702                     gather_set_cells_with_n_edges[num_edges_per_cell][j];  // Global cell index, 1 based
01703                 gather_set_cell_idx--;                                     // 1 based -> 0 based
01704 
01705                 for( int k = 0; k < num_edges_per_cell; k++ )
01706                 {
01707                     EntityHandle gather_set_vert_idx =
01708                         vertices_on_gather_set_cells[gather_set_cell_idx * maxEdgesPerCell +
01709                                                      k];  // Global vertex index, 1 based
01710                     gather_set_vert_idx--;                // 1 based -> 0 based
01711 
01712                     // Connectivity array is shifted by where the gather set vertices start
01713                     conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
01714                         gather_set_start_vertex + gather_set_vert_idx;
01715                 }
01716             }
01717         }
01718     }
01719 
01720     return MB_SUCCESS;
01721 }
01722 
01723 ErrorCode NCHelperMPAS::create_padded_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01724 {
01725     Interface*& mbImpl = _readNC->mbImpl;
01726 
01727     // Read number of edges on each gather set cell
01728     int nEdgesOnCellVarId;
01729     int success = NCFUNC( inq_varid )( _fileId, "nEdgesOnCell", &nEdgesOnCellVarId );
01730     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of nEdgesOnCell" );
01731     std::vector< int > num_edges_on_gather_set_cells( nCells );
01732     NCDF_SIZE read_start = 0;
01733     NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nCells );
01734 #ifdef MOAB_HAVE_PNETCDF
01735     // Enter independent I/O mode, since this read is only for the gather processor
01736     success = NCFUNC( begin_indep_data )( _fileId );
01737     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01738     success =
01739         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01740     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01741     success = NCFUNC( end_indep_data )( _fileId );
01742     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01743 #else
01744     success =
01745         NCFUNCG( _vara_int )( _fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0] );
01746     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read nEdgesOnCell data" );
01747 #endif
01748 
01749     // Create gather set cells
01750     EntityHandle start_element;
01751     EntityHandle* conn_arr_gather_set_cells = NULL;
01752     // Don't need to specify allocation number here, because we know enough cells were created
01753     // before
01754     ErrorCode rval = _readNC->readMeshIface->get_element_connect( nCells, maxEdgesPerCell, MBPOLYGON, 0, start_element,
01755                                                                   conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );
01756 
01757     // Add cells to the gather set
01758     Range gather_set_cells_range( start_element, start_element + nCells - 1 );
01759     rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );
01760 
01761     // Read vertices on each gather set cell (connectivity)
01762     int verticesOnCellVarId;
01763     success = NCFUNC( inq_varid )( _fileId, "verticesOnCell", &verticesOnCellVarId );
01764     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of verticesOnCell" );
01765     // Utilize the memory storage pointed by conn_arr_gather_set_cells
01766     int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
01767     NCDF_SIZE read_starts[2]          = { 0, 0 };
01768     NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( maxEdgesPerCell ) };
01769 #ifdef MOAB_HAVE_PNETCDF
01770     // Enter independent I/O mode, since this read is only for the gather processor
01771     success = NCFUNC( begin_indep_data )( _fileId );
01772     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01773     success =
01774         NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
01775     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01776     success = NCFUNC( end_indep_data )( _fileId );
01777     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01778 #else
01779     success =
01780         NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
01781     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read verticesOnCell data" );
01782 #endif
01783 
01784     // Correct the gather set cell vertices array in the same way as the local cell vertices array:
01785     // replace the padded vertices with the last vertices in the corresponding cells
01786     for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
01787     {
01788         int num_edges                  = num_edges_on_gather_set_cells[gather_set_cell_idx];
01789         int idx_in_gather_set_vert_arr = gather_set_cell_idx * maxEdgesPerCell;
01790         int last_vert_idx              = vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + num_edges - 1];
01791         for( int i = num_edges; i < maxEdgesPerCell; i++ )
01792             vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + i] = last_vert_idx;
01793     }
01794 
01795     // Populate connectivity data for gather set cells
01796     // Convert in-place from int (stored in the first half) to EntityHandle
01797     // Iterating backward lets each int be read before its slot is overwritten by the wider EntityHandle
01798     for( int cell_vert = nCells * maxEdgesPerCell - 1; cell_vert >= 0; cell_vert-- )
01799     {
01800         int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert];  // Global vertex index, 1 based
01801         gather_set_vert_idx--;                                              // 1 based -> 0 based
01802         // Connectivity array is shifted by where the gather set vertices start
01803         conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
01804     }
01805 
01806     return MB_SUCCESS;
01807 }
01808 
01809 }  // namespace moab