MOAB: Mesh Oriented datABase  (version 5.2.1)
NCHelperGCRM.cpp
Go to the documentation of this file.
00001 #include "NCHelperGCRM.hpp"
00002 #include "moab/ReadUtilIface.hpp"
00003 #include "moab/FileOptions.hpp"
00004 #include "moab/SpectralMeshTool.hpp"
00005 #include "MBTagConventions.hpp"
00006 
00007 #ifdef MOAB_HAVE_ZOLTAN
00008 #include "moab/ZoltanPartitioner.hpp"
00009 #endif
00010 
00011 #include <cmath>
00012 
00013 namespace moab
00014 {
00015 
// GCRM cells are either pentagons or hexagons, and pentagons are always padded to hexagons,
// so every cell is stored with a fixed connectivity length of 6 vertices/edges (a pentagon
// repeats its last vertex; the duplicate is corrected later in create_mesh())
const int EDGES_PER_CELL = 6;
00018 
00019 NCHelperGCRM::NCHelperGCRM( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
00020     : UcdNCHelper( readNC, fileId, opts, fileSet ), createGatherSet( false )
00021 {
00022     // Ignore variables containing topological information
00023     ignoredVarNames.insert( "grid" );
00024     ignoredVarNames.insert( "cell_corners" );
00025     ignoredVarNames.insert( "cell_edges" );
00026     ignoredVarNames.insert( "edge_corners" );
00027     ignoredVarNames.insert( "cell_neighbors" );
00028 }
00029 
00030 bool NCHelperGCRM::can_read_file( ReadNC* readNC )
00031 {
00032     std::vector< std::string >& dimNames = readNC->dimNames;
00033 
00034     // If dimension name "cells" exists then it should be the GCRM grid
00035     if( std::find( dimNames.begin(), dimNames.end(), std::string( "cells" ) ) != dimNames.end() ) return true;
00036 
00037     return false;
00038 }
00039 
// Scan the file header for the GCRM grid dimensions (time, cells, edges, corners,
// layers, optional interfaces), cache their indices and lengths in member variables,
// read or synthesize the time coordinate values, classify every variable by entity
// location (vertex/edge/cell) and level count, and finally create dummy variables
// for dimensions that lack a corresponding coordinate variable.
// Returns MB_FAILURE if any of the required dimensions is missing.
ErrorCode NCHelperGCRM::init_mesh_vals()
{
    std::vector< std::string >& dimNames              = _readNC->dimNames;
    std::vector< int >& dimLens                       = _readNC->dimLens;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    ErrorCode rval;
    unsigned int idx;
    std::vector< std::string >::iterator vit;

    // Look for time dimension (either capitalization is accepted)
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        // MB_SET_ERR returns from this function, so idx is never used uninitialized
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'Time' or 'time' dimension" );
    }
    tDim       = idx;
    nTimeSteps = dimLens[idx];

    // Get number of cells
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "cells" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'cells' dimension" );
    }
    cDim   = idx;
    nCells = dimLens[idx];

    // Get number of edges
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "edges" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'edges' dimension" );
    }
    eDim   = idx;
    nEdges = dimLens[idx];

    // Get number of vertices
    // NOTE(review): this -1 initialization is immediately overwritten below (or the
    // function returns via MB_SET_ERR); kept as-is for byte-identical behavior
    vDim = -1;
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "corners" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'corners' dimension" );
    }
    vDim      = idx;
    nVertices = dimLens[idx];

    // Get number of layers (the primary vertical level dimension)
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "layers" ) ) != dimNames.end() )
        idx = vit - dimNames.begin();
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' dimension" );
    }
    levDim  = idx;
    nLevels = dimLens[idx];

    // Dimension indices for other optional levels
    std::vector< unsigned int > opt_lev_dims;

    // Get index of interface levels (optional; no error if absent)
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "interfaces" ) ) != dimNames.end() )
    {
        idx = vit - dimNames.begin();
        opt_lev_dims.push_back( idx );
    }

    std::map< std::string, ReadNC::VarData >::iterator vmit;

    // Store time coordinate values in tVals
    if( nTimeSteps > 0 )
    {
        // Prefer a coordinate variable named "time", then "t"
        if( ( vmit = varInfo.find( "time" ) ) != varInfo.end() )
        {
            rval = read_coordinate( "time", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'time' variable" );
        }
        else if( ( vmit = varInfo.find( "t" ) ) != varInfo.end() )
        {
            rval = read_coordinate( "t", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 't' variable" );
        }
        else
        {
            // If expected time variable is not available, set dummy time coordinate values to tVals
            for( int t = 0; t < nTimeSteps; t++ )
                tVals.push_back( (double)t );
        }
    }

    // For each variable, determine the entity location type and number of levels
    for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
    {
        ReadNC::VarData& vd = ( *vmit ).second;

        // Default entLoc is ENTLOCSET; only time-dependent variables are mapped
        // to mesh entities, based on which grid dimension they are defined over
        if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
        {
            if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCVERT;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCEDGE;
            else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
                vd.entLoc = ReadNC::ENTLOCFACE;
        }

        // Default numLev is 0
        if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
            vd.numLev = nLevels;
        else
        {
            // If layers dimension is not found, try other optional levels such as interfaces
            for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
            {
                if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
                {
                    vd.numLev = dimLens[opt_lev_dims[i]];
                    break;
                }
            }
        }
    }

    // Hack: create dummy variables for dimensions (like cells) with no corresponding coordinate
    // variables
    rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );

    return MB_SUCCESS;
}
00173 
00174 // When noMesh option is used on this read, the old ReadNC class instance for last read can get out
00175 // of scope (and deleted). The old instance initialized some variables properly when the mesh was
00176 // created, but they are now lost. The new instance (will not create the mesh with noMesh option)
00177 // has to restore them based on the existing mesh from last read
00178 ErrorCode NCHelperGCRM::check_existing_mesh()
00179 {
00180     Interface*& mbImpl = _readNC->mbImpl;
00181     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
00182     bool& noMesh       = _readNC->noMesh;
00183 
00184     if( noMesh )
00185     {
00186         ErrorCode rval;
00187 
00188         if( localGidVerts.empty() )
00189         {
00190             // Get all vertices from current file set (it is the input set in no_mesh scenario)
00191             Range local_verts;
00192             rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );
00193 
00194             if( !local_verts.empty() )
00195             {
00196                 std::vector< int > gids( local_verts.size() );
00197 
00198                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00199                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );
00200 
00201                 // Restore localGidVerts
00202                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
00203                 nLocalVertices = localGidVerts.size();
00204             }
00205         }
00206 
00207         if( localGidEdges.empty() )
00208         {
00209             // Get all edges from current file set (it is the input set in no_mesh scenario)
00210             Range local_edges;
00211             rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );
00212 
00213             if( !local_edges.empty() )
00214             {
00215                 std::vector< int > gids( local_edges.size() );
00216 
00217                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00218                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );
00219 
00220                 // Restore localGidEdges
00221                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
00222                 nLocalEdges = localGidEdges.size();
00223             }
00224         }
00225 
00226         if( localGidCells.empty() )
00227         {
00228             // Get all cells from current file set (it is the input set in no_mesh scenario)
00229             Range local_cells;
00230             rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );
00231 
00232             if( !local_cells.empty() )
00233             {
00234                 std::vector< int > gids( local_cells.size() );
00235 
00236                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00237                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );
00238 
00239                 // Restore localGidCells
00240                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
00241                 nLocalCells = localGidCells.size();
00242             }
00243         }
00244     }
00245 
00246     return MB_SUCCESS;
00247 }
00248 
// Build the local GCRM mesh: compute this rank's trivial (possibly shifted and then
// Zoltan-redistributed) partition of cells, read the cell-corner connectivity for the
// local cells, correct padded pentagon connectivity, create local vertices, optionally
// edges, and padded cells, and (on the gather rank) also build the gather-set mesh.
// On return, 'faces' holds the created local cells.
ErrorCode NCHelperGCRM::create_mesh( Range& faces )
{
    bool& noEdges       = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

#ifdef MOAB_HAVE_MPI
    int rank         = 0;
    int procs        = 1;
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rank                   = myPcomm->proc_config().proc_rank();
        procs                  = myPcomm->proc_config().proc_size();
    }

    // Need to know whether we'll be creating gather mesh
    if( rank == _readNC->gatherSetRank ) createGatherSet = true;

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank           = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * nCells / procs ) );

        // The starting global cell index in the GCRM file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = nCells % procs;

        // Allocate extra cells over procs: the first 'iextra' (shifted) ranks get one more
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        ErrorCode rval = redistribute_local_cells( start_cell_idx );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        // Serial (or single-proc) run: own every cell, with 1-based global ids
        nLocalCells = nCells;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    nLocalCells = nCells;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // Read vertices on each local cell, to get localGidVerts and cell connectivity later
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    std::vector< int > vertices_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, " nLocalCells = %d\n", (int)nLocalCells );
    dbgOut.tprintf( 1, " vertices_on_local_cells.size() = %d\n", (int)vertices_on_local_cells.size() );
#ifdef MOAB_HAVE_PNETCDF
    // One non-blocking read request per contiguous subrange of local cell ids
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        // Each pair is an inclusive [starth, endh] run of 1-based global cell ids
        EntityHandle starth = pair_iter->first;
        EntityHandle endh   = pair_iter->second;
        dbgOut.tprintf( 1, " cell_corners starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, " cell_corners   endh = %d\n", (int)endh );
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                           &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                         &( vertices_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Correct vertices_on_local_cells array. Pentagons as hexagons should have
    // a connectivity like 123455 and not 122345
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        int* pvertex = &vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL];
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            // Find the first adjacent duplicate and shift the tail left, so the
            // duplicate ends up in the last slot
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k
                break;
            }
        }
    }

    // GCRM is 0 based, convert vertex indices from 0 to 1 based
    for( std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++ )
        vertices_on_local_cells[idx] += 1;

    // Create local vertices
    EntityHandle start_vertex;
    ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for GCRM mesh" );

    // Create local edges (unless NO_EDGES read option is set)
    if( !noEdges )
    {
        rval = create_local_edges( start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local edges for GCRM mesh" );
    }

    // Create local cells, padded
    rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for GCRM mesh" );

    if( createGatherSet )
    {
        EntityHandle gather_set;
        rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );

        // Create gather set vertices
        EntityHandle start_gather_set_vertex;
        rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for GCRM mesh" );

        // Create gather set edges (unless NO_EDGES read option is set)
        if( !noEdges )
        {
            rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for GCRM mesh" );
        }

        // Create gather set cells with padding
        rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for GCRM mesh" );
    }

    return MB_SUCCESS;
}
00406 
// Prepare per-variable read parameters (readStarts/readCounts over time, entity
// and level dimensions) and allocate tag storage for each requested timestep,
// saving the tag-data pointers in vdatas[i].varDatas for the subsequent read.
// Edge variables are skipped when the NO_EDGES option is set.
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
                                                               std::vector< int >& tstep_nums )
{
    Interface*& mbImpl          = _readNC->mbImpl;
    std::vector< int >& dimLens = _readNC->dimLens;
    bool& noEdges               = _readNC->noEdges;
    DebugOutput& dbgOut         = _readNC->dbgOut;

    Range* range = NULL;

    // Get vertices
    Range verts;
    ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
    assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );

    // Get edges
    Range edges;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );

    // Get faces
    Range faces;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
    assert( "Should only have a single face subrange, since they were read in one shot" && faces.psize() == 1 );

#ifdef MOAB_HAVE_MPI
    // In parallel, read face data only on owned faces to avoid duplication
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rval                   = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
    }
    else
        facesOwned = faces;  // not running in parallel, but still with MPI
#else
    facesOwned = faces;
#endif

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Support non-set variables with 3 dimensions like (time, cells, layers), or
        // 2 dimensions like (time, cells)
        assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );

        // For a non-set variable, time should be the first dimension
        assert( tDim == vdatas[i].varDims[0] );

        // Set up readStarts and readCounts
        vdatas[i].readStarts.resize( 3 );
        vdatas[i].readCounts.resize( 3 );

        // First: Time
        vdatas[i].readStarts[0] = 0;  // This value is timestep dependent, will be set later
        vdatas[i].readCounts[0] = 1;

        // Next: nVertices / nCells / nEdges
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                // Vertices
                // Start from the first localGidVerts
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidVerts[0] - 1;
                vdatas[i].readCounts[1] = nLocalVertices;
                range                   = &verts;
                break;
            case ReadNC::ENTLOCFACE:
                // Faces
                // Start from the first localGidCells
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidCells[0] - 1;
                vdatas[i].readCounts[1] = nLocalCells;
                range                   = &facesOwned;
                break;
            case ReadNC::ENTLOCEDGE:
                // Edges
                // Start from the first localGidEdges
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidEdges[0] - 1;
                vdatas[i].readCounts[1] = nLocalEdges;
                range                   = &edges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        // Finally: layers or other optional levels, it is possible that there is no
        // level dimension (numLev is 0) for this non-set variable
        if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
        vdatas[i].readStarts[2] = 0;
        vdatas[i].readCounts[2] = vdatas[i].numLev;

        // Get variable size (number of values to read per timestep)
        vdatas[i].sz = 1;
        for( std::size_t idx = 0; idx != 3; idx++ )
            vdatas[i].sz *= vdatas[i].readCounts[idx];

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );

            if( tstep_nums[t] >= dimLens[tDim] )
            { MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] ); }

            // Get the tag to read into
            if( !vdatas[i].varTags[t] )
            {
                rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
            }

            // Get ptr to tag space; tag_iterate works here because the range is one
            // contiguous subrange (asserted above)
            assert( 1 == range->psize() );
            void* data;
            int count;
            rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
            assert( (unsigned)count == range->size() );
            vdatas[i].varDatas[t] = data;
        }
    }

    return rval;
}
00531 
00532 #ifdef MOAB_HAVE_PNETCDF
// PnetCDF (asynchronous) variant: allocate tag storage via
// read_ucd_variables_to_nonset_allocate(), then for each variable and timestep
// issue one non-blocking read per contiguous subrange of the local global-id
// range, wait for completion, and copy the values into the tag memory.
// Only float/double variables are supported; edge variables are skipped under
// the NO_EDGES option.
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
                                                            std::vector< int >& tstep_nums )
{
    bool& noEdges       = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Select the global-id range matching this variable's entity location
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // We will synchronize all these reads with the other processors,
            // so the wait will be inside this double loop; is it too much?
            size_t nb_reads = pLocalGid->psize();
            std::vector< int > requests( nb_reads ), statuss( nb_reads );
            size_t idxReq = 0;

            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of ucd mesh, and on multiple proc,
                    // we need to read as many times as subranges we have in the
                    // localGid range;
                    // basically, we have to give a different point
                    // for data to start, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic                 = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth     = pair_iter->first;
                        EntityHandle endh       = pair_iter->second;  // inclusive
                        vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
                        vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );

                        // Do a partial read, in each subrange
                        // wait outside this loop
                        success =
                            NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                        &( vdatas[i].readCounts[0] ),
                                                        &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
                    }
                    assert( ic == pLocalGid->psize() );

                    // Complete all outstanding requests for this variable/timestep
                    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
                    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );

                    // Copy the read values into the tag storage allocated earlier
                    void* data = vdatas[i].varDatas[t];
                    for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                        ( (double*)data )[idx] = tmpdoubledata[idx];

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
00641 #else
// Blocking-read variant: for each requested variable and timestep, read the data for
// this processor's local entities (vertices, cells or edges) directly into the tag
// memory previously allocated by read_ucd_variables_to_nonset_allocate().
// Because the local global-id ranges are generally non-contiguous, one partial read
// is issued per subrange of the corresponding Range.
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
                                                      std::vector< int >& tstep_nums )
{
    bool& noEdges = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

    // Allocate tag space for all variables/timesteps up front
    ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );

    // Finally, read into that space
    int success;
    Range* pLocalGid = NULL;

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Pick the local global-id range matching the variable's entity location
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                pLocalGid = &localGidVerts;
                break;
            case ReadNC::ENTLOCFACE:
                pLocalGid = &localGidCells;
                break;
            case ReadNC::ENTLOCEDGE:
                pLocalGid = &localGidEdges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        // Total number of values to read for one timestep of this variable
        std::size_t sz = vdatas[i].sz;

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            // Set readStart for each timestep along time dimension
            vdatas[i].readStarts[0] = tstep_nums[t];

            switch( vdatas[i].varDataType )
            {
                case NC_FLOAT:
                case NC_DOUBLE: {
                    // Read float as double
                    std::vector< double > tmpdoubledata( sz );

                    // In the case of ucd mesh, and on multiple proc,
                    // we need to read as many times as subranges we have in the
                    // localGid range;
                    // basically, we have to give a different point
                    // for data to start, for every subrange :(
                    size_t indexInDoubleArray = 0;
                    size_t ic = 0;
                    for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
                         ++pair_iter, ic++ )
                    {
                        EntityHandle starth = pair_iter->first;
                        EntityHandle endh = pair_iter->second;  // Inclusive
                        // Global ids are 1-based; file offsets are 0-based
                        vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
                        vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );

                        success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
                                                            &( vdatas[i].readCounts[0] ),
                                                            &( tmpdoubledata[indexInDoubleArray] ) );
                        if( success )
                            MB_SET_ERR( MB_FAILURE,
                                        "Failed to read double data in a loop for variable " << vdatas[i].varName );
                        // We need to increment the index in double array for the
                        // next subrange
                        indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
                    }
                    // Sanity check: we visited exactly one subrange per Range pair
                    assert( ic == pLocalGid->psize() );

                    // Copy the assembled buffer into the tag memory for this timestep
                    void* data = vdatas[i].varDatas[t];
                    for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
                        ( (double*)data )[idx] = tmpdoubledata[idx];

                    break;
                }
                default:
                    MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
            }
        }
    }

    // Debug output, if requested
    if( 1 == dbgOut.get_verbosity() )
    {
        dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
        for( unsigned int i = 1; i < vdatas.size(); i++ )
            dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
        dbgOut.tprintf( 1, "\n" );
    }

    return rval;
}
00738 #endif
00739 
00740 #ifdef MOAB_HAVE_MPI
00741 ErrorCode NCHelperGCRM::redistribute_local_cells( int start_cell_idx )
00742 {
00743     // If possible, apply Zoltan partition
00744 #ifdef MOAB_HAVE_ZOLTAN
00745     if( _readNC->partMethod == ScdParData::RCBZOLTAN )
00746     {
00747         // Read lat/lon coordinates of cell centers, then convert spherical to
00748         // Cartesian, and use them as input to Zoltan partition
00749         int xCellVarId;
00750         int success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &xCellVarId );
00751         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
00752         std::vector< double > xCell( nLocalCells );
00753         NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
00754         NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
00755         success              = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
00756         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );
00757 
00758         // Read y coordinates of cell centers
00759         int yCellVarId;
00760         success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &yCellVarId );
00761         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
00762         std::vector< double > yCell( nLocalCells );
00763         success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
00764         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );
00765 
00766         // Convert lon/lat/rad to x/y/z
00767         std::vector< double > zCell( nLocalCells );
00768         double rad = 8000.0;  // This is just a approximate radius
00769         for( int i = 0; i < nLocalCells; i++ )
00770         {
00771             double cosphi = cos( xCell[i] );
00772             double zmult  = sin( xCell[i] );
00773             double xmult  = cosphi * cos( yCell[i] );
00774             double ymult  = cosphi * sin( yCell[i] );
00775             xCell[i]      = rad * xmult;
00776             yCell[i]      = rad * ymult;
00777             zCell[i]      = rad * zmult;
00778         }
00779 
00780         // Zoltan partition using RCB; maybe more studies would be good, as to which partition
00781         // is better
00782         Interface*& mbImpl         = _readNC->mbImpl;
00783         DebugOutput& dbgOut        = _readNC->dbgOut;
00784         ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, false, 0, NULL );
00785         ErrorCode rval             = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
00786         delete mbZTool;
00787 
00788         dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
00789         dbgOut.tprintf( 1, "                           localGidCells.size() = %d\n", (int)localGidCells.size() );
00790 
00791         // This is important: local cells are now redistributed, so nLocalCells might be different!
00792         nLocalCells = localGidCells.size();
00793 
00794         return MB_SUCCESS;
00795     }
00796 #endif
00797 
00798     // By default, apply trivial partition
00799     localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
00800 
00801     return MB_SUCCESS;
00802 }
00803 #endif
00804 
/// Create the vertices used by this processor's local cells and fill in their
/// Cartesian coordinates (converted from the grid_corner_lon/grid_corner_lat
/// spherical coordinates in the file). Also reads the vertical level values
/// ("layers" or "interfaces") into levVals.
///
/// \param vertices_on_local_cells (1-based) global vertex ids per local cell,
///        padded to EDGES_PER_CELL entries per cell
/// \param start_vertex returns the handle of the first created local vertex
ErrorCode NCHelperGCRM::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
                                               EntityHandle& start_vertex )
{
    Interface*& mbImpl                                = _readNC->mbImpl;
    Tag& mGlobalIdTag                                 = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag                           = _readNC->mpFileIdTag;
    DebugOutput& dbgOut                               = _readNC->dbgOut;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell
    // connectivity later)
    std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
    std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
    // Insert the sorted ids into the Range; duplicates collapse, leaving the set of
    // unique local vertex ids (copied in reverse order of the sorted array)
    std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
               range_inserter( localGidVerts ) );
    nLocalVertices = localGidVerts.size();

    dbgOut.tprintf( 1, "   localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
    dbgOut.tprintf( 1, "   localGidVerts.size() = %d\n", (int)localGidVerts.size() );

    // Create local vertices
    std::vector< double* > arrays;
    ErrorCode rval =
        _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
                                                 // Might have to create gather mesh later
                                                 ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    // Add local vertices to current file set
    Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );

    // Get ptr to GID memory for local vertices
    int count  = 0;
    void* data = NULL;
    rval       = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
    assert( count == nLocalVertices );
    int* gid_data = (int*)data;
    std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );

    // Duplicate GID data, which will be used to resolve sharing
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
        assert( count == nLocalVertices );
        int bytes_per_tag = 4;
        rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
        }
        else if( 8 == bytes_per_tag )
        {  // Should be a handle tag on 64 bit machine?
            long* handle_tag_data = (long*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    // One nonblocking request per subrange of local vertex ids
    size_t nb_reads = localGidVerts.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif

    // Store lev values in levVals
    std::map< std::string, ReadNC::VarData >::iterator vmit;
    if( ( vmit = varInfo.find( "layers" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "layers", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'layers' variable" );
    }
    else if( ( vmit = varInfo.find( "interfaces" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "interfaces", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'interfaces' variable" );
    }
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' or 'interfaces' variable" );
    }

    // Decide whether down is positive
    // NOTE(review): get_att_text does not null-terminate; this assumes the "positive"
    // attribute value fits in the 10-byte buffer -- confirm against expected files
    char posval[10] = { 0 };
    int success     = NCFUNC( get_att_text )( _fileId, ( *vmit ).second.varId, "positive", posval );
    if( 0 == success && !strncmp( posval, "down", 4 ) )
    {
        // Negate level values so that increasing level is always "up"
        for( std::vector< double >::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit )
            ( *dvit ) *= -1.0;
    }

    // Read x coordinates for local vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        // Global ids are 1-based; file offsets are 0-based
        NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
        NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Read y coordinates for local vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
        NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Convert lon/lat/rad to x/y/z
    // xptr currently holds longitudes and yptr latitudes; overwrite in place
    double* zptr = arrays[2];
    double rad   = 8000.0 + levVals[0];
    for( int i = 0; i < nLocalVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult  = sin( yptr[i] );
        double xmult  = cosphi * cos( xptr[i] );
        double ymult  = cosphi * sin( xptr[i] );
        xptr[i]       = rad * xmult;
        yptr[i]       = rad * ymult;
        zptr[i]       = rad * zmult;
    }

    return MB_SUCCESS;
}
00979 
/// Create the edges used by this processor's local cells: read cell_edges to find
/// the set of local edge ids, create MBEDGE entities for them, set their global
/// ids, then read edge_corners to fill in their vertex connectivity.
///
/// \param start_vertex handle of the first local vertex (created earlier); used to
///        translate local vertex indices into handles
ErrorCode NCHelperGCRM::create_local_edges( EntityHandle start_vertex )
{
    Interface*& mbImpl  = _readNC->mbImpl;
    Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;

    // Read edges on each local cell, to get localGidEdges
    int edgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_edges", &edgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_edges" );

    std::vector< int > edges_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, "   edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );

#ifdef MOAB_HAVE_PNETCDF
    // One nonblocking request per subrange of local cell ids
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh   = pair_iter->second;
        dbgOut.tprintf( 1, "   starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, "   endh = %d\n", (int)endh );
        // Global ids are 1-based; file offsets are 0-based
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                           &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                         &( edges_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_edges data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // GCRM is 0 based, convert edge indices from 0 to 1 based
    for( std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++ )
        edges_on_local_cells[idx] += 1;

    // Collect local edges
    // Sort, then insert into the Range (duplicates collapse, leaving unique edge ids)
    std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
    std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
    nLocalEdges = localGidEdges.size();

    dbgOut.tprintf( 1, "   localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
    dbgOut.tprintf( 1, "   localGidEdges.size() = %d\n", (int)localGidEdges.size() );

    // Create local edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_edges = NULL;
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
                                                     // Might have to create gather mesh later
                                                     ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );

    // Add local edges to current file set
    Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );

    // Get ptr to GID memory for edges
    int count  = 0;
    void* data = NULL;
    rval       = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
    assert( count == nLocalEdges );
    int* gid_data = (int*)data;
    std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );

    int verticesOnEdgeVarId;

    // Read vertices on each local edge, to get edge connectivity
    success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
    // Utilize the memory storage pointed by conn_arr_edges
    // (ints are read into the first half of the EntityHandle array, then converted
    // in place further below)
    int* vertices_on_local_edges = (int*)conn_arr_edges;
#ifdef MOAB_HAVE_PNETCDF
    nb_reads = localGidEdges.psize();
    requests.resize( nb_reads );
    statuss.resize( nb_reads );
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
         ++pair_iter )
    {
        EntityHandle starth      = pair_iter->first;
        EntityHandle endh        = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                           &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                         &( vertices_on_local_edges[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * 2;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Populate connectivity data for local edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        // Note, indices stored in vertices_on_local_edges are 0 based
        int global_vert_idx = vertices_on_local_edges[edge_vert] + 1;  // Global vertex index, 1 based
        int local_vert_idx  = localGidVerts.index( global_vert_idx );  // Local vertex index, 0 based
        assert( local_vert_idx != -1 );
        conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
    }

    return MB_SUCCESS;
}
01120 
01121 ErrorCode NCHelperGCRM::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
01122                                                    EntityHandle start_vertex, Range& faces )
01123 {
01124     Interface*& mbImpl = _readNC->mbImpl;
01125     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
01126 
01127     // Create cells
01128     EntityHandle start_element;
01129     EntityHandle* conn_arr_local_cells = NULL;
01130     ErrorCode rval =
01131         _readNC->readMeshIface->get_element_connect( nLocalCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
01132                                                      conn_arr_local_cells,
01133                                                      // Might have to create gather mesh later
01134                                                      ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
01135     faces.insert( start_element, start_element + nLocalCells - 1 );
01136 
01137     // Add local cells to current file set
01138     Range local_cells_range( start_element, start_element + nLocalCells - 1 );
01139     rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
01140 
01141     // Get ptr to GID memory for local cells
01142     int count  = 0;
01143     void* data = NULL;
01144     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
01145     assert( count == nLocalCells );
01146     int* gid_data = (int*)data;
01147     std::copy( localGidCells.begin(), localGidCells.end(), gid_data );
01148 
01149     // Set connectivity array with proper local vertices handles
01150     // vertices_on_local_cells array was already corrected to have
01151     // the last vertices repeated for pentagons, e.g. 122345 => 123455
01152     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
01153     {
01154         for( int i = 0; i < EDGES_PER_CELL; i++ )
01155         {
01156             // Note, indices stored in vertices_on_local_cells are 1 based
01157             EntityHandle global_vert_idx =
01158                 vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL + i];  // Global vertex index, 1 based
01159             int local_vert_idx = localGidVerts.index( global_vert_idx );       // Local vertex index, 0 based
01160             assert( local_vert_idx != -1 );
01161             conn_arr_local_cells[local_cell_idx * EDGES_PER_CELL + i] = start_vertex + local_vert_idx;
01162         }
01163     }
01164 
01165     return MB_SUCCESS;
01166 }
01167 
/// Create ALL vertices of the mesh in the gather set (on the gather processor),
/// reading the full grid_corner_lon/grid_corner_lat arrays and converting them to
/// Cartesian coordinates. Global ids are set to 1..nVertices; the file id tag (if
/// present) is set to nVertices+1..2*nVertices so it does not collide with the
/// global ids of the local vertices.
///
/// \param gather_set the set to which the gather vertices are added
/// \param gather_set_start_vertex returns the handle of the first gather vertex
ErrorCode NCHelperGCRM::create_gather_set_vertices( EntityHandle gather_set, EntityHandle& gather_set_start_vertex )
{
    Interface*& mbImpl      = _readNC->mbImpl;
    Tag& mGlobalIdTag       = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag = _readNC->mpFileIdTag;

    // Create gather set vertices
    std::vector< double* > arrays;
    // Don't need to specify allocation number here, because we know enough vertices were created
    // before
    ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );

    // Add vertices to the gather set
    Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );

    // Read x coordinates for gather set vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    NCDF_SIZE read_start = 0;
    NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
#endif

    // Read y coordinates for gather set vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
#endif

    // Convert lon/lat/rad to x/y/z
    // xptr currently holds longitudes and yptr latitudes; overwrite in place
    double* zptr = arrays[2];
    double rad   = 8000.0 + levVals[0];
    for( int i = 0; i < nVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult  = sin( yptr[i] );
        double xmult  = cosphi * cos( xptr[i] );
        double ymult  = cosphi * sin( xptr[i] );
        xptr[i]       = rad * xmult;
        yptr[i]       = rad * ymult;
        zptr[i]       = rad * zmult;
    }

    // Get ptr to GID memory for gather set vertices
    int count  = 0;
    void* data = NULL;
    rval =
        mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
    assert( count == nVertices );
    int* gid_data = (int*)data;
    for( int j = 1; j <= nVertices; j++ )
        gid_data[j - 1] = j;

    // Set the file id tag too, it should be bigger something not interfering with global id
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
                                    data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
        assert( count == nVertices );
        int bytes_per_tag = 4;
        rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            for( int j = 1; j <= nVertices; j++ )
                gid_data[j - 1] = nVertices + j;  // Bigger than global id tag
        }
        else if( 8 == bytes_per_tag )
        {  // Should be a handle tag on 64 bit machine?
            long* handle_tag_data = (long*)data;
            for( int j = 1; j <= nVertices; j++ )
                handle_tag_data[j - 1] = nVertices + j;  // Bigger than global id tag
        }
    }

    return MB_SUCCESS;
}
01270 
01271 ErrorCode NCHelperGCRM::create_gather_set_edges( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01272 {
01273     Interface*& mbImpl = _readNC->mbImpl;
01274 
01275     // Create gather set edges
01276     EntityHandle start_edge;
01277     EntityHandle* conn_arr_gather_set_edges = NULL;
01278     // Don't need to specify allocation number here, because we know enough edges were created
01279     // before
01280     ErrorCode rval =
01281         _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );
01282 
01283     // Add edges to the gather set
01284     Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
01285     rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );
01286 
01287     // Read vertices on each edge
01288     int verticesOnEdgeVarId;
01289     int success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
01290     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
01291     // Utilize the memory storage pointed by conn_arr_gather_set_edges
01292     int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
01293     NCDF_SIZE read_starts[2]          = { 0, 0 };
01294     NCDF_SIZE read_counts[2]          = { static_cast< NCDF_SIZE >( nEdges ), 2 };
01295 #ifdef MOAB_HAVE_PNETCDF
01296     // Enter independent I/O mode, since this read is only for the gather processor
01297     success = NCFUNC( begin_indep_data )( _fileId );
01298     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01299     success =
01300         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01301     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
01302     success = NCFUNC( end_indep_data )( _fileId );
01303     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01304 #else
01305     success =
01306         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01307     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
01308 #endif
01309 
01310     // Populate connectivity data for gather set edges
01311     // Convert in-place from int (stored in the first half) to EntityHandle
01312     // Reading backward is the trick
01313     for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
01314     {
01315         // Note, indices stored in vertices_on_gather_set_edges are 0 based
01316         int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert];  // Global vertex index, 0 based
01317         // Connectivity array is shifted by where the gather set vertices start
01318         conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
01319     }
01320 
01321     return MB_SUCCESS;
01322 }
01323 
// Create the gather set cells (all GCRM cells, padded to hexagons) on the gather
// processor, wiring their connectivity to the gather set vertices that start at
// gather_set_start_vertex. Returns MB_SUCCESS, or sets an error on any MOAB or
// NetCDF failure.
ErrorCode NCHelperGCRM::create_padded_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    Interface*& mbImpl = _readNC->mbImpl;

    // Create gather set cells; every cell is stored as a polygon with EDGES_PER_CELL
    // (6) vertices — pentagons are padded by repeating their last vertex
    EntityHandle start_element;
    EntityHandle* conn_arr_gather_set_cells = NULL;
    // Don't need to specify allocation number here, because we know enough cells were created
    // before
    ErrorCode rval = _readNC->readMeshIface->get_element_connect( nCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
                                                                  conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );

    // Add cells to the gather set
    Range gather_set_cells_range( start_element, start_element + nCells - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );

    // Read vertices on each gather set cell (connectivity)
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    // Utilize the memory storage pointed by conn_arr_gather_set_cells: the int indices
    // occupy only the first half of the EntityHandle buffer and are widened in place below
    int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
    NCDF_SIZE read_starts[2]          = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
#endif

    // Correct gather set cell vertices array in the same way as local cell vertices array
    // Pentagons as hexagons should have a connectivity like 123455 and not 122345:
    // the repeated vertex must sit at the end, so shift everything after the first
    // duplicated pair left by one (duplicating the final vertex)
    for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
    {
        int* pvertex = vertices_on_gather_set_cells + gather_set_cell_idx * EDGES_PER_CELL;
        // Only pairs up to (EDGES_PER_CELL - 2) need checking: a duplicate already in the
        // last pair means the connectivity is in the desired 123455 form
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k: a padded pentagon has exactly one duplicated vertex
                break;
            }
        }
    }

    // Populate connectivity data for gather set cells
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick: the wider handle slot being written never
    // overlaps an int slot that still remains to be read
    for( int cell_vert = nCells * EDGES_PER_CELL - 1; cell_vert >= 0; cell_vert-- )
    {
        // Note, indices stored in vertices_on_gather_set_cells are 0 based
        int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert];  // Global vertex index, 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}
01394 
01395 }  // namespace moab
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines