MOAB: Mesh Oriented datABase  (version 5.3.1)
NCHelperGCRM.cpp
Go to the documentation of this file.
00001 #include "NCHelperGCRM.hpp"
00002 #include "moab/ReadUtilIface.hpp"
00003 #include "moab/FileOptions.hpp"
00004 #include "moab/SpectralMeshTool.hpp"
00005 #include "MBTagConventions.hpp"
00006 
00007 #ifdef MOAB_HAVE_ZOLTAN
00008 #include "moab/ZoltanPartitioner.hpp"
00009 #endif
00010 
00011 #include <cmath>
00012 
00013 namespace moab
00014 {
00015 
// GCRM cells are either pentagons or hexagons; pentagons are always padded to
// hexagons (a repeated vertex), so every cell is stored with 6 corners/edges
const int EDGES_PER_CELL = 6;
00018 
00019 NCHelperGCRM::NCHelperGCRM( ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet )
00020     : UcdNCHelper( readNC, fileId, opts, fileSet ), createGatherSet( false )
00021 {
00022     // Ignore variables containing topological information
00023     ignoredVarNames.insert( "grid" );
00024     ignoredVarNames.insert( "cell_corners" );
00025     ignoredVarNames.insert( "cell_edges" );
00026     ignoredVarNames.insert( "edge_corners" );
00027     ignoredVarNames.insert( "cell_neighbors" );
00028 }
00029 
00030 bool NCHelperGCRM::can_read_file( ReadNC* readNC )
00031 {
00032     std::vector< std::string >& dimNames = readNC->dimNames;
00033 
00034     // If dimension name "cells" exists then it should be the GCRM grid
00035     if( std::find( dimNames.begin(), dimNames.end(), std::string( "cells" ) ) != dimNames.end() ) return true;
00036 
00037     return false;
00038 }
00039 
00040 ErrorCode NCHelperGCRM::init_mesh_vals()
00041 {
00042     std::vector< std::string >& dimNames              = _readNC->dimNames;
00043     std::vector< int >& dimLens                       = _readNC->dimLens;
00044     std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
00045 
00046     ErrorCode rval;
00047     unsigned int idx;
00048     std::vector< std::string >::iterator vit;
00049 
00050     // Look for time dimension
00051     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "Time" ) ) != dimNames.end() )
00052         idx = vit - dimNames.begin();
00053     else if( ( vit = std::find( dimNames.begin(), dimNames.end(), "time" ) ) != dimNames.end() )
00054         idx = vit - dimNames.begin();
00055     else
00056     {
00057         MB_SET_ERR( MB_FAILURE, "Couldn't find 'Time' or 'time' dimension" );
00058     }
00059     tDim       = idx;
00060     nTimeSteps = dimLens[idx];
00061 
00062     // Get number of cells
00063     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "cells" ) ) != dimNames.end() )
00064         idx = vit - dimNames.begin();
00065     else
00066     {
00067         MB_SET_ERR( MB_FAILURE, "Couldn't find 'cells' dimension" );
00068     }
00069     cDim   = idx;
00070     nCells = dimLens[idx];
00071 
00072     // Get number of edges
00073     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "edges" ) ) != dimNames.end() )
00074         idx = vit - dimNames.begin();
00075     else
00076     {
00077         MB_SET_ERR( MB_FAILURE, "Couldn't find 'edges' dimension" );
00078     }
00079     eDim   = idx;
00080     nEdges = dimLens[idx];
00081 
00082     // Get number of vertices
00083     vDim = -1;
00084     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "corners" ) ) != dimNames.end() )
00085         idx = vit - dimNames.begin();
00086     else
00087     {
00088         MB_SET_ERR( MB_FAILURE, "Couldn't find 'corners' dimension" );
00089     }
00090     vDim      = idx;
00091     nVertices = dimLens[idx];
00092 
00093     // Get number of layers
00094     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "layers" ) ) != dimNames.end() )
00095         idx = vit - dimNames.begin();
00096     else
00097     {
00098         MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' dimension" );
00099     }
00100     levDim  = idx;
00101     nLevels = dimLens[idx];
00102 
00103     // Dimension indices for other optional levels
00104     std::vector< unsigned int > opt_lev_dims;
00105 
00106     // Get index of interface levels
00107     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "interfaces" ) ) != dimNames.end() )
00108     {
00109         idx = vit - dimNames.begin();
00110         opt_lev_dims.push_back( idx );
00111     }
00112 
00113     std::map< std::string, ReadNC::VarData >::iterator vmit;
00114 
00115     // Store time coordinate values in tVals
00116     if( nTimeSteps > 0 )
00117     {
00118         if( ( vmit = varInfo.find( "time" ) ) != varInfo.end() )
00119         {
00120             rval = read_coordinate( "time", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 'time' variable" );
00121         }
00122         else if( ( vmit = varInfo.find( "t" ) ) != varInfo.end() )
00123         {
00124             rval = read_coordinate( "t", 0, nTimeSteps - 1, tVals );MB_CHK_SET_ERR( rval, "Trouble reading 't' variable" );
00125         }
00126         else
00127         {
00128             // If expected time variable is not available, set dummy time coordinate values to tVals
00129             for( int t = 0; t < nTimeSteps; t++ )
00130                 tVals.push_back( (double)t );
00131         }
00132     }
00133 
00134     // For each variable, determine the entity location type and number of levels
00135     for( vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit )
00136     {
00137         ReadNC::VarData& vd = ( *vmit ).second;
00138 
00139         // Default entLoc is ENTLOCSET
00140         if( std::find( vd.varDims.begin(), vd.varDims.end(), tDim ) != vd.varDims.end() )
00141         {
00142             if( std::find( vd.varDims.begin(), vd.varDims.end(), vDim ) != vd.varDims.end() )
00143                 vd.entLoc = ReadNC::ENTLOCVERT;
00144             else if( std::find( vd.varDims.begin(), vd.varDims.end(), eDim ) != vd.varDims.end() )
00145                 vd.entLoc = ReadNC::ENTLOCEDGE;
00146             else if( std::find( vd.varDims.begin(), vd.varDims.end(), cDim ) != vd.varDims.end() )
00147                 vd.entLoc = ReadNC::ENTLOCFACE;
00148         }
00149 
00150         // Default numLev is 0
00151         if( std::find( vd.varDims.begin(), vd.varDims.end(), levDim ) != vd.varDims.end() )
00152             vd.numLev = nLevels;
00153         else
00154         {
00155             // If layers dimension is not found, try other optional levels such as interfaces
00156             for( unsigned int i = 0; i < opt_lev_dims.size(); i++ )
00157             {
00158                 if( std::find( vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i] ) != vd.varDims.end() )
00159                 {
00160                     vd.numLev = dimLens[opt_lev_dims[i]];
00161                     break;
00162                 }
00163             }
00164         }
00165     }
00166 
00167     // Hack: create dummy variables for dimensions (like cells) with no corresponding coordinate
00168     // variables
00169     rval = create_dummy_variables();MB_CHK_SET_ERR( rval, "Failed to create dummy variables" );
00170 
00171     return MB_SUCCESS;
00172 }
00173 
00174 // When noMesh option is used on this read, the old ReadNC class instance for last read can get out
00175 // of scope (and deleted). The old instance initialized some variables properly when the mesh was
00176 // created, but they are now lost. The new instance (will not create the mesh with noMesh option)
00177 // has to restore them based on the existing mesh from last read
00178 ErrorCode NCHelperGCRM::check_existing_mesh()
00179 {
00180     Interface*& mbImpl = _readNC->mbImpl;
00181     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
00182     bool& noMesh       = _readNC->noMesh;
00183 
00184     if( noMesh )
00185     {
00186         ErrorCode rval;
00187 
00188         if( localGidVerts.empty() )
00189         {
00190             // Get all vertices from current file set (it is the input set in no_mesh scenario)
00191             Range local_verts;
00192             rval = mbImpl->get_entities_by_dimension( _fileSet, 0, local_verts );MB_CHK_SET_ERR( rval, "Trouble getting local vertices in current file set" );
00193 
00194             if( !local_verts.empty() )
00195             {
00196                 std::vector< int > gids( local_verts.size() );
00197 
00198                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00199                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_verts, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of vertices" );
00200 
00201                 // Restore localGidVerts
00202                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidVerts ) );
00203                 nLocalVertices = localGidVerts.size();
00204             }
00205         }
00206 
00207         if( localGidEdges.empty() )
00208         {
00209             // Get all edges from current file set (it is the input set in no_mesh scenario)
00210             Range local_edges;
00211             rval = mbImpl->get_entities_by_dimension( _fileSet, 1, local_edges );MB_CHK_SET_ERR( rval, "Trouble getting local edges in current file set" );
00212 
00213             if( !local_edges.empty() )
00214             {
00215                 std::vector< int > gids( local_edges.size() );
00216 
00217                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00218                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_edges, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of edges" );
00219 
00220                 // Restore localGidEdges
00221                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidEdges ) );
00222                 nLocalEdges = localGidEdges.size();
00223             }
00224         }
00225 
00226         if( localGidCells.empty() )
00227         {
00228             // Get all cells from current file set (it is the input set in no_mesh scenario)
00229             Range local_cells;
00230             rval = mbImpl->get_entities_by_dimension( _fileSet, 2, local_cells );MB_CHK_SET_ERR( rval, "Trouble getting local cells in current file set" );
00231 
00232             if( !local_cells.empty() )
00233             {
00234                 std::vector< int > gids( local_cells.size() );
00235 
00236                 // !IMPORTANT : this has to be the GLOBAL_ID tag
00237                 rval = mbImpl->tag_get_data( mGlobalIdTag, local_cells, &gids[0] );MB_CHK_SET_ERR( rval, "Trouble getting local gid values of cells" );
00238 
00239                 // Restore localGidCells
00240                 std::copy( gids.rbegin(), gids.rend(), range_inserter( localGidCells ) );
00241                 nLocalCells = localGidCells.size();
00242             }
00243         }
00244     }
00245 
00246     return MB_SUCCESS;
00247 }
00248 
// Create the local (per-processor) GCRM mesh in the file set: vertices,
// optional edges, and padded hexagonal cells (returned in 'faces'). In
// parallel, cells are first assigned by a (possibly shifted) trivial
// partition and then redistributed (e.g. via Zoltan). If this rank is the
// gather-set rank, a full serial copy of the mesh is also built in a
// gather set.
ErrorCode NCHelperGCRM::create_mesh( Range& faces )
{
    bool& noEdges       = _readNC->noEdges;
    DebugOutput& dbgOut = _readNC->dbgOut;

#ifdef MOAB_HAVE_MPI
    int rank         = 0;
    int procs        = 1;
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rank                   = myPcomm->proc_config().proc_rank();
        procs                  = myPcomm->proc_config().proc_size();
    }

    // Need to know whether we'll be creating gather mesh
    if( rank == _readNC->gatherSetRank ) createGatherSet = true;

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank           = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc (equal split, floor)
        nLocalCells = int( std::floor( 1.0 * nCells / procs ) );

        // The starting global cell index in the GCRM file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = nCells % procs;

        // Allocate extra cells over procs: the first 'iextra' shifted ranks
        // each take one more cell, and later ranks' start index shifts by
        // however many extra cells precede them
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        ErrorCode rval = redistribute_local_cells( start_cell_idx );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        // Single proc: take all cells, global ids 1..nCells
        nLocalCells = nCells;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    // Serial build: take all cells, global ids 1..nCells
    nLocalCells = nCells;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // Read vertices on each local cell, to get localGidVerts and cell connectivity later
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    std::vector< int > vertices_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, " nLocalCells = %d\n", (int)nLocalCells );
    dbgOut.tprintf( 1, " vertices_on_local_cells.size() = %d\n", (int)vertices_on_local_cells.size() );
#ifdef MOAB_HAVE_PNETCDF
    // With PnetCDF, one non-blocking read request per contiguous subrange of
    // localGidCells; all requests are completed by a single wait_all below
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh   = pair_iter->second;
        dbgOut.tprintf( 1, " cell_corners starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, " cell_corners   endh = %d\n", (int)endh );
        // Global ids are 1-based; file offsets are 0-based
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                           &( vertices_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts,
                                         &( vertices_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Correct vertices_on_local_cells array. Pentagons as hexagons should have
    // a connectivity like 123455 and not 122345
    for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
    {
        int* pvertex = &vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL];
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity: move the duplicated vertex to the
                // end so the first five entries are distinct
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k
                break;
            }
        }
    }

    // GCRM is 0 based, convert vertex indices from 0 to 1 based
    for( std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++ )
        vertices_on_local_cells[idx] += 1;

    // Create local vertices
    EntityHandle start_vertex;
    ErrorCode rval = create_local_vertices( vertices_on_local_cells, start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local vertices for GCRM mesh" );

    // Create local edges (unless NO_EDGES read option is set)
    if( !noEdges )
    {
        rval = create_local_edges( start_vertex );MB_CHK_SET_ERR( rval, "Failed to create local edges for GCRM mesh" );
    }

    // Create local cells, padded
    rval = create_padded_local_cells( vertices_on_local_cells, start_vertex, faces );MB_CHK_SET_ERR( rval, "Failed to create padded local cells for GCRM mesh" );

    if( createGatherSet )
    {
        EntityHandle gather_set;
        rval = _readNC->readMeshIface->create_gather_set( gather_set );MB_CHK_SET_ERR( rval, "Failed to create gather set" );

        // Create gather set vertices
        EntityHandle start_gather_set_vertex;
        rval = create_gather_set_vertices( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices for GCRM mesh" );

        // Create gather set edges (unless NO_EDGES read option is set)
        if( !noEdges )
        {
            rval = create_gather_set_edges( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create gather set edges for GCRM mesh" );
        }

        // Create gather set cells with padding
        rval = create_padded_gather_set_cells( gather_set, start_gather_set_vertex );MB_CHK_SET_ERR( rval, "Failed to create padded gather set cells for GCRM mesh" );
    }

    return MB_SUCCESS;
}
00406 
// Allocate tag storage for the requested non-set variables before reading:
// for each variable, set up readStarts/readCounts (time, entity range,
// levels), compute the read size, create the per-timestep tags and record
// the tag-memory pointers in varDatas. In parallel, face variables are
// restricted to owned faces.
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_allocate( std::vector< ReadNC::VarData >& vdatas,
                                                               std::vector< int >& tstep_nums )
{
    Interface*& mbImpl          = _readNC->mbImpl;
    std::vector< int >& dimLens = _readNC->dimLens;
    bool& noEdges               = _readNC->noEdges;
    DebugOutput& dbgOut         = _readNC->dbgOut;

    // Entity range the current variable's tag will be iterated over
    Range* range = NULL;

    // Get vertices
    Range verts;
    ErrorCode rval = mbImpl->get_entities_by_dimension( _fileSet, 0, verts );MB_CHK_SET_ERR( rval, "Trouble getting vertices in current file set" );
    assert( "Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1 );

    // Get edges
    Range edges;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges );MB_CHK_SET_ERR( rval, "Trouble getting edges in current file set" );

    // Get faces
    Range faces;
    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_SET_ERR( rval, "Trouble getting faces in current file set" );
    assert( "Should only have a single face subrange, since they were read in one shot" && faces.psize() == 1 );

#ifdef MOAB_HAVE_MPI
    // In parallel, only owned faces get data read into their tags
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rval                   = myPcomm->filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned );MB_CHK_SET_ERR( rval, "Trouble getting owned faces in current file set" );
    }
    else
        facesOwned = faces;  // not running in parallel, but still with MPI
#else
    facesOwned = faces;
#endif

    for( unsigned int i = 0; i < vdatas.size(); i++ )
    {
        // Skip edge variables, if specified by the read options
        if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;

        // Support non-set variables with 3 dimensions like (time, cells, layers), or
        // 2 dimensions like (time, cells)
        assert( 3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size() );

        // For a non-set variable, time should be the first dimension
        assert( tDim == vdatas[i].varDims[0] );

        // Set up readStarts and readCounts
        vdatas[i].readStarts.resize( 3 );
        vdatas[i].readCounts.resize( 3 );

        // First: Time
        vdatas[i].readStarts[0] = 0;  // This value is timestep dependent, will be set later
        vdatas[i].readCounts[0] = 1;

        // Next: nVertices / nCells / nEdges
        switch( vdatas[i].entLoc )
        {
            case ReadNC::ENTLOCVERT:
                // Vertices
                // Start from the first localGidVerts
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidVerts[0] - 1;
                vdatas[i].readCounts[1] = nLocalVertices;
                range                   = &verts;
                break;
            case ReadNC::ENTLOCFACE:
                // Faces
                // Start from the first localGidCells
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidCells[0] - 1;
                vdatas[i].readCounts[1] = nLocalCells;
                range                   = &facesOwned;
                break;
            case ReadNC::ENTLOCEDGE:
                // Edges
                // Start from the first localGidEdges
                // Actually, this will be reset later on in a loop
                vdatas[i].readStarts[1] = localGidEdges[0] - 1;
                vdatas[i].readCounts[1] = nLocalEdges;
                range                   = &edges;
                break;
            default:
                MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
        }

        // Finally: layers or other optional levels, it is possible that there is no
        // level dimension (numLev is 0) for this non-set variable
        if( vdatas[i].numLev < 1 ) vdatas[i].numLev = 1;
        vdatas[i].readStarts[2] = 0;
        vdatas[i].readCounts[2] = vdatas[i].numLev;

        // Get variable size (product of the three read counts)
        vdatas[i].sz = 1;
        for( std::size_t idx = 0; idx != 3; idx++ )
            vdatas[i].sz *= vdatas[i].readCounts[idx];

        for( unsigned int t = 0; t < tstep_nums.size(); t++ )
        {
            dbgOut.tprintf( 2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t] );

            if( tstep_nums[t] >= dimLens[tDim] )
            {
                MB_SET_ERR( MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t] );
            }

            // Get the tag to read into
            if( !vdatas[i].varTags[t] )
            {
                rval = get_tag_to_nonset( vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev );MB_CHK_SET_ERR( rval, "Trouble getting tag for variable " << vdatas[i].varName );
            }

            // Get ptr to tag space: the range is a single contiguous subrange,
            // so one tag_iterate covers all entities
            assert( 1 == range->psize() );
            void* data;
            int count;
            rval = mbImpl->tag_iterate( vdatas[i].varTags[t], range->begin(), range->end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate tag for variable " << vdatas[i].varName );
            assert( (unsigned)count == range->size() );
            vdatas[i].varDatas[t] = data;
        }
    }

    return rval;
}
00533 
00534 #ifdef MOAB_HAVE_PNETCDF
00535 ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_async( std::vector< ReadNC::VarData >& vdatas,
00536                                                             std::vector< int >& tstep_nums )
00537 {
00538     bool& noEdges       = _readNC->noEdges;
00539     DebugOutput& dbgOut = _readNC->dbgOut;
00540 
00541     ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
00542 
00543     // Finally, read into that space
00544     int success;
00545     Range* pLocalGid = NULL;
00546 
00547     for( unsigned int i = 0; i < vdatas.size(); i++ )
00548     {
00549         // Skip edge variables, if specified by the read options
00550         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00551 
00552         switch( vdatas[i].entLoc )
00553         {
00554             case ReadNC::ENTLOCVERT:
00555                 pLocalGid = &localGidVerts;
00556                 break;
00557             case ReadNC::ENTLOCFACE:
00558                 pLocalGid = &localGidCells;
00559                 break;
00560             case ReadNC::ENTLOCEDGE:
00561                 pLocalGid = &localGidEdges;
00562                 break;
00563             default:
00564                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00565         }
00566 
00567         std::size_t sz = vdatas[i].sz;
00568 
00569         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00570         {
00571             // We will synchronize all these reads with the other processors,
00572             // so the wait will be inside this double loop; is it too much?
00573             size_t nb_reads = pLocalGid->psize();
00574             std::vector< int > requests( nb_reads ), statuss( nb_reads );
00575             size_t idxReq = 0;
00576 
00577             // Set readStart for each timestep along time dimension
00578             vdatas[i].readStarts[0] = tstep_nums[t];
00579 
00580             switch( vdatas[i].varDataType )
00581             {
00582                 case NC_FLOAT:
00583                 case NC_DOUBLE: {
00584                     // Read float as double
00585                     std::vector< double > tmpdoubledata( sz );
00586 
00587                     // In the case of ucd mesh, and on multiple proc,
00588                     // we need to read as many times as subranges we have in the
00589                     // localGid range;
00590                     // basically, we have to give a different point
00591                     // for data to start, for every subrange :(
00592                     size_t indexInDoubleArray = 0;
00593                     size_t ic                 = 0;
00594                     for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
00595                          ++pair_iter, ic++ )
00596                     {
00597                         EntityHandle starth     = pair_iter->first;
00598                         EntityHandle endh       = pair_iter->second;  // inclusive
00599                         vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
00600                         vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );
00601 
00602                         // Do a partial read, in each subrange
00603                         // wait outside this loop
00604                         success =
00605                             NCFUNCREQG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
00606                                                         &( vdatas[i].readCounts[0] ),
00607                                                         &( tmpdoubledata[indexInDoubleArray] ), &requests[idxReq++] );
00608                         if( success )
00609                             MB_SET_ERR( MB_FAILURE,
00610                                         "Failed to read double data in a loop for variable " << vdatas[i].varName );
00611                         // We need to increment the index in double array for the
00612                         // next subrange
00613                         indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
00614                     }
00615                     assert( ic == pLocalGid->psize() );
00616 
00617                     success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
00618                     if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
00619 
00620                     void* data = vdatas[i].varDatas[t];
00621                     for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
00622                         ( (double*)data )[idx] = tmpdoubledata[idx];
00623 
00624                     break;
00625                 }
00626                 default:
00627                     MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
00628             }
00629         }
00630     }
00631 
00632     // Debug output, if requested
00633     if( 1 == dbgOut.get_verbosity() )
00634     {
00635         dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
00636         for( unsigned int i = 1; i < vdatas.size(); i++ )
00637             dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
00638         dbgOut.tprintf( 1, "\n" );
00639     }
00640 
00641     return rval;
00642 }
00643 #else
00644 ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset( std::vector< ReadNC::VarData >& vdatas,
00645                                                       std::vector< int >& tstep_nums )
00646 {
00647     bool& noEdges = _readNC->noEdges;
00648     DebugOutput& dbgOut = _readNC->dbgOut;
00649 
00650     ErrorCode rval = read_ucd_variables_to_nonset_allocate( vdatas, tstep_nums );MB_CHK_SET_ERR( rval, "Trouble allocating space to read non-set variables" );
00651 
00652     // Finally, read into that space
00653     int success;
00654     Range* pLocalGid = NULL;
00655 
00656     for( unsigned int i = 0; i < vdatas.size(); i++ )
00657     {
00658         // Skip edge variables, if specified by the read options
00659         if( noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE ) continue;
00660 
00661         switch( vdatas[i].entLoc )
00662         {
00663             case ReadNC::ENTLOCVERT:
00664                 pLocalGid = &localGidVerts;
00665                 break;
00666             case ReadNC::ENTLOCFACE:
00667                 pLocalGid = &localGidCells;
00668                 break;
00669             case ReadNC::ENTLOCEDGE:
00670                 pLocalGid = &localGidEdges;
00671                 break;
00672             default:
00673                 MB_SET_ERR( MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName );
00674         }
00675 
00676         std::size_t sz = vdatas[i].sz;
00677 
00678         for( unsigned int t = 0; t < tstep_nums.size(); t++ )
00679         {
00680             // Set readStart for each timestep along time dimension
00681             vdatas[i].readStarts[0] = tstep_nums[t];
00682 
00683             switch( vdatas[i].varDataType )
00684             {
00685                 case NC_FLOAT:
00686                 case NC_DOUBLE: {
00687                     // Read float as double
00688                     std::vector< double > tmpdoubledata( sz );
00689 
00690                     // In the case of ucd mesh, and on multiple proc,
00691                     // we need to read as many times as subranges we have in the
00692                     // localGid range;
00693                     // basically, we have to give a different point
00694                     // for data to start, for every subrange :(
00695                     size_t indexInDoubleArray = 0;
00696                     size_t ic = 0;
00697                     for( Range::pair_iterator pair_iter = pLocalGid->pair_begin(); pair_iter != pLocalGid->pair_end();
00698                          ++pair_iter, ic++ )
00699                     {
00700                         EntityHandle starth = pair_iter->first;
00701                         EntityHandle endh = pair_iter->second;  // Inclusive
00702                         vdatas[i].readStarts[1] = ( NCDF_SIZE )( starth - 1 );
00703                         vdatas[i].readCounts[1] = ( NCDF_SIZE )( endh - starth + 1 );
00704 
00705                         success = NCFUNCAG( _vara_double )( _fileId, vdatas[i].varId, &( vdatas[i].readStarts[0] ),
00706                                                             &( vdatas[i].readCounts[0] ),
00707                                                             &( tmpdoubledata[indexInDoubleArray] ) );
00708                         if( success )
00709                             MB_SET_ERR( MB_FAILURE,
00710                                         "Failed to read double data in a loop for variable " << vdatas[i].varName );
00711                         // We need to increment the index in double array for the
00712                         // next subrange
00713                         indexInDoubleArray += ( endh - starth + 1 ) * 1 * vdatas[i].numLev;
00714                     }
00715                     assert( ic == pLocalGid->psize() );
00716 
00717                     void* data = vdatas[i].varDatas[t];
00718                     for( std::size_t idx = 0; idx != tmpdoubledata.size(); idx++ )
00719                         ( (double*)data )[idx] = tmpdoubledata[idx];
00720 
00721                     break;
00722                 }
00723                 default:
00724                     MB_SET_ERR( MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName );
00725             }
00726         }
00727     }
00728 
00729     // Debug output, if requested
00730     if( 1 == dbgOut.get_verbosity() )
00731     {
00732         dbgOut.printf( 1, "Read variables: %s", vdatas.begin()->varName.c_str() );
00733         for( unsigned int i = 1; i < vdatas.size(); i++ )
00734             dbgOut.printf( 1, ", %s ", vdatas[i].varName.c_str() );
00735         dbgOut.tprintf( 1, "\n" );
00736     }
00737 
00738     return rval;
00739 }
00740 #endif
00741 
00742 #ifdef MOAB_HAVE_MPI
00743 ErrorCode NCHelperGCRM::redistribute_local_cells( int start_cell_idx )
00744 {
00745     // If possible, apply Zoltan partition
00746 #ifdef MOAB_HAVE_ZOLTAN
00747     if( _readNC->partMethod == ScdParData::RCBZOLTAN )
00748     {
00749         // Read lat/lon coordinates of cell centers, then convert spherical to
00750         // Cartesian, and use them as input to Zoltan partition
00751         int xCellVarId;
00752         int success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &xCellVarId );
00753         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
00754         std::vector< double > xCell( nLocalCells );
00755         NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
00756         NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
00757         success              = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xCell[0] );
00758         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );
00759 
00760         // Read y coordinates of cell centers
00761         int yCellVarId;
00762         success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &yCellVarId );
00763         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
00764         std::vector< double > yCell( nLocalCells );
00765         success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yCell[0] );
00766         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );
00767 
00768         // Convert lon/lat/rad to x/y/z
00769         std::vector< double > zCell( nLocalCells );
00770         double rad = 8000.0;  // This is just a approximate radius
00771         for( int i = 0; i < nLocalCells; i++ )
00772         {
00773             double cosphi = cos( xCell[i] );
00774             double zmult  = sin( xCell[i] );
00775             double xmult  = cosphi * cos( yCell[i] );
00776             double ymult  = cosphi * sin( yCell[i] );
00777             xCell[i]      = rad * xmult;
00778             yCell[i]      = rad * ymult;
00779             zCell[i]      = rad * zmult;
00780         }
00781 
00782         // Zoltan partition using RCB; maybe more studies would be good, as to which partition
00783         // is better
00784         Interface*& mbImpl         = _readNC->mbImpl;
00785         DebugOutput& dbgOut        = _readNC->dbgOut;
00786         ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, false, 0, NULL );
00787         ErrorCode rval             = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
00788         delete mbZTool;
00789 
00790         dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
00791         dbgOut.tprintf( 1, "                           localGidCells.size() = %d\n", (int)localGidCells.size() );
00792 
00793         // This is important: local cells are now redistributed, so nLocalCells might be different!
00794         nLocalCells = localGidCells.size();
00795 
00796         return MB_SUCCESS;
00797     }
00798 #endif
00799 
00800     // By default, apply trivial partition
00801     localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
00802 
00803     return MB_SUCCESS;
00804 }
00805 #endif
00806 
// Create the vertices referenced by this processor's local cells: collect the
// unique global vertex ids from vertices_on_local_cells into localGidVerts,
// allocate coordinate storage, set global-id (and optional file-id) tags, read
// the level values and the corner lon/lat coordinates from the file, and
// finally convert lon/lat to Cartesian x/y/z on a sphere.
// On success start_vertex is the handle of the first created vertex.
ErrorCode NCHelperGCRM::create_local_vertices( const std::vector< int >& vertices_on_local_cells,
                                               EntityHandle& start_vertex )
{
    Interface*& mbImpl                                = _readNC->mbImpl;
    Tag& mGlobalIdTag                                 = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag                           = _readNC->mpFileIdTag;
    DebugOutput& dbgOut                               = _readNC->dbgOut;
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;

    // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell
    // connectivity later)
    std::vector< int > vertices_on_local_cells_sorted( vertices_on_local_cells );
    std::sort( vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end() );
    // Inserting into the Range collapses duplicates, so localGidVerts ends up
    // holding each global vertex id exactly once
    std::copy( vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(),
               range_inserter( localGidVerts ) );
    nLocalVertices = localGidVerts.size();

    dbgOut.tprintf( 1, "   localGidVerts.psize() = %d\n", (int)localGidVerts.psize() );
    dbgOut.tprintf( 1, "   localGidVerts.size() = %d\n", (int)localGidVerts.size() );

    // Create local vertices
    std::vector< double* > arrays;
    ErrorCode rval =
        _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays,
                                                 // Might have to create gather mesh later
                                                 ( createGatherSet ? nLocalVertices + nVertices : nLocalVertices ) );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    // Add local vertices to current file set
    Range local_verts_range( start_vertex, start_vertex + nLocalVertices - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_verts_range );MB_CHK_SET_ERR( rval, "Failed to add local vertices to current file set" );

    // Get ptr to GID memory for local vertices
    int count  = 0;
    void* data = NULL;
    rval       = mbImpl->tag_iterate( mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local vertices" );
    assert( count == nLocalVertices );
    int* gid_data = (int*)data;
    std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );

    // Duplicate GID data, which will be used to resolve sharing
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on local vertices" );
        assert( count == nLocalVertices );
        int bytes_per_tag = 4;
        rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), gid_data );
        }
        else if( 8 == bytes_per_tag )
        {  // Should be a handle tag on 64 bit machine?
            long* handle_tag_data = (long*)data;
            std::copy( localGidVerts.begin(), localGidVerts.end(), handle_tag_data );
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    // One non-blocking read request per contiguous subrange of localGidVerts
    size_t nb_reads = localGidVerts.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif

    // Store lev values in levVals
    std::map< std::string, ReadNC::VarData >::iterator vmit;
    if( ( vmit = varInfo.find( "layers" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "layers", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'layers' variable" );
    }
    else if( ( vmit = varInfo.find( "interfaces" ) ) != varInfo.end() && ( *vmit ).second.varDims.size() == 1 )
    {
        rval = read_coordinate( "interfaces", 0, nLevels - 1, levVals );MB_CHK_SET_ERR( rval, "Trouble reading 'interfaces' variable" );
    }
    else
    {
        MB_SET_ERR( MB_FAILURE, "Couldn't find 'layers' or 'interfaces' variable" );
    }

    // Decide whether down is positive
    // NOTE(review): get_att_text writes the full attribute value; this assumes
    // the "positive" attribute fits in 10 bytes -- confirm against the format spec
    char posval[10] = { 0 };
    int success     = NCFUNC( get_att_text )( _fileId, ( *vmit ).second.varId, "positive", posval );
    if( 0 == success && !strncmp( posval, "down", 4 ) )
    {
        // Negate level values so that increasing level is always "up"
        for( std::vector< double >::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit )
            ( *dvit ) *= -1.0;
    }

    // Read x coordinates for local vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        // Global ids are 1 based; file offsets are 0 based
        NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
        NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, &( xptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Read y coordinates for local vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    // Reuse the request vector for the second batch of non-blocking reads
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidVerts.pair_begin(); pair_iter != localGidVerts.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        NCDF_SIZE read_start = ( NCDF_SIZE )( starth - 1 );
        NCDF_SIZE read_count = ( NCDF_SIZE )( endh - starth + 1 );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, &( yptr[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 );
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Convert lon/lat/rad to x/y/z (xptr held lon, yptr held lat; overwritten in place)
    double* zptr = arrays[2];
    double rad   = 8000.0 + levVals[0];
    for( int i = 0; i < nLocalVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult  = sin( yptr[i] );
        double xmult  = cosphi * cos( xptr[i] );
        double ymult  = cosphi * sin( xptr[i] );
        xptr[i]       = rad * xmult;
        yptr[i]       = rad * ymult;
        zptr[i]       = rad * zmult;
    }

    return MB_SUCCESS;
}
00981 
// Create the edges used by this processor's local cells: read cell_edges to
// find the unique global edge ids (localGidEdges), allocate MBEDGE entities
// with global-id tags, then read edge_corners and translate the 0-based global
// vertex indices into local vertex handles to populate edge connectivity.
// start_vertex is the handle of the first local vertex created earlier.
ErrorCode NCHelperGCRM::create_local_edges( EntityHandle start_vertex )
{
    Interface*& mbImpl  = _readNC->mbImpl;
    Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
    DebugOutput& dbgOut = _readNC->dbgOut;

    // Read edges on each local cell, to get localGidEdges
    int edgesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_edges", &edgesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_edges" );

    std::vector< int > edges_on_local_cells( nLocalCells * EDGES_PER_CELL );
    dbgOut.tprintf( 1, "   edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size() );

#ifdef MOAB_HAVE_PNETCDF
    // One non-blocking read request per contiguous subrange of localGidCells
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads );
    std::vector< int > statuss( nb_reads );
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth = pair_iter->first;
        EntityHandle endh   = pair_iter->second;
        dbgOut.tprintf( 1, "   starth = %d\n", (int)starth );
        dbgOut.tprintf( 1, "   endh = %d\n", (int)endh );
        // Global ids are 1 based; file offsets are 0 based
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                           &( edges_on_local_cells[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, edgesOnCellVarId, read_starts, read_counts,
                                         &( edges_on_local_cells[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_edges data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * EDGES_PER_CELL;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // GCRM is 0 based, convert edge indices from 0 to 1 based
    for( std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++ )
        edges_on_local_cells[idx] += 1;

    // Collect local edges (the Range collapses duplicate ids, yielding unique edges)
    std::sort( edges_on_local_cells.begin(), edges_on_local_cells.end() );
    std::copy( edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter( localGidEdges ) );
    nLocalEdges = localGidEdges.size();

    dbgOut.tprintf( 1, "   localGidEdges.psize() = %d\n", (int)localGidEdges.psize() );
    dbgOut.tprintf( 1, "   localGidEdges.size() = %d\n", (int)localGidEdges.size() );

    // Create local edges
    EntityHandle start_edge;
    EntityHandle* conn_arr_edges = NULL;
    ErrorCode rval =
        _readNC->readMeshIface->get_element_connect( nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
                                                     // Might have to create gather mesh later
                                                     ( createGatherSet ? nLocalEdges + nEdges : nLocalEdges ) );MB_CHK_SET_ERR( rval, "Failed to create local edges" );

    // Add local edges to current file set
    Range local_edges_range( start_edge, start_edge + nLocalEdges - 1 );
    rval = _readNC->mbImpl->add_entities( _fileSet, local_edges_range );MB_CHK_SET_ERR( rval, "Failed to add local edges to current file set" );

    // Get ptr to GID memory for edges
    int count  = 0;
    void* data = NULL;
    rval       = mbImpl->tag_iterate( mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local edges" );
    assert( count == nLocalEdges );
    int* gid_data = (int*)data;
    std::copy( localGidEdges.begin(), localGidEdges.end(), gid_data );

    int verticesOnEdgeVarId;

    // Read vertices on each local edge, to get edge connectivity
    success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
    // Utilize the memory storage pointed by conn_arr_edges: the ints are read
    // into the front of the EntityHandle buffer and converted in place below
    int* vertices_on_local_edges = (int*)conn_arr_edges;
#ifdef MOAB_HAVE_PNETCDF
    nb_reads = localGidEdges.psize();
    requests.resize( nb_reads );
    statuss.resize( nb_reads );
    idxReq = 0;
#endif
    indexInArray = 0;
    for( Range::pair_iterator pair_iter = localGidEdges.pair_begin(); pair_iter != localGidEdges.pair_end();
         ++pair_iter )
    {
        EntityHandle starth      = pair_iter->first;
        EntityHandle endh        = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ), 2 };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                           &( vertices_on_local_edges[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                         &( vertices_on_local_edges[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data in a loop" );

        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * 2;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // Populate connectivity data for local edges
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick: each int is expanded to a (wider)
    // EntityHandle slot without overwriting ints not yet converted
    for( int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
    {
        // Note, indices stored in vertices_on_local_edges are 0 based
        int global_vert_idx = vertices_on_local_edges[edge_vert] + 1;  // Global vertex index, 1 based
        int local_vert_idx  = localGidVerts.index( global_vert_idx );  // Local vertex index, 0 based
        assert( local_vert_idx != -1 );
        conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
    }

    return MB_SUCCESS;
}
01122 
01123 ErrorCode NCHelperGCRM::create_padded_local_cells( const std::vector< int >& vertices_on_local_cells,
01124                                                    EntityHandle start_vertex, Range& faces )
01125 {
01126     Interface*& mbImpl = _readNC->mbImpl;
01127     Tag& mGlobalIdTag  = _readNC->mGlobalIdTag;
01128 
01129     // Create cells
01130     EntityHandle start_element;
01131     EntityHandle* conn_arr_local_cells = NULL;
01132     ErrorCode rval =
01133         _readNC->readMeshIface->get_element_connect( nLocalCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
01134                                                      conn_arr_local_cells,
01135                                                      // Might have to create gather mesh later
01136                                                      ( createGatherSet ? nLocalCells + nCells : nLocalCells ) );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
01137     faces.insert( start_element, start_element + nLocalCells - 1 );
01138 
01139     // Add local cells to current file set
01140     Range local_cells_range( start_element, start_element + nLocalCells - 1 );
01141     rval = _readNC->mbImpl->add_entities( _fileSet, local_cells_range );MB_CHK_SET_ERR( rval, "Failed to add local cells to current file set" );
01142 
01143     // Get ptr to GID memory for local cells
01144     int count  = 0;
01145     void* data = NULL;
01146     rval       = mbImpl->tag_iterate( mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on local cells" );
01147     assert( count == nLocalCells );
01148     int* gid_data = (int*)data;
01149     std::copy( localGidCells.begin(), localGidCells.end(), gid_data );
01150 
01151     // Set connectivity array with proper local vertices handles
01152     // vertices_on_local_cells array was already corrected to have
01153     // the last vertices repeated for pentagons, e.g. 122345 => 123455
01154     for( int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++ )
01155     {
01156         for( int i = 0; i < EDGES_PER_CELL; i++ )
01157         {
01158             // Note, indices stored in vertices_on_local_cells are 1 based
01159             EntityHandle global_vert_idx =
01160                 vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL + i];  // Global vertex index, 1 based
01161             int local_vert_idx = localGidVerts.index( global_vert_idx );       // Local vertex index, 0 based
01162             assert( local_vert_idx != -1 );
01163             conn_arr_local_cells[local_cell_idx * EDGES_PER_CELL + i] = start_vertex + local_vert_idx;
01164         }
01165     }
01166 
01167     return MB_SUCCESS;
01168 }
01169 
// Create ALL of the grid's vertices on the gather processor: allocate
// coordinate storage for nVertices, read the full grid_corner_lon/lat arrays
// (in independent I/O mode under PnetCDF, since only this processor reads),
// convert lon/lat to Cartesian x/y/z, and assign sequential global ids
// (file-id tags get nVertices + j so they do not collide with global ids).
ErrorCode NCHelperGCRM::create_gather_set_vertices( EntityHandle gather_set, EntityHandle& gather_set_start_vertex )
{
    Interface*& mbImpl      = _readNC->mbImpl;
    Tag& mGlobalIdTag       = _readNC->mGlobalIdTag;
    const Tag*& mpFileIdTag = _readNC->mpFileIdTag;

    // Create gather set vertices
    std::vector< double* > arrays;
    // Don't need to specify allocation number here, because we know enough vertices were created
    // before
    ErrorCode rval = _readNC->readMeshIface->get_node_coords( 3, nVertices, 0, gather_set_start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create gather set vertices" );

    // Add vertices to the gather set
    Range gather_set_verts_range( gather_set_start_vertex, gather_set_start_vertex + nVertices - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_verts_range );MB_CHK_SET_ERR( rval, "Failed to add vertices to the gather set" );

    // Read x coordinates for gather set vertices
    double* xptr = arrays[0];
    int xVertexVarId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    NCDF_SIZE read_start = 0;
    NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nVertices );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, xVertexVarId, &read_start, &read_count, xptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data" );
#endif

    // Read y coordinates for gather set vertices
    double* yptr = arrays[1];
    int yVertexVarId;
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yVertexVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success = NCFUNCG( _vara_double )( _fileId, yVertexVarId, &read_start, &read_count, yptr );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data" );
#endif

    // Convert lon/lat/rad to x/y/z (xptr held lon, yptr held lat; overwritten in place)
    double* zptr = arrays[2];
    double rad   = 8000.0 + levVals[0];
    for( int i = 0; i < nVertices; i++ )
    {
        double cosphi = cos( yptr[i] );
        double zmult  = sin( yptr[i] );
        double xmult  = cosphi * cos( xptr[i] );
        double ymult  = cosphi * sin( xptr[i] );
        xptr[i]       = rad * xmult;
        yptr[i]       = rad * ymult;
        zptr[i]       = rad * zmult;
    }

    // Get ptr to GID memory for gather set vertices
    int count  = 0;
    void* data = NULL;
    rval =
        mbImpl->tag_iterate( mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count, data );MB_CHK_SET_ERR( rval, "Failed to iterate global id tag on gather set vertices" );
    assert( count == nVertices );
    int* gid_data = (int*)data;
    // Gather set vertices simply take ids 1..nVertices
    for( int j = 1; j <= nVertices; j++ )
        gid_data[j - 1] = j;

    // Set the file id tag too, it should be bigger something not interfering with global id
    if( mpFileIdTag )
    {
        rval = mbImpl->tag_iterate( *mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(), count,
                                    data );MB_CHK_SET_ERR( rval, "Failed to iterate file id tag on gather set vertices" );
        assert( count == nVertices );
        int bytes_per_tag = 4;
        rval              = mbImpl->tag_get_bytes( *mpFileIdTag, bytes_per_tag );MB_CHK_SET_ERR( rval, "Can't get number of bytes for file id tag" );
        if( 4 == bytes_per_tag )
        {
            gid_data = (int*)data;
            for( int j = 1; j <= nVertices; j++ )
                gid_data[j - 1] = nVertices + j;  // Bigger than global id tag
        }
        else if( 8 == bytes_per_tag )
        {  // Should be a handle tag on 64 bit machine?
            long* handle_tag_data = (long*)data;
            for( int j = 1; j <= nVertices; j++ )
                handle_tag_data[j - 1] = nVertices + j;  // Bigger than global id tag
        }
    }

    return MB_SUCCESS;
}
01272 
01273 ErrorCode NCHelperGCRM::create_gather_set_edges( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
01274 {
01275     Interface*& mbImpl = _readNC->mbImpl;
01276 
01277     // Create gather set edges
01278     EntityHandle start_edge;
01279     EntityHandle* conn_arr_gather_set_edges = NULL;
01280     // Don't need to specify allocation number here, because we know enough edges were created
01281     // before
01282     ErrorCode rval =
01283         _readNC->readMeshIface->get_element_connect( nEdges, 2, MBEDGE, 0, start_edge, conn_arr_gather_set_edges );MB_CHK_SET_ERR( rval, "Failed to create gather set edges" );
01284 
01285     // Add edges to the gather set
01286     Range gather_set_edges_range( start_edge, start_edge + nEdges - 1 );
01287     rval = mbImpl->add_entities( gather_set, gather_set_edges_range );MB_CHK_SET_ERR( rval, "Failed to add edges to the gather set" );
01288 
01289     // Read vertices on each edge
01290     int verticesOnEdgeVarId;
01291     int success = NCFUNC( inq_varid )( _fileId, "edge_corners", &verticesOnEdgeVarId );
01292     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of edge_corners" );
01293     // Utilize the memory storage pointed by conn_arr_gather_set_edges
01294     int* vertices_on_gather_set_edges = (int*)conn_arr_gather_set_edges;
01295     NCDF_SIZE read_starts[2]          = { 0, 0 };
01296     NCDF_SIZE read_counts[2]          = { static_cast< NCDF_SIZE >( nEdges ), 2 };
01297 #ifdef MOAB_HAVE_PNETCDF
01298     // Enter independent I/O mode, since this read is only for the gather processor
01299     success = NCFUNC( begin_indep_data )( _fileId );
01300     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
01301     success =
01302         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01303     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
01304     success = NCFUNC( end_indep_data )( _fileId );
01305     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
01306 #else
01307     success =
01308         NCFUNCG( _vara_int )( _fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges );
01309     if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read edge_corners data" );
01310 #endif
01311 
01312     // Populate connectivity data for gather set edges
01313     // Convert in-place from int (stored in the first half) to EntityHandle
01314     // Reading backward is the trick
01315     for( int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert-- )
01316     {
01317         // Note, indices stored in vertices_on_gather_set_edges are 0 based
01318         int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert];  // Global vertex index, 0 based
01319         // Connectivity array is shifted by where the gather set vertices start
01320         conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
01321     }
01322 
01323     return MB_SUCCESS;
01324 }
01325 
ErrorCode NCHelperGCRM::create_padded_gather_set_cells( EntityHandle gather_set, EntityHandle gather_set_start_vertex )
{
    // Build all GCRM cells (pentagons padded to hexagons) on the gather set:
    // allocate EDGES_PER_CELL-wide polygon connectivity, read the
    // "cell_corners" netCDF variable, normalize padded pentagons, and finally
    // convert the 0-based int vertex indices into vertex handles in place.
    Interface*& mbImpl = _readNC->mbImpl;

    // Create gather set cells
    EntityHandle start_element;
    EntityHandle* conn_arr_gather_set_cells = NULL;
    // Don't need to specify allocation number here, because we know enough cells were created
    // before
    ErrorCode rval = _readNC->readMeshIface->get_element_connect( nCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
                                                                  conn_arr_gather_set_cells );MB_CHK_SET_ERR( rval, "Failed to create gather set cells" );

    // Add cells to the gather set
    Range gather_set_cells_range( start_element, start_element + nCells - 1 );
    rval = mbImpl->add_entities( gather_set, gather_set_cells_range );MB_CHK_SET_ERR( rval, "Failed to add cells to the gather set" );

    // Read vertices on each gather set cell (connectivity)
    int verticesOnCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "cell_corners", &verticesOnCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of cell_corners" );
    // Utilize the memory storage pointed by conn_arr_gather_set_cells: the int
    // indices fit in (at most) the first half of the EntityHandle buffer and
    // are converted in place further below
    int* vertices_on_gather_set_cells = (int*)conn_arr_gather_set_cells;
    NCDF_SIZE read_starts[2]          = { 0, 0 };
    NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( nCells ), static_cast< NCDF_SIZE >( EDGES_PER_CELL ) };
#ifdef MOAB_HAVE_PNETCDF
    // Enter independent I/O mode, since this read is only for the gather processor
    success = NCFUNC( begin_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to begin independent I/O mode" );
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
    success = NCFUNC( end_indep_data )( _fileId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to end independent I/O mode" );
#else
    success =
        NCFUNCG( _vara_int )( _fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read cell_corners data" );
#endif

    // Correct gather set cell vertices array in the same way as local cell vertices array
    // Pentagons as hexagons should have a connectivity like 123455 and not 122345:
    // when an adjacent duplicate is found mid-cell, shift the tail left by one,
    // which leaves the last vertex repeated at the end (e.g. 122345 -> 123455)
    for( int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++ )
    {
        int* pvertex = vertices_on_gather_set_cells + gather_set_cell_idx * EDGES_PER_CELL;
        for( int k = 0; k < EDGES_PER_CELL - 2; k++ )
        {
            if( *( pvertex + k ) == *( pvertex + k + 1 ) )
            {
                // Shift the connectivity
                for( int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++ )
                    *( pvertex + kk ) = *( pvertex + kk + 1 );
                // No need to try next k: a padded pentagon has exactly one
                // duplicated corner
                break;
            }
        }
    }

    // Populate connectivity data for gather set cells
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick: each int slot is read before the wider
    // handle written at the same logical index can overwrite it
    for( int cell_vert = nCells * EDGES_PER_CELL - 1; cell_vert >= 0; cell_vert-- )
    {
        // Note, indices stored in vertices_on_gather_set_cells are 0 based
        int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert];  // Global vertex index, 0 based
        // Connectivity array is shifted by where the gather set vertices start
        conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }

    return MB_SUCCESS;
}
01396 
01397 }  // namespace moab
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines