MOAB: Mesh Oriented datABase  (version 5.4.1)
NCHelperScrip.cpp
Go to the documentation of this file.
00001 /*
00002  * NCHelperScrip.cpp
00003  */
00004 
00005 #include "NCHelperScrip.hpp"
00006 #include "moab/ReadUtilIface.hpp"
00007 #include "moab/IntxMesh/IntxUtils.hpp"
00008 #ifdef MOAB_HAVE_MPI
00009 #include "moab/ParallelMergeMesh.hpp"
00010 #endif
00011 #ifdef MOAB_HAVE_ZOLTAN
00012 #include "moab/ZoltanPartitioner.hpp"
00013 #endif
00014 
00015 namespace moab
00016 {
00017 
00018 bool NCHelperScrip::can_read_file( ReadNC* readNC, int /*fileId*/ )
00019 {
00020     std::vector< std::string >& dimNames = readNC->dimNames;
00021 
00022     // If dimension names "grid_size" AND "grid_corners" AND "grid_rank" exist then it should be the Scrip grid
00023     if( ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_size" ) ) != dimNames.end() ) &&
00024         ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_corners" ) ) != dimNames.end() ) &&
00025         ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_rank" ) ) != dimNames.end() ) )
00026     {
00027 
00028         return true;
00029     }
00030 
00031     return false;
00032 }
00033 ErrorCode NCHelperScrip::init_mesh_vals()
00034 {
00035     Interface*& mbImpl                   = _readNC->mbImpl;
00036     std::vector< std::string >& dimNames = _readNC->dimNames;
00037     std::vector< int >& dimLens          = _readNC->dimLens;
00038 
00039     unsigned int idx;
00040     std::vector< std::string >::iterator vit;
00041 
00042     // get grid_size
00043     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_size" ) ) != dimNames.end() )
00044     {
00045         idx       = vit - dimNames.begin();
00046         grid_size = dimLens[idx];
00047     }
00048 
00049     // get grid_corners
00050     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_corners" ) ) != dimNames.end() )
00051     {
00052         idx          = vit - dimNames.begin();
00053         grid_corners = dimLens[idx];
00054     }
00055     // do not need conventional tags
00056     Tag convTagsCreated = 0;
00057     int def_val         = 0;
00058     ErrorCode rval      = mbImpl->tag_get_handle( "__CONV_TAGS_CREATED", 1, MB_TYPE_INTEGER, convTagsCreated,
00059                                                   MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble getting _CONV_TAGS_CREATED tag" );
00060     int create_conv_tags_flag = 1;
00061     rval                      = mbImpl->tag_set_data( convTagsCreated, &_fileSet, 1, &create_conv_tags_flag );MB_CHK_SET_ERR( rval, "Trouble setting _CONV_TAGS_CREATED tag" );
00062 
00063     // decide now the units, by looking at grid_center_lon
00064     int xCellVarId;
00065     int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
00066     if( success ) MB_CHK_SET_ERR( MB_FAILURE, "Trouble getting grid_center_lon" );
00067     std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
00068     auto vmit                                         = varInfo.find( "grid_center_lon" );
00069     if( varInfo.end() == vmit )
00070         MB_SET_ERR( MB_FAILURE, "Couldn't find variable "
00071                                     << "grid_center_lon" );
00072     ReadNC::VarData& glData = vmit->second;
00073     auto attIt              = glData.varAtts.find( "units" );
00074     if( attIt != glData.varAtts.end() )
00075     {
00076         unsigned int sz = attIt->second.attLen;
00077         std::string att_data;
00078         att_data.resize( sz + 1 );
00079         att_data[sz] = '\000';
00080         success =
00081             NCFUNC( get_att_text )( _fileId, attIt->second.attVarId, attIt->second.attName.c_str(), &att_data[0] );
00082         if( 0 == success && att_data.find( "radians" ) != std::string::npos ) degrees = false;
00083     }
00084 
00085     return MB_SUCCESS;
00086 }
ErrorCode NCHelperScrip::create_mesh( Range& faces )
{
    // Build the local portion of the SCRIP mesh:
    //  1) decide the local range of global cell ids (rotated trivial partition,
    //     optionally redistributed by Zoltan via redistribute_local_cells)
    //  2) read the corner coordinates grid_corner_lon/lat (and grid_imask if present)
    //  3) deduplicate corner points through a Node3D -> handle map, create vertices
    //     and cells, set GLOBAL_ID (and GRID_IMASK) tags
    //  4) remove padded/repeated vertices and, in parallel, merge coincident
    //     vertices across processors
    // On success, 'faces' holds the 2d cells that were added to the file set.
    Interface*& mbImpl  = _readNC->mbImpl;
    DebugOutput& dbgOut = _readNC->dbgOut;
    Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
    ErrorCode rval;

#ifdef MOAB_HAVE_MPI
    int rank         = 0;
    int procs        = 1;
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rank                   = myPcomm->proc_config().proc_rank();
        procs                  = myPcomm->proc_config().proc_size();
    }

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank           = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * grid_size / procs ) );

        // The starting global cell index in the MPAS file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = grid_size % procs;

        // Allocate extra cells over procs
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        // NOTE(review): this 'rval' deliberately shadows the outer one; the check macro
        // returns on error, so the shadowing is harmless
        ErrorCode rval = redistribute_local_cells( start_cell_idx );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        // serial (or single-proc) run: own every cell, ids are 1-based
        nLocalCells = grid_size;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    nLocalCells = grid_size;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // double grid_corner_lat(grid_size, grid_corners) ;
    // double grid_corner_lon(grid_size, grid_corners) ;
    int xvId, yvId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );

    // important upgrade: read masks if they exist, and save them as tags
    int gmId      = -1;
    int sizeMasks = 0;
#ifdef MOAB_HAVE_PNETCDF
    int factorRequests = 2;  // we would read in general only 2 variables, xv and yv
#endif
    success     = NCFUNC( inq_varid )( _fileId, "grid_imask", &gmId );
    Tag maskTag = 0;  // not sure yet if we have the masks or not
    if( success )
    {
        gmId = -1;  // we do not have masks
    }
    else
    {
        sizeMasks = nLocalCells;
#ifdef MOAB_HAVE_PNETCDF
        factorRequests = 3;  // we also need to read masks distributed
#endif
        // create the maskTag GRID_IMASK, with default value of 1
        int def_val = 1;
        rval =
            mbImpl->tag_get_handle( "GRID_IMASK", 1, MB_TYPE_INTEGER, maskTag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble creating GRID_IMASK tag" );
    }

    std::vector< double > xv( nLocalCells * grid_corners );
    std::vector< double > yv( nLocalCells * grid_corners );
    std::vector< int > masks( sizeMasks );
#ifdef MOAB_HAVE_PNETCDF
    // nonblocking reads: one request per contiguous id subrange and per variable
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads * factorRequests );
    std::vector< int > statuss( nb_reads * factorRequests );
    size_t idxReq = 0;
#endif
    size_t indexInArray     = 0;
    size_t indexInMaskArray = 0;
    // each pair is a contiguous run [starth, endh] of 1-based global cell ids
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth      = pair_iter->first;
        EntityHandle endh        = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( grid_corners ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

            // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );
        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * grid_corners;

        if( gmId >= 0 )  // it means we need to read masks too, distributed:
        {
            NCDF_SIZE read_st = static_cast< NCDF_SIZE >( starth - 1 );
            NCDF_SIZE read_ct = static_cast< NCDF_SIZE >( endh - starth + 1 );
            // Do a partial read in each subrange, for mask variable:
#ifdef MOAB_HAVE_PNETCDF
            success = NCFUNCREQG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ),
                                               &requests[idxReq++] );
#else
            success = NCFUNCAG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ) );
#endif
            if( success ) MB_SET_ERR( MB_FAILURE, "Failed on mask read " );
            indexInMaskArray += endh - starth + 1;
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop (all nonblocking reads complete here)
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // so we read xv, yv for all corners in the local mesh, and masks if they exist

    // Create vertices; first identify different ones, with a tolerance
    // NOTE(review): presumably Node3D's ordering compares coordinates with a
    // tolerance, so nearly coincident corners collapse to one vertex -- confirm in Node3D
    std::map< Node3D, EntityHandle > vertex_map;

    // Set vertex coordinates
    // will read all xv, yv, but use only those with correct mask on

    int elem_index = 0;   // local index in netcdf arrays
    double pideg   = 1.;  // radians
    if( degrees ) pideg = acos( -1.0 ) / 180.0;  // degrees -> radians factor

    // first pass: insert every (lon, lat) corner, projected onto the unit sphere
    for( ; elem_index < nLocalCells; elem_index++ )
    {
        // set area and fraction on those elements too
        for( int k = 0; k < grid_corners; k++ )
        {
            int index_v_arr = grid_corners * elem_index + k;
            double x, y;
            x             = xv[index_v_arr];
            y             = yv[index_v_arr];
            double cosphi = cos( pideg * y );
            double zmult  = sin( pideg * y );
            double xmult  = cosphi * cos( x * pideg );
            double ymult  = cosphi * sin( x * pideg );
            Node3D pt( xmult, ymult, zmult );
            vertex_map[pt] = 0;  // handle filled in after vertices are allocated
        }
    }
    int nLocalVertices = (int)vertex_map.size();
    std::vector< double* > arrays;
    EntityHandle start_vertex, vtx_handle;
    rval = _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    vtx_handle = start_vertex;
    // Copy vertex coordinates into entity sequence coordinate arrays
    // and copy handle into vertex_map.
    double *x = arrays[0], *y = arrays[1], *z = arrays[2];
    for( auto i = vertex_map.begin(); i != vertex_map.end(); ++i )
    {
        i->second = vtx_handle;
        ++vtx_handle;
        *x = i->first.coords[0];
        ++x;
        *y = i->first.coords[1];
        ++y;
        *z = i->first.coords[2];
        ++z;
    }

    // Pick the cell type from the (fixed) number of corners per cell
    EntityHandle start_cell;
    int nv              = grid_corners;
    EntityType mdb_type = MBVERTEX;
    if( nv == 3 )
        mdb_type = MBTRI;
    else if( nv == 4 )
        mdb_type = MBQUAD;
    else if( nv > 4 )  // (nv > 4)
        mdb_type = MBPOLYGON;

    Range tmp_range;
    EntityHandle* conn_arr;

    rval = _readNC->readMeshIface->get_element_connect( nLocalCells, nv, mdb_type, 0, start_cell, conn_arr );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
    tmp_range.insert( start_cell, start_cell + nLocalCells - 1 );

    elem_index = 0;

    // second pass: recompute each corner point and look up its vertex handle
    for( ; elem_index < nLocalCells; elem_index++ )
    {
        for( int k = 0; k < nv; k++ )
        {
            int index_v_arr = nv * elem_index + k;
            if( nv > 1 )
            {
                double x      = xv[index_v_arr];
                double y      = yv[index_v_arr];
                double cosphi = cos( pideg * y );
                double zmult  = sin( pideg * y );
                double xmult  = cosphi * cos( x * pideg );
                double ymult  = cosphi * sin( x * pideg );
                Node3D pt( xmult, ymult, zmult );
                conn_arr[elem_index * nv + k] = vertex_map[pt];
            }
        }
        EntityHandle cell = start_cell + elem_index;
        // set other tags, like xc, yc, frac, area
        /*rval = mbImpl->tag_set_data( xcTag, &cell, 1, &xc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set xc tag" );
        rval = mbImpl->tag_set_data( ycTag, &cell, 1, &yc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set yc tag" );
        rval = mbImpl->tag_set_data( areaTag, &cell, 1, &area[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set area tag" );
        rval = mbImpl->tag_set_data( fracTag, &cell, 1, &frac[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set frac tag" );
*/
        // set the global id too:
        int globalId = localGidCells[elem_index];

        rval = mbImpl->tag_set_data( mGlobalIdTag, &cell, 1, &globalId );MB_CHK_SET_ERR( rval, "Failed to set global id tag" );
        if( gmId >= 0 )
        {
            int localMask = masks[elem_index];
            rval          = mbImpl->tag_set_data( maskTag, &cell, 1, &localMask );MB_CHK_SET_ERR( rval, "Failed to set mask tag" );
        }
    }

    rval = mbImpl->add_entities( _fileSet, tmp_range );MB_CHK_SET_ERR( rval, "Failed to add new cells to current file set" );

    // modify local file set, to merge coincident vertices, and to correct repeated vertices in elements
    std::vector< Tag > tagList;
    tagList.push_back( mGlobalIdTag );
    if( gmId >= 0 ) tagList.push_back( maskTag );
    rval = IntxUtils::remove_padded_vertices( mbImpl, _fileSet, tagList );MB_CHK_SET_ERR( rval, "Failed to remove duplicate vertices" );

    // collect the surviving 2d cells and their vertices back into the file set
    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_ERR( rval );
    Range all_verts;
    rval = mbImpl->get_connectivity( faces, all_verts );MB_CHK_ERR( rval );
    rval = mbImpl->add_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
#ifdef MOAB_HAVE_MPI
    ParallelComm*& myPcomm = _readNC->myPcomm;
    if( myPcomm )
    {
        double tol = 1.e-12;  // this is the same as static tolerance in NCHelper
        ParallelMergeMesh pmm( myPcomm, tol );
        rval = pmm.merge( _fileSet,
                          /* do not do local merge*/ false,
                          /*  2d cells*/ 2 );MB_CHK_SET_ERR( rval, "Failed to merge vertices in parallel" );

        // assign global ids only for vertices, cells have them fine
        rval = myPcomm->assign_global_ids( _fileSet, /*dim*/ 0 );MB_CHK_ERR( rval );
        // remove all sets, edges and vertices from the file set
        Range edges, vertices;
        rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges, /*recursive*/ true );MB_CHK_ERR( rval );
        rval = mbImpl->get_entities_by_dimension( _fileSet, 0, vertices, /*recursive*/ true );MB_CHK_ERR( rval );
        rval = mbImpl->remove_entities( _fileSet, edges );MB_CHK_ERR( rval );
        rval = mbImpl->remove_entities( _fileSet, vertices );MB_CHK_ERR( rval );

        Range intfSets = myPcomm->interface_sets();
        // empty intf sets
        rval = mbImpl->clear_meshset( intfSets );MB_CHK_ERR( rval );
        // delete the sets without shame :)
        //sets.merge(intfSets);
        //rval = myPcomm->delete_entities(sets);MB_CHK_ERR( rval ); // will also clean shared ents !
        rval = myPcomm->delete_entities( edges );MB_CHK_ERR( rval );  // will also clean shared ents !
    }
#else
    rval = mbImpl->remove_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
#endif

    return MB_SUCCESS;
}
00384 
00385 #ifdef MOAB_HAVE_MPI
00386 ErrorCode NCHelperScrip::redistribute_local_cells( int start_cell_idx )
00387 {
00388     // If possible, apply Zoltan partition
00389 #ifdef MOAB_HAVE_ZOLTAN
00390     if( ScdParData::RCBZOLTAN == _readNC->partMethod )
00391     {
00392         // Read grid_center_lat coordinates of cell centers
00393         int xCellVarId;
00394         int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
00395         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
00396         std::vector< double > xc( nLocalCells );
00397         NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
00398         NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
00399         success              = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xc[0] );
00400         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );
00401 
00402         // Read grid_center_lon coordinates of cell centers
00403         int yCellVarId;
00404         success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &yCellVarId );
00405         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
00406         std::vector< double > yc( nLocalCells );
00407         success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yc[0] );
00408         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );
00409 
00410         // Zoltan partition using RCB; maybe more studies would be good, as to which partition
00411         // is better
00412         Interface*& mbImpl         = _readNC->mbImpl;
00413         DebugOutput& dbgOut        = _readNC->dbgOut;
00414         ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, false, 0, NULL );
00415         std::vector< double > xCell( nLocalCells );
00416         std::vector< double > yCell( nLocalCells );
00417         std::vector< double > zCell( nLocalCells );
00418         double pideg = 1.;  // radians
00419         if( degrees ) pideg = acos( -1.0 ) / 180.0;
00420         double x, y, cosphi;
00421         for( int i = 0; i < nLocalCells; i++ )
00422         {
00423             x        = xc[i];
00424             y        = yc[i];
00425             cosphi   = cos( pideg * y );
00426             zCell[i] = sin( pideg * y );
00427             xCell[i] = cosphi * cos( x * pideg );
00428             yCell[i] = cosphi * sin( x * pideg );
00429         }
00430         ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
00431         delete mbZTool;
00432 
00433         dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
00434         dbgOut.tprintf( 1, "                           localGidCells.size() = %d\n", (int)localGidCells.size() );
00435 
00436         // This is important: local cells are now redistributed, so nLocalCells might be different!
00437         nLocalCells = localGidCells.size();
00438 
00439         return MB_SUCCESS;
00440     }
00441 #endif
00442 
00443     // By default, apply trivial partition
00444     localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
00445 
00446     return MB_SUCCESS;
00447 }
00448 #endif
00449 
00450 } /* namespace moab */
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines