MOAB: Mesh Oriented datABase  (version 5.3.1)
NCHelperScrip.cpp
Go to the documentation of this file.
00001 /*
00002  * NCHelperScrip.cpp
00003  */
00004 
00005 #include "NCHelperScrip.hpp"
00006 #include "moab/ReadUtilIface.hpp"
00007 #include "moab/IntxMesh/IntxUtils.hpp"
00008 #ifdef MOAB_HAVE_MPI
00009 #include "moab/ParallelMergeMesh.hpp"
00010 #endif
00011 #ifdef MOAB_HAVE_ZOLTAN
00012 #include "moab/ZoltanPartitioner.hpp"
00013 #endif
00014 
00015 namespace moab {
00016 
00017 bool NCHelperScrip::can_read_file( ReadNC* readNC, int fileId )
00018 {
00019     std::vector< std::string >& dimNames = readNC->dimNames;
00020 
00021     // If dimension names "grid_size" AND "grid_corners" AND "grid_rank" exist then it should be the Scrip grid
00022     if( ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_size" ) ) != dimNames.end() ) &&
00023         ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_corners" ) ) != dimNames.end() ) &&
00024         ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_rank" ) ) != dimNames.end() )  )
00025     {
00026 
00027         return true;
00028     }
00029 
00030     return false;
00031 }
00032 ErrorCode NCHelperScrip::init_mesh_vals()
00033 {
00034     Interface*& mbImpl                                = _readNC->mbImpl;
00035     std::vector< std::string >& dimNames              = _readNC->dimNames;
00036     std::vector< int >& dimLens                       = _readNC->dimLens;
00037 
00038     unsigned int idx;
00039     std::vector< std::string >::iterator vit;
00040 
00041     // get grid_size
00042     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_size" ) ) != dimNames.end() )
00043     {
00044         idx             = vit - dimNames.begin();
00045         grid_size = dimLens[idx];
00046     }
00047 
00048     // get grid_corners
00049     if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_corners" ) ) != dimNames.end() )
00050     {
00051         idx             = vit - dimNames.begin();
00052         grid_corners = dimLens[idx];
00053     }
00054     // do not need conventional tags
00055     Tag convTagsCreated = 0;
00056     int def_val         = 0;
00057     ErrorCode rval                = mbImpl->tag_get_handle( "__CONV_TAGS_CREATED", 1, MB_TYPE_INTEGER, convTagsCreated,
00058                                    MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble getting _CONV_TAGS_CREATED tag" );
00059     int create_conv_tags_flag = 1;
00060     rval                      = mbImpl->tag_set_data( convTagsCreated, &_fileSet, 1, &create_conv_tags_flag );MB_CHK_SET_ERR( rval, "Trouble setting _CONV_TAGS_CREATED tag" );
00061 
00062 
00063     return MB_SUCCESS;
00064 }
// Build the local MOAB mesh from the SCRIP file:
//  1. decide which cells this process owns (trivial split, optionally
//     redistributed via redistribute_local_cells / Zoltan),
//  2. read grid_corner_lon/lat for the owned cells (non-blocking reads
//     under PnetCDF, blocking otherwise),
//  3. deduplicate corner points through a Node3D map, create vertices,
//  4. create cells (tri/quad/polygon by grid_corners), set global ids,
//  5. clean padded/duplicate vertices and, in parallel, merge shared
//     vertices across processes and assign vertex global ids.
// On success, 'faces' holds the 2D cells added to the file set.
ErrorCode NCHelperScrip::create_mesh( Range& faces )
{
    Interface*& mbImpl    = _readNC->mbImpl;
    DebugOutput& dbgOut   = _readNC->dbgOut;
    Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
    ErrorCode rval;

#ifdef MOAB_HAVE_MPI
    int rank         = 0;
    int procs        = 1;
    bool& isParallel = _readNC->isParallel;
    if( isParallel )
    {
        ParallelComm*& myPcomm = _readNC->myPcomm;
        rank                   = myPcomm->proc_config().proc_rank();
        procs                  = myPcomm->proc_config().proc_size();
    }

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank           = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * grid_size / procs ) );

        // The starting global cell index in the MPAS file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after equal split over procs
        int iextra = grid_size % procs;

        // Allocate extra cells over procs
        // (first 'iextra' shifted ranks each take one extra cell)
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based

        // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
        // NOTE: this may change nLocalCells and fills localGidCells.
        ErrorCode rval = redistribute_local_cells( start_cell_idx );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        // Serial (or single-proc) run: this proc owns every cell, ids 1..grid_size
        nLocalCells = grid_size;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    // Non-MPI build: all cells are local
    nLocalCells = grid_size;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // File layout of the corner coordinate variables:
    // double grid_corner_lat(grid_size, grid_corners) ;
    // double grid_corner_lon(grid_size, grid_corners) ;
    int xvId, yvId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );

    // Corner coordinates for all locally-owned cells, grid_corners per cell
    std::vector< double > xv( nLocalCells * grid_corners );
    std::vector< double > yv( nLocalCells * grid_corners );
#ifdef MOAB_HAVE_PNETCDF
    // One non-blocking read request per contiguous id subrange, per variable
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads * 2);
    std::vector< int > statuss( nb_reads * 2);
    size_t idxReq = 0;
#endif
    size_t indexInArray = 0;
    // Iterate the contiguous subranges of local global ids; each subrange
    // maps to one hyperslab read of [cells x corners] values.
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth  = pair_iter->first;
        EntityHandle endh    = pair_iter->second;
        // Global ids are 1-based; NetCDF indices are 0-based, hence starth - 1
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                             static_cast< NCDF_SIZE >( grid_corners ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xvId, read_starts, read_counts,
                                           &( xv[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xvId, read_starts, read_counts,
                                         &( xv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yvId, read_starts, read_counts,
                                          &( yv[indexInArray] ), &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yvId, read_starts, read_counts,
                                        &( yv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );
        // Increment the index for next subrange
        indexInArray += ( endh - starth + 1 ) * grid_corners;
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop (completes all queued non-blocking reads)
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuss[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // so we read xv, yv for all corners in the local mesh

    // Create vertices; first identify different ones, with a tolerance
    // (Node3D ordering/equality deduplicates coincident corners; handle
    // value is filled in after vertex creation below)
    std::map< Node3D, EntityHandle > vertex_map;

    // Set vertex coordinates
    // will read all xv, yv, but use only those with correct mask on

    int elem_index     = 0;  // total index in netcdf arrays
    const double pideg = acos( -1.0 ) / 180.0;  // degrees -> radians factor

    // Pass 1: convert every (lon, lat) corner to a unit-sphere 3D point and
    // collect the distinct points in vertex_map.
    for( ; elem_index < nLocalCells; elem_index++ )
    {
        // set area and fraction on those elements too
        for( int k = 0; k < grid_corners; k++ )
        {
            int index_v_arr = grid_corners * elem_index + k;
            double x, y;
            x                        = xv[index_v_arr];
            y                        = yv[index_v_arr];
            double cosphi            = cos( pideg * y );
            double zmult             = sin( pideg * y );
            double xmult             = cosphi * cos( x * pideg );
            double ymult             = cosphi * sin( x * pideg );
            Node3D pt(xmult, ymult, zmult);
            vertex_map[pt] = 0;
        }
    }
    int nLocalVertices = (int) vertex_map.size();
    std::vector< double* > arrays;
    EntityHandle start_vertex, vtx_handle;
    rval = _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    vtx_handle = start_vertex;
    // Copy vertex coordinates into entity sequence coordinate arrays
    // and copy handle into vertex_map.
    double *x = arrays[0], *y = arrays[1], *z = arrays[2];
    for( auto i = vertex_map.begin(); i != vertex_map.end(); ++i )
    {
        i->second = vtx_handle;
        ++vtx_handle;
        *x = i->first.coords[0];
        ++x;
        *y = i->first.coords[1];
        ++y;
        *z = i->first.coords[2];
        ++z;
    }

    // All local cells share the same corner count, so one element type fits all
    EntityHandle start_cell;
    int nv = grid_corners;
    EntityType mdb_type = MBVERTEX;
    if( nv == 3 )
        mdb_type = MBTRI;
    else if( nv == 4 )
        mdb_type = MBQUAD;
    else if( nv > 4 )  // (nv > 4)
        mdb_type = MBPOLYGON;

    Range tmp_range;
    EntityHandle* conn_arr;

    rval = _readNC->readMeshIface->get_element_connect( nLocalCells, nv, mdb_type, 0, start_cell, conn_arr );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
    tmp_range.insert( start_cell, start_cell + nLocalCells - 1 );

    elem_index = 0;

    // Pass 2: recompute each corner's 3D point and look up its vertex handle
    // to fill the connectivity array; also set the cell's global id.
    for( ; elem_index < nLocalCells; elem_index++ )
    {
        for( int k = 0; k < nv; k++ )
        {
            int index_v_arr = nv * elem_index + k;
            if( nv > 1 )
            {
                double x                        = xv[index_v_arr];
                double y                        = yv[index_v_arr];
                double cosphi            = cos( pideg * y );
                double zmult             = sin( pideg * y );
                double xmult             = cosphi * cos( x * pideg );
                double ymult             = cosphi * sin( x * pideg );
                Node3D pt(xmult, ymult, zmult);
                conn_arr[elem_index * nv + k] =  vertex_map[pt];

            }
        }
        EntityHandle cell = start_cell + elem_index;
        // set other tags, like xc, yc, frac, area
        /*rval = mbImpl->tag_set_data( xcTag, &cell, 1, &xc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set xc tag" );
        rval = mbImpl->tag_set_data( ycTag, &cell, 1, &yc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set yc tag" );
        rval = mbImpl->tag_set_data( areaTag, &cell, 1, &area[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set area tag" );
        rval = mbImpl->tag_set_data( fracTag, &cell, 1, &frac[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set frac tag" );
*/
        // set the global id too:
        int globalId = localGidCells[elem_index];

        rval = mbImpl->tag_set_data( mGlobalIdTag, &cell, 1, &globalId );MB_CHK_SET_ERR( rval, "Failed to set global id tag" );

    }

    rval = mbImpl->add_entities( _fileSet, tmp_range );MB_CHK_SET_ERR( rval, "Failed to add new cells to current file set" );

    // modify local file set, to merge coincident vertices, and to correct repeated vertices in elements
    std::vector< Tag > tagList;
    tagList.push_back( mGlobalIdTag );
    rval       = IntxUtils::remove_padded_vertices( mbImpl, _fileSet, tagList );MB_CHK_SET_ERR( rval, "Failed to remove duplicate vertices" );

    // Collect the surviving 2D cells and re-add their vertices to the set
    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_ERR( rval );
    Range all_verts;
    rval = mbImpl->get_connectivity( faces, all_verts );MB_CHK_ERR( rval );
    rval = mbImpl->add_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
#ifdef MOAB_HAVE_MPI
    ParallelComm*& myPcomm = _readNC->myPcomm;
    if( myPcomm )
    {
        double tol = 1.e-12; // this is the same as static tolerance in NCHelper
        ParallelMergeMesh pmm( myPcomm, tol );
        rval = pmm.merge( _fileSet,
                          /* do not do local merge*/ false,
                          /*  2d cells*/ 2 );MB_CHK_SET_ERR( rval, "Failed to merge vertices in parallel" );

        // assign global ids only for vertices, cells have them fine
        rval = myPcomm->assign_global_ids( _fileSet, /*dim*/ 0 );MB_CHK_ERR( rval );
    }
#endif

    return MB_SUCCESS;
}
00302 
00303 #ifdef MOAB_HAVE_MPI
00304 ErrorCode NCHelperScrip::redistribute_local_cells( int start_cell_idx )
00305 {
00306     // If possible, apply Zoltan partition
00307 #ifdef MOAB_HAVE_ZOLTAN
00308     if( ScdParData::RCBZOLTAN == _readNC->partMethod )
00309     {
00310         // Read grid_center_lat coordinates of cell centers
00311         int xCellVarId;
00312         int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
00313         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
00314         std::vector< double > xc( nLocalCells );
00315         NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
00316         NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
00317         success              = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xc[0] );
00318         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );
00319 
00320         // Read grid_center_lon coordinates of cell centers
00321         int yCellVarId;
00322         success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &yCellVarId );
00323         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
00324         std::vector< double > yc( nLocalCells );
00325         success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yc[0] );
00326         if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );
00327 
00328         // Zoltan partition using RCB; maybe more studies would be good, as to which partition
00329         // is better
00330         Interface*& mbImpl         = _readNC->mbImpl;
00331         DebugOutput& dbgOut        = _readNC->dbgOut;
00332         ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, false, 0, NULL );
00333         std::vector< double > xCell( nLocalCells );
00334         std::vector< double > yCell( nLocalCells );
00335         std::vector< double > zCell( nLocalCells );
00336         const double pideg = acos( -1.0 ) / 180.0;
00337         double x,y, cosphi;
00338         for (int i=0; i<nLocalCells; i++)
00339         {
00340             x = xc[i];
00341             y = yc[i];
00342             cosphi = cos( pideg * y );
00343             zCell[i] = sin( pideg * y );
00344             xCell[i] = cosphi * cos( x * pideg );
00345             yCell[i] = cosphi * sin( x * pideg );
00346         }
00347         ErrorCode rval             = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
00348         delete mbZTool;
00349 
00350         dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
00351         dbgOut.tprintf( 1, "                           localGidCells.size() = %d\n", (int)localGidCells.size() );
00352 
00353         // This is important: local cells are now redistributed, so nLocalCells might be different!
00354         nLocalCells = localGidCells.size();
00355 
00356         return MB_SUCCESS;
00357     }
00358 #endif
00359 
00360     // By default, apply trivial partition
00361     localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );
00362 
00363     return MB_SUCCESS;
00364 }
00365 #endif
00366 
00367 } /* namespace moab */
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines