/*
 * NCHelperScrip.cpp
 */

#include "NCHelperScrip.hpp"
#include "moab/ReadUtilIface.hpp"
#include "AEntityFactory.hpp"
#include "moab/IntxMesh/IntxUtils.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelMergeMesh.hpp"
#endif
#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"
#endif

namespace moab
{

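// Illustrative sketch of a SCRIP grid file header (the dimension values shown
// are hypothetical; only the presence of the three dimension names matters for
// the detection below):
//
//   dimensions:
//       grid_size    = 16200 ;  // total number of cells
//       grid_corners = 4 ;      // maximum corners (vertices) per cell
//       grid_rank    = 2 ;      // rank of the logical grid
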
bool NCHelperScrip::can_read_file( ReadNC* readNC, int /*fileId*/ )
{
    std::vector< std::string >& dimNames = readNC->dimNames;

    // If the dimension names "grid_size", "grid_corners", and "grid_rank" all exist,
    // this should be a SCRIP grid
    if( ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_size" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_corners" ) ) != dimNames.end() ) &&
        ( std::find( dimNames.begin(), dimNames.end(), std::string( "grid_rank" ) ) != dimNames.end() ) )
    {
        return true;
    }

    return false;
}
ErrorCode NCHelperScrip::init_mesh_vals()
{
    Interface*& mbImpl                   = _readNC->mbImpl;
    std::vector< std::string >& dimNames = _readNC->dimNames;
    std::vector< int >& dimLens          = _readNC->dimLens;

    unsigned int idx;
    std::vector< std::string >::iterator vit;

    // get grid_size
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_size" ) ) != dimNames.end() )
    {
        idx       = vit - dimNames.begin();
        grid_size = dimLens[idx];
    }

    // get grid_corners
    if( ( vit = std::find( dimNames.begin(), dimNames.end(), "grid_corners" ) ) != dimNames.end() )
    {
        idx          = vit - dimNames.begin();
        grid_corners = dimLens[idx];
    }

    // we do not need conventional tags
    Tag convTagsCreated = 0;
    int def_val         = 0;
    ErrorCode rval      = mbImpl->tag_get_handle( "__CONV_TAGS_CREATED", 1, MB_TYPE_INTEGER, convTagsCreated,
                                                  MB_TAG_SPARSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble getting __CONV_TAGS_CREATED tag" );
    int create_conv_tags_flag = 1;
    rval                      = mbImpl->tag_set_data( convTagsCreated, &_fileSet, 1, &create_conv_tags_flag );MB_CHK_SET_ERR( rval, "Trouble setting __CONV_TAGS_CREATED tag" );

    // decide now on the units, by looking at the "units" attribute of grid_center_lon
    int xCellVarId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Trouble getting grid_center_lon" );
    std::map< std::string, ReadNC::VarData >& varInfo = _readNC->varInfo;
    auto vmit                                         = varInfo.find( "grid_center_lon" );
    if( varInfo.end() == vmit ) MB_SET_ERR( MB_FAILURE, "Couldn't find variable grid_center_lon" );
    ReadNC::VarData& glData = vmit->second;
    auto attIt              = glData.varAtts.find( "units" );
    if( attIt != glData.varAtts.end() )
    {
        unsigned int sz = attIt->second.attLen;
        std::string att_data;
        att_data.resize( sz + 1 );
        att_data[sz] = '\0';
        success =
            NCFUNC( get_att_text )( _fileId, attIt->second.attVarId, attIt->second.attName.c_str(), &att_data[0] );
        if( 0 == success && att_data.find( "radians" ) != std::string::npos ) degrees = false;
    }

    return MB_SUCCESS;
}
ErrorCode NCHelperScrip::create_mesh( Range& faces )
{
    Interface*& mbImpl  = _readNC->mbImpl;
    DebugOutput& dbgOut = _readNC->dbgOut;
    Tag& mGlobalIdTag   = _readNC->mGlobalIdTag;
    ErrorCode rval;

#ifdef MOAB_HAVE_MPI
    int rank              = 0;
    int procs             = 1;
    bool& isParallel      = _readNC->isParallel;
    ParallelComm* myPcomm = NULL;
    if( isParallel )
    {
        myPcomm = _readNC->myPcomm;
        rank    = myPcomm->proc_config().proc_rank();
        procs   = myPcomm->proc_config().proc_size();
    }

    if( procs >= 2 )
    {
        // Shift rank to obtain a rotated trivial partition
        int shifted_rank           = rank;
        int& trivialPartitionShift = _readNC->trivialPartitionShift;
        if( trivialPartitionShift > 0 ) shifted_rank = ( rank + trivialPartitionShift ) % procs;

        // Compute the number of local cells on this proc
        nLocalCells = int( std::floor( 1.0 * grid_size / procs ) );

        // The starting global cell index in the SCRIP file for this proc
        int start_cell_idx = shifted_rank * nLocalCells;

        // Number of extra cells after the equal split over procs
        int iextra = grid_size % procs;

        // Distribute the extra cells over the first iextra procs
        if( shifted_rank < iextra ) nLocalCells++;
        start_cell_idx += std::min( shifted_rank, iextra );

        start_cell_idx++;  // 0 based -> 1 based
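
        // Worked example (illustrative): grid_size = 10, procs = 4, no shift:
        //   floor(10/4) = 2 cells each, iextra = 10 % 4 = 2, so ranks 0 and 1 get
        //   one extra cell; the 1-based ranges become rank 0 -> [1,3],
        //   rank 1 -> [4,6], rank 2 -> [7,8], rank 3 -> [9,10]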

        // Redistribute local cells after the trivial partition (e.g. apply Zoltan partition)
        rval = redistribute_local_cells( start_cell_idx, myPcomm );MB_CHK_SET_ERR( rval, "Failed to redistribute local cells after trivial partition" );
    }
    else
    {
        nLocalCells = grid_size;
        localGidCells.insert( 1, nLocalCells );
    }
#else
    nLocalCells = grid_size;
    localGidCells.insert( 1, nLocalCells );
#endif
    dbgOut.tprintf( 1, " localGidCells.psize() = %d\n", (int)localGidCells.psize() );
    dbgOut.tprintf( 1, " localGidCells.size() = %d\n", (int)localGidCells.size() );

    // double grid_corner_lat(grid_size, grid_corners) ;
    // double grid_corner_lon(grid_size, grid_corners) ;
    int xvId, yvId;
    int success = NCFUNC( inq_varid )( _fileId, "grid_corner_lon", &xvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lon" );
    success = NCFUNC( inq_varid )( _fileId, "grid_corner_lat", &yvId );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_corner_lat" );

    // important upgrade: read masks if they exist, and save them as tags
    int gmId      = -1;
    int sizeMasks = 0;
#ifdef MOAB_HAVE_PNETCDF
    int factorRequests = 2;  // in general we read only 2 variables, xv and yv
#endif
    success     = NCFUNC( inq_varid )( _fileId, "grid_imask", &gmId );
    Tag maskTag = 0;  // not sure yet if we have the masks or not
    if( success )
    {
        gmId = -1;  // we do not have masks
    }
    else
    {
        sizeMasks = nLocalCells;
#ifdef MOAB_HAVE_PNETCDF
        factorRequests = 3;  // we also need to read the masks, distributed
#endif
        // create the maskTag GRID_IMASK, with a default value of 1
        int def_val = 1;
        rval =
            mbImpl->tag_get_handle( "GRID_IMASK", 1, MB_TYPE_INTEGER, maskTag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val );MB_CHK_SET_ERR( rval, "Trouble creating GRID_IMASK tag" );
    }

    std::vector< double > xv( nLocalCells * grid_corners );
    std::vector< double > yv( nLocalCells * grid_corners );
    std::vector< int > masks( sizeMasks );
#ifdef MOAB_HAVE_PNETCDF
    size_t nb_reads = localGidCells.psize();
    std::vector< int > requests( nb_reads * factorRequests );
    std::vector< int > statuses( nb_reads * factorRequests );
    size_t idxReq = 0;
#endif
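    // With PnetCDF, each partial read below is posted as a nonblocking request
    // (one per contiguous subrange of localGidCells, per variable), and all of
    // them are completed by a single wait_all call after the loop; otherwise
    // each read is a blocking collective read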
    size_t indexInArray     = 0;
    size_t indexInMaskArray = 0;
    for( Range::pair_iterator pair_iter = localGidCells.pair_begin(); pair_iter != localGidCells.pair_end();
         ++pair_iter )
    {
        EntityHandle starth      = pair_iter->first;
        EntityHandle endh        = pair_iter->second;
        NCDF_SIZE read_starts[2] = { static_cast< NCDF_SIZE >( starth - 1 ), 0 };
        NCDF_SIZE read_counts[2] = { static_cast< NCDF_SIZE >( endh - starth + 1 ),
                                     static_cast< NCDF_SIZE >( grid_corners ) };

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, xvId, read_starts, read_counts, &( xv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lon data in a loop" );

        // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
        success = NCFUNCREQG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ),
                                              &requests[idxReq++] );
#else
        success = NCFUNCAG( _vara_double )( _fileId, yvId, read_starts, read_counts, &( yv[indexInArray] ) );
#endif
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_corner_lat data in a loop" );
        // Increment the index for the next subrange
        indexInArray += ( endh - starth + 1 ) * grid_corners;

        if( gmId >= 0 )  // it means we need to read the masks too, distributed:
        {
            NCDF_SIZE read_st = static_cast< NCDF_SIZE >( starth - 1 );
            NCDF_SIZE read_ct = static_cast< NCDF_SIZE >( endh - starth + 1 );
            // Do a partial read in each subrange, for the mask variable:
#ifdef MOAB_HAVE_PNETCDF
            success = NCFUNCREQG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ),
                                               &requests[idxReq++] );
#else
            success = NCFUNCAG( _vara_int )( _fileId, gmId, &read_st, &read_ct, &( masks[indexInMaskArray] ) );
#endif
            if( success ) MB_SET_ERR( MB_FAILURE, "Failed on mask read" );
            indexInMaskArray += endh - starth + 1;
        }
    }

#ifdef MOAB_HAVE_PNETCDF
    // Wait outside the loop for all nonblocking reads to complete
    success = NCFUNC( wait_all )( _fileId, requests.size(), &requests[0], &statuses[0] );
    if( success ) MB_SET_ERR( MB_FAILURE, "Failed on wait_all" );
#endif

    // We have now read xv and yv for all corners in the local mesh, and masks if they exist

    // Create vertices; first identify the distinct ones, with a tolerance
    std::map< Node3D, EntityHandle > vertex_map;

    // Set vertex coordinates
    // all xv, yv are read, but only those with the correct mask on are used

    int elem_index = 0;   // local index in the netcdf arrays
    double pideg   = 1.;  // conversion factor; 1 if coordinates are already in radians
    if( degrees ) pideg = acos( -1.0 ) / 180.0;
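
    // Each (lon, lat) corner is mapped to a point on the unit sphere:
    //   x = cos(lat) * cos(lon),  y = cos(lat) * sin(lon),  z = sin(lat)
    // so coincident corners from neighboring cells collapse to the same Node3D key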

    for( ; elem_index < nLocalCells; elem_index++ )
    {
        for( int k = 0; k < grid_corners; k++ )
        {
            int index_v_arr = grid_corners * elem_index + k;
            double x, y;
            x             = xv[index_v_arr];
            y             = yv[index_v_arr];
            double cosphi = cos( pideg * y );
            double zmult  = sin( pideg * y );
            double xmult  = cosphi * cos( x * pideg );
            double ymult  = cosphi * sin( x * pideg );
            Node3D pt( xmult, ymult, zmult );
            vertex_map[pt] = 0;
        }
    }
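    // vertex_map now holds one entry per distinct unit-sphere point; the handles
    // (currently 0) are filled in below, once the vertex sequence is allocated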
    int nLocalVertices = (int)vertex_map.size();
    std::vector< double* > arrays;
    EntityHandle start_vertex, vtx_handle;
    rval = _readNC->readMeshIface->get_node_coords( 3, nLocalVertices, 0, start_vertex, arrays );MB_CHK_SET_ERR( rval, "Failed to create local vertices" );

    vtx_handle = start_vertex;
    // Copy vertex coordinates into the entity sequence coordinate arrays
    // and copy the handle into vertex_map
    double *x = arrays[0], *y = arrays[1], *z = arrays[2];
    for( auto i = vertex_map.begin(); i != vertex_map.end(); ++i )
    {
        i->second = vtx_handle;
        ++vtx_handle;
        *x = i->first.coords[0];
        ++x;
        *y = i->first.coords[1];
        ++y;
        *z = i->first.coords[2];
        ++z;
    }

    EntityHandle start_cell;
    int nv              = grid_corners;
    EntityType mdb_type = MBVERTEX;
    if( nv == 3 )
        mdb_type = MBTRI;
    else if( nv == 4 )
        mdb_type = MBQUAD;
    else if( nv > 4 )
        mdb_type = MBPOLYGON;
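    // SCRIP stores a fixed number of corners (grid_corners) per cell; cells with
    // fewer real corners repeat a vertex, and those padded connectivity entries
    // are corrected later by IntxUtils::remove_padded_vertices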

    Range tmp_range;
    EntityHandle* conn_arr;

    rval = _readNC->readMeshIface->get_element_connect( nLocalCells, nv, mdb_type, 0, start_cell, conn_arr );MB_CHK_SET_ERR( rval, "Failed to create local cells" );
    tmp_range.insert( start_cell, start_cell + nLocalCells - 1 );

    elem_index = 0;

    for( ; elem_index < nLocalCells; elem_index++ )
    {
        for( int k = 0; k < nv; k++ )
        {
            int index_v_arr = nv * elem_index + k;
            if( nv > 1 )
            {
                double x      = xv[index_v_arr];
                double y      = yv[index_v_arr];
                double cosphi = cos( pideg * y );
                double zmult  = sin( pideg * y );
                double xmult  = cosphi * cos( x * pideg );
                double ymult  = cosphi * sin( x * pideg );
                Node3D pt( xmult, ymult, zmult );
                conn_arr[elem_index * nv + k] = vertex_map[pt];
            }
        }
        EntityHandle cell = start_cell + elem_index;
        // set other tags, like xc, yc, frac, area
        /*rval = mbImpl->tag_set_data( xcTag, &cell, 1, &xc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set xc tag" );
        rval = mbImpl->tag_set_data( ycTag, &cell, 1, &yc[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set yc tag" );
        rval = mbImpl->tag_set_data( areaTag, &cell, 1, &area[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set area tag" );
        rval = mbImpl->tag_set_data( fracTag, &cell, 1, &frac[elem_index] );MB_CHK_SET_ERR( rval, "Failed to set frac tag" );
        */
        // set the global id too:
        int globalId = localGidCells[elem_index];

        rval = mbImpl->tag_set_data( mGlobalIdTag, &cell, 1, &globalId );MB_CHK_SET_ERR( rval, "Failed to set global id tag" );
        if( gmId >= 0 )
        {
            int localMask = masks[elem_index];
            rval          = mbImpl->tag_set_data( maskTag, &cell, 1, &localMask );MB_CHK_SET_ERR( rval, "Failed to set mask tag" );
        }
    }

    rval = mbImpl->add_entities( _fileSet, tmp_range );MB_CHK_SET_ERR( rval, "Failed to add new cells to current file set" );

    // modify the local file set, to merge coincident vertices and to correct repeated vertices in elements
    std::vector< Tag > tagList;
    tagList.push_back( mGlobalIdTag );
    if( gmId >= 0 ) tagList.push_back( maskTag );
    rval = IntxUtils::remove_padded_vertices( mbImpl, _fileSet, tagList );MB_CHK_SET_ERR( rval, "Failed to remove duplicate vertices" );

    rval = mbImpl->get_entities_by_dimension( _fileSet, 2, faces );MB_CHK_ERR( rval );
    Range all_verts;
    rval = mbImpl->get_connectivity( faces, all_verts );MB_CHK_ERR( rval );
    rval = mbImpl->add_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
    // need to add adjacencies; TODO: fix this for all nc readers
    // this logic is copied from migrate mesh in ParCommGraph
    Core* mb                 = (Core*)mbImpl;
    AEntityFactory* adj_fact = mb->a_entity_factory();
    if( !adj_fact->vert_elem_adjacencies() )
        adj_fact->create_vert_elem_adjacencies();
    else
    {
        for( Range::iterator it = faces.begin(); it != faces.end(); ++it )
        {
            EntityHandle eh          = *it;
            const EntityHandle* conn = NULL;
            int num_nodes            = 0;
            rval                     = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
            adj_fact->notify_create_entity( eh, conn, num_nodes );
        }
    }

#ifdef MOAB_HAVE_MPI
    if( myPcomm )
    {
        double tol = 1.e-12;  // this is the same as the static tolerance in NCHelper
        ParallelMergeMesh pmm( myPcomm, tol );
        rval = pmm.merge( _fileSet,
                          /* do not do local merge */ false,
                          /* 2d cells */ 2 );MB_CHK_SET_ERR( rval, "Failed to merge vertices in parallel" );
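
        // The parallel merge identifies coincident vertices across processor
        // boundaries (within tol) and resolves their shared/ownership status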

        // assign global ids only for vertices; cells already have them set
        rval = myPcomm->assign_global_ids( _fileSet, /*dim*/ 0 );MB_CHK_ERR( rval );
        // remove all edges and vertices from the file set
        Range edges, vertices;
        rval = mbImpl->get_entities_by_dimension( _fileSet, 1, edges, /*recursive*/ true );MB_CHK_ERR( rval );
        rval = mbImpl->get_entities_by_dimension( _fileSet, 0, vertices, /*recursive*/ true );MB_CHK_ERR( rval );
        rval = mbImpl->remove_entities( _fileSet, edges );MB_CHK_ERR( rval );
        rval = mbImpl->remove_entities( _fileSet, vertices );MB_CHK_ERR( rval );

        Range intfSets = myPcomm->interface_sets();
        // empty the interface sets
        rval = mbImpl->clear_meshset( intfSets );MB_CHK_ERR( rval );
        // delete the sets without shame :)
        //sets.merge(intfSets);
        //rval = myPcomm->delete_entities(sets);MB_CHK_ERR( rval ); // will also clean shared ents !
        rval = myPcomm->delete_entities( edges );MB_CHK_ERR( rval );  // will also clean shared ents !
    }
#else
    rval = mbImpl->remove_entities( _fileSet, all_verts );MB_CHK_ERR( rval );
#endif

    return MB_SUCCESS;
}

#ifdef MOAB_HAVE_MPI
ErrorCode NCHelperScrip::redistribute_local_cells( int start_cell_idx, ParallelComm* pco )
{
    // If possible, apply the Zoltan partition
#ifdef MOAB_HAVE_ZOLTAN
    if( ScdParData::RCBZOLTAN == _readNC->partMethod )
    {
        // Read grid_center_lon coordinates of cell centers
        int xCellVarId;
        int success = NCFUNC( inq_varid )( _fileId, "grid_center_lon", &xCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lon" );
        std::vector< double > xc( nLocalCells );
        NCDF_SIZE read_start = static_cast< NCDF_SIZE >( start_cell_idx - 1 );
        NCDF_SIZE read_count = static_cast< NCDF_SIZE >( nLocalCells );
        success              = NCFUNCAG( _vara_double )( _fileId, xCellVarId, &read_start, &read_count, &xc[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lon data" );

        // Read grid_center_lat coordinates of cell centers
        int yCellVarId;
        success = NCFUNC( inq_varid )( _fileId, "grid_center_lat", &yCellVarId );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to get variable id of grid_center_lat" );
        std::vector< double > yc( nLocalCells );
        success = NCFUNCAG( _vara_double )( _fileId, yCellVarId, &read_start, &read_count, &yc[0] );
        if( success ) MB_SET_ERR( MB_FAILURE, "Failed to read grid_center_lat data" );

        // Zoltan partition using RCB; more study may be needed to decide which
        // partition method is best
        Interface*& mbImpl         = _readNC->mbImpl;
        DebugOutput& dbgOut        = _readNC->dbgOut;
        ZoltanPartitioner* mbZTool = new ZoltanPartitioner( mbImpl, pco, false, 0, NULL );
        std::vector< double > xCell( nLocalCells );
        std::vector< double > yCell( nLocalCells );
        std::vector< double > zCell( nLocalCells );
        double pideg = 1.;  // conversion factor; 1 if coordinates are already in radians
        if( degrees ) pideg = acos( -1.0 ) / 180.0;
        double x, y, cosphi;
        // Convert cell centers to unit-sphere coordinates for the RCB partitioner
        for( int i = 0; i < nLocalCells; i++ )
        {
            x        = xc[i];
            y        = yc[i];
            cosphi   = cos( pideg * y );
            zCell[i] = sin( pideg * y );
            xCell[i] = cosphi * cos( x * pideg );
            yCell[i] = cosphi * sin( x * pideg );
        }
        ErrorCode rval = mbZTool->repartition( xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells );MB_CHK_SET_ERR( rval, "Error in Zoltan partitioning" );
        delete mbZTool;

        dbgOut.tprintf( 1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize() );
        dbgOut.tprintf( 1, "                           localGidCells.size() = %d\n", (int)localGidCells.size() );

        // This is important: local cells are now redistributed, so nLocalCells might be different!
        nLocalCells = localGidCells.size();

        return MB_SUCCESS;
    }
#endif

    // By default, apply the trivial partition
    localGidCells.insert( start_cell_idx, start_cell_idx + nLocalCells - 1 );

    return MB_SUCCESS;
}
#endif

} /* namespace moab */