TempestOnlineMapIO.cpp
00001 /*
00002  * =====================================================================================
00003  *
00004  *       Filename:  TempestOnlineMapIO.cpp
00005  *
00006  *    Description:  All I/O implementations related to TempestOnlineMap
00007  *
00008  *        Version:  1.0
00009  *        Created:  02/06/2021 02:35:41
00010  *
00011  *         Author:  Vijay S. Mahadevan (vijaysm), [email protected]
00012  *        Company:  Argonne National Lab
00013  *
00014  * =====================================================================================
00015  */
00016 
00017 #include "FiniteElementTools.h"
00018 #include "moab/Remapping/TempestOnlineMap.hpp"
00019 #include "moab/TupleList.hpp"
00020 
00021 #ifdef MOAB_HAVE_NETCDFPAR
00022 #include "netcdfcpp_par.hpp"
00023 #else
00024 #include "netcdfcpp.h"
00025 #endif
00026 
00027 #ifdef MOAB_HAVE_PNETCDF
00028 #include <pnetcdf.h>
00029 
00030 #define ERR_PARNC( err )                                                              \
00031     if( err != NC_NOERR )                                                             \
00032     {                                                                                 \
00033         fprintf( stderr, "Error at line %d: %s\n", __LINE__, ncmpi_strerror( err ) ); \
00034         MPI_Abort( MPI_COMM_WORLD, 1 );                                               \
00035     }
00036 
00037 #endif
00038 
00039 #ifdef MOAB_HAVE_MPI
00040 
00041 #define MPI_CHK_ERR( err )                                          \
00042     if( err )                                                       \
00043     {                                                               \
00044         std::cout << "MPI Failure. ErrorCode (" << ( err ) << ") "; \
00045         std::cout << "\nMPI Aborting... \n";                        \
00046         return moab::MB_FAILURE;                                    \
00047     }
00048 
00049 int moab::TempestOnlineMap::rearrange_arrays_by_dofs( const std::vector< unsigned int >& gdofmap,
00050                                                       DataArray1D< double >& vecFaceArea,
00051                                                       DataArray1D< double >& dCenterLon,
00052                                                       DataArray1D< double >& dCenterLat,
00053                                                       DataArray2D< double >& dVertexLon,
00054                                                       DataArray2D< double >& dVertexLat,
00055                                                       std::vector< int >& masks,
00056                                                       unsigned& N,  // in: local count; out: local count after redistribution
00057                                                       int& nv,
00058                                                       int& maxdof )
00059 {
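    // redistribute the per-cell arrays (areas, center and vertex coordinates, masks) so that each task
    // owns a contiguous chunk of ~(maxdof + 1) / size global dofs, using a crystal-router transfer;
    // entries arriving for the same global dof are collapsed, with their areas accumulated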
00060     // first decide maxdof, for partitioning
00061 
00062     unsigned int localmax = 0;
00063     for( unsigned i = 0; i < N; i++ )
00064         if( gdofmap[i] > localmax ) localmax = gdofmap[i];
00065 
00066     int localMax[2];
00067     int globalMax[2];
00068     localMax[0] = localmax;
00069     localMax[1] = nv;
00070     // we also need to find out maximum of nv; if nv is 0 (no cells on a task), it will lead to
00071     // problems in gs transfer, because the size of the tuple depends on nv
00072 
00073     MPI_Allreduce( localMax, globalMax, 2, MPI_INT, MPI_MAX, m_pcomm->comm() );
00074     maxdof = globalMax[0];
00075     nv     = globalMax[1];
00076     // maxdof is 0-based, so the actual number of dofs is maxdof + 1
00077     // decide the partitioning based on ( maxdof + 1 ) / size
00078     // (each task will own a contiguous chunk of dofs)
00079 
00080     int size_per_task = ( maxdof + 1 ) / size;  // based on this, the processor that will own dof x is x / size_per_task
00081     // so we decide to reorder by actual dof, such that task 0 has dofs from [0 to size_per_task), etc
00082     moab::TupleList tl;
00083     unsigned numr = 2 * nv + 3;         //  doubles: area, center lon, center lat, then nv vertex lons and nv vertex lats
00084     tl.initialize( 3, 0, 0, numr, N );  // ints: to proc, global dof, mask
00085     tl.enableWriteAccess();
00086     // populate
00087     for( unsigned i = 0; i < N; i++ )
00088     {
00089         int gdof    = gdofmap[i];
00090         int to_proc = gdof / size_per_task;
00091         int mask    = masks[i];
00092         if( to_proc >= size ) to_proc = size - 1;  // the last ones go to last proc
00093         int n                  = tl.get_n();
00094         tl.vi_wr[3 * n]        = to_proc;
00095         tl.vi_wr[3 * n + 1]    = gdof;
00096         tl.vi_wr[3 * n + 2]    = mask;
00097         tl.vr_wr[n * numr]     = vecFaceArea[i];
00098         tl.vr_wr[n * numr + 1] = dCenterLon[i];
00099         tl.vr_wr[n * numr + 2] = dCenterLat[i];
00100         for( int j = 0; j < nv; j++ )
00101         {
00102             tl.vr_wr[n * numr + 3 + j]      = dVertexLon[i][j];
00103             tl.vr_wr[n * numr + 3 + nv + j] = dVertexLat[i][j];
00104         }
00105         tl.inc_n();
00106     }
00107 
00108     // now do the heavy communication
00109 
00110     ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tl, 0 );
00111 
00112     // after communication, on each processor we should have tuples coming in
00113     // still need to order by global dofs; then rearrange input vectors
00114     moab::TupleList::buffer sort_buffer;
00115     sort_buffer.buffer_init( tl.get_n() );
00116     tl.sort( 1, &sort_buffer );
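    // after sorting on the global dof field, duplicate dofs (received from several tasks) are adjacent,
    // so a single pass can count the unique dofs and then collapse / accumulate them below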
00117     // count how many are unique, and collapse
00118     int nb_unique = 1;
00119     for( unsigned i = 0; i < tl.get_n() - 1; i++ )
00120     {
00121         if( tl.vi_wr[3 * i + 1] != tl.vi_wr[3 * i + 4] ) nb_unique++;
00122     }
00123     vecFaceArea.Allocate( nb_unique );
00124     dCenterLon.Allocate( nb_unique );
00125     dCenterLat.Allocate( nb_unique );
00126     dVertexLon.Allocate( nb_unique, nv );
00127     dVertexLat.Allocate( nb_unique, nv );
00128     masks.resize( nb_unique );
00129     int current_size = 1;
00130     vecFaceArea[0]   = tl.vr_wr[0];
00131     dCenterLon[0]    = tl.vr_wr[1];
00132     dCenterLat[0]    = tl.vr_wr[2];
00133     masks[0]         = tl.vi_wr[2];
00134     for( int j = 0; j < nv; j++ )
00135     {
00136         dVertexLon[0][j] = tl.vr_wr[3 + j];
00137         dVertexLat[0][j] = tl.vr_wr[3 + nv + j];
00138     }
00139     for( unsigned i = 0; i < tl.get_n() - 1; i++ )
00140     {
00141         int i1 = i + 1;
00142         if( tl.vi_wr[3 * i + 1] != tl.vi_wr[3 * i + 4] )
00143         {
00144             vecFaceArea[current_size] = tl.vr_wr[i1 * numr];
00145             dCenterLon[current_size]  = tl.vr_wr[i1 * numr + 1];
00146             dCenterLat[current_size]  = tl.vr_wr[i1 * numr + 2];
00147             for( int j = 0; j < nv; j++ )
00148             {
00149                 dVertexLon[current_size][j] = tl.vr_wr[i1 * numr + 3 + j];
00150                 dVertexLat[current_size][j] = tl.vr_wr[i1 * numr + 3 + nv + j];
00151             }
00152             masks[current_size] = tl.vi_wr[3 * i1 + 2];
00153             current_size++;
00154         }
00155         else
00156         {
00157             vecFaceArea[current_size - 1] += tl.vr_wr[i1 * numr];  // duplicate dof: accumulate areas (expected only for CGLL)
00158         }
00159     }
00160 
00161     N = current_size;  // or nb_unique, should be the same
00162     return 0;
00163 }
00164 #endif
00165 
00166 ///////////////////////////////////////////////////////////////////////////////
00167 
00168 moab::ErrorCode moab::TempestOnlineMap::WriteParallelMap( const std::string& strFilename )
00169 {
00170     moab::ErrorCode rval;
00171 
00172     size_t lastindex      = strFilename.find_last_of( "." );
00173     std::string extension = strFilename.substr( lastindex + 1, strFilename.size() );
00174 
00175     // Write the map file to disk in parallel
00176     if( extension == "nc" )
00177     {
00178         /* Invoke the actual call to write the parallel map to disk in SCRIP format */
00179         rval = this->WriteSCRIPMapFile( strFilename.c_str() );MB_CHK_ERR( rval );
00180     }
00181     else
00182     {
00183         /* Write to the parallel H5M format */
00184         rval = this->WriteHDF5MapFile( strFilename.c_str() );MB_CHK_ERR( rval );
00185     }
00186 
00187     return rval;
00188 }
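// A minimal usage sketch (hypothetical caller code, not part of this file): once the weights have been
// computed, the same entry point writes either format, selected purely by the file extension.
//
//   moab::TempestOnlineMap* onlineMap = ...;                      // assumed to hold computed weights
//   rval = onlineMap->WriteParallelMap( "src_to_tgt_map.nc" );    // SCRIP-format NetCDF output
//   rval = onlineMap->WriteParallelMap( "src_to_tgt_map.h5m" );   // parallel MOAB H5M output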
00189 
00190 ///////////////////////////////////////////////////////////////////////////////
00191 
00192 moab::ErrorCode moab::TempestOnlineMap::WriteSCRIPMapFile( const std::string& strFilename )
00193 {
00194     NcError error( NcError::silent_nonfatal );
00195 
00196 #ifdef MOAB_HAVE_NETCDFPAR
00197     bool is_independent = true;
00198     ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strFilename.c_str(), NcFile::Replace, NcFile::Netcdf4 );
00199     // ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strFilename.c_str(), NcmpiFile::replace, NcmpiFile::classic5 );
00200 #else
00201     NcFile ncMap( strFilename.c_str(), NcFile::Replace );
00202 #endif
00203 
00204     if( !ncMap.is_valid() )
00205     {
00206         _EXCEPTION1( "Unable to open output map file \"%s\"", strFilename.c_str() );
00207     }
00208 
00209     // Attributes
00210     ncMap.add_att( "Title", "MOAB-TempestRemap Online Regridding Weight Generator" );
00211 
00212     /**
00213      * Need to get the global maximum of the number of vertices per element.
00214      * Key issue: when calling InitializeCoordinatesFromMeshFV, the allocations for
00215      * dVertexLon/dVertexLat are made based on the maximum number of vertices on the current
00216      * process. However, when writing this out, other processes may have a different size for
00217      * the same array, which makes it messy to consolidate in h5mtoscrip eventually.
00218      **/
00219 
00220     /* Let us compute all relevant data for the current original source mesh on the process */
00221     DataArray1D< double > vecSourceFaceArea, vecTargetFaceArea;
00222     DataArray1D< double > dSourceCenterLon, dSourceCenterLat, dTargetCenterLon, dTargetCenterLat;
00223     DataArray2D< double > dSourceVertexLon, dSourceVertexLat, dTargetVertexLon, dTargetVertexLat;
00224     if( m_srcDiscType == DiscretizationType_FV || m_srcDiscType == DiscretizationType_PCLOUD )
00225     {
00226         this->InitializeCoordinatesFromMeshFV(
00227             *m_meshInput, dSourceCenterLon, dSourceCenterLat, dSourceVertexLon, dSourceVertexLat,
00228             ( this->m_remapper->m_source_type == moab::TempestRemapper::RLL ), /* fLatLon */
00229             m_remapper->max_source_edges );
00230 
00231         vecSourceFaceArea.Allocate( m_meshInput->vecFaceArea.GetRows() );
00232         for( unsigned i = 0; i < m_meshInput->vecFaceArea.GetRows(); ++i )
00233             vecSourceFaceArea[i] = m_meshInput->vecFaceArea[i];
00234     }
00235     else
00236     {
00237         DataArray3D< double > dataGLLJacobianSrc;
00238         this->InitializeCoordinatesFromMeshFE( *m_meshInput, m_nDofsPEl_Src, dataGLLNodesSrc, dSourceCenterLon,
00239                                                dSourceCenterLat, dSourceVertexLon, dSourceVertexLat );
00240 
00241         // Generate the continuous Jacobian for input mesh
00242         GenerateMetaData( *m_meshInput, m_nDofsPEl_Src, false /* fBubble */, dataGLLNodesSrc, dataGLLJacobianSrc );
00243 
00244         if( m_srcDiscType == DiscretizationType_CGLL )
00245         {
00246             GenerateUniqueJacobian( dataGLLNodesSrc, dataGLLJacobianSrc, vecSourceFaceArea );
00247         }
00248         else
00249         {
00250             GenerateDiscontinuousJacobian( dataGLLJacobianSrc, vecSourceFaceArea );
00251         }
00252     }
00253 
00254     if( m_destDiscType == DiscretizationType_FV || m_destDiscType == DiscretizationType_PCLOUD )
00255     {
00256         this->InitializeCoordinatesFromMeshFV(
00257             *m_meshOutput, dTargetCenterLon, dTargetCenterLat, dTargetVertexLon, dTargetVertexLat,
00258             ( this->m_remapper->m_target_type == moab::TempestRemapper::RLL ), /* fLatLon */
00259             m_remapper->max_target_edges );
00260 
00261         vecTargetFaceArea.Allocate( m_meshOutput->vecFaceArea.GetRows() );
00262         for( unsigned i = 0; i < m_meshOutput->vecFaceArea.GetRows(); ++i )
00263         {
00264             vecTargetFaceArea[i] = m_meshOutput->vecFaceArea[i];
00265         }
00266     }
00267     else
00268     {
00269         DataArray3D< double > dataGLLJacobianDest;
00270         this->InitializeCoordinatesFromMeshFE( *m_meshOutput, m_nDofsPEl_Dest, dataGLLNodesDest, dTargetCenterLon,
00271                                                dTargetCenterLat, dTargetVertexLon, dTargetVertexLat );
00272 
00273         // Generate the continuous Jacobian for the output (target) mesh
00274         GenerateMetaData( *m_meshOutput, m_nDofsPEl_Dest, false /* fBubble */, dataGLLNodesDest, dataGLLJacobianDest );
00275 
00276         if( m_destDiscType == DiscretizationType_CGLL )
00277         {
00278             GenerateUniqueJacobian( dataGLLNodesDest, dataGLLJacobianDest, vecTargetFaceArea );
00279         }
00280         else
00281         {
00282             GenerateDiscontinuousJacobian( dataGLLJacobianDest, vecTargetFaceArea );
00283         }
00284     }
00285 
00286     // Map dimensions
00287     unsigned nA = ( vecSourceFaceArea.GetRows() );
00288     unsigned nB = ( vecTargetFaceArea.GetRows() );
00289 
00290     std::vector< int > masksA, masksB;
00291     ErrorCode rval = m_remapper->GetIMasks( moab::Remapper::SourceMesh, masksA );MB_CHK_SET_ERR( rval, "Trouble getting masks for source" );
00292     rval = m_remapper->GetIMasks( moab::Remapper::TargetMesh, masksB );MB_CHK_SET_ERR( rval, "Trouble getting masks for target" );
00293 
00294     // Number of nodes per Face
00295     int nSourceNodesPerFace = dSourceVertexLon.GetColumns();
00296     int nTargetNodesPerFace = dTargetVertexLon.GetColumns();
00297     // if source or target cells have triangles at the poles, the centers of those triangles need to
00298     // come from the original quad, not from the 3d center converted back to 2d
00299     // (logic adapted from OnlineMap.cpp in TempestRemap)
00300     // right now this is done only for the source mesh; the same logic should be copied for the target mesh
00301 
00302     for( unsigned i = 0; i < nA; i++ )
00303     {
00304         const Face& face = m_meshInput->faces[i];
00305 
00306         int nNodes          = face.edges.size();
00307         int indexNodeAtPole = -1;
00308         if( 3 == nNodes )  // check if one node is at a pole
00309         {
00310             for( int j = 0; j < nNodes; j++ )
00311                 if( fabs( fabs( dSourceVertexLat[i][j] ) - 90.0 ) < 1.0e-12 )
00312                 {
00313                     indexNodeAtPole = j;
00314                     break;
00315                 }
00316         }
00317         if( indexNodeAtPole < 0 ) continue;  // continue i loop, do nothing
00318         // recompute the cell center from 3d data: count the pole node twice, add the other two nodes, and average
00319         int nodeAtPole = face[indexNodeAtPole];  // use the overloaded operator
00320         Node nodePole  = m_meshInput->nodes[nodeAtPole];
00321         Node newCenter = nodePole * 2;
00322         for( int j = 1; j < nNodes; j++ )
00323         {
00324             int indexi       = ( indexNodeAtPole + j ) % nNodes;  // nNodes is 3 !
00325             const Node& node = m_meshInput->nodes[face[indexi]];
00326             newCenter        = newCenter + node;
00327         }
00328         newCenter = newCenter * 0.25;
00329         newCenter = newCenter.Normalized();
00330 
00331 #ifdef VERBOSE
00332         double iniLon = dSourceCenterLon[i], iniLat = dSourceCenterLat[i];
00333 #endif
00334         // dSourceCenterLon, dSourceCenterLat
00335         XYZtoRLL_Deg( newCenter.x, newCenter.y, newCenter.z, dSourceCenterLon[i], dSourceCenterLat[i] );
00336 #ifdef VERBOSE
00337         std::cout << " modify center of triangle from " << iniLon << " " << iniLat << " to " << dSourceCenterLon[i]
00338                   << " " << dSourceCenterLat[i] << "\n";
00339 #endif
00340     }
00341 
00342     // first move data if in parallel
00343 #if defined( MOAB_HAVE_MPI )
00344     int max_row_dof, max_col_dof;  // output; arrays will be re-distributed in chunks [maxdof/size]
00345     // if (size > 1)
00346     {
00347 
00348         int ierr = rearrange_arrays_by_dofs( srccol_gdofmap, vecSourceFaceArea, dSourceCenterLon, dSourceCenterLat,
00349                                              dSourceVertexLon, dSourceVertexLat, masksA, nA, nSourceNodesPerFace,
00350                                              max_col_dof );  // now nA will be close to maxdof/size
00351         if( ierr != 0 )
00352         {
00353             _EXCEPTION1( "Unable to arrange source data %d ", nA );
00354         }
00355         // rearrange target data: (nB)
00356         //
00357         ierr = rearrange_arrays_by_dofs( row_gdofmap, vecTargetFaceArea, dTargetCenterLon, dTargetCenterLat,
00358                                          dTargetVertexLon, dTargetVertexLat, masksB, nB, nTargetNodesPerFace,
00359                                          max_row_dof );  // now nB will be close to maxdof/size
00360         if( ierr != 0 )
00361         {
00362             _EXCEPTION1( "Unable to arrange target data %d ", nB );
00363         }
00364     }
00365 #endif
00366 
00367     // Number of non-zeros in the remap matrix operator
00368     int nS = m_weightMatrix.nonZeros();
00369 
00370 #if defined( MOAB_HAVE_MPI ) && defined( MOAB_HAVE_NETCDFPAR )
00371     int locbuf[5] = {(int)nA, (int)nB, nS, nSourceNodesPerFace, nTargetNodesPerFace};
00372     int offbuf[3] = {0, 0, 0};
00373     int globuf[5] = {0, 0, 0, 0, 0};
00374     MPI_Scan( locbuf, offbuf, 3, MPI_INT, MPI_SUM, m_pcomm->comm() );
00375     MPI_Allreduce( locbuf, globuf, 3, MPI_INT, MPI_SUM, m_pcomm->comm() );
00376     MPI_Allreduce( &locbuf[3], &globuf[3], 2, MPI_INT, MPI_MAX, m_pcomm->comm() );
00377 
00378     // MPI_Scan is inclusive of data in current rank; modify accordingly.
00379     offbuf[0] -= nA;
00380     offbuf[1] -= nB;
00381     offbuf[2] -= nS;
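    // e.g. (hypothetical counts): with nA = {3, 4, 5} on ranks 0..2, the inclusive scan gives {3, 7, 12};
    // subtracting the local nA yields the exclusive write offsets {0, 3, 7} used below in set_cur()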
00382 
00383 #else
00384     int offbuf[3]            = {0, 0, 0};
00385     int globuf[5]            = {(int)nA, (int)nB, nS, nSourceNodesPerFace, nTargetNodesPerFace};
00386 #endif
00387 
00388     // Write output dimensions entries
00389     unsigned nSrcGridDims = ( m_vecSourceDimSizes.size() );
00390     unsigned nDstGridDims = ( m_vecTargetDimSizes.size() );
00391 
00392     NcDim* dimSrcGridRank = ncMap.add_dim( "src_grid_rank", nSrcGridDims );
00393     NcDim* dimDstGridRank = ncMap.add_dim( "dst_grid_rank", nDstGridDims );
00394 
00395     NcVar* varSrcGridDims = ncMap.add_var( "src_grid_dims", ncInt, dimSrcGridRank );
00396     NcVar* varDstGridDims = ncMap.add_var( "dst_grid_dims", ncInt, dimDstGridRank );
00397 
00398 #ifdef MOAB_HAVE_NETCDFPAR
00399     ncMap.enable_var_par_access( varSrcGridDims, is_independent );
00400     ncMap.enable_var_par_access( varDstGridDims, is_independent );
00401 #endif
00402 
00403     char szDim[64];
00404     if( ( nSrcGridDims == 1 ) && ( m_vecSourceDimSizes[0] != (int)nA ) )
00405     {
00406         varSrcGridDims->put( &globuf[0], 1 );
00407         varSrcGridDims->add_att( "name0", "num_dof" );
00408     }
00409     else
00410     {
00411         for( unsigned i = 0; i < m_vecSourceDimSizes.size(); i++ )
00412         {
00413             int tmp = ( i == 0 ? globuf[0] : m_vecSourceDimSizes[i] );
00414             varSrcGridDims->set_cur( nSrcGridDims - i - 1 );
00415             varSrcGridDims->put( &( tmp ), 1 );
00416         }
00417 
00418         for( unsigned i = 0; i < m_vecSourceDimSizes.size(); i++ )
00419         {
00420             sprintf( szDim, "name%i", i );
00421             varSrcGridDims->add_att( szDim, m_vecSourceDimNames[nSrcGridDims - i - 1].c_str() );
00422         }
00423     }
00424 
00425     if( ( nDstGridDims == 1 ) && ( m_vecTargetDimSizes[0] != (int)nB ) )
00426     {
00427         varDstGridDims->put( &globuf[1], 1 );
00428         varDstGridDims->add_att( "name0", "num_dof" );
00429     }
00430     else
00431     {
00432         for( unsigned i = 0; i < m_vecTargetDimSizes.size(); i++ )
00433         {
00434             int tmp = ( i == 0 ? globuf[1] : m_vecTargetDimSizes[i] );
00435             varDstGridDims->set_cur( nDstGridDims - i - 1 );
00436             varDstGridDims->put( &( tmp ), 1 );
00437         }
00438 
00439         for( unsigned i = 0; i < m_vecTargetDimSizes.size(); i++ )
00440         {
00441             sprintf( szDim, "name%i", i );
00442             varDstGridDims->add_att( szDim, m_vecTargetDimNames[nDstGridDims - i - 1].c_str() );
00443         }
00444     }
00445 
00446     // Source and Target mesh resolutions
00447     NcDim* dimNA = ncMap.add_dim( "n_a", globuf[0] );
00448     NcDim* dimNB = ncMap.add_dim( "n_b", globuf[1] );
00449 
00450     // Number of nodes per Face
00451     NcDim* dimNVA = ncMap.add_dim( "nv_a", globuf[3] );
00452     NcDim* dimNVB = ncMap.add_dim( "nv_b", globuf[4] );
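    // SCRIP-style layout: n_a / n_b are the global source / target dof counts, nv_a / nv_b the maximum
    // number of vertices per cell; per-dof center (xc, yc) and vertex (xv, yv) coordinates, masks, areas
    // and fractions follow, and the sparse weights themselves are written later as row / col / S of size n_s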
00453 
00454     // Write coordinates
00455     NcVar* varYCA = ncMap.add_var( "yc_a", ncDouble, dimNA );
00456     NcVar* varYCB = ncMap.add_var( "yc_b", ncDouble, dimNB );
00457 
00458     NcVar* varXCA = ncMap.add_var( "xc_a", ncDouble, dimNA );
00459     NcVar* varXCB = ncMap.add_var( "xc_b", ncDouble, dimNB );
00460 
00461     NcVar* varYVA = ncMap.add_var( "yv_a", ncDouble, dimNA, dimNVA );
00462     NcVar* varYVB = ncMap.add_var( "yv_b", ncDouble, dimNB, dimNVB );
00463 
00464     NcVar* varXVA = ncMap.add_var( "xv_a", ncDouble, dimNA, dimNVA );
00465     NcVar* varXVB = ncMap.add_var( "xv_b", ncDouble, dimNB, dimNVB );
00466 
00467     // Write masks
00468     NcVar* varMaskA = ncMap.add_var( "mask_a", ncInt, dimNA );
00469     NcVar* varMaskB = ncMap.add_var( "mask_b", ncInt, dimNB );
00470 
00471 #ifdef MOAB_HAVE_NETCDFPAR
00472     ncMap.enable_var_par_access( varYCA, is_independent );
00473     ncMap.enable_var_par_access( varYCB, is_independent );
00474     ncMap.enable_var_par_access( varXCA, is_independent );
00475     ncMap.enable_var_par_access( varXCB, is_independent );
00476     ncMap.enable_var_par_access( varYVA, is_independent );
00477     ncMap.enable_var_par_access( varYVB, is_independent );
00478     ncMap.enable_var_par_access( varXVA, is_independent );
00479     ncMap.enable_var_par_access( varXVB, is_independent );
00480     ncMap.enable_var_par_access( varMaskA, is_independent );
00481     ncMap.enable_var_par_access( varMaskB, is_independent );
00482 #endif
00483 
00484     varYCA->add_att( "units", "degrees" );
00485     varYCB->add_att( "units", "degrees" );
00486 
00487     varXCA->add_att( "units", "degrees" );
00488     varXCB->add_att( "units", "degrees" );
00489 
00490     varYVA->add_att( "units", "degrees" );
00491     varYVB->add_att( "units", "degrees" );
00492 
00493     varXVA->add_att( "units", "degrees" );
00494     varXVB->add_att( "units", "degrees" );
00495 
00496     // Verify dimensionality
00497     if( dSourceCenterLon.GetRows() != nA )
00498     {
00499         _EXCEPTIONT( "Mismatch between dSourceCenterLon and nA" );
00500     }
00501     if( dSourceCenterLat.GetRows() != nA )
00502     {
00503         _EXCEPTIONT( "Mismatch between dSourceCenterLat and nA" );
00504     }
00505     if( dTargetCenterLon.GetRows() != nB )
00506     {
00507         _EXCEPTIONT( "Mismatch between dTargetCenterLon and nB" );
00508     }
00509     if( dTargetCenterLat.GetRows() != nB )
00510     {
00511         _EXCEPTIONT( "Mismatch between dTargetCenterLat and nB" );
00512     }
00513     if( dSourceVertexLon.GetRows() != nA )
00514     {
00515         _EXCEPTIONT( "Mismatch between dSourceVertexLon and nA" );
00516     }
00517     if( dSourceVertexLat.GetRows() != nA )
00518     {
00519         _EXCEPTIONT( "Mismatch between dSourceVertexLat and nA" );
00520     }
00521     if( dTargetVertexLon.GetRows() != nB )
00522     {
00523         _EXCEPTIONT( "Mismatch between dTargetVertexLon and nB" );
00524     }
00525     if( dTargetVertexLat.GetRows() != nB )
00526     {
00527         _EXCEPTIONT( "Mismatch between dTargetVertexLat and nB" );
00528     }
00529 
00530     varYCA->set_cur( (long)offbuf[0] );
00531     varYCA->put( &( dSourceCenterLat[0] ), nA );
00532     varYCB->set_cur( (long)offbuf[1] );
00533     varYCB->put( &( dTargetCenterLat[0] ), nB );
00534 
00535     varXCA->set_cur( (long)offbuf[0] );
00536     varXCA->put( &( dSourceCenterLon[0] ), nA );
00537     varXCB->set_cur( (long)offbuf[1] );
00538     varXCB->put( &( dTargetCenterLon[0] ), nB );
00539 
00540     varYVA->set_cur( (long)offbuf[0] );
00541     varYVA->put( &( dSourceVertexLat[0][0] ), nA, nSourceNodesPerFace );
00542     varYVB->set_cur( (long)offbuf[1] );
00543     varYVB->put( &( dTargetVertexLat[0][0] ), nB, nTargetNodesPerFace );
00544 
00545     varXVA->set_cur( (long)offbuf[0] );
00546     varXVA->put( &( dSourceVertexLon[0][0] ), nA, nSourceNodesPerFace );
00547     varXVB->set_cur( (long)offbuf[1] );
00548     varXVB->put( &( dTargetVertexLon[0][0] ), nB, nTargetNodesPerFace );
00549 
00550     varMaskA->set_cur( (long)offbuf[0] );
00551     varMaskA->put( &( masksA[0] ), nA );
00552     varMaskB->set_cur( (long)offbuf[1] );
00553     varMaskB->put( &( masksB[0] ), nB );
00554 
00555     // Write areas
00556     NcVar* varAreaA = ncMap.add_var( "area_a", ncDouble, dimNA );
00557 #ifdef MOAB_HAVE_NETCDFPAR
00558     ncMap.enable_var_par_access( varAreaA, is_independent );
00559 #endif
00560     varAreaA->set_cur( (long)offbuf[0] );
00561     varAreaA->put( &( vecSourceFaceArea[0] ), nA );
00562 
00563     NcVar* varAreaB = ncMap.add_var( "area_b", ncDouble, dimNB );
00564 #ifdef MOAB_HAVE_NETCDFPAR
00565     ncMap.enable_var_par_access( varAreaB, is_independent );
00566 #endif
00567     varAreaB->set_cur( (long)offbuf[1] );
00568     varAreaB->put( &( vecTargetFaceArea[0] ), nB );
00569 
00570     // Write SparseMatrix entries
00571     DataArray1D< int > vecRow( nS );
00572     DataArray1D< int > vecCol( nS );
00573     DataArray1D< double > vecS( nS );
00574     DataArray1D< double > dFracA( nA );
00575     DataArray1D< double > dFracB( nB );
00576 
00577     moab::TupleList tlValRow, tlValCol;
00578     unsigned numr = 1;  // one double per tuple: the weight value
00579     // each value has to be sent to the processor owning its row (for fracB) and to the processor
00580     // owning its col (for fracA)
00581     // vecTargetFaceArea[ row ] also has to be fetched by the col owner, together with the row index
00582     tlValRow.initialize( 2, 0, 0, numr, nS );  // to proc(row),  global row , value
00583     tlValCol.initialize( 3, 0, 0, numr, nS );  // to proc(col),  global row / col, value
00584     tlValRow.enableWriteAccess();
00585     tlValCol.enableWriteAccess();
00586     /*
00587      *
00588              dFracA[ col ] += val / vecSourceFaceArea[ col ] * vecTargetFaceArea[ row ];
00589              dFracB[ row ] += val ;
00590      */
00591     int offset = 0;
00592 #if defined( MOAB_HAVE_MPI )
00593     int nAbase = ( max_col_dof + 1 ) / size;  // it is nA, except last rank ( == size - 1 )
00594     int nBbase = ( max_row_dof + 1 ) / size;  // it is nB, except last rank ( == size - 1 )
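    // ownership rule (must match the redistribution in rearrange_arrays_by_dofs): global dof d lives on
    // rank min( d / base, size - 1 ), where base is nAbase for columns and nBbase for rows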
00595 #endif
00596     for( int i = 0; i < m_weightMatrix.outerSize(); ++i )
00597     {
00598         for( WeightMatrix::InnerIterator it( m_weightMatrix, i ); it; ++it )
00599         {
00600             vecRow[offset] = 1 + this->GetRowGlobalDoF( it.row() );  // row index
00601             vecCol[offset] = 1 + this->GetColGlobalDoF( it.col() );  // col index
00602             vecS[offset]   = it.value();                             // value
00603 
00604 #if defined( MOAB_HAVE_MPI )
00605             {
00606                 // value M(row, col) will contribute to procRow and procCol values for fracA and fracB
00607                 int procRow = ( vecRow[offset] - 1 ) / nBbase;
00608                 if( procRow >= size ) procRow = size - 1;
00609                 int procCol = ( vecCol[offset] - 1 ) / nAbase;
00610                 if( procCol >= size ) procCol = size - 1;
00611                 int nrInd                     = tlValRow.get_n();
00612                 tlValRow.vi_wr[2 * nrInd]     = procRow;
00613                 tlValRow.vi_wr[2 * nrInd + 1] = vecRow[offset] - 1;
00614                 tlValRow.vr_wr[nrInd]         = vecS[offset];
00615                 tlValRow.inc_n();
00616                 int ncInd                     = tlValCol.get_n();
00617                 tlValCol.vi_wr[3 * ncInd]     = procCol;
00618                 tlValCol.vi_wr[3 * ncInd + 1] = vecRow[offset] - 1;
00619                 tlValCol.vi_wr[3 * ncInd + 2] = vecCol[offset] - 1;  // this is column
00620                 tlValCol.vr_wr[ncInd]         = vecS[offset];
00621                 tlValCol.inc_n();
00622             }
00623 #endif
00624             offset++;
00625         }
00626     }
00627 #if defined( MOAB_HAVE_MPI )
00628     // need to send values for their row and col processors, to compute fractions there
00629     // now do the heavy communication
00630     ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tlValCol, 0 );
00631     ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tlValRow, 0 );
00632 
00633     // on the current task we have now received tlValRow, so we can accumulate dFracB[ row ] += val
00634     // reminder of the formulas:
00635     //   dFracA[ col ] += val / vecSourceFaceArea[ col ] * vecTargetFaceArea[ row ];
00636     //   dFracB[ row ] += val ;
00637     for( unsigned i = 0; i < tlValRow.get_n(); i++ )
00638     {
00639         // int fromProc = tlValRow.vi_wr[2 * i];
00640         int gRowInd       = tlValRow.vi_wr[2 * i + 1];
00641         int localIndexRow = gRowInd - nBbase * rank;  // local index on this rank; rank is in [0, size - 1]
00642         double wgt        = tlValRow.vr_wr[i];
00643         assert( localIndexRow >= 0 );
00644         assert( nB - localIndexRow > 0 );
00645         dFracB[localIndexRow] += wgt;
00646     }
00647     // to compute dFracA we need vecTargetFaceArea[ row ]; we know the row, and we can get the proc we need it from
00648 
00649     std::set< int > neededRows;
00650     for( unsigned i = 0; i < tlValCol.get_n(); i++ )
00651     {
00652         int rRowInd = tlValCol.vi_wr[3 * i + 1];
00653         neededRows.insert( rRowInd );
00654         // we need vecTargetFaceAreaGlobal[ rRowInd ]; this exists on proc procRow
00655     }
00656     moab::TupleList tgtAreaReq;
00657     tgtAreaReq.initialize( 2, 0, 0, 0, neededRows.size() );
00658     tgtAreaReq.enableWriteAccess();
00659     for( std::set< int >::iterator sit = neededRows.begin(); sit != neededRows.end(); sit++ )
00660     {
00661         int neededRow = *sit;
00662         int procRow   = neededRow / nBbase;
00663         if( procRow >= size ) procRow = size - 1;
00664         int nr                       = tgtAreaReq.get_n();
00665         tgtAreaReq.vi_wr[2 * nr]     = procRow;
00666         tgtAreaReq.vi_wr[2 * nr + 1] = neededRow;
00667         tgtAreaReq.inc_n();
00668     }
00669 
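    // two crystal-router rounds implement a pull: first send the list of needed rows to their owners
    // (tgtAreaReq), then the owners reply with vecTargetFaceArea[ row ] for each request (tgtAreaInfo)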
00670     ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tgtAreaReq, 0 );
00671     // we need to send back the tgtArea corresponding to row
00672     moab::TupleList tgtAreaInfo;  // load it with tgtArea at row
00673     tgtAreaInfo.initialize( 2, 0, 0, 1, tgtAreaReq.get_n() );
00674     tgtAreaInfo.enableWriteAccess();
00675     for( unsigned i = 0; i < tgtAreaReq.get_n(); i++ )
00676     {
00677         int from_proc     = tgtAreaReq.vi_wr[2 * i];
00678         int row           = tgtAreaReq.vi_wr[2 * i + 1];
00679         int localIndexRow = row - rank * nBbase;
00680         double areaToSend = vecTargetFaceArea[localIndexRow];
00681         // int remoteIndex = tgtAreaReq.vi_wr[3*i + 2] ;
00682 
00683         tgtAreaInfo.vi_wr[2 * i]     = from_proc;  // send back requested info
00684         tgtAreaInfo.vi_wr[2 * i + 1] = row;
00685         tgtAreaInfo.vr_wr[i]         = areaToSend;  // this will be tgt area at row
00686         tgtAreaInfo.inc_n();
00687     }
00688     ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tgtAreaInfo, 0 );
00689 
00690     std::map< int, double > areaAtRow;
00691     for( unsigned i = 0; i < tgtAreaInfo.get_n(); i++ )
00692     {
00693         // we have received back, from the owning processor, the target area for this row
00694         int row        = tgtAreaInfo.vi_wr[2 * i + 1];
00695         areaAtRow[row] = tgtAreaInfo.vr_wr[i];
00696     }
00697 
00698     // we now have, in areaAtRow, the target area for every row we need
00699     // it is keyed by the global row index, so we can compute the remaining piece:
00700     //     dFracA[ col ] += val / vecSourceFaceArea[ col ] * vecTargetFaceArea[ row ];
00701     // areaAtRow[ row ] gives the target area corresponding to each received (row, col, val) tuple
00702     // there should be an easier way :(
00703     for( unsigned i = 0; i < tlValCol.get_n(); i++ )
00704     {
00705         int rRowInd     = tlValCol.vi_wr[3 * i + 1];
00706         int colInd      = tlValCol.vi_wr[3 * i + 2];
00707         double val      = tlValCol.vr_wr[i];
00708         int localColInd = colInd - rank * nAbase;  // < local nA
00709         // we need vecTargetFaceAreaGlobal[ rRowInd ]; this exists on proc procRow
00710         auto itMap = areaAtRow.find( rRowInd );  // it should be different from end
00711         if( itMap != areaAtRow.end() )
00712         {
00713             double areaRow = itMap->second;  // we fished a lot for this !
00714             assert( localColInd < vecSourceFaceArea.GetRows() );
00715             dFracA[localColInd] += val / vecSourceFaceArea[localColInd] * areaRow;
00716         }
00717     }
00718 
00719 #endif
00720     // Load in data
00721     NcDim* dimNS = ncMap.add_dim( "n_s", globuf[2] );
00722 
00723     NcVar* varRow = ncMap.add_var( "row", ncInt, dimNS );
00724     NcVar* varCol = ncMap.add_var( "col", ncInt, dimNS );
00725     NcVar* varS   = ncMap.add_var( "S", ncDouble, dimNS );
00726 #ifdef MOAB_HAVE_NETCDFPAR
00727     ncMap.enable_var_par_access( varRow, is_independent );
00728     ncMap.enable_var_par_access( varCol, is_independent );
00729     ncMap.enable_var_par_access( varS, is_independent );
00730 #endif
00731 
00732     varRow->set_cur( (long)offbuf[2] );
00733     varRow->put( &( vecRow[0] ), nS );
00734 
00735     varCol->set_cur( (long)offbuf[2] );
00736     varCol->put( &( vecCol[0] ), nS );
00737 
00738     varS->set_cur( (long)offbuf[2] );
00739     varS->put( &( vecS[0] ), nS );
00740 
00741     // Calculate and write fractional coverage arrays
00742     NcVar* varFracA = ncMap.add_var( "frac_a", ncDouble, dimNA );
00743 #ifdef MOAB_HAVE_NETCDFPAR
00744     ncMap.enable_var_par_access( varFracA, is_independent );
00745 #endif
00746     varFracA->add_att( "name", "fraction of target coverage of source dof" );
00747     varFracA->add_att( "units", "unitless" );
00748     varFracA->set_cur( (long)offbuf[0] );
00749     varFracA->put( &( dFracA[0] ), nA );
00750 
00751     NcVar* varFracB = ncMap.add_var( "frac_b", ncDouble, dimNB );
00752 #ifdef MOAB_HAVE_NETCDFPAR
00753     ncMap.enable_var_par_access( varFracB, is_independent );
00754 #endif
00755     varFracB->add_att( "name", "fraction of source coverage of target dof" );
00756     varFracB->add_att( "units", "unitless" );
00757     varFracB->set_cur( (long)offbuf[1] );
00758     varFracB->put( &( dFracB[0] ), nB );
00759 
00760     // Add global attributes
00761     // std::map<std::string, std::string>::const_iterator iterAttributes =
00762     //     mapAttributes.begin();
00763     // for (; iterAttributes != mapAttributes.end(); iterAttributes++) {
00764     //     ncMap.add_att(
00765     //         iterAttributes->first.c_str(),
00766     //         iterAttributes->second.c_str());
00767     // }
00768 
00769     ncMap.close();
00770 
00771     return moab::MB_SUCCESS;
00772 }
00773 
00774 ///////////////////////////////////////////////////////////////////////////////
00775 
00776 moab::ErrorCode moab::TempestOnlineMap::WriteHDF5MapFile( const std::string& strOutputFile )
00777 {
00778     moab::ErrorCode rval;
00779 
00780     /**
00781      * Need to get the global maximum of the number of vertices per element.
00782      * Key issue: when calling InitializeCoordinatesFromMeshFV, the allocations for
00783      * dVertexLon/dVertexLat are made based on the maximum number of vertices on the current
00784      * process. However, when writing this out, other processes may have a different size for
00785      * the same array, which makes it messy to consolidate in h5mtoscrip eventually.
00786      **/
00787 
00788     /* Let us compute all relevant data for the current original source mesh on the process */
00789     DataArray1D< double > vecSourceFaceArea, vecTargetFaceArea;
00790     DataArray1D< double > dSourceCenterLon, dSourceCenterLat, dTargetCenterLon, dTargetCenterLat;
00791     DataArray2D< double > dSourceVertexLon, dSourceVertexLat, dTargetVertexLon, dTargetVertexLat;
00792     if( m_srcDiscType == DiscretizationType_FV || m_srcDiscType == DiscretizationType_PCLOUD )
00793     {
00794         this->InitializeCoordinatesFromMeshFV(
00795             *m_meshInput, dSourceCenterLon, dSourceCenterLat, dSourceVertexLon, dSourceVertexLat,
00796             ( this->m_remapper->m_source_type == moab::TempestRemapper::RLL ) /* fLatLon */,
00797             m_remapper->max_source_edges );
00798 
00799         vecSourceFaceArea.Allocate( m_meshInput->vecFaceArea.GetRows() );
00800         for( unsigned i = 0; i < m_meshInput->vecFaceArea.GetRows(); ++i )
00801             vecSourceFaceArea[i] = m_meshInput->vecFaceArea[i];
00802     }
00803     else
00804     {
00805         DataArray3D< double > dataGLLJacobianSrc;
00806         this->InitializeCoordinatesFromMeshFE( *m_meshInput, m_nDofsPEl_Src, dataGLLNodesSrc, dSourceCenterLon,
00807                                                dSourceCenterLat, dSourceVertexLon, dSourceVertexLat );
00808 
00809         // Generate the continuous Jacobian for input mesh
00810         GenerateMetaData( *m_meshInput, m_nDofsPEl_Src, false /* fBubble */, dataGLLNodesSrc, dataGLLJacobianSrc );
00811 
00812         if( m_srcDiscType == DiscretizationType_CGLL )
00813         {
00814             GenerateUniqueJacobian( dataGLLNodesSrc, dataGLLJacobianSrc, m_meshInput->vecFaceArea );
00815         }
00816         else
00817         {
00818             GenerateDiscontinuousJacobian( dataGLLJacobianSrc, m_meshInput->vecFaceArea );
00819         }
00820 
00821         vecSourceFaceArea.Allocate( m_meshInput->faces.size() * m_nDofsPEl_Src * m_nDofsPEl_Src );
00822         int offset = 0;
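        // scatter the per-element GLL Jacobian weights (s, t, e) into the map's source dof ordering,
        // through the discontinuous-to-continuous dof map srccol_dtoc_dofmap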
00823         for( size_t e = 0; e < m_meshInput->faces.size(); e++ )
00824         {
00825             for( int s = 0; s < m_nDofsPEl_Src; s++ )
00826             {
00827                 for( int t = 0; t < m_nDofsPEl_Src; t++ )
00828                 {
00829                     vecSourceFaceArea[srccol_dtoc_dofmap[offset + s * m_nDofsPEl_Src + t]] =
00830                         dataGLLJacobianSrc[s][t][e];
00831                 }
00832             }
00833             offset += m_nDofsPEl_Src * m_nDofsPEl_Src;
00834         }
00835     }
00836 
00837     if( m_destDiscType == DiscretizationType_FV || m_destDiscType == DiscretizationType_PCLOUD )
00838     {
00839         this->InitializeCoordinatesFromMeshFV(
00840             *m_meshOutput, dTargetCenterLon, dTargetCenterLat, dTargetVertexLon, dTargetVertexLat,
00841             ( this->m_remapper->m_target_type == moab::TempestRemapper::RLL ) /* fLatLon */,
00842             m_remapper->max_target_edges );
00843 
00844         vecTargetFaceArea.Allocate( m_meshOutput->vecFaceArea.GetRows() );
00845         for( unsigned i = 0; i < m_meshOutput->vecFaceArea.GetRows(); ++i )
00846             vecTargetFaceArea[i] = m_meshOutput->vecFaceArea[i];
00847     }
00848     else
00849     {
00850         DataArray3D< double > dataGLLJacobianDest;
00851         this->InitializeCoordinatesFromMeshFE( *m_meshOutput, m_nDofsPEl_Dest, dataGLLNodesDest, dTargetCenterLon,
00852                                                dTargetCenterLat, dTargetVertexLon, dTargetVertexLat );
00853 
00854         // Generate the continuous Jacobian for the output (target) mesh
00855         GenerateMetaData( *m_meshOutput, m_nDofsPEl_Dest, false /* fBubble */, dataGLLNodesDest, dataGLLJacobianDest );
00856 
00857         if( m_destDiscType == DiscretizationType_CGLL )
00858         {
00859             GenerateUniqueJacobian( dataGLLNodesDest, dataGLLJacobianDest, m_meshOutput->vecFaceArea );
00860         }
00861         else
00862         {
00863             GenerateDiscontinuousJacobian( dataGLLJacobianDest, m_meshOutput->vecFaceArea );
00864         }
00865 
00866         vecTargetFaceArea.Allocate( m_meshOutput->faces.size() * m_nDofsPEl_Dest * m_nDofsPEl_Dest );
00867         int offset = 0;
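        // same scatter as on the source side, using row_dtoc_dofmap for the target dof ordering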
00868         for( size_t e = 0; e < m_meshOutput->faces.size(); e++ )
00869         {
00870             for( int s = 0; s < m_nDofsPEl_Dest; s++ )
00871             {
00872                 for( int t = 0; t < m_nDofsPEl_Dest; t++ )
00873                 {
00874                     vecTargetFaceArea[row_dtoc_dofmap[offset + s * m_nDofsPEl_Dest + t]] = dataGLLJacobianDest[s][t][e];
00875                 }
00876             }
00877             offset += m_nDofsPEl_Dest * m_nDofsPEl_Dest;
00878         }
00879     }
00880 
00881     moab::EntityHandle& m_meshOverlapSet = m_remapper->m_overlap_set;
00882     int tot_src_ents                     = m_remapper->m_source_entities.size();
00883     int tot_tgt_ents                     = m_remapper->m_target_entities.size();
00884     int tot_src_size                     = dSourceCenterLon.GetRows();
00885     int tot_tgt_size                     = m_dTargetCenterLon.GetRows();
00886     int tot_vsrc_size                    = dSourceVertexLon.GetRows() * dSourceVertexLon.GetColumns();
00887     int tot_vtgt_size                    = m_dTargetVertexLon.GetRows() * m_dTargetVertexLon.GetColumns();
00888 
00889     const int weightMatNNZ = m_weightMatrix.nonZeros();
00890     moab::Tag tagMapMetaData, tagMapIndexRow, tagMapIndexCol, tagMapValues, srcEleIDs, tgtEleIDs;
00891     rval = m_interface->tag_get_handle( "SMAT_DATA", 13, moab::MB_TYPE_INTEGER, tagMapMetaData,
00892                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00893     rval = m_interface->tag_get_handle( "SMAT_ROWS", weightMatNNZ, moab::MB_TYPE_INTEGER, tagMapIndexRow,
00894                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00895     rval = m_interface->tag_get_handle( "SMAT_COLS", weightMatNNZ, moab::MB_TYPE_INTEGER, tagMapIndexCol,
00896                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00897     rval = m_interface->tag_get_handle( "SMAT_VALS", weightMatNNZ, moab::MB_TYPE_DOUBLE, tagMapValues,
00898                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00899     rval = m_interface->tag_get_handle( "SourceGIDS", tot_src_size, moab::MB_TYPE_INTEGER, srcEleIDs,
00900                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00901     rval = m_interface->tag_get_handle( "TargetGIDS", tot_tgt_size, moab::MB_TYPE_INTEGER, tgtEleIDs,
00902                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00903     moab::Tag srcAreaValues, tgtAreaValues;
00904     rval = m_interface->tag_get_handle( "SourceAreas", tot_src_size, moab::MB_TYPE_DOUBLE, srcAreaValues,
00905                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00906     rval = m_interface->tag_get_handle( "TargetAreas", tot_tgt_size, moab::MB_TYPE_DOUBLE, tgtAreaValues,
00907                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00908     moab::Tag tagSrcCoordsCLon, tagSrcCoordsCLat, tagTgtCoordsCLon, tagTgtCoordsCLat;
00909     rval = m_interface->tag_get_handle( "SourceCoordCenterLon", tot_src_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsCLon,
00910                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00911     rval = m_interface->tag_get_handle( "SourceCoordCenterLat", tot_src_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsCLat,
00912                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00913     rval = m_interface->tag_get_handle( "TargetCoordCenterLon", tot_tgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsCLon,
00914                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00915     rval = m_interface->tag_get_handle( "TargetCoordCenterLat", tot_tgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsCLat,
00916                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00917     moab::Tag tagSrcCoordsVLon, tagSrcCoordsVLat, tagTgtCoordsVLon, tagTgtCoordsVLat;
00918     rval = m_interface->tag_get_handle( "SourceCoordVertexLon", tot_vsrc_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsVLon,
00919                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00920     rval = m_interface->tag_get_handle( "SourceCoordVertexLat", tot_vsrc_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsVLat,
00921                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00922     rval = m_interface->tag_get_handle( "TargetCoordVertexLon", tot_vtgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsVLon,
00923                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00924     rval = m_interface->tag_get_handle( "TargetCoordVertexLat", tot_vtgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsVLat,
00925                                         moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00926     moab::Tag srcMaskValues, tgtMaskValues;
00927     if( m_iSourceMask.IsAttached() )
00928     {
00929         rval = m_interface->tag_get_handle( "SourceMask", m_iSourceMask.GetRows(), moab::MB_TYPE_INTEGER, srcMaskValues,
00930                                             moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00931     }
00932     if( m_iTargetMask.IsAttached() )
00933     {
00934         rval = m_interface->tag_get_handle( "TargetMask", m_iTargetMask.GetRows(), moab::MB_TYPE_INTEGER, tgtMaskValues,
00935                                             moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
00936     }
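    // the sparse, variable-length tags created above are attached to the overlap mesh set
    // (m_meshOverlapSet) and filled below via tag_set_by_ptr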
00937 
00938     std::vector< int > smatrowvals( weightMatNNZ ), smatcolvals( weightMatNNZ );
00939     std::vector< double > smatvals( weightMatNNZ );
00940     // const double* smatvals = m_weightMatrix.valuePtr();
00941     // Loop over the matrix entries and copy the global row/col DoF indices and the weight values
00942     for( int k = 0, offset = 0; k < m_weightMatrix.outerSize(); ++k )
00943     {
00944         for( moab::TempestOnlineMap::WeightMatrix::InnerIterator it( m_weightMatrix, k ); it; ++it, ++offset )
00945         {
00946             smatrowvals[offset] = this->GetRowGlobalDoF( it.row() );
00947             smatcolvals[offset] = this->GetColGlobalDoF( it.col() );
00948             smatvals[offset]    = it.value();
00949         }
00950     }
00951 
00952     /* Set the global IDs for the DoFs */
00953     ////
00954     // col_gdofmap [ col_ldofmap [ 0 : local_ndofs ] ] = GDOF
00955     // row_gdofmap [ row_ldofmap [ 0 : local_ndofs ] ] = GDOF
00956     ////
00957     int maxrow = 0, maxcol = 0;
00958     std::vector< int > src_global_dofs( tot_src_size ), tgt_global_dofs( tot_tgt_size );
00959     for( int i = 0; i < tot_src_size; ++i )
00960     {
00961         src_global_dofs[i] = srccol_gdofmap[i];
00962         maxcol             = ( src_global_dofs[i] > maxcol ) ? src_global_dofs[i] : maxcol;
00963     }
00964 
00965     for( int i = 0; i < tot_tgt_size; ++i )
00966     {
00967         tgt_global_dofs[i] = row_gdofmap[i];
00968         maxrow             = ( tgt_global_dofs[i] > maxrow ) ? tgt_global_dofs[i] : maxrow;
00969     }
00970 
00971     ///////////////////////////////////////////////////////////////////////////
00972     // The metadata in H5M file contains the following data:
00973     //
00974     //   1. n_a: Total source entities: (number of elements in source mesh)
00975     //   2. n_b: Total target entities: (number of elements in target mesh)
00976     //   3. nv_a: Max edge size of elements in source mesh
00977     //   4. nv_b: Max edge size of elements in target mesh
00978     //   5. maxrows: Number of rows in remap weight matrix
00979     //   6. maxcols: Number of cols in remap weight matrix
00980     //   7. nnz: Number of total nnz in sparse remap weight matrix
00981     //   8. np_a: The order of the field description on the source mesh: >= 1
00982     //   9. np_b: The order of the field description on the target mesh: >= 1
00983     //   10. method_a: The type of discretization for field on source mesh: [0 = FV, 1 = cGLL, 2 =
00984     //   dGLL]
00985     //   11. method_b: The type of discretization for field on target mesh: [0 = FV, 1 = cGLL, 2 =
00986     //   dGLL]
00987     //   12. conserved: Flag to specify whether the remap operator has conservation constraints: [0,
00988     //   1]
00989     //   13. monotonicity: Flags to specify whether the remap operator has monotonicity constraints:
00990     //   [0, 1, 2]
00991     //
00992     ///////////////////////////////////////////////////////////////////////////
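    // in the MPI build, the local metadata is combined across ranks below: entity counts and NNZ are
    // summed (MPI_SUM), max edge sizes and matrix dimensions are reduced with MPI_MAX, and only the
    // root rank attaches the global metadata to the root set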
00993     int map_disc_details[6];
00994     map_disc_details[0] = m_nDofsPEl_Src;
00995     map_disc_details[1] = m_nDofsPEl_Dest;
00996     map_disc_details[2] = ( m_srcDiscType == DiscretizationType_FV || m_srcDiscType == DiscretizationType_PCLOUD
00997                                 ? 0
00998                                 : ( m_srcDiscType == DiscretizationType_CGLL ? 1 : 2 ) );
00999     map_disc_details[3] = ( m_destDiscType == DiscretizationType_FV || m_destDiscType == DiscretizationType_PCLOUD
01000                                 ? 0
01001                                 : ( m_destDiscType == DiscretizationType_CGLL ? 1 : 2 ) );
01002     map_disc_details[4] = ( m_bConserved ? 1 : 0 );
01003     map_disc_details[5] = m_iMonotonicity;
01004 
01005 #ifdef MOAB_HAVE_MPI
01006     int loc_smatmetadata[13] = {tot_src_ents,
01007                                 tot_tgt_ents,
01008                                 m_remapper->max_source_edges,
01009                                 m_remapper->max_target_edges,
01010                                 maxrow + 1,
01011                                 maxcol + 1,
01012                                 weightMatNNZ,
01013                                 map_disc_details[0],
01014                                 map_disc_details[1],
01015                                 map_disc_details[2],
01016                                 map_disc_details[3],
01017                                 map_disc_details[4],
01018                                 map_disc_details[5]};
01019     rval                     = m_interface->tag_set_data( tagMapMetaData, &m_meshOverlapSet, 1, &loc_smatmetadata[0] );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01020     int glb_smatmetadata[13] = {0,
01021                                 0,
01022                                 0,
01023                                 0,
01024                                 0,
01025                                 0,
01026                                 0,
01027                                 map_disc_details[0],
01028                                 map_disc_details[1],
01029                                 map_disc_details[2],
01030                                 map_disc_details[3],
01031                                 map_disc_details[4],
01032                                 map_disc_details[5]};
01033     int loc_buf[7]           = {
01034         tot_src_ents, tot_tgt_ents, weightMatNNZ, m_remapper->max_source_edges, m_remapper->max_target_edges,
01035         maxrow,       maxcol};
01036     int glb_buf[4] = {0, 0, 0, 0};
01037     MPI_Reduce( &loc_buf[0], &glb_buf[0], 3, MPI_INT, MPI_SUM, 0, m_pcomm->comm() );
01038     glb_smatmetadata[0] = glb_buf[0];
01039     glb_smatmetadata[1] = glb_buf[1];
01040     glb_smatmetadata[6] = glb_buf[2];
01041     MPI_Reduce( &loc_buf[3], &glb_buf[0], 4, MPI_INT, MPI_MAX, 0, m_pcomm->comm() );
01042     glb_smatmetadata[2] = glb_buf[0];
01043     glb_smatmetadata[3] = glb_buf[1];
01044     glb_smatmetadata[4] = glb_buf[2];
01045     glb_smatmetadata[5] = glb_buf[3];
01046 #else
01047     int glb_smatmetadata[13] = {tot_src_ents,
01048                                 tot_tgt_ents,
01049                                 m_remapper->max_source_edges,
01050                                 m_remapper->max_target_edges,
01051                                 maxrow,
01052                                 maxcol,
01053                                 weightMatNNZ,
01054                                 map_disc_details[0],
01055                                 map_disc_details[1],
01056                                 map_disc_details[2],
01057                                 map_disc_details[3],
01058                                 map_disc_details[4],
01059                                 map_disc_details[5]};
01060 #endif
01061     // These values represent number of rows and columns. So should be 1-based.
01062     glb_smatmetadata[4]++;
01063     glb_smatmetadata[5]++;
01064 
01065     if( this->is_root )
01066     {
01067         std::cout << "  " << this->rank << "  Writing remap weights with size [" << glb_smatmetadata[4] << " X "
01068                   << glb_smatmetadata[5] << "] and NNZ = " << glb_smatmetadata[6] << std::endl;
01069         EntityHandle root_set = 0;
01070         rval                  = m_interface->tag_set_data( tagMapMetaData, &root_set, 1, &glb_smatmetadata[0] );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01071     }
01072 
01073     int dsize;
01074     const int numval          = weightMatNNZ;
01075     const void* smatrowvals_d = smatrowvals.data();
01076     const void* smatcolvals_d = smatcolvals.data();
01077     const void* smatvals_d    = smatvals.data();
01078     rval = m_interface->tag_set_by_ptr( tagMapIndexRow, &m_meshOverlapSet, 1, &smatrowvals_d, &numval );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01079     rval = m_interface->tag_set_by_ptr( tagMapIndexCol, &m_meshOverlapSet, 1, &smatcolvals_d, &numval );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01080     rval = m_interface->tag_set_by_ptr( tagMapValues, &m_meshOverlapSet, 1, &smatvals_d, &numval );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01081 
01082     /* Set the global IDs for the DoFs */
01083     const void* srceleidvals_d = src_global_dofs.data();
01084     const void* tgteleidvals_d = tgt_global_dofs.data();
01085     dsize                      = src_global_dofs.size();
01086     rval = m_interface->tag_set_by_ptr( srcEleIDs, &m_meshOverlapSet, 1, &srceleidvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01087     dsize = tgt_global_dofs.size();
01088     rval  = m_interface->tag_set_by_ptr( tgtEleIDs, &m_meshOverlapSet, 1, &tgteleidvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01089 
01090     /* Set the source and target areas */
01091     const void* srcareavals_d = vecSourceFaceArea;
01092     const void* tgtareavals_d = vecTargetFaceArea;
01093     dsize                     = tot_src_size;
01094     rval = m_interface->tag_set_by_ptr( srcAreaValues, &m_meshOverlapSet, 1, &srcareavals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01095     dsize = tot_tgt_size;
01096     rval  = m_interface->tag_set_by_ptr( tgtAreaValues, &m_meshOverlapSet, 1, &tgtareavals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01097 
01098     /* Set the coordinates for source and target center vertices */
01099     const void* srccoordsclonvals_d = &dSourceCenterLon[0];
01100     const void* srccoordsclatvals_d = &dSourceCenterLat[0];
01101     dsize                           = dSourceCenterLon.GetRows();
01102     rval = m_interface->tag_set_by_ptr( tagSrcCoordsCLon, &m_meshOverlapSet, 1, &srccoordsclonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01103     rval = m_interface->tag_set_by_ptr( tagSrcCoordsCLat, &m_meshOverlapSet, 1, &srccoordsclatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01104     const void* tgtcoordsclonvals_d = &m_dTargetCenterLon[0];
01105     const void* tgtcoordsclatvals_d = &m_dTargetCenterLat[0];
01106     dsize                           = vecTargetFaceArea.GetRows();
01107     rval = m_interface->tag_set_by_ptr( tagTgtCoordsCLon, &m_meshOverlapSet, 1, &tgtcoordsclonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01108     rval = m_interface->tag_set_by_ptr( tagTgtCoordsCLat, &m_meshOverlapSet, 1, &tgtcoordsclatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01109 
01110     /* Set the coordinates for source and target element vertices */
01111     const void* srccoordsvlonvals_d = &( dSourceVertexLon[0][0] );
01112     const void* srccoordsvlatvals_d = &( dSourceVertexLat[0][0] );
01113     dsize                           = dSourceVertexLon.GetRows() * dSourceVertexLon.GetColumns();
01114     rval = m_interface->tag_set_by_ptr( tagSrcCoordsVLon, &m_meshOverlapSet, 1, &srccoordsvlonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01115     rval = m_interface->tag_set_by_ptr( tagSrcCoordsVLat, &m_meshOverlapSet, 1, &srccoordsvlatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01116     const void* tgtcoordsvlonvals_d = &( m_dTargetVertexLon[0][0] );
01117     const void* tgtcoordsvlatvals_d = &( m_dTargetVertexLat[0][0] );
01118     dsize                           = m_dTargetVertexLon.GetRows() * m_dTargetVertexLon.GetColumns();
01119     rval = m_interface->tag_set_by_ptr( tagTgtCoordsVLon, &m_meshOverlapSet, 1, &tgtcoordsvlonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01120     rval = m_interface->tag_set_by_ptr( tagTgtCoordsVLat, &m_meshOverlapSet, 1, &tgtcoordsvlatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01121 
01122     /* Set the masks for source and target meshes if available */
01123     if( m_iSourceMask.IsAttached() )
01124     {
01125         const void* srcmaskvals_d = m_iSourceMask;
01126         dsize                     = m_iSourceMask.GetRows();
01127         rval = m_interface->tag_set_by_ptr( srcMaskValues, &m_meshOverlapSet, 1, &srcmaskvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01128     }
01129 
01130     if( m_iTargetMask.IsAttached() )
01131     {
01132         const void* tgtmaskvals_d = m_iTargetMask;
01133         dsize                     = m_iTargetMask.GetRows();
01134         rval = m_interface->tag_set_by_ptr( tgtMaskValues, &m_meshOverlapSet, 1, &tgtmaskvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01135     }
01136 
01137 #ifdef MOAB_HAVE_MPI
01138     const char* writeOptions = ( this->size > 1 ? "PARALLEL=WRITE_PART" : "" );
01139 #else
01140     const char* writeOptions = "";
01141 #endif
01142 
01143     // EntityHandle sets[3] = {m_remapper->m_source_set, m_remapper->m_target_set, m_remapper->m_overlap_set};
01144     EntityHandle sets[1] = {m_remapper->m_overlap_set};
01145     rval                 = m_interface->write_file( strOutputFile.c_str(), NULL, writeOptions, sets, 1 );MB_CHK_ERR( rval );
01146 
01147 #ifdef WRITE_SCRIP_FILE
01148     sstr.str( "" );
01149     sstr << ctx.outFilename.substr( 0, lastindex ) << "_" << proc_id << ".nc";
01150     std::map< std::string, std::string > mapAttributes;
01151     mapAttributes["Creator"] = "MOAB mbtempest workflow";
01152     if( !ctx.proc_id ) std::cout << "Writing offline map to file: " << sstr.str() << std::endl;
01153     this->Write( strOutputFile.c_str(), mapAttributes, NcFile::Netcdf4 );
01154     sstr.str( "" );
01155 #endif
01156 
01157     return moab::MB_SUCCESS;
01158 }
01159 
01160 ///////////////////////////////////////////////////////////////////////////////
01161 
01162 void print_progress( const int barWidth, const float progress, const char* message )
01163 {
01164     std::cout << message << " [";
01165     int pos = barWidth * progress;
01166     for( int i = 0; i < barWidth; ++i )
01167     {
01168         if( i < pos )
01169             std::cout << "=";
01170         else if( i == pos )
01171             std::cout << ">";
01172         else
01173             std::cout << " ";
01174     }
01175     std::cout << "] " << int( progress * 100.0 ) << " %\r";
01176     std::cout.flush();
01177 }
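
// Illustrative usage sketch for print_progress (the loop variables below are placeholders,
// not taken from this file):
//   for( int i = 0; i <= nTotal; ++i )
//       print_progress( 50, static_cast< float >( i ) / nTotal, "Reading map entries" );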
01178 
01179 ///////////////////////////////////////////////////////////////////////////////
01180 
01181 moab::ErrorCode moab::TempestOnlineMap::ReadParallelMap( const char* strSource,
01182                                                          const std::vector< int >& owned_dof_ids,
01183                                                          bool row_partition )
01184 {
01185     NcError error( NcError::silent_nonfatal );
01186 
01187     NcVar *varRow = NULL, *varCol = NULL, *varS = NULL;
01188     int nS = 0, nA = 0, nB = 0;
01189 #ifdef MOAB_HAVE_PNETCDF
01190     // some variables are used only in case the netcdfpar reader fails
01191     int ncfile = -1, ret = 0;
01192     int ndims, nvars, ngatts, unlimited;
01193 #endif
01194 #ifdef MOAB_HAVE_NETCDFPAR
01195     bool is_independent = true;
01196     ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strSource, NcFile::ReadOnly, NcFile::Netcdf4 );
01197     // ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strFilename.c_str(), NcmpiFile::replace, NcmpiFile::classic5 );
01198 #else
01199     NcFile ncMap( strSource, NcFile::ReadOnly );
01200 #endif
01201 
01202 #define CHECK_EXCEPTION( obj, type, varstr )                                                      \
01203     {                                                                                             \
01204         if( obj == NULL )                                                                         \
01205         {                                                                                         \
01206             _EXCEPTION3( "Map file \"%s\" does not contain %s \"%s\"", strSource, type, varstr ); \
01207         }                                                                                         \
01208     }
01209 
01210     // Read SparseMatrix entries
01211 
01212     if( ncMap.is_valid() )
01213     {
01214         NcDim* dimNS = ncMap.get_dim( "n_s" );
01215         CHECK_EXCEPTION( dimNS, "dimension", "n_s" );
01216 
01217         NcDim* dimNA = ncMap.get_dim( "n_a" );
01218         CHECK_EXCEPTION( dimNA, "dimension", "n_a" );
01219 
01220         NcDim* dimNB = ncMap.get_dim( "n_b" );
01221         CHECK_EXCEPTION( dimNB, "dimension", "n_b" );
01222 
01223         // store the total number of nonzeros and the source/target sizes
01224         nS = dimNS->size();
01225         nA = dimNA->size();
01226         nB = dimNB->size();
01227 
01228         varRow = ncMap.get_var( "row" );
01229         CHECK_EXCEPTION( varRow, "variable", "row" );
01230 
01231         varCol = ncMap.get_var( "col" );
01232         CHECK_EXCEPTION( varCol, "variable", "col" );
01233 
01234         varS = ncMap.get_var( "S" );
01235         CHECK_EXCEPTION( varS, "variable", "S" );
01236 
01237 #ifdef MOAB_HAVE_NETCDFPAR
01238         ncMap.enable_var_par_access( varRow, is_independent );
01239         ncMap.enable_var_par_access( varCol, is_independent );
01240         ncMap.enable_var_par_access( varS, is_independent );
01241 #endif
01242     }
01243     else
01244     {
01245 #ifdef MOAB_HAVE_PNETCDF
01246         // read the file using pnetcdf directly, in parallel; MPI is required here and we no longer check for it
01247         // (why would one build with pnetcdf but without MPI?)
01248         // ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strSource, NcFile::ReadOnly, NcFile::Netcdf4 );
01249         ret = ncmpi_open( m_pcomm->comm(), strSource, NC_NOWRITE, MPI_INFO_NULL, &ncfile );
01250         ERR_PARNC( ret );  // bail out completely
01251         ret = ncmpi_inq( ncfile, &ndims, &nvars, &ngatts, &unlimited );
01252         ERR_PARNC( ret );
01253         // find the dimension ids and lengths for n_s and n_b
01254         int ins;
01255         ret = ncmpi_inq_dimid( ncfile, "n_s", &ins );
01256         ERR_PARNC( ret );
01257         MPI_Offset leng;
01258         ret = ncmpi_inq_dimlen( ncfile, ins, &leng );
01259         ERR_PARNC( ret );
01260         nS  = (int)leng;
01261         ret = ncmpi_inq_dimid( ncfile, "n_b", &ins );
01262         ERR_PARNC( ret );
01263         ret = ncmpi_inq_dimlen( ncfile, ins, &leng );
01264         ERR_PARNC( ret );
01265         nB = (int)leng;
01266 #else
01267         _EXCEPTION1( "cannot read the file %s", strSource );
01268 #endif
01269     }
01270 
01271     // Get the local sparse matrix (map) object that every process will populate
01272     SparseMatrix< double >& sparseMatrix = this->GetSparseMatrix();
01273 
01274     int localSize   = nS / size;
01275     long offsetRead = rank * localSize;
01276     // leftovers on last rank
01277     if( rank == size - 1 )
01278     {
01279         localSize += nS % size;
01280     }
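    // e.g., with nS = 10 entries and size = 4 ranks, localSize = 2 and the read offsets are 0, 2, 4 and 6;
    // the last rank also reads the remainder, i.e. 2 + 10 % 4 = 4 entries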
01281 
01282     std::vector< int > vecRow, vecCol;
01283     std::vector< double > vecS;
01284     vecRow.resize( localSize );
01285     vecCol.resize( localSize );
01286     vecS.resize( localSize );
01287 
01288     if( ncMap.is_valid() )
01289     {
01290         varRow->set_cur( (long)( offsetRead ) );
01291         varRow->get( &( vecRow[0] ), localSize );
01292 
01293         varCol->set_cur( (long)( offsetRead ) );
01294         varCol->get( &( vecCol[0] ), localSize );
01295 
01296         varS->set_cur( (long)( offsetRead ) );
01297         varS->get( &( vecS[0] ), localSize );
01298 
01299         ncMap.close();
01300     }
01301     else
01302     {
01303 #ifdef MOAB_HAVE_PNETCDF
01304         // fill the local vectors with the variables from pnetcdf file; first inquire, then fill
01305         MPI_Offset start = (MPI_Offset)offsetRead;
01306         MPI_Offset count = (MPI_Offset)localSize;
01307         int varid;
01308         ret = ncmpi_inq_varid( ncfile, "S", &varid );
01309         ERR_PARNC( ret );
01310         ret = ncmpi_get_vara_double_all( ncfile, varid, &start, &count, &vecS[0] );
01311         ERR_PARNC( ret );
01312         ret = ncmpi_inq_varid( ncfile, "row", &varid );
01313         ERR_PARNC( ret );
01314         ret = ncmpi_get_vara_int_all( ncfile, varid, &start, &count, &vecRow[0] );
01315         ERR_PARNC( ret );
01316         ret = ncmpi_inq_varid( ncfile, "col", &varid );
01317         ERR_PARNC( ret );
01318         ret = ncmpi_get_vara_int_all( ncfile, varid, &start, &count, &vecCol[0] );
01319         ERR_PARNC( ret );
01320         ret = ncmpi_close( ncfile );
01321         ERR_PARNC( ret );
01322 #endif
01323     }
01324 
01325     // Now let us set the necessary global-to-local ID maps so that A*x operations
01326     // can be performed cleanly, as if the map had been computed online
01327     row_dtoc_dofmap.clear();
01328     // row_dtoc_dofmap.reserve( nB / size );
01329     col_dtoc_dofmap.clear();
01330     rowMap.clear();
01331     colMap.clear();
01332     // col_dtoc_dofmap.reserve( 2 * nA / size );
01333     // row_dtoc_dofmap.resize( m_nTotDofs_Dest, UINT_MAX );
01334     // col_dtoc_dofmap.resize( m_nTotDofs_SrcCov, UINT_MAX );
01335 
01336 #ifdef MOAB_HAVE_MPI
01337     // bother with tuple list only if size > 1
01338     // otherwise, just fill the sparse matrix
01339     if( size > 1 )
01340     {
01341         std::vector< int > ownership;
01342         // the default trivial partitioning scheme
01343         int nDofs = nB;                   // this is for row partitioning
01344         if( !row_partition ) nDofs = nA;  // column partitioning
01345 
01346         // assert(row_major_ownership == true); // this block is valid only for row-based partitioning
01347         ownership.resize( size );
01348         int nPerPart   = nDofs / size;
01349         int nRemainder = nDofs % size;  // Keep the remainder in root
01350         ownership[0]   = nPerPart + nRemainder;
01351         for( int ip = 1, roffset = ownership[0]; ip < size; ++ip )
01352         {
01353             roffset += nPerPart;
01354             ownership[ip] = roffset;
01355         }
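        // e.g., nDofs = 10 and size = 4 give nPerPart = 2, nRemainder = 2 and ownership = {4, 6, 8, 10}:
        // a 0-based dof d is owned by the first rank ip with ownership[ip] > d
        // (dofs 0-3 -> rank 0, 4-5 -> rank 1, 6-7 -> rank 2, 8-9 -> rank 3)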
01356         moab::TupleList* tl = new moab::TupleList;
01357         unsigned numr       = 1;                     //
01358         tl->initialize( 3, 0, 0, numr, localSize );  // to proc, row, col, value
01359         tl->enableWriteAccess();
01360         // populate
01361         for( int i = 0; i < localSize; i++ )
01362         {
01363             int rowval  = vecRow[i] - 1;  // dofs are 1 based in the file
01364             int colval  = vecCol[i] - 1;
01365             int to_proc = -1;
01366             int dof_val = colval;
01367             if( row_partition ) dof_val = rowval;
01368 
01369             if( ownership[0] > dof_val )
01370                 to_proc = 0;
01371             else
01372             {
01373                 for( int ip = 1; ip < size; ++ip )
01374                 {
01375                     if( ownership[ip - 1] <= dof_val && ownership[ip] > dof_val )
01376                     {
01377                         to_proc = ip;
01378                         break;
01379                     }
01380                 }
01381             }
01382 
01383             int n                = tl->get_n();
01384             tl->vi_wr[3 * n]     = to_proc;
01385             tl->vi_wr[3 * n + 1] = rowval;
01386             tl->vi_wr[3 * n + 2] = colval;
01387             tl->vr_wr[n]         = vecS[i];
01388             tl->inc_n();
01389         }
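        // at this point each tuple carries (destination proc, 0-based row, 0-based col)
        // plus the weight as its real value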
01390         // heavy communication
01391         ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, *tl, 0 );
01392 
01393         if( owned_dof_ids.size() > 0 )
01394         {
01395             // we need to send the desired dofs to the rendezvous point
01396             moab::TupleList tl_re;                                 //
01397             tl_re.initialize( 2, 0, 0, 0, owned_dof_ids.size() );  // to proc, value
01398             tl_re.enableWriteAccess();
01399             // send first to the rendezvous point, decided by the trivial partitioning
01400 
01401             for( size_t i = 0; i < owned_dof_ids.size(); i++ )
01402             {
01403                 int to_proc = -1;
01404                 int dof_val = owned_dof_ids[i] - 1;  // dofs are 1-based in the file, while the partition is 0-based
01405 
01406                 if( ownership[0] > dof_val )
01407                     to_proc = 0;
01408                 else
01409                 {
01410                     for( int ip = 1; ip < size; ++ip )
01411                     {
01412                         if( ownership[ip - 1] <= dof_val && ownership[ip] > dof_val )
01413                         {
01414                             to_proc = ip;
01415                             break;
01416                         }
01417                     }
01418                 }
01419 
01420                 int n                  = tl_re.get_n();
01421                 tl_re.vi_wr[2 * n]     = to_proc;
01422                 tl_re.vi_wr[2 * n + 1] = dof_val;
01423 
01424                 tl_re.inc_n();
01425             }
01426             ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tl_re, 0 );
01427             // now tl_re tells us where each dof_val needs to be sent back
01428             moab::TupleList::buffer sort_buffer;
01429             sort_buffer.buffer_init( tl_re.get_n() );
01430             tl_re.sort( 1, &sort_buffer );  // so now we order by value
01431 
01432             sort_buffer.buffer_init( tl->get_n() );
01433             int indexOrder = 2;                  //  colVal
01434             if( row_partition ) indexOrder = 1;  //  rowVal
01435             //tl->sort( indexOrder, &sort_buffer );
01436 
01437             std::map< int, int > startDofIndex, endDofIndex;  // indices in tl_re for values we want
01438             int dofVal = -1;
01439             if( tl_re.get_n() > 0 ) dofVal = tl_re.vi_rd[1];  // first dof val on this rank
01440             startDofIndex[dofVal] = 0;
01441             endDofIndex[dofVal]   = 0;  // start and end
01442             for( unsigned k = 1; k < tl_re.get_n(); k++ )
01443             {
01444                 int newDof = tl_re.vi_rd[2 * k + 1];
01445                 if( dofVal == newDof )
01446                 {
01447                     endDofIndex[dofVal] = k;  // extend the end index for this repeated dof value
01448                 }
01449                 else
01450                 {
01451                     dofVal                = newDof;
01452                     startDofIndex[dofVal] = k;
01453                     endDofIndex[dofVal]   = k;
01454                 }
01455             }
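            // e.g., if the sorted dof values stored in tl_re are {5, 5, 7, 9, 9, 9}, then
            // startDofIndex = {5:0, 7:2, 9:3} and endDofIndex = {5:1, 7:2, 9:5}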
01456 
01457             // basically, for each dof value of interest, the entries in tl_re carrying that value span the
01458             // index range [startDofIndex[valDof], endDofIndex[valDof]], i.e.
01459             // tl_re.vi_rd[2*startDofIndex+1] == valDof == tl_re.vi_rd[2*endDofIndex+1]
01460             // tl_re also tells us to which proc we need to send each tuple (row, col, val)
01461             moab::TupleList* tl_back = new moab::TupleList;
01462             unsigned numr            = 1;  //
01463             // tl->get_n() is a reasonable initial size guess, but the result
01464             // can be larger when dofs are repeated
01465             tl_back->initialize( 3, 0, 0, numr, tl->get_n() );  // to proc, row, col, value
01466             tl_back->enableWriteAccess();
01467             // now loop over tl and tl_re to see where to send
01468             // form the new tuple, which will contain the desired dofs per task, per row or column distribution
01469 
01470             for( unsigned k = 0; k < tl->get_n(); k++ )
01471             {
01472                 int valDof = tl->vi_rd[3 * k + indexOrder];  // indexOrder is 1 for row, 2 for column
01473                 for( int ire = startDofIndex[valDof]; ire <= endDofIndex[valDof]; ire++ )
01474                 {
01475                     int to_proc               = tl_re.vi_rd[2 * ire];
01476                     int n                     = tl_back->get_n();
01477                     tl_back->vi_wr[3 * n]     = to_proc;
01478                     tl_back->vi_wr[3 * n + 1] = tl->vi_rd[3 * k + 1];  // row
01479                     tl_back->vi_wr[3 * n + 2] = tl->vi_rd[3 * k + 2];  // col
01480                     tl_back->vr_wr[n]         = tl->vr_rd[k];
01481                     tl_back->inc_n();
01482                 }
01483             }
01484 
01485             // now communicate to the desired tasks:
01486             ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, *tl_back, 0 );
01487 
01488             tl_re.reset();  // clear memory, although this will go out of scope
01489             tl->reset();
01490             tl = tl_back;
01491         }
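        // tl now holds the (row, col, value) triples destined for this task, as requested via owned_dof_ids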
01492 
01493         int rindexMax = 0, cindexMax = 0;
01494         // populate the sparsematrix, using rowMap and colMap
01495         int n = tl->get_n();
01496         for( int i = 0; i < n; i++ )
01497         {
01498             int rindex, cindex;
01499             const int& vecRowValue = tl->vi_wr[3 * i + 1];
01500             const int& vecColValue = tl->vi_wr[3 * i + 2];
01501 
01502             std::map< int, int >::iterator riter = rowMap.find( vecRowValue );
01503             if( riter == rowMap.end() )
01504             {
01505                 rowMap[vecRowValue] = rindexMax;
01506                 rindex              = rindexMax;
01507                 row_gdofmap.push_back( vecRowValue );
01508                 // row_dtoc_dofmap.push_back( vecRowValue );
01509                 rindexMax++;
01510             }
01511             else
01512                 rindex = riter->second;
01513 
01514             std::map< int, int >::iterator citer = colMap.find( vecColValue );
01515             if( citer == colMap.end() )
01516             {
01517                 colMap[vecColValue] = cindexMax;
01518                 cindex              = cindexMax;
01519                 col_gdofmap.push_back( vecColValue );
01520                 // col_dtoc_dofmap.push_back( vecColValue );
01521                 cindexMax++;
01522             }
01523             else
01524                 cindex = citer->second;
01525 
01526             sparseMatrix( rindex, cindex ) = tl->vr_wr[i];
01527         }
01528         tl->reset();
01529     }
01530     else
01531 #endif
01532     {
01533         int rindexMax = 0, cindexMax = 0;
01534 
01535         for( int i = 0; i < nS; i++ )
01536         {
01537             int rindex, cindex;
01538             const int& vecRowValue = vecRow[i] - 1;  // the rows, cols are 1 based in the file
01539             const int& vecColValue = vecCol[i] - 1;
01540 
01541             std::map< int, int >::iterator riter = rowMap.find( vecRowValue );
01542             if( riter == rowMap.end() )
01543             {
01544                 rowMap[vecRowValue] = rindexMax;
01545                 rindex              = rindexMax;
01546                 row_gdofmap.push_back( vecRowValue );
01547                 // row_dtoc_dofmap.push_back( vecRowValue );
01548                 rindexMax++;
01549             }
01550             else
01551                 rindex = riter->second;
01552 
01553             std::map< int, int >::iterator citer = colMap.find( vecColValue );
01554             if( citer == colMap.end() )
01555             {
01556                 colMap[vecColValue] = cindexMax;
01557                 cindex              = cindexMax;
01558                 col_gdofmap.push_back( vecColValue );
01559                 // col_dtoc_dofmap.push_back( vecColValue );
01560                 cindexMax++;
01561             }
01562             else
01563                 cindex = citer->second;
01564 
01565             sparseMatrix( rindex, cindex ) = vecS[i];
01566         }
01567     }
01568 
01569     m_nTotDofs_SrcCov = sparseMatrix.GetColumns();
01570     m_nTotDofs_Dest   = sparseMatrix.GetRows();
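    // the number of distinct local columns/rows encountered becomes the local covering-source /
    // destination DoF count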
01571 
01572 #ifdef MOAB_HAVE_EIGEN3
01573     this->copy_tempest_sparsemat_to_eigen3();
01574 #endif
01575 
01576     // Reset the source and target data first
01577     m_rowVector.setZero();
01578     m_colVector.setZero();
01579 
01580     return moab::MB_SUCCESS;
01581 }
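
// A minimal usage sketch for ReadParallelMap (illustrative only; the remapper pointer,
// the owned DoF id vector and the file name below are assumptions, not taken from this file):
//
//   moab::TempestOnlineMap weightMap( remapper );
//   std::vector< int > ownedDofIds;  // 1-based global DoF ids owned by this task
//   moab::ErrorCode rval = weightMap.ReadParallelMap( "map.nc", ownedDofIds, /*row_partition*/ true );
//   MB_CHK_ERR( rval );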
01582 
01583 ///////////////////////////////////////////////////////////////////////////////