MOAB: Mesh Oriented datABase  (version 5.4.1)
TempestOnlineMapIO.cpp
/*
 * =====================================================================================
 *
 *       Filename:  TempestOnlineMapIO.cpp
 *
 *    Description:  All I/O implementations related to TempestOnlineMap
 *
 *        Version:  1.0
 *        Created:  02/06/2021 02:35:41
 *
 *         Author:  Vijay S. Mahadevan (vijaysm), [email protected]
 *        Company:  Argonne National Lab
 *
 * =====================================================================================
 */

#include "FiniteElementTools.h"
#include "moab/Remapping/TempestOnlineMap.hpp"
#include "moab/TupleList.hpp"

#ifdef MOAB_HAVE_NETCDFPAR
#include "netcdfcpp_par.hpp"
#else
#include "netcdfcpp.h"
#endif

#ifdef MOAB_HAVE_PNETCDF
#include <pnetcdf.h>

#define ERR_PARNC( err )                                                              \
    if( err != NC_NOERR )                                                             \
    {                                                                                 \
        fprintf( stderr, "Error at line %d: %s\n", __LINE__, ncmpi_strerror( err ) ); \
        MPI_Abort( MPI_COMM_WORLD, 1 );                                               \
    }

#endif

#ifdef MOAB_HAVE_MPI

#define MPI_CHK_ERR( err )                                          \
    if( err )                                                       \
    {                                                               \
        std::cout << "MPI Failure. ErrorCode (" << ( err ) << ") "; \
        std::cout << "\nMPI Aborting... \n";                        \
        return moab::MB_FAILURE;                                    \
    }

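// Redistribute the per-dof arrays (areas, center/vertex coordinates, masks) so that task t
// owns the contiguous global-dof range [t * size_per_task, (t + 1) * size_per_task), with
// the remainder going to the last task. On return, N holds the new local count and maxdof
// the global maximum dof index.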
int moab::TempestOnlineMap::rearrange_arrays_by_dofs( const std::vector< unsigned int >& gdofmap,
                                                      DataArray1D< double >& vecFaceArea,
                                                      DataArray1D< double >& dCenterLon,
                                                      DataArray1D< double >& dCenterLat,
                                                      DataArray2D< double >& dVertexLon,
                                                      DataArray2D< double >& dVertexLat,
                                                      std::vector< int >& masks,
                                                      unsigned& N,  // on output: the local size after redistribution
                                                      int nv,
                                                      int& maxdof )
{
    // first find the maximum dof, which determines the partitioning
    unsigned int localmax = 0;
    for( unsigned i = 0; i < N; i++ )
        if( gdofmap[i] > localmax ) localmax = gdofmap[i];

    // decide partitioning based on maxdof/size
    MPI_Allreduce( &localmax, &maxdof, 1, MPI_INT, MPI_MAX, m_pcomm->comm() );
    // maxdof is 0-based, so the actual number of dofs is maxdof + 1
    int size_per_task = ( maxdof + 1 ) / size;  // dof x is assigned to task x / size_per_task
    // so we reorder by actual dof, such that task 0 owns dofs in [0, size_per_task), etc.
    moab::TupleList tl;
    unsigned numr = 2 * nv + 3;         // doubles: area, center lon, center lat, nv vertex lons, nv vertex lats
    tl.initialize( 3, 0, 0, numr, N );  // ints: destination proc, global dof, mask
    tl.enableWriteAccess();
    // populate
    for( unsigned i = 0; i < N; i++ )
    {
        int gdof    = gdofmap[i];
        int to_proc = gdof / size_per_task;
        int mask    = masks[i];
        if( to_proc >= size ) to_proc = size - 1;  // the last ones go to the last proc
        int n                  = tl.get_n();
        tl.vi_wr[3 * n]        = to_proc;
        tl.vi_wr[3 * n + 1]    = gdof;
        tl.vi_wr[3 * n + 2]    = mask;
        tl.vr_wr[n * numr]     = vecFaceArea[i];
        tl.vr_wr[n * numr + 1] = dCenterLon[i];
        tl.vr_wr[n * numr + 2] = dCenterLat[i];
        for( int j = 0; j < nv; j++ )
        {
            tl.vr_wr[n * numr + 3 + j]      = dVertexLon[i][j];
            tl.vr_wr[n * numr + 3 + nv + j] = dVertexLat[i][j];
        }
        tl.inc_n();
    }

    // now do the heavy communication
    ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tl, 0 );

    // after communication, on each processor we should have tuples coming in
    // still need to order by global dofs; then rearrange the input vectors
    moab::TupleList::buffer sort_buffer;
    sort_buffer.buffer_init( tl.get_n() );
    tl.sort( 1, &sort_buffer );
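    // sorting on key index 1 (the global dof) makes duplicate dofs adjacent, so they can be
    // detected and collapsed in a single pass below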
    // count how many are unique, and collapse
    int nb_unique = 1;
    for( unsigned i = 0; i + 1 < tl.get_n(); i++ )  // i + 1 avoids unsigned underflow when the list is empty
    {
        if( tl.vi_wr[3 * i + 1] != tl.vi_wr[3 * i + 4] ) nb_unique++;
    }
    vecFaceArea.Allocate( nb_unique );
    dCenterLon.Allocate( nb_unique );
    dCenterLat.Allocate( nb_unique );
    dVertexLon.Allocate( nb_unique, nv );
    dVertexLat.Allocate( nb_unique, nv );
    masks.resize( nb_unique );
    int current_size = 1;
    vecFaceArea[0]   = tl.vr_wr[0];
    dCenterLon[0]    = tl.vr_wr[1];
    dCenterLat[0]    = tl.vr_wr[2];
    masks[0]         = tl.vi_wr[2];
    for( int j = 0; j < nv; j++ )
    {
        dVertexLon[0][j] = tl.vr_wr[3 + j];
        dVertexLat[0][j] = tl.vr_wr[3 + nv + j];
    }
    for( unsigned i = 0; i + 1 < tl.get_n(); i++ )
    {
        int i1 = i + 1;
        if( tl.vi_wr[3 * i + 1] != tl.vi_wr[3 * i + 4] )
        {
            vecFaceArea[current_size] = tl.vr_wr[i1 * numr];
            dCenterLon[current_size]  = tl.vr_wr[i1 * numr + 1];
            dCenterLat[current_size]  = tl.vr_wr[i1 * numr + 2];
            for( int j = 0; j < nv; j++ )
            {
                dVertexLon[current_size][j] = tl.vr_wr[i1 * numr + 3 + j];
                dVertexLat[current_size][j] = tl.vr_wr[i1 * numr + 3 + nv + j];
            }
            masks[current_size] = tl.vi_wr[3 * i1 + 2];
            current_size++;
        }
        else
        {
            vecFaceArea[current_size - 1] += tl.vr_wr[i1 * numr];  // accumulate areas of duplicate dofs; we should come here only for CGLL
        }
    }

    N = current_size;  // or nb_unique, should be the same
    return 0;
}
#endif

///////////////////////////////////////////////////////////////////////////////

moab::ErrorCode moab::TempestOnlineMap::WriteParallelMap( const std::string& strFilename )
{
    moab::ErrorCode rval;

    size_t lastindex      = strFilename.find_last_of( "." );
    std::string extension = strFilename.substr( lastindex + 1, strFilename.size() );

    // Write the map file to disk in parallel
    if( extension == "nc" )
    {
        /* Invoke the actual call to write the parallel map to disk in SCRIP format */
        rval = this->WriteSCRIPMapFile( strFilename.c_str() );MB_CHK_ERR( rval );
    }
    else
    {
        /* Write to the parallel H5M format */
        rval = this->WriteHDF5MapFile( strFilename.c_str() );MB_CHK_ERR( rval );
    }

    return rval;
}
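// Example usage (a minimal sketch; 'onlineMap' stands for a fully computed TempestOnlineMap):
//   moab::ErrorCode rval = onlineMap.WriteParallelMap( "map.nc" );   // ".nc" -> SCRIP netCDF
//   rval                 = onlineMap.WriteParallelMap( "map.h5m" );  // anything else -> parallel H5M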

///////////////////////////////////////////////////////////////////////////////

moab::ErrorCode moab::TempestOnlineMap::WriteSCRIPMapFile( const std::string& strFilename )
{
    NcError error( NcError::silent_nonfatal );

#ifdef MOAB_HAVE_NETCDFPAR
    bool is_independent = true;
    ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strFilename.c_str(), NcFile::Replace, NcFile::Netcdf4 );
    // ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strFilename.c_str(), NcmpiFile::replace, NcmpiFile::classic5 );
#else
    NcFile ncMap( strFilename.c_str(), NcFile::Replace );
#endif

    if( !ncMap.is_valid() )
    {
        _EXCEPTION1( "Unable to open output map file \"%s\"", strFilename.c_str() );
    }
    // Attributes
    ncMap.add_att( "Title", "MOAB-TempestRemap Online Regridding Weight Generator" );

    /**
     * Need to get the global maximum of number of vertices per element.
     * Key issue is that when calling InitializeCoordinatesFromMeshFV, the allocations for
     * dVertexLon/dVertexLat are made based on the maximum vertices in the current process.
     * However, when writing this out, other processes may have a different size for the same
     * array. This is hence a mess to consolidate in h5mtoscrip eventually.
     **/
    /* Let us compute all relevant data for the current original source mesh on the process */
    DataArray1D< double > vecSourceFaceArea, vecTargetFaceArea;
    DataArray1D< double > dSourceCenterLon, dSourceCenterLat, dTargetCenterLon, dTargetCenterLat;
    DataArray2D< double > dSourceVertexLon, dSourceVertexLat, dTargetVertexLon, dTargetVertexLat;
    if( m_srcDiscType == DiscretizationType_FV || m_srcDiscType == DiscretizationType_PCLOUD )
    {
        this->InitializeCoordinatesFromMeshFV(
            *m_meshInput, dSourceCenterLon, dSourceCenterLat, dSourceVertexLon, dSourceVertexLat,
            ( this->m_remapper->m_source_type == moab::TempestRemapper::RLL ), /* fLatLon */
            m_remapper->max_source_edges );

        vecSourceFaceArea.Allocate( m_meshInput->vecFaceArea.GetRows() );
        for( unsigned i = 0; i < m_meshInput->vecFaceArea.GetRows(); ++i )
            vecSourceFaceArea[i] = m_meshInput->vecFaceArea[i];
    }
    else
    {
        DataArray3D< double > dataGLLJacobianSrc;
        this->InitializeCoordinatesFromMeshFE( *m_meshInput, m_nDofsPEl_Src, dataGLLNodesSrc, dSourceCenterLon,
                                               dSourceCenterLat, dSourceVertexLon, dSourceVertexLat );

        // Generate the continuous Jacobian for the input mesh
        GenerateMetaData( *m_meshInput, m_nDofsPEl_Src, false /* fBubble */, dataGLLNodesSrc, dataGLLJacobianSrc );

        if( m_srcDiscType == DiscretizationType_CGLL )
        {
            GenerateUniqueJacobian( dataGLLNodesSrc, dataGLLJacobianSrc, vecSourceFaceArea );
        }
        else
        {
            GenerateDiscontinuousJacobian( dataGLLJacobianSrc, vecSourceFaceArea );
        }
    }

    if( m_destDiscType == DiscretizationType_FV || m_destDiscType == DiscretizationType_PCLOUD )
    {
        this->InitializeCoordinatesFromMeshFV(
            *m_meshOutput, dTargetCenterLon, dTargetCenterLat, dTargetVertexLon, dTargetVertexLat,
            ( this->m_remapper->m_target_type == moab::TempestRemapper::RLL ), /* fLatLon */
            m_remapper->max_target_edges );

        vecTargetFaceArea.Allocate( m_meshOutput->vecFaceArea.GetRows() );
        for( unsigned i = 0; i < m_meshOutput->vecFaceArea.GetRows(); ++i )
        {
            vecTargetFaceArea[i] = m_meshOutput->vecFaceArea[i];
        }
    }
    else
    {
        DataArray3D< double > dataGLLJacobianDest;
        this->InitializeCoordinatesFromMeshFE( *m_meshOutput, m_nDofsPEl_Dest, dataGLLNodesDest, dTargetCenterLon,
                                               dTargetCenterLat, dTargetVertexLon, dTargetVertexLat );

        // Generate the continuous Jacobian for the output mesh
        GenerateMetaData( *m_meshOutput, m_nDofsPEl_Dest, false /* fBubble */, dataGLLNodesDest, dataGLLJacobianDest );

        if( m_destDiscType == DiscretizationType_CGLL )
        {
            GenerateUniqueJacobian( dataGLLNodesDest, dataGLLJacobianDest, vecTargetFaceArea );
        }
        else
        {
            GenerateDiscontinuousJacobian( dataGLLJacobianDest, vecTargetFaceArea );
        }
    }

    // Map dimensions
    unsigned nA = ( vecSourceFaceArea.GetRows() );
    unsigned nB = ( vecTargetFaceArea.GetRows() );

    std::vector< int > masksA, masksB;
    ErrorCode rval = m_remapper->GetIMasks( moab::Remapper::SourceMesh, masksA );MB_CHK_SET_ERR( rval, "Trouble getting masks for source" );
    rval = m_remapper->GetIMasks( moab::Remapper::TargetMesh, masksB );MB_CHK_SET_ERR( rval, "Trouble getting masks for target" );

    // Number of nodes per Face
    int nSourceNodesPerFace = dSourceVertexLon.GetColumns();
    int nTargetNodesPerFace = dTargetVertexLon.GetColumns();
    // If source or target cells have triangles at the poles, the centers of those triangles need
    // to come from the original quad, not from the 3d center converted back to 2d.
    // The logic below is adapted from TempestRemap's OnlineMap.cpp.
    // Right now this is done only for the source mesh; the same logic should be copied for the target mesh.

    for( unsigned i = 0; i < nA; i++ )
    {
        const Face& face = m_meshInput->faces[i];

        int nNodes          = face.edges.size();
        int indexNodeAtPole = -1;
        if( 3 == nNodes )  // check if one node is at the poles
        {
            for( int j = 0; j < nNodes; j++ )
                if( fabs( fabs( dSourceVertexLat[i][j] ) - 90.0 ) < 1.0e-12 )
                {
                    indexNodeAtPole = j;
                    break;
                }
        }
        if( indexNodeAtPole < 0 ) continue;  // continue the i loop, do nothing
        // recompute the cell center from 3d data: weight the pole node twice,
        // add the other 2 nodes, and average
        int nodeAtPole = face[indexNodeAtPole];  // use the overloaded operator
        Node nodePole  = m_meshInput->nodes[nodeAtPole];
        Node newCenter = nodePole * 2;
        for( int j = 1; j < nNodes; j++ )
        {
            int indexi       = ( indexNodeAtPole + j ) % nNodes;  // nNodes is 3 !
            const Node& node = m_meshInput->nodes[face[indexi]];
            newCenter        = newCenter + node;
        }
        newCenter = newCenter * 0.25;
        newCenter = newCenter.Normalized();

#ifdef VERBOSE
        double iniLon = dSourceCenterLon[i], iniLat = dSourceCenterLat[i];
#endif
        // dSourceCenterLon, dSourceCenterLat
        XYZtoRLL_Deg( newCenter.x, newCenter.y, newCenter.z, dSourceCenterLon[i], dSourceCenterLat[i] );
#ifdef VERBOSE
        std::cout << " modify center of triangle from " << iniLon << " " << iniLat << " to " << dSourceCenterLon[i]
                  << " " << dSourceCenterLat[i] << "\n";
#endif
    }

    // first move data if in parallel
#if defined( MOAB_HAVE_MPI )
    int max_row_dof, max_col_dof;  // output; arrays will be re-distributed in chunks [maxdof/size]
    // if (size > 1)
    {
        int ierr = rearrange_arrays_by_dofs( srccol_gdofmap, vecSourceFaceArea, dSourceCenterLon, dSourceCenterLat,
                                             dSourceVertexLon, dSourceVertexLat, masksA, nA, nSourceNodesPerFace,
                                             max_col_dof );  // now nA will be close to maxdof/size
        if( ierr != 0 )
        {
            _EXCEPTION1( "Unable to arrange source data %d ", nA );
        }
        // rearrange target data: (nB)
        ierr = rearrange_arrays_by_dofs( row_gdofmap, vecTargetFaceArea, dTargetCenterLon, dTargetCenterLat,
                                         dTargetVertexLon, dTargetVertexLat, masksB, nB, nTargetNodesPerFace,
                                         max_row_dof );  // now nB will be close to maxdof/size
        if( ierr != 0 )
        {
            _EXCEPTION1( "Unable to arrange target data %d ", nB );
        }
    }
#endif

    // Number of non-zeros in the remap matrix operator
    int nS = m_weightMatrix.nonZeros();

#if defined( MOAB_HAVE_MPI ) && defined( MOAB_HAVE_NETCDFPAR )
    int locbuf[5] = { (int)nA, (int)nB, nS, nSourceNodesPerFace, nTargetNodesPerFace };
    int offbuf[3] = { 0, 0, 0 };
    int globuf[5] = { 0, 0, 0, 0, 0 };
    MPI_Scan( locbuf, offbuf, 3, MPI_INT, MPI_SUM, m_pcomm->comm() );
    MPI_Allreduce( locbuf, globuf, 3, MPI_INT, MPI_SUM, m_pcomm->comm() );
    MPI_Allreduce( &locbuf[3], &globuf[3], 2, MPI_INT, MPI_MAX, m_pcomm->comm() );

    // MPI_Scan is inclusive of data in the current rank; modify accordingly.
    offbuf[0] -= nA;
    offbuf[1] -= nB;
    offbuf[2] -= nS;

#else
    int offbuf[3] = { 0, 0, 0 };
    int globuf[5] = { (int)nA, (int)nB, nS, nSourceNodesPerFace, nTargetNodesPerFace };
#endif

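    // At this point offbuf holds this rank's exclusive starting offsets into the global
    // n_a / n_b / n_s arrays (all zeros in the serial build), and globuf holds the global
    // totals plus the global maximum nodes-per-face for source and target.
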
    // Write output dimensions entries
    unsigned nSrcGridDims = ( m_vecSourceDimSizes.size() );
    unsigned nDstGridDims = ( m_vecTargetDimSizes.size() );

    NcDim* dimSrcGridRank = ncMap.add_dim( "src_grid_rank", nSrcGridDims );
    NcDim* dimDstGridRank = ncMap.add_dim( "dst_grid_rank", nDstGridDims );

    NcVar* varSrcGridDims = ncMap.add_var( "src_grid_dims", ncInt, dimSrcGridRank );
    NcVar* varDstGridDims = ncMap.add_var( "dst_grid_dims", ncInt, dimDstGridRank );

#ifdef MOAB_HAVE_NETCDFPAR
    ncMap.enable_var_par_access( varSrcGridDims, is_independent );
    ncMap.enable_var_par_access( varDstGridDims, is_independent );
#endif

    char szDim[64];
    if( ( nSrcGridDims == 1 ) && ( m_vecSourceDimSizes[0] != (int)nA ) )
    {
        varSrcGridDims->put( &globuf[0], 1 );
        varSrcGridDims->add_att( "name0", "num_dof" );
    }
    else
    {
        for( unsigned i = 0; i < m_vecSourceDimSizes.size(); i++ )
        {
            int tmp = ( i == 0 ? globuf[0] : m_vecSourceDimSizes[i] );
            varSrcGridDims->set_cur( nSrcGridDims - i - 1 );
            varSrcGridDims->put( &( tmp ), 1 );
        }

        for( unsigned i = 0; i < m_vecSourceDimSizes.size(); i++ )
        {
            sprintf( szDim, "name%i", i );
            varSrcGridDims->add_att( szDim, m_vecSourceDimNames[nSrcGridDims - i - 1].c_str() );
        }
    }

    if( ( nDstGridDims == 1 ) && ( m_vecTargetDimSizes[0] != (int)nB ) )
    {
        varDstGridDims->put( &globuf[1], 1 );
        varDstGridDims->add_att( "name0", "num_dof" );
    }
    else
    {
        for( unsigned i = 0; i < m_vecTargetDimSizes.size(); i++ )
        {
            int tmp = ( i == 0 ? globuf[1] : m_vecTargetDimSizes[i] );
            varDstGridDims->set_cur( nDstGridDims - i - 1 );
            varDstGridDims->put( &( tmp ), 1 );
        }

        for( unsigned i = 0; i < m_vecTargetDimSizes.size(); i++ )
        {
            sprintf( szDim, "name%i", i );
            varDstGridDims->add_att( szDim, m_vecTargetDimNames[nDstGridDims - i - 1].c_str() );
        }
    }

    // Source and Target mesh resolutions
    NcDim* dimNA = ncMap.add_dim( "n_a", globuf[0] );
    NcDim* dimNB = ncMap.add_dim( "n_b", globuf[1] );

    // Number of nodes per Face
    NcDim* dimNVA = ncMap.add_dim( "nv_a", globuf[3] );
    NcDim* dimNVB = ncMap.add_dim( "nv_b", globuf[4] );

    // Write coordinates
    NcVar* varYCA = ncMap.add_var( "yc_a", ncDouble, dimNA );
    NcVar* varYCB = ncMap.add_var( "yc_b", ncDouble, dimNB );

    NcVar* varXCA = ncMap.add_var( "xc_a", ncDouble, dimNA );
    NcVar* varXCB = ncMap.add_var( "xc_b", ncDouble, dimNB );

    NcVar* varYVA = ncMap.add_var( "yv_a", ncDouble, dimNA, dimNVA );
    NcVar* varYVB = ncMap.add_var( "yv_b", ncDouble, dimNB, dimNVB );

    NcVar* varXVA = ncMap.add_var( "xv_a", ncDouble, dimNA, dimNVA );
    NcVar* varXVB = ncMap.add_var( "xv_b", ncDouble, dimNB, dimNVB );

    // Write masks
    NcVar* varMaskA = ncMap.add_var( "mask_a", ncInt, dimNA );
    NcVar* varMaskB = ncMap.add_var( "mask_b", ncInt, dimNB );

#ifdef MOAB_HAVE_NETCDFPAR
    ncMap.enable_var_par_access( varYCA, is_independent );
    ncMap.enable_var_par_access( varYCB, is_independent );
    ncMap.enable_var_par_access( varXCA, is_independent );
    ncMap.enable_var_par_access( varXCB, is_independent );
    ncMap.enable_var_par_access( varYVA, is_independent );
    ncMap.enable_var_par_access( varYVB, is_independent );
    ncMap.enable_var_par_access( varXVA, is_independent );
    ncMap.enable_var_par_access( varXVB, is_independent );
    ncMap.enable_var_par_access( varMaskA, is_independent );
    ncMap.enable_var_par_access( varMaskB, is_independent );
#endif

    varYCA->add_att( "units", "degrees" );
    varYCB->add_att( "units", "degrees" );

    varXCA->add_att( "units", "degrees" );
    varXCB->add_att( "units", "degrees" );

    varYVA->add_att( "units", "degrees" );
    varYVB->add_att( "units", "degrees" );

    varXVA->add_att( "units", "degrees" );
    varXVB->add_att( "units", "degrees" );

    // Verify dimensionality
    if( dSourceCenterLon.GetRows() != nA )
    {
        _EXCEPTIONT( "Mismatch between dSourceCenterLon and nA" );
    }
    if( dSourceCenterLat.GetRows() != nA )
    {
        _EXCEPTIONT( "Mismatch between dSourceCenterLat and nA" );
    }
    if( dTargetCenterLon.GetRows() != nB )
    {
        _EXCEPTIONT( "Mismatch between dTargetCenterLon and nB" );
    }
    if( dTargetCenterLat.GetRows() != nB )
    {
        _EXCEPTIONT( "Mismatch between dTargetCenterLat and nB" );
    }
    if( dSourceVertexLon.GetRows() != nA )
    {
        _EXCEPTIONT( "Mismatch between dSourceVertexLon and nA" );
    }
    if( dSourceVertexLat.GetRows() != nA )
    {
        _EXCEPTIONT( "Mismatch between dSourceVertexLat and nA" );
    }
    if( dTargetVertexLon.GetRows() != nB )
    {
        _EXCEPTIONT( "Mismatch between dTargetVertexLon and nB" );
    }
    if( dTargetVertexLat.GetRows() != nB )
    {
        _EXCEPTIONT( "Mismatch between dTargetVertexLat and nB" );
    }

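    // Each rank now writes its contiguous chunk [offbuf, offbuf + n) of each global array;
    // set_cur() positions the write at this rank's starting offset.
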
    varYCA->set_cur( (long)offbuf[0] );
    varYCA->put( &( dSourceCenterLat[0] ), nA );
    varYCB->set_cur( (long)offbuf[1] );
    varYCB->put( &( dTargetCenterLat[0] ), nB );

    varXCA->set_cur( (long)offbuf[0] );
    varXCA->put( &( dSourceCenterLon[0] ), nA );
    varXCB->set_cur( (long)offbuf[1] );
    varXCB->put( &( dTargetCenterLon[0] ), nB );

    varYVA->set_cur( (long)offbuf[0] );
    varYVA->put( &( dSourceVertexLat[0][0] ), nA, nSourceNodesPerFace );
    varYVB->set_cur( (long)offbuf[1] );
    varYVB->put( &( dTargetVertexLat[0][0] ), nB, nTargetNodesPerFace );

    varXVA->set_cur( (long)offbuf[0] );
    varXVA->put( &( dSourceVertexLon[0][0] ), nA, nSourceNodesPerFace );
    varXVB->set_cur( (long)offbuf[1] );
    varXVB->put( &( dTargetVertexLon[0][0] ), nB, nTargetNodesPerFace );

    varMaskA->set_cur( (long)offbuf[0] );
    varMaskA->put( &( masksA[0] ), nA );
    varMaskB->set_cur( (long)offbuf[1] );
    varMaskB->put( &( masksB[0] ), nB );

    // Write areas
    NcVar* varAreaA = ncMap.add_var( "area_a", ncDouble, dimNA );
#ifdef MOAB_HAVE_NETCDFPAR
    ncMap.enable_var_par_access( varAreaA, is_independent );
#endif
    varAreaA->set_cur( (long)offbuf[0] );
    varAreaA->put( &( vecSourceFaceArea[0] ), nA );

    NcVar* varAreaB = ncMap.add_var( "area_b", ncDouble, dimNB );
#ifdef MOAB_HAVE_NETCDFPAR
    ncMap.enable_var_par_access( varAreaB, is_independent );
#endif
    varAreaB->set_cur( (long)offbuf[1] );
    varAreaB->put( &( vecTargetFaceArea[0] ), nB );

    // Write SparseMatrix entries
    DataArray1D< int > vecRow( nS );
    DataArray1D< int > vecCol( nS );
    DataArray1D< double > vecS( nS );
    DataArray1D< double > dFracA( nA );
    DataArray1D< double > dFracB( nB );

    moab::TupleList tlValRow, tlValCol;
    unsigned numr = 1;
    // each value has to be sent to the processor owning row (for fracB) and to the
    // processor owning col (for fracA); vecTargetFaceArea[ row ] must also be fetched
    // by the col owner for the fracA computation
    tlValRow.initialize( 2, 0, 0, numr, nS );  // to proc(row), global row, value
    tlValCol.initialize( 3, 0, 0, numr, nS );  // to proc(col), global row / col, value
    tlValRow.enableWriteAccess();
    tlValCol.enableWriteAccess();
    /*
     *  dFracA[ col ] += val / vecSourceFaceArea[ col ] * vecTargetFaceArea[ row ];
     *  dFracB[ row ] += val ;
     */
    int offset = 0;
#if defined( MOAB_HAVE_MPI )
    int nAbase = ( max_col_dof + 1 ) / size;  // it is nA, except on the last rank ( == size - 1 )
    int nBbase = ( max_row_dof + 1 ) / size;  // it is nB, except on the last rank ( == size - 1 )
#endif
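    // The owner of global row r is min( r / nBbase, size - 1 ), and of global col c is
    // min( c / nAbase, size - 1 ), mirroring the partition used in rearrange_arrays_by_dofs.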
    for( int i = 0; i < m_weightMatrix.outerSize(); ++i )
    {
        for( WeightMatrix::InnerIterator it( m_weightMatrix, i ); it; ++it )
        {
            vecRow[offset] = 1 + this->GetRowGlobalDoF( it.row() );  // row index
            vecCol[offset] = 1 + this->GetColGlobalDoF( it.col() );  // col index
            vecS[offset]   = it.value();                             // value

#if defined( MOAB_HAVE_MPI )
            {
                // value M(row, col) will contribute to procRow and procCol values for fracA and fracB
                int procRow = ( vecRow[offset] - 1 ) / nBbase;
                if( procRow >= size ) procRow = size - 1;
                int procCol = ( vecCol[offset] - 1 ) / nAbase;
                if( procCol >= size ) procCol = size - 1;
                int nrInd                     = tlValRow.get_n();
                tlValRow.vi_wr[2 * nrInd]     = procRow;
                tlValRow.vi_wr[2 * nrInd + 1] = vecRow[offset] - 1;
                tlValRow.vr_wr[nrInd]         = vecS[offset];
                tlValRow.inc_n();
                int ncInd                     = tlValCol.get_n();
                tlValCol.vi_wr[3 * ncInd]     = procCol;
                tlValCol.vi_wr[3 * ncInd + 1] = vecRow[offset] - 1;
                tlValCol.vi_wr[3 * ncInd + 2] = vecCol[offset] - 1;  // this is the column
                tlValCol.vr_wr[ncInd]         = vecS[offset];
                tlValCol.inc_n();
            }

#endif
            offset++;
        }
    }
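    // vecRow/vecCol/vecS now hold this rank's nonzero triplets with 1-based global indices,
    // ready to be written to the "row", "col" and "S" variables below.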
#if defined( MOAB_HAVE_MPI )
    // need to send values to their row and col processors, to compute fractions there
    // now do the heavy communication
    ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tlValCol, 0 );
    ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tlValRow, 0 );

    // on the current task we have now received tlValRow, so we can accumulate dFracB
    // reminder: dFracA[ col ] += val / vecSourceFaceArea[ col ] * vecTargetFaceArea[ row ];
    //           dFracB[ row ] += val ;
    for( unsigned i = 0; i < tlValRow.get_n(); i++ )
    {
        // int fromProc = tlValRow.vi_wr[2 * i];
        int gRowInd       = tlValRow.vi_wr[2 * i + 1];
        int localIndexRow = gRowInd - nBbase * rank;  // this rank owns rows [rank * nBbase, (rank + 1) * nBbase)
        double wgt        = tlValRow.vr_wr[i];
        assert( localIndexRow >= 0 );
        assert( nB - localIndexRow > 0 );
        dFracB[localIndexRow] += wgt;
    }
    // to compute dFracA we need vecTargetFaceArea[ row ]; we know the row, and we can get the proc we need it from

    std::set< int > neededRows;
    for( unsigned i = 0; i < tlValCol.get_n(); i++ )
    {
        int rRowInd = tlValCol.vi_wr[3 * i + 1];
        neededRows.insert( rRowInd );
        // we need vecTargetFaceAreaGlobal[ rRowInd ]; this exists on proc procRow
    }
    moab::TupleList tgtAreaReq;
    tgtAreaReq.initialize( 2, 0, 0, 0, neededRows.size() );
    tgtAreaReq.enableWriteAccess();
    for( std::set< int >::iterator sit = neededRows.begin(); sit != neededRows.end(); sit++ )
    {
        int neededRow = *sit;
        int procRow   = neededRow / nBbase;
        if( procRow >= size ) procRow = size - 1;
        int nr                       = tgtAreaReq.get_n();
        tgtAreaReq.vi_wr[2 * nr]     = procRow;
        tgtAreaReq.vi_wr[2 * nr + 1] = neededRow;
        tgtAreaReq.inc_n();
    }

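    // Two crystal-router trips implement a "pull" of the remote target areas: first send the
    // (owner proc, row) requests, then have the owners reply with (proc, row, area) tuples.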
    ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tgtAreaReq, 0 );
    // we need to send back the tgtArea corresponding to each requested row
    moab::TupleList tgtAreaInfo;  // load it with tgtArea at row
    tgtAreaInfo.initialize( 2, 0, 0, 1, tgtAreaReq.get_n() );
    tgtAreaInfo.enableWriteAccess();
    for( unsigned i = 0; i < tgtAreaReq.get_n(); i++ )
    {
        int from_proc     = tgtAreaReq.vi_wr[2 * i];
        int row           = tgtAreaReq.vi_wr[2 * i + 1];
        int localIndexRow = row - rank * nBbase;
        double areaToSend = vecTargetFaceArea[localIndexRow];
        // int remoteIndex = tgtAreaReq.vi_wr[3*i + 2] ;

        tgtAreaInfo.vi_wr[2 * i]     = from_proc;  // send back requested info
        tgtAreaInfo.vi_wr[2 * i + 1] = row;
        tgtAreaInfo.vr_wr[i]         = areaToSend;  // this will be tgt area at row
        tgtAreaInfo.inc_n();
    }
    ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tgtAreaInfo, 0 );

    std::map< int, double > areaAtRow;
    for( unsigned i = 0; i < tgtAreaInfo.get_n(); i++ )
    {
        // we have received, from some proc, the target area for this row
        int row        = tgtAreaInfo.vi_wr[2 * i + 1];
        areaAtRow[row] = tgtAreaInfo.vr_wr[i];
    }

    // now compute the remainder: dFracA[ col ] += val / vecSourceFaceArea[ col ] * vecTargetFaceArea[ row ];
    // areaAtRow holds, keyed by global row, the target area we need
    // there should be an easier way :(
    for( unsigned i = 0; i < tlValCol.get_n(); i++ )
    {
        int rRowInd     = tlValCol.vi_wr[3 * i + 1];
        int colInd      = tlValCol.vi_wr[3 * i + 2];
        double val      = tlValCol.vr_wr[i];
        int localColInd = colInd - rank * nAbase;  // < local nA
        // we need vecTargetFaceAreaGlobal[ rRowInd ]; this exists on proc procRow
        auto itMap = areaAtRow.find( rRowInd );  // it should be different from end
        if( itMap != areaAtRow.end() )
        {
            double areaRow = itMap->second;  // we fished a lot for this !
            dFracA[localColInd] += val / vecSourceFaceArea[localColInd] * areaRow;
        }
    }

#endif
    // Load in data
    NcDim* dimNS = ncMap.add_dim( "n_s", globuf[2] );

    NcVar* varRow = ncMap.add_var( "row", ncInt, dimNS );
    NcVar* varCol = ncMap.add_var( "col", ncInt, dimNS );
    NcVar* varS   = ncMap.add_var( "S", ncDouble, dimNS );
#ifdef MOAB_HAVE_NETCDFPAR
    ncMap.enable_var_par_access( varRow, is_independent );
    ncMap.enable_var_par_access( varCol, is_independent );
    ncMap.enable_var_par_access( varS, is_independent );
#endif

    varRow->set_cur( (long)offbuf[2] );
    varRow->put( vecRow, nS );

    varCol->set_cur( (long)offbuf[2] );
    varCol->put( vecCol, nS );

    varS->set_cur( (long)offbuf[2] );
    varS->put( &( vecS[0] ), nS );

    // Calculate and write fractional coverage arrays
    NcVar* varFracA = ncMap.add_var( "frac_a", ncDouble, dimNA );
#ifdef MOAB_HAVE_NETCDFPAR
    ncMap.enable_var_par_access( varFracA, is_independent );
#endif
    varFracA->add_att( "name", "fraction of target coverage of source dof" );
    varFracA->add_att( "units", "unitless" );
    varFracA->set_cur( (long)offbuf[0] );
    varFracA->put( &( dFracA[0] ), nA );

    NcVar* varFracB = ncMap.add_var( "frac_b", ncDouble, dimNB );
#ifdef MOAB_HAVE_NETCDFPAR
    ncMap.enable_var_par_access( varFracB, is_independent );
#endif
    varFracB->add_att( "name", "fraction of source coverage of target dof" );
    varFracB->add_att( "units", "unitless" );
    varFracB->set_cur( (long)offbuf[1] );
    varFracB->put( &( dFracB[0] ), nB );

    // Add global attributes
    // std::map<std::string, std::string>::const_iterator iterAttributes =
    //     mapAttributes.begin();
    // for (; iterAttributes != mapAttributes.end(); iterAttributes++) {
    //     ncMap.add_att(
    //         iterAttributes->first.c_str(),
    //         iterAttributes->second.c_str());
    // }

    ncMap.close();

    return moab::MB_SUCCESS;
}

///////////////////////////////////////////////////////////////////////////////

moab::ErrorCode moab::TempestOnlineMap::WriteHDF5MapFile( const std::string& strOutputFile )
{
    moab::ErrorCode rval;

    /**
     * Need to get the global maximum of number of vertices per element.
     * Key issue is that when calling InitializeCoordinatesFromMeshFV, the allocations for
     * dVertexLon/dVertexLat are made based on the maximum vertices in the current process.
     * However, when writing this out, other processes may have a different size for the same
     * array. This is hence a mess to consolidate in h5mtoscrip eventually.
     **/

    /* Let us compute all relevant data for the current original source mesh on the process */
    DataArray1D< double > vecSourceFaceArea, vecTargetFaceArea;
    DataArray1D< double > dSourceCenterLon, dSourceCenterLat, dTargetCenterLon, dTargetCenterLat;
    DataArray2D< double > dSourceVertexLon, dSourceVertexLat, dTargetVertexLon, dTargetVertexLat;
    if( m_srcDiscType == DiscretizationType_FV || m_srcDiscType == DiscretizationType_PCLOUD )
    {
        this->InitializeCoordinatesFromMeshFV(
            *m_meshInput, dSourceCenterLon, dSourceCenterLat, dSourceVertexLon, dSourceVertexLat,
            ( this->m_remapper->m_source_type == moab::TempestRemapper::RLL ) /* fLatLon */,
            m_remapper->max_source_edges );

        vecSourceFaceArea.Allocate( m_meshInput->vecFaceArea.GetRows() );
        for( unsigned i = 0; i < m_meshInput->vecFaceArea.GetRows(); ++i )
            vecSourceFaceArea[i] = m_meshInput->vecFaceArea[i];
    }
    else
    {
        DataArray3D< double > dataGLLJacobianSrc;
        this->InitializeCoordinatesFromMeshFE( *m_meshInput, m_nDofsPEl_Src, dataGLLNodesSrc, dSourceCenterLon,
                                               dSourceCenterLat, dSourceVertexLon, dSourceVertexLat );

        // Generate the continuous Jacobian for the input mesh
        GenerateMetaData( *m_meshInput, m_nDofsPEl_Src, false /* fBubble */, dataGLLNodesSrc, dataGLLJacobianSrc );

        if( m_srcDiscType == DiscretizationType_CGLL )
        {
            GenerateUniqueJacobian( dataGLLNodesSrc, dataGLLJacobianSrc, m_meshInput->vecFaceArea );
        }
        else
        {
            GenerateDiscontinuousJacobian( dataGLLJacobianSrc, m_meshInput->vecFaceArea );
        }

        vecSourceFaceArea.Allocate( m_meshInput->faces.size() * m_nDofsPEl_Src * m_nDofsPEl_Src );
        int offset = 0;
        for( size_t e = 0; e < m_meshInput->faces.size(); e++ )
        {
            for( int s = 0; s < m_nDofsPEl_Src; s++ )
            {
                for( int t = 0; t < m_nDofsPEl_Src; t++ )
                {
                    vecSourceFaceArea[srccol_dtoc_dofmap[offset + s * m_nDofsPEl_Src + t]] =
                        dataGLLJacobianSrc[s][t][e];
                }
            }
            offset += m_nDofsPEl_Src * m_nDofsPEl_Src;
        }
    }

    if( m_destDiscType == DiscretizationType_FV || m_destDiscType == DiscretizationType_PCLOUD )
    {
        this->InitializeCoordinatesFromMeshFV(
            *m_meshOutput, dTargetCenterLon, dTargetCenterLat, dTargetVertexLon, dTargetVertexLat,
            ( this->m_remapper->m_target_type == moab::TempestRemapper::RLL ) /* fLatLon */,
            m_remapper->max_target_edges );

        vecTargetFaceArea.Allocate( m_meshOutput->vecFaceArea.GetRows() );
        for( unsigned i = 0; i < m_meshOutput->vecFaceArea.GetRows(); ++i )
            vecTargetFaceArea[i] = m_meshOutput->vecFaceArea[i];
    }
    else
    {
        DataArray3D< double > dataGLLJacobianDest;
        this->InitializeCoordinatesFromMeshFE( *m_meshOutput, m_nDofsPEl_Dest, dataGLLNodesDest, dTargetCenterLon,
                                               dTargetCenterLat, dTargetVertexLon, dTargetVertexLat );

        // Generate the continuous Jacobian for the output mesh
        GenerateMetaData( *m_meshOutput, m_nDofsPEl_Dest, false /* fBubble */, dataGLLNodesDest, dataGLLJacobianDest );

        if( m_destDiscType == DiscretizationType_CGLL )
        {
            GenerateUniqueJacobian( dataGLLNodesDest, dataGLLJacobianDest, m_meshOutput->vecFaceArea );
        }
        else
        {
            GenerateDiscontinuousJacobian( dataGLLJacobianDest, m_meshOutput->vecFaceArea );
        }

        vecTargetFaceArea.Allocate( m_meshOutput->faces.size() * m_nDofsPEl_Dest * m_nDofsPEl_Dest );
        int offset = 0;
        for( size_t e = 0; e < m_meshOutput->faces.size(); e++ )
        {
            for( int s = 0; s < m_nDofsPEl_Dest; s++ )
            {
                for( int t = 0; t < m_nDofsPEl_Dest; t++ )
                {
                    vecTargetFaceArea[row_dtoc_dofmap[offset + s * m_nDofsPEl_Dest + t]] = dataGLLJacobianDest[s][t][e];
                }
            }
            offset += m_nDofsPEl_Dest * m_nDofsPEl_Dest;
        }
    }

    moab::EntityHandle& m_meshOverlapSet = m_remapper->m_overlap_set;
    int tot_src_ents                     = m_remapper->m_source_entities.size();
    int tot_tgt_ents                     = m_remapper->m_target_entities.size();
    int tot_src_size                     = dSourceCenterLon.GetRows();
    int tot_tgt_size                     = m_dTargetCenterLon.GetRows();
    int tot_vsrc_size                    = dSourceVertexLon.GetRows() * dSourceVertexLon.GetColumns();
    int tot_vtgt_size                    = m_dTargetVertexLon.GetRows() * m_dTargetVertexLon.GetColumns();

    const int weightMatNNZ = m_weightMatrix.nonZeros();
    moab::Tag tagMapMetaData, tagMapIndexRow, tagMapIndexCol, tagMapValues, srcEleIDs, tgtEleIDs;
    rval = m_interface->tag_get_handle( "SMAT_DATA", 13, moab::MB_TYPE_INTEGER, tagMapMetaData,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "SMAT_ROWS", weightMatNNZ, moab::MB_TYPE_INTEGER, tagMapIndexRow,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "SMAT_COLS", weightMatNNZ, moab::MB_TYPE_INTEGER, tagMapIndexCol,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "SMAT_VALS", weightMatNNZ, moab::MB_TYPE_DOUBLE, tagMapValues,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "SourceGIDS", tot_src_size, moab::MB_TYPE_INTEGER, srcEleIDs,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "TargetGIDS", tot_tgt_size, moab::MB_TYPE_INTEGER, tgtEleIDs,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    moab::Tag srcAreaValues, tgtAreaValues;
    rval = m_interface->tag_get_handle( "SourceAreas", tot_src_size, moab::MB_TYPE_DOUBLE, srcAreaValues,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "TargetAreas", tot_tgt_size, moab::MB_TYPE_DOUBLE, tgtAreaValues,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    moab::Tag tagSrcCoordsCLon, tagSrcCoordsCLat, tagTgtCoordsCLon, tagTgtCoordsCLat;
    rval = m_interface->tag_get_handle( "SourceCoordCenterLon", tot_src_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsCLon,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "SourceCoordCenterLat", tot_src_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsCLat,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "TargetCoordCenterLon", tot_tgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsCLon,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "TargetCoordCenterLat", tot_tgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsCLat,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    moab::Tag tagSrcCoordsVLon, tagSrcCoordsVLat, tagTgtCoordsVLon, tagTgtCoordsVLat;
    rval = m_interface->tag_get_handle( "SourceCoordVertexLon", tot_vsrc_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsVLon,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "SourceCoordVertexLat", tot_vsrc_size, moab::MB_TYPE_DOUBLE, tagSrcCoordsVLat,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "TargetCoordVertexLon", tot_vtgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsVLon,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    rval = m_interface->tag_get_handle( "TargetCoordVertexLat", tot_vtgt_size, moab::MB_TYPE_DOUBLE, tagTgtCoordsVLat,
                                        moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    moab::Tag srcMaskValues, tgtMaskValues;
    if( m_iSourceMask.IsAttached() )
    {
        rval = m_interface->tag_get_handle( "SourceMask", m_iSourceMask.GetRows(), moab::MB_TYPE_INTEGER, srcMaskValues,
                                            moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    }
    if( m_iTargetMask.IsAttached() )
    {
        rval = m_interface->tag_get_handle( "TargetMask", m_iTargetMask.GetRows(), moab::MB_TYPE_INTEGER, tgtMaskValues,
                                            moab::MB_TAG_CREAT | moab::MB_TAG_SPARSE | moab::MB_TAG_VARLEN );MB_CHK_SET_ERR( rval, "Retrieving tag handles failed" );
    }

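    // All map data is stored as (mostly variable-length) sparse tags on the overlap set,
    // so that the parallel H5M write at the end carries the complete operator to disk.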
    std::vector< int > smatrowvals( weightMatNNZ ), smatcolvals( weightMatNNZ );
    std::vector< double > smatvals( weightMatNNZ );
    // const double* smatvals = m_weightMatrix.valuePtr();
    // Loop over the matrix entries and store the global row/col indices and the weights
    for( int k = 0, offset = 0; k < m_weightMatrix.outerSize(); ++k )
    {
        for( moab::TempestOnlineMap::WeightMatrix::InnerIterator it( m_weightMatrix, k ); it; ++it, ++offset )
        {
            smatrowvals[offset] = this->GetRowGlobalDoF( it.row() );
            smatcolvals[offset] = this->GetColGlobalDoF( it.col() );
            smatvals[offset]    = it.value();
        }
    }

    /* Set the global IDs for the DoFs */
    ////
    // col_gdofmap [ col_ldofmap [ 0 : local_ndofs ] ] = GDOF
    // row_gdofmap [ row_ldofmap [ 0 : local_ndofs ] ] = GDOF
    ////
    int maxrow = 0, maxcol = 0;
    std::vector< int > src_global_dofs( tot_src_size ), tgt_global_dofs( tot_tgt_size );
    for( int i = 0; i < tot_src_size; ++i )
    {
        src_global_dofs[i] = srccol_gdofmap[i];
        maxcol             = ( src_global_dofs[i] > maxcol ) ? src_global_dofs[i] : maxcol;
    }

    for( int i = 0; i < tot_tgt_size; ++i )
    {
        tgt_global_dofs[i] = row_gdofmap[i];
        maxrow             = ( tgt_global_dofs[i] > maxrow ) ? tgt_global_dofs[i] : maxrow;
    }

    ///////////////////////////////////////////////////////////////////////////
    // The metadata in the H5M file contains the following data:
    //
    //   1. n_a: Total source entities (number of elements in source mesh)
    //   2. n_b: Total target entities (number of elements in target mesh)
    //   3. nv_a: Max edge size of elements in source mesh
    //   4. nv_b: Max edge size of elements in target mesh
    //   5. maxrows: Number of rows in remap weight matrix
    //   6. maxcols: Number of cols in remap weight matrix
    //   7. nnz: Number of total nnz in sparse remap weight matrix
    //   8. np_a: The order of the field description on the source mesh: >= 1
    //   9. np_b: The order of the field description on the target mesh: >= 1
    //   10. method_a: The type of discretization for field on source mesh: [0 = FV, 1 = cGLL, 2 = dGLL]
    //   11. method_b: The type of discretization for field on target mesh: [0 = FV, 1 = cGLL, 2 = dGLL]
    //   12. conserved: Flag to specify whether the remap operator has conservation constraints: [0, 1]
    //   13. monotonicity: Flags to specify whether the remap operator has monotonicity constraints: [0, 1, 2]
    //
    ///////////////////////////////////////////////////////////////////////////
    int map_disc_details[6];
    map_disc_details[0] = m_nDofsPEl_Src;
    map_disc_details[1] = m_nDofsPEl_Dest;
    map_disc_details[2] = ( m_srcDiscType == DiscretizationType_FV || m_srcDiscType == DiscretizationType_PCLOUD
                                ? 0
                                : ( m_srcDiscType == DiscretizationType_CGLL ? 1 : 2 ) );
    map_disc_details[3] = ( m_destDiscType == DiscretizationType_FV || m_destDiscType == DiscretizationType_PCLOUD
                                ? 0
                                : ( m_destDiscType == DiscretizationType_CGLL ? 1 : 2 ) );
    map_disc_details[4] = ( m_bConserved ? 1 : 0 );
    map_disc_details[5] = m_iMonotonicity;

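    // Entity and nonzero counts are summed across ranks, while the edge sizes and the max
    // row/col indices are max-reduced; only the root stamps the global metadata below.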
#ifdef MOAB_HAVE_MPI
    int loc_smatmetadata[13] = { tot_src_ents,
                                 tot_tgt_ents,
                                 m_remapper->max_source_edges,
                                 m_remapper->max_target_edges,
                                 maxrow + 1,
                                 maxcol + 1,
                                 weightMatNNZ,
                                 map_disc_details[0],
                                 map_disc_details[1],
                                 map_disc_details[2],
                                 map_disc_details[3],
                                 map_disc_details[4],
                                 map_disc_details[5] };
    rval                     = m_interface->tag_set_data( tagMapMetaData, &m_meshOverlapSet, 1, &loc_smatmetadata[0] );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
    int glb_smatmetadata[13] = { 0,
                                 0,
                                 0,
                                 0,
                                 0,
                                 0,
                                 0,
                                 map_disc_details[0],
                                 map_disc_details[1],
                                 map_disc_details[2],
                                 map_disc_details[3],
                                 map_disc_details[4],
                                 map_disc_details[5] };
    int loc_buf[7]           = { tot_src_ents,  tot_tgt_ents, weightMatNNZ, m_remapper->max_source_edges,
                                 m_remapper->max_target_edges, maxrow, maxcol };
    int glb_buf[4] = { 0, 0, 0, 0 };
    MPI_Reduce( &loc_buf[0], &glb_buf[0], 3, MPI_INT, MPI_SUM, 0, m_pcomm->comm() );
    glb_smatmetadata[0] = glb_buf[0];
    glb_smatmetadata[1] = glb_buf[1];
    glb_smatmetadata[6] = glb_buf[2];
    MPI_Reduce( &loc_buf[3], &glb_buf[0], 4, MPI_INT, MPI_MAX, 0, m_pcomm->comm() );
    glb_smatmetadata[2] = glb_buf[0];
    glb_smatmetadata[3] = glb_buf[1];
    glb_smatmetadata[4] = glb_buf[2];
    glb_smatmetadata[5] = glb_buf[3];
#else
    int glb_smatmetadata[13] = { tot_src_ents,
                                 tot_tgt_ents,
                                 m_remapper->max_source_edges,
                                 m_remapper->max_target_edges,
                                 maxrow,
                                 maxcol,
                                 weightMatNNZ,
                                 map_disc_details[0],
                                 map_disc_details[1],
                                 map_disc_details[2],
                                 map_disc_details[3],
                                 map_disc_details[4],
                                 map_disc_details[5] };
#endif
    // These values represent the number of rows and columns, so they should be 1-based.
    glb_smatmetadata[4]++;
    glb_smatmetadata[5]++;

01053     if( this->is_root )
01054     {
01055         std::cout << "  " << this->rank << "  Writing remap weights with size [" << glb_smatmetadata[4] << " X "
01056                   << glb_smatmetadata[5] << "] and NNZ = " << glb_smatmetadata[6] << std::endl;
01057         EntityHandle root_set = 0;
01058         rval                  = m_interface->tag_set_data( tagMapMetaData, &root_set, 1, &glb_smatmetadata[0] );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01059     }
01060 
01061     int dsize;
01062     const int numval          = weightMatNNZ;
01063     const void* smatrowvals_d = smatrowvals.data();
01064     const void* smatcolvals_d = smatcolvals.data();
01065     const void* smatvals_d    = smatvals.data();
01066     rval = m_interface->tag_set_by_ptr( tagMapIndexRow, &m_meshOverlapSet, 1, &smatrowvals_d, &numval );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01067     rval = m_interface->tag_set_by_ptr( tagMapIndexCol, &m_meshOverlapSet, 1, &smatcolvals_d, &numval );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01068     rval = m_interface->tag_set_by_ptr( tagMapValues, &m_meshOverlapSet, 1, &smatvals_d, &numval );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01069 
01070     /* Set the global IDs for the DoFs */
01071     const void* srceleidvals_d = src_global_dofs.data();
01072     const void* tgteleidvals_d = tgt_global_dofs.data();
01073     dsize                      = src_global_dofs.size();
01074     rval = m_interface->tag_set_by_ptr( srcEleIDs, &m_meshOverlapSet, 1, &srceleidvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01075     dsize = tgt_global_dofs.size();
01076     rval  = m_interface->tag_set_by_ptr( tgtEleIDs, &m_meshOverlapSet, 1, &tgteleidvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01077 
01078     /* Set the source and target areas */
01079     const void* srcareavals_d = vecSourceFaceArea;
01080     const void* tgtareavals_d = vecTargetFaceArea;
01081     dsize                     = tot_src_size;
01082     rval = m_interface->tag_set_by_ptr( srcAreaValues, &m_meshOverlapSet, 1, &srcareavals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01083     dsize = tot_tgt_size;
01084     rval  = m_interface->tag_set_by_ptr( tgtAreaValues, &m_meshOverlapSet, 1, &tgtareavals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01085 
01086     /* Set the coordinates for source and target center vertices */
01087     const void* srccoordsclonvals_d = &dSourceCenterLon[0];
01088     const void* srccoordsclatvals_d = &dSourceCenterLat[0];
01089     dsize                           = dSourceCenterLon.GetRows();
01090     rval = m_interface->tag_set_by_ptr( tagSrcCoordsCLon, &m_meshOverlapSet, 1, &srccoordsclonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01091     rval = m_interface->tag_set_by_ptr( tagSrcCoordsCLat, &m_meshOverlapSet, 1, &srccoordsclatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01092     const void* tgtcoordsclonvals_d = &m_dTargetCenterLon[0];
01093     const void* tgtcoordsclatvals_d = &m_dTargetCenterLat[0];
01094     dsize                           = vecTargetFaceArea.GetRows();
01095     rval = m_interface->tag_set_by_ptr( tagTgtCoordsCLon, &m_meshOverlapSet, 1, &tgtcoordsclonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01096     rval = m_interface->tag_set_by_ptr( tagTgtCoordsCLat, &m_meshOverlapSet, 1, &tgtcoordsclatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01097 
01098     /* Set the coordinates for source and target element vertices */
01099     const void* srccoordsvlonvals_d = &( dSourceVertexLon[0][0] );
01100     const void* srccoordsvlatvals_d = &( dSourceVertexLat[0][0] );
01101     dsize                           = dSourceVertexLon.GetRows() * dSourceVertexLon.GetColumns();
01102     rval = m_interface->tag_set_by_ptr( tagSrcCoordsVLon, &m_meshOverlapSet, 1, &srccoordsvlonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01103     rval = m_interface->tag_set_by_ptr( tagSrcCoordsVLat, &m_meshOverlapSet, 1, &srccoordsvlatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01104     const void* tgtcoordsvlonvals_d = &( m_dTargetVertexLon[0][0] );
01105     const void* tgtcoordsvlatvals_d = &( m_dTargetVertexLat[0][0] );
01106     dsize                           = m_dTargetVertexLon.GetRows() * m_dTargetVertexLon.GetColumns();
01107     rval = m_interface->tag_set_by_ptr( tagTgtCoordsVLon, &m_meshOverlapSet, 1, &tgtcoordsvlonvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01108     rval = m_interface->tag_set_by_ptr( tagTgtCoordsVLat, &m_meshOverlapSet, 1, &tgtcoordsvlatvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01109 
01110     /* Set the masks for source and target meshes if available */
01111     if( m_iSourceMask.IsAttached() )
01112     {
01113         const void* srcmaskvals_d = m_iSourceMask;
01114         dsize                     = m_iSourceMask.GetRows();
01115         rval = m_interface->tag_set_by_ptr( srcMaskValues, &m_meshOverlapSet, 1, &srcmaskvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01116     }
01117 
01118     if( m_iTargetMask.IsAttached() )
01119     {
01120         const void* tgtmaskvals_d = m_iTargetMask;
01121         dsize                     = m_iTargetMask.GetRows();
01122         rval = m_interface->tag_set_by_ptr( tgtMaskValues, &m_meshOverlapSet, 1, &tgtmaskvals_d, &dsize );MB_CHK_SET_ERR( rval, "Setting local tag data failed" );
01123     }
01124 
01125 #ifdef MOAB_HAVE_MPI
01126     const char* writeOptions = ( this->size > 1 ? "PARALLEL=WRITE_PART" : "" );
01127 #else
01128     const char* writeOptions = "";
01129 #endif
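    // "PARALLEL=WRITE_PART" directs MOAB's parallel writer to have each task write
    // its own part of the mesh and tag data into the shared output file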
01130 
01131     // EntityHandle sets[3] = {m_remapper->m_source_set, m_remapper->m_target_set, m_remapper->m_overlap_set};
01132     EntityHandle sets[1] = { m_remapper->m_overlap_set };
01133     rval                 = m_interface->write_file( strOutputFile.c_str(), NULL, writeOptions, sets, 1 );MB_CHK_ERR( rval );
01134 
01135 #ifdef WRITE_SCRIP_FILE
01136     sstr.str( "" );
01137     sstr << ctx.outFilename.substr( 0, lastindex ) << "_" << proc_id << ".nc";
01138     std::map< std::string, std::string > mapAttributes;
01139     mapAttributes["Creator"] = "MOAB mbtempest workflow";
01140     if( !ctx.proc_id ) std::cout << "Writing offline map to file: " << sstr.str() << std::endl;
01141     this->Write( sstr.str().c_str(), mapAttributes, NcFile::Netcdf4 );
01142     sstr.str( "" );
01143 #endif
01144 
01145     return moab::MB_SUCCESS;
01146 }
01147 
01148 ///////////////////////////////////////////////////////////////////////////////
01149 
01150 void print_progress( const int barWidth, const float progress, const char* message )
01151 {
01152     std::cout << message << " [";
01153     const int pos = static_cast< int >( barWidth * progress );
01154     for( int i = 0; i < barWidth; ++i )
01155     {
01156         if( i < pos )
01157             std::cout << "=";
01158         else if( i == pos )
01159             std::cout << ">";
01160         else
01161             std::cout << " ";
01162     }
01163     std::cout << "] " << int( progress * 100.0 ) << " %\r";
01164     std::cout.flush();
01165 }
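// A minimal usage sketch for print_progress (illustrative only): update a
// 50-character bar while looping over nTasks work items, then finish the line.
//
//   for( int i = 0; i <= nTasks; ++i )
//   {
//       print_progress( 50, static_cast< float >( i ) / nTasks, "Reading map" );
//       // ... process item i ...
//   }
//   std::cout << std::endl;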
01166 
01167 ///////////////////////////////////////////////////////////////////////////////
01168 
01169 moab::ErrorCode moab::TempestOnlineMap::ReadParallelMap( const char* strSource,
01170                                                          const std::vector< int >& owned_dof_ids,
01171                                                          bool row_partition )
01172 {
01173     NcError error( NcError::silent_nonfatal );
01174 
01175     NcVar *varRow = NULL, *varCol = NULL, *varS = NULL;
01176     int nS = 0, nA = 0, nB = 0;
01177 #ifdef MOAB_HAVE_PNETCDF
01178     // these variables are used only in case the netcdfpar reader fails
01179     int ncfile = -1, ret = 0;
01180     int ndims, nvars, ngatts, unlimited;
01181 #endif
01182 #ifdef MOAB_HAVE_NETCDFPAR
01183     bool is_independent = true;
01184     ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strSource, NcFile::ReadOnly, NcFile::Netcdf4 );
01185     // ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strFilename.c_str(), NcmpiFile::replace, NcmpiFile::classic5 );
01186 #else
01187     NcFile ncMap( strSource, NcFile::ReadOnly );
01188 #endif
01189 
01190 #define CHECK_EXCEPTION( obj, type, varstr )                                                      \
01191     {                                                                                             \
01192         if( obj == NULL )                                                                         \
01193         {                                                                                         \
01194             _EXCEPTION3( "Map file \"%s\" does not contain %s \"%s\"", strSource, type, varstr ); \
01195         }                                                                                         \
01196     }
01197 
01198     // Read SparseMatrix entries
01199 
01200     if( ncMap.is_valid() )
01201     {
01202         NcDim* dimNS = ncMap.get_dim( "n_s" );
01203         CHECK_EXCEPTION( dimNS, "dimension", "n_s" );
01204 
01205         NcDim* dimNA = ncMap.get_dim( "n_a" );
01206         CHECK_EXCEPTION( dimNA, "dimension", "n_a" );
01207 
01208         NcDim* dimNB = ncMap.get_dim( "n_b" );
01209         CHECK_EXCEPTION( dimNB, "dimension", "n_b" );
01210 
01211         // store the total number of nonzeros and the source/target dof counts
01212         nS = dimNS->size();
01213         nA = dimNA->size();
01214         nB = dimNB->size();
01215 
01216         varRow = ncMap.get_var( "row" );
01217         CHECK_EXCEPTION( varRow, "variable", "row" );
01218 
01219         varCol = ncMap.get_var( "col" );
01220         CHECK_EXCEPTION( varCol, "variable", "col" );
01221 
01222         varS = ncMap.get_var( "S" );
01223         CHECK_EXCEPTION( varS, "variable", "S" );
01224 
01225 #ifdef MOAB_HAVE_NETCDFPAR
01226         ncMap.enable_var_par_access( varRow, is_independent );
01227         ncMap.enable_var_par_access( varCol, is_independent );
01228         ncMap.enable_var_par_access( varS, is_independent );
01229 #endif
01230     }
01231     else
01232     {
01233 #ifdef MOAB_HAVE_PNETCDF
01234         // fall back to reading the file directly with pnetcdf, in parallel; MPI is
01235         // assumed available (there is little reason to build with pnetcdf without MPI)
01236         // ParNcFile ncMap( m_pcomm->comm(), MPI_INFO_NULL, strSource, NcFile::ReadOnly, NcFile::Netcdf4 );
01237         ret = ncmpi_open( m_pcomm->comm(), strSource, NC_NOWRITE, MPI_INFO_NULL, &ncfile );
01238         ERR_PARNC( ret );  // bail out completely
01239         ret = ncmpi_inq( ncfile, &ndims, &nvars, &ngatts, &unlimited );
01240         ERR_PARNC( ret );
01241         // find dimension ids for n_S
01242         int ins;
01243         ret = ncmpi_inq_dimid( ncfile, "n_s", &ins );
01244         ERR_PARNC( ret );
01245         MPI_Offset leng;
01246         ret = ncmpi_inq_dimlen( ncfile, ins, &leng );
01247         ERR_PARNC( ret );
01248         nS  = (int)leng;
01249         ret = ncmpi_inq_dimid( ncfile, "n_b", &ins );
01250         ERR_PARNC( ret );
01251         ret = ncmpi_inq_dimlen( ncfile, ins, &leng );
01252         ERR_PARNC( ret );
01253         nB = (int)leng;
01254 #else
01255         _EXCEPTION1( "cannot read the file %s", strSource );
01256 #endif
01257     }
01258 
01259     // Declare the sparse map object on every process
01260     SparseMatrix< double >& sparseMatrix = this->GetSparseMatrix();
01261 
01262     int localSize   = nS / size;
01263     long offsetRead = rank * localSize;
01264     // leftovers on last rank
01265     if( rank == size - 1 )
01266     {
01267         localSize += nS % size;
01268     }
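    // e.g., with nS = 10 nonzeros on size = 3 tasks: localSize = 3, so ranks 0 and 1
    // read entries [0, 3) and [3, 6), while the last rank reads [6, 10) (3 + remainder 1)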
01269 
01270     std::vector< int > vecRow, vecCol;
01271     std::vector< double > vecS;
01272     vecRow.resize( localSize );
01273     vecCol.resize( localSize );
01274     vecS.resize( localSize );
01275 
01276     if( ncMap.is_valid() )
01277     {
01278         varRow->set_cur( (long)( offsetRead ) );
01279         varRow->get( &( vecRow[0] ), localSize );
01280 
01281         varCol->set_cur( (long)( offsetRead ) );
01282         varCol->get( &( vecCol[0] ), localSize );
01283 
01284         varS->set_cur( (long)( offsetRead ) );
01285         varS->get( &( vecS[0] ), localSize );
01286 
01287         ncMap.close();
01288     }
01289     else
01290     {
01291 #ifdef MOAB_HAVE_PNETCDF
01292         // fill the local vectors with the variables from the pnetcdf file: first inquire the ids, then read
01293         MPI_Offset start = (MPI_Offset)offsetRead;
01294         MPI_Offset count = (MPI_Offset)localSize;
01295         int varid;
01296         ret = ncmpi_inq_varid( ncfile, "S", &varid );
01297         ERR_PARNC( ret );
01298         ret = ncmpi_get_vara_double_all( ncfile, varid, &start, &count, &vecS[0] );
01299         ERR_PARNC( ret );
01300         ret = ncmpi_inq_varid( ncfile, "row", &varid );
01301         ERR_PARNC( ret );
01302         ret = ncmpi_get_vara_int_all( ncfile, varid, &start, &count, &vecRow[0] );
01303         ERR_PARNC( ret );
01304         ret = ncmpi_inq_varid( ncfile, "col", &varid );
01305         ERR_PARNC( ret );
01306         ret = ncmpi_get_vara_int_all( ncfile, varid, &start, &count, &vecCol[0] );
01307         ERR_PARNC( ret );
01308         ret = ncmpi_close( ncfile );
01309         ERR_PARNC( ret );
01310 #endif
01311     }
01312 
01313     // Now set up the necessary global-to-local ID maps so that A*x operations
01314     // can be performed cleanly, as if the map had been computed online
01315     row_dtoc_dofmap.clear();
01316     // row_dtoc_dofmap.reserve( nB / size );
01317     col_dtoc_dofmap.clear();
01318     rowMap.clear();
01319     colMap.clear();
01320     // col_dtoc_dofmap.reserve( 2 * nA / size );
01321     // row_dtoc_dofmap.resize( m_nTotDofs_Dest, UINT_MAX );
01322     // col_dtoc_dofmap.resize( m_nTotDofs_SrcCov, UINT_MAX );
01323 
01324 #ifdef MOAB_HAVE_MPI
01325     // bother with the tuple list only if size > 1;
01326     // otherwise, just fill the sparse matrix directly
01327     if( size > 1 )
01328     {
01329         std::vector< int > ownership;
01330         // the default trivial partitioning scheme
01331         int nDofs = nB;                   // this is for row partitioning
01332         if( !row_partition ) nDofs = nA;  // column partitioning
01333 
01334         // assert(row_major_ownership == true); // this block is valid only for row-based partitioning
01335         ownership.resize( size );
01336         int nPerPart   = nDofs / size;
01337         int nRemainder = nDofs % size;  // Keep the remainder in root
01338         ownership[0]   = nPerPart + nRemainder;
01339         for( int ip = 1, roffset = ownership[0]; ip < size; ++ip )
01340         {
01341             roffset += nPerPart;
01342             ownership[ip] = roffset;
01343         }
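        // e.g., nDofs = 10 on size = 3 tasks gives nPerPart = 3, nRemainder = 1 and
        // ownership = { 4, 7, 10 }: a 0-based dof d belongs to the first task ip with
        // ownership[ip] > d (dof 2 -> task 0, dof 5 -> task 1, dof 9 -> task 2)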
01344         moab::TupleList* tl = new moab::TupleList;
01345         unsigned numr       = 1;                     // one real (double) value per tuple
01346         tl->initialize( 3, 0, 0, numr, localSize );  // to proc, row, col, value
01347         tl->enableWriteAccess();
01348         // populate
01349         for( int i = 0; i < localSize; i++ )
01350         {
01351             int rowval  = vecRow[i] - 1;  // dofs are 1-based in the file
01352             int colval  = vecCol[i] - 1;
01353             int to_proc = -1;
01354             int dof_val = colval;
01355             if( row_partition ) dof_val = rowval;
01356 
01357             if( ownership[0] > dof_val )
01358                 to_proc = 0;
01359             else
01360             {
01361                 for( int ip = 1; ip < size; ++ip )
01362                 {
01363                     if( ownership[ip - 1] <= dof_val && ownership[ip] > dof_val )
01364                     {
01365                         to_proc = ip;
01366                         break;
01367                     }
01368                 }
01369             }
01370 
01371             int n                = tl->get_n();
01372             tl->vi_wr[3 * n]     = to_proc;
01373             tl->vi_wr[3 * n + 1] = rowval;
01374             tl->vi_wr[3 * n + 2] = colval;
01375             tl->vr_wr[n]         = vecS[i];
01376             tl->inc_n();
01377         }
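        // each tuple stores ( to_proc, row, col ) in vi_wr[3n .. 3n+2] and the weight in
        // vr_wr[n]; the crystal-router transfer below moves every tuple to its to_proc,
        // so each task ends up with exactly the nonzeros in its ownership range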
01378         // heavy communication
01379         ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, *tl, 0 );
01380 
01381         if( owned_dof_ids.size() > 0 )
01382         {
01383             // we need to send the desired dofs to the rendezvous point
01384             moab::TupleList tl_re;
01385             tl_re.initialize( 2, 0, 0, 0, owned_dof_ids.size() );  // to proc, value
01386             tl_re.enableWriteAccess();
01387             // send first to the rendezvous point, decided by the trivial partitioning
01388 
01389             for( size_t i = 0; i < owned_dof_ids.size(); i++ )
01390             {
01391                 int to_proc = -1;
01392                 int dof_val = owned_dof_ids[i] - 1;  // dofs are 1-based in the file, while the partition is 0-based
01393 
01394                 if( ownership[0] > dof_val )
01395                     to_proc = 0;
01396                 else
01397                 {
01398                     for( int ip = 1; ip < size; ++ip )
01399                     {
01400                         if( ownership[ip - 1] <= dof_val && ownership[ip] > dof_val )
01401                         {
01402                             to_proc = ip;
01403                             break;
01404                         }
01405                     }
01406                 }
01407 
01408                 int n                  = tl_re.get_n();
01409                 tl_re.vi_wr[2 * n]     = to_proc;
01410                 tl_re.vi_wr[2 * n + 1] = dof_val;
01411 
01412                 tl_re.inc_n();
01413             }
01414             ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, tl_re, 0 );
01415             // now tl_re tells us where each dof_val needs to be sent back
01416             moab::TupleList::buffer sort_buffer;
01417             sort_buffer.buffer_init( tl_re.get_n() );
01418             tl_re.sort( 1, &sort_buffer );  // order the tuples by dof value
01419 
01420             sort_buffer.buffer_init( tl->get_n() );
01421             int indexOrder = 2;                  //  colVal
01422             if( row_partition ) indexOrder = 1;  //  rowVal
01423             //tl->sort( indexOrder, &sort_buffer );
01424 
01425             std::map< int, int > startDofIndex, endDofIndex;  // indices in tl_re for values we want
01426             int dofVal = -1;
01427             if( tl_re.get_n() > 0 ) dofVal = tl_re.vi_rd[1];  // first dof val on this rank
01428             startDofIndex[dofVal] = 0;
01429             endDofIndex[dofVal]   = 0;  // start and end
01430             for( unsigned k = 1; k < tl_re.get_n(); k++ )
01431             {
01432                 int newDof = tl_re.vi_rd[2 * k + 1];
01433                 if( dofVal == newDof )
01434                 {
01435                     endDofIndex[dofVal] = k;  // extend the end of the run for this dof value
01436                 }
01437                 else
01438                 {
01439                     dofVal                = newDof;
01440                     startDofIndex[dofVal] = k;
01441                     endDofIndex[dofVal]   = k;
01442                 }
01443             }
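            // e.g., if the sorted dof values in tl_re are { 2, 2, 2, 5, 5 }, then
            // startDofIndex[2] = 0, endDofIndex[2] = 2, startDofIndex[5] = 3 and
            // endDofIndex[5] = 4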
01444 
01445             // in short: for each dof value of interest, the tl_re entries holding that
01446             // value span indices [startDofIndex[valDof], endDofIndex[valDof]], i.e.
01447             // tl_re.vi_rd[2*startDofIndex+1] == valDof == tl_re.vi_rd[2*endDofIndex+1];
01448             // tl_re thus tells us to which proc each (row, col, val) tuple must be sent
01449             moab::TupleList* tl_back = new moab::TupleList;
01450             unsigned numr            = 1;  // one real (double) value per tuple
01451             // tl->get_n() is a good initial size guess, but the final count can be
01452             // larger when dofs are repeated across tasks
01453             tl_back->initialize( 3, 0, 0, numr, tl->get_n() );  // to proc, row, col, value
01454             tl_back->enableWriteAccess();
01455             // now loop over tl and tl_re to see where to send
01456             // form the new tuple, which will contain the desired dofs per task, per row or column distribution
01457 
01458             for( unsigned k = 0; k < tl->get_n(); k++ )
01459             {
01460                 int valDof = tl->vi_rd[3 * k + indexOrder];  // offset 1 for row, 2 for column
01461                 for( int ire = startDofIndex[valDof]; ire <= endDofIndex[valDof]; ire++ )
01462                 {
01463                     int to_proc               = tl_re.vi_rd[2 * ire];
01464                     int n                     = tl_back->get_n();
01465                     tl_back->vi_wr[3 * n]     = to_proc;
01466                     tl_back->vi_wr[3 * n + 1] = tl->vi_rd[3 * k + 1];  // row
01467                     tl_back->vi_wr[3 * n + 2] = tl->vi_rd[3 * k + 2];  // col
01468                     tl_back->vr_wr[n]         = tl->vr_rd[k];
01469                     tl_back->inc_n();
01470                 }
01471             }
01472 
01473             // now communicate to the desired tasks:
01474             ( m_pcomm->proc_config().crystal_router() )->gs_transfer( 1, *tl_back, 0 );
01475 
01476             tl_re.reset();  // clear memory, although this will go out of scope
01477             tl->reset();
01478             tl = tl_back;
01479         }
01480 
01481         int rindexMax = 0, cindexMax = 0;
01482         // populate the sparsematrix, using rowMap and colMap
01483         int n = tl->get_n();
01484         for( int i = 0; i < n; i++ )
01485         {
01486             int rindex, cindex;
01487             const int& vecRowValue = tl->vi_rd[3 * i + 1];
01488             const int& vecColValue = tl->vi_rd[3 * i + 2];
01489 
01490             std::map< int, int >::iterator riter = rowMap.find( vecRowValue );
01491             if( riter == rowMap.end() )
01492             {
01493                 rowMap[vecRowValue] = rindexMax;
01494                 rindex              = rindexMax;
01495                 row_gdofmap.push_back( vecRowValue );
01496                 // row_dtoc_dofmap.push_back( vecRowValue );
01497                 rindexMax++;
01498             }
01499             else
01500                 rindex = riter->second;
01501 
01502             std::map< int, int >::iterator citer = colMap.find( vecColValue );
01503             if( citer == colMap.end() )
01504             {
01505                 colMap[vecColValue] = cindexMax;
01506                 cindex              = cindexMax;
01507                 col_gdofmap.push_back( vecColValue );
01508                 // col_dtoc_dofmap.push_back( vecColValue );
01509                 cindexMax++;
01510             }
01511             else
01512                 cindex = citer->second;
01513 
01514             sparseMatrix( rindex, cindex ) = tl->vr_rd[i];
01515         }
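            // e.g., if the received tuples reference global rows { 17, 3, 17 }, rowMap
            // assigns them local indices { 0, 1, 0 } and row_gdofmap becomes { 17, 3 }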
01516         tl->reset();
01517     }
01518     else
01519 #endif
01520     {
01521         int rindexMax = 0, cindexMax = 0;
01522 
01523         for( int i = 0; i < nS; i++ )
01524         {
01525             int rindex, cindex;
01526             const int& vecRowValue = vecRow[i] - 1;  // rows and cols are 1-based in the file
01527             const int& vecColValue = vecCol[i] - 1;
01528 
01529             std::map< int, int >::iterator riter = rowMap.find( vecRowValue );
01530             if( riter == rowMap.end() )
01531             {
01532                 rowMap[vecRowValue] = rindexMax;
01533                 rindex              = rindexMax;
01534                 row_gdofmap.push_back( vecRowValue );
01535                 // row_dtoc_dofmap.push_back( vecRowValue );
01536                 rindexMax++;
01537             }
01538             else
01539                 rindex = riter->second;
01540 
01541             std::map< int, int >::iterator citer = colMap.find( vecColValue );
01542             if( citer == colMap.end() )
01543             {
01544                 colMap[vecColValue] = cindexMax;
01545                 cindex              = cindexMax;
01546                 col_gdofmap.push_back( vecColValue );
01547                 // col_dtoc_dofmap.push_back( vecColValue );
01548                 cindexMax++;
01549             }
01550             else
01551                 cindex = citer->second;
01552 
01553             sparseMatrix( rindex, cindex ) = vecS[i];
01554         }
01555     }
01556 
01557     m_nTotDofs_SrcCov = sparseMatrix.GetColumns();
01558     m_nTotDofs_Dest   = sparseMatrix.GetRows();
01559 
01560 #ifdef MOAB_HAVE_EIGEN3
01561     this->copy_tempest_sparsemat_to_eigen3();
01562 #endif
01563 
01564     // Reset the source and target data first
01565     m_rowVector.setZero();
01566     m_colVector.setZero();
01567 
01568     return moab::MB_SUCCESS;
01569 }
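
// A minimal calling sketch for ReadParallelMap (illustrative only; "onlineMap" and
// the list of owned DoF IDs are assumed to exist in the caller):
//
//   std::vector< int > owned_dof_ids;  // 1-based global DoF IDs owned by this task
//   // ... fill owned_dof_ids from the application's decomposition ...
//   moab::ErrorCode rval = onlineMap->ReadParallelMap( "outputMap.nc", owned_dof_ids,
//                                                      /*row_partition*/ true );
//   MB_CHK_ERR( rval );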
01570 
01571 ///////////////////////////////////////////////////////////////////////////////