MOAB: Mesh Oriented datABase  (version 5.4.1)
hypre_test.cpp
// Parts of this test have been taken from MFEM and HPCG benchmark example

#include "moab/Core.hpp"
#include "HypreSolver.hpp"

#include <cassert>
#include <cstdlib>
#include <iostream>
#include <vector>
using namespace std;

#undef DEBUG

moab::ErrorCode GenerateTestMatrixAndVectors( int nx,
                                              int ny,
                                              int nz,
                                              moab::HypreParMatrix& A,
                                              moab::HypreParVector& sol,
                                              moab::HypreParVector& rhs,
                                              moab::HypreParVector& exactsol );

int main( int argc, char* argv[] )
{
    // double norm, d;
    int ierr = 0;
    double times[5];
    int nx = 5, ny = 5, nz = 5;
#ifdef MOAB_HAVE_MPI
    MPI_Init( &argc, &argv );
    int size, rank;  // Number of MPI processes, My process ID
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
#else
    int size = 1;  // Serial case (not using MPI)
    int rank = 0;
#endif
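    // Note: the remainder of this test calls MPI routines (MPI_Wtime, MPI_Allreduce)
    // and the hypre-backed classes unconditionally, so it is only meaningful when
    // MOAB is built with MPI; the serial fallback above is nominal.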
    MPI_Comm comm = MPI_COMM_WORLD;

    moab::Core mbCore;
    moab::Interface& mb = mbCore;

    // rval = mb.load_file(example.c_str(), &euler_set, opts.c_str());

    moab::ParallelComm* pcomm = new moab::ParallelComm( &mb, comm );

    moab::HypreParMatrix A( pcomm );
    moab::HypreParVector x( pcomm ), b( pcomm ), xexact( pcomm );
#ifdef DEBUG

    if( rank == 0 )
    {
        int junk = 0;
        cout << "Press enter to continue" << endl;
        cin >> junk;
    }

    MPI_Barrier( MPI_COMM_WORLD );
#endif

    if( argc > 4 )
    {
        if( rank == 0 )
            cerr << "Usage:" << endl
                 << "\t" << argv[0] << " nx ny nz" << endl
                 << "     where nx, ny and nz are the local sub-block dimensions" << endl;

        exit( 1 );
    }
    else
    {
        if( rank == 0 )
            cout << "Using command:"
                 << "\t" << argv[0] << " nx ny nz" << endl;
    }
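    // Illustrative launch (the executable name depends on how the test is built), e.g.:
    //   mpiexec -np 2 ./hypre_test 10 10 10
    // which assembles a 10x10x10 sub-block on each of the two ranks.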

    times[0] = MPI_Wtime();  // Initialize it (if needed)

    if( argc == 4 )
    {
        nx = atoi( argv[1] );
        ny = atoi( argv[2] );
        nz = atoi( argv[3] );
    }

    GenerateTestMatrixAndVectors( nx, ny, nz, A, x, b, xexact );

    times[1]          = MPI_Wtime() - times[0];  // operator creation time
    const bool useCG  = true;                    // TODO make into command line option
    const bool useAMG = false;                   // TODO make into command line option
    int niters        = 0;
    double normr      = 0;
    int max_iter      = 150;
    double tolerance  = 1e-15;        // Set tolerance to zero to make all runs do max_iter iterations
    times[2]          = MPI_Wtime();  // reset
    moab::HypreSolver *lsSolver, *psSolver;

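    // The operator assembled above is symmetric and strictly diagonally dominant
    // (diagonal 27, off-diagonals -1), hence SPD, so CG is applicable; GMRES is
    // kept as an alternative that also handles nonsymmetric systems.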
    if( useCG )
    {
        lsSolver                 = new moab::HyprePCG( A );
        moab::HyprePCG* cgSolver = dynamic_cast< moab::HyprePCG* >( lsSolver );
        cgSolver->SetTol( tolerance );
        cgSolver->SetMaxIter( max_iter );
        cgSolver->SetLogging( 1 );
        cgSolver->Verbosity( 1 );
        cgSolver->SetResidualConvergenceOptions( 3, 0.0 );
    }
    else
    {
        lsSolver                      = new moab::HypreGMRES( A );
        moab::HypreGMRES* gmresSolver = dynamic_cast< moab::HypreGMRES* >( lsSolver );
        gmresSolver->SetTol( tolerance, normr );
        gmresSolver->SetMaxIter( max_iter );
        gmresSolver->SetLogging( 1 );
        gmresSolver->Verbosity( 5 );
        gmresSolver->SetKDim( 30 );
    }

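    // Preconditioner choice: BoomerAMG is hypre's algebraic multigrid solver, while
    // ParaSails is hypre's sparse approximate-inverse preconditioner (configured as
    // symmetric here to match the SPD operator).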
    if( useAMG )
    {
        psSolver = new moab::HypreBoomerAMG( A );
        dynamic_cast< moab::HypreBoomerAMG* >( psSolver )->SetSystemsOptions( 3 );
    }
    else
    {
        psSolver = new moab::HypreParaSails( A );
        dynamic_cast< moab::HypreParaSails* >( psSolver )->SetSymmetry( 1 );
    }

    if( psSolver ) lsSolver->SetPreconditioner( *psSolver );

    times[2] = MPI_Wtime() - times[2];  // solver setup time
    /* Now let us solve the linear system */
    times[3] = MPI_Wtime();  // reset
    ierr     = lsSolver->Solve( b, x );

    if( ierr ) cerr << "Error in call to CG/GMRES HYPRE Solver: " << ierr << ".\n" << endl;

    times[3] = MPI_Wtime() - times[3];  // solver time
    lsSolver->GetNumIterations( niters );
    lsSolver->GetFinalResidualNorm( normr );
    times[4] = MPI_Wtime() - times[0];
    // x.Print ( "solution.out" );
#ifdef MOAB_HAVE_MPI
    double t4    = times[3];
    double t4min = 0.0;
    double t4max = 0.0;
    double t4avg = 0.0;
    MPI_Allreduce( &t4, &t4min, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD );
    MPI_Allreduce( &t4, &t4max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
    MPI_Allreduce( &t4, &t4avg, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD );
    t4avg = t4avg / ( (double)size );
#endif

    if( rank == 0 )
    {  // Only PE 0 needs to compute and report timing results
        // double fniters = niters;
        // double fnrow = A.NNZ();
        // double fnnz = A.M();
        // double fnops_ddot = fniters * 4 * fnrow;
        // double fnops_waxpby = fniters * 6 * fnrow;
        // double fnops_sparsemv = fniters * 2 * fnnz;
        // double fnops = fnops_ddot + fnops_waxpby + fnops_sparsemv;
        std::cout << "Testing Laplace problem solver with HYPRE interface\n";
        {
            std::cout << "\tParallelism\n";
#ifdef MOAB_HAVE_MPI
            std::cout << "Number of MPI ranks: \t" << size << std::endl;
#else
            std::cout << "MPI not enabled\n";
#endif
        }
        std::cout << "Dimensions (nx*ny*nz): \t(" << nx << "*" << ny << "*" << nz << ")\n";
        std::cout << "Number of iterations: \t" << niters << std::endl;
        std::cout << "Final residual: \t" << normr << std::endl;
        std::cout << "************ Performance Summary (times in sec) *************" << std::endl;
        {
            std::cout << "\nTime Summary" << std::endl;
            std::cout << "Total             \t" << times[4] << std::endl;
            std::cout << "Operator Creation \t" << times[1] << std::endl;
            std::cout << "Solver Setup      \t" << times[2] << std::endl;
            std::cout << "HYPRE Solver      \t" << times[3] << std::endl;
            std::cout << "Avg. Solver       \t" << t4avg << std::endl;
        }
    }

    // Compute difference between known exact solution and computed solution
    // All processors are needed here.
    //   double residual = 0;
    //   if ((ierr = ComputeLinfError(x, xexact, &residual)))
    //     cerr << "Error in call to ComputeLinfError: " << ierr << ".\n" << endl;
    //   if (rank==0)
    //      cout << "Difference between computed and exact  = "
    //           << residual << ".\n" << endl;
    delete lsSolver;
    delete psSolver;
    // Finish up
#ifdef MOAB_HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
}

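// Assemble the test problem, following the HPCG benchmark example: each rank owns an
// nx x ny x nz sub-block of grid points, with the per-rank blocks stacked in the z
// direction. The operator uses a 27-point (optionally 7-point) stencil with a diagonal
// of 27 and off-diagonal entries of -1, and the right-hand side is chosen so that the
// exact solution of the linear system is the all-ones vector.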
moab::ErrorCode GenerateTestMatrixAndVectors( int nx,
                                              int ny,
                                              int nz,
                                              moab::HypreParMatrix& A,
                                              moab::HypreParVector& sol,
                                              moab::HypreParVector& rhs,
                                              moab::HypreParVector& exactsol )
{
#ifdef DEBUG
    int debug = 1;
#else
    int debug = 0;
#endif
#ifdef MOAB_HAVE_MPI
    int size, rank;  // Number of MPI processes, My process ID
    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
#else
    int size  = 1;  // Serial case (not using MPI)
    int rank  = 0;
#endif
    // Set this bool to true if you want a 7-pt stencil instead of a 27-pt stencil
    bool use_7pt_stencil = false;
    const int NNZPERROW  = 27;

    int local_nrow = nx * ny * nz;            // This is the size of our subblock
    assert( local_nrow > 0 );                 // Must have something to work with
    int local_nnz  = NNZPERROW * local_nrow;  // Approximately 27 nonzeros per row (except for boundary nodes)
    int total_nrow = 0;
    MPI_Allreduce( &local_nrow, &total_nrow, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );

    // int total_nrow = local_nrow * size; // Total number of grid points in mesh
    long long total_nnz =
        NNZPERROW * (long long)total_nrow;  // Approximately 27 nonzeros per row (except for boundary nodes)
    int start_row = local_nrow * rank;      // Each processor gets a section of a chimney stack domain
    int stop_row  = start_row + local_nrow - 1;

    // Allocate arrays that are of length local_nrow
    // Eigen::SparseMatrix<double, Eigen::RowMajor> diagMatrix(local_nrow, local_nrow);
    // diagMatrix.reserve(local_nnz);
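    // Size the parallel matrix and vectors over total_nrow global rows; this rank
    // owns the contiguous row range [start_row, stop_row].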
    HYPRE_Int col[2] = { start_row, stop_row };
    A.resize( total_nrow, col );

    sol.resize( total_nrow, start_row, stop_row );
    rhs.resize( total_nrow, start_row, stop_row );
    exactsol.resize( total_nrow, start_row, stop_row );
    long long nnzglobal = 0;

    for( int iz = 0; iz < nz; iz++ )
    {
        for( int iy = 0; iy < ny; iy++ )
        {
            for( int ix = 0; ix < nx; ix++ )
            {
                const int curlocalrow = iz * nx * ny + iy * nx + ix;
                const int currow      = start_row + curlocalrow;
                int nnzrow            = 0;
                std::vector< double > colvals;
                std::vector< int > indices;

                for( int sz = -1; sz <= 1; sz++ )
                {
                    for( int sy = -1; sy <= 1; sy++ )
                    {
                        for( int sx = -1; sx <= 1; sx++ )
                        {
                            const int curlocalcol = sz * nx * ny + sy * nx + sx;
                            const int curcol      = currow + curlocalcol;

                            // Since we have a stack of nx by ny by nz domains, stacked in the z
                            // direction, we check whether sx and sy reach outside of the domain,
                            // while the check that curcol is valid is sufficient to cover the z
                            // values
                            if( ( ix + sx >= 0 ) && ( ix + sx < nx ) && ( iy + sy >= 0 ) && ( iy + sy < ny ) &&
                                ( curcol >= 0 && curcol < total_nrow ) )
                            {
                                if( !use_7pt_stencil || ( sz * sz + sy * sy + sx * sx <= 1 ) )
                                {   // This logic will skip over points that are not part of a 7-pt
                                    // stencil
                                    if( curcol == currow )
                                    {
                                        colvals.push_back( 27.0 );
                                    }
                                    else
                                    {
                                        colvals.push_back( -1.0 );
                                    }

                                    indices.push_back( curcol );
                                    nnzrow++;
                                }
                            }
                        }  // end sx loop
                    }      // end sy loop
                }          // end sz loop

                int ncols = indices.size();
                A.SetValues( 1, &ncols, &currow, indices.data(), colvals.data() );
                // diagMatrix.set_row_values ( curlocalrow, indices.size(), indices.data(),
                // colvals.data() );
                nnzglobal += nnzrow;
                sol.SetValue( currow, 0.0 );
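                // The row sum is 27 - (nnzrow - 1), so this right-hand side makes the
                // all-ones vector the exact solution (stored in exactsol below).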
                rhs.SetValue( currow, 27.0 - ( (double)( nnzrow - 1 ) ) );
                exactsol.SetValue( currow, 1.0 );
            }  // end ix loop
        }      // end iy loop
    }          // end iz loop

    if( debug )
        cout << "Global size of the matrix: " << total_nrow << " and NNZ (estimate) = " << total_nnz
             << " NNZ (actual) = " << nnzglobal << endl;

    if( debug )
        cout << "Process " << rank << " of " << size << " has " << local_nrow << " rows: global rows " << start_row
             << " through " << stop_row << endl;

    if( debug ) cout << "Process " << rank << " of " << size << " has " << local_nnz << " nonzeros." << endl;

    A.FinalizeAssembly();
    sol.FinalizeAssembly();
    rhs.FinalizeAssembly();
    exactsol.FinalizeAssembly();
    // A.Print("matrix.out");
    return moab::MB_SUCCESS;
}