MOAB: Mesh Oriented datABase  (version 5.2.1)
FeasibleNewton.cpp
Go to the documentation of this file.
00001 /* *****************************************************************
00002     MESQUITE -- The Mesh Quality Improvement Toolkit
00003 
00004     Copyright 2004 Sandia Corporation and Argonne National
00005     Laboratory.  Under the terms of Contract DE-AC04-94AL85000
00006     with Sandia Corporation, the U.S. Government retains certain
00007     rights in this software.
00008 
00009     This library is free software; you can redistribute it and/or
00010     modify it under the terms of the GNU Lesser General Public
00011     License as published by the Free Software Foundation; either
00012     version 2.1 of the License, or (at your option) any later version.
00013 
00014     This library is distributed in the hope that it will be useful,
00015     but WITHOUT ANY WARRANTY; without even the implied warranty of
00016     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00017     Lesser General Public License for more details.
00018 
00019     You should have received a copy of the GNU Lesser General Public License
00020     (lgpl.txt) along with this library; if not, write to the Free Software
00021     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
00022 
00023     diachin2@llnl.gov, djmelan@sandia.gov, mbrewer@sandia.gov,
00024     pknupp@sandia.gov, tleurent@mcs.anl.gov, tmunson@mcs.anl.gov
00025 
00026   ***************************************************************** */
00027 //
00028 //    AUTHOR: Thomas Leurent <tleurent@mcs.anl.gov>
00029 //       ORG: Argonne National Laboratory
00030 //    E-MAIL: tleurent@mcs.anl.gov
00031 //
00032 // ORIG-DATE: 15-Jan-03 at 08:05:56
00033 //  LAST-MOD: 15-Jun-04 at 15:45:00 by Thomas Leurent
00034 //
00035 // DESCRIPTION:
00036 // ============
00037 /*!
00038   \file   FeasibleNewton.cpp
00039   \brief
00040 
00041   Implements the FeasibleNewton class member functions.
00042 
00043   \author Thomas Leurent
00044   \author Todd Munson
00045   \date   2003-01-15
00046 */
00047 // DESCRIP-END.
00048 //
00049 
00050 #include "FeasibleNewton.hpp"
00051 #include "MsqFreeVertexIndexIterator.hpp"
00052 #include "MsqDebug.hpp"
00053 #include "XYPlanarDomain.hpp"
00054 
00055 using namespace MBMesquite;
00056 
00057 std::string FeasibleNewton::get_name() const
00058 {
00059     return "FeasibleNewton";
00060 }
00061 
00062 PatchSet* FeasibleNewton::get_patch_set()
00063 {
00064     return PatchSetUser::get_patch_set();
00065 }
00066 
00067 FeasibleNewton::FeasibleNewton( ObjectiveFunction* of )
00068     : VertexMover( of ), PatchSetUser( true ), convTol( 1e-6 ), coordsMem( 0 ), havePrintedDirectionMessage( false )
00069 {
00070     TerminationCriterion* default_crit = get_inner_termination_criterion();
00071     default_crit->add_absolute_gradient_L2_norm( 5e-5 );
00072 }
00073 
00074 void FeasibleNewton::initialize( PatchData& pd, MsqError& err )
00075 {
00076     // Cannot do anything.  Variable sizes with maximum size dependent
00077     // upon the entire MeshSet.
00078     coordsMem = pd.create_vertices_memento( err );MSQ_CHKERR( err );
00079     havePrintedDirectionMessage = false;
00080 }
00081 
00082 void FeasibleNewton::initialize_mesh_iteration( PatchData& pd, MsqError& /*err*/ )
00083 {
00084     pd.reorder();
00085 }
00086 
/*! Core Feasible Newton loop.  Each iteration: (1) evaluates the objective
 *  value, gradient, and Hessian at the current vertex positions; (2) solves
 *  the Newton system H*d = -g with a preconditioned conjugate-gradient
 *  solver; (3) performs an Armijo backtracking line search along d, rolling
 *  vertices back via the coordinate memento when a trial step is rejected;
 *  (4) if the Newton line search bottoms out, retries with a scaled
 *  steepest-descent direction before giving up.  Runs until the inner
 *  termination criterion signals.  Surface meshes are only optimized when
 *  their domain is an XYPlanarDomain.
 *
 *  \param pd   patch whose free vertices are repositioned in place
 *  \param err  set to INTERNAL_ERROR on an invalid patch, otherwise
 *              propagated from callees
 */
void FeasibleNewton::optimize_vertex_positions( PatchData& pd, MsqError& err )
{
    MSQ_FUNCTION_TIMER( "FeasibleNewton::optimize_vertex_positions" );
    MSQ_DBGOUT( 2 ) << "\no  Performing Feasible Newton optimization.\n";

    //
    // the only valid 2D meshes that FeasibleNewton works for are truly planar which
    // lie in the X-Y coordinate plane.
    //

    XYPlanarDomain* xyPlanarDomainPtr = dynamic_cast< XYPlanarDomain* >( pd.get_domain() );
    // only optimize if input mesh is a volume or an XYPlanarDomain
    if( !pd.domain_set() || xyPlanarDomainPtr != NULL )
    {
        // Line-search / convergence tuning constants.
        const double sigma   = 1e-4;   // Armijo sufficient-decrease factor
        const double beta0   = 0.25;   // faster step shrink rate (objective undefined)
        const double beta1   = 0.80;   // slower step shrink rate (insufficient decrease)
        const double tol1    = 1e-8;   // smallest step length for the Newton line search
        const double tol2    = 1e-12;  // smallest step length for the gradient line search
        const double epsilon = 1e-10;  // slack for descent / sufficient-decrease tests
        double original_value, new_value;
        double beta;

        int nv = pd.num_free_vertices();
        std::vector< Vector3D > grad( nv ), d( nv );
        bool fn_bool = true;  // bool used for determining validity of patch

        OFEvaluator& objFunc = get_objective_function_evaluator();

        int i;

        // TODD -- Don't blame the code for bad things happening when using a
        //         bad termination test or requesting more accuracy than is
        //       possible.
        //
        //         Also,

        // 1.  Allocate a hessian and calculate the sparsity pattern.
        mHessian.initialize( pd, err );MSQ_ERRRTN( err );

        // does the Feasible Newton iteration until stopping is required.
        // Terminate when inner termination criterion signals.

        /* Computes the value of the stopping criterion*/
        TerminationCriterion* term_crit = get_inner_termination_criterion();
        while( !term_crit->terminate() )
        {
            // 2. Evaluate objective value, gradient, and Hessian at the
            //    current iterate in a single update call.
            fn_bool = objFunc.update( pd, original_value, grad, mHessian, err );MSQ_ERRRTN( err );
            if( !fn_bool )
            {
                MSQ_SETERR( err )
                ( "invalid patch for hessian calculation", MsqError::INTERNAL_ERROR );
                return;
            }

            if( MSQ_DBG( 3 ) )
            {  // avoid expensive norm calculations if debug flag is off
                MSQ_DBGOUT( 3 ) << "  o  objective function: " << original_value << std::endl;
                MSQ_DBGOUT( 3 ) << "  o  gradient norm: " << length( grad ) << std::endl;
                MSQ_DBGOUT( 3 ) << "  o  Hessian norm: " << mHessian.norm() << std::endl;
            }

            // Prints out free vertices coordinates.
            //
            // Comment out the following because it is way too verbose for larger
            // problems.  Consider doing:
            //  inner_term_crit->write_mesh_steps( "filename.vtk" );
            // instead.
            // - j.kraftcheck 2010-11-17
            //      if (MSQ_DBG(3)) {
            //        MSQ_DBGOUT(3) << "\n  o Free vertices ("<< pd.num_free_vertices()
            //                  <<")original coordinates:\n ";
            //        MSQ_ERRRTN(err);
            //        const MsqVertex* toto1 = pd.get_vertex_array(err); MSQ_ERRRTN(err);
            //        MsqFreeVertexIndexIterator ind1(pd, err); MSQ_ERRRTN(err);
            //        ind1.reset();
            //        while (ind1.next()) {
            //          MSQ_DBGOUT(3) << "\t\t\t" << toto1[ind1.value()];
            //        }
            //      }

            // 4. Calculate a direction using preconditioned conjugate gradients
            //    to find a zero of the Newton system of equations (H*d = -g)
            //    (a) stop if conjugate iteration limit reached
            //    (b) stop if relative residual is small
            //    (c) stop if direction of negative curvature is obtained

            mHessian.cg_solver( arrptr( d ), arrptr( grad ), err );MSQ_ERRRTN( err );

            // 5. Check for descent direction (inner product of gradient and
            //    direction is negative).
            double alpha = inner( grad, d );
            // TODD -- Add back in if you encounter problems -- do a gradient
            //         step if the direction from the conjugate gradient solver
            //         is not a descent direction for the objective function.  We
            //         SHOULD always get a descent direction from the conjugate
            //         method though, unless the preconditioner is not positive
            //         definite.
            // If direction is positive, does a gradient (steepest descent) step.

            if( alpha > -epsilon )
            {

                MSQ_DBGOUT( 3 ) << "  o  alpha = " << alpha << " (rejected)" << std::endl;

                if( !havePrintedDirectionMessage )
                {
                    MSQ_PRINT( 1 )
                    ( "Newton direction not guaranteed descent.  Ensure preconditioner is positive "
                      "definite.\n" );
                    havePrintedDirectionMessage = true;
                }

                // TODD: removed performing gradient step here since we will use
                // gradient if step does not produce descent.  Instead we set
                // alpha to a small negative value.

                alpha = -epsilon;

                // alpha = inner(grad, grad, nv); // compute norm squared of gradient
                // if (alpha < 1) alpha = 1;    // take max with constant
                // for (i = 0; i < nv; ++i) {
                //   d[i] = -grad[i] / alpha;   // compute scaled gradient
                // }
                // alpha = inner(grad, d, nv);      // recompute alpha
                //              // equal to one for large gradient
            }
            else
            {
                MSQ_DBGOUT( 3 ) << "  o  alpha = " << alpha << std::endl;
            }

            alpha *= sigma;
            beta = 1.0;
            // Snapshot the current coordinates so rejected trial steps can be
            // rolled back during the line search below.
            pd.recreate_vertices_memento( coordsMem, err );MSQ_ERRRTN( err );

            // TODD: Unrolling the linesearch loop.  We do a function and
            // gradient evaluation when beta = 1.  Otherwise, we end up
            // in the linesearch regime.  We expect that several
            // evaluations will be made, so we only do a function evaluation
            // and finish with a gradient evaluation.  When beta = 1, we also
            // check the gradient for stability.

            // TODD -- the Armijo linesearch is based on the objective function,
            //         so theoretically we only need to evaluate the objective
            //         function.  However, near a very accurate solution, say with
            //         the two norm of the gradient of the objective function less
            //         than 1e-5, the numerical error in the objective function
            //         calculation is enough that the Armijo linesearch will
            //         fail.  To correct this situation, the iterate is accepted
            //         when the norm of the gradient is also small.  If you need
            //         high accuracy and have a large mesh, talk with Todd about
            //         the numerical issues so that we can fix it.

            // TODD -- the Armijo linesearch here is only good for differentiable
            //         functions.  When projections are involved, you should change
            //         to a form of the linesearch meant for nondifferentiable
            //         functions.

            pd.move_free_vertices_constrained( arrptr( d ), nv, beta, err );MSQ_ERRRTN( err );
            fn_bool = objFunc.evaluate( pd, new_value, grad, err );
            if( err.error_code() == err.BARRIER_VIOLATED )
                err.clear();  // barrier violated does not represent an actual error here
            MSQ_ERRRTN( err );

            if( ( fn_bool && ( original_value - new_value >= -alpha * beta - epsilon ) ) ||
                ( fn_bool && ( length( arrptr( grad ), nv ) < 100 * convTol ) ) )
            {
                // Armijo linesearch rules passed.
                MSQ_DBGOUT( 3 ) << "  o  beta = " << beta << " (accepted without line search)" << std::endl;
            }
            else
            {
                if( !fn_bool )
                {
                    // Function undefined.  Use the higher decrease rate.
                    beta *= beta0;
                    MSQ_DBGOUT( 3 ) << "  o  beta = " << beta << " (invalid step)" << std::endl;
                }
                else
                {
                    // Function defined, but not sufficient decrease
                    // Use the lower decrease rate.
                    beta *= beta1;
                    MSQ_DBGOUT( 3 ) << "  o  beta = " << beta << " (insufficient decrease)" << std::endl;
                }
                pd.set_to_vertices_memento( coordsMem, err );MSQ_ERRRTN( err );

                // Standard Armijo linesearch rules

                MSQ_DBGOUT( 3 ) << "  o  Doing line search" << std::endl;
                while( beta >= tol1 )
                {
                    // 6. Search along the direction
                    //    (a) trial = x + beta*d
                    pd.move_free_vertices_constrained( arrptr( d ), nv, beta, err );MSQ_ERRRTN( err );
                    //    (b) function evaluation
                    fn_bool = objFunc.evaluate( pd, new_value, err );
                    if( err.error_code() == err.BARRIER_VIOLATED )
                        err.clear();  // barrier violated does not represent an actual error here
                    MSQ_ERRRTN( err );

                    //    (c) check for sufficient decrease and stop
                    if( !fn_bool )
                    {
                        // function not defined at trial point
                        beta *= beta0;
                    }
                    else if( original_value - new_value >= -alpha * beta - epsilon )
                    {
                        // iterate is acceptable.
                        break;
                    }
                    else
                    {
                        // iterate is not acceptable -- shrink beta
                        beta *= beta1;
                    }
                    pd.set_to_vertices_memento( coordsMem, err );MSQ_ERRRTN( err );
                }

                if( beta < tol1 )
                {
                    // assert(pd.set_to_vertices_memento called last)

                    // TODD -- Lower limit on steplength reached.  Direction does not
                    //         appear to make sufficient progress decreasing the
                    //         objective function.  This can happen when you are
                    //         very near a solution due to numerical errors in
                    //         computing the objective function.  It can also happen
                    //         when the direction is not a descent direction and when
                    //         you are projecting the iterates onto a surface.
                    //
                    //         The latter cases require the use of a linesearch on
                    //         a gradient step.  If this linesearch terminates with
                    //         insufficient decrease, then you are at a critical
                    //         point and should stop!
                    //
                    //         The numerical errors with the objective function cannot
                    //         be overcome.  Eventually, the gradient step will
                    //         fail to compute a new direction and you will stop.

                    MSQ_PRINT( 1 )
                    ( "Sufficient decrease not obtained in linesearch; switching to gradient.\n" );

                    // Fall back to a scaled steepest-descent direction and
                    // repeat the Armijo search with the tighter limit tol2.
                    alpha = inner( arrptr( grad ), arrptr( grad ),
                                   nv );        // compute norm squared of gradient
                    if( alpha < 1 ) alpha = 1;  // take max with constant
                    for( i = 0; i < nv; ++i )
                    {
                        d[i] = -grad[i] / alpha;  // compute scaled gradient
                    }
                    alpha = inner( arrptr( grad ), arrptr( d ), nv );  // recompute alpha
                    alpha *= sigma;                                    // equal to one for large gradient
                    beta = 1.0;

                    // Standard Armijo linesearch rules
                    while( beta >= tol2 )
                    {
                        // 6. Search along the direction
                        //    (a) trial = x + beta*d
                        pd.move_free_vertices_constrained( arrptr( d ), nv, beta, err );MSQ_ERRRTN( err );
                        //    (b) function evaluation
                        fn_bool = objFunc.evaluate( pd, new_value, err );
                        if( err.error_code() == err.BARRIER_VIOLATED )
                            err.clear();  // barrier violated does not represent an actual error
                                          // here
                        MSQ_ERRRTN( err );

                        //    (c) check for sufficient decrease and stop
                        if( !fn_bool )
                        {
                            // function not defined at trial point
                            beta *= beta0;
                        }
                        else if( original_value - new_value >= -alpha * beta - epsilon )
                        {
                            // iterate is acceptable.
                            break;
                        }
                        else
                        {
                            // iterate is not acceptable -- shrink beta
                            beta *= beta1;
                        }
                        pd.set_to_vertices_memento( coordsMem, err );MSQ_ERRRTN( err );
                    }

                    if( beta < tol2 )
                    {
                        // assert(pd.set_to_vertices_memento called last)

                        // TODD -- Lower limit on steplength reached.  Gradient does not
                        //         appear to make sufficient progress decreasing the
                        //         objective function.  This can happen when you are
                        //         very near a solution due to numerical errors in
                        //         computing the objective function.  Most likely you
                        //         are at a critical point for the problem.

                        MSQ_PRINT( 1 )
                        ( "Sufficient decrease not obtained with gradient; critical point likely "
                          "found.\n" );
                        break;
                    }
                }

                // Compute the gradient at the new point -- needed by termination check
                fn_bool = objFunc.update( pd, new_value, grad, err );MSQ_ERRRTN( err );
            }

            // Prints out free vertices coordinates.
            //    if (MSQ_DBG(3)) {
            //      MSQ_DBGOUT(3) << "  o Free vertices new coordinates: \n";
            //      const MsqVertex* toto1 = pd.get_vertex_array(err); MSQ_ERRRTN(err);
            //      MsqFreeVertexIndexIterator ind(pd, err); MSQ_ERRRTN(err);
            //      ind.reset();
            //      while (ind.next()) {
            //        MSQ_DBGOUT(3) << "\t\t\t" << toto1[ind.value()];
            //      }
            //    }

            // checks stopping criterion
            term_crit->accumulate_patch( pd, err );MSQ_ERRRTN( err );
            term_crit->accumulate_inner( pd, new_value, arrptr( grad ), err );MSQ_ERRRTN( err );
        }
        MSQ_PRINT( 2 )( "FINISHED\n" );
    }
    else
    {
        std::cout << "WARNING: Feasible Newton optimization only supported for volume meshes" << std::endl
                  << "   and XYPlanarDomain surface meshes." << std::endl
                  << std::endl
                  << "Try a different solver such as Steepest Descent." << std::endl;
    }
}
00422 
/*! Called when a mesh iteration completes.  Intentionally empty: the vertex
 *  memento is kept alive across iterations and released in cleanup(). */
void FeasibleNewton::terminate_mesh_iteration( PatchData& /*pd*/, MsqError& /*err*/ )
{

    // Michael::  Should the vertices memento be delete here???
    //  cout << "- Executing FeasibleNewton::iteration_complete()\n";
}
00429 
/*! Releases the vertex-coordinate memento allocated in initialize().
 *  Resetting the pointer to NULL keeps a repeated cleanup() call safe
 *  (delete on NULL is a no-op). */
void FeasibleNewton::cleanup()
{
    delete coordsMem;
    coordsMem = NULL;
}
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines