// MeshKit 1.0  (Doxygen page-header artifact retained as a comment)
00001 // IARoundingNlp.cpp 00002 // Interval Assignment for Meshkit 00003 // 00004 // Adapted from 00005 // Copyright (C) 2005, 2006 International Business Machines and others. 00006 // All Rights Reserved. 00007 // This code is published under the Eclipse Public License. 00008 // 00009 // $Id: hs071_nlp.cpp 1864 2010-12-22 19:21:02Z andreasw $ 00010 // 00011 // Authors: Carl Laird, Andreas Waechter IBM 2005-08-16 00012 00013 #include "meshkit/IARoundingNlp.hpp" 00014 #include "meshkit/IAData.hpp" 00015 #include "meshkit/IPData.hpp" 00016 #include "meshkit/IASolution.hpp" 00017 #include "meshkit/IANlp.hpp" 00018 #include "meshkit/IASolverToolInt.hpp" 00019 00020 #include <math.h> 00021 #include <limits> 00022 #include <algorithm> 00023 00024 // for printf 00025 #ifdef HAVE_CSTDIO 00026 # include <cstdio> 00027 #else 00028 # ifdef HAVE_STDIO_H 00029 # include <stdio.h> 00030 # else 00031 # error "don't have header file for stdio" 00032 # endif 00033 #endif 00034 00035 00036 namespace MeshKit 00037 { 00038 00039 bool IARoundingNlp::randomize_weights_of_non_int() 00040 { 00041 IASolverToolInt sti(data, solution); 00042 return sti.randomize_weights_of_non_int(&weights, 0.023394588); 00043 } 00044 00045 00046 // the objective function we should use for weights 00047 double IARoundingNlp::f_x_value( double I_i, double x_i ) 00048 { 00049 return x_i > I_i ? 
00050 (x_i - I_i) / I_i : 00051 1.103402234045 * (I_i - x_i) / x_i; 00052 // expected is 1/100 to 4 or so 00053 00054 // the range of magnitudes of these weights is too high, the weights are too non-linear if we take them to a power 00055 // was 00056 // const double fh = IANlp::eval_R_i(data->I[i], xl+1); 00057 } 00058 00059 // constructor 00060 IARoundingNlp::IARoundingNlp(const IAData *data_ptr, const IPData *ip_data_ptr, IASolution *solution_ptr, 00061 const bool set_silent): 00062 data(data_ptr), ipData(ip_data_ptr), solution(solution_ptr), baseNlp(data_ptr, solution_ptr, set_silent), 00063 weights(), 00064 silent(set_silent), debugging(false), verbose(true) // true 00065 { 00066 if (!silent) 00067 { 00068 printf("\nIARoundingNLP Problem size:\n"); 00069 printf(" number of variables: %lu\n", data->I.size()); 00070 printf(" number of constraints: %lu\n\n", data->constraints.size()); 00071 } 00072 00073 weights.resize(data->I.size()); 00074 if (0 && debugging) 00075 printf("raw linear +x weights: "); 00076 for (int i = 0; i < data->num_variables(); ++i) 00077 { 00078 const double x = ipData->relaxedSolution[i]; 00079 const double xl = floor(x); 00080 // const double xl = floor( ipData.relaxedSolution[i] ); 00081 assert(xl >= 1.); 00082 const double fl = f_x_value(data->I[i], xl); 00083 const double fh = f_x_value(data->I[i], xl+1); 00084 const double w = fh - fl; 00085 weights[i] = w; 00086 00087 if (0 && debugging) 00088 printf(" %f", w); 00089 } 00090 00091 // ensure weights are unique 00092 weights.uniquify(1., 1.e4); 00093 00094 if (debugging) 00095 { 00096 printf("\n"); 00097 for (int i = 0; i < data->num_variables(); ++i) 00098 { 00099 const double x = ipData->relaxedSolution[i]; 00100 const double xl = floor(x); 00101 printf("x %d (%f) in [%d,%d] weight %10.4f\n", i, x, (int) xl, (int) (xl + 1.), weights[i]); 00102 } 00103 } 00104 } 00105 00106 00107 // n = number of variables 00108 // m = number of constraints (not counting variable bounds) 00109 00110 
00111 IARoundingNlp::~IARoundingNlp() {data = NULL;} 00112 00113 // returns the size of the problem 00114 bool IARoundingNlp::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, 00115 Index& nnz_h_lag, IndexStyleEnum& index_style) 00116 { 00117 bool base_ok = baseNlp.get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style); 00118 nnz_h_lag = 0; // nele_h 00119 return true && base_ok; 00120 // need to change this if there are more variables, such as delta-minuses 00121 } 00122 00123 // returns the variable bounds 00124 bool IARoundingNlp::get_bounds_info(Index n, Number* x_l, Number* x_u, 00125 Index m, Number* g_l, Number* g_u) 00126 { 00127 const bool base_ok = baseNlp.get_bounds_info(n, x_l, x_u, m, g_l, g_u); 00128 00129 for (Index i=0; i<n; ++i) 00130 { 00131 const double x = ipData->relaxedSolution[i]; 00132 const double xl = floor(x); 00133 assert(xl >= 1.); 00134 x_l[i] = xl; 00135 x_u[i] = xl + 1.; 00136 // if (debugging) 00137 // printf("x %d (%f) in [%d,%d]\n", i, x, (int) x_l[i], (int) x_u[i]); 00138 } 00139 00140 return true && base_ok; //means what? 
00141 } 00142 00143 // returns the initial point for the problem 00144 bool IARoundingNlp::get_starting_point(Index n, bool init_x, Number* x_init, 00145 bool init_z, Number* z_L, Number* z_U, 00146 Index m, bool init_lambda, 00147 Number* lambda) 00148 { 00149 // Minimal info is starting values for x, x_init 00150 // Improvement: we can provide starting values for the dual variables if we wish 00151 assert(init_x == true); 00152 assert(init_z == false); 00153 assert(init_lambda == false); 00154 00155 // initialize x to the relaxed solution 00156 for (Index i=0; i<n; ++i) 00157 { 00158 x_init[i] = ipData->relaxedSolution[i]; 00159 } 00160 00161 return true; 00162 } 00163 00164 00165 // returns the value of the objective function 00166 bool IARoundingNlp::eval_f(Index n, const Number* x, bool new_x, Number& obj_value) 00167 { 00168 assert(n == data->num_variables()); 00169 00170 double obj = 0.; 00171 for (Index i = 0; i<n; ++i) 00172 { 00173 const double xl = floor( ipData->relaxedSolution[i] ); 00174 obj += weights[i] * (x[i] - xl); 00175 } 00176 00177 obj_value = obj; 00178 00179 return true; 00180 } 00181 00182 // return the gradient of the objective function grad_{x} f(x) 00183 bool IARoundingNlp::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f) 00184 { 00185 //printf("E "); 00186 00187 assert(n == data->num_variables()); 00188 00189 for (Index i = 0; i<n; ++i) 00190 { 00191 grad_f[i] = weights[i]; 00192 } 00193 00194 return true; 00195 } 00196 00197 // return the value of the constraints: g(x) 00198 bool IARoundingNlp::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g) 00199 { 00200 return baseNlp.eval_g(n, x, new_x, m, g); 00201 } 00202 00203 // return the structure or values of the jacobian 00204 bool IARoundingNlp::eval_jac_g(Index n, const Number* x, bool new_x, 00205 Index m, Index nele_jac, Index* iRow, Index *jCol, 00206 Number* values) 00207 { 00208 return baseNlp.eval_jac_g(n, x, new_x, m, nele_jac, iRow, jCol, values); 
00209 } 00210 00211 //return the structure or values of the hessian 00212 bool IARoundingNlp::eval_h(Index n, const Number* x, bool new_x, 00213 Number obj_factor, Index m, const Number* lambda, 00214 bool new_lambda, Index nele_hess, Index* iRow, 00215 Index* jCol, Number* values) 00216 { 00217 // because the constraints are linear 00218 // and the objective function is linear 00219 // the hessian for this problem is actually empty 00220 00221 assert(nele_hess == 0); 00222 00223 // This is a symmetric matrix, fill the lower left triangle only. 00224 if (values == NULL) { 00225 // return the structure. 00226 ; 00227 } 00228 else { 00229 // return the values. 00230 ; 00231 } 00232 00233 return true; 00234 } 00235 00236 void IARoundingNlp::finalize_solution(SolverReturn status, 00237 Index n, const Number* x, const Number* z_L, const Number* z_U, 00238 Index m, const Number* g, const Number* lambda, 00239 Number obj_value, 00240 const IpoptData* ip_data, 00241 IpoptCalculatedQuantities* ip_cq) 00242 { 00243 baseNlp.finalize_solution(status, n, x, z_L, z_U, m, g, lambda, obj_value, ip_data, ip_cq); 00244 } 00245 00246 } // namespace MeshKit