diff --git a/python/demo_dimscaling.py b/python/demo_dimscaling.py
index fa38b11..67c257b 100644
--- a/python/demo_dimscaling.py
+++ b/python/demo_dimscaling.py
@@ -42,9 +42,10 @@ def func(x):
 
 # We decided to change some of them
 params['n_init_samples'] = 30
-params['n_iter_relearn'] = 20
+params['n_iter_relearn'] = 1
+params['l_type'] = "mcmc"
 params['noise'] = 1e-10
-params['kernel_name'] = "kMaternISO5"
+params['kernel_name'] = "kMaternARD5"
 params['kernel_hp_mean'] = [1]
 params['kernel_hp_std'] = [5]
 params['surr_name'] = "sStudentTProcessNIG"
diff --git a/src/bayesoptdisc.cpp b/src/bayesoptdisc.cpp
index 9d88871..263647f 100644
--- a/src/bayesoptdisc.cpp
+++ b/src/bayesoptdisc.cpp
@@ -21,8 +21,10 @@
 ------------------------------------------------------------------------
 */
+
 #include "bayesopt/bayesopt.hpp"
+#include <algorithm>
 #include <limits>
 
 //#include "randgen.hpp"
 #include "lhs.hpp"
@@ -67,19 +69,26 @@ namespace bayesopt
 
   void DiscreteModel::findOptimal(vectord &xOpt)
   {
-    xOpt = *mInputSet.begin();
-    double min = evaluateCriteria(xOpt);
+    std::vector<double> critv(mInputSet.size());
+    std::transform(mInputSet.begin(),mInputSet.end(),critv.begin(),
+                   boost::bind(&DiscreteModel::evaluateCriteria,this,_1));
+
+    xOpt = mInputSet[std::distance(critv.begin(),
+                     std::max_element(critv.begin(),critv.end()))];
+
+    // xOpt = *mInputSet.begin();
+    // double min = evaluateCriteria(xOpt);
 
-    for(vecOfvec::iterator it = mInputSet.begin();
-        it != mInputSet.end(); ++it)
-      {
-        double current = evaluateCriteria(*it);
-        if (current < min)
-          {
-            xOpt = *it;
-            min = current;
-          }
-      }
+    // for(vecOfvec::iterator it = mInputSet.begin();
+    //     it != mInputSet.end(); ++it)
+    //   {
+    //     double current = evaluateCriteria(*it);
+    //     if (current < min)
+    //       {
+    //         xOpt = *it;
+    //         min = current;
+    //       }
+    //   }
   }
 
   //In this case, it is the trivial function
diff --git a/src/inneroptimization.cpp b/src/inneroptimization.cpp
index 240034b..65e536f 100644
--- a/src/inneroptimization.cpp
+++ b/src/inneroptimization.cpp
@@ -64,8 +64,16 @@ namespace bayesopt
 
      // It seems BOBYQA can be unstable if the same point is tested
      // repeatedly. NLOPT bug?
-      opt.set_ftol_rel(1e-12);
-      opt.set_ftol_abs(1e-12);
+      if (algo == nlopt::LN_BOBYQA)
+        {
+          opt.set_ftol_rel(1e-8);
+          opt.set_ftol_abs(1e-8);
+        }
+      else
+        {
+          opt.set_ftol_rel(1e-12);
+          opt.set_ftol_abs(1e-12);
+        }
 
      std::copy(Xnext.begin(),Xnext.end(),xstd.begin());
 
@@ -126,14 +134,14 @@ namespace bayesopt
 
      eval_func fpointer = &(NLOPT_Optimization::evaluate_nlopt);
      void* objPointer = static_cast<void*>(rbobj);
      const size_t nIter = 20;
-      std::vector<double> vd(n);
-      std::vector<double> vu(n);
+      // std::vector<double> vd(n);
+      // std::vector<double> vu(n);
 
-      for (size_t i = 0; i < n; ++i)
-        {
-          vd[i] = Xnext(i) - 0.01;
-          vu[i] = Xnext(i) + 0.01;
-        }
+      // for (size_t i = 0; i < n; ++i)
+      //   {
+      //     vd[i] = Xnext(i) - 0.01;
+      //     vu[i] = Xnext(i) + 0.01;
+      //   }
 
      vectord start = Xnext;
@@ -218,11 +226,22 @@ namespace bayesopt
      //If the point is exactly at the limit, we may have trouble.
      for (size_t i = 0; i < n; ++i)
        {
-          if (Xnext(i)-mDown[i] < 0.0001) Xnext(i) += 0.0001;
-          if (mUp[i] - Xnext(i) < 0.0001) Xnext(i) -= 0.0001;
+          if (Xnext(i)-mDown[i] < 0.0001)
+            {
+              Xnext(i) += 0.0001;
+              FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
+            }
+          if (mUp[i] - Xnext(i) < 0.0001)
+            {
+              Xnext(i) -= 0.0001;
+              FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
+            }
        }
 
-      fmin = run_nlopt(nlopt::LN_BOBYQA,fpointer,Xnext,maxf2,
+      // BOBYQA may fail at this point. Could it be that EI is not twice differentiable?
+      // fmin = run_nlopt(nlopt::LN_BOBYQA,fpointer,Xnext,maxf2,
+      //                  mDown,mUp,objPointer);
+      fmin = run_nlopt(nlopt::LN_COBYLA,fpointer,Xnext,maxf2,
                       mDown,mUp,objPointer);
      FILE_LOG(logDEBUG) << "2nd opt " << maxf2 << "-> " << Xnext
                         << " f() ->" << fmin;
diff --git a/utils/testfunctions.hpp b/utils/testfunctions.hpp
index 4115f5d..eaa5459 100644
--- a/utils/testfunctions.hpp
+++ b/utils/testfunctions.hpp
@@ -85,7 +85,7 @@ class BraninNormalized: public bayesopt::ContinuousModel
   double branin(double x, double y)
   {
     const double pi = boost::math::constants::pi<double>();
-    const double rpi = boost::math::constants::root_pi<double>();
+    const double rpi = pi*pi;
     return sqr(y-(5.1/(4*rpi))*sqr(x)
               +5*x/pi-6)+10*(1-1/(8*pi))*cos(x)+10;
   };
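Note on DiscreteModel::findOptimal (src/bayesoptdisc.cpp): the hand-written scan over mInputSet is replaced by a transform-then-max_element argmax, which also flips the convention from keeping the smallest criterion value to keeping the largest. A minimal standalone sketch of the same pattern follows; the scoring function and candidate values here are made up for illustration and stand in for evaluateCriteria and the library's input set.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <iterator>
#include <vector>

// Stand-in for DiscreteModel::evaluateCriteria: score one candidate point.
double evaluateCriteria(double x) { return -(x - 2.0) * (x - 2.0); }

int main()
{
  std::vector<double> inputSet;          // the discrete candidate set
  inputSet.push_back(0.0);
  inputSet.push_back(1.0);
  inputSet.push_back(2.5);
  inputSet.push_back(4.0);

  // Score every candidate, then take the index of the best (largest) score.
  std::vector<double> critv(inputSet.size());
  std::transform(inputSet.begin(), inputSet.end(), critv.begin(),
                 evaluateCriteria);
  const std::ptrdiff_t best =
    std::distance(critv.begin(),
                  std::max_element(critv.begin(), critv.end()));

  std::cout << "optimum at " << inputSet[best] << std::endl;  // prints 2.5
  return 0;
}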
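Note on src/inneroptimization.cpp: stopping tolerances are now chosen per algorithm (BOBYQA gets looser ones, since it appears unstable near convergence), and the second local stage switches from LN_BOBYQA to LN_COBYLA, which does not build a quadratic model of the objective. A standalone NLopt sketch of that per-algorithm selection; the quadratic objective and unit-box bounds are invented for illustration, not taken from the library.

#include <cmath>
#include <iostream>
#include <vector>
#include <nlopt.hpp>

// Toy objective; the real code minimizes an acquisition criterion instead.
double objective(const std::vector<double>& x, std::vector<double>& grad, void*)
{
  (void)grad;  // derivative-free algorithms ignore the gradient
  return std::pow(x[0] - 0.3, 2) + std::pow(x[1] - 0.7, 2);
}

int main()
{
  nlopt::algorithm algo = nlopt::LN_COBYLA;  // or nlopt::LN_BOBYQA
  nlopt::opt opt(algo, 2);
  opt.set_lower_bounds(std::vector<double>(2, 0.0));
  opt.set_upper_bounds(std::vector<double>(2, 1.0));
  opt.set_min_objective(objective, NULL);
  opt.set_maxeval(500);

  // Mirror the patch: looser tolerances for BOBYQA, tight ones otherwise.
  if (algo == nlopt::LN_BOBYQA) { opt.set_ftol_rel(1e-8);  opt.set_ftol_abs(1e-8);  }
  else                          { opt.set_ftol_rel(1e-12); opt.set_ftol_abs(1e-12); }

  std::vector<double> x(2, 0.5);
  double fmin;
  opt.optimize(x, fmin);
  std::cout << "min " << fmin << " at (" << x[0] << "," << x[1] << ")\n";
  return 0;
}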
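Note on utils/testfunctions.hpp: the Branin-Hoo function divides the squared term by 4*pi^2, so rpi must be pi squared; root_pi (sqrt(pi)) was a bug. A quick standalone check of the corrected formula at the three known global minimizers, each of which should evaluate to the known minimum of about 0.397887:

#include <cmath>
#include <cstdio>

// Branin-Hoo with the corrected 4*pi^2 denominator.
double branin(double x, double y)
{
  const double pi = std::acos(-1.0);
  const double rpi = pi * pi;               // was erroneously sqrt(pi)
  const double a = y - (5.1 / (4 * rpi)) * x * x + 5 * x / pi - 6;
  return a * a + 10 * (1 - 1 / (8 * pi)) * std::cos(x) + 10;
}

int main()
{
  const double pi = std::acos(-1.0);
  // The three global minima; each should print ~0.397887.
  std::printf("%f %f %f\n",
              branin(-pi, 12.275), branin(pi, 2.275), branin(3 * pi, 2.475));
  return 0;
}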