From ccb44509eedf293579b67765dacb8efd2c5acdf6 Mon Sep 17 00:00:00 2001
From: Theodore Clarke
Date: Mon, 16 Aug 2021 06:15:25 -0700
Subject: [PATCH 01/13] updated callback tests for MOO

---
 nevergrad/optimization/test_callbacks.py | 25 +++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/nevergrad/optimization/test_callbacks.py b/nevergrad/optimization/test_callbacks.py
index 49d6f976f..92289207f 100644
--- a/nevergrad/optimization/test_callbacks.py
+++ b/nevergrad/optimization/test_callbacks.py
@@ -103,22 +103,32 @@ def test_progressbar_dump(tmp_path: Path) -> None:
         optimizer.tell(cand, 0)


+class _EarlyStoppingTestee:
+    def __init__(self) -> None:
+        self.num_calls = 0
+
+    def __call__(self, *args, **kwds) -> float:
+        self.num_calls += 1
+        return np.random.rand()
+
+
 def test_early_stopping() -> None:
-    instrum = ng.p.Instrumentation(
-        None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)), multiobjective=True
-    )
+    instrum = ng.p.Instrumentation(None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)))
+    func = _EarlyStoppingTestee()
     optimizer = optimizerlib.OnePlusOne(parametrization=instrum, budget=100)
     early_stopping = ng.callbacks.EarlyStopping(lambda opt: opt.num_ask > 3)
     optimizer.register_callback("ask", early_stopping)
-    optimizer.minimize(_func, verbosity=2)
-    # below functions are inlcuded in the docstring
+    optimizer.minimize(func, verbosity=2)
+    # num_ask is set at the end of ask, so the callback sees the old value.
+    assert func.num_calls == 4
+    # below functions are included in the docstring of EarlyStopping
     assert optimizer.current_bests["minimum"].mean < 12
     assert optimizer.recommend().loss < 12  # type: ignore


 def test_optimization_logger(caplog) -> None:
     instrum = ng.p.Instrumentation(
-        None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)), multiobjective=True
+        None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)), multiobjective=False
     )
     logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
     logger = logging.getLogger(__name__)
@@ -131,7 +141,8 @@ def test_optimization_logger(caplog) -> None:
     )
     with caplog.at_level(logging.INFO):
         optimizer.minimize(_func, verbosity=2)
+    print("num objectives: ", optimizer.num_objectives)
     assert (
-        "After 0, recommendation is Instrumentation(Tuple(None,2.0),Dict(array=Array{(3,2)},blublu=blublu,multiobjective=True))"
+        "After 0, recommendation is Instrumentation(Tuple(None,2.0),Dict(array=Array{(3,2)},blublu=blublu,multiobjective=False))"
         in caplog.text
     )

From 0bc525a5aac9d27dcaf02a6179718d58dbdad7ad Mon Sep 17 00:00:00 2001
From: Theodore Clarke
Date: Mon, 16 Aug 2021 06:39:56 -0700
Subject: [PATCH 02/13] removed stray print

---
 nevergrad/optimization/=                 | 0
 nevergrad/optimization/test_callbacks.py | 1 -
 2 files changed, 1 deletion(-)
 create mode 100644 nevergrad/optimization/=

diff --git a/nevergrad/optimization/= b/nevergrad/optimization/=
new file mode 100644
index 000000000..e69de29bb
diff --git a/nevergrad/optimization/test_callbacks.py b/nevergrad/optimization/test_callbacks.py
index 92289207f..f6a0daaef 100644
--- a/nevergrad/optimization/test_callbacks.py
+++ b/nevergrad/optimization/test_callbacks.py
@@ -141,7 +141,6 @@ def test_optimization_logger(caplog) -> None:
     )
     with caplog.at_level(logging.INFO):
         optimizer.minimize(_func, verbosity=2)
-    print("num objectives: ", optimizer.num_objectives)
     assert (
         "After 0, recommendation is Instrumentation(Tuple(None,2.0),Dict(array=Array{(3,2)},blublu=blublu,multiobjective=False))"
         in caplog.text
     )
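Patch 1 replaces the shared `_func` with a counting callable so the test can pin down exactly how many evaluations run before `EarlyStopping` fires. A minimal standalone sketch of the same pattern against nevergrad's public API (the objective, parametrization and budget here are illustrative, not from the patch):

```python
import numpy as np
import nevergrad as ng


class CountingFunc:
    """Objective that records how many times it has been evaluated."""

    def __init__(self) -> None:
        self.num_calls = 0

    def __call__(self, x: np.ndarray) -> float:
        self.num_calls += 1
        return float(np.sum(x ** 2))


func = CountingFunc()
optimizer = ng.optimizers.OnePlusOne(parametrization=ng.p.Array(shape=(2,)), budget=100)
# Stop once more than 3 candidates have been asked. num_ask is incremented at
# the end of ask, so the criterion first holds after the 4th ask completes,
# which is why the patched test expects exactly 4 evaluations.
optimizer.register_callback("ask", ng.callbacks.EarlyStopping(lambda opt: opt.num_ask > 3))
optimizer.minimize(func)
assert func.num_calls == 4  # mirrors the assertion added in the patch
```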
From 4f4cb781e4e971b60219f4f54f66cee0aee54882 Mon Sep 17 00:00:00 2001
From: theoajc <48887209+theoajc@users.noreply.github.com>
Date: Mon, 16 Aug 2021 14:42:02 +0100
Subject: [PATCH 03/13] Delete =

---
 nevergrad/optimization/= | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 nevergrad/optimization/=

diff --git a/nevergrad/optimization/= b/nevergrad/optimization/=
deleted file mode 100644
index e69de29bb..000000000

From 9e3955ca58bf30c461f7c40d36a50f6809dc5f57 Mon Sep 17 00:00:00 2001
From: Theodore Clarke
Date: Wed, 18 Aug 2021 02:44:44 -0700
Subject: [PATCH 04/13] updated optimization logger for MOO and added get_min_losses function

---
 nevergrad/optimization/callbacks.py           |  13 +-
 nevergrad/optimization/multiobjective/core.py | 122 ++++++++++++++----
 nevergrad/optimization/test_callbacks.py      |   4 +-
 3 files changed, 113 insertions(+), 26 deletions(-)

diff --git a/nevergrad/optimization/callbacks.py b/nevergrad/optimization/callbacks.py
index fef931786..d0210da52 100644
--- a/nevergrad/optimization/callbacks.py
+++ b/nevergrad/optimization/callbacks.py
@@ -87,8 +87,17 @@ def __call__(self, optimizer: base.Optimizer, *args: tp.Any, **kwargs: tp.Any) -
         if time.time() >= self._next_time or self._next_tell >= optimizer.num_tell:
             self._next_time = time.time() + self._log_interval_seconds
             self._next_tell = optimizer.num_tell + self._log_interval_tells
-            x = optimizer.provide_recommendation()
-            self._logger.log(self._log_level, "After %s, recommendation is %s", optimizer.num_tell, x)
+            if optimizer.num_objectives == 1:
+                x = optimizer.provide_recommendation()
+                self._logger.log(self._log_level, "After %s, recommendation is %s", optimizer.num_tell, x)
+            else:
+                losses = optimizer._hypervolume_pareto.get_min_losses()
+                self._logger.log(
+                    self._log_level,
+                    "After %s, the respective minimum loss for each objective in the pareto front is %s",
+                    optimizer.num_tell,
+                    losses,
+                )


 class ParametersLogger:
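With this change `OptimizationLogger` no longer assumes a single recommendation exists: for multi-objective runs it reports the minimum reached on each objective over the (unfiltered) front. A usage sketch of the public interface, mirroring the callback arguments used in the tests (the two-objective function, optimizer and budget are illustrative; `minimize` accepting a list-valued loss is nevergrad's multiobjective interface):

```python
import logging
import numpy as np
import nevergrad as ng

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("moo_demo")


def two_objectives(x: np.ndarray) -> list:
    # Two competing goals: stay close to 0 and stay close to 1.
    return [float(np.sum(x ** 2)), float(np.sum((x - 1.0) ** 2))]


optimizer = ng.optimizers.OnePlusOne(parametrization=ng.p.Array(shape=(2,)), budget=50)
optimizer.register_callback(
    "tell",
    ng.callbacks.OptimizationLogger(
        logger=logger, log_level=logging.INFO, log_interval_tells=10, log_interval_seconds=0.1
    ),
)
optimizer.minimize(two_objectives)
# With this patch the MOO branch emits lines such as:
# "After 10, the respective minimum loss for each objective in the pareto front is [...]"
```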
""" def __init__( @@ -52,12 +63,16 @@ def __init__( ) if upper_bounds is None: self._auto_bound = auto_bound - self._rng = seed if isinstance(seed, np.random.RandomState) else np.random.RandomState(seed) - self._pareto: tp.List[p.Parameter] = [] + # If we are yet to set the upper bounds, or yet to have an add since doing so, _best_volume is -inf. + # If we have set the upper bounds and all tells have been worse than them, then _best_volume is a + # negative number indicating the least amount we have ever been worse. + # If we have ever beaten the upper bounds, the _best_volume is nonnegative. In particular, it is + # the hypervolume of the current PF if we are using hypervolume, otherwise 0. self._best_volume = -float("Inf") self._hypervolume: tp.Optional[HypervolumeIndicator] = None self._pareto_needs_filtering = False self._no_hypervolume = no_hypervolume + self._pf = ParetoFront(seed=seed, no_hypervolume=no_hypervolume) @property def num_objectives(self) -> int: @@ -67,19 +82,21 @@ def num_objectives(self) -> int: def best_volume(self) -> float: return self._best_volume - def _add_to_pareto(self, parameter: p.Parameter) -> None: - self._pareto.append(parameter) - self._pareto_needs_filtering = True - def extend(self, parameters: tp.Sequence[p.Parameter]) -> float: output = 0.0 for param in parameters: output = self.add(param) return output + # pylint: disable=too-many-branches def add(self, parameter: p.Parameter) -> float: - """Given parameters and the multiobjective loss, this computes the hypervolume - and update the state of the function with new points if it belongs to the pareto front + """ + when _no_hypervolume = False + Given parameters and the multiobjective loss, this computes the hypervolume + and update the state of the function with new points if it belongs to the pareto front. + when _no_hypervolume = True + Add every point to pareto front. Don't compute hypervolume. Return 0.0 since loss + not looked at in this context. """ # pylint: disable=too-many-return-statements, too-many-branches if not isinstance(parameter, p.Parameter): @@ -96,31 +113,34 @@ def add(self, parameter: p.Parameter) -> float: if (self._upper_bounds > -float("inf")).all() and (losses > self._upper_bounds).all(): return float("inf") # Avoid uniformly worst points self._upper_bounds = np.maximum(self._upper_bounds, losses) - self._add_to_pareto(parameter) + self._pf.add_to_pareto(parameter) return 0.0 - if self._hypervolume is None: - self._hypervolume = HypervolumeIndicator(self._upper_bounds) # get rid of points over the upper bounds if (losses - self._upper_bounds > 0).any(): loss = -float(np.sum(np.maximum(0, losses - self._upper_bounds))) if loss > self._best_volume: self._best_volume = loss if self._best_volume < 0: - self._add_to_pareto(parameter) - return 0.0 if self._no_hypervolume else -loss + self._pf.add_to_pareto(parameter) + return -loss + if self._no_hypervolume: + self._pf.add_to_pareto(parameter) + return 0.0 + if self._hypervolume is None: + self._hypervolume = HypervolumeIndicator(self._upper_bounds) + self._pf._hypervolume = self._hypervolume # We compute the hypervolume - new_volume = self._hypervolume.compute([pa.losses for pa in self._pareto] + [losses]) + new_volume = self._hypervolume.compute([pa.losses for pa in self._pf.get_raw()] + [losses]) if new_volume > self._best_volume: # This point is good! Let us give him a great mono-fitness value. 
@@ -143,8 +217,12 @@ def _filter_pareto_front(self) -> None:
         self._pareto = new_pareto
         self._pareto_needs_filtering = False

+    def get_raw(self) -> tp.List[p.Parameter]:
+        """Retrieve current values, which may not be a Pareto front, as they have not been filtered."""
+        return self._pareto
+
     # pylint: disable=too-many-branches
-    def pareto_front(
+    def get_front(
         self, size: tp.Optional[int] = None, subset: str = "random", subset_tentatives: int = 12
     ) -> tp.List[p.Parameter]:
         """Pareto front, as a list of Parameter. The losses can be accessed through
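The new `ParetoFront` class owns the raw point store and defers domination filtering until the front is actually read: `get_raw` returns the unfiltered points, while `get_front` triggers `_filter_pareto_front` on demand. A minimal sketch of that lazy-filtering pattern (names like `LazyFront` and `dominates` are illustrative, not nevergrad API):

```python
import typing as tp


def dominates(a: tp.Sequence[float], b: tp.Sequence[float]) -> bool:
    """a dominates b if it is no worse on every objective and better on one."""
    return all(x <= y for x, y in zip(a, b)) and any(x < y for x, y in zip(a, b))


class LazyFront:
    """O(1) insertion; domination filtering is deferred until the front is read."""

    def __init__(self) -> None:
        self._points: tp.List[tp.Tuple[float, ...]] = []
        self._needs_filtering = False

    def add(self, losses: tp.Sequence[float]) -> None:
        self._points.append(tuple(losses))
        self._needs_filtering = True

    def get_raw(self) -> tp.List[tp.Tuple[float, ...]]:
        return self._points  # may still contain dominated points

    def get_front(self) -> tp.List[tp.Tuple[float, ...]]:
        if self._needs_filtering:
            self._points = [
                p for p in self._points
                if not any(dominates(q, p) for q in self._points if q != p)
            ]
            self._needs_filtering = False
        return self._points


front = LazyFront()
for losses in [(1.0, 3.0), (2.0, 1.0), (2.5, 2.5)]:  # the last point is dominated
    front.add(losses)
assert front.get_front() == [(1.0, 3.0), (2.0, 1.0)]
```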
@@ -173,13 +251,13 @@ def pareto_front(
         if subset == "random":
             return self._rng.choice(self._pareto, size).tolist()  # type: ignore
         tentatives = [self._rng.choice(self._pareto, size).tolist() for _ in range(subset_tentatives)]  # type: ignore
-        if self._hypervolume is None:
-            raise RuntimeError("Hypervolume not initialized, not supported")  # TODO fix
+        if self._hypervolume is None and subset == "hypervolume":
+            raise RuntimeError("Hypervolume subsetting not supported as hypervolume not in use")
         hypervolume = self._hypervolume
         scores: tp.List[float] = []
         for tentative in tentatives:
             if subset == "hypervolume":
-                scores += [-hypervolume.compute([pa.losses for pa in tentative])]
+                scores += [-hypervolume.compute([pa.losses for pa in tentative])]  # type: ignore
             else:
                 score: float = 0.0
                 for v in self._pareto:
diff --git a/nevergrad/optimization/test_callbacks.py b/nevergrad/optimization/test_callbacks.py
index f6a0daaef..6a0e67037 100644
--- a/nevergrad/optimization/test_callbacks.py
+++ b/nevergrad/optimization/test_callbacks.py
@@ -128,7 +128,7 @@ def test_early_stopping() -> None:

 def test_optimization_logger(caplog) -> None:
     instrum = ng.p.Instrumentation(
-        None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)), multiobjective=False
+        None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)), multiobjective=True
     )
     logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
     logger = logging.getLogger(__name__)
@@ -142,6 +142,6 @@ def test_optimization_logger(caplog) -> None:
     with caplog.at_level(logging.INFO):
         optimizer.minimize(_func, verbosity=2)
     assert (
-        "After 0, recommendation is Instrumentation(Tuple(None,2.0),Dict(array=Array{(3,2)},blublu=blublu,multiobjective=False))"
+        "After 0, the respective minimum loss for each objective in the pareto front is [12.0, 12.0]"
         in caplog.text
     )

From e293de1efd073b74929466ab65651696d7994eff Mon Sep 17 00:00:00 2001
From: Theodore Clarke
Date: Wed, 18 Aug 2021 02:52:38 -0700
Subject: [PATCH 05/13] edited get_min_losses

---
 nevergrad/optimization/multiobjective/core.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nevergrad/optimization/multiobjective/core.py b/nevergrad/optimization/multiobjective/core.py
index 4b4de7701..59b245b2e 100644
--- a/nevergrad/optimization/multiobjective/core.py
+++ b/nevergrad/optimization/multiobjective/core.py
@@ -180,9 +180,9 @@ def get_min_losses(self) -> None:
         pf = self._pf.get_raw()
         min_losses = [np.inf] * len(pf[0].value)
         for point in pf:
             point_losses = np.array(point.losses)
-            for i in range(len(point.value)):
-                if point_losses[i] < min_losses[i]:
-                    min_losses[i] = point_losses[i]
+            for i, point_loss in enumerate(point_losses):
+                if point_loss < min_losses[i]:
+                    min_losses[i] = point_loss
         return min_losses
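The optimizer-level `pareto_front` added in patch 4 forwards to `ParetoFront.get_front`, including the subsetting options shown above. A usage sketch of the public interface (the objective, optimizer choice, budget and subset size are illustrative):

```python
import numpy as np
import nevergrad as ng


def two_objectives(x: np.ndarray) -> list:
    return [float(np.sum(x ** 2)), float(np.sum((x - 1.0) ** 2))]


optimizer = ng.optimizers.DE(parametrization=ng.p.Array(shape=(2,)), budget=200)
optimizer.minimize(two_objectives)

full_front = optimizer.pareto_front()                          # all non-dominated candidates
small_front = optimizer.pareto_front(size=5, subset="random")  # capped subset of the front
for candidate in sorted(small_front, key=lambda c: c.losses[0]):
    print(candidate.value, candidate.losses)
```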
From 2286c320fbadfa4057db3bda00127f72400133f3 Mon Sep 17 00:00:00 2001
From: Theodore Clarke
Date: Wed, 18 Aug 2021 03:00:26 -0700
Subject: [PATCH 06/13] type fixes

---
 nevergrad/optimization/callbacks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nevergrad/optimization/callbacks.py b/nevergrad/optimization/callbacks.py
index d0210da52..109814706 100644
--- a/nevergrad/optimization/callbacks.py
+++ b/nevergrad/optimization/callbacks.py
@@ -91,7 +91,7 @@ def __call__(self, optimizer: base.Optimizer, *args: tp.Any, **kwargs: tp.Any) -
                 x = optimizer.provide_recommendation()
                 self._logger.log(self._log_level, "After %s, recommendation is %s", optimizer.num_tell, x)
             else:
-                losses = optimizer._hypervolume_pareto.get_min_losses()
+                losses = optimizer._hypervolume_pareto.get_min_losses()  # type: ignore
                 self._logger.log(
                     self._log_level,
                     "After %s, the respective minimum loss for each objective in the pareto front is %s",
                     optimizer.num_tell,
                     losses,
                 )

From d94fa388d82af7d1f3c081f07099eb54146e9723 Mon Sep 17 00:00:00 2001
From: Theodore Clarke
Date: Wed, 18 Aug 2021 03:02:22 -0700
Subject: [PATCH 07/13] removed testing statement

---
 nevergrad/optimization/multiobjective/core.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/nevergrad/optimization/multiobjective/core.py b/nevergrad/optimization/multiobjective/core.py
index 59b245b2e..047176296 100644
--- a/nevergrad/optimization/multiobjective/core.py
+++ b/nevergrad/optimization/multiobjective/core.py
@@ -172,10 +172,9 @@ def pareto_front(
         list
             the list of Parameter of the pareto front
         """
-        self.get_min_losses()
         return self._pf.get_front(size, subset, subset_tentatives)

-    def get_min_losses(self) -> None:
+    def get_min_losses(self) -> tp.ArrayLike[float]:
         pf = self._pf.get_raw()
         min_losses = [np.inf] * len(pf[0].value)
         for point in pf:

From 6f7961dcdf55f743689d72ddc82bc7bb5de1f0ef Mon Sep 17 00:00:00 2001
From: Theodore Clarke
Date: Wed, 18 Aug 2021 03:06:10 -0700
Subject: [PATCH 08/13] removed testing statement

---
 nevergrad/optimization/multiobjective/core.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nevergrad/optimization/multiobjective/core.py b/nevergrad/optimization/multiobjective/core.py
index 1f1b3aeea..708daf91e 100644
--- a/nevergrad/optimization/multiobjective/core.py
+++ b/nevergrad/optimization/multiobjective/core.py
@@ -174,7 +174,7 @@ def pareto_front(
         """
         return self._pf.get_front(size, subset, subset_tentatives)

-    def get_min_losses(self) -> tp.ArrayLike[float]:
+    def get_min_losses(self) -> tp.List[float]:
         pf = self._pf.get_raw()
         min_losses = [np.inf] * len(pf[0].value)
         for point in pf:
@@ -184,6 +184,7 @@ def get_min_losses(self) -> tp.List[float]:
             min_losses[i] = point_loss
         return min_losses

+
 class ParetoFront:
     def __init__(
         self,
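Patches 6 through 8 churn on the return annotation of `get_min_losses`; the underlying subtlety is that numpy reductions yield an `ndarray`, not a `tp.List[float]`. A hedged sketch of an alternative that keeps the declared list contract (this is an illustration, not what the series ships):

```python
import typing as tp
import numpy as np


def get_min_losses_as_list(losses: tp.Sequence[tp.Sequence[float]]) -> tp.List[float]:
    """Per-objective minima as a plain Python list."""
    # np.min(..., axis=0) yields an ndarray; tolist() restores the annotated type.
    return np.min(np.asarray(losses), axis=0).tolist()


assert get_min_losses_as_list([[12.0, 34.0], [56.0, 12.0]]) == [12.0, 12.0]
```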
""" + if self.num_objectives > 1: + raise RuntimeError("No best candidate in MOO.") recom_data = self._internal_provide_recommendation() # pylint: disable=assignment-from-none if recom_data is None or any(np.isnan(recom_data)): name = "minimum" if self.parametrization.function.deterministic else "pessimistic" @@ -650,7 +652,7 @@ def minimize( (tmp_finished if x_job[1].done() else tmp_runnings).append(x_job) self._running_jobs, self._finished_jobs = tmp_runnings, tmp_finished first_iteration = False - return self.provide_recommendation() + return self.provide_recommendation() if self.num_objectives == 1 else p.Constant(None) def _info(self) -> tp.Dict[str, tp.Any]: """Easy access to debug/benchmark info""" From da829c8f0948330fe62f46cfa54ef627d6a07e5d Mon Sep 17 00:00:00 2001 From: Theodore Clarke Date: Wed, 18 Aug 2021 06:18:00 -0700 Subject: [PATCH 10/13] more efficient get min losses --- nevergrad/optimization/multiobjective/core.py | 9 +-------- nevergrad/optimization/test_callbacks.py | 2 +- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/nevergrad/optimization/multiobjective/core.py b/nevergrad/optimization/multiobjective/core.py index 708daf91e..b06ea1360 100644 --- a/nevergrad/optimization/multiobjective/core.py +++ b/nevergrad/optimization/multiobjective/core.py @@ -175,14 +175,7 @@ def pareto_front( return self._pf.get_front(size, subset, subset_tentatives) def get_min_losses(self) -> tp.List[float]: - pf = self._pf.get_raw() - min_losses = [np.inf] * len(pf[0].value) - for point in pf: - point_losses = np.array(point.losses) - for i, point_loss in enumerate(point_losses): - if point_loss < min_losses[i]: - min_losses[i] = point_loss - return min_losses + return np.min([p.losses for p in self._pf.get_raw()], axis=0) class ParetoFront: diff --git a/nevergrad/optimization/test_callbacks.py b/nevergrad/optimization/test_callbacks.py index 6a0e67037..60f8033a7 100644 --- a/nevergrad/optimization/test_callbacks.py +++ b/nevergrad/optimization/test_callbacks.py @@ -142,6 +142,6 @@ def test_optimization_logger(caplog) -> None: with caplog.at_level(logging.INFO): optimizer.minimize(_func, verbosity=2) assert ( - "After 0, the respective minimum loss for each objective in the pareto front is [12.0, 12.0]" + "After 0, the respective minimum loss for each objective in the pareto front is [12. 
12.]" in caplog.text ) From 99e9e4c7929df8aa4698d74b0e6baa87eb07426c Mon Sep 17 00:00:00 2001 From: Theodore Clarke Date: Wed, 18 Aug 2021 06:23:21 -0700 Subject: [PATCH 11/13] added optimization logger test for single objective --- nevergrad/optimization/test_callbacks.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/nevergrad/optimization/test_callbacks.py b/nevergrad/optimization/test_callbacks.py index 60f8033a7..25bb166fd 100644 --- a/nevergrad/optimization/test_callbacks.py +++ b/nevergrad/optimization/test_callbacks.py @@ -127,6 +127,27 @@ def test_early_stopping() -> None: def test_optimization_logger(caplog) -> None: + instrum = ng.p.Instrumentation( + None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)), multiobjective=False + ) + logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO")) + logger = logging.getLogger(__name__) + optimizer = optimizerlib.OnePlusOne(parametrization=instrum, budget=3) + optimizer.register_callback( + "tell", + callbacks.OptimizationLogger( + logger=logger, log_level=logging.INFO, log_interval_tells=10, log_interval_seconds=0.1 + ), + ) + with caplog.at_level(logging.INFO): + optimizer.minimize(_func, verbosity=2) + assert ( + "After 0, recommendation is Instrumentation(Tuple(None,2.0),Dict(array=Array{(3,2)},blublu=blublu,multiobjective=False))" + in caplog.text + ) + + +def test_optimization_logger_MOO(caplog) -> None: instrum = ng.p.Instrumentation( None, 2.0, blublu="blublu", array=ng.p.Array(shape=(3, 2)), multiobjective=True ) From 21f352be58edf5cdb1ad7404c528c645d21f2c90 Mon Sep 17 00:00:00 2001 From: Theodore Clarke Date: Wed, 18 Aug 2021 11:10:24 -0700 Subject: [PATCH 12/13] Updated error message --- nevergrad/optimization/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nevergrad/optimization/base.py b/nevergrad/optimization/base.py index 625112297..4bd9ea6de 100644 --- a/nevergrad/optimization/base.py +++ b/nevergrad/optimization/base.py @@ -519,7 +519,9 @@ def recommend(self) -> p.Parameter: on the function (:code:`objective_function(*candidate.args, **candidate.kwargs)`). """ if self.num_objectives > 1: - raise RuntimeError("No best candidate in MOO.") + raise RuntimeError( + "No best candidate in MOO. Use pareto_front function instead to get the set of all non-dominated candidates." + ) recom_data = self._internal_provide_recommendation() # pylint: disable=assignment-from-none if recom_data is None or any(np.isnan(recom_data)): name = "minimum" if self.parametrization.function.deterministic else "pessimistic" From 81258f6ba01d827f1ed51b6d0ba9ab373033352e Mon Sep 17 00:00:00 2001 From: theoajc <48887209+theoajc@users.noreply.github.com> Date: Fri, 20 Aug 2021 13:24:16 +0100 Subject: [PATCH 13/13] Update nevergrad/optimization/base.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Jérémy Rapin --- nevergrad/optimization/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nevergrad/optimization/base.py b/nevergrad/optimization/base.py index 4bd9ea6de..84a08e607 100644 --- a/nevergrad/optimization/base.py +++ b/nevergrad/optimization/base.py @@ -520,7 +520,7 @@ def recommend(self) -> p.Parameter: """ if self.num_objectives > 1: raise RuntimeError( - "No best candidate in MOO. Use pareto_front function instead to get the set of all non-dominated candidates." + "No best candidate in MOO. Use optimizer.pareto_front() instead to get the set of all non-dominated candidates." 
             )
         recom_data = self._internal_provide_recommendation()  # pylint: disable=assignment-from-none
         if recom_data is None or any(np.isnan(recom_data)):
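Taken together, patches 9, 12 and 13 make the single-recommendation API fail loudly for multi-objective runs: `minimize` returns a `p.Constant(None)` placeholder and `recommend` raises, pointing callers at `optimizer.pareto_front()`. A closing usage sketch of the resulting behavior (the objective and budget are illustrative):

```python
import numpy as np
import nevergrad as ng


def two_objectives(x: np.ndarray) -> list:
    return [float(np.sum(x ** 2)), float(np.sum((x - 1.0) ** 2))]


optimizer = ng.optimizers.OnePlusOne(parametrization=ng.p.Array(shape=(2,)), budget=100)
placeholder = optimizer.minimize(two_objectives)  # p.Constant(None) for MOO runs
try:
    optimizer.recommend()
except RuntimeError as error:
    print(error)  # "No best candidate in MOO. Use optimizer.pareto_front() instead ..."

for candidate in optimizer.pareto_front():
    print(candidate.value, candidate.losses)
```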