Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat: Configurable optimizer tolerance for termination #1184

Merged
merged 5 commits into from
Nov 19, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/pyhf/optimize/opt_minuit.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ class minuit_optimizer(OptimizerMixin):
Optimizer that uses iminuit.Minuit.migrad.
"""

__slots__ = ['name', 'errordef', 'steps', 'strategy']
__slots__ = ['name', 'errordef', 'steps', 'strategy', 'tolerance']

def __init__(self, *args, **kwargs):
"""
Expand All @@ -28,11 +28,13 @@ def __init__(self, *args, **kwargs):
errordef (:obj:`float`): See minuit docs. Default is 1.0.
steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.
tolerance (:obj:`float`): tolerance for termination. See specific optimizer for detailed meaning. Default is 0.1.
"""
self.name = 'minuit'
self.errordef = kwargs.pop('errordef', 1)
self.steps = kwargs.pop('steps', 1000)
self.strategy = kwargs.pop('strategy', None)
self.tolerance = kwargs.pop('tolerance', 0.1)
super().__init__(*args, **kwargs)

def _get_minimizer(
Expand Down Expand Up @@ -101,12 +103,14 @@ def _minimize(
strategy = options.pop(
'strategy', self.strategy if self.strategy else not do_grad
)
tolerance = options.pop('tolerance', self.tolerance)
if options:
raise exceptions.Unsupported(
f"Unsupported options were passed in: {list(options.keys())}."
)

minimizer.strategy = strategy
minimizer.tol = tolerance
minimizer.migrad(ncall=maxiter)
# Following lines below come from:
# https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
Expand Down
17 changes: 12 additions & 5 deletions src/pyhf/optimize/opt_scipy.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,19 @@ class scipy_optimizer(OptimizerMixin):
Optimizer that uses :func:`scipy.optimize.minimize`.
"""

__slots__ = ['name']
__slots__ = ['name', 'tolerance']

def __init__(self, *args, **kwargs):
    """
    Initialize the scipy_optimizer.

    See :class:`pyhf.optimize.mixins.OptimizerMixin` for other configuration options.

    Args:
        tolerance (:obj:`float`): Tolerance for termination. Passed through to
            :func:`scipy.optimize.minimize` as ``tol``; see the specific scipy
            minimization method for its detailed meaning. Default is ``None``
            (scipy chooses a method-specific default).
    """
    self.name = 'scipy'
    # Pop 'tolerance' before delegating so the mixin does not see an
    # unexpected keyword argument.
    self.tolerance = kwargs.pop('tolerance', None)
    super().__init__(*args, **kwargs)

def _get_minimizer(
Expand All @@ -40,16 +44,18 @@ def _minimize(
Same signature as :func:`scipy.optimize.minimize`.

Minimizer Options:
maxiter (`int`): maximum number of iterations. Default is 100000.
verbose (`bool`): print verbose output during minimization. Default is off.
method (`str`): minimization routine. Default is 'SLSQP'.
maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
verbose (:obj:`bool`): print verbose output during minimization. Default is off.
method (:obj:`str`): minimization routine. Default is 'SLSQP'.
tolerance (:obj:`float`): tolerance for termination. See specific optimizer for detailed meaning. Default is None.

Returns:
fitresult (scipy.optimize.OptimizeResult): the fit result
"""
maxiter = options.pop('maxiter', self.maxiter)
verbose = options.pop('verbose', self.verbose)
method = options.pop('method', 'SLSQP')
tolerance = options.pop('tolerance', self.tolerance)
if options:
raise exceptions.Unsupported(
f"Unsupported options were passed in: {list(options.keys())}."
Expand All @@ -73,5 +79,6 @@ def _minimize(
jac=do_grad,
bounds=bounds,
constraints=constraints,
tol=tolerance,
options=dict(maxiter=maxiter, disp=bool(verbose)),
)
13 changes: 13 additions & 0 deletions tests/test_optim.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,19 @@ def test_minuit_strategy_global(mocker, backend, strategy):
assert spy.spy_return.minuit.strategy == 1


def test_set_tolerance(backend):
    """Check that a termination tolerance is accepted both per-fit-call and per-optimizer-instance."""
    model = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0])
    data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

    # tolerance supplied directly as a fit option
    assert pyhf.infer.mle.fit(data, model, tolerance=0.01) is not None

    # tolerance configured on the optimizer instance (scipy first, then minuit)
    for optimizer_cls in (
        pyhf.optimize.scipy_optimizer,
        pyhf.optimize.minuit_optimizer,
    ):
        pyhf.set_backend(pyhf.tensorlib, optimizer_cls(tolerance=0.01))
        assert pyhf.infer.mle.fit(data, model) is not None


@pytest.mark.parametrize(
'optimizer',
[pyhf.optimize.scipy_optimizer, pyhf.optimize.minuit_optimizer],
Expand Down