feat: Set minuit strategy automatically for gradient/non-gradient mode (#1183)

* Set MINUIT strategy to 0/1 automatically if using gradients/no-gradient
* Allow for MINUIT strategy to be configurable
* Add test for strategy selection
kratsg authored Nov 19, 2020
1 parent 937d716 commit 985aafa
Showing 2 changed files with 61 additions and 2 deletions.
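
For orientation before the diff: a minimal usage sketch of the two configuration paths this commit adds. The calls and values mirror the tests in tests/test_optim.py below; the comments on which strategy is picked describe the intended behaviour rather than program output.

    import pyhf

    # keep the current tensor backend, switch the optimizer to MINUIT
    pyhf.set_backend(pyhf.tensorlib, 'minuit')
    model = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0])
    data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

    # automatic: strategy 0 if the backend supplies gradients (do_grad), else 1
    pyhf.infer.mle.fit(data, model)

    # per-fit override passed through to the optimizer
    pyhf.infer.mle.fit(data, model, strategy=0)

    # optimizer-level default, set once when configuring the backend
    pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=1))
    pyhf.infer.mle.fit(data, model)  # uses strategy=1
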
13 changes: 12 additions & 1 deletion src/pyhf/optimize/opt_minuit.py
@@ -10,7 +10,7 @@ class minuit_optimizer(OptimizerMixin):
Optimizer that uses iminuit.Minuit.migrad.
"""

__slots__ = ['name', 'errordef', 'steps']
__slots__ = ['name', 'errordef', 'steps', 'strategy']

def __init__(self, *args, **kwargs):
"""
@@ -27,10 +27,12 @@ def __init__(self, *args, **kwargs):
Args:
errordef (:obj:`float`): See minuit docs. Default is 1.0.
steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.
"""
self.name = 'minuit'
self.errordef = kwargs.pop('errordef', 1)
self.steps = kwargs.pop('steps', 1000)
self.strategy = kwargs.pop('strategy', None)
super().__init__(*args, **kwargs)

def _get_minimizer(
@@ -87,17 +89,24 @@ def _minimize(
Minimizer Options:
maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.
Returns:
fitresult (scipy.optimize.OptimizeResult): the fit result
"""
maxiter = options.pop('maxiter', self.maxiter)
return_uncertainties = options.pop('return_uncertainties', False)
# 0: Fast, user-provided gradient
# 1: Default, no user-provided gradient
strategy = options.pop(
'strategy', self.strategy if self.strategy else not do_grad
)
if options:
raise exceptions.Unsupported(
f"Unsupported options were passed in: {list(options.keys())}."
)

minimizer.strategy = strategy
minimizer.migrad(ncall=maxiter)
# Following lines below come from:
# https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
@@ -113,6 +122,8 @@ def _minimize(
n = len(x0)
hess_inv = default_backend.ones((n, n))
if minimizer.valid:
# Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.
minimizer.hesse()
hess_inv = minimizer.np_covariance()

unc = None
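
To spell out the fallback in _minimize above as a standalone sketch: the helper name resolve_strategy and the int() cast are illustrative only; the committed code inlines this logic via options.pop.

    # resolution order: explicit per-fit option > optimizer-level setting > derived from do_grad
    def resolve_strategy(options, optimizer_strategy, do_grad):
        # strategy 0 is fast when a user-provided gradient is available,
        # strategy 1 is MINUIT's default when it is not
        return options.pop(
            'strategy', optimizer_strategy if optimizer_strategy else int(not do_grad)
        )

    resolve_strategy({}, None, do_grad=True)               # -> 0 (gradient supplied)
    resolve_strategy({}, None, do_grad=False)              # -> 1 (no gradient)
    resolve_strategy({'strategy': 2}, None, do_grad=True)  # -> 2 (explicit option wins)
    resolve_strategy({}, 1, do_grad=False)                 # -> 1 (optimizer-level setting wins)
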
50 changes: 49 additions & 1 deletion tests/test_optim.py
@@ -93,7 +93,7 @@ def test_minimize(tensorlib, precision, optimizer, do_grad, do_stitch):
'no_grad-minuit-jax-64b': [0.5000493563528641, 1.0000043833614634],
# do grad, minuit, 32b
'do_grad-minuit-pytorch-32b': [0.5017611384391785, 0.9997190237045288],
'do_grad-minuit-tensorflow-32b': [0.501288652420044, 1.0000219345092773],
'do_grad-minuit-tensorflow-32b': [0.5012885928153992, 1.0000673532485962],
#'do_grad-minuit-jax-32b': [0.5029529333114624, 0.9991086721420288],
'do_grad-minuit-jax-32b': [0.5007095336914062, 0.9999282360076904],
# do grad, minuit, 64b
@@ -177,6 +177,54 @@ def test_minimize_do_grad_autoconfig(mocker, backend, backend_new):
assert shim.call_args[1]['do_grad'] != pyhf.tensorlib.default_do_grad


def test_minuit_strategy_do_grad(mocker, backend):
"""
ref: gh#1172
When a user-provided gradient is available, check that the MINUIT strategy
is automatically set to 0. When no user-provided gradient is available, check
that the strategy is automatically set to 1.
"""
pyhf.set_backend(pyhf.tensorlib, 'minuit')
spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
m = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0])
data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)

do_grad = pyhf.tensorlib.default_do_grad
pyhf.infer.mle.fit(data, m)
assert spy.call_count == 1
assert spy.spy_return.minuit.strategy != do_grad

pyhf.infer.mle.fit(data, m, strategy=0)
assert spy.call_count == 2
assert spy.spy_return.minuit.strategy == 0

pyhf.infer.mle.fit(data, m, strategy=1)
assert spy.call_count == 3
assert spy.spy_return.minuit.strategy == 1


@pytest.mark.parametrize('strategy', [0, 1])
def test_minuit_strategy_global(mocker, backend, strategy):
pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy))
spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
m = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0])
data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)

do_grad = pyhf.tensorlib.default_do_grad
pyhf.infer.mle.fit(data, m)
assert spy.call_count == 1
assert spy.spy_return.minuit.strategy == (strategy if do_grad else 1)

pyhf.infer.mle.fit(data, m, strategy=0)
assert spy.call_count == 2
assert spy.spy_return.minuit.strategy == 0

pyhf.infer.mle.fit(data, m, strategy=1)
assert spy.call_count == 3
assert spy.spy_return.minuit.strategy == 1


@pytest.mark.parametrize(
'optimizer',
[pyhf.optimize.scipy_optimizer, pyhf.optimize.minuit_optimizer],
