From b81d903c888d98183da63e6cc3ada70668b349ef Mon Sep 17 00:00:00 2001
From: wernerd-cern <138122408+wernerd-cern@users.noreply.github.com>
Date: Tue, 15 Aug 2023 23:56:39 +0200
Subject: [PATCH] fix: Correct handling of strategy 0 for Minuit optimizer
 (#2277)

* Properly guard the `strategy` option for the Minuit optimizer in the
  case that a `strategy` of 0 is used. Without checking
  `self.strategy is not None`, the conditional `if self.strategy`
  evaluates to False for `self.strategy == 0`, so the fallback of
  `not do_grad` is wrongly used instead; for `do_grad` of False this
  silently turns a requested strategy 0 into strategy 1. A minimal
  sketch illustrating the pitfall is appended after the diff.
  - Amends PR https://github.com/scikit-hep/pyhf/pull/1183
* Use the corrected behavior to improve the testing of the default
  strategy in test_minuit_strategy_global.
* Add Daniel Werner to the contributors list.
---
 docs/contributors.rst           | 1 +
 src/pyhf/optimize/opt_minuit.py | 2 +-
 tests/test_optim.py             | 3 +--
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/contributors.rst b/docs/contributors.rst
index 7fdf9fcce0..08a828bf19 100644
--- a/docs/contributors.rst
+++ b/docs/contributors.rst
@@ -31,3 +31,4 @@ Contributors include:
 - Jerry Ling
 - Nathan Simpson
 - Beojan Stanislaus
+- Daniel Werner
diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
index 49ee177ae6..9cb0ce3e14 100644
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -102,7 +102,7 @@ def _minimize(
         # 0: Fast, user-provided gradient
         # 1: Default, no user-provided gradient
         strategy = options.pop(
-            'strategy', self.strategy if self.strategy else not do_grad
+            'strategy', self.strategy if self.strategy is not None else not do_grad
         )
         tolerance = options.pop('tolerance', self.tolerance)
         if options:
diff --git a/tests/test_optim.py b/tests/test_optim.py
index 95fe13f521..3efb8ff0e4 100644
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -182,10 +182,9 @@ def test_minuit_strategy_global(mocker, backend, strategy):
     m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
     data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)

-    do_grad = pyhf.tensorlib.default_do_grad
     pyhf.infer.mle.fit(data, m)
     assert spy.call_count == 1
-    assert spy.spy_return.minuit.strategy == strategy if do_grad else 1
+    assert spy.spy_return.minuit.strategy == strategy
     pyhf.infer.mle.fit(data, m, strategy=0)
     assert spy.call_count == 2
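
A minimal, self-contained sketch of the default-selection logic the patch
corrects. For illustration only: `pick_strategy_buggy` and
`pick_strategy_fixed` are hypothetical helper names, not pyhf API; they
mirror the conditional expression inside `_minimize`.

# pick_strategy_buggy: truthiness check. An explicit strategy of 0 is
# falsy, so it is mistaken for "unset" and the `not do_grad` fallback wins.
def pick_strategy_buggy(configured_strategy, do_grad):
    return configured_strategy if configured_strategy else not do_grad

# pick_strategy_fixed: identity check. Falls back to `not do_grad` only
# when no strategy was configured at all.
def pick_strategy_fixed(configured_strategy, do_grad):
    return configured_strategy if configured_strategy is not None else not do_grad

# User explicitly requests strategy 0 without a user-provided gradient:
assert pick_strategy_buggy(0, do_grad=False) is True  # wrong: True == 1, acts as strategy 1
assert pick_strategy_fixed(0, do_grad=False) == 0     # correct: honors the requested 0

# With no configured strategy, both variants fall back to `not do_grad`:
assert pick_strategy_buggy(None, do_grad=True) is False   # strategy 0
assert pick_strategy_fixed(None, do_grad=True) is False   # strategy 0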