diff --git a/docs/contributors.rst b/docs/contributors.rst
index 7fdf9fcce0..08a828bf19 100644
--- a/docs/contributors.rst
+++ b/docs/contributors.rst
@@ -31,3 +31,4 @@ Contributors include:
 - Jerry Ling
 - Nathan Simpson
 - Beojan Stanislaus
+- Daniel Werner
diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
index 7e62b0875b..db7b7c5011 100644
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -28,7 +28,10 @@ def __init__(self, *args, **kwargs):
         Args:
             errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
             steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
-            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
+            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
+              Default is ``None``, which results in either
+              :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of
+              ``int(not pyhf.tensorlib.default_do_grad)``.
             tolerance (:obj:`float`): Tolerance for termination.
               See specific optimizer for detailed meaning.
               Default is ``0.1``.
@@ -99,11 +102,14 @@ def _minimize(
             fitresult (scipy.optimize.OptimizeResult): the fit result
         """
         maxiter = options.pop('maxiter', self.maxiter)
-        # 0: Fast, user-provided gradient
-        # 1: Default, no user-provided gradient
-        strategy = options.pop(
-            'strategy', self.strategy if self.strategy else not do_grad
-        )
+        # do_grad value results in iminuit.Minuit.strategy of either:
+        #   0: Fast. Does not check a user-provided gradient.
+        #   1: Default. Checks user-provided gradient against numerical gradient.
+        strategy = options.pop("strategy", self.strategy)
+        # Guard against None from either self.strategy defaulting to None or
+        # passing strategy=None as options kwarg
+        if strategy is None:
+            strategy = 0 if do_grad else 1
         tolerance = options.pop('tolerance', self.tolerance)
         if options:
             raise exceptions.Unsupported(
diff --git a/tests/test_optim.py b/tests/test_optim.py
index 0eb4bb8785..d235cfe27e 100644
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -173,28 +173,35 @@ def test_minuit_strategy_do_grad(mocker, backend):
     assert spy.spy_return.minuit.strategy == 1
 
 
-@pytest.mark.parametrize('strategy', [0, 1])
+@pytest.mark.parametrize('strategy', [0, 1, 2])
 def test_minuit_strategy_global(mocker, backend, strategy):
     pyhf.set_backend(
         pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy, tolerance=0.2)
     )
     spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
-    m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
-    data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)
+    model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
+    data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)
 
-    do_grad = pyhf.tensorlib.default_do_grad
-    pyhf.infer.mle.fit(data, m)
+    pyhf.infer.mle.fit(data, model)
     assert spy.call_count == 1
-    assert spy.spy_return.minuit.strategy == strategy if do_grad else 1
+    assert spy.spy_return.minuit.strategy == strategy
 
-    pyhf.infer.mle.fit(data, m, strategy=0)
+    pyhf.infer.mle.fit(data, model, strategy=None)
     assert spy.call_count == 2
-    assert spy.spy_return.minuit.strategy == 0
+    assert spy.spy_return.minuit.strategy == int(not pyhf.tensorlib.default_do_grad)
 
-    pyhf.infer.mle.fit(data, m, strategy=1)
+    pyhf.infer.mle.fit(data, model, strategy=0)
     assert spy.call_count == 3
+    assert spy.spy_return.minuit.strategy == 0
+
+    pyhf.infer.mle.fit(data, model, strategy=1)
+    assert spy.call_count == 4
     assert spy.spy_return.minuit.strategy == 1
 
+    pyhf.infer.mle.fit(data, model, strategy=2)
+    assert spy.call_count == 5
+    assert spy.spy_return.minuit.strategy == 2
+
 
 def test_set_tolerance(backend):
     m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
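For context beyond the diff, a short usage sketch of the guarded default that the new test exercises. This is not part of the patch; the NumPy backend and the simple counting model below are assumptions made only for illustration:

```python
import pyhf

# Illustrative only (not part of the patch): NumPy backend with the Minuit
# optimizer left at strategy=None, the default this change now guards against.
pyhf.set_backend("numpy", pyhf.optimize.minuit_optimizer(strategy=None, tolerance=0.2))

model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

# With strategy=None the optimizer falls back to
# int(not pyhf.tensorlib.default_do_grad): the NumPy backend provides no
# autodiff gradient, so iminuit runs with strategy 1.
pyhf.infer.mle.fit(data, model)

# An explicit strategy (0, 1, or 2) is now passed through to iminuit unchanged.
pyhf.infer.mle.fit(data, model, strategy=2)
```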