fix: Guard Minuit optimizer against provided strategy of None #2278

Merged 9 commits on Aug 16, 2023
18 changes: 12 additions & 6 deletions src/pyhf/optimize/opt_minuit.py
@@ -28,7 +28,10 @@ def __init__(self, *args, **kwargs):
         Args:
             errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
             steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
-            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
+            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
+                Default is ``None``, which results in either
+                :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of
+                ``int(not pyhf.tensorlib.default_do_grad)``.
             tolerance (:obj:`float`): Tolerance for termination.
                 See specific optimizer for detailed meaning.
                 Default is ``0.1``.
@@ -99,11 +102,14 @@ def _minimize(
             fitresult (scipy.optimize.OptimizeResult): the fit result
         """
         maxiter = options.pop('maxiter', self.maxiter)
-        # 0: Fast, user-provided gradient
-        # 1: Default, no user-provided gradient
-        strategy = options.pop(
-            'strategy', self.strategy if self.strategy is not None else not do_grad
-        )
+        # do_grad value results in iminuit.Minuit.strategy of either:
+        #    0: Fast. Does not check a user-provided gradient.
+        #    1: Default. Checks user-provided gradient against numerical gradient.
+        strategy = options.pop("strategy", self.strategy)
+        # Guard against None from either self.strategy defaulting to None or
+        # passing strategy=None as options kwarg
+        if strategy is None:
+            strategy = 0 if do_grad else 1
         tolerance = options.pop('tolerance', self.tolerance)
         if options:
             raise exceptions.Unsupported(
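The resolution order the new code establishes is: a per-fit ``strategy`` kwarg wins, then the optimizer-level ``self.strategy``, and a ``None`` from either source falls back to the ``do_grad``-derived value. A minimal standalone sketch of that guard (``resolve_strategy`` is a hypothetical helper written here for illustration, not part of the pyhf API):

    def resolve_strategy(options, optimizer_strategy, do_grad):
        # Pop the per-fit override first; fall back to the optimizer-level
        # setting, which itself defaults to None.
        strategy = options.pop("strategy", optimizer_strategy)
        # None can arrive two ways: the optimizer was built without a
        # strategy, or the caller explicitly passed strategy=None.
        # Both defer to do_grad.
        if strategy is None:
            strategy = 0 if do_grad else 1
        return strategy

    assert resolve_strategy({}, None, do_grad=True) == 0
    assert resolve_strategy({}, None, do_grad=False) == 1
    assert resolve_strategy({"strategy": None}, 2, do_grad=False) == 1
    assert resolve_strategy({}, 2, do_grad=False) == 2

The two-step pop-then-guard is what the previous one-liner missed: ``options.pop('strategy', default)`` only uses the default when the key is absent, so an explicit ``strategy=None`` used to leak ``None`` through to iminuit.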
22 changes: 15 additions & 7 deletions tests/test_optim.py
@@ -173,27 +173,35 @@ def test_minuit_strategy_do_grad(mocker, backend):
     assert spy.spy_return.minuit.strategy == 1
 
 
-@pytest.mark.parametrize('strategy', [0, 1])
+@pytest.mark.parametrize('strategy', [0, 1, 2])
Member Author: Adding 2 as we can and I forgot it existed.

 def test_minuit_strategy_global(mocker, backend, strategy):
     pyhf.set_backend(
         pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy, tolerance=0.2)
     )
     spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
-    m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
-    data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)
+    model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
+    data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)
 
-    pyhf.infer.mle.fit(data, m)
+    pyhf.infer.mle.fit(data, model)
     assert spy.call_count == 1
     assert spy.spy_return.minuit.strategy == strategy
 
-    pyhf.infer.mle.fit(data, m, strategy=0)
+    pyhf.infer.mle.fit(data, model, strategy=None)
     assert spy.call_count == 2
-    assert spy.spy_return.minuit.strategy == 0
+    assert spy.spy_return.minuit.strategy == int(not pyhf.tensorlib.default_do_grad)
 
-    pyhf.infer.mle.fit(data, m, strategy=1)
+    pyhf.infer.mle.fit(data, model, strategy=0)
     assert spy.call_count == 3
+    assert spy.spy_return.minuit.strategy == 0
+
+    pyhf.infer.mle.fit(data, model, strategy=1)
+    assert spy.call_count == 4
     assert spy.spy_return.minuit.strategy == 1
+
+    pyhf.infer.mle.fit(data, model, strategy=2)
+    assert spy.call_count == 5
+    assert spy.spy_return.minuit.strategy == 2


 def test_set_tolerance(backend):
     m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
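Since ``pyhf.tensorlib.default_do_grad`` differs by backend, the expected strategy for the new ``strategy=None`` fit above depends on which backend the fixture sets. A short usage sketch of the guarded behavior, assuming the numpy backend (whose ``default_do_grad`` is ``False``, as it provides no autodiff gradient):

    import pyhf

    pyhf.set_backend("numpy", pyhf.optimize.minuit_optimizer(strategy=None))
    model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
    data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

    # With no user-provided gradient, strategy=None resolves to 1;
    # on an autodiff backend such as jax it would resolve to 0.
    result = pyhf.infer.mle.fit(data, model, strategy=None)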