fix(backport): Guard Minuit optimizer against provided strategy of None
wernerd-cern authored and matthewfeickert committed Aug 16, 2023
1 parent 616966d commit 1b09ba9
Showing 3 changed files with 29 additions and 15 deletions.
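The root cause is a Python idiom: `dict.pop(key, default)` only falls back to the default when the key is absent, not when the stored value is `None`, so an explicitly passed `strategy=None` slipped past the old fallback expression. A minimal standalone sketch of just that behavior (plain Python, not pyhf internals):

```python
# dict.pop(key, default) uses the default only when the key is missing.
options = {"strategy": None}
print(options.pop("strategy", 1))  # None -- stored value wins, default ignored

options = {}
print(options.pop("strategy", 1))  # 1 -- key absent, default applies
```

Hence the fix below pops with `self.strategy` as the default and then applies an explicit `is None` check afterwards.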
1 change: 1 addition & 0 deletions docs/contributors.rst
@@ -31,3 +31,4 @@ Contributors include:
 - Jerry Ling
 - Nathan Simpson
 - Beojan Stanislaus
+- Daniel Werner
18 changes: 12 additions & 6 deletions src/pyhf/optimize/opt_minuit.py
@@ -28,7 +28,10 @@ def __init__(self, *args, **kwargs):
         Args:
             errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
             steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
-            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
+            strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
+                Default is ``None``, which results in either
+                :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of
+                ``int(not pyhf.tensorlib.default_do_grad)``.
             tolerance (:obj:`float`): Tolerance for termination.
                 See specific optimizer for detailed meaning.
                 Default is ``0.1``.
@@ -99,11 +102,14 @@ def _minimize(
             fitresult (scipy.optimize.OptimizeResult): the fit result
         """
         maxiter = options.pop('maxiter', self.maxiter)
-        # 0: Fast, user-provided gradient
-        # 1: Default, no user-provided gradient
-        strategy = options.pop(
-            'strategy', self.strategy if self.strategy else not do_grad
-        )
+        # do_grad value results in iminuit.Minuit.strategy of either:
+        # 0: Fast. Does not check a user-provided gradient.
+        # 1: Default. Checks user-provided gradient against numerical gradient.
+        strategy = options.pop("strategy", self.strategy)
+        # Guard against None from either self.strategy defaulting to None or
+        # passing strategy=None as options kwarg
+        if strategy is None:
+            strategy = 0 if do_grad else 1
         tolerance = options.pop('tolerance', self.tolerance)
         if options:
             raise exceptions.Unsupported(
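For illustration, the new resolution logic can be read as a small pure function (the helper name `resolve_strategy` is hypothetical; in the commit the logic lives inline in `_minimize`):

```python
def resolve_strategy(options: dict, self_strategy, do_grad: bool) -> int:
    """Sketch of the fixed logic: explicit None check instead of truthiness."""
    strategy = options.pop("strategy", self_strategy)
    if strategy is None:
        strategy = 0 if do_grad else 1
    return strategy


assert resolve_strategy({}, None, do_grad=True) == 0   # autodiff backend default
assert resolve_strategy({}, None, do_grad=False) == 1  # numeric-gradient default
assert resolve_strategy({"strategy": None}, None, do_grad=False) == 1  # the guarded case
assert resolve_strategy({"strategy": 2}, None, do_grad=True) == 2  # explicit value wins
assert resolve_strategy({}, 0, do_grad=False) == 0  # explicit 0 no longer coerced
```

Note the last case: the old expression `self.strategy if self.strategy else not do_grad` treated a stored strategy of `0` as falsy and replaced it with `not do_grad`, so the explicit `is None` comparison also repairs that edge.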
25 changes: 16 additions & 9 deletions tests/test_optim.py
@@ -173,28 +173,35 @@ def test_minuit_strategy_do_grad(mocker, backend):
     assert spy.spy_return.minuit.strategy == 1


-@pytest.mark.parametrize('strategy', [0, 1])
+@pytest.mark.parametrize('strategy', [0, 1, 2])
 def test_minuit_strategy_global(mocker, backend, strategy):
     pyhf.set_backend(
         pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy, tolerance=0.2)
     )
     spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
-    m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
-    data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)
+    model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
+    data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

     do_grad = pyhf.tensorlib.default_do_grad
-    pyhf.infer.mle.fit(data, m)
+    pyhf.infer.mle.fit(data, model)
     assert spy.call_count == 1
-    assert spy.spy_return.minuit.strategy == strategy if do_grad else 1
+    assert spy.spy_return.minuit.strategy == strategy

-    pyhf.infer.mle.fit(data, m, strategy=0)
+    pyhf.infer.mle.fit(data, model, strategy=None)
     assert spy.call_count == 2
-    assert spy.spy_return.minuit.strategy == 0
+    assert spy.spy_return.minuit.strategy == int(not pyhf.tensorlib.default_do_grad)

-    pyhf.infer.mle.fit(data, m, strategy=1)
+    pyhf.infer.mle.fit(data, model, strategy=0)
     assert spy.call_count == 3
+    assert spy.spy_return.minuit.strategy == 0
+
+    pyhf.infer.mle.fit(data, model, strategy=1)
+    assert spy.call_count == 4
     assert spy.spy_return.minuit.strategy == 1
+
+    pyhf.infer.mle.fit(data, model, strategy=2)
+    assert spy.call_count == 5
+    assert spy.spy_return.minuit.strategy == 2


 def test_set_tolerance(backend):
     m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
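With the guard in place, passing `strategy=None` at fit time behaves the same as not passing a strategy at all. A usage sketch (assumes pyhf with the iminuit extra installed; backend and values are arbitrary):

```python
import pyhf

# Use the Minuit optimizer; the NumPy backend has default_do_grad == False
pyhf.set_backend("numpy", pyhf.optimize.minuit_optimizer(tolerance=0.2))

model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)

# Resolves to iminuit strategy 1 here (no autodiff gradient);
# before this fix a literal None could reach iminuit.
bestfit = pyhf.infer.mle.fit(data, model, strategy=None)
print(bestfit)
```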
