
Commit a700baf

Authored by AndrewLister-STFC, tyronerees, jess-farmer, and pre-commit-ci[bot]
Fixed corruption in checkpoint file after an exception (fitbenchmarking#1271)
* Finalise checkpoint file if there's an exception
* Add test for valid cp after failed run
* Use default parser in test
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* Fix line length

Co-authored-by: Tyrone Rees <tyrone.rees@stfc.ac.uk>
Co-authored-by: Jessica Huntley <79837359+jess-farmer@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 98e3393 commit a700baf

File tree: 2 files changed, +111 -72 lines

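The shape of the fix is to wrap the benchmarking loop in main.run() in a try block and move cp.finalise() into a matching finally clause, so the checkpoint JSON is closed whether or not a problem set raises. A condensed sketch of that structure (loop body elided; the full change is in the diff below):

    cp = Checkpoint(options=options)
    try:
        for sub_dir in problem_sets:
            # Benchmark each problem set and collect/save its results
            # (full logic shown in the diff below).
            ...
    finally:
        # Runs on normal completion *and* when an exception propagates,
        # so the checkpoint file is always left as valid JSON.
        cp.finalise()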

fitbenchmarking/cli/main.py

Lines changed: 76 additions & 67 deletions
@@ -383,76 +383,85 @@ def run(problem_sets, additional_options=None, options_file="", debug=False):
     pp_dfs_all_prob_sets = {}
     cp = Checkpoint(options=options)
 
-    for sub_dir in problem_sets:
-        # Create full path for the directory that holds a group of
-        # problem definition files
-        data_dir = os.path.join(current_path, sub_dir)
-
-        test_data = glob.glob(data_dir + "/*.*")
-
-        if not test_data:
-            LOGGER.warning("Problem set %s not found", data_dir)
-            continue
-
-        # generate group label/name used for problem set
-        try:
-            with open(
-                os.path.join(data_dir, "META.txt"), encoding="utf-8"
-            ) as f:
-                label = f.readline().strip("\n")
-        except OSError:
-            label = sub_dir.replace("/", "_")
-
-        LOGGER.info("Running the benchmarking on the %s problem set", label)
-        fit = Fit(
-            options=options, data_dir=data_dir, label=label, checkpointer=cp
-        )
-        results, failed_problems, unselected_minimizers = fit.benchmark()
-
-        # If a result has error flag 4 then the result contains dummy values,
-        # if this is the case for all results then output should not be
-        # produced as results tables won't show meaningful values.
-        all_dummy_results_flag = True
-        for result in results:
-            if result.error_flag != 4:
-                all_dummy_results_flag = False
-                break
-
-        # If the results are an empty list then this means that all minimizers
-        # raise an exception and the tables will produce errors if they run
-        # for that problem set.
-        if not results or all_dummy_results_flag:
-            message = (
-                "\nWARNING: \nThe user chosen options and/or problem "
-                " setup resulted in all minimizers and/or parsers "
-                "raising an exception. Because of this, results for "
-                f"the {label} problem set will not be displayed. "
-                "Please see the logs for more detail on why this is "
-                "the case."
+    try:
+        for sub_dir in problem_sets:
+            # Create full path for the directory that holds a group of
+            # problem definition files
+            data_dir = os.path.join(current_path, sub_dir)
+
+            test_data = glob.glob(data_dir + "/*.*")
+
+            if not test_data:
+                LOGGER.warning("Problem set %s not found", data_dir)
+                continue
+
+            # generate group label/name used for problem set
+            try:
+                with open(
+                    os.path.join(data_dir, "META.txt"), encoding="utf-8"
+                ) as f:
+                    label = f.readline().strip("\n")
+            except OSError:
+                label = sub_dir.replace("/", "_")
+
+            LOGGER.info(
+                "Running the benchmarking on the %s problem set", label
             )
-            LOGGER.warning(message)
-        else:
-            LOGGER.info("Producing output for the %s problem set", label)
-            # Display the runtime and accuracy results in a table
-            group_results_dir, pp_dfs = save_results(
-                group_name=label,
-                results=results,
+            fit = Fit(
                 options=options,
-                failed_problems=failed_problems,
-                unselected_minimizers=unselected_minimizers,
-                config=cp.config,
+                data_dir=data_dir,
+                label=label,
+                checkpointer=cp,
             )
-
-            pp_dfs_all_prob_sets[label] = pp_dfs
-
-            LOGGER.info("Completed benchmarking for %s problem set", sub_dir)
-            group_results_dir = os.path.relpath(
-                path=group_results_dir, start=options.results_dir
-            )
-            result_dir.append(group_results_dir)
-            group_labels.append(label)
-
-    cp.finalise()
+            results, failed_problems, unselected_minimizers = fit.benchmark()
+
+            # If a result has error flag 4 then the result contains dummy
+            # values, if this is the case for all results then output should
+            # not be produced as results tables won't show meaningful values.
+            all_dummy_results_flag = True
+            for result in results:
+                if result.error_flag != 4:
+                    all_dummy_results_flag = False
+                    break
+
+            # If the results are an empty list then this means that all
+            # minimizers raise an exception and the tables will produce
+            # errors if they run for that problem set.
+            if not results or all_dummy_results_flag:
+                message = (
+                    "\nWARNING: \nThe user chosen options and/or problem "
+                    " setup resulted in all minimizers and/or parsers "
+                    "raising an exception. Because of this, results for "
+                    f"the {label} problem set will not be displayed. "
+                    "Please see the logs for more detail on why this is "
+                    "the case."
+                )
+                LOGGER.warning(message)
+            else:
+                LOGGER.info("Producing output for the %s problem set", label)
+                # Display the runtime and accuracy results in a table
+                group_results_dir, pp_dfs = save_results(
+                    group_name=label,
+                    results=results,
+                    options=options,
+                    failed_problems=failed_problems,
+                    unselected_minimizers=unselected_minimizers,
+                    config=cp.config,
+                )
+
+                pp_dfs_all_prob_sets[label] = pp_dfs
+
+                LOGGER.info(
+                    "Completed benchmarking for %s problem set", sub_dir
+                )
+                group_results_dir = os.path.relpath(
+                    path=group_results_dir, start=options.results_dir
+                )
+                result_dir.append(group_results_dir)
+                group_labels.append(label)
+
+    finally:
+        cp.finalise()
 
     # Check result_dir is non empty before producing output
     if not result_dir:
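Why a finally block is needed at all: the Checkpoint object is handed to Fit as checkpointer and only finalise()d at the end, so results are presumably written to the file incrementally during the run, and an uncaught exception can leave the file without its closing JSON structure. The snippet below is a minimal, self-contained illustration of that failure mode and of how a finally clause avoids it; StreamingWriter is a hypothetical stand-in, not FitBenchmarking's Checkpoint class.

    import json
    import os
    from tempfile import TemporaryDirectory


    class StreamingWriter:
        """Hypothetical incremental JSON writer (not the real Checkpoint class)."""

        def __init__(self, path):
            self._file = open(path, "w", encoding="utf-8")
            self._file.write('{"results": [')
            self._first = True

        def add(self, value):
            # Append one result to the still-open JSON array.
            if not self._first:
                self._file.write(", ")
            self._file.write(json.dumps(value))
            self._first = False

        def finalise(self):
            # Close the JSON structure so the file is parseable.
            self._file.write("]}")
            self._file.close()


    with TemporaryDirectory() as results_dir:
        path = os.path.join(results_dir, "checkpoint.json")
        writer = StreamingWriter(path)
        try:
            writer.add({"problem": "A", "accuracy": 1.0})
            raise RuntimeError("simulated crash mid-run")
        except RuntimeError:
            pass
        finally:
            # Without this call the file ends mid-array and json.load fails.
            writer.finalise()

        with open(path, encoding="utf-8") as f:
            print(json.load(f))  # {'results': [{'problem': 'A', 'accuracy': 1.0}]}

The same guarantee holds when the exception propagates out of the try instead of being caught, which is the situation the change above addresses: the caller still sees the error, but the checkpoint file is left in a readable state.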

fitbenchmarking/cli/tests/test_main.py

Lines changed: 35 additions & 5 deletions
@@ -4,7 +4,9 @@
 
 import inspect
 import os
+from json import load
 from pathlib import Path
+from tempfile import TemporaryDirectory
 from unittest import TestCase
 from unittest.mock import patch
 
@@ -14,6 +16,7 @@
 from fitbenchmarking.cost_func.nlls_cost_func import NLLSCostFunc
 from fitbenchmarking.parsing.parser_factory import parse_problem_file
 from fitbenchmarking.utils import exceptions, fitbm_result
+from fitbenchmarking.utils.misc import get_problem_files
 from fitbenchmarking.utils.options import Options
 
 
@@ -67,11 +70,7 @@ def test_check_no_results_produced(self, benchmark):
         benchmark.return_value = ([], [], {})
 
         with self.assertRaises(exceptions.NoResultsError):
-            main.run(
-                ["examples/benchmark_problems/simple_tests"],
-                os.path.dirname(__file__),
-                debug=True,
-            )
+            main.run(["examples/benchmark_problems/simple_tests"], debug=True)
 
     @patch("fitbenchmarking.cli.main.Fit.benchmark")
     def test_all_dummy_results_produced(self, benchmark):
@@ -98,3 +97,34 @@ def test_file_path_exception_raised(self, mock):
         with self.assertRaises(SystemExit) as exp:
             main.main()
         self.assertEqual(exp.exception.code, 1)
+
+    @patch("fitbenchmarking.cli.main.save_results")
+    @patch("fitbenchmarking.utils.misc.get_problem_files")
+    def test_checkpoint_file_on_fail(self, get_problems, save_results):
+        """
+        Checks that the checkpoint file is valid json if there's a crash.
+        """
+        get_problems.side_effect = lambda path: [get_problem_files(path)[0]]
+        save_results.side_effect = RuntimeError(
+            "Exception raised during save..."
+        )
+
+        with TemporaryDirectory() as results_dir:
+            with self.assertRaises(RuntimeError):
+                main.run(
+                    ["examples/benchmark_problems/NIST/low_difficulty"],
+                    additional_options={
+                        "scipy_ls": ["lm-scipy"],
+                        "software": ["scipy_ls"],
+                        "num_runs": 1,
+                        "results_dir": results_dir,
+                    },
+                    debug=True,
+                )
+
+            with open(f"{results_dir}/checkpoint.json", encoding="utf8") as f:
+                # This will fail if the json is invalid
+                contents = load(f)
+
+            # Check that it's not empty
+            self.assertTrue(contents)
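The same check the new test performs can be done by hand after any benchmarking run; a minimal sketch, assuming the checkpoint is written to <results_dir>/checkpoint.json as the test above asserts (the path below is illustrative, substitute your configured results_dir):

    import json

    # Illustrative path: replace with your configured results_dir.
    with open("fitbenchmarking_results/checkpoint.json", encoding="utf8") as f:
        checkpoint = json.load(f)  # raises json.JSONDecodeError if the file is corrupt

    # A finalised checkpoint should parse to a non-empty object.
    print(bool(checkpoint))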
