@@ -383,76 +383,85 @@ def run(problem_sets, additional_options=None, options_file="", debug=False):
     pp_dfs_all_prob_sets = {}
     cp = Checkpoint(options=options)

-    for sub_dir in problem_sets:
-        # Create full path for the directory that holds a group of
-        # problem definition files
-        data_dir = os.path.join(current_path, sub_dir)
-
-        test_data = glob.glob(data_dir + "/*.*")
-
-        if not test_data:
-            LOGGER.warning("Problem set %s not found", data_dir)
-            continue
-
-        # generate group label/name used for problem set
-        try:
-            with open(
-                os.path.join(data_dir, "META.txt"), encoding="utf-8"
-            ) as f:
-                label = f.readline().strip("\n")
-        except OSError:
-            label = sub_dir.replace("/", "_")
-
-        LOGGER.info("Running the benchmarking on the %s problem set", label)
-        fit = Fit(
-            options=options, data_dir=data_dir, label=label, checkpointer=cp
-        )
-        results, failed_problems, unselected_minimizers = fit.benchmark()
-
-        # If a result has error flag 4 then the result contains dummy values,
-        # if this is the case for all results then output should not be
-        # produced as results tables won't show meaningful values.
-        all_dummy_results_flag = True
-        for result in results:
-            if result.error_flag != 4:
-                all_dummy_results_flag = False
-                break
-
-        # If the results are an empty list then this means that all minimizers
-        # raise an exception and the tables will produce errors if they run
-        # for that problem set.
-        if not results or all_dummy_results_flag:
-            message = (
-                "\nWARNING: \nThe user chosen options and/or problem "
-                "setup resulted in all minimizers and/or parsers "
-                "raising an exception. Because of this, results for "
-                f"the {label} problem set will not be displayed. "
-                "Please see the logs for more detail on why this is "
-                "the case."
+    try:
+        for sub_dir in problem_sets:
+            # Create full path for the directory that holds a group of
+            # problem definition files
+            data_dir = os.path.join(current_path, sub_dir)
+
+            test_data = glob.glob(data_dir + "/*.*")
+
+            if not test_data:
+                LOGGER.warning("Problem set %s not found", data_dir)
+                continue
+
+            # generate group label/name used for problem set
+            try:
+                with open(
+                    os.path.join(data_dir, "META.txt"), encoding="utf-8"
+                ) as f:
+                    label = f.readline().strip("\n")
+            except OSError:
+                label = sub_dir.replace("/", "_")
+
+            LOGGER.info(
+                "Running the benchmarking on the %s problem set", label
             )
-            LOGGER.warning(message)
-        else:
-            LOGGER.info("Producing output for the %s problem set", label)
-            # Display the runtime and accuracy results in a table
-            group_results_dir, pp_dfs = save_results(
-                group_name=label,
-                results=results,
+            fit = Fit(
                 options=options,
-                failed_problems=failed_problems,
-                unselected_minimizers=unselected_minimizers,
-                config=cp.config,
+                data_dir=data_dir,
+                label=label,
+                checkpointer=cp,
             )
-
-            pp_dfs_all_prob_sets[label] = pp_dfs
-
-            LOGGER.info("Completed benchmarking for %s problem set", sub_dir)
-            group_results_dir = os.path.relpath(
-                path=group_results_dir, start=options.results_dir
-            )
-            result_dir.append(group_results_dir)
-            group_labels.append(label)
-
-    cp.finalise()
+            results, failed_problems, unselected_minimizers = fit.benchmark()
+
+            # If a result has error flag 4 then the result contains dummy
+            # values, if this is the case for all results then output should
+            # not be produced as results tables won't show meaningful values.
+            all_dummy_results_flag = True
+            for result in results:
+                if result.error_flag != 4:
+                    all_dummy_results_flag = False
+                    break
+
+            # If the results are an empty list then this means that all
+            # minimizers raise an exception and the tables will produce
+            # errors if they run for that problem set.
+            if not results or all_dummy_results_flag:
+                message = (
+                    "\nWARNING: \nThe user chosen options and/or problem "
+                    "setup resulted in all minimizers and/or parsers "
+                    "raising an exception. Because of this, results for "
+                    f"the {label} problem set will not be displayed. "
+                    "Please see the logs for more detail on why this is "
+                    "the case."
+                )
+                LOGGER.warning(message)
+            else:
+                LOGGER.info("Producing output for the %s problem set", label)
+                # Display the runtime and accuracy results in a table
+                group_results_dir, pp_dfs = save_results(
+                    group_name=label,
+                    results=results,
+                    options=options,
+                    failed_problems=failed_problems,
+                    unselected_minimizers=unselected_minimizers,
+                    config=cp.config,
+                )
+
+                pp_dfs_all_prob_sets[label] = pp_dfs
+
+                LOGGER.info(
+                    "Completed benchmarking for %s problem set", sub_dir
+                )
+                group_results_dir = os.path.relpath(
+                    path=group_results_dir, start=options.results_dir
+                )
+                result_dir.append(group_results_dir)
+                group_labels.append(label)
+
+    finally:
+        cp.finalise()

     # Check result_dir is non empty before producing output
     if not result_dir:
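For reference, the substance of the change above is that cp.finalise() moves from straight-line code after the loop into a finally clause, so the checkpoint is finalised even when a parser or minimizer raises part-way through a problem set. Below is a minimal sketch of that pattern; SimpleCheckpoint and run_problem_sets are hypothetical stand-ins for illustration, not fitbenchmarking's real Checkpoint class or run() function.

    # Hypothetical stand-in for fitbenchmarking's Checkpoint; only the
    # finalise-on-exit behaviour is illustrated.
    class SimpleCheckpoint:
        def finalise(self):
            # The real class would flush and close the checkpoint file here.
            print("checkpoint finalised")


    def run_problem_sets(problem_sets):
        cp = SimpleCheckpoint()
        try:
            for sub_dir in problem_sets:
                if sub_dir == "bad":
                    # Simulates a parser/minimizer raising mid-benchmark.
                    raise RuntimeError(f"benchmark failed for {sub_dir}")
                print(f"benchmarked {sub_dir}")
        finally:
            # Runs on normal completion *and* when an exception propagates,
            # so the checkpoint is never left unfinalised.
            cp.finalise()


    try:
        run_problem_sets(["NIST", "bad"])
    except RuntimeError as err:
        print(err)  # the exception still propagates after finalisation

Without the finally, an exception escaping fit.benchmark() would skip cp.finalise() entirely; with it, the checkpoint is closed first and the exception then propagates unchanged.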