run_benchmarks.py (forked from RomeoV/pyomo-MINLP-benchmarking)
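"""Benchmark a Pyomo MINLP solver on a directory of model files.

For every ``<model>.py`` in the model directory this script solves the model
it defines (expected as a module-level ``m``), redirects all solver output to
``results/<solver>/<model>.txt``, appends a GAMS-style record to a trace
file, records failing models in ``failed_models.txt``, and finally writes the
collected solving times to a CSV file.
"""
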
import os
import sys
import logging
from datetime import datetime
from julian_datetime import get_julian_datetime
from csv import writer as csv_writer
from argparse import ArgumentParser
from contextlib import contextmanager
from importlib import import_module
from pyomo.environ import SolverFactory
from pyomo.opt import SolverStatus
from pyomo.opt import TerminationCondition as tc
# from feasability_pump import do_the_solving
from parse_to_gams import (termination_condition_to_gams_format,
                           solver_status_to_gams)


def parse_command_line_arguments():
    parser = ArgumentParser(
        description='Benchmark specified solver on problem files')
    parser.add_argument('--redo-existing', dest='skip_existing', default=True,
                        action='store_const', const=False,
                        help='Redo benchmark if result file already exists')
    parser.add_argument('--no-skip-failed', dest='skip_failed', default=True,
                        action='store_const', const=False,
                        help="Do not skip files listed in 'failed_models.txt'")
    parser.add_argument('--solver', dest='solver_name', type=str, required=True,
                        metavar='solver_name',
                        choices=['baron', 'mindtpy', 'feas-pump'])
    parser.add_argument('--strategy', dest='solver_strategy', type=str,
                        required=False, metavar='solver_strategy',
                        help='Solver strategy (if applicable)')
    parser.add_argument('--timelimit', dest='timelimit', type=int,
                        required=False, metavar='timelimit', default=10,
                        help='Time limit (sec) for each model')
    parser.add_argument('--model-dir', dest='model_dir', default='models',
                        required=False, metavar='model_dir',
                        help='Directory where models are stored as .py files')
    return parser.parse_args()
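
# Example invocations (illustrative):
#   python run_benchmarks.py --solver baron --timelimit 60
#   python run_benchmarks.py --solver mindtpy --strategy OA --redo-existing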


@contextmanager
def redirect_stdout(ofile_obj):
    # Temporarily point sys.stdout at the given file object; restore it in a
    # finally-block so stdout comes back even if the body raises.
    original_stdout = sys.stdout
    sys.stdout = ofile_obj
    try:
        yield
    finally:
        sys.stdout = original_stdout
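
# Minimal usage sketch for redirect_stdout:
#   with open('solver.log', 'w') as log_f, redirect_stdout(log_f):
#       print('this line goes to solver.log')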


@contextmanager
def load_model(model_name):
    # Import the model module and expose it globally as ``model_scope`` so
    # that benchmark_model() can access the Pyomo model via model_scope.m.
    global model_scope
    model_scope = import_module(model_name)
    try:
        yield
    finally:
        del model_scope
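
# Each model file is expected to build its Pyomo model at import time and
# expose it as a module-level ``m``. A minimal sketch of a hypothetical
# models/example.py:
#
#   from pyomo.environ import ConcreteModel, Var, Objective
#
#   m = ConcreteModel()
#   m.x = Var(bounds=(0, 10))
#   m.obj = Objective(expr=(m.x - 3) ** 2)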


def construct_trace_data(opt, results):
    problem = results['Problem'][0]
    solver = results['Solver'][0]
    # Only MindtPy/GDPopt expose the nlp_solver/mip_solver configs used
    # below; for other solvers no trace record is produced.
    trace_data = None
    if args.solver_name in ['mindtpy', 'gdpopt']:
        trace_data = [
            model_name,  # GAMS model filename
            'MINLP',  # LP, MIP, NLP, etc.
            solver['Name'],  # solver name
            opt.CONFIG.nlp_solver,  # default NLP solver
            opt.CONFIG.mip_solver,  # default MIP solver
            get_julian_datetime(datetime.now()),  # start day/time of job
            0 if problem['Sense'] in [1, 'minimize'] else 1,  # direction: 0=min, 1=max
            problem['Number of constraints'],  # total number of equations
            problem['Number of variables'],  # total number of variables
            problem['Number of binary variables']
            + problem['Number of integer variables'],  # total number of discrete variables
            'nznum?',  # number of nonzeros
            'nlz?',  # number of nonlinear nonzeros
            0,  # 1 = optfile included
            termination_condition_to_gams_format(solver.Termination_condition),  # GAMS model return status
            solver_status_to_gams(solver.Status)
            if solver.Status is SolverStatus.ok
            else termination_condition_to_gams_format(solver.Termination_condition),  # GAMS solver return status
            problem['Upper bound'],  # value of objective function
            problem['Upper bound'],  # objective function estimate
            solver['Wallclock time'],  # resource time used (sec)
            solver['Iterations'],  # number of solver iterations
            0,  # dom used
            0,  # nodes used
            '# automatically generated by benchmarker',
        ]
    return trace_data
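
# The field order above follows the GAMS trace-record convention. One emitted
# line looks roughly like this (illustrative values only):
#   ex1223a, MINLP, MindtPy, ipopt, glpk, 2459215.5, 0, 12, 8, 4, nznum?, nlz?, 0, 1, 1, 4.58, 4.58, 0.73, 6, 0, 0, # automatically generated by benchmarker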


def benchmark_model(timelimit):
    try:
        opt = SolverFactory(args.solver_name)
        # Also write the solver's own log messages into the result file.
        opt.CONFIG.logger.propagate = False
        opt.CONFIG.logger.addHandler(
            logging.FileHandler(sys.stdout.name, mode=sys.stdout.mode))
        if args.solver_strategy is None:
            results = opt.solve(model_scope.m, tee=True, time_limit=timelimit)
        else:
            results = opt.solve(model_scope.m, tee=True, time_limit=timelimit,
                                strategy=args.solver_strategy)
        del opt.CONFIG.logger.handlers[0]
        solving_time = results.Solver[0].Wallclock_time
        print(f'Solving time: {solving_time}\n')
        if results.Solver[0].Termination_condition == tc.optimal:
            solving_times.append([model_name, solving_time])
        elif results.Solver[0].Termination_condition == tc.maxTimeLimit:
            solving_times.append([model_name, 'maxTimeLimit'])
        elif results.Solver[0].Termination_condition == tc.maxIterations:
            solving_times.append([model_name, 'maxIterations'])
        trace_data = construct_trace_data(opt, results)
        if trace_data is not None:
            trace_file_obj.write(', '.join(str(el) for el in trace_data) + '\n')
    except Exception as e:
        # os.remove(result_file)
        print(e)
        if model_file not in prev_failed_models:
            error_file_obj.write(model_file + '\n')
        print(f"Failed to solve '{model_file}'", file=sys.stderr)
        print(e, file=sys.stderr)
        print(f"File written to '{error_file}'", file=sys.stderr)


if __name__ == '__main__':
    args = parse_command_line_arguments()

    ####### SETUP (directories and files) #######
    sys.path.insert(0, './' + args.model_dir)  # necessary to import models
    if not os.path.exists('results'):
        print("Creating new directory: './results'")
        os.makedirs('results')

    # Set various filenames
    model_files = [model_file
                   for model_file in sorted(os.listdir(args.model_dir))
                   if model_file.endswith('.py')]
    solver_dir = args.solver_name + \
        (f"-{args.solver_strategy}" if args.solver_strategy else "")
    error_file = f"./results/{solver_dir}/failed_models.txt"
    trace_file = f"./results/{solver_dir}/trace_file.trc"
    solving_times_file = f"./results/{solver_dir}/solving_times.csv"
    if not os.path.exists('./results/' + solver_dir):
        print(f"Creating new directory: './results/{solver_dir}'")
        os.makedirs('./results/' + solver_dir)

    # Load previously failed models (or create an empty file)
    prev_failed_models = set()
    try:
        with open(error_file, 'r') as error_file_obj:
            for line in error_file_obj:
                prev_failed_models.add(line.strip())
    except FileNotFoundError:
        with open(error_file, 'a'):
            pass

    solving_times = [['Instance name', 'Average solving time']]
    print('################################')
    print(f"Benchmarking solver '{args.solver_name}' " +
          (f"with strategy '{args.solver_strategy}'" if args.solver_strategy else ""))
    print(f"Writing to './results/{solver_dir}'")
    print(f"Failed model files will be written to '{error_file}'")
    print(f"Trace files will be written to '{trace_file}'")
    print(f"Solving times will be written to '{solving_times_file}'")
    print('################################')

    for model_file in model_files:
        model_name, _ = os.path.splitext(model_file)  # strip the '.py' extension
        result_file = './results/' + solver_dir + '/' + model_name + '.txt'
        if args.skip_existing and os.path.exists(result_file):
            print(f"Skipping '{result_file}'")
            print("File already exists; use the '--redo-existing' flag to override")
            continue
        elif args.skip_failed and model_file in prev_failed_models:
            print(f"Skipping '{result_file}'")
            print("File listed in 'failed_models.txt'; use the '--no-skip-failed' flag to override")
            continue
        else:
            print(f"Benchmarking '{model_file}'")
            # Redirect all stdout to the result file and load the model
            # module so that it is available as model_scope.m.
            with open(result_file, 'w') as result_file_obj, \
                    open(error_file, 'a') as error_file_obj, \
                    open(trace_file, 'a') as trace_file_obj, \
                    redirect_stdout(result_file_obj), \
                    load_model(model_name):
                benchmark_model(args.timelimit)

    # newline='' prevents the csv module from writing blank rows on Windows
    with open(solving_times_file, 'w', newline='') as time_file:
        time_writer = csv_writer(time_file)
        time_writer.writerows(solving_times)
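
# The resulting solving_times.csv contains the header row plus one row per
# model whose run terminated with optimal, maxTimeLimit, or maxIterations,
# e.g. (illustrative values):
#   Instance name,Average solving time
#   ex1223a,4.58
#   batchdes,maxTimeLimit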