-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcheck_sol.py
167 lines (126 loc) · 5.04 KB
/
check_sol.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
import pickle
import os
import dimod
import numpy as np
import csv
from AGV_quantum import get_results, LinearAGV, make_sol, plot_train_diagram
from pathlib import Path
def std_from_hist(value, counts):
    """Return the weighted mean and population standard deviation of a histogram.

    :param value: sequence of bin values (any array-like; coerced to float array)
    :param counts: sequence of occurrence counts used as weights, same length
    :return: tuple ``(mean, std)`` of numpy floats
    """
    # Coerce to ndarrays so plain Python lists work too — the original
    # `(value - mean)**2` requires ndarray broadcasting.
    value = np.asarray(value, dtype=float)
    counts = np.asarray(counts, dtype=float)
    mean = np.average(value, weights=counts)
    # Population (not sample) variance, weighted by the counts.
    var = np.average((value - mean) ** 2, weights=counts)
    return mean, np.sqrt(var)
def obj_hist(hist_feas):
    """Build a histogram of objective values and print its summary statistics.

    :param hist_feas: sequence of objective values (one per feasible solution)
    :return: dict with keys ``"value"`` (sorted unique values, ndarray) and
        ``"count"`` (occurrence count per value, ndarray)
    """
    # np.unique returns sorted unique values and their counts in one
    # O(n log n) pass — replaces the original O(n^2)
    # [hist_feas.count(x) for x in unique] construction with identical output.
    xs_f, ys_f = np.unique(hist_feas, return_counts=True)
    print(xs_f)
    print(ys_f)
    mean, std = std_from_hist(xs_f, ys_f)
    print("mean (hist) = ", mean)
    print("std (hist) = ", std)
    return {"value": xs_f, "count": ys_f}
def csv_write_hist(file_name, hist, key1 = "value", key2 = "count"):
"""
write histogram to csv
input:
- file_name: string - csv file name
- hist: dict - containing histogram
- key1: string - key for value in histogram
- key2: string - key for counts in histogram
"""
with open(file_name, 'w', newline='', encoding="utf-8") as csvfile:
fieldnames = [key1, key2]
value = hist[key1]
count = hist[key2]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
for i,v in enumerate(value):
writer.writerow({key1: v, key2: count[i]})
# --- command-line interface and data loading (runs at import time) ----------
import argparse

parser = argparse.ArgumentParser("Solve linear or quadratic")
parser.add_argument(
    "--example",
    type=str,
    help="chose example out of [tiny, smallest, small, medium_small, medium, large, largest]",
    default="small",
)
parser.add_argument(
    "--hyb_solver",
    type=str,
    help="chose bqm or cqm",
    default="cqm",
)
args = parser.parse_args()
cwd = os.getcwd()

# Whether to draw a train diagram for selected feasible solutions
# (switched on only for the "medium" example below).
train_diagram = False

# Suffix selecting which saved sampleset file to load.
# NOTE(review): the "_10" value is immediately overwritten by "" — dead
# assignment; presumably a leftover toggle for a 10-sample run. Confirm intent.
count = "_10"
count = ""

# Map the chosen example to its annealing-results folder; the "small" and
# "medium" examples additionally import the problem definition needed for
# plotting. NOTE(review): an unrecognised --example value leaves sol_folder
# undefined and crashes later with NameError — no validation here.
if args.example == "tiny":
    sol_folder = Path("annealing_results/tiny_2_AGV")
if args.example == "smallest":
    sol_folder = Path("annealing_results/2_AGV")
if args.example == "small":
    sol_folder = Path("annealing_results/4_AGV")
    from examples.example_small import M, tracks, tracks_len, agv_routes, d_max, tau_pass, tau_headway, tau_operation, weights, initial_conditions
if args.example == "medium_small":
    sol_folder = Path("annealing_results/6_AGV")
if args.example == "medium":
    from examples.example_medium import M, tracks, tracks_len, agv_routes, d_max, tau_pass, tau_headway, tau_operation, weights, initial_conditions
    sol_folder = Path("annealing_results/7_AGV")
    train_diagram = True
if args.example == "large":
    sol_folder = Path("annealing_results/12_AGV")
if args.example == "largest":
    sol_folder = Path("annealing_results/15_AGV")
if args.example == "largest_ever":
    sol_folder = Path("annealing_results/21_AGV")

# Pickled LP model matching the chosen example.
lp_folder = Path(f"lp_files/lp_{args.example}.pkl")

# NOTE(review): assert is stripped under `python -O`; an explicit check with
# a raised error would be more robust for input validation.
assert args.hyb_solver in ["bqm", "cqm"]
hybrid = args.hyb_solver

# Load the serialized sampleset produced by the hybrid solver run...
with open(os.path.join(cwd, sol_folder, f"new_{hybrid}{count}.pkl"), "rb") as f:
    sampleset = pickle.load(f)
# ...and the pickled LP problem instance.
with open(os.path.join(cwd, lp_folder), "rb") as f:
    lp = pickle.load(f)

# Rebuild a dimod.SampleSet object from its serialisable form.
sampleset = dimod.SampleSet.from_serializable(sampleset)
if __name__ == '__main__':
    if hybrid == "bqm":
        # BQM route: recompute the Ising energy of the returned sample by hand
        # and report feasibility statistics after mapping back to the binary LP.
        print(sampleset.info)
        # Penalty weight used when converting the LP into QUBO/Ising form.
        p = 5
        lp.to_bqm_qubo_ising(p)
        for d in sampleset.data():  # TODO this is UGLY but the loop has always one element
            solution_vars = d.sample
            # Map binary {0,1} variables to Ising spins {-1,+1}.
            solution_vars_ising = {k: 2*v-1 for k, v in solution_vars.items()}
            energy_computed = dimod.utilities.ising_energy(solution_vars_ising, lp.ising[0], lp.ising[1])
            print("Ising energy", energy_computed)
            print("Ising offset", lp.ising[2])
            print("Ising energy + offset", energy_computed + lp.ising[2])
        # Re-interpret the sampleset in terms of the original binary LP variables.
        sampleset = lp.interpreter(sampleset, "BIN")
        solutions = get_results(sampleset, lp)
        print(solutions[0]['energy'])
        # feas_constraints appears to be (all constraints, number broken) —
        # TODO confirm against get_results.
        constraints = len(solutions[0]['feas_constraints'][0])
        not_feas = solutions[0]['feas_constraints'][1]
        #print(solutions[0])
        print("feasible", solutions[0]['feasible'])
        print("n.o. constraints", constraints)
        print("broken feas", not_feas)
        # NOTE(review): "precentage" is a typo in the output string (left
        # unchanged here since it is runtime output).
        print("precentage broken", not_feas/constraints)
    elif hybrid == "cqm":
        # CQM route: collect objectives of the feasible solutions, optionally
        # plot train diagrams, and dump the objective histogram to CSV.
        obj = []
        solutions = get_results(sampleset, lp)
        print(lp.nvars)
        print(sampleset.info)
        # Running count of feasible solutions seen so far.
        k = 0
        for sol in solutions:
            if sol["feasible"]:
                k = k+1
                obj.append(sol['objective'])
                print(sol['objective'])
                # Plot only the 1st and 60th feasible solution — presumably
                # "best" and a representative later one; confirm the choice of 60.
                if train_diagram and (k == 1 or k == 60):
                    AGV = LinearAGV(M, tracks, tracks_len, agv_routes, d_max, tau_pass, tau_headway, tau_operation, weights,
                                    initial_conditions)
                    d = make_sol(AGV.t_iter, sol["sample"])
                    plot_train_diagram(d, agv_routes, tracks_len, f"CQM objective = {sol['objective']}")
        print("no solutions", len(solutions))
        # NOTE(review): divides by len(solutions) — raises ZeroDivisionError
        # if the sampleset is empty.
        print("feasibility percentage", k/len(solutions))
        # Histogram of feasible objectives, written next to the input pickles.
        d = obj_hist(obj)
        file_name = f"{sol_folder}/obj_hist{count}.csv"
        csv_write_hist(file_name, d, key1 = "value", key2 = "count")