-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathwallclock.py
135 lines (100 loc) · 3.71 KB
/
wallclock.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import pandas as pd
import numpy as np
from river import drift
from scipy import stats
import torch
from torch import nn
import time
class Net_3_2(nn.Module):
    """Tiny MLP classifier: 3 inputs -> 2 hidden units (ReLU) -> 2 logits."""

    def __init__(self):
        super().__init__()
        # Attribute names (fc1/relu/fc2) are part of the state_dict layout
        # and must stay unchanged so saved weights still load.
        self.fc1 = nn.Linear(3, 2)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(2, 2)

    def forward(self, x):
        """Return raw (unnormalized) 2-class logits for input x."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)
class Net_10_2(nn.Module):
    """MLP classifier: 10 inputs -> 8 -> 4 (ReLU between layers) -> 2 logits."""

    def __init__(self):
        super().__init__()
        # Attribute names mirror the saved checkpoints' state_dict keys;
        # renaming any of them would break load_state_dict.
        self.fc1 = nn.Linear(10, 8)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(8, 4)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(4, 2)

    def forward(self, x):
        """Return raw 2-class logits for input x."""
        h = self.relu1(self.fc1(x))
        h = self.relu2(self.fc2(h))
        return self.fc3(h)
def determine_wallclock(dataset_names):
    """Print min/max/mean runtime collected from pre-recorded result CSVs.

    For each dataset name, reads results/{dataset}_{i}.csv for i in 1..5,
    keeps the strictly positive entries of the "runtime" column, and prints
    the global minimum, maximum, and mean across all files.

    Args:
        dataset_names: iterable of dataset name strings used to build the
            CSV file paths (files are assumed to exist under results/).
    """
    min_time, max_time = None, None
    all_times = []
    # Loop over all datasets, five result files per dataset.
    for dataset in dataset_names:
        print(dataset, "dataset")
        for i in range(1, 6):
            data = pd.read_csv(f"results/{dataset}_{i}.csv")
            # Non-positive runtimes mark invalid/unmeasured rows; drop them.
            running_time = [t for t in data["runtime"].to_list() if t > 0]
            if not running_time:
                # BUG FIX: np.min/np.max raise on an empty sequence, so a file
                # with no positive runtimes crashed the original. Skip it.
                continue
            min_data = np.min(running_time)
            max_data = np.max(running_time)
            print(min_data, max_data)
            # Use identity comparison with None (PEP 8), not `== None`.
            if min_time is None or min_data < min_time:
                min_time = min_data
            if max_time is None or max_data > max_time:
                max_time = max_data
            all_times.extend(running_time)
    # Guard against an empty collection (np.mean([]) warns and yields NaN).
    mean_time = np.mean(all_times) if all_times else 0
    print(min_time, "min time")
    print(max_time, "max time")
    print(mean_time, "mean time")
def determine_wallclock_er(dataset_names):
    """Measure and print per-query inference wall-clock time (in ms).

    For each dataset name and run index 1..5, loads the feature/label arrays
    from datasets/ and the matching Net_10_2 weights from model_weights/,
    then times up to 10000 single-sample forward passes and prints the
    global min/max/mean query time in milliseconds.

    Args:
        dataset_names: iterable of dataset name strings used to build the
            .npy / .pth file paths.
    """
    min_time, max_time = None, None
    all_times = []
    for dataset in dataset_names:
        print(dataset, "dataset")
        # BUG FIX: the original used `i` for BOTH this run index and the
        # inner query index, shadowing the loop variable. Renamed to `run`/`q`.
        for run in range(1, 6):
            # Load features and labels and convert to torch tensors.
            features = torch.from_numpy(
                np.load(f"datasets/features_{dataset}_{run}.npy")).float()
            labels = torch.from_numpy(
                np.load(f"datasets/labels_{dataset}_{run}.npy")).long()
            # Build the model and restore its trained weights.
            model = Net_10_2()
            model.load_state_dict(torch.load(f"model_weights/{dataset}_{run}.pth"))
            model.eval()  # inference mode (no-op for this net, but good hygiene)
            # Guard short datasets instead of assuming >= 10000 samples.
            n_queries = min(10000, len(features))
            for q in range(n_queries):
                # Time one forward pass + prediction, matching the original
                # measured region (forward, softmax/argmax, error check).
                query_time = time.time()
                with torch.no_grad():
                    logits = model(features[q])
                predicted = torch.argmax(torch.softmax(logits, dim=0), dim=0)
                # 0 if correct, 1 if wrong — kept inside the timed region for
                # parity with the original measurement, result unused.
                _ = int(not labels[q] == predicted)
                query_time = time.time() - query_time
                all_times.append(query_time)
                # Ignore non-positive readings for the minimum: time.time()
                # granularity can report 0 for very fast queries.
                if (min_time is None or query_time < min_time) and query_time > 0:
                    min_time = query_time
                if max_time is None or query_time > max_time:
                    max_time = query_time
    mean_time = np.mean(all_times) if all_times else 0
    # Report in milliseconds.
    print(min_time * 1000, "min time")
    print(max_time * 1000, "max time")
    print(mean_time * 1000, "mean time")
def main():
    """Entry point: run the inference wall-clock measurement on the chosen datasets."""
    # Dataset collections measured in earlier experiments:
    # dataset_names = ["SEA_0_1", "SEA_0_2", "SEA_0_3", "SEA_1_2", "SEA_1_3", "SEA_2_3"]
    dataset_names = ["HYP_001"]
    # determine_wallclock(dataset_names)
    determine_wallclock_er(dataset_names)


if __name__ == '__main__':
    main()