@@ -15,7 +15,7 @@ Data preparation

    import numpy as np
    from traffic.core import Traffic

    t = (
        # trajectories during the opening hours of the sector
        Traffic.from_file("data/LFBBPT_flights_2017.pkl")
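A quick sanity check, not in the original text, assuming the (truncated) pipeline above evaluates to a ``Traffic`` collection:

.. code:: python

    # Illustrative check: how many trajectories remain after the preparation step
    print(f"{len(t)} flights")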
@@ -42,14 +42,14 @@ the following `github repository <https://github.com/lbasora/sectflow>`_

.. code:: python

-    from traffic.core.projection import Lambert93
+    from cartes.crs import Lambert93

    # pip install git+https://github.com/lbasora/sectflow
    from sectflow.clustering import TrajClust

    features = ["x", "y", "latitude", "longitude", "altitude", "log_altitude"]
    clustering = TrajClust(features)

    # use the clustering API from traffic
    t_cluster = t.clustering(
        nb_samples=2, features=features, projection=Lambert93(), clustering=clustering
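As an illustration (not part of the original snippet), one way to check how flights spread across clusters, using the ``cluster`` column that the plotting code below relies on:

.. code:: python

    # Sketch only: count flights per cluster label
    # (outliers, if any, are assumed here to be labelled -1)
    cluster_sizes = t_cluster.data.groupby("cluster").flight_id.nunique()
    print(cluster_sizes.sort_values(ascending=False))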
@@ -59,7 +59,7 @@ the following `github repository <https://github.com/lbasora/sectflow>`_

    # Color distribution by cluster
    from itertools import cycle, islice

    n_clusters = 1 + t_cluster.data.cluster.max()
    color_cycle = cycle(
        "#fbbb35 #004cb9 #4cc700 #a50016 #510420 #01bcf5 #999999 #e60085 #ffa9c5".split()
@@ -69,29 +69,29 @@ the following `github repository <https://github.com/lbasora/sectflow>`_

    import matplotlib.pyplot as plt
    from random import sample

    from traffic.data import airways, aixm_airspaces
    from traffic.drawing.markers import rotate_marker, atc_tower, aircraft

    with plt.style.context("traffic"):
        fig, ax = plt.subplots(1, figsize=(15, 10), subplot_kw=dict(projection=Lambert93()))

        aixm_airspaces["LFBBPT"].plot(
            ax, linewidth=3, linestyle="dashed", color="steelblue"
        )
        for name in "UN460 UN869 UM728".split():
            airways[name].plot(ax, linestyle="dashed", color="#aaaaaa")

        # do not plot outliers
        for cluster in range(n_clusters):

            current_cluster = t_cluster.query(f"cluster == {cluster}")

            # plot the centroid of each cluster
            centroid = current_cluster.centroid(50, projection=Lambert93())
            centroid.plot(ax, color=colors[cluster], alpha=0.9, linewidth=3)
            centroid_mark = centroid.at_ratio(0.45)

            # little aircraft
            centroid_mark.plot(
                ax,
@@ -100,22 +100,22 @@ the following `github repository <https://github.com/lbasora/sectflow>`_

                s=500,
                text_kw=dict(s=""),  # no text associated
            )

            # plot some sample flights from each cluster
            sample_size = min(20, len(current_cluster))
            for flight_id in sample(current_cluster.flight_ids, sample_size):
                current_cluster[flight_id].plot(
                    ax, color=colors[cluster], alpha=0.1, linewidth=2
                )

        # TODO improve this: extent with buffer
        ax.set_extent(
            tuple(
                x - 0.5 + (0 if i % 2 == 0 else 1)
                for i, x in enumerate(aixm_airspaces["LFBBPT"].extent)
            )
        )

        # Equivalent of Fig. 5
@@ -134,13 +134,13 @@ The anomaly detection method is based on a stacked autoencoder

    import torch
    from torch import nn, optim, from_numpy, rand
    from torch.autograd import Variable

    from sklearn.preprocessing import minmax_scale
    from tqdm.autonotebook import tqdm


    # Stacked autoencoder

    class Autoencoder(nn.Module):
        def __init__(self):
            super().__init__()
@@ -150,54 +150,54 @@ The anomaly detection method is based on a stacked autoencoder

            self.decoder = nn.Sequential(
                nn.Linear(12, 24), nn.ReLU(), nn.Linear(24, 50), nn.Sigmoid()
            )

        def forward(self, x, **kwargs):
            x = x + (rand(50).cuda() - 0.5) * 1e-3  # add some noise
            x = self.encoder(x)
            x = self.decoder(x)
            return x

    # Regularisation term introduced in IV.B.2

    def regularisation_term(X, n):
        # penalise where the empirical distribution of reconstruction errors
        # exceeds a reference exponential density evaluated on the sampled support
        samples = torch.linspace(0, X.max(), 100, requires_grad=True)
        mean = samples.mean()
        return torch.relu(
            (torch.histc(X) / n * 100 - 1 / mean * torch.exp(-samples / mean))
        ).mean()

    # ML part

    def anomalies(t: Traffic, cluster_id: int, lambda_r: float, nb_it: int = 10000):

        t_id = t.query(f"cluster=={cluster_id}")

        flight_ids = list(f.flight_id for f in t_id)
        n = len(flight_ids)
        X = minmax_scale(np.vstack([f.data.track[:50] for f in t_id]))

        model = Autoencoder().cuda()
        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)

        for epoch in tqdm(range(nb_it), leave=False):

            v = Variable(from_numpy(X.astype(np.float32))).cuda()

            output = model(v)
            distance = nn.MSELoss(reduction="none")(output, v).sum(1).sqrt()

            loss = criterion(output, v)
            # regularisation
            loss = (
                lambda_r * regularisation_term(distance.cpu().detach(), n)
                + criterion(output, v).cpu()
            )

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        output = model(v)
        return (
            (nn.MSELoss(reduction="none")(output, v).sum(1)).sqrt().cpu().detach().numpy()
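As a usage sketch (not shown in this excerpt), the reconstruction errors for one cluster could then be computed along these lines; the ``lambda_r`` and ``nb_it`` values here are only illustrative:

.. code:: python

    # Hypothetical call: one reconstruction error per flight of cluster 0
    output = anomalies(t_cluster, cluster_id=0, lambda_r=0.1, nb_it=5000)

    # a higher reconstruction error suggests a more atypical trajectory
    print(output.shape, output.mean(), output.max())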
@@ -215,16 +215,16 @@ parameter helps reducing this trend.

.. code:: python

    from scipy.stats import expon

    # Equivalent of Fig. 4

    with plt.style.context("traffic"):
        fig, ax = plt.subplots(1, figsize=(10, 7))
        hst = ax.hist(output, bins=50, density=True)
        mean = output.mean()
        x = np.arange(0, output.max(), 1e-2)
        e = expon.pdf(x, 0, output.mean())

        ax.plot(x, e, color="#e77074")
        ax.fill_between(x, e, zorder=-2, color="#e77074", alpha=0.5)
        ax.axvline(
@@ -234,6 +234,3 @@ parameter helps reducing this trend.

.. image:: images/sectflow_distribution.png
    :align: center
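The truncated ``ax.axvline`` call above presumably marks a threshold on the reconstruction error. As a sketch only (the threshold choice is an assumption, not taken from the paper), errors beyond a high quantile of the fitted exponential could be flagged:

.. code:: python

    # Assumption: flag errors above the 99th percentile of the fitted exponential
    threshold = expon.ppf(0.99, 0, output.mean())
    anomalous = output > threshold
    print(f"{anomalous.sum()} flights above threshold {threshold:.3f}")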