
Commit 19d7e11
Add files via upload
0 parents  commit 19d7e11

10 files changed: 1724 additions & 0 deletions

AttentionSeq2Seqtrain.py

Lines changed: 99 additions & 0 deletions
import tensorflow as tf
import numpy as np
import random
import csv
import Model
from TrajectoryLoader import TrajectoryLoader


# parameters for training
learning_rate = 0.001
num_batches = 100000
batch_size = 256
display_step = 50
# parameters for seq2seq model
n_lstm = 128
encoder_length = 20
decoder_length = 10

attention_func1 = 'dot'
attention_func2 = 'general'
attention_func3 = 'concat'

# Choose Adam optimizer.
optimizer = tf.keras.optimizers.Adam(learning_rate)

# Create and build encoder and decoder.
encoder_a = Model.Encoder(n_lstm, batch_size)
decoder_a = Model.DecoderAttention(n_lstm, batch_size, attention_func2)

# Run a dummy batch through both models so their weights are built
# and summary() can be printed.
x = np.zeros((batch_size, 1, 5), dtype=np.float32)
output = encoder_a(x)
decoder_a(x, output[1:], output[0])
encoder_a.summary()
decoder_a.summary()

# TensorBoard
summary_writer = tf.summary.create_file_writer('tensorboard')
tf.summary.trace_on(profiler=True)
# checkpoint
checkpoint1 = tf.train.Checkpoint(EncoderAttention=encoder_a)
manager1 = tf.train.CheckpointManager(checkpoint1, directory='./SaveEncoderAttention',
                                      checkpoint_name='EncoderAttention.ckpt', max_to_keep=10)
checkpoint2 = tf.train.Checkpoint(DecoderAttention=decoder_a)
manager2 = tf.train.CheckpointManager(checkpoint2, directory='./SaveDecoderAttention',
                                      checkpoint_name='DecoderAttention.ckpt', max_to_keep=10)


def RunOptimization(source_seq, target_seq_in, target_seq_out, step):
    loss = 0
    decoder_length = target_seq_out.shape[1]
    with tf.GradientTape() as tape:
        encoder_outputs = encoder_a(source_seq)
        states = encoder_outputs[1:]
        y_sample = 0
        for t in range(decoder_length):
            '''
            if t == 0 or random.randint(0, 1) == 0:
                decoder_in = tf.expand_dims(target_seq_in[:, t], 1)
            else:
                decoder_in = tf.expand_dims(y_sample, 1)
            '''
            # Teacher forcing: feed the ground-truth step as decoder input.
            decoder_in = tf.expand_dims(target_seq_in[:, t], 1)
            logit, de_state_h, de_state_c, _ = decoder_a(decoder_in, states, encoder_outputs[0])
            # TODO scheduled sampling
            y_sample = logit
            states = de_state_h, de_state_c
            # loss function: RMSE TODO
            loss_0 = tf.keras.losses.MSE(target_seq_out[:, t, 1:3], logit[:, 1:3])
            loss += tf.sqrt(loss_0)  # TODO

    variables = encoder_a.trainable_variables + decoder_a.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))

    loss = tf.reduce_mean(loss)
    loss = loss / decoder_length
    with summary_writer.as_default():
        tf.summary.scalar("loss", loss.numpy(), step=step)

    return loss


# Load trajectory data.
seq2seq_loader = TrajectoryLoader()
seq2seq_loader.loadTrajectoryData("./DataSet/TrajectoryMillion.csv")


for batch_index in range(1, num_batches + 1):
    seq_encoder, seq_decoder = seq2seq_loader.getBatchSeq2Seq(batch_size, encoder_length, decoder_length)
    seq_decoder_in = seq_decoder[:, :decoder_length, :]
    seq_decoder_out = seq_decoder[:, 1:decoder_length + 1, :]
    loss = RunOptimization(seq_encoder, seq_decoder_in, seq_decoder_out, batch_index)

    if batch_index % display_step == 0:
        print("batch %d: loss %f" % (batch_index, loss.numpy()))
        path1 = manager1.save(checkpoint_number=batch_index)
        path2 = manager2.save(checkpoint_number=batch_index)


with summary_writer.as_default():
    tf.summary.trace_export(name="model_trace", step=0, profiler_outdir='tensorboard')
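A note on the `# TODO scheduled sampling` above: the commented-out branch already sketches the idea of sometimes feeding the decoder its own previous prediction instead of the ground truth. A minimal way to complete it, assuming `logit` shares the per-step feature layout of `target_seq_in` so the prediction can be fed back; the fixed 0.5 probability mirrors the original `random.randint(0, 1)` coin flip and is an assumption, not the author's final design:

sampling_prob = 0.5  # hypothetical constant; could be annealed over training
for t in range(decoder_length):
    if t == 0 or random.random() < sampling_prob:
        # teacher forcing: feed the ground-truth previous step
        decoder_in = tf.expand_dims(target_seq_in[:, t], 1)
    else:
        # scheduled sampling: feed back the model's own previous prediction
        decoder_in = tf.expand_dims(y_sample, 1)
    logit, de_state_h, de_state_c, _ = decoder_a(decoder_in, states, encoder_outputs[0])
    y_sample = logit
    states = de_state_h, de_state_c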

BPtrain.py

Lines changed: 62 additions & 0 deletions
import csv
import tensorflow as tf
import numpy as np
from TrajectoryLoader import TrajectoryLoader
import Model

# parameters for mini-batch gradient descent training
learning_rate = 0.001
training_step = 30000
batch_size = 1024
display_step = 100

# parameters for network model
n_hidden = 128
bp_step = 6

# Load trajectory data.
bp_loader = TrajectoryLoader()
bp_loader.loadTrajectoryData("./DataSet/TrajectoryMillion.csv")

# Create BP model and build it with a dummy batch.
neural_net = Model.BP(n_hidden, bp_step, batch_size)

x = np.zeros((batch_size, bp_step, 5), dtype=np.float32)
neural_net(x)
neural_net.summary()

# Choose Adam optimizer.
optimizer = tf.optimizers.Adam(learning_rate)

# TensorBoard
summary_writer = tf.summary.create_file_writer('tensorboard')
tf.summary.trace_on(profiler=True)

# checkpoint
checkpoint = tf.train.Checkpoint(BP_network=neural_net)
manager = tf.train.CheckpointManager(checkpoint, directory='./SaveBP',
                                     checkpoint_name='BPnetwork.ckpt', max_to_keep=10)

# optimization process.
for batch_index in range(1, training_step + 1):
    x, y = bp_loader.getBatchBP(batch_size, bp_step)
    with tf.GradientTape() as tape:
        pred = neural_net(x)
        loss = tf.keras.losses.MSE(y, pred)
        loss = tf.reduce_mean(loss)
    with summary_writer.as_default():
        tf.summary.scalar("loss", loss.numpy(), step=batch_index)
    gradients = tape.gradient(loss, neural_net.trainable_variables)
    optimizer.apply_gradients(zip(gradients, neural_net.trainable_variables))

    if batch_index % display_step == 0:
        pred = neural_net(x)
        loss = tf.keras.losses.MSE(y, pred)
        loss = tf.reduce_mean(loss)
        print("batch %d: loss %f" % (batch_index, loss.numpy()))
        path = manager.save(checkpoint_number=batch_index)

with summary_writer.as_default():
    tf.summary.trace_export(name="model_trace", step=0, profiler_outdir='tensorboard')
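The data contract of `TrajectoryLoader.getBatchBP` is not shown in this commit. For smoke-testing the loop above without `./DataSet/TrajectoryMillion.csv`, a hypothetical stand-in can be inferred from the build call (`x` of shape `[batch_size, bp_step, 5]`); the `y` shape of `[batch_size, 5]` is an assumption, not the real loader's behavior:

import numpy as np

class FakeTrajectoryLoader:
    """Hypothetical stand-in for TrajectoryLoader; all shapes are assumptions."""
    def loadTrajectoryData(self, path):
        pass  # the fake needs no file

    def getBatchBP(self, batch_size, bp_step):
        x = np.random.rand(batch_size, bp_step, 5).astype(np.float32)  # assumed input shape
        y = np.random.rand(batch_size, 5).astype(np.float32)           # assumed target shape
        return x, y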

LSTMtrain.py

Lines changed: 84 additions & 0 deletions
import csv
import tensorflow as tf
import numpy as np
from TrajectoryLoader import TrajectoryLoader
import Model

# parameters for mini-batch gradient descent training
learning_rate = 0.0005
num_batches = 10000
batch_size = 256
display_step = 100
# parameters for LSTM network
n_lstm = 128
lstm_step = 6
seq_length = 20

# Choose Adam optimizer.
optimizer = tf.optimizers.Adam(learning_rate)
#optimizer = tf.optimizers.SGD(learning_rate)

# Create LSTM model and build it with a dummy batch.
neural_net = Model.LSTM(n_lstm, lstm_step, batch_size)

x = np.zeros((batch_size, seq_length - lstm_step, lstm_step * 5), dtype=np.float32)
neural_net(x)
neural_net.summary()

# TensorBoard
summary_writer = tf.summary.create_file_writer('tensorboard')
tf.summary.trace_on(profiler=True)
# checkpoint
checkpoint = tf.train.Checkpoint(LSTM_network=neural_net)
manager = tf.train.CheckpointManager(checkpoint, directory='./SaveLSTM',
                                     checkpoint_name='LSTMnetwork.ckpt', max_to_keep=10)


def StepProcess(input, batch_size, seq_length, lstm_step):
    # Slide a window of lstm_step frames over each sequence and
    # concatenate the frames of every window along the feature axis.
    if lstm_step == 1:
        return input
    else:
        seq_length_new = seq_length - lstm_step + 1
        output = []
        for i in range(lstm_step):
            seq_sub = input[:, i:seq_length_new + i, :]
            output.append(seq_sub)
        return np.concatenate(output, axis=2)


def RunOptimization(x, y, step):
    with tf.GradientTape() as tape:
        pred = neural_net(x)
        # loss function: RMSE TODO
        loss = tf.keras.losses.MSE(y[:, 1:3], pred[:, 1:3])
        loss = tf.sqrt(loss)

    gradients = tape.gradient(loss, neural_net.trainable_variables)
    optimizer.apply_gradients(zip(gradients, neural_net.trainable_variables))

    loss = tf.reduce_mean(loss)
    with summary_writer.as_default():
        tf.summary.scalar("loss", loss.numpy(), step=step)
    return loss


# Load trajectory data.
lstm_loader = TrajectoryLoader()
lstm_loader.loadTrajectoryData("./DataSet/TrajectoryMillion.csv")

# optimization process.
for batch_index in range(1, num_batches + 1):
    # x.shape: [batch_size, seq_length, 5], y.shape: [batch_size, 5]
    x, y = lstm_loader.getBatchLSTM(batch_size, seq_length)
    # after StepProcess, x.shape: [batch_size, seq_length-lstm_step+1, lstm_step*5]
    x = StepProcess(x, batch_size, seq_length, lstm_step)
    loss = RunOptimization(x, y, batch_index)

    if batch_index % display_step == 0:
        print("batch %d: loss %f" % (batch_index, loss.numpy()))
        path = manager.save(checkpoint_number=batch_index)

with summary_writer.as_default():
    tf.summary.trace_export(name="model_trace", step=0, profiler_outdir='tensorboard')
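`StepProcess` turns each length-`seq_length` sequence into overlapping windows of `lstm_step` consecutive frames, concatenated along the feature axis. A quick shape check using the function defined above; the tiny sizes here are illustrative only:

import numpy as np

demo = np.arange(1 * 4 * 5, dtype=np.float32).reshape(1, 4, 5)
out = StepProcess(demo, batch_size=1, seq_length=4, lstm_step=2)
print(out.shape)  # (1, 3, 10): 4 - 2 + 1 = 3 windows, 2 * 5 = 10 features
# out[0, 0] is demo[0, 0] and demo[0, 1] concatenated, out[0, 1] is
# demo[0, 1] and demo[0, 2] concatenated, and so on.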
