Commit 5be4bae: Add files via upload

raidastauras authored Jan 7, 2018
1 parent b6c1a0c commit 5be4bae
Showing 8 changed files with 661 additions and 223 deletions.
66 changes: 48 additions & 18 deletions main.py
@@ -9,68 +9,98 @@
from helpers.get_historical_data import get_latest_oanda_data
import tensorflow as tf
import numpy as np
import pandas as pd
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

# TODO: trading params like position size kelly criterion
position_size = 10000
close_current_positions = True
start_on_spot = False

# oanda access keys
- accountID = ''
- access_token = ''
- model_name = 'lr-v2-avg_score0.204-64000'
+ accountID = '101-004-3943081-001'
+ access_token = 'fb12d7edd860927ce27467d8ec4aee94-1cb7ffc0e40d649b736315872a10c545'
+ model_name = 'lr-v1-avg_score1.454-2000'

# init trading session
trading_sess = TradingSession(accountID=accountID, access_token=access_token)
if close_current_positions:
    trading_sess.close_all_open_positions()

# init tf model
config = tf.ConfigProto(device_count={'GPU': 0})
sess = tf.Session(config=config)
saver = tf.train.import_meta_graph('saved_models/' + model_name + '.meta')
saver.restore(sess, tf.train.latest_checkpoint('saved_models/'))
graph = tf.get_default_graph()
x = graph.get_tensor_by_name("Placeholder:0")
tf_op_to_restore = graph.get_tensor_by_name("Softmax:0")
x = graph.get_tensor_by_name('Placeholder:0')
drop_out = graph.get_tensor_by_name('strided_slice_1:0')
y_ = graph.get_tensor_by_name('Softmax:0')
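# note: these names follow from graph construction order in models.py --
# 'Placeholder:0' is the first unnamed placeholder and 'strided_slice_1:0' is the
# drop_out slice; naming ops explicitly at build time would make restore-by-name sturdier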

- # Do stuff every period
- scheduler = BlockingScheduler()
+ # global variables
+ log = pd.DataFrame()
+ start_time = str(datetime.datetime.now())[:-7].replace(':', '-')


- @scheduler.scheduled_job(trigger='cron', day_of_week='0-6', hour='0-23', minute='0', second='5')
def do_stuff_every_period():

-     # retrieve data and return signal
+     global log
+     global start_time
+     current_time = str(datetime.datetime.now())[:-7]

+     # gather data and return signal
    oanda_data = get_latest_oanda_data('EUR_USD', 'H1', 64)
    input_data_raw, input_data_dummy = get_features(oanda_data)
    input_data, input_data_dummy = remove_nan_rows([input_data_raw, input_data_dummy])
    input_data_scaled_no_dummies = (input_data - min_max_scaling[1, :]) / (
        min_max_scaling[0, :] - min_max_scaling[1, :])
    input_data_scaled = np.concatenate([input_data_scaled_no_dummies, input_data_dummy], axis=1)
-     y_ = sess.run(tf_op_to_restore, feed_dict={x: input_data_scaled})
-     order_signal = y_.argmax()  # 0 stands for buy, 1 for sell, 2 for hold

-     print('{} | signal: buy: {:.2f}, sell: {:.2f}, nothing: {:.2f}'.format(
-         str(datetime.datetime.now())[:-4], y_[0][0], y_[0][1], y_[0][2]))
+     # estimate signal
+     y_pred = sess.run(y_, feed_dict={x: input_data_scaled, drop_out: 1})
+     order_signal = y_pred.argmax()  # 0 stands for buy, 1 for sell, 2 for hold

+     print('{} | price: {:.5f} | signal: buy: {:.2f}, sell: {:.2f}, nothing: {:.2f}'
+           .format(current_time, oanda_data[-1]['closeMid'], y_pred[0][0], y_pred[0][1], y_pred[0][2]))

    # if signal long
    if order_signal == 0:
        if trading_sess.order_book['EUR_USD']['order_type'] == -1:
            trading_sess.close_order('EUR_USD')
-         trading_sess.open_order('EUR_USD', 1)
+         trading_sess.open_order('EUR_USD', position_size)

    # if signal short
    elif order_signal == 1:
        if trading_sess.order_book['EUR_USD']['order_type'] == 1:
            trading_sess.close_order('EUR_USD')
-         trading_sess.open_order('EUR_USD', -1)
+         trading_sess.open_order('EUR_USD', -position_size)

    # else (uncharted waters)
    else:
        print('Do nothing')

+     # log
+     new_log = pd.DataFrame([[current_time, oanda_data[-1]['closeMid'], y_pred]],
+                            columns=['Datetime', 'Last input Price', 'y_pred'])
+     log = log.append(new_log)
+     log.to_csv('logs/log {}.csv'.format(start_time))


- # start
- do_stuff_every_period()
+ # Scheduler
+ scheduler = BlockingScheduler()
+ scheduler.add_job(do_stuff_every_period,
+                   trigger='cron',
+                   day_of_week='0-4',
+                   hour='0-23',
+                   minute='0',
+                   second='5')
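# note: day_of_week='0-4' is Monday-Friday in APScheduler's cron trigger,
# so this job fires at HH:00:05 every hour on weekdays only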

+ if start_on_spot:
+     do_stuff_every_period()
scheduler.start()

# close_order_manually(accountID, access_token, 1579)
# close_order_manually(accountID, access_token, 1603)
# trading_sess.check_open_positions()
# trading_sess.check_account_summary()
# trading_sess.order_book
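
The TODO at the top of main.py mentions sizing positions with the Kelly criterion instead of the fixed position_size. A minimal sketch of how that could plug in, assuming the softmax output for the chosen side is treated as the win probability (kelly_fraction and win_loss_ratio are illustrative names, not part of this commit):

    def kelly_fraction(p_win, win_loss_ratio):
        # Kelly criterion: f* = p - (1 - p) / b, where b is the average win/loss ratio
        return p_win - (1.0 - p_win) / win_loss_ratio

    # hypothetical sizing inside do_stuff_every_period():
    p_win = float(y_pred[0][order_signal])          # model probability of the chosen side
    fraction = max(0.0, kelly_fraction(p_win, win_loss_ratio=1.0))
    units = int(position_size * fraction)           # e.g. trading_sess.open_order('EUR_USD', units)

With win_loss_ratio=1.0 this reduces to 2*p_win - 1, so a 0.5 probability sizes to zero and full certainty sizes to the whole position_size.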
121 changes: 83 additions & 38 deletions models.py
@@ -6,24 +6,102 @@
import numpy as np


- def logistic_regression(input_dim, output_dim, drop_keep_prob):
+ def logistic_regression(input_dim, output_dim):
    """Simple logistic regression
    Returns x and y placeholders, logits and y_ (y hat)"""
    tf.reset_default_graph()

    x = tf.placeholder(tf.float32, [None, input_dim])
    y = tf.placeholder(tf.float32, [None, output_dim])
+     learning_r = tf.placeholder(tf.float32, 1)[0]
+     drop_out = tf.placeholder(tf.float32, 1)[0]

    w_init = tf.contrib.layers.xavier_initializer()
    b_init = tf.initializers.truncated_normal(mean=0.1, stddev=0.025)
-     weights = {0: tf.get_variable('weights1', shape=[input_dim, output_dim], initializer=w_init)}
-     biases = {0: tf.get_variable('bias1', shape=[output_dim], initializer=b_init)}
+     w = tf.get_variable('weights1', shape=[input_dim, output_dim], initializer=w_init)
+     b = tf.get_variable('bias1', shape=[output_dim], initializer=b_init)

-     logits = tf.nn.dropout(tf.matmul(x, weights[0]) + biases[0], keep_prob=drop_keep_prob)
+     logits = tf.matmul(tf.nn.dropout(x, keep_prob=drop_out), w) + b
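    # note: dropout is now applied to the input features rather than to the logits;
    # dropping logits would randomly zero whole class scores going into the softmax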
    y_ = tf.nn.softmax(logits)

    [print(var) for var in tf.trainable_variables()]
-     return x, y, logits, y_
+     return x, y, logits, y_, learning_r, drop_out


+ def lstm_nn(input_dim, output_dim, time_steps, n_hidden):
+     """LSTM net returns x and y placeholders, logits and y_ (y hat)"""
+
+     tf.reset_default_graph()
+
+     x = tf.placeholder(tf.float32, [None, time_steps, input_dim])
+     y = tf.placeholder(tf.float32, [None, output_dim])
+     learning_r = tf.placeholder(tf.float32, 1)[0]
+     drop_out = tf.placeholder(tf.float32, 1)[0]
+
+     w_init = tf.contrib.layers.xavier_initializer()
+     b_init = tf.initializers.truncated_normal(mean=0.1, stddev=0.025)
+     w = tf.get_variable('last_weights', shape=[n_hidden[-1], output_dim], initializer=w_init)
+     # b = tf.get_variable('bias1', shape=[output_dim], initializer=b_init)
+
+     x_split = tf.unstack(x, time_steps, 1)
+
+     # stack lstm cells, a cell per hidden layer
+     stacked_lstm_cells = []  # a list of lstm cells to be inputted into MultiRNNCell
+     for layer_size in n_hidden:
+         stacked_lstm_cells.append(tf.contrib.rnn.BasicLSTMCell(layer_size, activation=tf.nn.relu))
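    # note: relu here replaces the tanh used in the earlier version of this function;
    # relu leaves the LSTM cell state unbounded, which can make training less stable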

+     # create the net and add dropout
+     lstm_cell = tf.contrib.rnn.MultiRNNCell(stacked_lstm_cells)
+     lstm_cell_with_dropout = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=drop_out)
+
+     # forward propagate
+     outputs, state = tf.contrib.rnn.static_rnn(lstm_cell_with_dropout, x_split, dtype=tf.float32)
+     logits = tf.matmul(outputs[-1], w)  # + b  # logits are used for cross entropy
+     y_ = tf.nn.softmax(logits)
+
+     [print(var) for var in tf.trainable_variables()]
+     [print(i) for i in outputs]
+     print(y_)
+     return x, y, logits, y_, learning_r, drop_out


+ def cnn(input_dim, output_dim, time_steps, filters):
+     """CNN returns x and y placeholders, logits and y_ (y hat)"""
+
+     tf.reset_default_graph()
+
+     x = tf.placeholder(tf.float32, [None, input_dim, time_steps, 1])
+     y = tf.placeholder(tf.float32, [None, output_dim])
+     learning_r = tf.placeholder(tf.float32, 1)[0]
+     drop_out = tf.placeholder(tf.float32, 1)[0]
+
+     conv1 = tf.layers.conv2d(inputs=x,
+                              filters=filters[0],
+                              kernel_size=(input_dim, 1),
+                              kernel_initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32),
+                              strides=1,
+                              padding='valid',
+                              activation=tf.nn.relu)
+     # drop_out is a keep probability everywhere else, so convert it to a drop rate
+     # here; training=True is needed for tf.layers.dropout to have any effect
+     conv1_dropout = tf.layers.dropout(inputs=conv1, rate=1 - drop_out, training=True)
+     conv2 = tf.layers.conv2d(inputs=conv1_dropout,
+                              filters=filters[1],
+                              kernel_size=(1, time_steps),
+                              kernel_initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32),
+                              strides=1,
+                              padding='valid',
+                              activation=tf.nn.relu)
+     logits_dense = tf.layers.dense(inputs=conv2,
+                                    units=output_dim,
+                                    kernel_initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32),
+                                    activation=None,
+                                    use_bias=False)
+
+     logits = tf.reshape(logits_dense, (-1, output_dim))
+     y_ = tf.nn.softmax(logits)
+
+     [print(var) for var in tf.trainable_variables()]
+     print(y_)
+     return x, y, logits, y_, learning_r, drop_out


def vanilla_nn(input_dim, output_dim, architecture, drop_layer=0, drop_keep_prob=0.9):
@@ -60,36 +138,3 @@ def vanilla_nn(input_dim, output_dim, architecture, drop_layer=0, drop_keep_prob=0.9):
    print(y_)
    return x, y, layer_values[len(layer_values)-1], y_


- def lstm_nn(input_dim, output_dim, time_steps, n_hidden, drop_keep_prob=0.9):
-     """LSTM net returns x and y placeholders, logits and y_ (y hat)"""
-
-     tf.reset_default_graph()
-
-     x = tf.placeholder(tf.float32, [None, time_steps, input_dim])
-     y = tf.placeholder(tf.float32, [None, output_dim])
-
-     w_init = tf.contrib.layers.xavier_initializer()
-     w = tf.get_variable('last_weights', shape=[n_hidden[-1], output_dim], initializer=w_init)
-
-     x_split = tf.unstack(x, time_steps, 1)
-
-     # stack lstm cells, a cell per hidden layer
-     stacked_lstm_cells = []  # a list of lstm cells to be inputted into MultiRNNCell
-     for layer_size in n_hidden:
-         stacked_lstm_cells.append(tf.contrib.rnn.BasicLSTMCell(layer_size, activation=tf.nn.tanh))
-
-     # create the net and add dropout
-     lstm_cell = tf.contrib.rnn.MultiRNNCell(stacked_lstm_cells)
-     lstm_cell_with_dropout = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=drop_keep_prob)
-
-     # forward propagate
-     outputs, state = tf.contrib.rnn.static_rnn(lstm_cell_with_dropout, x_split, dtype=tf.float32)
-     logits = tf.matmul(outputs[-1], w)  # logits are used for cross entropy
-     y_ = tf.nn.softmax(logits)
-
-     [print(var) for var in tf.trainable_variables()]
-     [print(i) for i in outputs]
-     print(y_)
-     return x, y, logits, y_
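
For context on how these builders are driven: each returns learning_r and drop_out as scalar slices of shape-[1] placeholders, so both can be fed per sess.run call (main.py does exactly that at inference with drop_out: 1, since TF1 allows feeding any tensor, not only placeholders). A minimal TF1 training-step sketch under that assumption; the loss, optimizer, dimensions and dummy batch are illustrative, not taken from this commit:

    import numpy as np
    import tensorflow as tf
    from models import logistic_regression

    x, y, logits, y_, learning_r, drop_out = logistic_regression(input_dim=32, output_dim=3)
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    train_step = tf.train.AdamOptimizer(learning_rate=learning_r).minimize(cross_entropy)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_x = np.random.rand(128, 32).astype(np.float32)   # dummy features
        batch_y = np.eye(3)[np.random.randint(0, 3, 128)]      # dummy one-hot labels
        _, loss = sess.run([train_step, cross_entropy],
                           feed_dict={x: batch_x, y: batch_y,
                                      learning_r: 0.001,  # the sliced scalar tensors are
                                      drop_out: 0.9})     # fed directly, as in main.py
        print(loss)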

