Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 43 additions & 41 deletions src/NeuralNet.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,16 @@ def __init__(self, layer_sizes, num_inputs):
self.layers[i] = new_layer

# Set step size for numerical integration
self.h_param = 0.000001
self.h_param = 0.0000001
# Set learning rate value
self.learning_rate = 0.4
self.learning_rate = 3.0

def sim(self, inputs):
"""Simulates the feedforward network
inputs is a list of inputs which corresponds to the number of inputs in the input layer"""
inputs is a 2darray of inputs which corresponds to the number of inputs in the input layer
Columns in the inputs are samples
Rows are elements
Ie a single input should be a single column"""

tempOutputs = inputs
for i in self.layers:
Expand All @@ -39,16 +42,17 @@ def sim(self, inputs):

def train(self, inputs, outputs, num_epochs):
"""Trains the network
inputs is a 2darray of inputs, rows are samples, columns are elements
outputs is a 2darray of corresponding network outputs, rows are samples, columns are elements
inputs is a 2darray of inputs, rows are elements, columns are samples
outputs is a 2darray of corresponding network outputs, rows are elements, columns are samples
num_epochs is the number of epochs to run
"""

# Loop through the epochs
for i in range(num_epochs):
# Find current cost
current_outputs = [self.sim(inputs[:,n]) for n in range(np.size(inputs, 1))]
current_outputs = np.concatenate(current_outputs,1)
#current_outputs = [self.sim(inputs[:,n]) for n in range(np.shape(inputs)[1])]
#current_outputs = np.concatenate(current_outputs,1)
current_outputs = self.sim(inputs)
current_cost = cost(current_outputs, outputs)
print "Cost for epoch %d: %f" % (i,current_cost)
# Find the partial derivative of Cost w.r.t. each parameter
Expand All @@ -59,8 +63,9 @@ def train(self, inputs, outputs, num_epochs):
# Change weight by self.h_param
j.w[k,m] += self.h_param
# Simulate network
perturbed_outputs = [self.sim(inputs[:,n]) for n in range(np.size(inputs, 1))]
perturbed_outputs = np.concatenate(perturbed_outputs, 1)
#perturbed_outputs = [self.sim(inputs[:,n]) for n in range(np.size(inputs, 1))]
#perturbed_outputs = np.concatenate(perturbed_outputs, 1)
perturbed_outputs = self.sim(inputs)
# Find cost
perturbed_cost = cost(perturbed_outputs, outputs)
# Calculate partial cost
Expand All @@ -74,8 +79,9 @@ def train(self, inputs, outputs, num_epochs):
# Change bias by self.h_param
j.b[k] += self.h_param
# Simulate network
perturbed_outputs = [self.sim(inputs[:,n]) for n in range(np.size(inputs, 1))]
perturbed_outputs = np.concatenate(perturbed_outputs, 1)
#perturbed_outputs = [self.sim(inputs[:,n]) for n in range(np.size(inputs, 1))]
#perturbed_outputs = np.concatenate(perturbed_outputs, 1)
perturbed_outputs = self.sim(inputs)
# Find cost
perturbed_cost = cost(perturbed_outputs, outputs)
# Calculate partial cost
Expand All @@ -92,16 +98,30 @@ def train(self, inputs, outputs, num_epochs):
for k in j.nodes:
k.update_parameters()
'''
def __str__(self):
    """Return a printable multi-line description of the network.

    Produces one line per layer, in feedforward order, delegating the
    per-layer text to Layer.__str__; each line is newline-terminated.
    """
    # join() replaces the original quadratic `a += str(...)` loop and
    # the index-based `range(len(self.layers))` iteration.
    return "".join(str(layer) + "\n" for layer in self.layers)

class Layer:
def __init__(self, num_nodes, num_inputs):
"""Initialises the Layer object
num_nodes is the number of Nodes to be in the layer
num_inputs is the number of outputs from the previous layer"""
num_inputs is the number of outputs from the previous layer

self.w = np.matrix(np.random.rand(num_nodes, num_inputs))
The weights w will be of size [num_nodes x num_inputs]
The biases b will be of size [num_nodes x 1]"""
a = 10
self.num_nodes = num_nodes
self.num_inputs = num_inputs
self.w = np.matrix(a*(np.random.normal(0,1,size = [num_nodes, num_inputs])))
self.w_to_update = np.zeros_like(self.w)
self.b = np.matrix.transpose(np.matrix(np.random.rand(num_nodes)))
self.b = np.matrix.transpose(a*(np.matrix(np.random.normal(0,1,num_nodes))))
self.b_to_update = np.zeros_like(self.b)
'''
self.nodes = [0 for i in range(num_nodes)]
Expand All @@ -115,7 +135,8 @@ def sim(self, inputs):

z = self.w*inputs+self.b

outputs = sigmoid(z)
outputs = np.tanh(z)
# outputs = z
'''
for i in self.nodes:
outputs.append(i.sim(inputs))
Expand All @@ -124,7 +145,12 @@ def sim(self, inputs):
def update_parameters(self):
    """Commit the pending updates to this Layer's live parameters.

    Copies the accumulated ``w_to_update`` / ``b_to_update`` buffers
    into fresh ``np.matrix`` objects bound to ``w`` and ``b``.
    """
    self.w, self.b = np.matrix(self.w_to_update), np.matrix(self.b_to_update)

def __str__(self):
    """Return a one-line human-readable summary of this layer."""
    details = (self.num_inputs, self.num_nodes, str(self.b), str(self.w))
    return "Layer has {0} inputs and {1} nodes, b: {2}, w: {3}".format(*details)

def sigmoid(z):
#output = sp.logistic.cdf(z)
Expand All @@ -136,29 +162,5 @@ def cost(simulated_outputs, actual_outputs):
if len(simulated_outputs.shape) == 1:
cost_value = 1.0/(2.0*n) * np.sum(np.subtract(simulated_outputs,actual_outputs)**2)
else:
cost_value = 1.0/(2.0*n) * np.sum(np.linalg.norm(np.subtract(simulated_outputs,actual_outputs), axis=0)**2)
cost_value = 1.0/(2.0*n) * np.sum(np.linalg.norm(np.subtract(simulated_outputs,actual_outputs), axis=0))
return cost_value



# Code to test the cost function
#print cost(np.array([[3, 8, 9, 12]]).T, np.array([[4, 9, 7, 20]]).T)


# Code to test the network creation
# Fairly simple network
# NOTE(review): this smoke test runs as a module-level side effect on
# every import of NeuralNet; consider guarding it with
# `if __name__ == "__main__":` (test_script.py repeats it standalone).

# 4 inputs -> hidden layer of 5 nodes -> output layer of 2 nodes.
net = Network([5,2],4)
# Per the train/sim docstrings: rows are elements, columns are samples,
# so this is three samples of 4 inputs / 2 target outputs each.
training_input = np.matrix([[1, 1, 3], [1, 2, 6], [4, 6, 2], [2, 8, 4]])
training_output = np.matrix([[1, 0, 0.5], [0, 1, 0.8]])

# Simple 1-node network
'''
net = Network([1],1)
training_input = np.array([1])
training_output = np.array([0.6])
'''


# Train for 1000 epochs, then print the trained outputs one simulated
# column per sample (Python 2 print statement).
net.train(training_input, training_output, 1000)
print np.concatenate([net.sim(training_input[:,n]) for n in range(np.size(training_input, 1))], 1)
29 changes: 29 additions & 0 deletions src/basic_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-
"""Quick visual check of the MNIST pickle: animate the first ten
training digits in an interactive matplotlib window.

Created on Wed Apr 04 15:09:05 2018

@author: richardf
"""
import cPickle, gzip, numpy as np, matplotlib.pyplot as plt, time


# Load the MNIST data (train/valid/test pickled tuples; the path is
# relative to the src/ directory).
f = gzip.open('../mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()

# Interactive mode so repeated imshow calls update the same window.
plt.ion()
fig = plt.figure()

#ax = fig.add_subplot(111)

# Display an image
# Show the first ten training images, one every quarter second.
# train_set[0][k] is a flat image reshaped to 28x28 pixels.
for k in range(0,10):
    pixels = train_set[0][k].reshape(28,28)
#    pixels = np.array(pixels, dtype='uint8').reshape(28,28)
    plt.imshow(pixels,cmap='gray')
    plt.show()
    fig.canvas.draw()
    print("Updated")
    time.sleep(0.25)

13 changes: 13 additions & 0 deletions src/mnist_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 04 15:45:08 2018

@author: richardf
"""

def load_mnist():
    """Load the gzipped, pickled MNIST dataset.

    Reads '../mnist.pkl.gz' (path relative to the src/ directory) and
    returns the tuple stored in it.

    Returns:
        tuple: (train_set, valid_set, test_set) as pickled in the file.

    Raises:
        IOError: if the archive is missing or unreadable.
    """
    # Bug fix: the original referenced gzip/cPickle without importing
    # them anywhere in this module, so every call raised NameError.
    # Import locally (cheap, keeps module import side-effect free) and
    # fall back to pickle so the helper also works on Python 3.
    import gzip
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    f = gzip.open('../mnist.pkl.gz', 'rb')
    try:
        # The file pickles a single (train_set, valid_set, test_set) tuple.
        return pickle.load(f)
    finally:
        # Close even if unpickling fails.
        f.close()
91 changes: 91 additions & 0 deletions src/test_script.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
"""Interactive test script for NeuralNet: a small smoke test plus a
function-approximation experiment, organised as Spyder-style #%% cells.

Created on Mon Sep 04 13:09:52 2017

@author: richardf
"""

import NeuralNet as nn
import numpy as np
import matplotlib.pyplot as plt

# Code to test the cost function
#print cost(np.array([[3, 8, 9, 12]]).T, np.array([[4, 9, 7, 20]]).T)


# Code to test the network creation
# Fairly simple network

# 4 inputs -> 5 hidden nodes -> 2 outputs; columns of the training
# matrices are samples, rows are elements.
net = nn.Network([5,2],4)
training_input = np.matrix([[1, 1, 3], [1, 2, 6], [4, 6, 2], [2, 8, 4]])
training_output = np.matrix([[1, 0, 0.5], [0, 1, 0.8]])

# Simple 1-node network
'''
net = Network([1],1)
training_input = np.array([1])
training_output = np.array([0.6])
'''


# Train, then show the trained outputs column by column (Python 2 print).
net.train(training_input, training_output, 1000)
print np.concatenate([net.sim(training_input[:,n]) for n in range(np.size(training_input, 1))], 1)

#%%
'''
Create a function-learning NN
'''

# Python 2 builtin reload: pick up edits to NeuralNet without restarting.
reload(nn)

# function: y = sin(x)*cos(x)+x**3
# (as written below, scaled into [0,1]: y = 0.25*(sin(7x)*cos(10x)+x^3+1))
x = np.arange(0,1,0.01)
y = 0.25*(np.sin(7*x)*np.cos(10*x)+(x)**3+1)

# fig, ax = plt.subplots()
# ax.cla()
# plt.figure()
plt.ion()
fig,ax = plt.subplots(1,1)
ax.plot(x,y)

training_input = np.matrix(x)
training_output = np.matrix(y)

# 1 input -> 20 -> 10 -> 1 output.
net = nn.Network([20,10,1],1)



# NOTE(review): plt.hold was deprecated and later removed in Matplotlib 3.x.
plt.hold(True)

# Train in two bursts of 10 epochs, replotting the approximation after
# each burst so progress is visible on the same axes.
for i in range(2):
    net.train(training_input, training_output, 10)

    # Plot the current approximation
    out = net.sim(training_input)

    ax.plot(x,out.T)
    plt.draw()

#%%
# A deeper network for experimentation.
net = nn.Network([20,10, 6, 30, 3,1],1)

# NOTE(review): reload() here re-imports the module AFTER net was built,
# so net still uses the pre-reload class definitions — verify the intent.
reload(nn)

#a = 50
#net.layers[0].w[0] = a*(np.random.rand(1)-0.5)
#net.layers[0].w[1] = a*(np.random.rand(1)-0.5)
#net.layers[0].w[2] = a*(np.random.rand(1)-0.5)
#net.layers[0].w[3] = a*(np.random.rand(1)-0.5)
#net.layers[0].w[4] = a*(np.random.rand(1)-0.5)

#net.layers[0].b[0] = a*(np.random.rand(1)-0.5)
#net.layers[0].b[1] = a*(np.random.rand(1)-0.5)
#net.layers[0].b[2] = a*(np.random.rand(1)-0.5)
#net.layers[0].b[3] = a*(np.random.rand(1)-0.5)
#net.layers[0].b[4] = a*(np.random.rand(1)-0.5)

#net.layers[1].w = a*(np.random.rand(1,5)-0.5)
#net.layers[1].b = a*(np.random.rand(1)-0.5)
# Plot the untrained deeper network's response over [0,1).
plt.cla()
plt.plot(x,net.sim(x).T)
31 changes: 31 additions & 0 deletions src/test_sin_function.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
"""Test whether the network can learn y = 0.5*sin(2*pi*x) + 0.5 from
1000 random samples in [0,1).

Created on Wed Apr 04 15:47:23 2018

@author: richardf
"""
import math, numpy as np, NeuralNet as nn, matplotlib.pyplot as plt

# Test whether the NN can learn a sin function
# Inputs are a 1x1000 row: one element per sample, columns are samples.
train_inputs = np.random.rand(1,1000)
train_outputs = 0.5*np.sin(train_inputs*2*math.pi)+0.5

# Held-out points from the same distribution (not used further below).
test_inputs = np.random.rand(1,10)
test_outputs = 0.5*np.sin(test_inputs*2*math.pi)+0.5

# Evenly spaced inputs for plotting the learned curve.
new_inputs = [np.arange(0,1,0.01)]

# Create the NN
# 1 input -> 20 -> 5 -> 1 output.
net = nn.Network([20,5,1],1)

# Sanity check: simulate a single zero input, then show the initial net.
print(net.sim([[0]]))


print(net)

net.train(train_inputs, train_outputs, 500)

new_outputs = net.sim(new_inputs)

# Learned curve as a line, training data as 'x' markers.
plt.plot(np.transpose(new_inputs), np.transpose(new_outputs))
plt.plot(np.transpose(train_inputs), np.transpose(train_outputs),'x')
49 changes: 49 additions & 0 deletions src/test_single_target_function.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-
"""Test whether the network can fit a small set of target points
(three samples of y = |x - 0.5|), then inspect a tiny hand-built net.

Created on Fri Apr 13 14:38:55 2018

@author: richardf
"""

import math, numpy as np, NeuralNet as nn, matplotlib.pyplot as plt

# Test whether the NN can learn a single target function
# Three samples: x in {0, 0.5, 1.0}.
# NOTE(review): train_inputs/train_outputs are 1-D here, while
# Network.train documents 2-D arrays (rows = elements, columns =
# samples) — verify this shape is actually handled.
train_inputs = np.arange(0,1.001,0.5)
#train_outputs = -(train_inputs-0.5)**(2)/10.0+0.5
#train_outputs = train_inputs*(1.0)
train_outputs = np.abs(train_inputs -0.5)

# Evenly spaced inputs for plotting the learned curve.
new_inputs = [np.arange(0,1,0.01)]

# Create the NN
# 1 input -> 5 -> 5 -> 1 output.
net = nn.Network([5,5,1],1)

# Sanity check: simulate a single zero input, then show the initial net.
print(net.sim([[0]]))


print(net)

net.train(train_inputs, train_outputs, 500)

new_outputs = net.sim(new_inputs)

plt.plot(np.transpose(new_inputs), np.transpose(new_outputs))
plt.plot(train_inputs, train_outputs)
plt.legend(('Predictions','Training data'))

#%% Try create a network

# A minimal 1-input, 2-hidden-node, 1-output net for manual experiments;
# the commented assignments below set its parameters by hand.
net = nn.Network([2,1],1)

#net.layers[0].b[0] = -0.8
#net.layers[0].b[1] = 0
#net.layers[0].w[0] = -10
#net.layers[0].w[1] = 1

#net.layers[1].b[0] = 0
#net.layers[1].w[0,0] = 1
#net.layers[1].w[0,1] = 1

# Plot the (untrained) network's response over [0,1).
plt.plot(np.transpose(new_inputs), np.transpose(net.sim(new_inputs)))
#plt.plot(train_inputs, train_outputs)
plt.legend(('Predictions','Training data'))