perceptron.py
import numpy as np
import matplotlib.pyplot as plt


class Perceptron:
    """Single-neuron perceptron with a logistic transfer function."""

    def __init__(self):
        np.random.seed(1)
        self.weights = self.__random_init(2)
        self.bias = self.__random_init(1)

    @staticmethod
    def __random_init(size):
        # Uniform initialisation in [-1, 1).
        return 2 * np.random.rand(size) - 1

    @staticmethod
    def __transfer(x):
        # Logistic (sigmoid) transfer function.
        return 1 / (1 + np.exp(-x))

    @staticmethod
    def __prediction(x):
        # Threshold the transfer output to a binary class label.
        return int(round(float(x)))

    def __activation(self, x):
        # Full forward pass: weighted sum, transfer, then thresholding.
        return self.__prediction(self.__transfer(np.dot(self.weights, x) + self.bias))
    @staticmethod
    def __plot_loss(loss_plot, mse):
        # Redraw the training-loss curve for the epochs seen so far.
        loss_plot.clear()
        loss_plot.set_title('Train Loss')
        loss_plot.set_xlabel('Epoch')
        loss_plot.set_ylabel('MSE')
        loss_plot.plot(mse)
        plt.pause(.0001)

    def __plot_rule(self, rule_plot, training):
        # Visualise the current decision boundary together with the training points.
        rule_plot.clear()
        rule_plot.set_title('Decision Rule')
        points = np.array([observation for observation, _ in training], dtype=float)
        space = np.arange(points.min() - .5, points.max() + 1)
        x_grid, y_grid = np.meshgrid(space, space)
        z_grid = self.__transfer(x_grid * self.weights[0] + y_grid * self.weights[1] + self.bias)
        rule_plot.contourf(x_grid, y_grid, z_grid, levels=[0, 0.5, 1], alpha=.5)
        for observation, expectation in training:
            prediction = self.__activation(observation)
            # Correctly classified points are drawn green, misclassified ones red.
            c = 'green' if prediction == expectation[0] else 'red'
            rule_plot.plot(observation[0], observation[1], marker='v', markersize=10, color=c)
            rule_plot.annotate(r'{0}$\mapsto${1}'.format(observation, prediction), xy=np.add(observation, -.1))
    def fit(self, samples, epochs=100, info=True):
        # Online (per-sample) training with the delta rule and a unit learning rate.
        if info:
            plt.ion()
            loss_plot = plt.subplot(121)
            rule_plot = plt.subplot(122)
            mse = []
        for epoch in range(epochs):
            square_losses = []
            # Visit the samples in a fresh random order every epoch.
            for index in np.random.permutation(len(samples)):
                observation, expectation = samples[index]
                prediction = self.__transfer(np.dot(self.weights, observation) + self.bias)
                loss = expectation - prediction
                square_losses.append(loss ** 2)
                self.weights += loss * observation
                self.bias += loss
            if info:
                mse.append(np.average(square_losses))
                self.__plot_loss(loss_plot, mse)
                self.__plot_rule(rule_plot, samples)
                print('Epoch:\t{0}\tMSE:\t{1:.13f}'.format(epoch, mse[-1]))

if __name__ == '__main__':
    # Truth table for logical AND: only (1, 1) maps to 1.
    data = [([0, 0], [0]),
            ([0, 1], [0]),
            ([1, 1], [1]),
            ([1, 0], [0])]
    p = Perceptron()
    p.fit(data)
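
# A minimal usage sketch (an assumption, not part of the original script):
# once fit() has run, the learned weights and bias can classify new inputs
# with the same sigmoid-then-threshold rule used inside the class. The
# class helpers are private (name-mangled), so the forward pass is simply
# repeated here by hand.
#
#     p = Perceptron()
#     p.fit(data, epochs=100, info=False)     # train without the live plots
#     for x, _ in data:
#         z = float(np.dot(p.weights, x) + p.bias)   # weighted sum plus bias
#         label = int(round(1 / (1 + np.exp(-z))))   # sigmoid, then threshold
#         print(x, '->', label)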