-
Notifications
You must be signed in to change notification settings - Fork 0
/
MLP.py
180 lines (139 loc) · 3.86 KB
/
MLP.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
import pandas as pd
from scipy.special import expit
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
import random
# ---- Data loading and preprocessing ----
# Read the comma-separated dataset; no header row expected.
df = pd.read_csv('data.csv', sep=',', header=None)
df = df.values.tolist()

# Hold out 20% of the rows for testing.
train, test = train_test_split(df, test_size=0.2)
train = np.asarray(train)
test = np.asarray(test)

# Column 72 (the last column) is the binary target; the rest are features.
Y = train[:, 72]
train = train[:, :-1]
Y_test = test[:, 72]
test = test[:, :-1]

# Normalise features to [0, 1].
# BUG FIX: the scaler must be fit on the TRAINING data only and then
# applied to the test data.  Fitting a second scaler on the test set
# (as the original did) leaks test-set statistics and maps the two
# sets through different transforms, so test inputs were inconsistent
# with what the network was trained on.
scaler = MinMaxScaler().fit(train)
train = scaler.transform(train)
test = scaler.transform(test)

# Downstream code indexes these as plain Python lists.
Y = Y.tolist()
train = train.tolist()
Y_test = Y_test.tolist()
test = test.tolist()
# ---- Network configuration ----
# Layer widths: two sigmoid hidden layers (15 and 20 units) feeding a
# single sigmoid output unit.
no_of_neurons = [15, 20, 1]
no_of_features = 72
no_of_iterations = 4000
# NOTE: eta is negative on purpose — the update rule below is written as
# `param -= eta * J`, so a negative eta turns it into `param += |eta| * J`.
eta = -0.00001

# Per-layer weight matrices and bias vectors, filled in by init().
w01 = []
b01 = []
w02 = []
b02 = []
w03 = []
b03 = []
# Function to initialise weights and bias
def init(weights, bias, index, neurons=None, n_features=72):
    """Fill ``weights`` and ``bias`` in place for layer ``index``.

    Each neuron of the layer receives one weight row and one bias value,
    drawn uniformly from [-1/sqrt(fan_in), 1/sqrt(fan_in)] and rounded
    to 3 decimal places.

    Parameters
    ----------
    weights : list
        Receives one list of ``fan_in`` weights per neuron (mutated).
    bias : list
        Receives one bias scalar per neuron (mutated).
    index : int
        Layer index into ``neurons``.
    neurons : list[int], optional
        Layer widths; defaults to the module-level ``no_of_neurons``.
    n_features : int, optional
        Input dimensionality, used as fan-in for layer 0 (default 72).
    """
    if neurons is None:
        neurons = no_of_neurons
    # Fan-in: raw feature count for the first layer, otherwise the width
    # of the previous layer.  Hoisted out of the loop — the original
    # recomputed `limit` per neuron and relied on it leaking out of the
    # weight loop into the bias loop (which would raise NameError for an
    # empty layer).
    fan_in = n_features if index == 0 else neurons[index - 1]
    limit = 1 / np.sqrt(fan_in)
    for _ in range(neurons[index]):
        weights.append([round(random.uniform(-limit, limit), 3)
                        for _ in range(fan_in)])
    for _ in range(neurons[index]):
        bias.append(round(random.uniform(-limit, limit), 3))
# ---- Build the network ----
# Populate every layer's weights and biases in place.
for layer_idx, (w_layer, b_layer) in enumerate(((w01, b01),
                                                (w02, b02),
                                                (w03, b03))):
    init(w_layer, b_layer, layer_idx)
# The output layer has exactly one neuron, so keep its single weight row.
w03 = w03[0]

# Cost-curve series for plotting (iteration number vs. epoch MSE).
X_axis = []
Y_axis = []
# ---- Training: per-sample (online) gradient updates ----
# Each outer iteration is one pass over the training set; every sample
# gets a forward pass followed immediately by a parameter update.
for itr in range(no_of_iterations):
    print("iteration",itr)
    X_axis.append(itr)
    MSE = 0  # accumulated squared error for this epoch
    for i in range(len(train)):
        # Forward Pass: 72 -> 15 -> 20 -> 1, sigmoid at every layer.
        X = train[i]
        U = np.dot(w01, X)
        U = U + b01
        H = expit(U)          # first hidden-layer activations
        V = np.dot(w02, H)
        V = V + b02
        I = expit(V)          # second hidden-layer activations
        W = np.dot(w03, I)
        W = W + b03
        Y_pred = expit(W)     # network output in (0, 1)
        # BACKWARD Pass
        e = Y[i] - Y_pred
        MSE += e*e
        # Output delta: error times the sigmoid derivative at the output.
        E = e * (Y_pred * (1 - Y_pred))
        J_b03 = E
        J_w03 = E * I
        # eta is negative, so `-=` moves the parameters ALONG J,
        # i.e. in the direction that reduces the squared error.
        b03 -= eta * (J_b03)
        w03 -= eta * (J_w03)
        # NOTE(review): the hidden-layer terms below use the
        # ALREADY-UPDATED w03 (and later the updated w02), and the
        # output delta E is multiplied in again at each layer — this
        # deviates from textbook backprop; confirm it is intentional
        # before refactoring.
        temp = I * (1 - I)
        temp = (temp * w03)
        J_b02 = E * temp
        # Outer product temp (column) x H (row) -> per-weight gradient matrix.
        colu = np.reshape(np.array(temp), (np.array(temp).shape[0], 1))
        rowv = np.reshape(np.array(H), (1, np.array(H).shape[0]))
        res = np.multiply(rowv, colu)
        J_w02 = E * res
        b02 -= eta * (J_b02)
        w02 -= eta * (J_w02)
        temp2 = H * (1 - H)
        temp3 = E * np.dot(w02.transpose(), temp) # temp * w02 * temp2
        temp3 = temp3 * temp2
        J_b01 = E * temp3
        # Same outer-product construction against the raw input X.
        colu = np.reshape(np.array(temp3), (np.array(temp3).shape[0], 1))
        rowv = np.reshape(np.array(X), (1, np.array(X).shape[0]))
        res = np.multiply(rowv, colu)
        J_w01 = E * res
        b01 -= eta * (J_b01)
        w01 -= eta * (J_w01)
    # Mean squared error for the epoch, recorded for the cost curve.
    MSE /= len(train)
    Y_axis.append(MSE)
# ---- Plot the training cost curve ----
# Improvement: the original figure had no axis labels or title, making
# the curve hard to interpret.
plt.plot(X_axis, Y_axis)
plt.xlabel("Iteration")
plt.ylabel("MSE")
plt.title("Training cost vs. iteration")
plt.show()
# ---- Evaluation on the held-out test set ----
# Fixes: removed the dead `mean` accumulator (summed but never used),
# the redundant float(0.5) casts, and the duplicated nested threshold
# conditionals (collapsed into one equivalent test).
ans = 0  # count of correctly classified samples
for i in range(len(test)):
    # Forward pass, same architecture and trained parameters as above.
    X = test[i]
    H = expit(np.dot(w01, X) + b01)
    I = expit(np.dot(w02, H) + b02)
    Y_pred = expit(np.dot(w03, I) + b03)
    print(Y_pred,end="")
    print(Y_test[i])
    # Threshold at 0.5: predict class 1 iff Y_pred >= 0.5; the sample is
    # correct when the predicted class matches the label.  Equivalent to
    # the original (Y_pred == 0.5 counts as class 1, so it is correct
    # only for a label of 1, exactly as before).
    if (Y_pred >= 0.5) == (Y_test[i] == 1):
        ans += 1
print("Accuracy:",ans/len(test))