main.py
#!/usr/bin/env python
# coding: utf-8
#
# Author: Kazuto Nakashima
# URL: https://github.com/kazuto1011
# Created: 2017-08-16
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import make_blobs


def train(X, Y, model, args):
    X = torch.FloatTensor(X)
    Y = torch.FloatTensor(Y)
    N = len(Y)

    optimizer = optim.SGD(model.parameters(), lr=args.lr)

    model.train()
    for epoch in range(args.epoch):
        perm = torch.randperm(N)
        sum_loss = 0

        for i in range(0, N, args.batchsize):
            x = X[perm[i : i + args.batchsize]].to(args.device)
            y = Y[perm[i : i + args.batchsize]].to(args.device)

            optimizer.zero_grad()
            output = model(x).squeeze()
            weight = model.weight.squeeze()

            # Soft-margin objective: mean hinge loss max(0, 1 - y * f(x))
            # plus an L2 penalty on the weights, scaled by --c.
            loss = torch.mean(torch.clamp(1 - y * output, min=0))
            loss += args.c * (weight.t() @ weight) / 2.0

            loss.backward()
            optimizer.step()

            sum_loss += float(loss)

        print("Epoch: {:4d}\tloss: {}".format(epoch, sum_loss / N))


def visualize(X, Y, model):
    W = model.weight.squeeze().detach().cpu().numpy()
    b = model.bias.squeeze().detach().cpu().numpy()

    # Evaluate the decision function f(x) = Wx + b on a grid and bucket the
    # values by the margins (f = -1, 0, +1) for the contour plot.
    delta = 0.001
    x = np.arange(X[:, 0].min(), X[:, 0].max(), delta)
    y = np.arange(X[:, 1].min(), X[:, 1].max(), delta)
    x, y = np.meshgrid(x, y)
    xy = list(map(np.ravel, [x, y]))

    z = (W.dot(xy) + b).reshape(x.shape)
    z[np.where(z > 1.0)] = 4
    z[np.where((z > 0.0) & (z <= 1.0))] = 3
    z[np.where((z > -1.0) & (z <= 0.0))] = 2
    z[np.where(z <= -1.0)] = 1

    plt.figure(figsize=(10, 10))
    plt.xlim([X[:, 0].min() + delta, X[:, 0].max() - delta])
    plt.ylim([X[:, 1].min() + delta, X[:, 1].max() - delta])
    plt.contourf(x, y, z, alpha=0.8, cmap="Greys")
    plt.scatter(x=X[:, 0], y=X[:, 1], c="black", s=10)
    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--c", type=float, default=0.01)
    parser.add_argument("--lr", type=float, default=0.1)
    parser.add_argument("--batchsize", type=int, default=5)
    parser.add_argument("--epoch", type=int, default=10)
    parser.add_argument("--device", default="cuda", choices=["cpu", "cuda"])
    args = parser.parse_args()
    args.device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    print(args)

    # Two Gaussian blobs, standardized; labels remapped from {0, 1} to {-1, +1}.
    X, Y = make_blobs(n_samples=500, centers=2, random_state=0, cluster_std=0.4)
    X = (X - X.mean()) / X.std()
    Y[np.where(Y == 0)] = -1

    # Linear SVM as a single linear layer trained with the hinge loss above.
    model = nn.Linear(2, 1)
    model.to(args.device)

    train(X, Y, model, args)
    visualize(X, Y, model)
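
A minimal usage sketch, assuming the script is saved as main.py and that torch, numpy, matplotlib, and scikit-learn are installed (the flag names and values below are just the argparse defaults from the script):

    python main.py --c 0.01 --lr 0.1 --batchsize 5 --epoch 10 --device cuda

The --device flag falls back to CPU automatically when CUDA is unavailable.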
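For reference, a hedged sketch of how the trained model could be used for prediction; the predict helper below is illustrative and not part of the original script. It applies the decision rule sign(Wx + b), matching the ±1 labels used during training:

    def predict(model, X, device):
        # Decision function f(x) = Wx + b; predicted class is its sign (+1 / -1).
        with torch.no_grad():
            scores = model(torch.FloatTensor(X).to(device)).squeeze()
        return torch.sign(scores).cpu().numpy()

    # e.g. labels = predict(model, X, args.device)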