-
Notifications
You must be signed in to change notification settings - Fork 0
/
Train120x120_RecognizeTrafficSign_Resnet_Pytorch.py
180 lines (136 loc) · 6.03 KB
/
Train120x120_RecognizeTrafficSign_Resnet_Pytorch.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
#
# By Alfonso Blanco García, jan 2024
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
import glob
import io
import os
import cv2
#import json
#import shutil
#import numpy as np
#import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import torchvision.models as models
from PIL import Image
# Training pipeline: resize to 120x120 with light augmentation
# (random rotation + horizontal flip), then scale each RGB channel to [-1, 1].
train_transforms = transforms.Compose([
    transforms.Resize((120, 120)),
    transforms.RandomRotation(30),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# Test pipeline: deterministic resize + center crop, same normalization.
# NOTE(review): CenterCrop(120) after Resize((120, 120)) is a no-op, kept as-is.
test_transforms = transforms.Compose([
    transforms.Resize((120, 120)),
    transforms.CenterCrop(120),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# The validation set uses the same deterministic transform as the test set.
validation_transforms = transforms.Compose([
    transforms.Resize((120, 120)),
    transforms.CenterCrop(120),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Datasets use the folder-per-class ImageFolder layout under Dir_TrafficSign_Resnet/.
# NOTE(review): the "valid" directory is used as the held-out TEST set here;
# the validation subset is carved out of the training data instead.
train_data = datasets.ImageFolder('Dir_TrafficSign_Resnet/train', transform=train_transforms)
test_data = datasets.ImageFolder('Dir_TrafficSign_Resnet/valid', transform=test_transforms)

# 70/30 random split of the training images into train / validation subsets.
train_data, valid_data = torch.utils.data.random_split(train_data, [0.7, 0.3])

# Batched, shuffled loaders: large batches for training, smaller for evaluation.
_DataLoader = torch.utils.data.DataLoader
trainloader = _DataLoader(train_data, batch_size=128, shuffle=True)
testloader = _DataLoader(test_data, batch_size=32, shuffle=True)
validloader = _DataLoader(valid_data, batch_size=32, shuffle=True)
# Transfer learning: start from an ImageNet-pretrained ResNet-50.
# NOTE(review): pretrained=True is deprecated in newer torchvision; the
# weights=ResNet50_Weights.DEFAULT form (kept commented out) is the replacement.
#from torchvision.models import ResNet50_Weights
model = models.resnet50(pretrained=True)
#model = models.resnet50(weights=ResNet50_Weights.DEFAULT)

# Swap the final fully-connected layer for a 43-way classifier head.
in_features = model.fc.in_features
model.fc = nn.Linear(in_features, 43)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# Reduce the learning rate when the monitored metric (validation accuracy,
# hence mode='max') stops improving for `patience` scheduler steps.
lrscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max',
                                                   patience=3, threshold=0.9)
def validation(model, validloader, criterion):
    """Run one full pass over the validation loader on the CPU.

    Returns a tuple ``(valid_loss, accuracy)`` where ``valid_loss`` is the
    SUM of per-batch criterion values (a float) and ``accuracy`` is the SUM
    of per-batch mean accuracies (a 0-d float tensor).  The caller divides
    both by ``len(validloader)`` to obtain per-batch averages.
    """
    valid_loss = 0
    accuracy = 0
    # CPU-only run; the CUDA variant is kept commented out in this script.
    model.to('cpu')
    for images, labels in validloader:
        images, labels = images.to('cpu'), labels.to('cpu')
        output = model(images)
        valid_loss += criterion(output, labels).item()
        # exp() is monotonic, so the argmax of exp(logits) equals the
        # argmax of the raw logits: the predicted class per sample.
        predicted = torch.exp(output).max(dim=1)[1]
        accuracy += (predicted == labels).float().mean()
    return valid_loss, accuracy
#device = torch.device("cuda")
device = torch.device("cpu")
model = model.to(device)

#epochs = 20
epochs = 10
steps = 0
print_every = 40  # run a validation pass every 40 training batches

# change to gpu mode
# model.to('cuda')
# change to cpu mode
model.to('cpu')
model.train()

for e in range(epochs):
    running_loss = 0
    # One training step per batch: forward, loss, backward, update.
    for inputs, labels in trainloader:
        steps += 1
        #inputs, labels = inputs.to('cuda'), labels.to('cuda')
        inputs, labels = inputs.to('cpu'), labels.to('cpu')

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

        # Periodic validation step.
        if steps % print_every == 0:
            model.eval()  # evaluation mode during validation
            with torch.no_grad():  # gradients off: no longer training
                valid_loss, accuracy = validation(model, validloader, criterion)
            print(f"No. epochs: {e+1}, "
                  f"Training Loss: {round(running_loss/print_every, 3)} "
                  f"Valid Loss: {round(valid_loss/len(validloader), 3)} "
                  f"Valid Accuracy: {round(float(accuracy/len(validloader)), 3)}")
            # BUG FIX: reset the window so "Training Loss" is the mean over
            # the last print_every batches instead of silently accumulating
            # across the whole epoch (which inflated every later printout).
            running_loss = 0
            model.train()  # turn training back on
            # NOTE(review): `accuracy` is the SUM of per-batch accuracies
            # returned by validation(); dividing by len(validloader) before
            # scaling would match the printed metric — confirm intent.
            lrscheduler.step(accuracy * 100)
# Final evaluation on the held-out test set.
correct = 0
total = 0
#model.to('cuda')
model.to('cpu')
# BUG FIX: the training loop above leaves the model in train() mode, so
# BatchNorm layers would use (and update) batch statistics during this
# evaluation; switch to eval mode for a correct, side-effect-free pass.
model.eval()
with torch.no_grad():
    for images, labels in testloader:
        #images, labels = images.to('cuda'), labels.to('cuda')
        images, labels = images.to('cpu'), labels.to('cpu')
        # Forward pass, then turn the per-class scores into predictions.
        outputs = model(images)
        _, predicted_outcome = torch.max(outputs.data, 1)
        # Accumulate total image count and correct-prediction count.
        total += labels.size(0)
        correct += (predicted_outcome == labels).sum().item()
print(f"Test accuracy of model: {round(100 * correct / total,3)}%")
# Persist a resumable checkpoint of weights, classifier head, and optimizer.
checkpoint = {
    'state_dict': model.state_dict(),
    'model': model.fc,  # the replaced 43-way classifier head
    #'class_to_idx': train_data.class_to_idx,
    # BUG FIX: call state_dict() — the original stored the bound method
    # object itself instead of the optimizer's state dictionary, making
    # the saved 'opt_state' entry useless for resuming training.
    'opt_state': optimizer.state_dict(),
    'num_epochs': epochs,
}
torch.save(checkpoint, 'checkpoint120x120_10epoch.pth')