Commit b687f02

PyTorch convolutional neural network for semantic segmentation
1 parent fa37b7a commit b687f02

File tree

71 files changed: +6221 -0 lines changed


1_VGG16/1_VGG_16_model_details.txt

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 224, 224]           1,792
              ReLU-2         [-1, 64, 224, 224]               0
            Conv2d-3         [-1, 64, 224, 224]          36,928
              ReLU-4         [-1, 64, 224, 224]               0
         MaxPool2d-5         [-1, 64, 112, 112]               0
            Conv2d-6        [-1, 128, 112, 112]          73,856
              ReLU-7        [-1, 128, 112, 112]               0
            Conv2d-8        [-1, 128, 112, 112]         147,584
              ReLU-9        [-1, 128, 112, 112]               0
        MaxPool2d-10          [-1, 128, 56, 56]               0
           Conv2d-11          [-1, 256, 56, 56]         295,168
             ReLU-12          [-1, 256, 56, 56]               0
           Conv2d-13          [-1, 256, 56, 56]         590,080
             ReLU-14          [-1, 256, 56, 56]               0
           Conv2d-15          [-1, 256, 56, 56]         590,080
             ReLU-16          [-1, 256, 56, 56]               0
        MaxPool2d-17          [-1, 256, 28, 28]               0
           Conv2d-18          [-1, 512, 28, 28]       1,180,160
             ReLU-19          [-1, 512, 28, 28]               0
           Conv2d-20          [-1, 512, 28, 28]       2,359,808
             ReLU-21          [-1, 512, 28, 28]               0
           Conv2d-22          [-1, 512, 28, 28]       2,359,808
             ReLU-23          [-1, 512, 28, 28]               0
        MaxPool2d-24          [-1, 512, 14, 14]               0
           Conv2d-25          [-1, 512, 14, 14]       2,359,808
             ReLU-26          [-1, 512, 14, 14]               0
           Conv2d-27          [-1, 512, 14, 14]       2,359,808
             ReLU-28          [-1, 512, 14, 14]               0
           Conv2d-29          [-1, 512, 14, 14]       2,359,808
             ReLU-30          [-1, 512, 14, 14]               0
        MaxPool2d-31            [-1, 512, 7, 7]               0
           Linear-32                 [-1, 4096]     102,764,544
             ReLU-33                 [-1, 4096]               0
          Dropout-34                 [-1, 4096]               0
           Linear-35                 [-1, 4096]      16,781,312
             ReLU-36                 [-1, 4096]               0
          Dropout-37                 [-1, 4096]               0
           Linear-38                 [-1, 1000]       4,097,000

================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
----------------------------------------------------------------
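For reference, the listing above is a torchsummary-style per-layer summary of torchvision's VGG-16 with a 3x224x224 input. A minimal sketch of how such a table can be regenerated, assuming the widely used torchsummary package rather than the repo's customized torchsummary_1 module imported by main.py:

import torch
from torchvision import models
from torchsummary import summary  # assumption: standard torchsummary package, not the repo's torchsummary_1

device = "cuda" if torch.cuda.is_available() else "cpu"
vgg = models.vgg16(pretrained=True).to(device)

# Prints the per-layer table (layer type, output shape, parameter count)
# together with the total/trainable parameter counts shown above.
summary(vgg, input_size=(3, 224, 224), device=device)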
5.08 KB: Binary file not shown.
3.27 KB: Binary file not shown.
3.52 MB: Binary file not shown.

1_VGG16/main.py

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
1+
import torch
2+
import torch.nn as nn
3+
import torch.optim as optim
4+
from torch.autograd import Variable
5+
from torchvision import models
6+
import argparse
7+
import numpy as np
8+
import torchvision.datasets as datasets
9+
import torchvision.transforms as transforms
10+
import os
11+
import sys
12+
import csv
13+
import shutil
14+
import time
15+
import model
16+
import torch.nn.functional as F
17+
import matplotlib.pyplot as plt
18+
from IPython import embed
19+
from torchsummary_1 import summary as modelsummary
20+
#import torchsummary as modelsummary
21+
22+
#------ START added by Adeel ------
23+
final_model_path = 'final_trained_model/200_epoch_squeezenet.pth'
24+
best_prec1 = 0
25+
start_epoch = 1
26+
checkpoint_path = 'train_checkpoint/checkpoint.pth.tar'
27+
#------ END added by Adeel ------
28+
29+
parser = argparse.ArgumentParser('Options for training SqueezeNet in pytorch')
30+
parser.add_argument('--batch-size', type=int, default=200, metavar='N', help='batch size of train')
31+
parser.add_argument('--epoch', type=int, default=500, metavar='N', help='number of epochs to train for')
32+
parser.add_argument('--learning-rate', type=float, default=0.00001, metavar='LR', help='learning rate')
33+
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='percentage of past parameters to store')
34+
parser.add_argument('--no-cuda', action='store_true', default=False, help='use cuda for training')
35+
parser.add_argument('--log-schedule', type=int, default=1, metavar='N', help='number of epochs to save snapshot after')
36+
parser.add_argument('--seed', type=int, default=10, help='set seed to some constant value to reproduce experiments')
37+
parser.add_argument('--model_name', type=str, default=None, help='Use a pretrained model')
38+
parser.add_argument('--want_to_test', type=bool, default=False, help='make true if you just want to test')
39+
parser.add_argument('--epoch_55', action='store_true', help='would you like to use 55 epoch learning rule')
40+
parser.add_argument('--num_classes', type=int, default=10, help="how many classes training for")
41+
#------ START added by Adeel ------
42+
parser.add_argument('--resume', default=checkpoint_path, type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
43+
#------ END added by Adeel ------
44+
45+
46+
#print(torch.cuda.get_device_name(0))
47+
48+
args = parser.parse_args()
49+
args.cuda = not args.no_cuda and torch.cuda.is_available()
50+
51+
torch.manual_seed(args.seed)
52+
if args.cuda:
53+
torch.cuda.manual_seed(args.seed)
54+
torch.cuda.set_device(0)
55+
56+
57+
# get the model and convert it into cuda for if necessary
58+
#net = model.SqueezeNet()
59+
60+
net = models.vgg16(pretrained = True)
61+
torch.cuda.empty_cache()
62+
print(torch.cuda.memory_allocated(device=None))
63+
64+
if args.model_name is not None:
65+
print("loading pre trained weights")
66+
pretrained_weights = torch.load(args.model_name)
67+
net.load_state_dict(pretrained_weights)
68+
69+
if args.cuda:
70+
print("GPU Working...\n")
71+
net.cuda()
72+
73+
#------ Check Network parameters Size ----------------
74+
#
75+
#modelsummary(net.cuda(), input_size=(3, 32, 32))
76+
#
77+
#------ Check Network parameters Size ----------------
78+
79+
80+
#print(net)
81+
82+
avg_loss = list()
83+
best_accuracy = 0.0
84+
fig1, ax1 = plt.subplots()
85+
86+
87+
# create a temporary optimizer
88+
optimizer = optim.SGD(net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4)
89+
90+
def adjustlrwd(params):
91+
for param_group in optimizer.state_dict()['param_groups']:
92+
param_group['lr'] = params['learning_rate']
93+
param_group['weight_decay'] = params['weight_decay']
94+
95+
#------ END added by Adeel ------
96+
97+
#==============================================================================================
98+
class AverageMeter(object):
99+
"""Computes and stores the average and current value"""
100+
def __init__(self):
101+
self.reset()
102+
103+
def reset(self):
104+
self.val = 0
105+
self.avg = 0
106+
self.sum = 0
107+
self.count = 0
108+
109+
def update(self, val, n=1):
110+
self.val = val
111+
self.sum += val * n
112+
self.count += n
113+
self.avg = self.sum / self.count
114+
115+
#==============================================================================================
116+
117+
#====================================Main Method===============================================
118+
if __name__ == '__main__':
119+
120+
epoch_time = AverageMeter()
121+
# #------ Print and Save Network parameters ----------------
122+
# modelsummary.summary(net.cuda(), input_size=(3, 32, 32))
123+
124+
# #---- Using Customized Model Summary Class ----------
125+
modelsummary(net.cuda(), input_size=(3, 1024, 1024))
126+
# #---- Using Customized Model Summary Class ----------
127+
# #------ Print and Save Network parameters ----------------
128+
129+
#====================================Main Method===============================================
130+
#==============================================================================================
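One note on the adjustlrwd helper above: optimizer.state_dict() returns copies of the parameter-group dicts, so hyperparameters have to be changed through optimizer.param_groups instead. A minimal self-contained sketch of that pattern (the tiny nn.Linear model and the adjust_lr_wd name are illustrative only, not part of the repo):

import torch.nn as nn
import torch.optim as optim

net = nn.Linear(8, 2)  # toy stand-in for the real network
optimizer = optim.SGD(net.parameters(), lr=1e-5, momentum=0.9, weight_decay=5e-4)

def adjust_lr_wd(optimizer, lr, weight_decay):
    # param_groups holds live references, unlike optimizer.state_dict()
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
        param_group['weight_decay'] = weight_decay

adjust_lr_wd(optimizer, lr=1e-4, weight_decay=1e-4)
print(optimizer.param_groups[0]['lr'])  # 0.0001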

1_VGG16/model.py

Lines changed: 164 additions & 0 deletions
@@ -0,0 +1,164 @@
1+
import torch
2+
import torch.nn as nn
3+
from torch.autograd import Variable
4+
import torch.functional as F
5+
import numpy as np
6+
import torch.optim as optim
7+
import math
8+
import sys
9+
10+
class fire(nn.Module):
11+
def __init__(self, inplanes, squeeze_planes, expand_planes):
12+
super(fire, self).__init__()
13+
self.conv1 = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1, stride=1)
14+
self.bn1 = nn.BatchNorm2d(squeeze_planes)
15+
self.relu1 = nn.ReLU(inplace=True)
16+
self.conv2 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=1, stride=1)
17+
self.bn2 = nn.BatchNorm2d(expand_planes)
18+
self.conv3 = nn.Conv2d(squeeze_planes, expand_planes, kernel_size=3, stride=1, padding=1)
19+
self.bn3 = nn.BatchNorm2d(expand_planes)
20+
self.relu2 = nn.ReLU()
21+
22+
# using MSR initilization
23+
for m in self.modules():
24+
if isinstance(m, nn.Conv2d):
25+
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
26+
m.weight.data.normal_(0, math.sqrt(2./n))
27+
28+
def forward(self, x):
29+
x = self.conv1(x)
30+
x = self.bn1(x)
31+
x = self.relu1(x)
32+
out1 = self.conv2(x)
33+
out1 = self.bn2(out1)
34+
out2 = self.conv3(x)
35+
out2 = self.bn3(out2)
36+
out = torch.cat([out1, out2], 1)
37+
out = self.relu2(out)
38+
return out
39+
40+
class additional_layer(nn.Module):
41+
def __init__(self, add_input_channels, add_output_channels):
42+
super(additional_layer, self).__init__()
43+
self.conv1 = nn.Conv2d(add_input_channels, add_output_channels, kernel_size=3, stride=1,padding=1)
44+
self.bn1 = nn.BatchNorm2d(add_output_channels)
45+
self.relu1 = nn.ReLU(inplace=True)
46+
47+
# using MSR initilization
48+
for m in self.modules():
49+
if isinstance(m, nn.Conv2d):
50+
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
51+
m.weight.data.normal_(0, math.sqrt(2./n))
52+
53+
def forward(self, x):
54+
out_add = self.conv1(x)
55+
out_add = self.bn1(x)
56+
out_add = self.relu1(x)
57+
return out_add
58+
59+
60+
class conv_layer(nn.Module):
61+
def __init__(self, add_input_channels, add_output_channels,kernel_size,stride,padding):
62+
super(conv_layer, self).__init__()
63+
self.conv1 = nn.Conv2d(add_input_channels, add_output_channels, kernel_size, stride,padding)
64+
self.bn1 = nn.BatchNorm2d(add_output_channels)
65+
self.relu1 = nn.ReLU(inplace=True)
66+
67+
# using MSR initilization
68+
for m in self.modules():
69+
if isinstance(m, nn.Conv2d):
70+
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
71+
m.weight.data.normal_(0, math.sqrt(2./n))
72+
73+
def forward(self, x):
74+
out_add = self.conv1(x)
75+
out_add = self.bn1(out_add)
76+
out_add = self.relu1(out_add)
77+
return out_add
78+
79+
80+
class SqueezeNet(nn.Module):
81+
def __init__(self):
82+
super(SqueezeNet, self).__init__()
83+
self.conv1 = nn.Conv2d(3, 96, kernel_size=3, stride=1, padding=1) # 32
84+
self.bn1 = nn.BatchNorm2d(96)
85+
self.relu = nn.ReLU()
86+
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2) # 16
87+
self.conv3x3_add_link_1 = conv_layer(96,96,kernel_size=3, stride=2, padding=1)
88+
self.fire2 = fire(96, 16, 64)
89+
self.dropout_1 = nn.Dropout2d(p=0.5,inplace=False)
90+
self.fire3 = fire(128, 16, 64)
91+
self.fire4 = fire(128, 32, 128)
92+
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2) # 8
93+
# self.conv3x3_add_link_2 = conv_layer(256,256,kernel_size=3, stride=2, padding=1)
94+
self.conv1x1_concat_1 = conv_layer(352,256,kernel_size=1, stride=1, padding=0)
95+
self.fire5 = fire(256, 32, 128)
96+
self.fire6 = fire(256, 48, 192)
97+
self.fire7 = fire(384, 48, 192)
98+
self.dropout_2 = nn.Dropout2d(p=0.5,inplace=False)
99+
self.fire8 = fire(384, 64, 256)
100+
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2) # 4
101+
# self.conv1x1_concat_2 = conv_layer(768,512,kernel_size=1, stride=1, padding=0)
102+
self.fire9 = fire(512, 64, 256)
103+
self.conv2 = nn.Conv2d(512, 10, kernel_size=1, stride=1)
104+
self.avg_pool = nn.AvgPool2d(kernel_size=4, stride=4)
105+
self.softmax = nn.LogSoftmax(dim=1)
106+
for m in self.modules():
107+
if isinstance(m, nn.Conv2d):
108+
n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
109+
m.weight.data.normal_(0, math.sqrt(2. / n))
110+
elif isinstance(m, nn.BatchNorm2d):
111+
m.weight.data.fill_(1)
112+
m.bias.data.zero_()
113+
114+
115+
def forward(self, x):
116+
# print("Initital Value:",x.size())
117+
x = self.conv1(x)
118+
x = self.bn1(x)
119+
x = self.relu(x)
120+
x = self.maxpool1(x)
121+
add_link_1 = x
122+
conved_add_link_1 = self.conv3x3_add_link_1(add_link_1)
123+
# print("add_link_1: ",add_link_1.size())
124+
# print("add_link_convolved",conved_add_link_1.size())
125+
# sys.exit()
126+
x = self.fire2(x)
127+
x = self.dropout_1(x)
128+
x = self.fire3(x)
129+
x = self.fire4(x)
130+
x = self.maxpool2(x)
131+
print("X output: ",x.size())
132+
# add_link_2 = x
133+
# conved_add_link_2 = self.conv3x3_add_link_2(add_link_2)
134+
x = torch.cat([x,conved_add_link_1],1)
135+
print("X concat output: ",x.size())
136+
sys.exit();
137+
x = self.conv1x1_concat_1(x)
138+
x = self.fire5(x)
139+
x = self.fire6(x)
140+
x = self.fire7(x)
141+
x = self.dropout_2(x)
142+
x = self.fire8(x)
143+
x = self.maxpool3(x)
144+
# x = torch.cat([x,conved_add_link_2],1)
145+
# x = self.conv1x1_concat_2(x)
146+
x = self.fire9(x)
147+
x = self.conv2(x)
148+
x = self.avg_pool(x)
149+
x = self.softmax(x)
150+
return x
151+
152+
def fire_layer(inp, s, e):
153+
f = fire(inp, s, e)
154+
return f
155+
156+
def squeezenet(pretrained=False):
157+
net = SqueezeNet()
158+
# inp = Variable(torch.randn(64,3,32,32))
159+
# out = net.forward(inp)
160+
# print(out.size())
161+
return net
162+
163+
# if __name__ == '__main__':
164+
# squeezenet()
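As a quick sanity check, the SqueezeNet variant above can be exercised with a random CIFAR-sized input (the # 32 / # 16 / # 8 / # 4 comments imply 32x32 inputs). A minimal sketch, assuming model.py is importable as model and the debug sys.exit() in forward() stays disabled as noted in the comments:

import torch
import model  # 1_VGG16/model.py

net = model.SqueezeNet()
net.eval()

with torch.no_grad():
    dummy = torch.randn(2, 3, 32, 32)  # batch of two 32x32 RGB images
    out = net(dummy)

print(out.shape)  # expected: torch.Size([2, 10, 1, 1]), per-class log-probabilities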
