#!/bin/bash
set -e
:<<!
*****************Instruction*****************
Here you can easily create a model by selecting
an arbitrary backbone model and global method.
You can train a model from scratch on ImageNet
or on another dataset.
Modify the following settings as you wish!
*********************************************
!
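# Typical invocation (a sketch; it assumes main.py sits next to this script
# and that the dataset path configured below exists):
#   bash train_resnet.sh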
#***************Backbone model****************
#Our code provides some mainstream architectures:
#alexnet
#vgg family: vgg11, vgg11_bn, vgg13, vgg13_bn,
# vgg16, vgg16_bn, vgg19_bn, vgg19
#resnet family: resnet18, resnet34, resnet50,
# resnet101, resnet152
#mpncovresnet: mpncovresnet50, mpncovresnet101
#inceptionv3
#You can also add your own network in src/network
arch=resnet50
#*********************************************
#***************Global method*****************
#Our code provides some global methods at the
#end of the network:
#GAvP (global average pooling),
#MPNCOV (matrix power normalized cov pooling),
#BCNN (bilinear pooling)
#CBP (compact bilinear pooling)
#...
#You can also add your own method in src/representation
image_representation=SVD_Pade
# short description of method
description=reproduce
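# For example, to train with the MPN-COV pooling listed above instead of the
# configuration used here, one would change (illustrative values only):
#   image_representation=MPNCOV
#   description=baseline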
#*********************************************
#*******************Dataset*******************
#Choose the dataset folder
benchmark=ILSVRC2012
datadir=/datasets
dataset=$datadir/$benchmark
num_classes=1000
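# $dataset is passed to main.py as the data root; a standard ImageNet-style
# layout (an assumption about the loader in main.py) would look like:
#   $dataset/train/<class_x>/*.JPEG
#   $dataset/val/<class_x>/*.JPEG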
#*********************************************
#****************Hyper-parameters*************
# Freeze the layers before a certain layer.
freeze_layer=0
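# With freeze_layer=0, presumably no layers are frozen, which matches the
# from-scratch training implied by the "FromScratch" results folder below;
# the exact indexing is defined in main.py.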
# Batch size
batchsize=256
# The number of total epochs for training
epoch=60
# The initial learning rate,
# decreased by the step method
lr=0.0794
lr_method=step
lr_params=30\ 45\ 60
# log method
# description: lr = logspace(params1, params2, #epoch)
#is_vec=vec_no
#lr_method=log
#lr_params=-1.1\ -5.0
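# Worked example of the two schedules above (the size of each step drop is an
# assumption; main.py defines the exact policy):
#   step: lr starts at 0.0794 and is cut (typically by 10x) at epochs 30, 45, 60.
#   log : lr = logspace(-1.1, -5.0, 60), i.e. it decays smoothly from
#         10^-1.1 ~= 0.0794 down to 10^-5 over the 60 epochs.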
weight_decay=1e-4
classifier_factor=1
#*********************************************
echo "Start training!"
modeldir=Results/FromScratch-$benchmark-$arch-$image_representation-$description-lr$lr-bs$batchsize
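# With the settings above this expands to:
# Results/FromScratch-ILSVRC2012-resnet50-SVD_Pade-reproduce-lr0.0794-bs256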
if [ ! -d "Results" ]; then
mkdir Results
fi
# Train from scratch if no checkpoint exists in $modeldir yet,
# otherwise resume from the latest checkpoint.
if ! ls "$modeldir"/*.pth.tar >/dev/null 2>&1; then
    if [ ! -d "$modeldir" ]; then
        mkdir "$modeldir"
    fi
    # Keep a copy of this script next to the results for reproducibility.
    cp "$0" "$modeldir"
    python main.py "$dataset" \
           --benchmark "$benchmark" \
           -a "$arch" \
           -p 100 \
           --epochs "$epoch" \
           --lr "$lr" \
           --lr-method "$lr_method" \
           --lr-params $lr_params \
           -j 4 \
           -b "$batchsize" \
           --num-classes "$num_classes" \
           --representation "$image_representation" \
           --freezed-layer "$freeze_layer" \
           --classifier-factor "$classifier_factor" \
           --modeldir "$modeldir" \
           --weight_decay "$weight_decay"
else
    # Resume from the most recently modified checkpoint in $modeldir.
    checkpointfile=$(ls -rt "$modeldir"/*.pth.tar | tail -1)
    python main.py "$dataset" \
           --benchmark "$benchmark" \
           -a "$arch" \
           -p 100 \
           --epochs "$epoch" \
           --lr "$lr" \
           --lr-method "$lr_method" \
           --lr-params $lr_params \
           -j 4 \
           -b "$batchsize" \
           --num-classes "$num_classes" \
           --representation "$image_representation" \
           --freezed-layer "$freeze_layer" \
           --classifier-factor "$classifier_factor" \
           --modeldir "$modeldir" \
           --resume "$checkpointfile" \
           --weight_decay "$weight_decay"
fi
echo "Done!"