# -*- coding: utf-8 -*-
"""custom_Traning.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1NNCwbsB_yZbIxn03OWAIpG_YuubEqicd
"""
!nvidia-smi
# Commented out IPython magic to ensure Python compatibility.
#mount drive
# %cd ..
from google.colab import drive
drive.mount('/content/gdrive')
# this creates a symbolic link so that now the path /content/gdrive/My\ Drive/ is equal to /mydrive
!ln -s /content/gdrive/My\ Drive/ /mydrive
# list the contents of /mydrive
!ls /mydrive
# Navigate to /mydrive/yolov4
# %cd /mydrive/yolov4
!git clone https://github.com/AlexeyAB/darknet
# Commented out IPython magic to ensure Python compatibility.
# change makefile to have GPU and OPENCV enabled
# also set CUDNN, CUDNN_HALF and LIBSO to 1
# %cd darknet/
!sed -i 's/OPENCV=0/OPENCV=1/' Makefile
!sed -i 's/GPU=0/GPU=1/' Makefile
!sed -i 's/CUDNN=0/CUDNN=1/' Makefile
!sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/' Makefile
!sed -i 's/LIBSO=0/LIBSO=1/' Makefile
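# Optional sanity check (a small added sketch, not part of the original notebook):
# confirm the sed edits above actually took effect before building.
!grep -E "^(GPU|CUDNN|CUDNN_HALF|OPENCV|LIBSO)=" Makefile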
!make
# make the darknet binary executable once the build has produced it
!chmod +x ./darknet
# Commented out IPython magic to ensure Python compatibility.
# Clean the data and cfg folders first except the labels folder in data which is required
# %cd data/
!find -maxdepth 1 -type f -exec rm -rf {} \;
# %cd ..
# %rm -rf cfg/
# %mkdir cfg
# Unzip the obj.zip dataset and its contents so that they are now in /darknet/data/ folder
!unzip /mydrive/yolov4/obj.zip -d data/
# Copy the yolov4-custom.cfg file so that it is now in /darknet/cfg/ folder
!cp /mydrive/yolov4/yolov4-custom.cfg cfg
# verify if your custom file is in cfg folder
!ls cfg/
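# Note (an assumption, the edited file itself is not shown in this notebook): for a
# custom dataset the copied yolov4-custom.cfg is normally edited beforehand, roughly:
#   batch=64, subdivisions=16 (raise subdivisions if you hit CUDA out-of-memory errors)
#   max_batches = classes * 2000 (but not less than 6000)
#   steps = 80% and 90% of max_batches
#   classes=<your class count> in each of the three [yolo] layers
#   filters=(classes + 5) * 3 in the [convolutional] layer right before each [yolo] layer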
# Copy the obj.names and obj.data files from your drive so that they are now in /darknet/data/ folder
!cp /mydrive/yolov4/obj.names data
!cp /mydrive/yolov4/obj.data data
# verify if the above files are in data folder
!ls data/
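# For reference (an assumed example following darknet conventions, not the actual files
# in the drive): obj.names lists one class name per line, and obj.data looks roughly like:
#   classes = 1
#   train = data/train.txt
#   valid = data/test.txt
#   names = data/obj.names
#   backup = /mydrive/yolov4/training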
!cp /mydrive/yolov4/process.py .
# run process.py ( this creates the train.txt and test.txt files in our darknet/data folder )
!python process.py
# list the contents of data folder to check if the train.txt and test.txt files have been created
!ls data/
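# A minimal sketch of what a process.py-style split script typically does (hypothetical,
# the real process.py copied from the drive may differ): it collects the image paths under
# data/obj/, shuffles them, and writes ~90% to data/train.txt and the rest to data/test.txt.
import glob, random

def make_train_test_split(img_dir='data/obj', train_ratio=0.9):
  images = sorted(glob.glob(img_dir + '/*.jpg'))
  random.shuffle(images)
  split = int(len(images) * train_ratio)
  with open('data/train.txt', 'w') as f:
    f.write('\n'.join(images[:split]) + '\n')
  with open('data/test.txt', 'w') as f:
    f.write('\n'.join(images[split:]) + '\n')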
# Download the yolov4 pre-trained weights file
!wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137
# Commented out IPython magic to ensure Python compatibility.
# %cd darknet
!chmod +x ./darknet
# train your custom detector! (uncomment %%capture below if you run into memory issues or your Colab is crashing)
#%%capture
!./darknet detector train data/obj.data cfg/yolov4-custom.cfg yolov4.conv.137 -dont_show -map
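# While training, darknet writes checkpoints to the backup folder named in obj.data
# (assumed here to be /mydrive/yolov4/training, matching the restart command below):
# yolov4-custom_last.weights roughly every 100 iterations, numbered snapshots every
# 1000 iterations, and yolov4-custom_best.weights for the best mAP seen so far
# when the -map flag is used.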
!chmod +x ./darknet
# to restart training your custom detector where you left off (using the weights that were saved last)
!./darknet detector train data/obj.data cfg/yolov4-custom.cfg /mydrive/yolov4/training/yolov4-custom_last.weights -dont_show -map
# Commented out IPython magic to ensure Python compatibility.
# define helper function imShow
def imShow(path):
  import cv2
  import matplotlib.pyplot as plt
  # %matplotlib inline

  image = cv2.imread(path)
  height, width = image.shape[:2]
  resized_image = cv2.resize(image, (3*width, 3*height), interpolation=cv2.INTER_CUBIC)

  fig = plt.gcf()
  fig.set_size_inches(18, 10)
  plt.axis("off")
  plt.imshow(cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB))
  plt.show()
#only works if the training does not get interrupted
imShow('chart.png')
# You can check the mAP for all the saved weights to see which gives the best results (xxxx here is the saved weight number like 4000, 5000, 6000 and so on)
!./darknet detector map data/obj.data cfg/yolov4-custom.cfg /mydrive/yolov4/training/yolov4-custom_5000.weights -points 0
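# A hypothetical helper (not in the original notebook) to score every numbered checkpoint
# in the assumed backup folder in one go, so their mAP values can be compared:
import glob, subprocess

for w in sorted(glob.glob('/mydrive/yolov4/training/yolov4-custom_*000.weights')):
  print('Evaluating', w)
  subprocess.run(['./darknet', 'detector', 'map', 'data/obj.data',
                  'cfg/yolov4-custom.cfg', w, '-points', '0'])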
# Commented out IPython magic to ensure Python compatibility.
#set your custom cfg to test mode
# %cd cfg
!sed -i 's/batch=64/batch=1/' yolov4-custom.cfg
!sed -i 's/subdivisions=16/subdivisions=1/' yolov4-custom.cfg
# %cd ..
!./darknet detector test data/obj.data cfg/yolov4-custom.cfg /mydrive/yolov4/training/yolov4-custom_last.weights -dont_show -ext_output < data/train.txt > result.txt
# run your custom detector with this command (upload an image to your Google Drive to test; the -thresh flag sets the minimum confidence score a detection must reach to be shown)
!./darknet detector test data/obj.data cfg/yolov4-custom.cfg /mydrive/yolov4/training/yolov4-custom_best.weights /mydrive/imagest/000004.jpg -thresh 0.3
imShow('predictions.jpg')
# run your custom detector on a video with this command (upload a video to your Google Drive to test; the -thresh flag sets the minimum confidence score for detections). The annotated output video is saved to the path given by -out_filename
!./darknet detector demo data/obj.data cfg/yolov4-custom.cfg /mydrive/yolov4/training/yolov4-custom_best.weights -dont_show /mydrive/imagest/crowded.mp4 -i 0 -out_filename /mydrive/imagest/finalresult.avi
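# Flag notes (added for clarity): -dont_show suppresses the display window, which Colab
# cannot open anyway; -i 0 selects GPU index 0; -out_filename writes the annotated video
# to the given path.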
# define helper function to display videos
import io
from IPython.display import HTML
from base64 import b64encode
def show_video(file_name, width=640):
  # read the resulting video and embed it in the notebook as a base64 data URI
  mp4 = open(file_name, 'rb').read()
  data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
  return HTML("""
  <video width="{0}" controls>
    <source src="{1}" type="video/mp4">
  </video>
  """.format(width, data_url))
!pwd
# Commented out IPython magic to ensure Python compatibility.
# convert resulting video from avi to mp4 file format
import os
path_video = os.path.join("outputs","demo_tiny1.avi")
# %cd outputs/
!ffmpeg -y -loglevel panic -i demo_tiny1.avi output.mp4
# %cd ..
# output object tracking video
path_output = os.path.join("outputs","output.mp4")
show_video(path_output, width=960)
# Commented out IPython magic to ensure Python compatibility.
# %cd ..
# Commented out IPython magic to ensure Python compatibility.
# convert resulting video from avi to mp4 file format
import os
path_video = os.path.join("imagest","finalresult.avi")
# %cd imagest/
!ffmpeg -y -loglevel panic -i finalresult.avi output1.mp4
# %cd ..
# display the output video with the detections
path_output = os.path.join("imagest","output1.mp4")
show_video(path_output, width=960)