yolo.py
import colorsys
import os
import time
import numpy as np
import torch
import torch.nn as nn
from PIL import ImageDraw, ImageFont
from models.yolo import YoloBody
from utils.utils import (cvtColor, get_anchors, get_classes, preprocess_input, resize_image)
from utils.yolo_decode import DecodeBox
'''
YOLO class, used for prediction and testing.
'''


class YOLO(object):
    # Initialize YOLO
    def __init__(self, model_path='model_data/yolov5_s.pth', classes_path='model_data/coco_classes.txt',
                 anchors_path='model_data/yolo_anchors.txt',
                 anchors_mask=None, input_shape=None, phi='s', confidence=0.5,
                 nms_iou=0.3, letterbox_image=True, cuda=True):
        if input_shape is None:
            input_shape = [640, 640]
        if anchors_mask is None:
            anchors_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
        self.model_path = model_path
        self.classes_path = classes_path
        self.anchors_path = anchors_path
        self.anchors_mask = anchors_mask
        self.input_shape = input_shape
        self.phi = phi
        self.confidence = confidence            # only boxes whose score exceeds this confidence threshold are kept
        self.nms_iou = nms_iou                  # IoU threshold used during non-maximum suppression
        self.letterbox_image = letterbox_image  # whether to letterbox the input image for a distortion-free resize
        self.cuda = cuda                        # whether to use CUDA
        # Get the class names and anchors
        self.class_names, self.num_classes = get_classes(self.classes_path)
        self.anchors, self.num_anchors = get_anchors(self.anchors_path)
        self.bbox_util = DecodeBox(self.anchors, self.num_classes, (self.input_shape[0], self.input_shape[1]),
                                   self.anchors_mask)
        # Assign a different color to each class for drawing boxes
        hsv_tuples = [(x / self.num_classes, 1., 1.) for x in range(self.num_classes)]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))
        self.generate()

    # Build the model
    def generate(self):
        # Build the YOLO model and load its weights
        self.net = YoloBody(self.anchors_mask, self.num_classes, self.phi)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.net.load_state_dict(torch.load(self.model_path, map_location=device))
        self.net = self.net.eval()
        print('{} model, anchors, and classes loaded.'.format(self.model_path))
        if self.cuda:
            self.net = nn.DataParallel(self.net)
            self.net = self.net.cuda()

    # Detect an image
    def detect_image(self, image, crop=False):
        """
        Detect a single image.
        :param image: image to detect, as a PIL.Image
        :param crop: whether to crop the detected objects; if so, the crops are saved to img_crop
        :return:
            image: the image with the detected boxes drawn on it
        """
        image_shape = np.array(np.shape(image)[0:2])
        image = cvtColor(image)  # convert to RGB here to avoid errors when predicting on grayscale images
        # Pad the image with gray bars for a distortion-free resize; a plain resize also works
        image_data = resize_image(image, (self.input_shape[1], self.input_shape[0]), self.letterbox_image)
        # Add the batch_size dimension
        image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)
        with torch.no_grad():
            images = torch.from_numpy(image_data)
            if self.cuda:
                images = images.cuda()
            # Feed the image into the network for prediction
            # print(images.shape)
            outputs = self.net(images)
            outputs = self.bbox_util.decode_box(outputs)
            # Stack the predicted boxes, then apply non-maximum suppression
            results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape,
                                                         image_shape, self.letterbox_image, conf_thres=self.confidence,
                                                         nms_thres=self.nms_iou)
            if results[0] is None:
                return image
            top_label = np.array(results[0][:, 6], dtype='int32')
            top_conf = results[0][:, 4] * results[0][:, 5]
            top_boxes = results[0][:, :4]
        # Set the font and box thickness
        font = ImageFont.truetype(font='model_data/simhei.ttf',
                                  size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = int(max((image.size[0] + image.size[1]) // np.mean(self.input_shape), 1))
        # Optionally crop the detected objects
        if crop:
            for i, c in list(enumerate(top_boxes)):
                top, left, bottom, right = top_boxes[i]
                top = max(0, np.floor(top).astype('int32'))
                left = max(0, np.floor(left).astype('int32'))
                bottom = min(image.size[1], np.floor(bottom).astype('int32'))
                right = min(image.size[0], np.floor(right).astype('int32'))
                dir_save_path = "img_crop"
                if not os.path.exists(dir_save_path):
                    os.makedirs(dir_save_path)
                crop_image = image.crop([left, top, right, bottom])
                crop_image.save(os.path.join(dir_save_path, "crop_" + str(i) + ".png"), quality=95, subsampling=0)
                print("save crop_" + str(i) + ".png to " + dir_save_path)
        # Draw the boxes on the image
        for i, c in list(enumerate(top_label)):
            predicted_class = self.class_names[int(c)]
            box = top_boxes[i]
            score = top_conf[i]
            top, left, bottom, right = box
            top = max(0, np.floor(top).astype('int32'))
            left = max(0, np.floor(left).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom).astype('int32'))
            right = min(image.size[0], np.floor(right).astype('int32'))
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)  # note: textsize was removed in Pillow 10; use textbbox there
            label = label.encode('utf-8')
            print(label, top, left, bottom, right)
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            for t in range(thickness):  # nested rectangles give a thicker outline
                draw.rectangle([left + t, top + t, right - t, bottom - t], outline=self.colors[c])
            draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=self.colors[c])
            draw.text(text_origin, str(label, 'UTF-8'), fill=(0, 0, 0), font=font)
            del draw
        return image

    def get_FPS(self, image, test_interval):
        """
        Measure the model's detection speed (FPS): run inference on the same image several
        times and compute the average time of a single run.
        :param image: image to detect
        :param test_interval: number of test runs
        :return: average time per inference, in seconds
        """
        image_shape = np.array(np.shape(image)[0:2])
        image = cvtColor(image)
        image_data = resize_image(image, (self.input_shape[1], self.input_shape[0]), self.letterbox_image)
        # Add the batch_size dimension
        image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)
        images = torch.from_numpy(image_data)
        if self.cuda:
            images = images.cuda()  # move the input to the GPU, as in detect_image, so the timing reflects GPU inference
        t1 = time.time()
        for _ in range(test_interval):
            with torch.no_grad():
                # Feed the image into the network for prediction
                outputs = self.net(images)
                outputs = self.bbox_util.decode_box(outputs)
                # Stack the predicted boxes, then apply non-maximum suppression
                results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape,
                                                             image_shape, self.letterbox_image,
                                                             conf_thres=self.confidence, nms_thres=self.nms_iou)
        t2 = time.time()
        tact_time = (t2 - t1) / test_interval
        return tact_time

    def get_map_txt(self, image_id, image, class_names, map_out_path):
        """
        Run inference on a single image and write the detection results to a txt file.
        :param image_id: image name, e.g. 2007_000645
        :param image: image, as a PIL.Image
        :param class_names: list of class names, e.g. [person, ...]
        :param map_out_path: folder where the .txt files are saved, e.g. mapout
        :return:
        """
        f = open(os.path.join(map_out_path, "detection-results/" + image_id + ".txt"), "w", encoding='utf-8')
        image_shape = np.array(np.shape(image)[0:2])
        # Convert to RGB here to avoid errors when predicting on grayscale images.
        # The code only supports prediction on RGB images; all other image types are converted to RGB.
        image = cvtColor(image)
        # Pad the image with gray bars for a distortion-free resize; a plain resize also works
        image_data = resize_image(image, (self.input_shape[1], self.input_shape[0]), self.letterbox_image)
        # Add the batch_size dimension
        image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)
        with torch.no_grad():
            images = torch.from_numpy(image_data)
            if self.cuda:
                images = images.cuda()
            # Feed the image into the network for prediction
            outputs = self.net(images)
            outputs = self.bbox_util.decode_box(outputs)
            # Stack the predicted boxes, then apply non-maximum suppression
            results = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape,
                                                         image_shape, self.letterbox_image, conf_thres=self.confidence,
                                                         nms_thres=self.nms_iou)
            if results[0] is None:
                f.close()  # close the output file even when there are no detections
                return
            top_label = np.array(results[0][:, 6], dtype='int32')
            top_conf = results[0][:, 4] * results[0][:, 5]
            top_boxes = results[0][:, :4]
        for i, c in list(enumerate(top_label)):
            predicted_class = self.class_names[int(c)]
            box = top_boxes[i]
            score = str(top_conf[i])
            top, left, bottom, right = box
            if predicted_class not in class_names:
                continue
            f.write("%s %s %s %s %s %s\n" % (
                predicted_class, score[:6], str(int(left)), str(int(top)), str(int(right)), str(int(bottom))))
        f.close()
        return
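

# A minimal usage sketch (not part of the original file): it shows how the YOLO class above
# might be driven for single-image detection and FPS measurement. The image paths below are
# hypothetical placeholders; the weights/classes/anchors paths fall back to the defaults
# declared in __init__.
if __name__ == "__main__":
    from PIL import Image

    # Build the detector; disable CUDA automatically when no GPU is available.
    yolo = YOLO(cuda=torch.cuda.is_available())

    # Detect a single image and save the annotated result.
    img = Image.open("img/street.jpg")          # hypothetical input path
    result = yolo.detect_image(img, crop=False)
    result.save("img/street_detected.jpg")      # hypothetical output path

    # Optionally measure the average inference time over 100 runs.
    print("seconds per image: {:.4f}".format(yolo.get_FPS(img, test_interval=100)))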