-
Notifications
You must be signed in to change notification settings - Fork 1
/
Detector.py
119 lines (85 loc) · 3.4 KB
/
Detector.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# Import all the needed libraries
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import easyocr
import util
# harcascade = "model/haarcascade_russian_plate_number.xml"
# Constants: filesystem locations of the YOLOv3 darknet model assets,
# resolved relative to the current working directory.
model_cfg_path = os.path.join('.', 'model', 'cfg', 'darknet-yolov3.cfg')      # network architecture
model_weights_path = os.path.join('.', 'model', 'weights', 'model.weights')   # trained weights
class_names_path = os.path.join('.', 'model', 'class.names')                  # one class label per line
class Detector:
    """Detect license plates in an image with a YOLOv3 darknet model and
    read the plate text with EasyOCR.
    """

    def __init__(self) -> None:
        pass

    def imageDetection(self, imagePath):
        """Run license-plate detection and OCR on a single image.

        Loads the darknet model, finds plate bounding boxes, crops each
        plate, OCRs it, prints every recognized text whose confidence
        exceeds 0.4, and displays the annotated image with matplotlib.

        Args:
            imagePath: path to the image file to process.

        Raises:
            FileNotFoundError: if the image cannot be read by OpenCV.
        """
        # Load class names, one label per line. Use strip() rather than
        # j[:-1] so a missing trailing newline on the last line does not
        # chop the final character of that label. (No explicit f.close():
        # the with-statement already closes the file.)
        with open(class_names_path, 'r') as f:
            class_names = [line.strip() for line in f if len(line.strip()) > 1]

        # Load model.
        # NOTE(review): the network and the EasyOCR reader are rebuilt on
        # every call; consider caching them on the instance if this method
        # is called repeatedly.
        net = cv2.dnn.readNetFromDarknet(model_cfg_path, model_weights_path)

        # Load image. cv2.imread returns None on failure (missing file,
        # unsupported format) instead of raising — fail loudly here rather
        # than crash with a cryptic unpack error at img.shape.
        img = cv2.imread(imagePath)
        if img is None:
            raise FileNotFoundError(f"Could not read image: {imagePath}")
        H, W, _ = img.shape

        # Convert image to a normalized 416x416 blob (swapRB=True: BGR->RGB).
        blob = cv2.dnn.blobFromImage(img, 1 / 255, (416, 416), (0, 0, 0), True)

        # Forward pass through the network.
        net.setInput(blob)
        detections = util.get_outputs(net)

        # Each detection row: [xc, yc, w, h, objectness, class_score_0, ...]
        bboxes = []
        class_ids = []
        scores = []
        for detection in detections:
            xc, yc, w, h = detection[:4]
            # Convert normalized center/size coordinates to pixel values.
            bboxes.append([int(xc * W), int(yc * H), int(w * W), int(h * H)])
            class_ids.append(np.argmax(detection[5:]))
            scores.append(np.amax(detection[5:]))

        # Non-maximum suppression: drop overlapping duplicate boxes.
        bboxes, class_ids, scores = util.NMS(bboxes, class_ids, scores)

        reader = easyocr.Reader(['en'])
        license_plate = None
        for bbox in bboxes:
            xc, yc, w, h = bbox

            # Crop the plate BEFORE drawing the rectangle so the OCR input
            # is not contaminated by the green border.
            license_plate = img[int(yc - (h / 2)):int(yc + (h / 2)),
                                int(xc - (w / 2)):int(xc + (w / 2)), :].copy()

            img = cv2.rectangle(img,
                                (int(xc - (w / 2)), int(yc - (h / 2))),
                                (int(xc + (w / 2)), int(yc + (h / 2))),
                                (0, 255, 0),
                                15)

            # OCR the raw color crop; print hits above the 0.4 confidence
            # threshold as "<text> <confidence percent>".
            for _, text, text_score in reader.readtext(license_plate):
                if text_score > 0.4:
                    print(text, (text_score * 100))

        # Show the annotated image and the last cropped plate (if any).
        plt.figure()
        plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        if license_plate is not None:
            plt.figure()
            plt.imshow(cv2.cvtColor(license_plate, cv2.COLOR_BGR2RGB))
        plt.show()