Anti-Litter System using YOLOv5.py
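"""Anti-litter detection demo.

Pipeline, as implemented below: read frames from a video file, compute a KNN
background-subtraction foreground mask, mask the frame so YOLOv5 only sees
moving regions, then flag detections of trash classes that appear far away
from the detected person.

Assumed dependencies (not pinned by the original script): torch,
opencv-python, numpy, and pandas (required by results.pandas()).
"""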
import torch
import cv2
import numpy as np
def POINTS(event, x, y, flags, param):
    # Debug helper: print the cursor position on mouse move, handy for
    # reading ROI coordinates off a named window
    if event == cv2.EVENT_MOUSEMOVE:
        colorsBGR = [x, y]
        print(colorsBGR)


# cv2.namedWindow('ROI')
# cv2.setMouseCallback('ROI', POINTS)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
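# The first run downloads the pretrained yolov5s weights via torch.hub and
# caches them locally; subsequent runs load from the cache.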
count = 0

# Shared state: the current frame, its downscaled copy, and the KNN
# foreground mask
frame = None
resizeF = None
fgMaskKNN = None

# KNN background subtractor: pixels that deviate from the learned background
# model become foreground. dist2Threshold sets the sensitivity; shadow
# detection is off so the mask stays binary.
pKNN = cv2.createBackgroundSubtractorKNN(
    dist2Threshold=600, detectShadows=False)
stream1 = cv2.VideoCapture("Demo1.mp4")
# stream1 = cv2.VideoCapture("Demo2.mp4")
# stream1 = cv2.VideoCapture("Demo3.mp4")
# 3x3 rectangular kernel for optional morphological clean-up of the
# foreground mask (defined but not used below)
element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3), (1, 1))
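
# COCO class names (all detectable by the pretrained yolov5s model) that this
# script treats as litter candidates.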
trash = ['handbag', 'suitcase', 'bottle', 'cup', 'sports ball']
# Main processing loop
while True:
    ret, frame = stream1.read()
    if not ret:
        break
    count += 1
    # Process every third frame to cut the detection load
    if count % 3 != 0:
        continue
    resizeF = cv2.resize(frame, (frame.shape[1] // 3, frame.shape[0] // 3))
    fgMaskKNN = pKNN.apply(resizeF)
    frame = cv2.resize(frame, (600, 300))
    # frame = cv2.resize(frame, (300, 600))
    # Resize fgMaskKNN to match the dimensions of frame
    fgMaskKNN_resized = cv2.resize(fgMaskKNN, (frame.shape[1], frame.shape[0]))
    # Extract the region of interest (ROI): keep only the moving pixels
    roi = cv2.bitwise_and(frame, frame, mask=fgMaskKNN_resized)
    # Pass the ROI to YOLOv5 for object detection
    results = model(roi)
    # Boxes accepted so far in this frame, used to suppress overlapping
    # detections; the first accepted box doubles as the person box below
    non_overlapping_boxes = []
    is_overlapping_person_box = False
    trash_detected = False
    x1_person = 0
    x2_person = 0
    y1_person = 0
    y2_person = 0
    for index, row in results.pandas().xyxy[0].iterrows():
        x1 = int(row['xmin'])
        y1 = int(row['ymin'])
        x2 = int(row['xmax'])
        y2 = int(row['ymax'])
        d = row['name']
        # Check whether the current box overlaps any box accepted so far.
        # Two axis-aligned boxes are disjoint exactly when one lies entirely
        # to the left of, right of, above, or below the other.
        is_overlapping = False
        for box in non_overlapping_boxes:
            bx1, by1, bx2, by2 = box
            if not (x1 >= bx2 or x2 <= bx1 or y1 >= by2 or y2 <= by1):
                is_overlapping = True
                break
        # If the box does not overlap an already-accepted box, draw it and
        # record it at the end of this iteration
        if not is_overlapping:
            if d == 'person':
                if trash_detected:
                    d = 'person littered'
                    print(d)
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                # Put the text label above the person box
                label = 'Person'
                text_size, _ = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
                text_x = x1 + (x2 - x1) // 2 - text_size[0] // 2
                text_y = y1 - 10
                cv2.putText(frame, label, (text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)
            elif d in trash and (len(non_overlapping_boxes) == 0 or not is_overlapping_person_box):
                if len(non_overlapping_boxes) > 0:
                    # Treat the first accepted box as the person box (YOLOv5
                    # sorts detections by confidence, so a detected person
                    # usually comes first) and test whether the trash box is
                    # fully contained inside it
                    x1_person, y1_person, x2_person, y2_person = non_overlapping_boxes[0]
                    is_overlapping_person_box = (
                        x1 >= x1_person and y1 >= y1_person and x2 <= x2_person and y2 <= y2_person
                    )
                # Distance between the centers of the person and trash boxes
                person_center_x = (x1_person + x2_person) / 2
                person_center_y = (y1_person + y2_person) / 2
                trash_center_x = (x1 + x2) / 2
                trash_center_y = (y1 + y2) / 2
                distance = np.sqrt(
                    (person_center_x - trash_center_x)**2
                    + (person_center_y - trash_center_y)**2)
                # Distance threshold in pixels of the 600x300 frame (adjust as needed)
                distance_threshold = 400
                if distance > distance_threshold:
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    trash_detected = True
                    # Put the text label above the trash box
                    label = 'Trash Detected'
                    text_size, _ = cv2.getTextSize(
                        label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)
                    text_x = x1 + (x2 - x1) // 2 - text_size[0] // 2
                    text_y = y1 - 10
                    cv2.putText(frame, label, (text_x, text_y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2, cv2.LINE_AA)
            # Record the accepted box so later detections are tested against it
            non_overlapping_boxes.append([x1, y1, x2, y2])
    # Reset the trash flag when none of the trash classes appear in this frame
    detected_names = results.pandas().xyxy[0]['name'].values
    if not any(name in trash for name in detected_names):
        trash_detected = False
    resizeF = cv2.resize(resizeF, (600, 300))
    cv2.imshow("Original Video", resizeF)
    # frame is already 600x300, so it can be shown directly
    cv2.imshow('Output Video', frame)
    fgMaskKNN = cv2.resize(fgMaskKNN, (600, 300))
    cv2.imshow('Foreground Mask', fgMaskKNN)
    # cv2.imshow('own', roi)
    if cv2.waitKey(30) & 0xFF == 27:  # ESC quits
        break
# Clean up
cv2.destroyAllWindows()
stream1.release()
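
# Usage sketch (assumptions, not stated in the original): place Demo1.mp4 in
# the working directory and run `python "Anti-Litter System using YOLOv5.py"`.
# Three windows appear (original video, annotated output, foreground mask);
# press ESC to quit. Swap the commented-out VideoCapture lines above to try
# Demo2.mp4 or Demo3.mp4.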