# Deploy this script to the Coral Dev Board and enable SSH password login:
# scp /Users/csuftitan/PycharmProjects/pythonProject2/main.py mendel@192.168.100.2:/home/mendel
# ssh mendel@192.168.100.2
# sudo passwd mendel
# sudo nano /etc/ssh/sshd_config
"""
PasswordAuthentication yes
ChallengeResponseAuthentication no
UsePAM yes
"""
# sudo systemctl restart sshd
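# One way to confirm the new SSH settings are active (run on the board; sshd -T
# prints the effective configuration):
# sudo sshd -T | grep -i passwordauthentication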
import os
import pathlib
import time
import cv2
from pycoral.utils import edgetpu
from pycoral.utils import dataset
from pycoral.adapters import common
from pycoral.adapters import classify
import serial
from periphery import GPIO
from flask import Flask, Response, render_template
from threading import Thread
from queue import Queue
button = GPIO("/dev/gpiochip0", 6, "in")
# Specify the TensorFlow model, labels, and camera device
script_dir = pathlib.Path(__file__).parent.absolute()
model_file = os.path.join(script_dir, 'Senior/model_edgetpu.tflite')
label_file = os.path.join(script_dir, 'Senior/labels.txt')
device = 1
width = 640
height = 480
# Initialize the TF interpreter
interpreter = edgetpu.make_interpreter(model_file)
interpreter.allocate_tensors()
# Open the camera device
cap = cv2.VideoCapture(device)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
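# A possible sanity check here (not in the original): cv2.VideoCapture.isOpened()
# reports whether the chosen device index actually opened, e.g.
#   assert cap.isOpened(), "camera device %d failed to open" % device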
ser = serial.Serial('/dev/ttyACM0', 9600)
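# The Arduino on /dev/ttyACM0 is expected to act on the raw command bytes this
# script writes (b'trash', b'recycle', b'compost'). A quick smoke test from a
# separate shell (hypothetical one-liner, not part of this program):
# python3 -c "import serial; serial.Serial('/dev/ttyACM0', 9600).write(b'trash')"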
# Initialize the Flask app
app = Flask(__name__)
detected_message = "" # empty string to hold messages
detection_queue = Queue(maxsize=1) # queue to hold most recent detection
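
# detection_queue is filled in main() but never read in this file. A minimal
# sketch of how a consumer could poll the most recent detection (hypothetical
# helper, not part of the original control flow):
def latest_detection():
    """Return the most recent (label, score) pair, or None if nothing is queued."""
    from queue import Empty  # local import keeps this sketch self-contained
    try:
        return detection_queue.get_nowait()
    except Empty:
        return None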

def main():
    global detected_message
    camera_paused = False
    pause_start_time = None  # Initialize pause_start_time to None
    confidence = 0.0  # Defined up front so the display code below never sees an unset value
    labels = dataset.read_label_file(label_file)  # Read the labels once, not every frame
    while True:
        if button.read():  # Button pressed: archive the last sorted image as a problem case
            print(button.read())  # print True in console
            timestamp = time.strftime("%m%d%Y-%H%M%S")  # timestamp for the archived file name
            original_file_path = "Senior/captured_images/sort.jpg"  # original file
            problem_file_path = f"Senior/captured_images/sort_problem_{timestamp}.jpg"  # new file path
            if os.path.exists(original_file_path):  # rename the original file if it exists
                os.rename(original_file_path, problem_file_path)
                print("Successfully renamed file")
            else:
                print("Failed to rename file: sort.jpg does not exist")
        # Capture the current frame from the camera
        ret, frame = cap.read()
        if not ret or frame is None:  # skip this iteration if the grab failed
            continue
        # Convert the frame to RGB format and resize it to the model's input size
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        size = common.input_size(interpreter)
        rgb = cv2.resize(rgb, size)
        # Pass the resized frame to the interpreter
        common.set_input(interpreter, rgb)
        # Run an inference
        interpreter.invoke()
        classes = classify.get_classes(interpreter, top_k=1)
        # Check the class label and confidence score of the top result
        for c in classes:
            class_label = labels.get(c.id, c.id)
            confidence = c.score
            print('%s detected: Confidence = %.2f%%' % (class_label, confidence * 100))
            detected_message = '%s detected: Confidence = %.2f%%' % (class_label, confidence * 100)
            detection = (class_label, confidence)
            # Keep only the most recent detection in the single-slot queue
            if detection_queue.full():
                detection_queue.get_nowait()
            detection_queue.put_nowait(detection)
            if class_label == 'Base' and confidence > 0.80:
                print("Base case: nothing to sort")
            elif class_label == 'Waste' and confidence > 0.80:
                print("Trigger Arduino for waste")
                if not camera_paused:  # only trigger if the camera is not already paused
                    camera_paused = True
                    # Save the frame, then trigger the waste bin
                    if ret and frame is not None:
                        cv2.imwrite('Senior/captured_images/sort.jpg', frame)
                        print("image captured sort.jpg")
                    ser.write(b'trash')
                    time.sleep(3)
                    # Exit the classification loop to prevent multiple triggers
                    break
            elif class_label == 'Recycling' and confidence > 0.80:
                print("Trigger Arduino for recycling")
                if not camera_paused:  # only trigger if the camera is not already paused
                    camera_paused = True
                    # Save the frame, then trigger the recycling bin
                    if ret and frame is not None:
                        cv2.imwrite('Senior/captured_images/sort.jpg', frame)
                        print("image captured sort.jpg")
                    ser.write(b'recycle')
                    time.sleep(3)
                    # Exit the classification loop to prevent multiple triggers
                    break
            elif class_label == 'Compost' and confidence > 0.80:
                print("Trigger Arduino for compost")
                if not camera_paused:  # only trigger if the camera is not already paused
                    camera_paused = True
                    # Save the frame, then trigger the compost bin
                    if ret and frame is not None:
                        cv2.imwrite('Senior/captured_images/sort.jpg', frame)
                        print("image captured sort.jpg")
                    ser.write(b'compost')
                    time.sleep(3)
                    # Exit the classification loop to prevent multiple triggers
                    break
        time.sleep(0.25)
        # If the camera is not paused, display the frame and check for user input
        if not camera_paused:
            cv2.putText(frame, "Confidence: %.2f" % confidence, (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.imshow('Object Detection', frame)
            # Exit on 'c' key
            if cv2.waitKey(1) & 0xFF == ord('c'):
                break
        # If the camera is paused, wait three seconds before resuming
        else:
            cv2.putText(frame, "Camera paused. Waiting for 3 seconds to resume...",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.imshow('Object Detection', frame)
            if not pause_start_time:
                pause_start_time = time.time()
            elif time.time() - pause_start_time >= 3:
                pause_start_time = None
                camera_paused = False
            # Exit on 'c' key
            if cv2.waitKey(1) & 0xFF == ord('c'):
                break
    # Release the camera and close the window
    cap.release()
    cv2.destroyAllWindows()
    ser.close()

def gen_video_feed():
    while True:
        # Capture the current frame from the camera
        ret, frame = cap.read()
        if not ret or frame is None:  # skip frames that failed to grab
            continue
        # Encode the frame as JPEG
        ret, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        # Yield the frame to the Flask app as one part of the MJPEG stream
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
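
# The generator above produces a multipart/x-mixed-replace (MJPEG) stream: each
# yielded chunk is one JPEG frame delimited by the `--frame` boundary that the
# /video_feed route below declares, so most browsers can render the feed
# directly from http://<board-ip>:5000/video_feed (5000 is Flask's default port).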

def message_stream():
    global detected_message
    while True:
        # Stream the message feed from the detection cases as server-sent events
        if detected_message:
            yield f'data: {detected_message}\n\n'
            detected_message = ""  # clear the message once it has been sent
        time.sleep(0.25)

@app.route('/')
def index():
    # Render the HTML template with the video feed
    return render_template('index.html')


@app.route('/video_feed')
def video_feed():
    # Stream the video feed from the camera
    return Response(gen_video_feed(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/message_stream')
def message_stream_route():
    # Stream the message feed from the detection cases
    return Response(message_stream(), content_type='text/event-stream')
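
# A minimal way to watch the detection stream from another machine (sketch,
# assuming the third-party `requests` package; not part of this program):
#   import requests
#   with requests.get('http://192.168.100.2:5000/message_stream', stream=True) as r:
#       for line in r.iter_lines():
#           if line.startswith(b'data: '):
#               print(line[len(b'data: '):].decode())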

if __name__ == '__main__':
    # Run the detection loop in a background thread, then serve the Flask app
    main_thread = Thread(target=main)
    main_thread.start()
    app.run(host='0.0.0.0', debug=False)
# https://automaticwastesorter.ngrok.app/
# ngrok http --domain=automaticwastesorter.ngrok.app 5000
"""
git commands to use ->
cp -r /home/mendel/captured_images /home/mendel/Senior/
cd Senior
git add .
git commit -m "Added captured images"
git push origin main
to update after changing the model use this
git pull origin main
"""