detect_face_mask.py
import cv2
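
# Usage sketch (assumption: the Haar cascade XML files live under data\xml relative to
# the working directory, and a webcam is available at device index 0):
#   python detect_face_mask.py
# Press Esc in the preview window to quit.
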
# Multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
face_cascade = cv2.CascadeClassifier('data\\xml\\haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('data\\xml\\haarcascade_eye.xml')  # loaded but not used below
mouth_cascade = cv2.CascadeClassifier('data\\xml\\haarcascade_mcs_mouth.xml')
upper_body = cv2.CascadeClassifier('data\\xml\\haarcascade_upperbody.xml')  # loaded but not used below
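
# Note: the frontal-face and eye cascades ship with the opencv-python package (see
# cv2.data.haarcascades); haarcascade_mcs_mouth.xml comes from the older "mcs" cascade
# set and usually has to be downloaded separately. CascadeClassifier loads silently as
# empty when a path is wrong, so this sanity check (an addition, not part of the
# original script) fails fast instead of misbehaving later:
if face_cascade.empty() or mouth_cascade.empty():
    raise IOError("Could not load Haar cascade XML files; check the data\\xml paths.")
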
# Adjust the threshold value (roughly 80 to 105) to suit your lighting conditions.
bw_threshold = 80
# User message
font = cv2.FONT_HERSHEY_SIMPLEX
org = (30, 30)
weared_mask_font_color = (255, 255, 255)
not_weared_mask_font_color = (0, 0, 255)
thickness = 2
font_scale = 1
weared_mask = "Face Mask Detected"
not_weared_mask = "Face Mask Not Detected"
# Read video
cap = cv2.VideoCapture(0)
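# Defensive check (an addition; assumes device index 0 is the default webcam): if the
# camera cannot be opened, cap.read() returns (False, None) and the frame processing
# below would fail on a None image.
if not cap.isOpened():
    raise IOError("Cannot open webcam at device index 0.")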
while True:
    # Get an individual frame; stop if the camera stops delivering frames
    ret, img = cap.read()
    if not ret:
        break
    img = cv2.flip(img, 1)

    # Convert the frame to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Convert the frame to black and white
    (thresh, black_and_white) = cv2.threshold(gray, bw_threshold, 255, cv2.THRESH_BINARY)
    # cv2.imshow('black_and_white', black_and_white)

    # Detect faces in the grayscale frame
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)

    # Face prediction on the black-and-white frame
    faces_bw = face_cascade.detectMultiScale(black_and_white, 1.1, 4)
    if len(faces) == 0 and len(faces_bw) == 0:
        cv2.putText(img, "No face found...", org, font, font_scale, weared_mask_font_color, thickness, cv2.LINE_AA)
    elif len(faces) == 0 and len(faces_bw) == 1:
        # With a white mask covering the mouth, the face is often detected only in the
        # thresholded (black-and-white) image and not in the grayscale one
        cv2.putText(img, weared_mask, org, font, font_scale, weared_mask_font_color, thickness, cv2.LINE_AA)
    else:
        # Draw a rectangle around each detected face
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]

            # Detect mouth regions
            mouth_rects = mouth_cascade.detectMultiScale(gray, 1.5, 5)

        # Face detected but mouth not detected, which means the person is wearing a mask
        if len(mouth_rects) == 0:
            cv2.putText(img, weared_mask, org, font, font_scale, weared_mask_font_color, thickness, cv2.LINE_AA)
        else:
            for (mx, my, mw, mh) in mouth_rects:
                if y < my < y + h:
                    # Face and mouth are both detected and the mouth lies within the face
                    # coordinates, so the mouth prediction is trusted and the person is
                    # not wearing a mask
                    cv2.putText(img, not_weared_mask, org, font, font_scale, not_weared_mask_font_color, thickness, cv2.LINE_AA)
                    # cv2.rectangle(img, (mx, my), (mx + mw, my + mh), (0, 0, 255), 3)
                    break
    # Show the frame with results
    cv2.imshow('Mask Detection', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # Esc key quits
        break

# Release the video capture and close all windows
cap.release()
cv2.destroyAllWindows()