# face_recognized.py
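"""Recognize faces from a webcam stream or from a single image.

Face encodings are built from the images in --registered_faces_dir, and each
face detected in the input is labeled with the closest registered name and a
similarity rate.
"""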
from pathlib import Path
from imutils import paths
import face_recognition
import os
import cv2
import numpy as np
import argparse
ap = argparse.ArgumentParser()
ap.add_argument(
    "--registered_faces_dir",
    required=True,
    type=str,
    help="Directory containing the registered face images."
)
ap.add_argument(
    "--recognize_image",
    type=str,
    help="Path to an image to be recognized. If omitted, the webcam stream is used."
)
args = vars(ap.parse_args())
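# Example invocation (the paths below are illustrative, not part of the repository):
#   python face_recognized.py --registered_faces_dir ./registered_faces
#   python face_recognized.py --registered_faces_dir ./registered_faces --recognize_image ./group_photo.jpg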

def analyze_faces(args):
    # Initialize the variables for operation.
    known_face_encodings = []
    known_face_names = []
    is_video = True
    face_names = []
    process_this_frame = True
    faces_path = Path(args["registered_faces_dir"])
    # Check whether registered_faces_dir exists.
    if not os.path.exists(faces_path):
        print("Invalid registered_faces_dir")
        return
    # Check whether the input is an image or a video stream.
    if args["recognize_image"]:
        recognize_image = Path(args["recognize_image"])
        is_video = False
    # Get all the registered faces.
    face_files = paths.list_images(faces_path)
    # Read each image and encode the first face found in it.
    for face_file in face_files:
        print("Processing file: {}".format(face_file))
        name = face_file.split(os.path.sep)[-1].split('.')[-2]
        image = face_recognition.load_image_file(face_file)
        encodings = face_recognition.face_encodings(image)
        if not encodings:
            print("No face found in {}, skipping.".format(face_file))
            continue
        known_face_names.append(name)
        known_face_encodings.append(encodings[0])
    if not known_face_encodings:
        print("No registered faces could be encoded.")
        return
    if is_video:
        # Get a reference to the webcam at index 0.
        # Change the index as required.
        video_capture = cv2.VideoCapture(0)
    # Loop until the user terminates.
    while True:
        if is_video:
            # Grab a single frame of video.
            _, frame = video_capture.read()
        else:
            frame = cv2.imread(str(recognize_image))
            if frame is None:
                print("Invalid image.")
                break
        # Resize the frame to 1/4 size for faster face recognition processing.
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR (which OpenCV uses) to RGB (which face_recognition expects).
        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
        # Only process every other frame of video to save time.
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video.
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s).
                # Tune the tolerance parameter as desired: it controls how
                # close an input face must be to a registered face to count
                # as a match.
                matches = face_recognition.compare_faces(
                    known_face_encodings,
                    face_encoding,
                    tolerance=0.5
                )
                name = "UNKNOWN"
                # Find the registered face that looks the most similar to the input face.
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                similar_rate = (1 - face_distances[best_match_index]) * 100
                # Check if the detected face is a known face.
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                face_names.append((name, similar_rate))
        process_this_frame = not process_this_frame
        # Display the results.
        for (top, right, bottom, left), (name, similar_rate) in zip(face_locations, face_names):
            # Scale the face locations back up, since detection ran on a frame scaled to 1/4 size.
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            font = cv2.FONT_HERSHEY_DUPLEX
            if name == "UNKNOWN":
                color = (0, 0, 255)
            else:
                color = (0, 255, 0)
            # Draw a filled label bar below the face.
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)
            # Draw the similarity rate below the face and the name above it.
            cv2.putText(frame, "{:.2f}%".format(similar_rate), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            cv2.putText(frame, name, (left + 6, top - 6), font, 1.0, (255, 255, 0), 1)
            # Draw a box around the face.
            cv2.rectangle(frame, (left, top), (right, bottom), color, 2)
        # Display the resulting image.
        cv2.imshow('Video', frame)
        if not is_video:
            # Wait for any key to be pressed.
            cv2.waitKey(0)
            # Save the detected image to the same directory with the suffix _detected.
            new_filename = recognize_image.stem + "_detected.jpg"
            new_filepath = recognize_image.parents[0]
            cv2.imwrite(str(new_filepath / new_filename), frame)
            # Exit the loop as it is not a video stream.
            break
        # Hit 'q' on the keyboard to quit.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    if is_video:
        # Release the handle to the webcam.
        video_capture.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    analyze_faces(args)