face_extractor.py
# Import libraries
import os

import cv2
import numpy as np


def draw_border(img, pt1, pt2, color, thickness, r, d):
    """Draw a rectangle with rounded corners of radius r between pt1 and pt2."""
    x1, y1 = pt1
    x2, y2 = pt2

    # Top left
    cv2.line(img, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
    cv2.line(img, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
    cv2.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)

    # Top right
    cv2.line(img, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
    cv2.line(img, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
    cv2.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)

    # Bottom left
    cv2.line(img, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
    cv2.line(img, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
    cv2.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)

    # Bottom right
    cv2.line(img, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
    cv2.line(img, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
    cv2.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)

# Define paths to the face detector model files
base_dir = os.path.dirname(__file__)
prototxt_path = os.path.join(base_dir, 'model_data', 'deploy.prototxt')
caffemodel_path = os.path.join(base_dir, 'model_data', 'weights.caffemodel')

# Read the Caffe face detector model
model = cv2.dnn.readNetFromCaffe(prototxt_path, caffemodel_path)

# Create the output directories 'faces' and 'results' if they do not exist
for output_dir in ['faces', 'results']:
    if not os.path.exists(output_dir):
        print("New directory created: " + output_dir)
        os.makedirs(output_dir)

# Loop through all images and strip out faces
count = 0
for file in os.listdir(os.path.join(base_dir, 'images')):
    file_name, file_extension = os.path.splitext(file)
    if file_extension in ['.png', '.jpg']:
        image = cv2.imread(os.path.join(base_dir, 'images', file))
        if image is None:
            # Skip files that OpenCV could not decode
            continue
        (h, w) = image.shape[:2]

        # Preprocess the image into a 300x300 blob with the model's mean values
        blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))
        model.setInput(blob)
        detections = model.forward()

        # Identify each face
        for i in range(0, detections.shape[2]):
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            confidence = detections[0, 0, i, 2]

            # If confidence > 0.5, save the face crop as a separate file
            if confidence > 0.5:
                count += 1
                frame = image[startY:endY, startX:endX]
                if frame.any():
                    cv2.imwrite('faces/' + str(i) + '_' + file, frame)

                # Annotate the detection on the original image and save it
                draw_border(image, (startX, startY), (endX, endY),
                            (225, 249, 126), 2, 5, 10)
                cv2.putText(image, 'DETECTED FACE', (startX, startY - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (225, 249, 126), 1)
                cv2.imwrite('results/' + file_name + file_extension, image)
                # my_frame = cv2.resize(frame, (224, 224))
                # grayscale = cv2.cvtColor(my_frame, cv2.COLOR_BGR2GRAY)

print("Extracted " + str(count) + " faces from all images")