-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathgaze.py
96 lines (90 loc) · 3.05 KB
/
gaze.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import cv2
import numpy as np
import dlib
# For internal use only
def process_eye(split):
    """Binarize a grayscale eye crop: smooth, adaptive-threshold, then dilate once."""
    smoothed = cv2.GaussianBlur(split, (5, 5), 0)
    binary = cv2.adaptiveThreshold(
        smoothed, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
    # A single dilation pass closes small gaps in the thresholded pupil blob.
    return cv2.dilate(binary, None, iterations=1)
# For internal use only
def filter_eye(split):
    """Denoise a thresholded eye crop while preserving edges (median + bilateral)."""
    denoised = cv2.medianBlur(split, 5)
    return cv2.bilateralFilter(denoised, 9, 75, 75)
# For internal use only
def cross_spread(split):
    """Return the centre ``[cx, cy]`` of the dark (zero-valued) region of a
    binarized eye crop — an approximation of the pupil centre.

    Parameters:
        split: 2-D uint8 image where the pupil appears as 0-valued pixels.

    Returns:
        ``[cx, cy]`` as floats (midpoint of the dark blob's bounding box).
        When no dark pixel exists, falls back to the geometric centre of the
        crop, matching the original defaults first=[0,0], last=[width,height].

    Fixes over the previous version: the old nested scan assigned ``last[0]=j``
    instead of ``k`` (so the right edge was never found), overwrote ``first``
    on every dark pixel (so it held the last one, not the first), and clobbered
    the outer loop variable ``i`` — all replaced by a single np.where pass.
    """
    ys, xs = np.where(split == 0)
    if xs.size == 0:
        # No dark pixel found: centre of the whole crop.
        return [split.shape[1] / 2, split.shape[0] / 2]
    # Midpoint of the dark blob's bounding box.
    return [(int(xs.min()) + int(xs.max())) / 2,
            (int(ys.min()) + int(ys.max())) / 2]
# Video capture via webcam
def detect_gaze_direction(video_capture, predictor):
    """Open a camera, detect faces, and draw an estimated gaze line per eye.

    Parameters:
        video_capture: camera index or path accepted by cv2.VideoCapture.
        predictor: a dlib shape predictor returning 68 facial landmarks
            (eye corners at parts 36-41 and 42-47).

    Runs until the user presses 'q'. Fixes over the previous version:
    the first endpoint of cv2.line was passed float coordinates (OpenCV >= 4
    raises on non-integer points); a failed frame read now skips the iteration
    instead of spinning without ever reaching waitKey; the duplicated per-eye
    code is factored into _trace_eye_gaze; the camera is released in finally.
    """
    cam = cv2.VideoCapture(video_capture)
    cam.set(3, 640)  # frame width
    cam.set(4, 480)  # frame height
    detector = dlib.get_frontal_face_detector()
    try:
        while True:
            # Capture frame-by-frame
            ret, frame = cam.read()
            if not ret:
                continue
            frame_color = frame
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            for d in detector(gray, 1):
                # Get the landmarks/parts for the face in box d.
                shape = predictor(gray, d)
                # Mark the four outer/inner eye corners.
                for corner in (36, 39, 42, 45):
                    cv2.circle(frame_color,
                               (shape.part(corner).x, shape.part(corner).y),
                               2, (0, 0, 255))
                _trace_eye_gaze(gray, frame_color, shape, 36, 37, 39, 40)  # left eye
                _trace_eye_gaze(gray, frame_color, shape, 42, 43, 45, 46)  # right eye
            # Display the resulting frame
            cv2.imshow('Video', frame_color)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release video capture even if an exception escapes the loop.
        cam.release()
        cv2.destroyAllWindows()


def _trace_eye_gaze(gray, frame_color, shape, left_ix, top_ix, right_ix, bottom_ix):
    """Locate the pupil inside one eye's landmark box and draw a gaze line.

    The box spans the given corner landmarks, padded by 2 px vertically; the
    processed (binarized) crop is written back into the grayscale frame as in
    the original implementation.
    """
    x1 = shape.part(left_ix).x
    y1 = shape.part(top_ix).y - 2
    x2 = shape.part(right_ix).x
    y2 = shape.part(bottom_ix).y + 2
    split = gray[y1:y2, x1:x2]
    split = process_eye(split)
    split = filter_eye(split)
    centre = cross_spread(split)
    gray[y1:y2, x1:x2] = split
    # Undo the vertical padding before projecting the gaze direction.
    y1 += 2
    y2 -= 2
    cx = centre[0]
    cy = centre[1] - 2
    start = (int(x1 + cx), int(y1 + cy))
    # Extrapolate from the box centre through the pupil centre.
    end = (int((3 * x1 + 4 * cx - x2) / 2), int((3 * y1 + 4 * cy - y2) / 2))
    cv2.line(frame_color, start, end, (255, 0, 0))