# USAGE
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat --video blink_detection_demo.mp4
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat

# import the necessary packages
from scipy.spatial import distance as dist
from imutils import face_utils
import argparse
import imutils
import dlib
import cv2

def eye_aspect_ratio(eye):
    # compute the euclidean distances between the two sets of
    # vertical eye landmarks (x, y)-coordinates
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])

    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = dist.euclidean(eye[0], eye[3])

    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)

    # return the eye aspect ratio
    return ear
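
# A quick sanity check of eye_aspect_ratio (hypothetical coordinates, not
# real landmarks): with the six points ordered corner, top-left, top-right,
# corner, bottom-right, bottom-left, a wide-open eye gives a high EAR and a
# nearly closed eye a low one:
#
#   open_eye   = [(0, 3), (2, 0), (4, 0), (6, 3), (4, 6), (2, 6)]
#   # A = B = C = 6, so EAR = (6 + 6) / (2 * 6) = 1.0
#   closed_eye = [(0, 3), (2, 2.5), (4, 2.5), (6, 3), (4, 3.5), (2, 3.5)]
#   # A = B = 1, C = 6, so EAR = (1 + 1) / (2 * 6) ~= 0.17
#
# A blink shows up as the EAR dropping below EYE_AR_THRESH and rising again.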

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
    help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="",
    help="path to input video file")
args = vars(ap.parse_args())

# define two constants, one for the eye aspect ratio to indicate a
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3
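# (at a typical 30 fps, 3 consecutive frames means the eye must stay below
# the threshold for roughly 100 ms, around the short end of a natural blink)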

# initialize the frame counter and the total number of blinks
COUNTER = 0
TOTAL = 0

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
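# (for the 68-point model, imutils maps "left_eye" to landmark indices 42-47
# and "right_eye" to 36-41; each pair is a half-open [start, end) slice)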

# start the video stream (a video file if one was supplied on the
# command line, otherwise the default webcam)
print("[INFO] starting video stream...")
vs = cv2.VideoCapture(args["video"] if args["video"] else 0)

# loop over frames from the video stream
while True:
    # grab the next frame from the stream; stop once no more frames
    # can be read (e.g. the video file is exhausted)
    ret, frame = vs.read()
    if not ret:
        break

    # resize the frame and convert it to grayscale
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)
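    # (the second argument to dlib's detector is the number of image-pyramid
    # upsamples; 0 keeps detection fast on the already-resized frame)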

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        # average the eye aspect ratios together for both eyes
        ear = (leftEAR + rightEAR) / 2.0

        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame counter
        if ear < EYE_AR_THRESH:
            COUNTER += 1

        # otherwise, the eye aspect ratio is not below the blink
        # threshold
        else:
            # if the eyes were closed for a sufficient number of
            # frames, then increment the total number of blinks
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1

            # reset the eye frame counter
            COUNTER = 0
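            # (counting blinks only on this closed-to-open transition means
            # one sustained closure registers as exactly one blink, however
            # many frames it lasts)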

        # draw the total number of blinks on the frame along with
        # the computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
vs.release()
cv2.destroyAllWindows()