# USAGE
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat --video blink_detection_demo.mp4
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat

# import the necessary packages
import cv2
import dlib
import imutils
from imutils import face_utils
from scipy.spatial import distance as dist
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) for a single eye.

    Parameters
    ----------
    eye : sequence of six (x, y) landmark coordinates, ordered:

                [1] [2]
            [0]         [3]
                [5] [4]

    Returns
    -------
    float
        (|p1 - p5| + |p2 - p4|) / (2 * |p0 - p3|).  The ratio is roughly
        constant while the eye is open and drops toward zero on a blink.
    """
    # euclidean distances between the two sets of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # euclidean distance between the horizontal eye landmarks
    C = dist.euclidean(eye[0], eye[3])
    # compute and return the eye aspect ratio
    ear = (A + B) / (2.0 * C)
    return ear
# NOTE(review): the original CLI argument parsing was deliberately commented
# out by the author; the model path is hard-coded further below instead.
# ap = argparse.ArgumentParser()
# ap.add_argument("-p", "--shape-predictor", required=True,
#                 help="path to facial landmark predictor")
# ap.add_argument("-v", "--video", type=str, default="",
#                 help="path to input video file")
# args = vars(ap.parse_args())

# define two constants, one for the eye aspect ratio to indicate a blink,
# and a second for the number of consecutive frames the EAR must stay
# below that threshold before a blink is counted
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3

# initialize the frame counter (consecutive "eye closed" frames) and the
# total number of blinks
COUNTER = 0
TOTAL = 0

# initialize dlib's face detector (HOG-based), then create the facial
# landmark predictor
print("[INFO] loading facial landmark predictor...")
# dlib's frontal face detector is a sliding-window histogram-of-oriented-
# gradients (HOG) based object detector.
detector = dlib.get_frontal_face_detector()

# The shape predictor takes an image region containing an object and outputs
# a set of point locations defining the object's pose.  The classic example
# is human face pose prediction: given an image of a face, it locates
# important facial landmarks such as the corners of the mouth and eyes and
# the tip of the nose.
# predictor = dlib.shape_predictor(args["shape_predictor"])
predictor = dlib.shape_predictor('./1.dat')  # load the trained landmark model

# grab the indexes of the facial landmarks for the left and right eye,
# respectively (FACIAL_LANDMARKS_IDXS is an ordered dict of name -> slice)
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# start reading the video stream
print("[INFO] starting camera stream thread...")
# vs = FileVideoStream(args["video"]).start()
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# loop over frames from the video stream
while True:
    # grab the frame from the camera, resize it, and convert it to
    # grayscale (detection on the gray image is faster)
    grabbed, frame = cap.read()
    if not grabbed or frame is None:
        # no frame was returned (camera unplugged / stream ended):
        # stop cleanly instead of crashing in imutils.resize
        break
    frame = imutils.resize(frame, width=1000)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame (0 = no upsampling)
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then convert
        # the landmark (x, y)-coordinates to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0

        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

        # if the EAR is below the blink threshold, increment the counter
        # of consecutive "closed" frames
        if ear < EYE_AR_THRESH:
            COUNTER += 1
        else:
            # eyes are open again: if they were closed for a sufficient
            # number of frames, count one blink, then reset the counter
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1
            COUNTER = 0

        # draw the total number of blinks on the frame along with the
        # computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # show the frame; break out of the loop when the Esc key is pressed
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break

# do a bit of cleanup
cap.release()
cv2.destroyAllWindows()