Eye detection based on dlib

# -*- coding=utf-8 -*-
import numpy as np 
import cv2
import dlib
from scipy.spatial import distance
import os
from imutils import face_utils

img_eye = cv2.imread("eye.png")   # the eye picture that will be pasted over the detected eyes
switch = False                    # toggled with the 's'/'q' keys: whether to replace the eyes
VECTOR_SIZE = 3                   # length of the rolling EAR history
def queue_in(queue, data):
    # Append the newest EAR to a fixed-length FIFO, dropping (and returning) the oldest value when full
    ret = None
    if len(queue) >= VECTOR_SIZE:
        ret = queue.pop(0)
    queue.append(data)
    return ret, queue

def eye_aspect_ratio(eye):
    # EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||),
    # where p1..p6 are the six landmarks of one eye
    A = distance.euclidean(eye[1], eye[5])
    B = distance.euclidean(eye[2], eye[4])
    C = distance.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear
    
pwd = os.getcwd()
model_path = os.path.join(pwd, 'model')
shape_detector_path = os.path.join(model_path, 'shape_predictor_68_face_landmarks.dat')

# Load dlib's frontal face detector and the pre-trained 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_detector_path)

border = 2  # padding (in pixels) for the optional bounding-box drawing below

EYE_AR_THRESH = 0.3         # EAR threshold: an open eye typically gives roughly 0.25-0.35, and the value drops toward 0 during a blink
EYE_AR_CONSEC_FRAMES = 3    # how many consecutive frames the EAR must stay below the threshold to count as a blink

# Landmark indices of the two eyes in dlib's 68-point model (1-based 37-42 and 43-48, shifted to 0-based)
RIGHT_EYE_START = 37 - 1
RIGHT_EYE_END = 42 - 1
LEFT_EYE_START = 43 - 1
LEFT_EYE_END = 48 - 1

frame_counter = 0   # consecutive frames with EAR below the threshold
blink_counter = 0   # total number of blinks detected
ear_vector = []     # rolling history of the most recent EAR values
cap = cv2.VideoCapture("eye.mp4")
while True:
    ret, img = cap.read()
    if not ret:                     # stop when the video ends or a frame cannot be read
        break

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)       # detect faces in the grayscale frame
    for rect in rects:
        print('-'*20)
        shape = predictor(gray, rect)
        points = face_utils.shape_to_np(shape)# convert the facial landmark (x, y)-coordinates to a NumPy array
        # points = shape.parts()
        leftEye = points[LEFT_EYE_START:LEFT_EYE_END + 1]
        rightEye = points[RIGHT_EYE_START:RIGHT_EYE_END + 1]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        #print('leftEAR = {0}'.format(leftEAR))
        #print('rightEAR = {0}'.format(rightEAR))

        ear = (leftEAR + rightEAR) / 2.0

        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)

        if switch:
            # Replace the left-eye region with the resized eye image
            xl, yl, wl, hl = cv2.boundingRect(leftEyeHull)
            img_eyel = cv2.resize(img_eye, (wl, hl), interpolation=cv2.INTER_NEAREST)
            img[yl:yl + hl, xl:xl + wl] = img_eyel
            #cv2.rectangle(img,(xl-border,yl-border),(xl+wl+border,yl+hl+border),(0,255,9),2)

            # Replace the right-eye region with the resized eye image
            xr, yr, wr, hr = cv2.boundingRect(rightEyeHull)
            img_eyer = cv2.resize(img_eye, (wr, hr), interpolation=cv2.INTER_NEAREST)
            img[yr:yr + hr, xr:xr + wr] = img_eyer
            #cv2.rectangle(img,(xr-border,yr-border),(xr+wr+border,yr+hr+border),(0,255,9),2)

        #cv2.drawContours(img, [leftEyeHull], -1, (0, 255, 0), 1)
        #cv2.drawContours(img, [rightEyeHull], -1, (0, 255, 0), 1)

        # Keep a short history of recent EAR values (groundwork for a learned blink classifier)
        _, ear_vector = queue_in(ear_vector, ear)
        if len(ear_vector) == VECTOR_SIZE:
            print(ear_vector)
            input_vector = [ear_vector]

        # Count a blink once the EAR has stayed below the threshold for EYE_AR_CONSEC_FRAMES frames
        if ear < EYE_AR_THRESH:
            frame_counter += 1
        else:
            if frame_counter >= EYE_AR_CONSEC_FRAMES:
                blink_counter += 1
            frame_counter = 0

        cv2.putText(img, "Blinks:{0}".format(blink_counter), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, "EAR:{:.2f}".format(ear), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    cv2.imshow("Frame", img)

    key = cv2.waitKey(1) & 0xFF
    if key == ord('s'):      # 's': replace the eyes with the image
        switch = True
    elif key == ord('q'):    # 'q': remove the overlay
        switch = False
    elif key == 27:          # Esc: quit
        break


cap.release()
cv2.destroyAllWindows()
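
To try the script on a live camera instead of the bundled eye.mp4, the capture source can be swapped for a camera index (0 is assumed to be the default webcam):

cap = cv2.VideoCapture(0)   # assumed default webcam in place of "eye.mp4"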

Result:
(demo screenshot)
Approach:
Use dlib and the pre-trained 68-point landmark model to locate the eyes;
then take the convex hull of each eye's landmarks and fit a bounding rectangle around it;
then load an eye picture and resize it to match that rectangle;
finally, paste the eye picture over the rectangular region (a minimal single-image sketch of this pipeline follows below).
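
For reference, here is a minimal single-image sketch of the same pipeline, stripped of the video loop and the blink bookkeeping. The input photo face.jpg and the output file face_out.jpg are assumed names; eye.png and the landmark model are the same files used in the full script above.

# -*- coding: utf-8 -*-
# Minimal single-image sketch of the eye-replacement pipeline (assumed input: face.jpg)
import cv2
import dlib
from imutils import face_utils

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")

img = cv2.imread("face.jpg")      # assumed test photo
img_eye = cv2.imread("eye.png")   # replacement eye picture
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 0):
    points = face_utils.shape_to_np(predictor(gray, rect))
    # 0-based landmark ranges: 36-41 = right eye, 42-47 = left eye
    for start, end in ((36, 42), (42, 48)):
        hull = cv2.convexHull(points[start:end])
        x, y, w, h = cv2.boundingRect(hull)
        patch = cv2.resize(img_eye, (w, h), interpolation=cv2.INTER_NEAREST)
        img[y:y + h, x:x + w] = patch   # paste the resized eye picture over the eye region

cv2.imwrite("face_out.jpg", img)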
Model (Baidu netdisk):
Link: https://pan.baidu.com/s/1A8x-EJL73QxLZhSWIy1Atg
Extraction code: sr3p
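
If the netdisk link is inconvenient, the same shape_predictor_68_face_landmarks.dat is also distributed by dlib itself. Below is a sketch of downloading and unpacking it into the model/ directory; the dlib.net URL is an assumption about where the file is currently hosted.

# Optional: fetch the 68-point landmark model from dlib.net (URL assumed to still be live)
import bz2
import os
import urllib.request

url = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
os.makedirs("model", exist_ok=True)
dst = os.path.join("model", "shape_predictor_68_face_landmarks.dat")

archive, _ = urllib.request.urlretrieve(url)          # download the .bz2 archive to a temp file
with bz2.open(archive, "rb") as fin, open(dst, "wb") as fout:
    fout.write(fin.read())                            # decompress into model/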
Notes: press 's' to replace the eyes with the eye picture, press 'q' to remove the overlay, and press Esc to exit.
