Fatigue Detection (Yawning, Nodding, Blinking)

# sleeping.py
from ear import eye_aspect_ratio
from mar import mouth_aspect_ratio
from head import get_head_pose
import cv2
import dlib
from imutils import face_utils

eye1 = 0.25    # EAR threshold: below this the eye is treated as closed
eye2 = 2       # consecutive closed-eye frames that count as one blink
ecounter = 0   # consecutive closed-eye frame counter
etotal = 0     # total blinks
mouth1 = 0.5   # MAR threshold: above this the mouth is treated as open
mouth2 = 3     # consecutive open-mouth frames that count as one yawn
mcounter = 0   # consecutive open-mouth frame counter
mtotal = 0     # total yawns
head1 = 0.3    # head-pose pitch threshold: above this the head is treated as lowered
head2 = 3      # consecutive head-down frames that count as one nod
hcounter = 0   # consecutive head-down frame counter
htotal = 0     # total nods
# Pairs of cube-corner indices used to draw the 12 edges of the reprojected cube.
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
              [4, 5], [5, 6], [6, 7], [7, 4],
              [0, 4], [1, 5], [2, 6], [3, 7]]

print("loading~请稍后")
# 打开摄像头,0表示打开电脑内置摄像头,也可以是视频文件的路径???cap
cap = cv2.VideoCapture(0)
# 加载人脸检测模块,获得脸部位置检测器(画框)
detector = dlib.get_frontal_face_detector()
# 使用dlib.shape_predictor获得脸部特征位置检测器
predictor = dlib.shape_predictor('F:/fatigue1/shape_predictor_68_face_landmarks.dat')
# 建cv2摄像头对象,这里使用电脑自带摄像头,如果接了外部摄像头,则自动切换到外部摄像头


# The ranges are half-open [start, end); each lookup returns an index pair (a, b).
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["mouth"]

while True:
    # Grab a frame; ret is True/False depending on whether a frame was read.
    ret, frame = cap.read()
    if not ret:
        break
    # Convert the frame to grayscale.
    gray = cv2.cvtColor(frame, code=cv2.COLOR_BGR2GRAY)
    # Detect faces. The second argument is the number of upsampling passes:
    # larger values help detect small faces at the cost of speed.
    faces = detector(gray, 1)
    # Draw a box around each detected face.
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255))
        # Locate the facial landmarks inside the detected face region of the gray image.
        landmarks = predictor(gray, face)
        # Convert the dlib shape object to a 68x2 NumPy array of (x, y) coordinates.
        landmark = face_utils.shape_to_np(landmarks)
        # Draw each of the 68 landmark points.
        for n in range(0, 68):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            cv2.circle(frame, (x, y), 1, (0, 0, 255))
        leftEye = landmark[lStart:lEnd]
        rightEye = landmark[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        ear = (leftEAR + rightEAR) / 2.0

        mouth = landmark[mStart:mEnd]
        mar = mouth_aspect_ratio(mouth)
        print("ear is {},mar is {}".format(ear, mar))

        if ear < eye1:  # the eye is treated as closed when EAR drops below 0.25
            ecounter += 1

        else:
            # If the eye stayed closed for at least eye2 consecutive frames, count one blink.
            if ecounter >= eye2:
                etotal += 1
            # Reset the closed-eye frame counter.
            ecounter = 0

        # Overlay the face count, the blink counter, the EAR value and the blink total.
        cv2.putText(frame, "Faces: {}".format(len(faces)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "COUNTER: {}".format(ecounter), (150, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "Blinks: {}".format(etotal), (450, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)

        '''
            Yawn detection: when MAR exceeds the threshold the mouth is open; if it stays
            above the threshold for several consecutive frames (about 3), count one yawn.
        '''
        # Same logic as blink detection, applied to the mouth.
        if mar > mouth1:  # the mouth is treated as open when MAR exceeds 0.5
            mcounter += 1
            cv2.putText(frame, "Yawning!", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        else:
            # If the mouth stayed open for at least mouth2 (3) consecutive frames, count one yawn.
            if mcounter >= mouth2:
                mtotal += 1
            # Reset the open-mouth frame counter.
            mcounter = 0
        cv2.putText(frame, "COUNTER: {}".format(mcounter), (150, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "MAR: {:.2f}".format(mar), (300, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "Yawning: {}".format(mtotal), (450, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
        """
        瞌睡点头
        """
        reprojectdst, euler_angle = get_head_pose(landmark)
        har = euler_angle[0, 0]
        if har > head1:  # 点头阈值0.3
            hcounter += 1
        else:
            # 如果连续3次都小于阈值,则表示瞌睡点头一次
            if hcounter >= head2:  # 阈值:3
                htotal += 1
            # 重置点头帧计数器
            hcounter = 0

        # Draw the 12 edges of the reprojected cube.
        for start, end in line_pairs:
            starts = (int(reprojectdst[start][0]), int(reprojectdst[start][1]))
            ends = (int(reprojectdst[end][0]), int(reprojectdst[end][1]))
            cv2.line(frame, starts, ends, (0, 0, 255))
        # Display the Euler angles.
        cv2.putText(frame, "X: " + "{:7.2f}".format(euler_angle[0, 0]), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 255, 0), thickness=2)  # GREEN
        cv2.putText(frame, "Y: " + "{:7.2f}".format(euler_angle[1, 0]), (150, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (255, 0, 0), thickness=2)  # BLUE
        cv2.putText(frame, "Z: " + "{:7.2f}".format(euler_angle[2, 0]), (300, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 0, 255), thickness=2)  # RED
        cv2.putText(frame, "Nod: {}".format(htotal), (450, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)

        print('Mouth aspect ratio: {:.2f}'.format(mar) + "\tmouth open: " + str(mar > mouth1))
        print('Eye aspect ratio: {:.2f}'.format(ear) + "\teye closed: " + str(ear < eye1))

    cv2.imshow("img", frame)
    key = cv2.waitKey(1) & 0xFF  # a long delay here (e.g. 500 ms) would make blinks hard to catch
    if key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
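
The loop above only accumulates the three totals (etotal, mtotal, htotal). A minimal sketch of turning them into a single drowsiness warning is given below; the is_drowsy helper and its limit values are hypothetical and not part of the original code.

# drowsy_check.py (illustrative sketch, not part of the original project)

def is_drowsy(blink_total, yawn_total, nod_total,
              blink_limit=10, yawn_limit=3, nod_limit=3):
    # Hypothetical limits: flag drowsiness once any cumulative counter reaches its limit.
    return (blink_total >= blink_limit
            or yawn_total >= yawn_limit
            or nod_total >= nod_limit)

Inside the main loop, right after the counters are updated, one could call is_drowsy(etotal, mtotal, htotal) and, if it returns True, overlay a warning with cv2.putText before cv2.imshow.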
# ear.py

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # Euclidean distances between the two pairs of vertical eye landmarks.
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # Euclidean distance between the horizontal eye landmarks (the eye corners).
    C = dist.euclidean(eye[0], eye[3])
    # Eye aspect ratio: sum of vertical distances over twice the horizontal distance.
    ear = (A + B) / (2.0 * C)
    return ear
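
As a quick sanity check of the formula, the synthetic coordinates below (made-up points, not real landmarks) give EAR = (1 + 1) / (2 * 4) = 0.25 for an "open" eye, and the value drops to 0 once the vertical distances collapse.

# ear_demo.py (illustrative sketch, not part of the original project)
import numpy as np
from ear import eye_aspect_ratio

# An "open" eye: 4 px wide, with both vertical landmark pairs 1 px apart.
open_eye = np.array([[0, 0], [1, 0.5], [3, 0.5], [4, 0], [3, -0.5], [1, -0.5]])
print(eye_aspect_ratio(open_eye))   # (1 + 1) / (2 * 4) = 0.25

# A "closed" eye: the vertical distances collapse, so EAR goes to 0.
closed_eye = open_eye.copy()
closed_eye[:, 1] = 0
print(eye_aspect_ratio(closed_eye))  # 0.0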
# head.py
import numpy as np
import cv2
import math

# 3D model points of 14 facial landmarks (world coordinates), in the same order as the
# 2D image points used in get_head_pose (dlib 68-point indices in brackets).
object_pts = np.float32([[6.825897, 6.760612, 4.402142],    # [17] left brow, left corner
                         [1.330353, 7.122144, 6.903745],    # [21] left brow, right corner
                         [-1.330353, 7.122144, 6.903745],   # [22] right brow, left corner
                         [-6.825897, 6.760612, 4.402142],   # [26] right brow, right corner
                         [5.311432, 5.485328, 3.987654],    # [36] left eye, left corner
                         [1.789930, 5.393625, 4.413414],    # [39] left eye, right corner
                         [-1.789930, 5.393625, 4.413414],   # [42] right eye, left corner
                         [-5.311432, 5.485328, 3.987654],   # [45] right eye, right corner
                         [2.005628, 1.409845, 6.165652],    # [31] nose, left
                         [-2.005628, 1.409845, 6.165652],   # [35] nose, right
                         [2.774015, -2.080775, 5.048531],   # [48] mouth, left corner
                         [-2.774015, -2.080775, 5.048531],  # [54] mouth, right corner
                         [0.000000, -3.116408, 6.097667],   # [57] mouth, bottom center
                         [0.000000, -7.415691, 4.070434]])  # [8] chin
# Camera intrinsics, equivalent to the matrix [fx, 0, cx; 0, fy, cy; 0, 0, 1]
K = [6.5308391993466671e+002, 0.0, 3.1950000000000000e+002,
     0.0, 6.5308391993466671e+002, 2.3950000000000000e+002,
     0.0, 0.0, 1.0]
# Lens distortion coefficients [k1, k2, p1, p2, k3]
D = [7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000]
# Pack the intrinsics and distortion coefficients into the shapes OpenCV expects.
cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)
# Corners of a 10x10x10 cube in world coordinates, reprojected to visualize the head pose.
reprojectsrc = np.float32([[10.0, 10.0, 10.0],
                           [10.0, 10.0, -10.0],
                           [10.0, -10.0, -10.0],
                           [10.0, -10.0, 10.0],
                           [-10.0, 10.0, 10.0],
                           [-10.0, 10.0, -10.0],
                           [-10.0, -10.0, -10.0],
                           [-10.0, -10.0, 10.0]])
# Pairs of cube-corner indices forming the 12 edges of the cube.
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
              [4, 5], [5, 6], [6, 7], [7, 4],
              [0, 4], [1, 5], [2, 6], [3, 7]]

def get_head_pose(shape):  # head pose estimation
    # 2D image points (pixel coordinates); indices follow the 68-point annotation at
    # https://ibug.doc.ic.ac.uk/resources/300-W/
    # 17 left brow left corner / 21 left brow right corner / 22 right brow left corner /
    # 26 right brow right corner / 36 left eye left corner / 39 left eye right corner /
    # 42 right eye left corner / 45 right eye right corner / 31 nose left / 35 nose right /
    # 48 mouth left corner / 54 mouth right corner / 57 mouth bottom center / 8 chin
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35],
                            shape[48], shape[54], shape[57], shape[8]])
    # solvePnP recovers the pose: rotation_vec is the rotation vector and translation_vec the
    # translation vector; cam_matrix corresponds to K and dist_coeffs to D above.
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)
    # Reproject the cube corners with the estimated pose (3D points, intrinsics, distortion,
    # r and t in; reprojected 2D points out) so the pose can be drawn on the frame.
    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix, dist_coeffs)
    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))  # 8 rows x 2 columns

    # Compute the Euler angles; see
    # https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#decomposeprojectionmatrix
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)  # Rodrigues: rotation vector -> rotation matrix
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))  # horizontal concatenation (vconcat is vertical)
    # decomposeProjectionMatrix splits the projection matrix into a camera matrix, a rotation
    # matrix and, among other outputs, the Euler angles (in degrees).
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    # Wrap each angle into [-90, 90] degrees for printing.
    pitch, yaw, roll = [math.radians(a[0]) for a in euler_angle]
    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))
    print('pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll))

    return reprojectdst, euler_angle  # reprojected cube corners, Euler angles
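
The Euler angles come straight from decomposeProjectionMatrix, so a round-trip test is an easy way to check the conventions: project object_pts with a known zero rotation, feed the projected points back through get_head_pose, and the recovered angles should be close to zero. The script below is only an illustrative sketch and assumes head.py is importable as written above.

# head_demo.py (illustrative sketch, not part of the original project)
import numpy as np
import cv2
from head import object_pts, cam_matrix, dist_coeffs, get_head_pose

# Project the 3D model points with a known pose: no rotation, 50 units in front of the camera.
rvec = np.zeros((3, 1), dtype=np.float32)
tvec = np.array([[0.0], [0.0], [50.0]], dtype=np.float32)
pts2d, _ = cv2.projectPoints(object_pts, rvec, tvec, cam_matrix, dist_coeffs)
pts2d = pts2d.reshape(-1, 2)

# Scatter the projected points into the 14 landmark slots that get_head_pose reads;
# the remaining slots of the 68-point array are unused and can stay at zero.
used = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35, 48, 54, 57, 8]
shape = np.zeros((68, 2), dtype=np.float32)
shape[used] = pts2d

_, euler_angle = get_head_pose(shape)
print(euler_angle.ravel())  # expected to be close to (0, 0, 0)

Repeating the projection with a small non-zero rvec is a simple way to confirm which sign of euler_angle[0, 0] corresponds to the head tilting forward, which is what the har > head1 check in sleeping.py relies on.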
# mar.py
import numpy as np

def mouth_aspect_ratio(mouth):
    # mouth is the 20-point mouth slice (dlib points 48-67); comments give 1-based numbers.
    A = np.linalg.norm(mouth[2] - mouth[10])  # 51, 59: left vertical lip pair
    B = np.linalg.norm(mouth[4] - mouth[8])   # 53, 57: right vertical lip pair
    C = np.linalg.norm(mouth[0] - mouth[6])   # 49, 55: mouth corners (horizontal width)
    mar = (A + B) / (2.0 * C)
    return mar
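
Similarly, a synthetic mouth makes the ratio easy to verify: with the corner-to-corner width set to 6 and both vertical distances set to 3, MAR comes out to exactly 0.5, which is the yawn threshold used in sleeping.py. The coordinates below are made up for illustration only.

# mar_demo.py (illustrative sketch, not part of the original project)
import numpy as np
from mar import mouth_aspect_ratio

# Only indices 0, 2, 4, 6, 8 and 10 of the 20 mouth points are used by the ratio.
mouth = np.zeros((20, 2))
mouth[0], mouth[6] = (0, 0), (6, 0)         # mouth corners: horizontal width 6
mouth[2], mouth[10] = (2, 1.5), (2, -1.5)   # left vertical pair: distance 3
mouth[4], mouth[8] = (4, 1.5), (4, -1.5)    # right vertical pair: distance 3
print(mouth_aspect_ratio(mouth))  # (3 + 3) / (2 * 6) = 0.5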

