Frontal Face Detection Algorithm

(1) Early phase: face detection, facial landmark detection, closed-eye detection, sharpness checking, and orientation-angle detection for single and multiple faces (a minimal single-image sketch follows this list);
(2) Middle phase: single-face cropping, automatic cropping and saving of multiple faces, integrating image reading and writing, and drawing an adaptive rounded-corner face box;
(3) Late phase: debugging the exhibition-hall cameras on site, testing and tuning the results, switching to multi-process execution, and reducing the running time.
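Before the full camera pipeline below, here is a minimal single-image sketch of the phase-one checks (face detection, 68-point landmarks, nose-bridge angle, and eye aspect ratio). It assumes the same shape_predictor_68_face_landmarks.dat model file; the image name test.jpg is only a placeholder, the atan2-based angle is an equivalent shortcut for the law-of-cosines version used in the full script, and the 10-degree / 0.18 thresholds mirror the constants defined there.

import math

import cv2 as cv
import dlib
from scipy.spatial import distance


def eye_aspect_ratio(eye):
    # EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|); small values indicate closed eyes
    a = distance.euclidean(eye[1], eye[5])
    b = distance.euclidean(eye[2], eye[4])
    c = distance.euclidean(eye[0], eye[3])
    return (a + b) / (2.0 * c)


detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

img = cv.imread("test.jpg")  # placeholder image path
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

for face in detector(gray, 1):
    shape = predictor(gray, face)
    # Nose-bridge angle: landmark 27 (bridge top) to landmark 30 (nose tip);
    # 0 degrees means the bridge is vertical, i.e. an upright frontal face
    dx = shape.part(30).x - shape.part(27).x
    dy = shape.part(30).y - shape.part(27).y
    ang = abs(math.degrees(math.atan2(dx, dy)))
    left = [(shape.part(n).x, shape.part(n).y) for n in range(36, 42)]
    right = [(shape.part(n).x, shape.part(n).y) for n in range(42, 48)]
    ear = (eye_aspect_ratio(left) + eye_aspect_ratio(right)) / 2
    print("angle:", round(ang, 1), "EAR:", round(ear, 2),
          "frontal:", ang < 10, "eyes open:", ear >= 0.18)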

import math
import multiprocessing
import os
import time

import cv2 as cv
import dlib
import numpy as np
from PIL import Image
from scipy.spatial import distance

pixel = 240       # minimum width/height (pixels) a cropped face must have to be saved
face_angle = 10   # maximum nose-bridge angle (degrees) still accepted as a frontal face
def face_detect(i):
    print(i, 'running')
    # Open camera i (CAP_DSHOW avoids long start-up delays on Windows)
    capture_usb = cv.VideoCapture(i, cv.CAP_DSHOW)
    if capture_usb.isOpened():
        capture_usb.set(cv.CAP_PROP_FRAME_WIDTH, 3840)
        capture_usb.set(cv.CAP_PROP_FRAME_HEIGHT, 2160)
        scale = 2  # detection runs on a half-size grayscale image to save time
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        # Read frames continuously; process one frame every 5 seconds
        now = int(time.time())
        while True:
            read_code2, frame = capture_usb.read()
            if not read_code2:
                continue  # skip iterations where no frame could be read
            now2 = int(time.time())
            if now2 - now >= 5:  # process one frame every 5 seconds
                now = now2

                # draw_border: rounded-corner face box made of straight edge segments
                # of length d plus a quarter ellipse of radius r at each corner
                def draw_border(img, pt1, pt2, color, thickness, r, d):
                    x1, y1 = pt1
                    x2, y2 = pt2
                    # Top left
                    cv.line(img, (x1 + r, y1), (x1 + r + d, y1), color, thickness)
                    cv.line(img, (x1, y1 + r), (x1, y1 + r + d), color, thickness)
                    cv.ellipse(img, (x1 + r, y1 + r), (r, r), 180, 0, 90, color, thickness)

                    # Top right
                    cv.line(img, (x2 - r, y1), (x2 - r - d, y1), color, thickness)
                    cv.line(img, (x2, y1 + r), (x2, y1 + r + d), color, thickness)
                    cv.ellipse(img, (x2 - r, y1 + r), (r, r), 270, 0, 90, color, thickness)

                    # Bottom left
                    cv.line(img, (x1 + r, y2), (x1 + r + d, y2), color, thickness)
                    cv.line(img, (x1, y2 - r), (x1, y2 - r - d), color, thickness)
                    cv.ellipse(img, (x1 + r, y2 - r), (r, r), 90, 0, 90, color, thickness)

                    # Bottom right
                    cv.line(img, (x2 - r, y2), (x2 - r - d, y2), color, thickness)
                    cv.line(img, (x2, y2 - r), (x2, y2 - r - d), color, thickness)
                    cv.ellipse(img, (x2 - r, y2 - r), (r, r), 0, 0, 90, color, thickness)
                    return img

                # Closed-eye detection via the eye aspect ratio (EAR)
                def calculate_EAR(eye):
                    A = distance.euclidean(eye[1], eye[5])
                    B = distance.euclidean(eye[2], eye[4])
                    C = distance.euclidean(eye[0], eye[3])
                    ear_aspect_ratio = (A + B) / (2.0 * C)
                    return ear_aspect_ratio

                # Angle of the nose-bridge segment, used for the frontal-face check
                def cal_ang(point_1, point_2, point_3):
                    a = math.sqrt(
                        (point_2[0] - point_3[0]) * (point_2[0] - point_3[0]) + (point_2[1] - point_3[1]) * (
                                point_2[1] - point_3[1]))
                    b = math.sqrt(
                        (point_1[0] - point_3[0]) * (point_1[0] - point_3[0]) + (point_1[1] - point_3[1]) * (
                                point_1[1] - point_3[1]))
                    c = math.sqrt(
                        (point_1[0] - point_2[0]) * (point_1[0] - point_2[0]) + (point_1[1] - point_2[1]) * (
                                point_1[1] - point_2[1]))
                    B = round(math.degrees(math.acos((b * b - a * a - c * c) / (-2 * a * c))), 0)
                    return B
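                # cal_ang returns the angle at point_2 by the law of cosines:
                # B = acos((a^2 + c^2 - b^2) / (2*a*c)).
                # Below it is called as cal_ang((x1, y2), (x1, y1), (x2, y2)), i.e. the
                # angle between the nose-bridge segment (landmark 27 -> landmark 30)
                # and the vertical through landmark 27, so an upright frontal face
                # gives a value close to 0 degrees.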

                # Sharpness check: variance of the Laplacian (higher = sharper)
                def getImageVar(image):
                    # image = cv.imread(imgPath)
                    img2gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
                    imageVar = cv.Laplacian(img2gray, cv.CV_64F).var()
                    return imageVar
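                # The variance of the Laplacian is a common blur metric: the sharper
                # the image, the larger the variance. getImageVar is defined here but
                # only referenced in commented-out code below; a typical use (the
                # threshold value is an assumption, not from the source) would be:
                #     if getImageVar(frame) < 100:
                #         print('image too blurry, skip')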

                gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                height, width = gray.shape[:2]  # shape is (height, width[, channels])
                # Run detection on a downscaled grayscale copy to cut detection time
                small = cv.resize(gray, (width // scale, height // scale))
                print("detection image size:", small.shape[:2])

                start = time.perf_counter()
                faces = detector(small, 1)
                end1 = time.perf_counter()
                runTime1 = end1 - start
                print("face detection time:", runTime1)

                i = 0  # counter for face crops saved from this frame
                for face in faces:
                    # Landmarks are computed on the same downscaled image the detector
                    # ran on, so the detection box and landmarks share one coordinate scale
                    shape = predictor(small, face)
                    # Landmark 27 = top of the nose bridge, landmark 30 = nose tip
                    x1, y1, x2, y2 = shape.part(27).x, shape.part(27).y, shape.part(30).x, shape.part(30).y
                    ang = cal_ang((x1, y2), (x1, y1), (x2, y2))
                    if ang < face_angle:
                        print(ang, 'frontal check passed')
                    else:
                        print(ang, 'not frontal')
                    leftEye = []
                    rightEye = []
                    # Left eye: landmarks 36-41
                    for n in range(36, 42):
                        x = shape.part(n).x
                        y = shape.part(n).y
                        leftEye.append((x, y))
                        next_point = n + 1
                        if n == 41:  # wrap around so the eye outline closes
                            next_point = 36
                        x2 = shape.part(next_point).x
                        y2 = shape.part(next_point).y
                        # cv.line(frame, (x, y), (x2, y2), (0, 255, 0), 1)  # outline the left eye

                    # Right eye: landmarks 42-47
                    for n in range(42, 48):
                        x = shape.part(n).x
                        y = shape.part(n).y
                        rightEye.append((x, y))
                        next_point = n + 1
                        if n == 47:  # wrap around so the eye outline closes
                            next_point = 42
                        x2 = shape.part(next_point).x
                        y2 = shape.part(next_point).y
                        # cv.line(frame, (x, y), (x2, y2), (0, 255, 0), 1)  # outline the right eye

                    # Eye aspect ratio for each eye, then the average of the two
                    left_ear = calculate_EAR(leftEye)
                    right_ear = calculate_EAR(rightEye)
                    EAR = round((left_ear + right_ear) / 2, 2)
                    if EAR < 0.28:  # below this threshold the eyes are treated as closed
                        print("eyes closed")
                    else:
                        print("eyes open")
                    print(EAR)
                    # Save the face only if the eyes are open enough and the face is frontal
                    if EAR >= 0.18 and ang < face_angle:
                        x1, y1 = face.left(), face.top()
                        x2, y2 = face.right(), face.bottom()  # face box in downscaled coordinates

                        # Expand the box (extra margin above the head) and map it back
                        # to full-resolution coordinates by multiplying by scale
                        a1 = int(round(1.5 * x1 - 0.5 * x2, 0) * scale)
                        b1 = int(round(1.75 * y1 - 0.75 * y2, 0) * scale)
                        a2 = int(round(1.5 * x2 - 0.5 * x1, 0) * scale)
                        b2 = int(round(1.25 * y2 - 0.25 * y1, 0) * scale)

                        img3 = frame
                        img4 = Image.fromarray(cv.cvtColor(img3, cv.COLOR_BGR2RGB))
                        if a1 < 0 or a2 > img3.shape[1] or b1 < 0 or b2 > img3.shape[0]:
                            # The expanded box falls outside the original frame; skip this face
                            print('face box exceeds the frame, not saved')
                        else:
                            region = img4.crop((a1, b1, a2, b2))
                            # Keep only crops whose width and height exceed the pixel threshold
                            if region.size[0] > pixel and region.size[1] > pixel:
                                i += 1
                                t = int(time.time())  # timestamp used in the file name
                                save_path = 'D:/Video_faceswap_control/app/static/data/' + str(t) + str(i) + '.jpg'
                                region.save(save_path)  # write the cropped face to disk
                                # Optional: draw the adaptive rounded-corner face box on the full frame
                                # fig = draw_border(frame, (a1, b1), (a2, b2), (255, 90, 0),
                                #                   (a2 - a1) // 18, (a2 - a1) // 8, (a2 - a1) // 8)
                                print('saved face crop to:', save_path)
                            else:
                                print(region.size[0], region.size[1], 'crop too small, not saved')
                end3 = time.perf_counter()
                runTime3 = end3 - end1
                print("landmark and crop stage time:", runTime3)

        # Release the camera (reached only if the loop above is ever exited)
        capture_usb.release()
        cv.destroyAllWindows()


if __name__ == '__main__':
    # Start one child process per camera:
    # multiprocessing.Process() creates the child process, start() launches it
    for i in range(4):
        p = multiprocessing.Process(target=face_detect, args=(i,))
        p.start()
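Each child process runs face_detect on one camera index (0 to 3) and loops indefinitely, so the parent simply starts the four workers and returns from the loop. As a variant (an assumption, not part of the original script), the handles can be kept so the parent waits for every camera worker:

if __name__ == '__main__':
    procs = [multiprocessing.Process(target=face_detect, args=(idx,)) for idx in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()  # the parent blocks here until all camera workers exit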
