Video Face Detection: Two Approaches with OpenCV and dlib

Still-image face detection: https://blog.csdn.net/Lee_01/article/details/89140668

1. Using OpenCV's Haar cascade detection model

The code is as follows:

import sys
import cv2


def _help():
    print("Usage:")
    print("     python video_face_detect_cv.py")
    print("     python video_face_detect_cv.py <path of a video>")
    print("For example:")
    print("     python video_face_detect_cv.py video/lee.mp4")
    print("If the path of a video is not provided, the camera will be used as the input.Press q to quit.")


def _face_detect(color_image, detector):
    # Convert the color frame to grayscale for detection
    gray_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    # Detect faces with the cascade classifier
    face_rects = detector.detectMultiScale(gray_image, scaleFactor=1.2, minNeighbors=3, minSize=(100, 100))

    color = (0, 0, 255)  # draw in red
    line_width = 2  # rectangle line width
    for face_rect in face_rects:
        x, y, w, h = face_rect  # (x, y) is the top-left corner; w and h are the width and height
        cv2.rectangle(color_image, (x, y), (x + w, y + h), color, line_width)

    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", color_image)


def face_detect(video_path=0):
    """
    Approach: split the video into frames, detect and mark the face positions on each frame,
    and show the processed frames to the user one by one.
    """
    # Load the OpenCV face detection cascade classifier
    detector = cv2.CascadeClassifier("opencv-master/data/haarcascades/haarcascade_frontalface_default.xml")
    # Open the video file or the camera stream
    cap = cv2.VideoCapture(video_path)
    # Process and display the stream frame by frame
    while True:
        ret, img = cap.read()
        if img is None:
            break
        _face_detect(img, detector)
        # Press "q" to break out of the loop and stop the detection
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()  # release the video capture
    cv2.destroyAllWindows()


if len(sys.argv) > 2 or "-h" in sys.argv or "--help" in sys.argv:
    _help()
elif len(sys.argv) == 2:
    face_detect(sys.argv[1])
else:
    face_detect()

Detection result: the speed is decent.
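
To put a number on "decent," a minimal timing sketch like the one below can report the average per-frame detection cost. It is not part of the original script; it simply reuses the cascade path and the sample video path assumed earlier in this article:

import time
import cv2

detector = cv2.CascadeClassifier("opencv-master/data/haarcascades/haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture("video/lee.mp4")  # sample video path from the usage examples above

frame_count, total_time = 0, 0.0
while True:
    ret, img = cap.read()
    if img is None:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    start = time.perf_counter()
    detector.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(100, 100))
    total_time += time.perf_counter() - start  # accumulate detection time only
    frame_count += 1
cap.release()

if frame_count:
    print("average detection time per frame: %.1f ms" % (1000 * total_time / frame_count))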

2. Using dlib (frontal face detector and the shape_predictor_68_face_landmarks model)

The code is as follows:

import sys
import cv2
import dlib


def _help():
    print("Usage:")
    print("     python video_face_detect_dlib.py")
    print("     python video_face_detect_dlib.py <path of a video>")
    print("For example:")
    print("     python video_face_detect_dlib.py video/lee.mp4")
    print("If the path of a video is not provided, the camera will be used as the input.Press q to quit.")


def _face_detect(color_image, detector):
    gray_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    # img_row, img_col = gray_image.shape[:2]
    # scale = 0.3
    # gray_image = cv2.resize(gray_image, (int(scale * img_col), int(scale * img_row)))
    faces = detector(gray_image, 1)
    for face in faces:
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(color_image, (left, top), (right, bottom), (0, 255, 0), 2)
    # Show the frame outside the loop so it is displayed even when no face is found
    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", color_image)


def face_detect(video_path=0):
    detector = dlib.get_frontal_face_detector()
    cap = cv2.VideoCapture(video_path)
    while True:
        ret, img = cap.read()
        if img is None:
            break
        _face_detect(img, detector)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if len(sys.argv) > 2 or "-h" in sys.argv or "--help" in sys.argv:
    _help()
elif len(sys.argv) == 2:
    face_detect(sys.argv[1])
else:
    face_detect()

Detection result: noticeably slower than OpenCV. One option is to start an extra background thread just for reading the video, as in the improved code further below.
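
A complementary speed-up, hinted at by the commented-out resize lines above, is to run the detector on a downscaled copy of the frame and scale the resulting boxes back up before drawing (as written, those commented-out lines would leave the rectangles misaligned with the full-size image). A minimal sketch, with a hypothetical helper name:

import cv2
import dlib


def _face_detect_scaled(color_image, detector, scale=0.5):
    # Detect on a downscaled grayscale copy, then map the boxes back to full size
    gray_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray_image, (0, 0), fx=scale, fy=scale)
    faces = detector(small, 0)  # no upsampling; the downscaled frame is assumed to be large enough
    for face in faces:
        left, top = int(face.left() / scale), int(face.top() / scale)
        right, bottom = int(face.right() / scale), int(face.bottom() / scale)
        cv2.rectangle(color_image, (left, top), (right, bottom), (0, 255, 0), 2)
    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", color_image)

It can be dropped in as a replacement for _face_detect in the loop above, trading some accuracy on small faces for speed.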

The improved code with a background reader thread:

from threading import Thread
import threading
import sys
import cv2
import dlib


def _help():
    print("Usage:")
    print("     python video_face_detect_dlib.py")
    print("     python video_face_detect_dlib.py <path of a video>")
    print("For example:")
    print("     python video_face_detect_dlib.py video/lee.mp4")
    print("If the path of a video is not provided, the camera will be used as the input.Press q to quit.")


def _face_detect(color_image, detector):
    gray_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    # img_row, img_col = gray_image.shape[:2]
    # scale = 0.3
    # gray_image = cv2.resize(gray_image, (int(scale * img_col), int(scale * img_row)))
    faces = detector(gray_image, 1)
    for face in faces:
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(color_image, (left, top), (right, bottom), (0, 255, 0), 2)
    # Show the frame outside the loop so it is displayed even when no face is found
    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", color_image)


frame = None
lock = threading.RLock()


def get_frame(video_path):
    cap = cv2.VideoCapture(video_path)
    global frame
    while True:
        _, img = cap.read()
        if img is None:
            break
        else:
            lock.acquire()
            frame = img
            lock.release()
    cap.release()


def face_detect(video_path=0):
    detector = dlib.get_frontal_face_detector()
    # Start a background thread that keeps reading the video stream and saves each frame to the global variable `frame`
    t = Thread(target=get_frame, name="get_video_stream", args=(video_path,))
    t.daemon = True
    t.start()
    while True:
        if frame is not None:
            # Run face detection on the most recent frame stored in `frame`
            lock.acquire()
            frame_copy = frame.copy()
            lock.release()
            _face_detect(frame_copy, detector)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()


if len(sys.argv) > 2 or "-h" in sys.argv or "--help" in sys.argv:
    _help()
elif len(sys.argv) == 2:
    face_detect(sys.argv[1])
else:
    face_detect()

This version also draws all 68 landmark points, and uses the imutils library to simplify reading the video stream and extracting the landmark coordinates.

from imutils.video import FileVideoStream
from imutils.video import VideoStream
import imutils
import dlib
import cv2
import sys


def _help():
    print("Usage:")
    print("     python landmark_detect.py")
    print("     python landmark_detect.py <path of a video>")
    print("For example:")
    print("     python landmark_detect.py video/lee.mp4")
    print("If the path of a video is not provided, the camera will be used as the input.Press q to quit.")


def landmark_detection(vs, file_stream):
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("model/shape_predictor_68_face_landmarks.dat")

    print("[INFO] starting video stream thread...")
    while True:
        # if this is a file video stream, then we need to check if
        # there are any more frames left in the buffer to process
        if file_stream and not vs.more():
            break

        frame = vs.read()
        if frame is not None:
            frame = imutils.resize(frame, width=450)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 0)

            for rect in rects:
                shape = predictor(gray, rect)
                # shape = face_utils.shape_to_np(shape)
                for idx, pt in enumerate(shape.parts()):
                    pt_pos = (pt.x, pt.y)
                    cv2.circle(frame, pt_pos, 1, (0, 0, 255), 1)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(frame, str(idx + 1), pt_pos, font, 0.2, (255, 0, 0), 1, cv2.LINE_AA)

            cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

    cv2.destroyAllWindows()
    vs.stop()


if len(sys.argv) > 2 or "-h" in sys.argv or "--help" in sys.argv:
    _help()
elif len(sys.argv) == 2:
    vs = FileVideoStream(sys.argv[1]).start()
    file_stream = True
    landmark_detection(vs, file_stream)
else:
    vs = VideoStream(src=0).start()
    file_stream = False
    landmark_detection(vs, file_stream)
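
The commented-out `face_utils.shape_to_np(shape)` line above hints at a convenient next step: imutils can convert the dlib landmark object into a NumPy array of 68 (x, y) pairs, and it also knows the standard index ranges for each facial region. A minimal sketch, assuming imutils is installed (the helper name eye_points is hypothetical, not part of the code above):

from imutils import face_utils


def eye_points(shape):
    # `shape` is the dlib full_object_detection returned by predictor(gray, rect)
    points = face_utils.shape_to_np(shape)  # NumPy array with shape (68, 2)
    l_start, l_end = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    r_start, r_end = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    return points[l_start:l_end], points[r_start:r_end]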

 

References:

http://www.cnblogs.com/vipstone/p/8933916.html

http://www.cnblogs.com/vipstone/p/9026767.html
