Real-time face detection with OpenCV + Python

 

1. Face detection on a static image

import numpy as np
import cv2 as cv

# Load the Haar cascade classifiers for frontal faces and eyes
path = 'haarcascade_frontalface_default.xml'
face_cascade = cv.CascadeClassifier(path)
path = 'haarcascade_eye.xml'
eye_cascade = cv.CascadeClassifier(path)


# Face detection on a static image
img = cv.imread('test.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)


# Detect faces (scaleFactor=1.3, minNeighbors=5), then search each face ROI for eyes
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
    cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex,ey,ew,eh) in eyes:
        cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv.imshow('img',img)
cv.waitKey(0)
cv.destroyAllWindows()
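The cascade XML files above are assumed to sit next to the script. If OpenCV was installed through pip (the opencv-python package), the Haar cascade files are bundled with the package and can be located through cv.data.haarcascades, so the paths do not have to be copied by hand. The snippet below is a minimal sketch of that approach; it assumes the cv.data attribute is available in your build, and it also checks empty() so a bad path fails loudly instead of silently detecting nothing.

import cv2 as cv

# cv.data.haarcascades is the cascade directory shipped with the opencv-python wheel
# (assumption: this attribute exists in your OpenCV build)
cascade_dir = cv.data.haarcascades
face_cascade = cv.CascadeClassifier(cascade_dir + 'haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier(cascade_dir + 'haarcascade_eye.xml')

# empty() is True when the XML file could not be loaded (e.g. wrong path)
if face_cascade.empty() or eye_cascade.empty():
    raise IOError('Failed to load the Haar cascade XML files')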

 

2. Real-time face detection on video, with saving

 

# Real-time face detection from the webcam, with video recording

import numpy as np
import cv2 as cv


path = 'haarcascade_frontalface_default.xml'
face_cascade = cv.CascadeClassifier(path)
path = 'haarcascade_eye.xml'
eye_cascade = cv.CascadeClassifier(path)


# 1. Source: a video file
# cap = cv.VideoCapture('/Users/admin/opencv-4.0.0/samples/data/vtest.avi')
# 2. Source: the webcam
cap = cv.VideoCapture(0)
print(cap.isOpened())
count = 0


# Video writer settings (frame size, frame rate, codec)
sz = (int(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
      int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)))
fps = 5
# fourcc = cv.VideoWriter_fourcc('m', 'p', '4', 'v')
# fourcc = cv.VideoWriter_fourcc('m', 'p', 'e', 'g')
fourcc = cv.VideoWriter_fourcc(*'mpeg')
# Open the writer and set its properties
vout = cv.VideoWriter()
vout.open('output2.mp4', fourcc, fps, sz, True)
 

while True:
    count += 1
    ret, img = cap.read()
    if not ret:  # no frame returned: the stream ended or the camera failed
        break
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # cv.imshow('FRAME', gray)
    # cv.imwrite('FRAME_%d.png' % count, gray)
    
    
    # Detect faces in the grayscale frame, then search each face ROI for eyes
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex,ey,ew,eh) in eyes:
            cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,255),2)
    cv.imshow('img',img)
    vout.write(img)
    
    
    if cv.waitKey(1) & 0xFF == ord('q'):  # press 'q' to stop
        break
cap.release()
vout.release()
cv.destroyAllWindows()
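One thing to watch in the recording code above is the hard-coded fps = 5: if the camera actually delivers frames faster or slower than that, output2.mp4 will play back at the wrong speed. Below is a minimal sketch of a more robust writer setup; it queries the camera's reported frame rate through CAP_PROP_FPS and falls back to an assumed 25 fps when the driver reports 0 (both the fallback value and the 'mp4v' codec choice are assumptions to adjust for your camera and container).

import cv2 as cv

cap = cv.VideoCapture(0)

# Some webcams report 0 for CAP_PROP_FPS; fall back to a guessed rate
fps = cap.get(cv.CAP_PROP_FPS)
if fps <= 0:
    fps = 25.0  # assumption, not a measured value

sz = (int(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
      int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)))
fourcc = cv.VideoWriter_fourcc(*'mp4v')  # MP4-friendly codec (assumption)
vout = cv.VideoWriter('output2.mp4', fourcc, fps, sz, True)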

 
