I'm in my final year now. For last semester's on-campus internship, my group's project was a facial emotion recognition system. While researching it I found that a lot of the material out there is either hard to follow, or the CSDN experts are apparently so brilliant and so strapped for cash that two sentences in they drop a "download link here" and charge you 5 or 15 credits for it. I wouldn't dare say a word. In any case, this post is purely for learning and exchange, and everything in it is entry-level, so the image sets and code involved are all public and free to download. Enough rambling, let's get into it.
------------------------------------------
Update (2023.2.20):
I have built a new interface framework that works better than the one in this post. The link is here: Pyqt5+opencv实现的情绪识别界面框架_小马哥得挣钱呀的博客-CSDN博客
------------------------------------------
The environment used for this experiment is listed below (a quick version-check snippet follows the list):
OS: Ubuntu 18.04 (how do you set up a dual-boot system?)
Python: 3.6.9
PyCharm: 2019.3.1
OpenCV: 3.4.0
TensorFlow: 1.14.0
Keras: 1.18.0
PyQt5: 5.14.0 (no need to install it yet; the steps come later)
NumPy: 2.3.1
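Before going any further, it is worth confirming what is actually installed. A minimal check script (it only prints whatever versions the installed packages report, nothing project-specific):

import sys
import cv2
import numpy
import tensorflow
import keras
from PyQt5.QtCore import PYQT_VERSION_STR

print('Python     :', sys.version.split()[0])
print('OpenCV     :', cv2.__version__)
print('NumPy      :', numpy.__version__)
print('TensorFlow :', tensorflow.__version__)
print('Keras      :', keras.__version__)
print('PyQt5      :', PYQT_VERSION_STR)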
Demo video of the finished result
Emotion recognition system demo built with OpenCV
With the environment set up, we can start the experiment and build the system. Most of this experiment relies on oarriaga's open-source face_classification project on GitHub (GitHub - oarriaga/face_classification: Real-time face detection and emotion/gender classification using fer2013/imdb datasets with a keras CNN model and openCV.). Just download it directly; if GitHub is too slow for you, I have also uploaded the archive to the group chat I created.
After the download finishes, unzip it and open the project in PyCharm.
You can see the project directory layout here. Open video_emotion_color_demo.py and you will find that this demo can be run directly as-is.
So the main work in this experiment is combining this demo with a PyQt5 interface.
Now that the goal is clear, let's move on to the PyQt5 part.
What is PyQt5
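PyQt5 is the set of Python bindings for the Qt 5 application framework, and it is what we use here to build the desktop interface. As a quick, self-contained illustration of the basic workflow (a minimal sketch for orientation only, not part of the project code), the following creates a window with one button whose click is connected to a handler:

import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton

class DemoWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle('PyQt5 demo')
        # Connect the button's clicked signal to a Python method (the "slot")
        self.button = QPushButton('Click me')
        self.button.clicked.connect(self.on_click)
        self.setCentralWidget(self.button)

    def on_click(self):
        self.button.setText('Clicked!')

if __name__ == '__main__':
    app = QApplication(sys.argv)   # every PyQt5 program needs one QApplication
    window = DemoWindow()
    window.show()
    sys.exit(app.exec_())          # start the Qt event loop

This widget.clicked.connect(handler) signal/slot pattern is exactly how the three buttons of the emotion recognition window are wired up later in this post.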
After the steps above you should have a basic grasp of how to use PyQt5; the next question is how to combine PyQt5 with this project.
First, right-click the target directory to launch Qt Designer and design a window with three buttons and one QLabel.
Right-click each widget to set its text (what is displayed) and its objectName (the "id" used when connecting the click events).
Once the layout is done, convert the .ui file into a .py file.
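The conversion is normally done with the pyuic5 tool that ships with PyQt5, for example pyuic5 mainwindow.ui -o test.py (the file names are placeholders for whatever you saved from Qt Designer). If you prefer to stay inside Python, the same thing can be done with PyQt5's uic module; a small sketch under the same file-name assumptions:

from PyQt5 import uic

# Compile the Qt Designer .ui file into a Python module containing the Ui_MainWindow class
with open('mainwindow.ui') as ui_file, open('test.py', 'w') as py_file:
    uic.compileUi(ui_file, py_file)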
Then create a new .py file under the src folder that opens this window and calls the emotion recognition code. Mine is named test3.py; the code is as follows:
from statistics import mode
import sys,os
import threading
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication,QMainWindow,QFileDialog
import test  # the UI module generated from the Qt Designer .ui file (contains Ui_MainWindow)
import cv2
from keras.models import load_model
import numpy as np
from src.utils.datasets import get_labels
from src.utils.inference import detect_faces
from src.utils.inference import draw_text
from src.utils.inference import draw_bounding_box
from src.utils.inference import apply_offsets
from src.utils.inference import load_detection_model
from src.utils.preprocessor import preprocess_input
flag = 1        # 1 = use the webcam, 2 = play a video file chosen by the user
fileName = ""   # path of the video file selected in the file dialog
fileType = ""
video_capture = None   # created in faceShow(), released by the stop button
class FaceD(QMainWindow, test.Ui_MainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        test.Ui_MainWindow.__init__(self)
        self.setupUi(self)
        # Create the worker thread and wire up the button click events here
        self.th = threading.Thread(target=self.faceShow)
        self.pushButton.clicked.connect(self.click1)   # open the camera
        self.openFile.clicked.connect(self.click2)     # open a video file
        self.bStop.clicked.connect(self.click3)        # stop

    # Open-camera event
    def click1(self):
        global flag
        flag = 1
        self.th = threading.Thread(target=self.faceShow)
        self.th.start()

    # Play-video event
    def click2(self):
        global flag, fileName, fileType
        flag = 2
        fileName, fileType = QFileDialog.getOpenFileName(self, "Select a file", os.getcwd())
        print(fileName)
        print(fileType)
        self.th = threading.Thread(target=self.faceShow)
        self.th.start()

    # Stop button: release the capture (if one is open) and clear the preview label
    def click3(self):
        global video_capture
        if video_capture is not None:
            video_capture.release()
        self.label.clear()
    def faceShow(self):
        global flag, fileType, fileName, video_capture
        # Paths of the pretrained models shipped with face_classification
        detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'  # Haar cascade face detector
        emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        emotion_labels = get_labels('fer2013')

        # Hyper-parameters for the bounding boxes
        frame_window = 10
        emotion_offsets = (20, 40)

        # Load the models
        face_detection = load_detection_model(detection_model_path)
        emotion_classifier = load_model(emotion_model_path, compile=False)

        # Input shape expected by the emotion classifier
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # Sliding window of recent predictions, used to smooth the displayed label via the mode
        emotion_window = []

        # Start video streaming: webcam or the selected video file
        if flag == 1:
            video_capture = cv2.VideoCapture(0)
        elif flag == 2:
            video_capture = cv2.VideoCapture(fileName)

        while True:
            bgr_image = video_capture.read()[1]
            if bgr_image is None:
                break
            fps = video_capture.get(cv2.CAP_PROP_FPS)
            print(fps)
            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            faces = detect_faces(face_detection, gray_image)

            # Detect each face in the frame and classify its emotion
            for face_coordinates in faces:
                x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, emotion_target_size)
                except Exception:
                    continue

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_prediction = emotion_classifier.predict(gray_face)
                emotion_probability = np.max(emotion_prediction)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]
                emotion_window.append(emotion_text)

                if len(emotion_window) > frame_window:
                    emotion_window.pop(0)
                try:
                    emotion_mode = mode(emotion_window)
                except Exception:
                    continue

                # Color-code the box by emotion, scaled by the prediction confidence
                if emotion_text == 'angry':
                    color = emotion_probability * np.asarray((255, 0, 0))
                elif emotion_text == 'sad':
                    color = emotion_probability * np.asarray((0, 0, 255))
                elif emotion_text == 'happy':
                    color = emotion_probability * np.asarray((255, 255, 0))
                elif emotion_text == 'surprise':
                    color = emotion_probability * np.asarray((0, 255, 255))
                else:
                    color = emotion_probability * np.asarray((0, 255, 0))
                color = color.astype(int).tolist()

                draw_bounding_box(face_coordinates, rgb_image, color)
                draw_text(face_coordinates, rgb_image, emotion_mode,
                          color, 0, -45, 1, 1)

            # Convert back to BGR and hand the frame to the QLabel instead of cv2.imshow
            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            img = QImage(bgr_image.data, bgr_image.shape[1], bgr_image.shape[0],
                         bgr_image.strides[0], QImage.Format_BGR888)
            self.label.setPixmap(QPixmap.fromImage(img))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
if __name__ == '__main__':
    app = QApplication(sys.argv)
    fd = FaceD()
    fd.show()
    sys.exit(app.exec_())
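One practical note on running it: the imports use the src.utils package path, which resolves as long as the face_classification project root is on the Python path (PyCharm adds content roots automatically), while the ../trained_models/... model paths are relative to the working directory, so set the working directory to the src folder, assuming the repository's default layout. With that in place, the first button starts webcam recognition in a background thread, the open-file button pops up a dialog and runs the same pipeline on the chosen video, and the stop button releases the capture and clears the preview label.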