基于卷积神经网络(CNN)的情感分析

基于卷积神经网络(CNN)的情感分析
目录结构如下图所示。

 

1.1 构建表情识别模型
1)导入库

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
2)构建神经网络

构建自定义CNN网络

class CustomNet:
    """A small VGG-style CNN for classifying facial-expression images."""

    @staticmethod
    def build(width, height, depth, classes):
        """Assemble and return an (uncompiled) Keras Sequential network.

        Args:
            width:   input image width in pixels.
            height:  input image height in pixels.
            depth:   number of image channels (1 = grayscale).
            classes: number of output categories for the softmax head.

        Returns:
            The constructed ``Sequential`` model.
        """
        # Default to channels-last ordering.
        net = Sequential()
        input_shape = (height, width, depth)
        channel_axis = -1

        # Channels-first backends need the depth axis moved to the front
        # (and BatchNormalization told which axis holds the channels).
        if K.image_data_format() == "channels_first":
            input_shape = (depth, height, width)
            channel_axis = 1

        # Stage 1: (CONV => RELU => BN) x2 => POOL => DROPOUT, 32 filters.
        net.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
        net.add(Activation("relu"))
        net.add(BatchNormalization(axis=channel_axis))
        net.add(Conv2D(32, (3, 3), padding="same"))
        net.add(Activation("relu"))
        net.add(BatchNormalization(axis=channel_axis))
        net.add(MaxPooling2D(pool_size=(2, 2)))
        net.add(Dropout(0.25))

        # Stage 2: same pattern with 64 filters.
        for _ in range(2):
            net.add(Conv2D(64, (3, 3), padding="same"))
            net.add(Activation("relu"))
            net.add(BatchNormalization(axis=channel_axis))
        net.add(MaxPooling2D(pool_size=(2, 2)))
        net.add(Dropout(0.25))

        # Fully-connected head: single 512-unit FC => RELU block.
        net.add(Flatten())
        net.add(Dense(512))
        net.add(Activation("relu"))
        net.add(BatchNormalization())
        net.add(Dropout(0.5))

        # Softmax classifier over the requested number of classes.
        net.add(Dense(classes))
        net.add(Activation("softmax"))

        return net
初始化模型

# Build the expression model: 28x28 single-channel input, 2 output classes.
model = CustomNet.build(28,28,1,2)
3)打印模型信息

打印模型结构信息。

注:安装如下软件和Python包
1. apt-get install graphviz
   apt-get install graphviz graphviz-doc
2. pip install graphviz
3. pip install pydot

plot_model(model, show_shapes=True)   # render the network architecture diagram (requires graphviz/pydot)
打印模型参数信息。

# Print the per-layer shapes and parameter counts to stdout.
model.summary()
 
1.2 训练表情识别模型
1)设置超参数及路径

# --- Training hyperparameters ---
TARGET_WIDTH = 28    # input width the network expects
TARGET_HEIGHT = 28   # input height the network expects
BATCH_SIZE = 64
EPOCHS = 15
LR_INIT = 0.1        # initial learning rate (the article later tunes this to 0.01)
DECAY = LR_INIT/EPOCHS   # linear LR decay spread over all epochs
MOMENTUM = 0.6       # SGD momentum (the article later tunes this to 0.9)

# --- Input/output paths ---
dataset_path = 'images'
output_model_path = 'models/face_expression.hdf5'
output_plot_path = 'plots/face_expression.png'
2)初始化数据预处理器

# Resize images to the target size while preserving aspect ratio.
from oldcare.preprocessing import AspectAwarePreprocessor
aap = AspectAwarePreprocessor(TARGET_WIDTH, TARGET_HEIGHT)
# Convert images to Keras-ordered arrays (channels per backend setting).
from oldcare.preprocessing import ImageToArrayPreprocessor
iap = ImageToArrayPreprocessor()
3)获取数据集

print("[INFO] loading images...")
from imutils import paths
# Collect every image path under the dataset directory.
imagePaths = list(paths.list_images(dataset_path))
# Create the dataset loader, chaining both preprocessors per image.
from oldcare.datasets import SimpleDatasetLoader
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
# NOTE(review): the positional args 500/True are presumably a progress-print
# interval and a verbosity flag — confirm against SimpleDatasetLoader.load.
(data, labels) = sdl.load(imagePaths, 500, True)
4)数据预处理

特征缩放

data = data.astype("float") / 255.0 # feature scaling to [0, 1] — an essential preprocessing step
对标签进行one-hot编码

# One-hot encode the labels: map class names to integers, then to
# 2-dimensional one-hot vectors for categorical cross-entropy.
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder().fit(labels)
from tensorflow.keras.utils import to_categorical
labels = to_categorical(le.transform(labels), 2)
划分训练集和测试集

# 80/20 train/test split, stratified so both sets keep the class balance;
# fixed random_state makes the split reproducible.
from sklearn.model_selection import train_test_split
(trainX, testX, trainY, testY) = train_test_split(data,
   labels, test_size=0.20, stratify=labels, random_state=42)
5)创建模型

print("[INFO] compiling model...")
# Build the network. NOTE(review): this uses MiniVGGNet from oldcare.conv,
# not the CustomNet defined earlier — presumably the same architecture; confirm.
from oldcare.conv import MiniVGGNet
model = MiniVGGNet.build(width=TARGET_WIDTH, 
                        height=TARGET_HEIGHT, depth=1, classes=2)
# Create the optimizer: SGD with linear decay and Nesterov momentum.
# NOTE(review): the `decay` argument was removed from the default tf.keras
# optimizers in TF 2.11+; newer versions need optimizers.legacy.SGD or a
# LearningRateSchedule — confirm the TF version in use.
from tensorflow.keras.optimizers import SGD
opt = SGD(learning_rate=LR_INIT, decay=DECAY, momentum = MOMENTUM, nesterov=True)
# Compile with categorical cross-entropy (labels are one-hot encoded).
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
6)引入TensorBoard

用于实时监控模型训练过程

# TrainingMonitor writes a training-curve plot to output_plot_path each epoch,
# so progress can be watched while the model trains.
from oldcare.callbacks import TrainingMonitor
callbacks = [TrainingMonitor(output_plot_path)]
7)训练模型

print("[INFO] training network...")
# Train, using the held-out test split as validation data each epoch.
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=BATCH_SIZE, epochs=EPOCHS,
callbacks = callbacks, verbose=1)
 

1.3 评估模型
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=64)
# Print a per-class precision/recall/F1 report; argmax converts the one-hot
# ground truth and the softmax outputs back to integer class indices.
from sklearn.metrics import classification_report
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=le.classes_))
1.4 保存模型
print("[INFO] serializing network...")
# Persist architecture + weights to the HDF5 file for later inference.
model.save(output_model_path)
 
2. 情感分析模型的优化
从优化器的角度优化。

当前优化器使用的是SGD,即随机梯度下降(Stochastic Gradient Descent)。SGD有4个比较重要的参数,分别是lr(learning_rate)、decay、momentum、nesterov。

从这4个参数的原理出发,不断使用不同的参数值做优化。

优化后这4个参数的值是:

LR_INIT = 0.01

DECAY = LR_INIT/EPOCHS

MOMENTUM = 0.9

 
3. 老人情感分析
传入视频文件(或者通过电脑摄像头捕捉视频),对每一帧做表情识别。

#!/usr/bin/env python
# coding: utf-8

# # <center>应用表情识别模型</center>

# ## 1. 导入库 

# In[1]:


from keras.preprocessing.image import img_to_array
from keras.models import load_model
from oldcare.facial import FaceUtil
import numpy as np
import imutils
import cv2
import time


# ## 2. Set paths

# In[2]:


model_path = 'models/face_expression.hdf5'
# Leave empty/None to capture from the webcam instead of a file.
input_video = 'tests/room_04.avi'


# # 3. Set hyperparameters

# In[3]:


# Input size the trained expression model expects.
FACIAL_EXPRESSION_TARGET_WIDTH = 28
FACIAL_EXPRESSION_TARGET_HEIGHT = 28


# ## 4. Load the trained expression model

# In[4]:


model = load_model(model_path)


# ## 5. Set up the video source

# In[5]:


# If no video path was provided, fall back to the default webcam
# (and pause briefly to let the camera warm up).
if not input_video:
    camera = cv2.VideoCapture(0)
    time.sleep(2)
else:
    camera = cv2.VideoCapture(input_video)


# ## 6. Create the face-detection utility

# In[6]:


faceutil = FaceUtil()


# ## 7. 读取视频的每帧进行识别

# In[7]:


# Per-frame loop: read a frame, detect faces, classify each face's
# expression and draw the result until the stream ends or ESC is pressed.
while True:
    # Grab the next frame from the file or the webcam.
    (grabbed, frame) = camera.read()

    # Stop on any failed read. (Previously only file input broke here,
    # so a failed webcam read crashed on cv2.flip(None, 1).)
    if not grabbed:
        break

    # Mirror webcam frames so the preview behaves like a mirror.
    if not input_video:
        frame = cv2.flip(frame, 1)

    # Resize for a consistent detection scale, then make a grayscale copy
    # for the CNN input.
    frame = imutils.resize(frame, width=600)

    face_location_list = faceutil.get_face_location(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Classify every detected face box.
    for (left, top, right, bottom) in face_location_list:
        # Clamp the box to the frame: negative indices would silently
        # slice from the wrong end and corrupt the ROI.
        top, left = max(top, 0), max(left, 0)

        # Extract the grayscale ROI, resize to the model's 28x28 input,
        # scale to [0, 1] and add the channel and batch axes.
        roi = gray[top:bottom, left:right]
        if roi.size == 0:
            continue  # degenerate detection box — nothing to classify
        roi = cv2.resize(roi, (FACIAL_EXPRESSION_TARGET_WIDTH, 
                         FACIAL_EXPRESSION_TARGET_HEIGHT))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)

        # Predict the expression. Output order (neutral, smile) must match
        # the label encoding used at training time — TODO confirm.
        (neutral, smile) = model.predict(roi)[0]
        label = "Neutral" if neutral > smile else "Smile"

        # Draw the label and bounding box on the output frame.
        cv2.putText(frame, label, (left, top - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        cv2.rectangle(frame, (left, top), (right, bottom),
            (0, 0, 255), 2)

    # Show the annotated frame.
    cv2.imshow("Facial Expression Detect", frame)

    # Exit when 'ESC' (keycode 27) is pressed.
    k = cv2.waitKey(100) & 0xff 
    if k == 27:
        break


# ## 8. Release resources

# In[8]:


# Release the camera handle and close all OpenCV windows.
camera.release()
cv2.destroyAllWindows()

  • 4
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

程序老猫

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值