Python Keras CNN卷积网络人脸表情识别代码分享


前言

随着人工智能的不断发展,机器学习这门技术也越来越重要,很多人都开始学习机器学习,本文主要使用卷积网络实现对人脸7种表情的识别。

提示:以下是本篇文章正文内容,下面案例可供参考

一、数据集及数据处理

本文使用fer2013 数据集,使用python语言 pandas库 和keras框架实现对数据的处理。 fer2013 数据集免费下载链接

代码实现(示例):

import keras
import pandas as pd
import numpy as np


def dataRead(path, x_data, y_data, data_size_begin, data_size_end):
    """Load a slice of a fer2013-style CSV and append decoded samples in place.

    Parameters
    ----------
    path : str
        CSV file with an integer 'emotion' column (0-6) and a 'pixels'
        column holding 48*48 space-separated grayscale values.
    x_data, y_data : list
        Output lists; decoded images and one-hot labels are appended in place.
    data_size_begin, data_size_end : int
        Row slice [begin, end) of the CSV to load.

    Returns
    -------
    list
        [x, y] where x is float32 with shape (n, 48, 48, 1) and y is the
        float32 one-hot label array with shape (n, 7).
    """
    train_data = pd.read_csv(path)
    min_data = train_data.iloc[data_size_begin:data_size_end]
    pixel_strings = min_data['pixels']
    emotions = min_data['emotion']
    print("数据集加载完成,数据集大小")
    print(len(pixel_strings))

    # 7 emotion categories in fer2013.
    num_classes = 7
    # One-hot lookup table; row i is the encoding of class i. Equivalent to
    # keras.utils.to_categorical(i, num_classes) but needs only numpy.
    one_hot = np.eye(num_classes, dtype='float32')

    for emotion, img in zip(emotions, pixel_strings):
        try:
            label = one_hot[int(emotion)]
            values = np.array(img.split(" "), 'float32')
        except (ValueError, IndexError) as err:
            # Malformed row (non-numeric pixels or out-of-range label):
            # skip it rather than abort the whole load.
            print("skipping malformed row:", err)
            continue
        x_data.append(values)
        y_data.append(label)

    print("表情 分类完成 finish")
    print(len(x_data))

    # Reshape flat pixel vectors into single-channel 48x48 images.
    x = np.array(x_data).reshape(-1, 48, 48, 1)
    y = np.array(y_data)
    print("数据集 格式转换完成")
    print(len(x))
    return [x, y]

二、搭建、训练卷积网络

1.模型搭建、训练

代码实现(示例):

from keras import regularizers
from data_Reader import dataRead

# ---- Load a 30k-row training slice of fer2013 ----
path_train = "C:\\Users\\Administrator\\PycharmProjects\\pythonProject\\fer2013原-csv\\train.csv"
train_data_x = []
train_data_y = []
train_size_begin = 0
train_size_end = 30000
train = dataRead(path_train, train_data_x, train_data_y, train_size_begin, train_size_end)

# ---- Load a 10k-row test slice used as the validation set ----
path_test = "C:\\Users\\Administrator\\PycharmProjects\\pythonProject\\fer2013原-csv\\test.csv"
test_data_x = []
test_data_y = []
test_size_begin = 0
test_size_end = 10000
test = dataRead(path_test, test_data_x, test_data_y, test_size_begin, test_size_end)

from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Activation, Dropout, Flatten, Dense, BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator

batch_size = 256
epochs = 80

# VGG-style stack: three conv blocks (32 -> 64 -> 128 filters), each with two
# 3x3 convolutions followed by 2x2 max pooling; L2 regularisation on the
# leading conv of each block to curb overfitting.
model = Sequential()

# Block 1
model.add(Conv2D(input_shape=(48, 48, 1), filters=32, kernel_size=3, padding='same',
                 kernel_regularizer=regularizers.l2(0.001)))
model.add(Activation('relu'))
model.add(Conv2D(filters=32, kernel_size=3, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=2, strides=2))

# Block 2
model.add(Conv2D(filters=64, kernel_size=3, padding='same', kernel_regularizer=regularizers.l2(0.001)))
model.add(Activation('relu'))
model.add(Conv2D(filters=64, kernel_size=3, padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=2, strides=2))

# Block 3
model.add(Conv2D(filters=128, kernel_size=3, padding='same', kernel_regularizer=regularizers.l2(0.001)))
model.add(Activation('relu'))
model.add(Conv2D(filters=128, kernel_size=3, padding='same', kernel_regularizer=regularizers.l2(0.001)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=2, strides=2))

model.add(Flatten())

# Classifier head: two dense layers with dropout, softmax over the 7 emotions.
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))

# Train. FIX: the original passed shuffle='true' (a string) where Keras
# expects a bool; any non-empty string is truthy so the result was the same,
# but shuffle=True states the intent correctly.
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
history = model.fit(train[0], train[1], batch_size=batch_size, epochs=epochs,
                    validation_data=(test[0], test[1]), shuffle=True)

train_score = model.evaluate(train[0], train[1], verbose=0)
print('Train loss:', train_score[0])
print('Train accuracy:', 100 * train_score[1])

model.save('my_base_model2.h5')

from paint_Process import paint_process

# Plot and save the training curves.
name = "test_one"
paint_process(history, name)

2.绘制模型训练过程

代码实现:

import matplotlib.pyplot as plt


def paint_process(history, name):
    """Plot training/validation loss and accuracy curves and save the figure.

    history is a Keras History object; the figure is written to a fixed
    output directory under the file name `name`, then shown on screen.
    """
    keys = ['loss', 'accuracy', 'val_accuracy', 'val_loss']
    labels = ['loss', 'train_acc', 'val_acc', 'val_loss']
    xs = range(len(history.history['loss']))
    for key, label in zip(keys, labels):
        plt.plot(xs, history.history[key], label=label)
    plt.legend()
    out_dir = "C:\\Users\\Administrator\\PycharmProjects\\pythonProject\\trian_image\\"
    plt.savefig(out_dir + name)
    plt.show()

3.调用模型对人脸表情进行判断

import cv2
import numpy as np
import numpy as np
import os
from tensorflow import keras


def resize_image(image):
    """Convert a BGR face crop to the network input tensor.

    Resizes to 48x48, converts to grayscale and adds batch and channel
    dimensions, returning an array of shape (1, 48, 48, 1).
    """
    image = cv2.resize(image, (48, 48))
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # FIX: dropped leftover debug prints and the redundant np.array() copy --
    # cvtColor already returns an ndarray we can reshape directly.
    x = gray.reshape(48, 48, 1)
    x = np.expand_dims(x, axis=0)
    return x


def displayText(img, result):
    """Overlay the seven raw class scores on img, one line per class.

    result is indexed as result[0][i]; lines are drawn 30 px apart
    starting at y=50.
    """
    # The first line keeps its distinct font/colour from the original layout.
    cv2.putText(img, "W:" + str(result[0][0]), (40, 50),
                cv2.FONT_HERSHEY_COMPLEX, 1.0, (10, 10, 210), 2)
    for idx in range(1, 7):
        caption = "W:" + str(result[0][idx])
        cv2.putText(img, caption, (40, 50 + 30 * idx),
                    cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)


def displayEmotion(img, result, x, y, w, h):
    """Draw a filled label bar above the face box showing the top emotion.

    result is the model output (one row of 7 class scores, e.g. shape (1, 7));
    (x, y, w, h) is the detected face rectangle on img.
    """
    # Class index -> label text, in fer2013 order.
    labels = ("anger:", "disgust:", "fear:", "happy:",
              "sad:", "surprised:", "normal:")
    # max(result) over the first axis yields the single score row.
    scores = max(result).tolist()
    best = scores.index(max(scores))
    # FIX: the original if-chain left `text` unbound (NameError) if no branch
    # matched; a table lookup with a guard cannot.
    text = labels[best] if best < len(labels) else ""
    cv2.rectangle(img, (x-1, y), (x+w+1, y-20), (0, 255, 0), thickness=-1)
    cv2.putText(img, text, (x+20, y), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 0), 2)


def findMax(a):
    """Return the emotion name for the highest-scoring class in *a*.

    *a* is expected to hold one row of 7 class scores (e.g. [[...]]);
    returns None if the winning index falls outside the known labels.
    """
    names = ("anger", "disgust", "fear", "happy", "sad", "surprised", "normal")
    row = max(a)
    best = row.index(max(row))
    if best < len(names):
        return names[best]
    return None


# from keras.preprocessing import image
from matplotlib.pyplot import imshow

# Haar-cascade face detector shipped with OpenCV; loaded from a hard-coded
# Anaconda install path.
face_cascade = cv2.CascadeClassifier(r'C:\ProgramData\Anaconda3\envs\tfenv\Lib\site-packages\cv2\data\haarcascade_frontalface_alt.xml')
# face_cascade.load("D:\Build\OpenCV\opencv-4.1.2\modules\core\src\persistence_xml\haarcascade_frontalface_alt.xml")
# eye_cascade = cv2.CascadeClassifier("D:\face_recognized\haarcascade_eye.xml")

# Emotion classifier saved by the training script.
model2 = keras.models.load_model("../Model/my_VGG_11_model_10000_256_100.h5")

# Default webcam (device 0).
cap = cv2.VideoCapture(0)

# Main loop: grab a frame, detect faces, classify each crop and draw the
# predicted emotion; 'q' quits.
while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.1, minNeighbors=5
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    cv2.imshow("img", img)
    if len(faces) > 0:
        for faceRect in faces:
            x, y, w, h = faceRect
            # Crop the face with a 10 px margin. NOTE(review): if the face
            # touches the frame edge, y-10/x-10 go negative and Python slicing
            # wraps around -- confirm this is intended.
            image = img[y - 10: y + h + 10, x - 10: x + w + 10]
            # cv2.imshow("face",image)

            # Preprocess to (1, 48, 48, 1) and classify.
            image = resize_image(image)
            result = model2.predict(image)
            #print(result)
            # print(model2.predict(image))
            # Label bar plus bounding box on the live frame.
            displayEmotion(img, result,x,y,w,h)
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Upper half of the face, kept for the (disabled) eye detector.
            roi_gray = gray[y:y + h // 2, x:x + w]
            roi_color = img[y:y + h // 2, x:x + w]

            # eyes = eye_cascade.detectMultiScale(roi_gray,1.1,1,cv2.CASCADE_SCALE_IMAGE,(2,2))
        # for (ex,ey,ew,eh) in eyes:
        # cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
    # Redraw the annotated frame and poll the keyboard.
    cv2.imshow("img", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break


总结

以上就是今天要讲的内容,本文仅仅简单实现了卷积网络对fer2013数据集的学习模型,实现对人脸表情的实时识别。
  • 3
    点赞
  • 55
    收藏
    觉得还不错? 一键收藏
  • 9
    评论
好的,以下是一个基于PythonKeras框架实现的CNN卷积神经网络人脸识别代码示例: ```python # 导入必要的库 import numpy as np from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import np_utils from keras.datasets import mnist from keras import backend as K from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing import image # 定义数据集相关参数 batch_size = 128 num_classes = 2 epochs = 10 img_rows, img_cols = 28, 28 input_shape = (img_rows, img_cols, 3) # 加载数据集 train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( 'train', target_size=(img_rows, img_cols), batch_size=batch_size, class_mode='categorical') validation_generator = test_datagen.flow_from_directory( 'test', target_size=(img_rows, img_cols), batch_size=batch_size, class_mode='categorical') # 构建CNN模型 model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) # 编译模型 model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # 训练模型 model.fit_generator( train_generator, steps_per_epoch=2000 // batch_size, epochs=epochs, validation_data=validation_generator, validation_steps=800 // batch_size) # 预测新图像 img_path = 'test/cat/cat.1.jpg' img = image.load_img(img_path, target_size=(img_rows, img_cols)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) preds = model.predict_classes(x) print('预测结果:', preds[0]) ``` 这是一个简单的人脸识别例子,其中使用了一个2层的CNN卷积神经网络模型,并且使用Keras提供的ImageDataGenerator来加载和处理数据集。你可以将此代码作为一个起点,并进行修改以满足你的具体需求。
评论 9
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值