主要实现人脸表情识别
设置类参数
ExpressionRecognitionSetting
# Path to the CSV export of the face-expression dataset.
FaceCsvPath = "D:/Code/Python/data/FaceExpression/data.csv"
# Where the trained Keras model (HDF5 format) is written by the training script.
ModelSavePath = "D:/Code/Python/module/ExpressionRecognition.h5"
数据预处理
主要功能:
读取图像,并将图像数据和标签分别存储在不同的两个文件中
ExpressionRecognitionFormatConversion
'''
this file trans the format of image to csv, and save it
'''
import cv2 as cv
import os
import numpy as np
import pandas as pd
import csv
import matplotlib.pyplot as plt
import pickle
# OpenCV Haar-cascade file used for frontal-face detection.
ClassifierPath = "D:/VS_2019/opencv3.4.1/opencv/build/etc/haarcascades/haarcascade_frontalface_alt2.xml"
# Folder holding the raw JAFFE dataset photographs.
ImagePathOrigin = "D:/Code/Python/data/FaceExpression/jaffedbase/"
# Folder that receives the cropped 48x48 face images.
ImagePathIn = 'D:/Code/Python/data/FaceExpression/aaa/'
# Pickle outputs: the stacked image array and the label vector.
ImageDataPathOut = "D:/Code/Python/data/FaceExpression/aaa/Image.pkl"
LabelDataPathOut = "D:/Code/Python/data/FaceExpression/aaa/label.pkl"
# Face detection helper.
def FaceDetect(img):
    """Return one ``[x0, y0, x1, y1]`` box per face detected in *img*."""
    detector = cv.CascadeClassifier(ClassifierPath)
    found = detector.detectMultiScale(img, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
    # detectMultiScale yields (x, y, w, h); convert to corner coordinates.
    return [[x, y, x + w, y + h] for (x, y, w, h) in found]
if __name__ == "__main__":
    # Pre-allocate: JAFFE contains 213 images; each face crop is resized to
    # 48x48x3. Slots where no face is detected keep the initial ones.
    data = np.ones((213, 48, 48, 3))
    label = np.ones(213)
    # File name of every photo in the dataset folder.
    ImageName = os.listdir(ImagePathOrigin)
    count = 0  # index into data/label, one slot per source image
    for name in ImageName:
        img = cv.imread(ImagePathOrigin + name)
        # Face bounding boxes as [x, y, x1, y1] corner coordinates.
        ret = FaceDetect(img)
        for (x, y, x1, y1) in ret:
            # BUGFIX: numpy images are indexed [row, col] == [y, x]; the
            # original sliced img[x:x1, y:y1] and cropped a transposed region.
            new_img = img[y:y1, x:x1]
            # Normalize the face crop to 48x48 so it fits the CNN input,
            # then persist it both as a JPEG and into the data array.
            new_roi = cv.resize(new_img, (48, 48))
            cv.imwrite(ImagePathIn + str(count) + '.jpg', new_roi)
            data[count] = new_roi
        # JAFFE file names encode the expression at characters 3-5,
        # e.g. "KA.AN1.39.tiff" -> "AN" (angry).
        code_to_label = {'AN': 0, 'DI': 1, 'FE': 2, 'HA': 3,
                         'SA': 4, 'SU': 5, 'NE': 6}
        label_temp = name[3:5]
        if label_temp in code_to_label:
            label[count] = code_to_label[label_temp]
        else:
            print("get label error ......\n")
        count = count + 1
    # Serialize once after the loop (the original re-dumped both whole
    # arrays on every iteration) and close the files deterministically.
    with open(ImageDataPathOut, 'wb') as f:
        pickle.dump(data, f)
    with open(LabelDataPathOut, 'wb') as f:
        pickle.dump(label, f)
    print("Finish Saving Data Of Image\n")
主体部分
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import keras
from scipy import misc
import ExpressionRecognitionSetting as setting
import os
import pickle
import PIL #其中一个图像库
import glob
# Class id -> human-readable expression name (matches the label encoding
# used by the pre-processing script: AN=0, DI=1, FE=2, HA=3, SA=4, SU=5, NE=6).
emotion = {0:'Angry',1:'Disgust',2:'Fear',3:'Happy',4:'Sad',5:'Surprise',6:'Neutral'}
dropout = 0.5  # NOTE(review): unused — the model hard-codes its own dropout rates
class_sum = 7  # NOTE(review): unused — CreatModel hard-codes 7 output units
# Pickled arrays produced by the format-conversion script.
ImageData = "D:/Code/Python/data/FaceExpression/aaa/Image.pkl"
LabelData = "D:/Code/Python/data/FaceExpression/aaa/label.pkl"
def jpg2array(jpg_dir, out_file, width=48, height=48, jpg_num=1, jpg_type=3):
    """Load every ``*.jpg`` under *jpg_dir* into one array and pickle it.

    The result has shape ``(jpg_num, width, height, jpg_type)``; slots
    beyond the number of files found keep their initial ones.

    BUGFIX: the original never incremented its index, so every image
    overwrote ``data[0]``; it also re-dumped the array on each iteration
    and never closed the output file.
    """
    data = np.ones((jpg_num, width, height, jpg_type))
    # os.path.join instead of a hand-built backslash pattern keeps this
    # portable beyond Windows.
    for i, jpg_file in enumerate(glob.glob(os.path.join(jpg_dir, '*.jpg'))):
        img = PIL.Image.open(jpg_file)
        # assumes every JPEG already matches (width, height, jpg_type) —
        # TODO confirm; PIL raises on shape mismatch otherwise
        data[i] = np.array(img)
    with open(out_file, 'wb') as f:
        pickle.dump(data, f)
# Read pickled image data / labels back from disk.
def load_data(file_dir):
    """Unpickle and return the object stored at *file_dir*.

    Uses a ``with`` block — the original opened the file without ever
    closing it, leaking the handle.
    """
    with open(file_dir, 'rb') as f:
        return pickle.load(f)
# Split the dataset and its labels into train / test partitions.
def DivideData(data, label, split=142):
    """Split *data*/*label* into train (first *split* samples) and test (rest).

    The split index is now a parameter instead of a hard-coded 142/213;
    the default reproduces the original 142-train / 71-test JAFFE split.

    Returns ``(train_data, train_label, test_data, test_label)``.
    """
    train_data = data[:split]
    test_data = data[split:]
    train_label = label[:split]
    test_label = label[split:]
    return train_data, train_label, test_data, test_label
# Build the network.
def CreatModel():
    """Assemble the 7-class expression-recognition CNN (input 48x48x3)."""
    L = keras.layers
    model = keras.models.Sequential([
        # Block 1: two 3x3 convolutions with 32 filters, then 2x2 max-pool.
        L.Conv2D(32, (3, 3), padding="same", activation="relu", input_shape=(48, 48, 3)),
        L.BatchNormalization(momentum=0.9),
        L.Conv2D(32, (3, 3), padding="same", activation="relu"),
        L.BatchNormalization(momentum=0.9),
        L.MaxPool2D(pool_size=(2, 2), padding="same"),
        L.Dropout(rate=0.25),
        # Block 2: same pattern with 64 filters.
        L.Conv2D(64, (3, 3), padding="same", activation="relu"),
        L.BatchNormalization(momentum=0.9),
        L.Conv2D(64, (3, 3), padding="same", activation="relu"),
        L.BatchNormalization(momentum=0.9),
        L.MaxPool2D(pool_size=(2, 2), padding="same"),
        L.Dropout(rate=0.25),
        # Classifier head: dense 512 -> softmax over the 7 expressions.
        L.Flatten(),
        L.Dense(512, activation="relu"),
        L.BatchNormalization(momentum=0.9),
        L.Dropout(rate=0.25),
        L.Dense(7, activation="softmax"),
    ])
    return model
# Plot the training curves.
def draw(history):
    """Plot loss and accuracy curves from a Keras ``history`` dict."""
    def _curves(train_key, val_key, train_name, val_name, title, ylabel):
        # Training values as blue dots, validation values as a solid line.
        xs = range(1, len(history[train_key]) + 1)
        plt.plot(xs, history[train_key], 'bo', label=train_name)
        plt.plot(xs, history[val_key], 'b', label=val_name)
        plt.title(title)
        plt.xlabel('Epochs')
        plt.ylabel(ylabel)
        plt.legend()

    _curves('loss', 'val_loss', 'Training loss', 'Validation loss',
            'Training and Validation loss', 'Loss')
    plt.figure()
    _curves('acc', 'val_acc', 'Training acc', 'validation acc',
            'Training and validation acc', 'acc')
if __name__ == "__main__":
    # Load the pickled image array and label vector produced by the
    # format-conversion script.
    data = load_data(ImageData)
    label = load_data(LabelData)
    train_data,train_label,test_data,test_label = DivideData(data,label)
    model = CreatModel()
    # sparse_categorical_crossentropy: labels are integer class ids (0-6),
    # not one-hot vectors.
    model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
    model.fit(train_data,train_label,epochs=50)
    #history = model.fit(train_data, train_label, epochs=50)
    model.save(setting.ModelSavePath)
    # result_loss,result_prediction = model.evaluate(test_data,test_label)
    # print("validation set:",result_prediction)
    print(train_data.shape)
    #draw(history)
    # NOTE(review): cv.waitKey(0) blocks for a key press, but no OpenCV
    # window is ever created here — looks like a leftover; confirm before removing.
    cv.waitKey(0)
实现的效果不佳,采用的是日本女性表情数据集,其中数据只有213张,太小了
不过只是为了了解一下自行搭建神经网络,数据预处理,感觉还可以。
由于数据集当时申请的时候似乎签有保密协议,之后就不再对外提供了,有需要的大佬可自行下载其他数据集