CNN-Based Facial Expression Recognition

This project builds a convolutional-neural-network model for facial expression recognition: we train a classifier on a public dataset and then try it out on a few images found online. I initially used FER2013 (link: https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data), but that dataset contains a fair number of mislabeled images, and models trained on it do not reach high accuracy. I therefore switched to an improved version, FER2013plus (link: https://www.worldlink.com.cn/osdir/ferplus.html); the differences between the two datasets are easy to see on their respective pages.
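The key difference is that FERPlus replaces FER2013's single label per image with crowd-sourced vote counts. Below is a minimal sketch for inspecting the label file, assuming fer2013new.csv follows the standard FERPlus layout (a usage column, an image-name column, then per-emotion vote columns):

import pandas as pd

ferplus = pd.read_csv('fer2013new.csv')
print(ferplus.columns.tolist())   # usage, image name, then vote counts per emotion
print(ferplus.head())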

The raw data come as CSV, so the first step is to convert them into images:

import os
import csv
import numpy as np
from itertools import islice
from PIL import Image
from tqdm import tqdm

def str_to_image(image_blob):
    # FER2013 stores each 48x48 grayscale image as a space-separated pixel string
    image_string = image_blob.split(' ')
    image_data = np.asarray(image_string, dtype=np.uint8).reshape(48, 48)
    return Image.fromarray(image_data)

folder_names = {'Training'   : 'train',
                'PublicTest' : 'val',
                'PrivateTest': 'test'}

base_folder = 'fer2013plus'
fer_path = 'icml_face_data.csv'    # original FER2013 pixel data
ferplus_path = 'fer2013new.csv'    # FERPlus relabeling (vote counts)

# create train/val/test folders, each with one subfolder per label 0-7
for value in folder_names.values():
    for label in range(8):
        os.makedirs(os.path.join(base_folder, value, str(label)), exist_ok=True)

ferplus_entries = []
with open(ferplus_path, 'r') as csvfile:
    ferplus_rows = csv.reader(csvfile, delimiter=',')
    for row in islice(ferplus_rows, 1, None):   # skip the header row
        ferplus_entries.append(row)

index = 0
with open(fer_path, 'r') as csvfile:
    fer_rows = csv.reader(csvfile, delimiter=',')
    for row in tqdm(islice(fer_rows, 1, None)):
        ferplus_row = ferplus_entries[index]
        file_name = ferplus_row[1].strip()
        # columns 2-9 hold the vote counts for the eight emotions; cast to int
        # before argmax, since argmax on strings would compare lexicographically
        label = np.argmax(np.asarray(ferplus_row[2:10], dtype=np.int64))
        if len(file_name) > 0:    # an empty name means FERPlus dropped this image
            image = str_to_image(row[2])
            image_path = os.path.join(base_folder, folder_names[row[1]], str(label), file_name)
            image.save(image_path, compress_level=0)
        index += 1
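The next step deals with class imbalance, and a quick way to see it is to count the files per label folder. A minimal sketch, assuming the directory layout produced above:

import os

expressions = ['neutral', 'happiness', 'surprise', 'sadness',
               'anger', 'disgust', 'fear', 'contempt']
for label, name in enumerate(expressions):
    folder = os.path.join('fer2013plus/train', str(label))
    print(name, len(os.listdir(folder)))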

Before training the model, I also applied data augmentation, because the class distribution is quite unbalanced: if you plot the per-label sample counts, the gap between classes is large. The augmentation includes center cropping, random cropping, horizontal flipping, and so on.

The augmentation strategy depends on each class's sample count: happy, which already has many samples, is only doubled, while disgust, which starts out very rare, is expanded roughly twentyfold. See the code below, which splits the labels into three tiers (large, middle, small) by sample count:

import torchvision.transforms as transforms
import numpy as np
import os
from PIL import Image

# single-operation transforms (cropping ops resize back to 48x48)
transform_centercrop = transforms.Compose([
    transforms.ToTensor(),
    transforms.CenterCrop((38, 38)),
    transforms.Resize((48, 48)),
    transforms.ToPILImage()
])
transform_randomcrop = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomCrop((40, 40)),
    transforms.ToPILImage(),
    transforms.Resize((48, 48))
])
transform_randomhorizon = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomHorizontalFlip(p=1),
    transforms.ToPILImage()
])
transform_randomrotation = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomRotation(degrees=30),
    transforms.ToPILImage()
])
transform_randomerase = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomErasing(p=1),   # RandomErasing operates on tensors, hence ToTensor first
    transforms.ToPILImage()
])
transform_randomaffine = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomAffine(degrees=30, translate=(0, 0.2), scale=(0.9, 1)),
    transforms.ToPILImage()
])

# large classes: one randomly chosen operation per image
large_ops = [transforms.CenterCrop((38, 38)),
             transforms.RandomCrop((40, 40)),
             transforms.RandomHorizontalFlip(p=1),
             transforms.RandomRotation(degrees=30),
             transforms.RandomErasing(p=1),
             transforms.RandomAffine(degrees=30, translate=(0, 0.2), scale=(0.9, 1))]
transform_randomchoice_large = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomChoice(large_ops),
    transforms.ToPILImage(),
    transforms.Resize((48, 48))])

# middle classes: three augmented copies per image, drawn from three operation groups
middle_crop_ops = [transforms.CenterCrop((38, 38)),
                   transforms.RandomCrop((40, 40))]
transform_randomchoice_middle1 = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomChoice(middle_crop_ops),
    transforms.ToPILImage(),
    transforms.Resize((48, 48))])
transform_randomchoice_middle2 = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomHorizontalFlip(p=1),
    transforms.ToPILImage(),
    transforms.Resize((48, 48))])
middle_distort_ops = [transforms.RandomRotation(degrees=30),
                      transforms.RandomErasing(p=1),
                      transforms.RandomAffine(degrees=30, translate=(0, 0.2), scale=(0.9, 1))]
transform_randomchoice_middle3 = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomChoice(middle_distort_ops),
    transforms.ToPILImage(),
    transforms.Resize((48, 48))])

def DataAugmentation_small_content(source_path, aim_dir, label):
    # small classes: keep the original plus 22 augmented copies per image (~23x)
    os.makedirs(aim_dir, exist_ok=True)
    name = 0
    file_list = os.listdir(source_path)

    for i in file_list:
        img = Image.open(os.path.join(source_path, i))
        img.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1
        img_new = transform_centercrop(img)
        img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        for j in range(0, 5):
            name += 1
            img_new = transform_randomcrop(img)
            img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1
        img_new = transform_randomhorizon(img)
        img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        for j in range(0, 5):
            name += 1
            img_new = transform_randomrotation(img)
            img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        for j in range(0, 5):
            name += 1
            img_new = transform_randomaffine(img)
            img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        for j in range(0, 5):
            name += 1
            img_new = transform_randomerase(img)
            img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1

def DataAugmentation_middle_content(source_path, aim_dir, label):
    # middle classes: original plus three augmented copies per image (~4x)
    os.makedirs(aim_dir, exist_ok=True)
    name = 0
    file_list = os.listdir(source_path)

    for i in file_list:
        img = Image.open(os.path.join(source_path, i))
        img.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1
        img_new = transform_randomchoice_middle1(img)
        img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1
        img_new = transform_randomchoice_middle2(img)
        img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1
        img_new = transform_randomchoice_middle3(img)
        img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1

def DataAugmentation_large_content(source_path, aim_dir, label):
    # large classes: original plus one augmented copy per image (2x)
    os.makedirs(aim_dir, exist_ok=True)
    name = 0
    file_list = os.listdir(source_path)

    for i in file_list:
        img = Image.open(os.path.join(source_path, i))
        img.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1
        img_new = transform_randomchoice_large(img)
        img_new.save('%s/%s.png' % (aim_dir, str(name) + '_' + str(label)))

        name += 1

# tiers by sample count: 5=disgust, 6=fear, 7=contempt are the rarest
small = [5, 6, 7]
for label in small:
    source_path = os.path.join('fer2013plus/train', str(label))
    aim_dir = os.path.join('fer2013plus_DA1/train', str(label))
    DataAugmentation_small_content(source_path, aim_dir, label)

middle = [2, 3, 4]
for label in middle:
    source_path = os.path.join('fer2013plus/train', str(label))
    aim_dir = os.path.join('fer2013plus_DA1/train', str(label))
    DataAugmentation_middle_content(source_path, aim_dir, label)

large = [0, 1]   # 0=neutral, 1=happiness have the most samples
for label in large:
    source_path = os.path.join('fer2013plus/train', str(label))
    aim_dir = os.path.join('fer2013plus_DA1/train', str(label))
    DataAugmentation_large_content(source_path, aim_dir, label)

Before augmentation, the ratio of happy (the largest class) to disgust (the smallest) was about 50:1; after augmentation it is around 5:1.
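That 5:1 figure is consistent with the multipliers in the code above (about 2 files per image for the large tier and about 23 per image for the small tier):

# rough check of the post-augmentation class ratio
happy_before, disgust_before = 50, 1    # pre-augmentation ratio from the text
happy_after = happy_before * 2          # large tier: original + 1 augmented copy
disgust_after = disgust_before * 23     # small tier: original + 22 augmented copies
print(happy_after / disgust_after)      # ≈ 4.3, i.e. on the order of 5:1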

Next comes model training. The CNN follows VGG16, a classic image-classification architecture, but overfitting turned out to be fairly severe in practice, so the network here trims VGG16: the fifth convolutional block is dropped and the fully connected layers are narrowed from 4096 to 1024 units, as the code below shows.

The network and its training also use L2 regularization (weight decay), batch normalization, dropout, and a decaying learning rate. The full PyTorch implementation follows, covering the dataset and dataloader construction as well as building, training, and evaluating the network:

import numpy as np
import pandas as pd
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
import os
os.environ["KMP_DUPLICATE_LIB_OK"]='TRUE'

totensor = transforms.ToTensor()
print('----------------------------data pre-processing----------------------------\n')

# define dataset: one instance per label folder, with the folder name as the label
class dataset(Dataset):
    def __init__(self, root_dir, label_dir):
        self.root_dir = root_dir
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)
        self.img_path = os.listdir(self.path)
    def __getitem__(self, item):
        img_name = self.img_path[item]
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        img = Image.open(img_item_path)
        img = totensor(img)
        # return the label as an int so the DataLoader's default collate builds a LongTensor
        label = int(self.label_dir)
        return img, label
    def __len__(self):
        return len(self.img_path)

# 0=neutral, 1=happiness, 2=surprise, 3=sadness, 4=anger, 5=disgust, 6=fear, 7=contempt
label_dir = {}
expressions = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt']
for i, label in zip(range(0, 8), expressions):
    label_dir[label+'_label_dir'] = str(i)

# generating train datasets for pytorch
train_root_dir = 'fer2013plus_DA1/train'   # the augmented training set written above
train_data_sets = {}
for label in expressions:
    train_data_sets[label+'_dataset'] = dataset(train_root_dir, label_dir[label+'_label_dir'])
for i, key in zip(range(0, 8), train_data_sets):
    if i == 0:
        train_dataset = train_data_sets[key]
    else:
        train_dataset += train_data_sets[key]

# generating val datasets for pytorch
val_root_dir = 'fer2013plus/val'   # val/test were not augmented, so read the original folders
val_data_sets = {}
for label in expressions:
    val_data_sets[label+'_dataset'] = dataset(val_root_dir, label_dir[label+'_label_dir'])
for i, key in zip(range(0, 8), val_data_sets):
    if i == 0:
        val_dataset = val_data_sets[key]
    else:
        val_dataset += val_data_sets[key]

# generating test datasets for pytorch
test_root_dir = 'fer2013plus/test'
test_data_sets = {}
for label in expressions:
    test_data_sets[label+'_dataset'] = dataset(test_root_dir, label_dir[label+'_label_dir'])
for i, key in zip(range(0, 8), test_data_sets):
    if i == 0:
        test_dataset = test_data_sets[key]
    else:
        test_dataset += test_data_sets[key]

# some useful parameters
n_train = len(train_dataset)
n_val = len(val_dataset)
n_test = len(test_dataset)
batchsize = 64                 
epochs = 31                    
learning_rate = 0.0006         
weightdecay = 0.00005          
                               
# get dataloader
train_loader = DataLoader(dataset = train_dataset, batch_size = batchsize, shuffle = True)
val_loader = DataLoader(dataset = val_dataset, batch_size = batchsize, shuffle = True)

print('----------------------------data pre-processing is done----------------------------\n')

print('----------------------------network constructing----------------------------\n')

# define CNN model, which is based on VGG16
class modified_VGG16(nn.Module):
    def __init__(self):
        super(modified_VGG16, self).__init__()
        self.conv_pool_1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),

            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),

            nn.MaxPool2d(kernel_size=2, stride=2),
        )  # 48*48*1 to 24*24*64
        self.conv_pool_2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),

            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),

            nn.MaxPool2d(kernel_size=2, stride=2),
        )  # 24*24*64 to 12*12*128
        self.conv_pool_3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),

            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),

            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),

            nn.MaxPool2d(kernel_size=2, stride=2),
        )  # 12*12*128 to 6*6*256
        self.conv_pool_4 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),

            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),

            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),

            nn.MaxPool2d(kernel_size=2, stride=2),
        )  # 6*6*256 to 3*3*512
        self.flatten_fc = nn.Sequential(
            nn.Flatten(),

            nn.Linear(3*3*512, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Dropout(0.5),  #0.5

            nn.Linear(1024, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Dropout(0.5),  #0.5

            nn.Linear(1024, 8),
        )
    def forward(self, x):
        x = self.conv_pool_1(x)
        x = self.conv_pool_2(x)
        x = self.conv_pool_3(x)
        x = self.conv_pool_4(x)
        x = self.flatten_fc(x)
        return x
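
# optional sanity check (an addition, not in the original script): the four 2x2
# max-pools reduce the 48x48 input to 3x3, so the flattened size is 3*3*512 = 4608,
# matching the first Linear layer above
with torch.no_grad():
    assert modified_VGG16().eval()(torch.zeros(2, 1, 48, 48)).shape == (2, 8)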

# some settings and using GPU
device = torch.device('cuda:0')
myCNN = modified_VGG16()
myCNN = myCNN.to(device)
loss_f = nn.CrossEntropyLoss()
loss_f = loss_f.to(device)
optimizer = torch.optim.Adam(myCNN.parameters(), lr = learning_rate, betas = (0.9, 0.99), weight_decay = weightdecay)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.8) #0.8

print('----------------------------network constructing is done----------------------------\n')

print('----------------------------model training----------------------------\n')


train_accuracy = []
val_accuracy = []

for epoch in range(epochs):

    myCNN.train()
    total_train_accuracy = 0
    for imgs, targets in train_loader:
        imgs = imgs.cuda()
        targets = targets.cuda()   # labels are ints, so collate already built a LongTensor
        outputs = myCNN(imgs)
        loss = loss_f(outputs, targets)
        accu = (outputs.argmax(1) == targets).sum()
        total_train_accuracy = total_train_accuracy + accu
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    train_acc = (total_train_accuracy / n_train).cpu().detach().numpy()
    print('train accuracy after epoch ' + str(epoch) + ' is ' + str(train_acc) + '\n')
    train_accuracy.append(train_acc)

    myCNN.eval()
    total_val_accuracy = 0
    with torch.no_grad():
        for imgs, targets in val_loader:
            imgs = imgs.cuda()
            targets = targets.cuda()
            outputs = myCNN(imgs)
            loss = loss_f(outputs, targets)
            accu = (outputs.argmax(1) == targets).sum()
            total_val_accuracy = total_val_accuracy + accu
    val_acc = (total_val_accuracy / n_val).cpu().detach().numpy()
    print('val accuracy after epoch ' + str(epoch) + ' is ' + str(val_acc) + '\n')
    val_accuracy.append(val_acc)
    scheduler.step()

print('----------------------------model training is done----------------------------\n')

print('----------------------------test set predicting----------------------------\n')

test_loader = DataLoader(dataset = test_dataset, batch_size = batchsize, shuffle = True)
total_test_accuracy = 0
confusion_matrix = np.zeros((8, 8))
myCNN.eval()
with torch.no_grad():
    for imgs, targets in test_loader:
        imgs = imgs.cuda()
        targets = targets.cuda()
        outputs = myCNN(imgs)
        loss = loss_f(outputs, targets)
        accu = (outputs.argmax(1) == targets).sum()
        total_test_accuracy = total_test_accuracy + accu
        for true, prediction in zip(targets, outputs.argmax(1)):
            confusion_matrix[true.item(), prediction.item()] += 1
print('final test accuracy is ' + str((total_test_accuracy / n_test).cpu().detach().numpy()) + '\n')

accuracy_vector = pd.DataFrame(np.diagonal(confusion_matrix)/np.sum(confusion_matrix, axis=1), index=expressions)
print(accuracy_vector)
confusion_matrix = pd.DataFrame(confusion_matrix, index=expressions, columns=expressions, dtype='int')
print(confusion_matrix)

print('----------------------------test set predicting is done----------------------------')
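
The script records train_accuracy and val_accuracy per epoch but never plots them, even though matplotlib is imported at the top; a minimal sketch of the accuracy curves:

plt.plot(range(epochs), train_accuracy, label='train')
plt.plot(range(epochs), val_accuracy, label='val')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()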

Comparing the per-label prediction accuracy before and after augmentation shows that augmentation brings a sizable improvement for the classes that originally had few samples.

Besides the before/after augmentation comparison, I also compared against multinomial logistic regression: each image was flattened into 48*48 features, reduced with PCA, and fed to a multinomial logistic regression. The final gap is 33.7% (CNN accuracy 82.46% vs. MLR accuracy 48.76%). Last of all, I grabbed a few random images from the web, cropped the faces with cv2, resized them to the model's input size, and ran them through the trained model; the predictions on these images were quite accurate.
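The post does not show the code for that last step, so here is a minimal sketch, assuming the trained myCNN and the expressions list from the script above; the Haar cascade is one plausible choice for the cv2 face-cropping step, and 'test.jpg' is a hypothetical input:

import cv2
import torch
from PIL import Image
from torchvision import transforms

# load a face detector (OpenCV ships Haar cascade files with the pip package)
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
img = cv2.imread('test.jpg')                      # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
x, y, w, h = faces[0]                             # take the first detected face
face = cv2.resize(gray[y:y+h, x:x+w], (48, 48))   # match the model's 48x48 input

myCNN.eval()
with torch.no_grad():
    tensor = transforms.ToTensor()(Image.fromarray(face)).unsqueeze(0).cuda()  # (1, 1, 48, 48)
    prediction = myCNN(tensor).argmax(1).item()
print(expressions[prediction])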
