参考这位大佬的文章。他对于原理讲解和CNN的参考代码已经比较详细了。
【精选】【Pytorch】基于卷积神经网络实现的面部表情识别-CSDN博客
然而运行他的Resnet和VGG模型时出现了问题
参考代码中提供了使用Resnet模型和VGG模型实现同样功能的代码。但许多同学实验复现后表示代码有误,所训练出的模型在测试集和验证集中的正确率均高达100%,但在进行上述第六步视频验证时结果却频频出错,只能输出类别0“angry”。
猜想可能是数据加载器的问题。给出的代码中使用的 torchvision.datasets.ImageFolder配合torch.utils.data.DataLoader数据加载器。在网上查找相关资料可知使用前需要将之前加载的表情图片按照表情分开打包训练,而不能整体训练。
# Load the per-emotion image folders (ImageFolder derives each class label
# from its subdirectory name) and wrap them in batched loaders.
# NOTE(review): "vaild" is a typo for "valid", kept as-is because the rest
# of the post's code uses these names.
data_train = torchvision.datasets.ImageFolder(root=path_train,transform=transforms_train)
data_vaild = torchvision.datasets.ImageFolder(root=path_vaild,transform=transforms_vaild)
# Shuffle only the training split; keep the validation order deterministic.
train_set = torch.utils.data.DataLoader(dataset=data_train,batch_size=BATCH_SIZE,shuffle=True)
vaild_set = torch.utils.data.DataLoader(dataset=data_vaild,batch_size=BATCH_SIZE,shuffle=False)
鉴于修改较为复杂,本人突发奇想能不能使用作者给出的cnn的自制的数据加载器呢?因此将作者给出的几个代码拼接了一下,以cnn代码为主体,将模型换成resnet模型,结果训练完成,训练51个epoch后,模型正确率达到了57%(还能增加)。利用此模型识别摄像头中的人脸也有一定正确率。与cnn不同的修改部分的resnet代码附上(训练的数据集放在了和cnn一样的地方,只是改变了使用和生成的模型)
def train(train_dataset, val_dataset, batch_size, epochs, learning_rate, wt_decay):
    """Train the module-level `resnet` model on `train_dataset`.

    Args:
        train_dataset: dataset yielding (image, emotion_label) pairs.
        val_dataset: held-out dataset used for the periodic accuracy check.
        batch_size: mini-batch size for the DataLoader.
        epochs: number of full passes over the training data.
        learning_rate: SGD learning rate.
        wt_decay: SGD weight decay (L2 regularization).

    Returns:
        The trained model.
    """
    # Fix: shuffle the training data each epoch. The original passed only
    # (dataset, batch_size), so batches arrived in file order every epoch,
    # which hurts SGD convergence.
    train_loader = data.DataLoader(train_dataset, batch_size, shuffle=True)
    # Build the model.
    # model = FaceCNN()
    model = resnet  # the model used is defined later in this file
    model.to("cpu")
    # Loss function.
    loss_function = nn.CrossEntropyLoss()
    # Optimizer.
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=wt_decay)
    # Learning-rate decay (disabled in the original post).
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.8)
    for epoch in range(epochs):
        # Track the loss value of the current batch.
        loss_rate = 0
        # scheduler.step()  # learning-rate decay
        # Switch back to training mode every epoch (the eval() call below
        # would otherwise leave BatchNorm frozen for later epochs).
        model.train()
        for images, emotion in train_loader:
            optimizer.zero_grad()
            # Fix: call the module itself rather than model.forward() so the
            # idiomatic nn.Module __call__ path (hooks included) is used.
            output = model(images)
            loss_rate = loss_function(output, emotion)
            loss_rate.backward()
            optimizer.step()
        # NOTE: this prints the loss of the *last* batch of the epoch only.
        print('After {} epochs , the loss_rate is : '.format(epoch+1), loss_rate.item())
        if epoch % 5 == 0:
            model.eval()  # evaluation mode: disables dropout, uses BN running stats
            acc_train = validate(model, train_dataset, batch_size)
            acc_val = validate(model, val_dataset, batch_size)
            print('After {} epochs , the acc_train is : '.format(epoch+1), acc_train)
            print('After {} epochs , the acc_val is : '.format(epoch+1), acc_val)
    return model
class ResNet(nn.Module):
    """Flatten layer (named ResNet in the original post): keeps the batch
    dimension and collapses every remaining dimension into one."""
    def __init__(self, *args):
        super(ResNet, self).__init__()

    def forward(self, x):
        batch_size = x.shape[0]
        return x.view(batch_size, -1)
class GlobalAvgPool2d(nn.Module):
    """Global average pooling: averages each channel over its entire
    spatial extent by using a pooling window equal to the input's H x W."""
    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        spatial_size = x.size()[2:]
        return F.avg_pool2d(x, kernel_size=spatial_size)
# Residual network building block
class Residual(nn.Module):
    """One residual unit: two 3x3 convs with BatchNorm, plus an optional
    1x1 conv on the shortcut to match channels/stride."""
    def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):
        super(Residual, self).__init__()
        # Main path: first conv may downsample, second keeps the shape.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        # Shortcut path: only needed when channels or stride change.
        self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride) if use_1x1conv else None
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, X):
        identity = X if self.conv3 is None else self.conv3(X)
        out = F.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + identity)
def resnet_block(in_channels, out_channels, num_residuals, first_block=False):
    """Stack `num_residuals` Residual units into one stage.

    Unless this is the very first stage, the initial unit halves the
    spatial size (stride=2) and adapts the channel count via a 1x1
    shortcut convolution; all later units keep the shape.
    """
    if first_block:
        # The first stage must keep the channel count of its input.
        assert in_channels == out_channels
    units = []
    for idx in range(num_residuals):
        downsample = idx == 0 and not first_block
        if downsample:
            units.append(Residual(in_channels, out_channels, use_1x1conv=True, stride=2))
        else:
            units.append(Residual(out_channels, out_channels))
    return nn.Sequential(*units)
# Backbone assembly: stem (7x7 conv on 1-channel grayscale input, BN, ReLU,
# 3x3 max-pool) followed by four residual stages, global average pooling
# and a 7-way classifier head (one output per emotion class).
resnet = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=7 , stride=2, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# Four stages of two residual units each; from stage 2 onward the channel
# count doubles and the spatial size halves.
resnet.add_module("resnet_block1", resnet_block(64, 64, 2, first_block=True))
resnet.add_module("resnet_block2", resnet_block(64, 128, 2))
resnet.add_module("resnet_block3", resnet_block(128, 256, 2))
resnet.add_module("resnet_block4", resnet_block(256, 512, 2))
resnet.add_module("global_avg_pool", GlobalAvgPool2d()) # GlobalAvgPool2d output: (Batch, 512, 1, 1)
resnet.add_module("fc", nn.Sequential(ResNet(), nn.Linear(512, 7))) # flatten + 7-class linear head
#下面记得注释掉cnn中的对应部分
#optimizer = optim.SGD(model.parameters(),lr=LR,momentum=0.9)
#optim.Adam(model.parameters())
#criterion = nn.CrossEntropyLoss()
#train_loss = []
#train_ac = []
#vaild_loss = []
#vaild_ac = []
#y_pred = []
def main():  # uses the same custom data loader as the CNN version
    """Build the face datasets, train the ResNet model, and save it."""
    # Instantiate the datasets (training and verification splits).
    train_dataset = FaceDataset(root='D:\\pytorch\\DataFiles\\face-main\\face_images\\train_set')
    val_dataset = FaceDataset(root='D:\\pytorch\\DataFiles\\face-main\\face_images\\verify_set')
    # Hyper-parameters; tune freely.
    hyper_params = {'batch_size': 128, 'epochs': 51, 'learning_rate': 0.1, 'wt_decay': 0}
    model = train(train_dataset, val_dataset, **hyper_params)
    # Persist the whole model object.
    torch.save(model, 'D:\\pytorch\\DataFiles\\face-main\\model\\model_resnet.pkl')


if __name__ == '__main__':
    main()
而对于VGG模型,使用同样的方法修改,代码可以成功运行,但训练一开始就立即收敛,在测试集和训练集上的准确率均固定在25%不变,损失也不变。很显然这是不如人意的。尝试修改了一下模型的本体(增加了一些vgg_block,也增加了一些全连接层),也改变过超参数(batch_size、learning_rate等等),但均无任何效果。希望有人可以解决这个问题。修改部分代码还是附上
#BATCH_SIZE = 64
#LR = 0.001
EPOCH = 1  # NOTE(review): not used by the train() below, which takes `epochs` as a parameter
CUDA=torch.cuda.is_available()  # True when a CUDA-capable GPU is present
DEVICE = torch.device("cuda" if CUDA else "cpu")  # device the model (and batches) should live on
class VGG(nn.Module):
    """Flatten layer (named VGG in the original post): keeps the batch
    dimension and collapses everything else into one dimension."""
    def __init__(self, *args):
        super(VGG, self).__init__()

    def forward(self, x):
        batch_size = x.shape[0]
        return x.view(batch_size, -1)
def vgg_block(num_convs, in_channels, out_channels):
    """Return a VGG stage: `num_convs` 3x3 conv+ReLU pairs, then a 2x2 max-pool.

    The first convolution maps in_channels -> out_channels; the remaining
    ones keep out_channels. The final max-pool halves the spatial size.
    """
    layers = []
    channels = in_channels
    for _ in range(num_convs):
        layers.append(nn.Conv2d(channels, out_channels, kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        channels = out_channels
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))  # halves H and W
    return nn.Sequential(*layers)
# Each tuple is (num_convs, in_channels, out_channels) for one vgg_block.
conv_arch = ((2, 1, 32), (3, 32, 64), (3, 64, 128),(3,128,256),) # why 256 pairs with 2304: see fc_features below
#conv_arch = ((2, 1, 32), (3, 32, 64), (3, 64, 128)) # matches fc_features = 128*6*6 = 4608
# (stale note from the 224x224 VGG reference: 5 blocks halve H/W 5 times, 224/32 = 7;
# it does not apply to this 4-block configuration)
fc_features =2304 # 2304 = 256 * 3 * 3: four blocks halve H/W four times, so a
                  # 48x48 input shrinks to 3x3 with 256 channels — assumes the
                  # face images are 48x48, TODO confirm against the dataset loader
#fc_features = 128 * 6* 6 # c * w * h; 128 is the channel count of the last conv block
#fc_hidden_units = 8192 # arbitrary
fc_hidden_units = 4096 # arbitrary width of the hidden FC layers
def vgg(conv_arch, fc_features, fc_hidden_units):
    """Build a VGG-style network: conv stages from `conv_arch` followed by a
    fully-connected classifier head with 7 outputs (one per emotion).

    Args:
        conv_arch: iterable of (num_convs, in_channels, out_channels)
            tuples, one per vgg_block.
        fc_features: flattened feature count entering the first FC layer
            (channels * H * W after the last conv block).
        fc_hidden_units: width of the hidden FC layers.

    Returns:
        nn.Sequential model.

    Fix: the original head stacked seven Linear layers (with three Dropouts
    in between) before the classifier. Trained with plain SGD at lr=0.1,
    such a deep unnormalized MLP immediately collapses to a constant
    prediction — exactly the reported symptom of accuracy pinned at chance
    level with a flat loss. The standard two-hidden-layer VGG head below
    keeps the network trainable; lowering the learning rate (e.g. 0.01)
    helps further.
    """
    net = nn.Sequential()
    # Convolutional stages: each vgg_block halves the spatial size.
    for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):
        net.add_module("vgg_block_" + str(i+1), vgg_block(num_convs, in_channels, out_channels))
    # Classifier head: flatten, two hidden FC layers with dropout, 7-way output.
    net.add_module("fc", nn.Sequential(
        VGG(),  # flatten to (batch, fc_features)
        nn.Linear(fc_features, fc_hidden_units),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(fc_hidden_units, fc_hidden_units),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(fc_hidden_units, 7),
    ))
    return net
# Build the VGG model once at module level and move it to the chosen device.
my_vgg = vgg(conv_arch, fc_features, fc_hidden_units)
my_vgg.to(DEVICE)
def train(train_dataset, val_dataset, batch_size, epochs, learning_rate, wt_decay):
    """Train the module-level `my_vgg` model on `train_dataset`.

    Args:
        train_dataset: dataset yielding (image, emotion_label) pairs.
        val_dataset: held-out dataset used for the periodic accuracy check.
        batch_size: mini-batch size for the DataLoader.
        epochs: number of full passes over the training data.
        learning_rate: SGD learning rate.
        wt_decay: SGD weight decay (L2 regularization).

    Returns:
        The trained model.
    """
    # Fix: shuffle the training batches each epoch (the original iterated
    # in file order every epoch, hurting SGD convergence).
    train_loader = data.DataLoader(train_dataset, batch_size, shuffle=True)
    model = my_vgg  # the VGG model built above (already moved to DEVICE)
    # Loss function.
    loss_function = nn.CrossEntropyLoss()
    # Optimizer.
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=wt_decay)
    # Learning-rate decay (disabled in the original post).
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.8)
    for epoch in range(epochs):
        loss_rate = 0
        # scheduler.step()  # learning-rate decay
        model.train()  # back to training mode after any eval() below
        for images, emotion in train_loader:
            # Fix: my_vgg lives on DEVICE but the batches were never moved
            # there, which raises a device-mismatch error whenever CUDA is
            # available.
            images = images.to(DEVICE)
            emotion = emotion.to(DEVICE)
            optimizer.zero_grad()
            # Fix: call the module itself rather than model.forward().
            output = model(images)
            loss_rate = loss_function(output, emotion)
            loss_rate.backward()
            optimizer.step()
        # NOTE: this prints the loss of the *last* batch of the epoch only.
        print('After {} epochs , the loss_rate is : '.format(epoch+1), loss_rate.item())
        if epoch % 5 == 0:
            model.eval()  # evaluation mode: disables dropout, uses BN running stats
            acc_train = validate(model, train_dataset, batch_size)
            acc_val = validate(model, val_dataset, batch_size)
            print('After {} epochs , the acc_train is : '.format(epoch+1), acc_train)
            print('After {} epochs , the acc_val is : '.format(epoch+1), acc_val)
    return model