(超详细)用PyTorch搭建深度学习神经网络并在FashionMNIST上进行分类
最近在整理以前写过的代码,发现有一些元老级别的代码在占用我那小电脑的空间,于是就整理出一些具有代表意义的(框架类似的代码)代码上传到网上便于查看。
注意:要用PyTorch搭建深度学习网络首先你得在你电脑上安装好Python并安装好Pytorch和其他一些需要用到的包
1、首先,在你的编辑器里新建一个py文件,在我这是train.py,然后在建好的文件里导入需要用到的包
import matplotlib.pyplot as plt
import torch # 导入pytorch
from torch import nn, optim, erf # 导入神经网络与优化器对应的类
import torch.nn.functional as F
from torchvision import datasets, transforms # 导入数据集与数据预处理的方法
import os
import time
2、加载数据集。设置好数据预处理操作即transform,然后加载数据集,我是直接用的torchvision里面dataset中的数据集,直接加载就可以,第一次运行的时候会自动帮你下载数据集到指定的文件夹(我是在项目的根目录建了一个‘dataset’的文件夹,国内下载比较慢,可以在控制台中复制出现的下载链接到迅雷下载然后复制到对应的文件夹里),后面就会自动到该文件夹里去找,训练集设置train=True。同时设置好每次输入网络的图片数batch_size,我设置的是64
# Preprocessing: convert images to tensors, then normalize the single grayscale
# channel with mean 0.5 / std 0.5 so pixel values land in [-1, 1].
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
# Download (on first run) the Fashion-MNIST training split into dataset/ and
# wrap it in a DataLoader yielding shuffled batches of 64 images.
trainset = datasets.FashionMNIST('dataset/', download=True, train=True, transform=transform)
train_data_loader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Same for the test split, which doubles as the validation set during training.
testset = datasets.FashionMNIST('dataset/', download=True, train=False, transform=transform)
valid_data_loader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
3、准备工作完成了,就开始编写深度学习网络代码,我这里只简单设置了20层线性连接网络(因为当初作业需要,其实只要设置简单几层线性网络就可以有不错的效果)
class Classifier(nn.Module):
    """Fully connected classifier for 28x28 Fashion-MNIST images.

    Architecture: 784 -> 392 -> 100 -> (17 hidden layers of width 100) -> 10,
    every hidden linear layer followed by BatchNorm1d and ReLU.  The forward
    pass returns per-class log-probabilities (log-softmax over 10 classes).
    """

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(784, 392), nn.BatchNorm1d(392), nn.ReLU(),
            nn.Linear(392, 100), nn.BatchNorm1d(100), nn.ReLU(),
        ]
        # 17 identical 100->100 blocks (the original built them in two
        # consecutive loops of 13 and 4 iterations; the sequence is the same).
        for _ in range(17):
            layers += [nn.Linear(100, 100), nn.BatchNorm1d(100), nn.ReLU()]
        # net.append(nn.AlphaDropout(0.5))
        layers.append(nn.Linear(100, 10))
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        # Flatten (batch, 1, 28, 28) images into (batch, 784) vectors.
        flat = x.view(x.shape[0], -1)
        logits = self.main(flat)
        return F.log_softmax(logits, dim=1)
4、设置好显卡,把数据传输到GPU上进行训练(若电脑带有GPU)
# Use the first GPU when CUDA is available, otherwise fall back to the CPU.
# (The original hard-coded "cuda:0", which fails at runtime on GPU-less machines.)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
5、实例化网络对象,并设置好损失函数、优化器和训练批次
# Instantiate the Classifier defined above and move it to the chosen device.
model = Classifier().to(device)
# Negative log-likelihood loss — pairs with the model's log_softmax output.
loss_criterion = nn.NLLLoss()
# Adagrad optimizer with learning rate 0.003.
# NOTE(review): the original comment said "Adam", but the code uses Adagrad —
# confirm which optimizer was actually intended.
optimizer = optim.Adagrad(model.parameters(), lr=0.003)
# Number of full passes over the training set; larger means longer training.
epochs = 50
6、开始训练并打印损失函数和模型准确率
# Per-epoch metric history, consumed by the plotting helpers after training.
acc_t = []
acc_v = []
loss_t = []
loss_v = []
best_acc = 0.0
for epoch in range(epochs):
    epoch_start = time.time()
    print("Epoch: {}/{}".format(epoch + 1, epochs))
    # ---- training pass ----
    model.train()
    train_loss, train_acc = 0.0, 0.0
    valid_loss, valid_acc = 0.0, 0.0
    t = 0   # number of training batches seen this epoch
    t1 = 0  # number of validation batches seen this epoch
    for inputs, labels in train_data_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        batch_loss = loss_criterion(outputs, labels)
        train_loss += batch_loss.item()
        batch_loss.backward()
        optimizer.step()
        _, predictions = torch.max(outputs.data, 1)
        train_acc += (torch.sum(predictions == labels) / outputs.shape[0]).item()
        t += 1
    # ---- validation pass (no gradient bookkeeping) ----
    with torch.no_grad():
        model.eval()
        for inputs, labels in valid_data_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            batch_loss = loss_criterion(outputs, labels)
            valid_loss += batch_loss.item()
            _, predictions = torch.max(outputs.data, 1)
            valid_acc += (torch.sum(predictions == labels) / outputs.shape[0]).item()
            t1 += 1
    # Average the accumulated per-batch metrics over the batch counts.
    train_loss = train_loss / t
    train_acc = train_acc / t
    valid_loss = valid_loss / t1
    valid_acc = valid_acc / t1
    acc_t.append(train_acc)
    acc_v.append(valid_acc)
    loss_t.append(train_loss)
    loss_v.append(valid_loss)
    epoch_end = time.time()
    print("Epoch : {:03d}, Training: Loss: {:.4f}, Accuracy: {:.4f}%, "
          "\t\tValidation : Loss : {:.4f}, Accuracy: {:.4f}%, Time: {:.4f}s".format(
              epoch + 1, train_loss, train_acc * 100, valid_loss, valid_acc * 100,
              epoch_end - epoch_start))
7、保存好训练的模型
# Persist weights after an epoch: best-so-far and last-epoch checkpoints.
# NOTE(review): save_model rebinds its best_acc parameter locally, so the
# module-level best_acc here stays 0.0 — confirm the best-model bookkeeping.
save_model(valid_acc, best_acc, 'model', 'fashionResnet_best_model.pth', 'fashionResnet_last_model.pth', epochs,
epoch)
我这里是调用了自己写的一个函数,需要在训练前面写好
# 保存模型
def save_model(now_acc, best_acc, save_path, best_model_name, last_model_name, epochs, epoch):
# 保存最好的模型权重
if now_acc > best_acc:
folder = save_path
if not os.path.exists(folder):
os.mkdir(save_path)
best_acc = valid_acc
print(f"save best model, 第{epoch + 1}轮")
torch.save(model.state_dict(), save_path + '/' + best_model_name)
# 保存最后一轮的权重文件
if epoch == epochs - 1:
torch.save(model.state_dict(), save_path + '/' + last_model_name)
8、绘制损失函数和准确率图
# Plot accuracy and loss curves collected over the whole training run.
matplot_acc(acc_t, acc_v)
matplot_loss(loss_t, loss_v)
同样这里是调用了自己写的绘图函数
# Make matplotlib render Chinese text (SimHei font) and minus signs correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Plotting helpers are defined below.
def matplot_loss(train_loss, val_loss):
    """Plot per-epoch training vs validation loss and display the figure."""
    for series, tag in ((train_loss, 'train_loss'), (val_loss, 'val_loss')):
        plt.plot(series, label=tag)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.title("训练集和验证集loss值得对比图")
    plt.legend(loc='best')
    plt.show()
def matplot_acc(train_acc, val_acc):
    """Plot per-epoch training vs validation accuracy and display the figure."""
    curves = {'train_acc': train_acc, 'val_acc': val_acc}
    for tag, series in curves.items():
        plt.plot(series, label=tag)
    plt.ylabel('acc')
    plt.xlabel('epoch')
    plt.title("训练集和验证集acc值得对比图")
    plt.legend(loc='best')
    plt.show()
9、以上就是编写深度学习神经网络并进行训练的基本流程,其实任何复杂的网络都是按这样的套路走的,只是各个部分复杂程度不一样而已,接下来就是加载已经训练好的网络模型对数据进行测试。(我是新建了一个test.py文件进行测试,所以构建网络的代码需要重新编写,当然从train.py里面导入也是可以的)
# Use the first GPU when CUDA is available, otherwise fall back to the CPU.
# (The original hard-coded "cuda:0", which fails on machines without a GPU.)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Preprocessing: convert to tensor and normalize grayscale values into [-1, 1]
# — must match the transform used during training.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
# Fashion-MNIST test split; shuffling is harmless here since only overall
# accuracy is measured.
testset = datasets.FashionMNIST('dataset/', download=True, train=False, transform=transform)
valid_data_loader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
class Classifier(nn.Module):
    """Fully connected Fashion-MNIST classifier (same network as in train.py)."""

    def __init__(self):
        super().__init__()

        def block(n_in, n_out):
            # One hidden unit: linear layer + batch norm + ReLU.
            return [nn.Linear(n_in, n_out), nn.BatchNorm1d(n_out), nn.ReLU()]

        modules = block(784, 392) + block(392, 100)
        # The original appended these 100->100 blocks in two loops (13 + 4);
        # a single loop yields the identical module sequence.
        for _ in range(13 + 4):
            modules += block(100, 100)
        # net.append(nn.AlphaDropout(0.5))
        modules.append(nn.Linear(100, 10))
        self.main = nn.Sequential(*modules)

    def forward(self, x):
        # Flatten to (batch, 784) and return per-class log-probabilities.
        return F.log_softmax(self.main(x.view(x.shape[0], -1)), dim=1)
model = Classifier().to(device)
# Load the best checkpoint produced by train.py.  map_location lets the
# weights load on a CPU-only machine too.  Bug fix: the original path
# "model/fashion_best_model.pth" did not match the file name the training
# script actually saves ('fashionResnet_best_model.pth').
model.load_state_dict(torch.load("model/fashionResnet_best_model.pth", map_location=device))
# Evaluation mode: BatchNorm uses its running statistics instead of batch stats.
model.eval()
n = len(testset)
nu = 0  # number of correctly classified samples
for i in range(n):
    x, y = testset[i][0], testset[i][1]
    # Add a batch dimension and move to the device.  The original wrapped x in
    # the deprecated Variable(..., requires_grad=True) — a name that was never
    # imported (NameError) — and then redundantly called torch.tensor(x) on an
    # existing tensor; neither is needed for inference.
    x = torch.unsqueeze(x, dim=0).float().to(device)
    with torch.no_grad():
        pred = model(x)
    if torch.argmax(pred[0]).item() == y:
        nu += 1
acc = nu * 100 / n
print("模型准确率:" + str(acc) + "%")