- 🍨 This post is a study log from the 🔗 365-Day Deep Learning Training Camp
- 🍖 Original author: K同学啊
Preface
Requirements:
1. Learn how to write a complete deep learning program
2. Manually derive the computations of the convolution and pooling layers
Environment:
- Language: Python 3.10
- IDE: PyCharm
- Deep learning libraries:
1. torch == 2.1.0
2. torchvision == 0.16.0
I. Preparation
1. Set up the GPU
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torchvision
# Select the device: use the GPU if available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
Output:
cuda
2. Load the data
Code:
#---------------- Load the data ----------------
#---- Use torchvision.datasets to download CIFAR-10 and split it into training and test sets ----
train_ds = torchvision.datasets.CIFAR10('data', train=True, transform=torchvision.transforms.ToTensor(), download=True)   # convert the images to Tensors
test_ds = torchvision.datasets.CIFAR10('data', train=False, transform=torchvision.transforms.ToTensor(), download=True)   # convert the images to Tensors
Output:
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to data\cifar-10-python.tar.gz
100%|██████████| 170498071/170498071 [03:42<00:00, 767970.86it/s]
Extracting data\cifar-10-python.tar.gz to data
Files already downloaded and verified
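Note that ToTensor only scales the pixel values into [0, 1]. A common optional variation, not used in this post, is to also normalize each channel; a minimal sketch, where the mean/std values are commonly quoted CIFAR-10 statistics and are an assumption here:
# Optional (assumption, not used in this post): compose ToTensor with per-channel
# normalization. The mean/std values are commonly quoted CIFAR-10 statistics.
normalize_tf = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465),   # per-channel mean (assumed)
                                     (0.2470, 0.2435, 0.2616)),  # per-channel std (assumed)
])
# train_ds = torchvision.datasets.CIFAR10('data', train=True, transform=normalize_tf, download=True)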
Load the data with a DataLoader and set a basic batch_size:
#---------------------- Load the data with a DataLoader and set the batch_size ----------------------
batch_size = 32

train_dl = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_ds, batch_size=batch_size)

# Take one batch to inspect the data format.
# The shape of the data is [batch_size, channel, height, width]:
# batch_size is set by the user; channel, height, and width are the number of
# channels, the height, and the width of the images.
imgs, labels = next(iter(train_dl))
print(imgs.shape)
Output:
torch.Size([32, 3, 32, 32])
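As a quick sanity check on the labels, the torchvision CIFAR10 dataset exposes the ten class names via its classes attribute, so the integer labels in the batch can be mapped back to readable names:
# Map the integer labels of the batch back to human-readable class names
print(train_ds.classes)                           # ['airplane', 'automobile', ..., 'truck']
print(labels[:5])                                 # e.g. tensor([6, 9, 9, 4, 1]) -- varies per batch
print([train_ds.classes[l] for l in labels[:5]])  # the corresponding class names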
3. Data visualization
import numpy as np

#--------------------------- Data visualization ---------------------------
# Set the figure size to 20 inches wide by 5 inches tall
plt.figure(figsize=(20, 5))
for i, img in enumerate(imgs[:20]):              # iterate over the first 20 images in the batch
    npimg = img.numpy().transpose((1, 2, 0))     # swap axes from (C, H, W) to (H, W, C) for matplotlib
    plt.subplot(2, 10, i+1)                      # split the figure into 2 rows x 10 columns and draw subplot i+1
    plt.imshow(npimg, cmap=plt.cm.binary)        # cmap is ignored for RGB images
    plt.axis('off')
plt.show()
Output: a 2 × 10 grid showing the first 20 images of the batch.
II. Build a simple CNN
import torch.nn.functional as F

#------------------------------- Build a simple CNN -------------------------------
num_classes = 10  # number of image classes

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        # Feature extraction network
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)    # first convolution, 3x3 kernel
        self.pool1 = nn.MaxPool2d(kernel_size=2)        # pooling layer, 2x2 window
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3)   # second convolution, 3x3 kernel
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)  # third convolution, 3x3 kernel
        self.pool3 = nn.MaxPool2d(kernel_size=2)
        # Classification network
        self.fc1 = nn.Linear(512, 256)
        self.fc2 = nn.Linear(256, num_classes)

    # Forward pass
    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = self.pool3(F.relu(self.conv3(x)))
        x = torch.flatten(x, start_dim=1)  # flatten everything except the batch dimension
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
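This is where requirement 2 comes in: the layer sizes can be derived by hand. With no padding and stride 1, a convolution with kernel size K maps a spatial size W to W - K + 1, and a 2x2 max pool with its default stride of 2 maps W to floor(W / 2). A minimal sketch tracing the 32x32 CIFAR-10 input through the network, confirming the 512 input features of fc1:
# Derive the spatial size by hand:
# conv (no padding, stride 1): W_out = W - K + 1
# 2x2 max pool (stride 2):     W_out = W // 2
w = 32            # CIFAR-10 images are 32x32
w = w - 3 + 1     # conv1: 32 -> 30
w = w // 2        # pool1: 30 -> 15
w = w - 3 + 1     # conv2: 15 -> 13
w = w // 2        # pool2: 13 -> 6
w = w - 3 + 1     # conv3: 6 -> 4
w = w // 2        # pool3: 4 -> 2
print(128 * w * w)  # 128 channels * 2 * 2 = 512, matching nn.Linear(512, 256)

# Cross-check with a dummy forward pass (ReLU does not change shapes):
m = Model()
x = torch.zeros(1, 3, 32, 32)
x = m.pool3(m.conv3(m.pool2(m.conv2(m.pool1(m.conv1(x))))))
print(torch.flatten(x, start_dim=1).shape)  # torch.Size([1, 512])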
Load and print the model:
from torchinfo import summary

#------------------------ Load and print the model -------------------
# Move the model to the GPU (all model computation runs on the GPU)
model = Model().to(device)
summary(model)
Output:
=================================================================
Layer (type:depth-idx) Param #
=================================================================
Model --
├─Conv2d: 1-1 1,792
├─MaxPool2d: 1-2 --
├─Conv2d: 1-3 36,928
├─MaxPool2d: 1-4 --
├─Conv2d: 1-5 73,856
├─MaxPool2d: 1-6 --
├─Linear: 1-7 131,328
├─Linear: 1-8 2,570
=================================================================
Total params: 246,474
Trainable params: 246,474
Non-trainable params: 0
=================================================================
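Each count in the summary can be reproduced by hand: a Conv2d layer has (K x K x C_in + 1) x C_out parameters (the +1 is the per-output-channel bias), and a Linear layer has (in_features + 1) x out_features:
# Conv2d params = (kernel_h * kernel_w * in_channels + 1) * out_channels
print((3*3*3  + 1) * 64)    # conv1: 1,792
print((3*3*64 + 1) * 64)    # conv2: 36,928
print((3*3*64 + 1) * 128)   # conv3: 73,856
# Linear params = (in_features + 1) * out_features
print((512 + 1) * 256)      # fc1: 131,328
print((256 + 1) * 10)       # fc2: 2,570
print(1792 + 36928 + 73856 + 131328 + 2570)  # total: 246,474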
III. Train the model
1. Set hyperparameters
#----------- Set hyperparameters -------------------
loss_fn = nn.CrossEntropyLoss()   # loss function
learn_rate = 1e-2                 # learning rate
opt = torch.optim.SGD(model.parameters(), lr=learn_rate)
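Note that nn.CrossEntropyLoss expects raw logits, which is why forward ends with fc2 and no softmax: the loss applies log-softmax internally. A minimal sketch on a made-up batch (the tensors here are illustrative, not from the dataset):
# Illustrative only: 4 samples with random logits over the 10 classes
dummy_logits = torch.randn(4, num_classes)   # raw scores, no softmax applied
dummy_labels = torch.tensor([1, 0, 3, 2])    # made-up ground-truth class indices
print(loss_fn(dummy_logits, dummy_labels))   # a scalar tensor, e.g. tensor(2.4...)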
2. Write the training function
#------------------- Training function -------------------
# Training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)    # size of the training set: 50,000 images
    num_batches = len(dataloader)     # number of batches: 1,563 (50,000 / 32 = 1562.5, rounded up)
    train_loss, train_acc = 0, 0      # initialize training loss and accuracy

    for X, y in dataloader:           # fetch a batch of images and labels
        X, y = X.to(device), y.to(device)

        # Compute the prediction error
        pred = model(X)               # network output (logits)
        loss = loss_fn(pred, y)       # loss between the network output and the true labels

        # Backpropagation
        optimizer.zero_grad()         # reset the gradients
        loss.backward()               # backpropagate
        optimizer.step()              # update the parameters

        # Accumulate accuracy and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches
    return train_acc, train_loss
3. Write the test function
#-------------------- Test function --------------------
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)   # size of the test set: 10,000 images
    num_batches = len(dataloader)    # number of batches: 313 (10,000 / 32 = 312.5, rounded up)
    test_loss, test_acc = 0, 0

    # Disable gradient tracking during evaluation to save memory and computation
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # Compute the loss
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss
4. Run the training
#--------------------------- Run the training ---------------------------
epochs = 10
train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    template = 'Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}'
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss))

print('Done')
Output:
Epoch: 1, Train_acc:14.2%, Train_loss:2.269, Test_acc:21.5%, Test_loss:2.105
Epoch: 2, Train_acc:24.8%, Train_loss:2.016, Test_acc:28.9%, Test_loss:1.913
Epoch: 3, Train_acc:33.2%, Train_loss:1.820, Test_acc:37.5%, Test_loss:1.691
Epoch: 4, Train_acc:40.8%, Train_loss:1.626, Test_acc:43.8%, Test_loss:1.549
Epoch: 5, Train_acc:44.9%, Train_loss:1.515, Test_acc:42.6%, Test_loss:1.577
Epoch: 6, Train_acc:48.8%, Train_loss:1.419, Test_acc:49.4%, Test_loss:1.380
Epoch: 7, Train_acc:52.2%, Train_loss:1.335, Test_acc:52.9%, Test_loss:1.319
Epoch: 8, Train_acc:54.9%, Train_loss:1.264, Test_acc:55.0%, Test_loss:1.261
Epoch: 9, Train_acc:57.7%, Train_loss:1.198, Test_acc:54.9%, Test_loss:1.237
Epoch:10, Train_acc:59.8%, Train_loss:1.142, Test_acc:60.6%, Test_loss:1.122
Done
IV. Visualize the results
import warnings

#-------------------------------- Visualize the results --------------------------------
warnings.filterwarnings("ignore")               # suppress warnings
plt.rcParams['font.sans-serif'] = ['SimHei']    # display Chinese characters correctly
plt.rcParams['axes.unicode_minus'] = False      # display minus signs correctly
plt.rcParams['figure.dpi'] = 100                # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')

plt.show()
Output: two line charts, one of training/test accuracy and one of training/test loss over the 10 epochs.
V. Summary
In this week's exercise I learned how to write a complete deep learning program. Beyond just typing the code and getting it to run end to end, the goal was to understand it thoroughly. I learned a lot about loading data in particular, and I also practiced manually deriving the computations of the convolution and pooling layers.