Deep Learning Week 20 — Implementing Residual Networks and ResNet50V2 in PyTorch

Table of Contents
Deep Learning Week 20 — Implementing Residual Networks and ResNet50V2 in PyTorch
1. Foreword
2. My environment
3. Code reproduction
3.1 Configuring the dataset
3.2 Building the model
4. Model application and evaluation
4.1 Writing the training function
4.2 Writing the test function
4.3 Training the model
4.4 Visualizing the results

1. Foreword

This week we use PyTorch to reproduce the ResNet50V2 residual network and train it on a four-class bird-photo dataset.

2. My environment

  • OS: Windows 10
  • Language environment: Python 3.8.0
  • IDE: PyCharm 2023.2.3
  • Deep learning framework: PyTorch
  • GPU: RTX 3060, 8 GB VRAM
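
As a quick sanity check (a minimal sketch; adjust to your own setup), the following confirms the installed PyTorch version and whether the GPU is visible:

import torch

print("PyTorch version:", torch.__version__)          # e.g. 1.x / 2.x
print("CUDA available :", torch.cuda.is_available())  # True if the RTX 3060 is usable
if torch.cuda.is_available():
    print("GPU name       :", torch.cuda.get_device_name(0))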

3. Code reproduction

3.1 Configuring the dataset

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings
 
warnings.filterwarnings("ignore")             # suppress warnings
 
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_dir = "/home/mw/input/data7619//bird_photos/bird_photos"

data_dir = pathlib.Path(data_dir)
 
# Each sub-directory of data_dir holds one class; use the directory name as the class name
data_paths  = list(data_dir.glob('*'))
classeNames = [path.name for path in data_paths]
print(classeNames)
['Bananaquit', 'Black Throated Bushtiti', 'Cockatoo', 'Black Skimmer']
image_count = len(list(data_dir.glob('*/*')))
print("Total number of images:", image_count)

Total number of images: 565
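
To see how these 565 images are distributed over the four classes, a short helper like the one below can count the files per sub-directory (a small sketch, not part of the original post):

from collections import Counter

# Count images under each class sub-directory
per_class = Counter(p.parent.name for p in data_dir.glob('*/*'))
for cls, n in sorted(per_class.items()):
    print(f"{cls}: {n} images")
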
Data augmentation
import torchvision.transforms as transforms
from torchvision import datasets

# Apply the data-augmentation methods covered in earlier weeks
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize every input image to a common size
    transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.RandomVerticalFlip(),  # random vertical flip
    transforms.RandomRotation(15),  # random rotation between -15 and +15 degrees
    transforms.ToTensor(),  # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
])

test_transform = transforms.Compose([
    transforms.Resize([224, 224]),  # resize every input image to a common size
    transforms.ToTensor(),  # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(  # standardize each channel so the model converges more easily
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])  # the standard ImageNet channel statistics
])

total_data = datasets.ImageFolder(str(data_dir), transform=train_transforms)
print(total_data.class_to_idx)
{'Bananaquit': 0, 'Black Skimmer': 1, 'Black Throated Bushtiti': 2, 'Cockatoo': 3}
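
When we make predictions later, it is handy to invert class_to_idx so a predicted index can be mapped back to a class name (a small sketch using the total_data object defined above):

# Invert the class_to_idx mapping: predicted index -> class name
idx_to_class = {v: k for k, v in total_data.class_to_idx.items()}
print(idx_to_class)  # e.g. {0: 'Bananaquit', 1: 'Black Skimmer', ...}
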
Splitting into training and test sets
# Split the dataset into training and test sets by ratio
train_size = int(0.8 * len(total_data))  # training set: 80% of the data
test_size = len(total_data) - train_size  # test set: the remaining 20%
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])  # random split

# Note: both subsets come from total_data, so they share train_transforms;
# the test_transform defined above is not applied here. For a stricter evaluation,
# build a second ImageFolder with test_transform and split by index instead.

# Define the batch size
batch_size = 32

# DataLoader for the training set
train_dl = torch.utils.data.DataLoader(
    train_dataset,  # training data
    batch_size=batch_size,  # 32 samples per batch
    shuffle=True,  # shuffle the data
    num_workers=0  # number of worker processes
)

# DataLoader for the test set
test_dl = torch.utils.data.DataLoader(
    test_dataset,  
    batch_size=batch_size,  
    shuffle=True,  
    num_workers=0  
)


for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape) 
    print("Shape of y: ", y.shape, y.dtype)  
    break  
Shape of X [N, C, H, W]:  torch.Size([32, 3, 224, 224])
Shape of y:  torch.Size([32]) torch.int64
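
To eyeball a few augmented samples, an optional sketch using torchvision.utils.make_grid (not part of the original walkthrough) is shown below:

import matplotlib.pyplot as plt
import torchvision

# Take one batch from the training loader and display it as an image grid
imgs, labels = next(iter(train_dl))
grid = torchvision.utils.make_grid(imgs[:8], nrow=4)  # first 8 images of the batch
plt.figure(figsize=(8, 4))
plt.imshow(grid.permute(1, 2, 0))  # CHW -> HWC for matplotlib
plt.title(" | ".join(total_data.classes[int(l)] for l in labels[:8]))
plt.axis("off")
plt.show()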

3.2 Building the model

We first define the pre-activation residual block (Block2) and a stack of such blocks (Stack2), then assemble them into the ResNet50V2 model. Compared with the original ResNet, the V2 design applies BatchNorm and ReLU before each convolution (pre-activation) and adds the shortcut directly to the un-activated residual branch.

import torch.nn.functional as F  # needed for F.relu in the forward pass

class Block2(nn.Module):
    """Pre-activation bottleneck residual block (ResNet v2 style)."""
    def __init__(self, in_channels, filters, stride=1, conv_shortcut=False):
        super(Block2, self).__init__()
        self.conv_shortcut = conv_shortcut
        # Pre-activation: BN + ReLU applied before the convolutions
        self.preact_bn = nn.BatchNorm2d(in_channels)
        self.preact_relu = nn.ReLU(inplace=True)
        
        if conv_shortcut:
            # Projection shortcut when the channel count or spatial size changes
            self.shortcut = nn.Conv2d(in_channels, 4 * filters, kernel_size=1, stride=stride)
        else:
            self.shortcut = nn.Identity()
            if stride > 1:
                # Identity shortcut with spatial downsampling only
                self.shortcut = nn.MaxPool2d(kernel_size=1, stride=stride)
        
        self.conv1 = nn.Conv2d(in_channels, filters, kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(filters)
        
        self.conv2 = nn.Conv2d(filters, filters, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(filters)
        
        self.conv3 = nn.Conv2d(filters, 4 * filters, kernel_size=1)

    def forward(self, x):
        shortcut = self.shortcut(x)
        x = self.preact_bn(x)
        x = self.preact_relu(x)
        
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)
        
        x = self.conv3(x)
        x += shortcut  # residual addition; no ReLU afterwards (v2 design)
        return x

class Stack2(nn.Module):
    """A stack of Block2 units; the first block projects/downsamples, the rest keep the shape."""
    def __init__(self, in_channels, filters, blocks, stride1=2):
        super(Stack2, self).__init__()
        self.blocks = nn.ModuleList()
        self.blocks.append(Block2(in_channels, filters, stride=stride1, conv_shortcut=True))
        for _ in range(1, blocks):
            self.blocks.append(Block2(4 * filters, filters))
            
    def forward(self, x):
        for block in self.blocks:
            x = block(x)
        return x

class ResNet50V2(nn.Module):
    def __init__(self, num_classes=1000, include_top=True):
        super(ResNet50V2, self).__init__()
        self.include_top = include_top
        
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        
        self.conv2 = Stack2(64, 64, 3, stride1=1)
        self.conv3 = Stack2(256, 128, 4)
        self.conv4 = Stack2(512, 256, 6)
        self.conv5 = Stack2(1024, 512, 3, stride1=1)
        
        self.post_bn = nn.BatchNorm2d(2048)
        self.post_relu = nn.ReLU(inplace=True)
        
        if include_top:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(2048, num_classes)
        
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        
        x = self.post_bn(x)
        x = self.post_relu(x)
        
        if self.include_top:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.fc(x)
        
        return x

if __name__ == '__main__':
    model = ResNet50V2(num_classes=1000)
    print(model)
ResNet50V2(
  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu): ReLU(inplace=True)
  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (conv2): Stack2(
    (blocks): ModuleList(
      (0): Block2(
        (preact_bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))
        (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))
      )
      (1): Block2(
        (preact_bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))
      )
      (2): Block2(
        (preact_bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))
      )
    )
  )
  (conv3): Stack2(
    (blocks): ModuleList(
      (0): Block2(
        (preact_bn): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2))
        (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      )
      (1): Block2(
        (preact_bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      )
      (2): Block2(
        (preact_bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      )
      (3): Block2(
        (preact_bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1))
      )
    )
  )
  (conv4): Stack2(
    (blocks): ModuleList(
      (0): Block2(
        (preact_bn): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2))
        (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      )
      (1): Block2(
        (preact_bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      )
      (2): Block2(
        (preact_bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      )
      (3): Block2(
        (preact_bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      )
      (4): Block2(
        (preact_bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      )
      (5): Block2(
        (preact_bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1))
      )
    )
  )
  (conv5): Stack2(
    (blocks): ModuleList(
      (0): Block2(
        (preact_bn): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1))
        (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1))
      )
      (1): Block2(
        (preact_bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1))
      )
      (2): Block2(
        (preact_bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (preact_relu): ReLU(inplace=True)
        (shortcut): Identity()
        (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1))
      )
    )
  )
  (post_bn): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (post_relu): ReLU(inplace=True)
  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
  (fc): Linear(in_features=2048, out_features=1000, bias=True)
)
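
Before wiring the model into training, a quick forward pass on a dummy batch (a sanity-check sketch, not in the original post) confirms the output shape:

# Feed a dummy batch through a fresh 4-class model to verify the output shape
dummy = torch.randn(2, 3, 224, 224)
with torch.no_grad():
    out = ResNet50V2(num_classes=4)(dummy)
print(out.shape)  # expected: torch.Size([2, 4])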

4. Model application and evaluation

4.1 Writing the training function

def train(dataloader, model, loss_fn, optimizer):
    model.train()
    train_loss, train_acc = 0, 0
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)
        
        # Forward pass: compute the predictions and the loss
        pred = model(X)
        loss = loss_fn(pred, y)
        
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        train_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()
            
    train_acc  /= len(dataloader.dataset)
    train_loss /= len(dataloader)

    return train_acc, train_loss

4.2 Writing the test function

def test(dataloader, model, loss_fn):
    model.eval()
    test_loss, test_acc = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            
            pred = model(X)
            loss = loss_fn(pred, y)
            
            test_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
            test_loss += loss.item()
 
    test_acc  /= len(dataloader.dataset)
    test_loss /= len(dataloader)

    return test_acc, test_loss

4.3 Training the model

import copy

# Instantiate the model for our 4-class bird dataset and move it to the selected device
model = ResNet50V2(num_classes=len(classeNames)).to(device)

optimizer  = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn    = nn.CrossEntropyLoss()  # loss function
 
epochs = 10
 
train_loss = []
train_acc = []
test_loss = []
test_acc = []
 
best_acc = 0  # best test accuracy so far, used to select the best model
 
for epoch in range(epochs):
    # Update the learning rate (only needed with a hand-written schedule)
    # adjust_learning_rate(optimizer, epoch, learn_rate)
 
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
    # scheduler.step()  # update the learning rate (when using a built-in scheduler)
 
    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)
 
    # Keep a copy of the best model so far in best_model
    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        best_model = copy.deepcopy(model)
 
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
 
    # Get the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']
 
    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss,
                          epoch_test_acc * 100, epoch_test_loss, lr))
 
# Save the weights of the best model to disk
PATH = './best_model.pth'  # file name for the saved parameters
torch.save(best_model.state_dict(), PATH)
 
print('Done')

Epoch: 1, Train_acc:51.8%, Train_loss:4.475, Test_acc:27.4%, Test_loss:5.794, Lr:1.00E-04
Epoch: 2, Train_acc:73.2%, Train_loss:1.766, Test_acc:30.1%, Test_loss:4.920, Lr:1.00E-04
Epoch: 3, Train_acc:79.0%, Train_loss:0.846, Test_acc:54.0%, Test_loss:2.211, Lr:1.00E-04
Epoch: 4, Train_acc:81.0%, Train_loss:0.567, Test_acc:76.1%, Test_loss:0.809, Lr:1.00E-04
Epoch: 5, Train_acc:85.8%, Train_loss:0.428, Test_acc:78.8%, Test_loss:0.761, Lr:1.00E-04
Epoch: 6, Train_acc:83.0%, Train_loss:0.473, Test_acc:79.6%, Test_loss:0.725, Lr:1.00E-04
Epoch: 7, Train_acc:83.6%, Train_loss:0.530, Test_acc:77.9%, Test_loss:0.620, Lr:1.00E-04
Epoch: 8, Train_acc:85.0%, Train_loss:0.479, Test_acc:78.8%, Test_loss:0.830, Lr:1.00E-04
Epoch: 9, Train_acc:85.6%, Train_loss:0.434, Test_acc:82.3%, Test_loss:0.824, Lr:1.00E-04
Epoch:10, Train_acc:88.5%, Train_loss:0.396, Test_acc:70.8%, Test_loss:0.865, Lr:1.00E-04
Done
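
To reuse the saved weights later (a minimal sketch, assuming the ResNet50V2 class, test_dl and loss_fn defined above are still available), rebuild the model, load best_model.pth and evaluate once more:

# Rebuild the architecture, load the saved weights, and re-run the evaluation
loaded_model = ResNet50V2(num_classes=len(classeNames)).to(device)
loaded_model.load_state_dict(torch.load('./best_model.pth', map_location=device))
loaded_model.eval()

acc, loss = test(test_dl, loaded_model, loss_fn)
print(f"Reloaded best model -> Test_acc: {acc*100:.1f}%, Test_loss: {loss:.3f}")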

4.4 Visualizing the results

import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")               # suppress warnings
plt.rcParams['font.sans-serif']    = ['SimHei'] # font that can render Chinese labels
plt.rcParams['axes.unicode_minus'] = False      # render the minus sign correctly
plt.rcParams['figure.dpi']         = 100        # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)

plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

[Figure: training/validation accuracy (left) and loss (right) curves over the 10 training epochs]
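
As a final illustration (the image path below is hypothetical; replace it with any photo from the dataset), the best model can classify a single image. Since both splits were built with train_transforms, the same Resize + ToTensor preprocessing is used here:

from PIL import Image

# Hypothetical example image path
img_path = str(data_dir / 'Cockatoo' / 'example.jpg')

# Use the same preprocessing the model was trained with (Resize + ToTensor)
preprocess = transforms.Compose([transforms.Resize([224, 224]), transforms.ToTensor()])

img = Image.open(img_path).convert('RGB')
x = preprocess(img).unsqueeze(0).to(device)  # shape [1, 3, 224, 224]

best_model.eval()
with torch.no_grad():
    pred_idx = best_model(x).argmax(1).item()
print("Predicted class:", idx_to_class[pred_idx])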
