Chapter 6: Fine-tuning a Pretrained VGG Network

import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report
import matplotlib.pyplot as plt
import seaborn as sns
import hiddenlayer as hl
import torch
import torch.nn as nn
from torch.optim import SGD,Adam
import torch.utils.data as Data
from torchvision import models
from torchvision import transforms
from torchvision.datasets import ImageFolder
vgg16 = models.vgg16(pretrained=True)   # load VGG16 with ImageNet-pretrained weights
vgg = vgg16.features                    # keep only the convolutional feature extractor
for param in vgg.parameters():
    param.requires_grad_(False)         # freeze the feature-extractor weights
class MyVggModel(nn.Module):
    def __init__(self):
        super(MyVggModel,self).__init__()
        # frozen VGG16 convolutional layers used as the feature extractor
        self.vgg = vgg
        # new, trainable classifier head for the 10 monkey species
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7,256),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(256,10),
            nn.Softmax(dim=1)
        )
    def forward(self, x):
        x = self.vgg(x)                 # feature maps of shape (batch, 512, 7, 7)
        x = x.view(x.size(0), -1)       # flatten to (batch, 25088)
        output = self.classifier(x)
        return output
## Print the network structure
Myvggc = MyVggModel()
print(Myvggc)
MyVggModel(
  (vgg): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace=True)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace=True)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace=True)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace=True)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace=True)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace=True)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace=True)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace=True)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=256, bias=True)
    (1): ReLU()
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=256, out_features=10, bias=True)
    (4): Softmax(dim=1)
  )
)
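Before moving on, a quick sanity check (a small sketch, not part of the original text) confirms that only the new classifier head is trainable while the VGG16 features stay frozen:

# Count frozen vs. trainable parameters of the assembled model
n_frozen = sum(p.numel() for p in Myvggc.vgg.parameters() if not p.requires_grad)
n_trainable = sum(p.numel() for p in Myvggc.parameters() if p.requires_grad)
print("Frozen feature parameters:", n_frozen)          # the 14,714,688 convolutional weights of VGG16
print("Trainable classifier parameters:", n_trainable) # 25088*256 + 256 + 256*10 + 10 = 6,425,354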
train_data_transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                            transforms.RandomHorizontalFlip(),
                                            transforms.ToTensor(),
                                            transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])])
val_data_transforms = transforms.Compose([
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
                    ])
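Normalize maps each channel value as (x − mean[c]) / std[c]; the visualization code further below inverts this with x·std + mean before displaying images. A minimal numeric check:

# One RGB pixel after ToTensor (i.e. already scaled to [0, 1]), normalized by hand
x = torch.tensor([0.5, 0.5, 0.5])
mean = torch.tensor([0.485, 0.456, 0.406])
std = torch.tensor([0.229, 0.224, 0.225])
print((x - mean) / std)    # tensor([0.0655, 0.1964, 0.4178])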
## Load the training-set images
train_data_dir = "10-monkey-species/training"
train_data = ImageFolder(train_data_dir, transform=train_data_transforms)
train_data_loader = Data.DataLoader(train_data,batch_size=32,shuffle=True,num_workers=2)
## Load the validation set
val_data_dir = "10-monkey-species/validation"
val_data = ImageFolder(val_data_dir, transform=val_data_transforms)
val_data_loader = Data.DataLoader(val_data,batch_size=32,shuffle=True,num_workers=2)
print("训练样本数:",len(train_data.targets))
print("验证集样本数:",len(val_data.targets))
训练样本数: 1097
验证集样本数: 272
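ImageFolder derives the integer labels from the sorted sub-folder names; the mapping can be inspected through class_to_idx, which helps interpret the numeric titles in the figure below (for this dataset the sub-folders are typically named n0 through n9):

# Inspect the folder-name -> label-index mapping built by ImageFolder
print(train_data.class_to_idx)   # e.g. {'n0': 0, 'n1': 1, ..., 'n9': 9}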
for step,(b_x,b_y) in enumerate(train_data_loader):
    if step > 0:
        break
    ## Visualize the images of one batch from the training set
    mean = np.array([0.485,0.456,0.406])
    std = np.array([0.229,0.224,0.225])
    '''
    The mean array [0.485, 0.456, 0.406] holds the per-channel (RGB) means and the std array
    [0.229, 0.224, 0.225] the per-channel standard deviations used by Normalize: subtracting
    the mean centers the data and dividing by the std scales it to roughly unit variance.
    They are needed again here to undo the normalization before displaying the images.
    '''
    plt.figure(figsize=(12,6))
    # figure() opens a new figure; figsize=(12,6) sets its width and height in inches.

    for ii in np.arange(len(b_y)):
        ## Create a 4x8 grid of subplots and select subplot ii+1 as the current axes
        plt.subplot(4,8,ii+1)
        '''Convert the PyTorch tensor to a NumPy array and use .transpose((1,2,0)) to change the
        layout from (channels, height, width) to (height, width, channels),
        which is the format expected by Matplotlib's .imshow()'''
        image = b_x[ii,:,:,:].numpy().transpose((1,2,0))
        image = std * image + mean ## undo the normalization (de-standardize)
        image = np.clip(image,0,1) ## keep the pixel values within [0, 1]
        plt.imshow(image) ## show the image in the current subplot
        plt.title(b_y[ii].data.numpy()) ## use the corresponding label in b_y as the subplot title
        plt.axis("off") ## hide the axes
    plt.subplots_adjust(hspace=0.3) ## adjust the spacing between subplots

[Figure: one batch of 32 training images in a 4×8 grid, each titled with its class index]
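A more compact way to build essentially the same figure is torchvision's make_grid (a sketch, not part of the original code; it reuses b_x, mean, and std from the loop above):

from torchvision.utils import make_grid
grid = make_grid(b_x, nrow=8)                            # arrange the batch into one image grid
grid = grid.numpy().transpose((1, 2, 0)) * std + mean    # undo the normalization
plt.figure(figsize=(12, 6))
plt.imshow(np.clip(grid, 0, 1))
plt.axis("off")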

## Define the optimizer
optimizer = torch.optim.Adam(Myvggc.parameters(), lr=0.003)
loss_func = nn.CrossEntropyLoss()
## Record the metrics of the training process
history1 = hl.History()
## Visualize them with a hiddenlayer Canvas
canvas1 = hl.Canvas()
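Since the feature extractor is frozen, it is common (though not required) to hand the optimizer only the parameters that still require gradients; a small optional refinement, sketched below. Note also that the loop as written runs on the CPU; moving Myvggc and each batch to a GPU with .to(device) would speed it up considerably.

# Optional: optimize only the trainable (classifier) parameters
optimizer = torch.optim.Adam(
    filter(lambda p: p.requires_grad, Myvggc.parameters()), lr=0.003)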

# Train the model iteratively, running 10 epochs over all the data
for epoch in range(10):
    train_loss_epoch = 0  # running training loss
    val_loss_epoch = 0    # running validation loss
    train_corrects = 0    # number of correct training predictions
    val_corrects = 0      # number of correct validation predictions

    # Put the model into training mode
    Myvggc.train()
    # Iterate over the training data loader
    for step, (b_x, b_y) in enumerate(train_data_loader):
        # Forward pass
        output = Myvggc(b_x)
        # Compute the loss
        loss = loss_func(output, b_y)
        # Get the most likely class label predicted by the model
        pre_lab = torch.argmax(output, dim=1)
        # Zero the gradients
        optimizer.zero_grad()
        # Backward pass
        loss.backward()
        # Update the model parameters
        optimizer.step()
        # Accumulate the training loss
        train_loss_epoch += loss.item() * b_x.size(0)
        # Accumulate the number of correct training predictions
        train_corrects += torch.sum(pre_lab == b_y)

    # Compute the average training loss and accuracy for the epoch
    train_loss = train_loss_epoch / len(train_data.targets)
    train_acc = 100. * train_corrects / len(train_data.targets)

    # Put the model into evaluation mode
    Myvggc.eval()
    # Disable gradient computation
    with torch.no_grad():
        # Iterate over the validation data loader
        for step, (val_x, val_y) in enumerate(val_data_loader):
            # Forward pass
            output = Myvggc(val_x)
            # Compute the loss
            loss = loss_func(output, val_y)
            # Accumulate the validation loss
            val_loss_epoch += loss.item() * val_x.size(0)
            # Get the most likely class label predicted by the model
            pre_lab = torch.argmax(output, dim=1)
            # Accumulate the number of correct validation predictions
            val_corrects += torch.sum(pre_lab == val_y)

    # Compute the average validation loss and accuracy for the epoch
    val_loss = val_loss_epoch / len(val_data.targets)
    val_acc = 100. * val_corrects / len(val_data.targets)

    # Log the loss and accuracy of each epoch
    history1.log(epoch, train_loss=train_loss, train_acc=train_acc.item(), val_loss=val_loss, val_acc=val_acc.item())

    # Print the training and validation loss and accuracy for each epoch
    print(f'Epoch {epoch+1}/{10}, train loss: {train_loss:.4f}, train acc: {train_acc:.2f}%, val loss: {val_loss:.4f}, val acc: {val_acc:.2f}%')
Epoch 1/10, train loss: 1.4848, train acc: 97.90%, val loss: 1.4611, val acc: 100.00%
Epoch 2/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 3/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 4/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 5/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 6/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 7/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 8/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 9/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
Epoch 10/10, train loss: 1.4611, train acc: 100.00%, val loss: 1.4611, val acc: 100.00%
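Note that the loss plateaus around 1.461 even once accuracy reaches 100%. This is because the classifier already ends with nn.Softmax(dim=1) while nn.CrossEntropyLoss applies log-softmax internally; with 10 classes, even a perfectly confident softmax output cannot push the loss much below log(e + 9) − 1 ≈ 1.461. A common variant is to drop the final Softmax and let the loss work on raw logits; a sketch of such an alternative head (not the model used in this chapter):

# Hypothetical alternative classifier head that outputs raw logits
classifier = nn.Sequential(
    nn.Linear(512 * 7 * 7, 256),
    nn.ReLU(),
    nn.Dropout(p=0.5),
    nn.Linear(256, 10)   # no Softmax; nn.CrossEntropyLoss expects unnormalized scores
)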
# Plot the logged metrics with the hiddenlayer Canvas
with canvas1:
    canvas1.draw_plot([history1["train_loss"], history1["val_loss"]], labels=["Train Loss", "Validation Loss"])
    canvas1.draw_plot([history1["train_acc"], history1["val_acc"]], labels=["Train Accuracy", "Validation Accuracy"])

[Figure: training and validation loss and accuracy curves plotted with hiddenlayer]
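Finally, the sklearn metrics imported at the top (accuracy_score, confusion_matrix, classification_report) and seaborn are not actually used above; a sketch of how they could be applied to the validation set for a per-class breakdown and a confusion-matrix heatmap:

# Collect predictions and true labels over the whole validation set
Myvggc.eval()
all_pre, all_true = [], []
with torch.no_grad():
    for val_x, val_y in val_data_loader:
        pre_lab = torch.argmax(Myvggc(val_x), dim=1)
        all_pre.extend(pre_lab.numpy())
        all_true.extend(val_y.numpy())
print("Validation accuracy:", accuracy_score(all_true, all_pre))
print(classification_report(all_true, all_pre))
# Visualize the confusion matrix as a heatmap
conf_mat = confusion_matrix(all_true, all_pre)
plt.figure(figsize=(8, 6))
sns.heatmap(conf_mat, annot=True, fmt="d", cmap="Blues")
plt.xlabel("Predicted label")
plt.ylabel("True label")
plt.show()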

