NNDL Experiment 11: Convolutional Neural Networks (2)

Handwritten Digit Recognition Based on LeNet


Dataset: the MNIST handwritten digit recognition dataset.

1. Data Processing and Model Construction

Dataset distribution

import json
import gzip
# Print and inspect the sizes of the dataset splits
# Use json.load() together with gzip.open() to read the gzip-compressed MNIST dataset file from the given path
train_set, dev_set, test_set = json.load(gzip.open(r'C:\Users\320\PycharmProjects\pythonProject\mnist.json.gz'))
# Slice out the first 3000 samples of the training set, and the first 200 samples of the dev and test sets
train_images, train_labels = train_set[0][:3000], train_set[1][:3000]
dev_images, dev_labels = dev_set[0][:200], dev_set[1][:200]
test_images, test_labels = test_set[0][:200], test_set[1][:200]
# Re-pack them into train_set, dev_set and test_set
train_set, dev_set, test_set = [train_images, train_labels], [dev_images, dev_labels], [test_images, test_labels]
print('Length of train/dev/test set:{}/{}/{}'.format(len(train_set[0]), len(dev_set[0]), len(test_set[0])))

Visualize a sample

import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

image, label = train_set[0][0], train_set[1][0]  # take the first sample's image and label from the training set
image, label = np.array(image).astype('float32'), int(label)  # convert the label to an int
# The raw image is a 784-dimensional row vector; reshape it into a [28, 28] image
image = np.reshape(image, [28, 28])
image = Image.fromarray(image.astype('uint8'), mode='L')  # convert the numpy array into a PIL image
print("The number in the picture is {}".format(label))
plt.figure(figsize=(5, 5))
plt.imshow(image, cmap='gray')  # cmap='gray' renders the image in grayscale
plt.show()

Data preprocessing

Using the built-in transforms provided by torchvision.

import torchvision.transforms as transforms

# Data preprocessing: resize to 32x32, convert to a tensor, then normalize with mean 0.5 and std 0.5
# (named `transform` so it does not shadow the imported `transforms` module)
transform = transforms.Compose(
    [transforms.Resize(32), transforms.ToTensor(), transforms.Normalize(mean=[0.5], std=[0.5])])
import random
import torch.utils.data as io


class MNIST_dataset(io.Dataset):  # subclass of torch.utils.data.Dataset that wraps one data split
    def __init__(self, dataset, transforms, mode='train'):  # takes dataset (the split), transforms (preprocessing) and mode
        self.mode = mode
        self.transforms = transforms
        self.dataset = dataset

    def __getitem__(self, idx):  # return the sample at the given index
        # Fetch the image and label
        image, label = self.dataset[0][idx], self.dataset[1][idx]
        image, label = np.array(image).astype('float32'), int(label)
        image = np.reshape(image, [28, 28])
        image = Image.fromarray(image.astype('uint8'), mode='L')
        image = self.transforms(image)

        return image, label

    def __len__(self):  # size of the dataset
        return len(self.dataset[0])
# Fix the random seed
random.seed(0)
# Build the MNIST datasets
train_dataset = MNIST_dataset(dataset=train_set, transforms=transform, mode='train')
test_dataset = MNIST_dataset(dataset=test_set, transforms=transform, mode='test')
dev_dataset = MNIST_dataset(dataset=dev_set, transforms=transform, mode='dev')
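
As a quick sanity check (a snippet added here, assuming the cells above have been run), every sample should now come out as a normalized 1×32×32 tensor:

image, label = train_dataset[0]
# Expected: torch.Size([1, 32, 32]), values within [-1, 1] after Normalize, and an integer label
print(image.shape, image.min().item(), image.max().item(), label)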

Model construction

The LeNet-5 model

Using custom convolution and pooling operators. Compared with the original LeNet-5, this implementation differs in four respects:

  1. The C3 layer does not use a connection table to reduce the number of convolutions.
  2. The pooling layers are plain pooling operations (max pooling for S2 and average pooling for S4 in the code below), without weight or bias parameters and without a nonlinear activation.
  3. The convolutional layers use the ReLU activation function.
  4. The final output layer is a fully connected linear layer.
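
For reference, with the standard output-size formula (H − K + 2P)/S + 1 and a 32×32 input, the feature maps shrink as follows, which is why the C5 output can simply be squeezed before the fully connected layers:

# Feature-map sizes for a 32x32 input (no padding anywhere):
# C1: (32 - 5)/1 + 1 = 28  -> [B, 6, 28, 28]
# S2: (28 - 2)/2 + 1 = 14  -> [B, 6, 14, 14]
# C3: (14 - 5)/1 + 1 = 10  -> [B, 16, 10, 10]
# S4: (10 - 2)/2 + 1 = 5   -> [B, 16, 5, 5]
# C5: (5 - 5)/1 + 1  = 1   -> [B, 120, 1, 1] -> squeezed to [B, 120]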
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Conv2D(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(Conv2D, self).__init__()
        # Create the convolution kernels
        self.weight = torch.nn.Parameter(torch.ones(out_channels, in_channels, kernel_size, kernel_size))
        # Create the biases
        self.bias = torch.nn.Parameter(torch.zeros(out_channels, 1))
        self.stride = stride
        self.padding = padding
        # Number of input channels
        self.in_channels = in_channels
        # Number of output channels
        self.out_channels = out_channels

    # Basic single-channel convolution
    def single_forward(self, X, weight):
        # Zero padding
        new_X = torch.zeros([X.shape[0], X.shape[1] + 2 * self.padding, X.shape[2] + 2 * self.padding])
        new_X[:, self.padding:X.shape[1] + self.padding, self.padding:X.shape[2] + self.padding] = X
        u, v = weight.shape
        output_w = (new_X.shape[1] - u) // self.stride + 1
        output_h = (new_X.shape[2] - v) // self.stride + 1
        output = torch.zeros([X.shape[0], output_w, output_h])
        for i in range(0, output.shape[1]):
            for j in range(0, output.shape[2]):
                output[:, i, j] = torch.sum(
                    new_X[:, self.stride * i:self.stride * i + u, self.stride * j:self.stride * j + v] * weight,
                    dim=[1, 2])
        return output

    def forward(self, inputs):
        """
        Inputs:
            - inputs: input tensor, shape=[B, D, M, N]
            - weights: P groups of 2D kernels, shape=[P, D, U, V]
            - bias: P biases, shape=[P, 1]
        """
        feature_maps = []
        # Run the multi-input-channel convolution once per output channel
        p = 0
        for w, b in zip(self.weight, self.bias):  # P pairs of (w, b); each pass computes one feature map Zp
            multi_outs = []
            # Convolve each input feature map with its corresponding kernel slice
            for i in range(self.in_channels):
                single = self.single_forward(inputs[:, i, :, :], w[i])
                multi_outs.append(single)
                # print("Conv2D in_channels:",self.in_channels,"i:",i,"single:",single.shape)
            # Sum all per-channel results and add the bias
            feature_map = torch.sum(torch.stack(multi_outs), dim=0) + b  # Zp
            feature_maps.append(feature_map)
            # print("Conv2D out_channels:",self.out_channels, "p:",p,"feature_map:",feature_map.shape)
            p += 1
        # Stack all Zp along the channel dimension
        out = torch.stack(feature_maps, 1)
        return out
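
As a quick sanity check of the hand-written operator (an added sketch, not part of the original lab text), its output can be compared against torch.nn.functional.conv2d, which computes the same cross-correlation:

# Compare the custom Conv2D with torch's built-in conv2d on random data
torch.manual_seed(0)
x_check = torch.randn(2, 3, 8, 8)
conv_check = Conv2D(in_channels=3, out_channels=4, kernel_size=3, stride=1, padding=1)
out_custom = conv_check(x_check)
out_builtin = F.conv2d(x_check, conv_check.weight, conv_check.bias.squeeze(-1), stride=1, padding=1)
print(out_custom.shape, torch.allclose(out_custom, out_builtin, atol=1e-5))  # expected: torch.Size([2, 4, 8, 8]) True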


class Pool2D(nn.Module):
    def __init__(self, size=(2, 2), mode='max', stride=1):
        super(Pool2D, self).__init__()
        # Pooling mode
        self.mode = mode
        self.h, self.w = size
        self.stride = stride

    def forward(self, x):
        output_w = (x.shape[2] - self.w) // self.stride + 1
        output_h = (x.shape[3] - self.h) // self.stride + 1
        output = torch.zeros([x.shape[0], x.shape[1], output_w, output_h])
        # Pooling
        for i in range(output.shape[2]):
            for j in range(output.shape[3]):
                # Max pooling (reduce only over the spatial dims of the window, keeping batch and channel dims)
                if self.mode == 'max':
                    output[:, :, i, j] = torch.amax(
                        x[:, :, self.stride * i:self.stride * i + self.w, self.stride * j:self.stride * j + self.h],
                        dim=[2, 3])

                # Average pooling
                elif self.mode == 'avg':
                    output[:, :, i, j] = torch.mean(
                        x[:, :, self.stride * i:self.stride * i + self.w, self.stride * j:self.stride * j + self.h],
                        dim=[2, 3])

        return output
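
Similarly, a small comparison against torch's built-in pooling functions (an added check, assuming the amax-based max pooling above) should print True twice:

x_check = torch.randn(1, 2, 4, 4)
print(torch.allclose(Pool2D(size=(2, 2), mode='max', stride=2)(x_check),
                     F.max_pool2d(x_check, kernel_size=2, stride=2)))
print(torch.allclose(Pool2D(size=(2, 2), mode='avg', stride=2)(x_check),
                     F.avg_pool2d(x_check, kernel_size=2, stride=2)))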


class Model_LeNet(nn.Module):
    def __init__(self, in_channels, num_classes=10):
        super(Model_LeNet, self).__init__()
        # Convolutional layer: 6 output channels, 5x5 kernel
        self.conv1 = Conv2D(in_channels=in_channels, out_channels=6, kernel_size=5)
        # Pooling layer: 2x2 window, stride 2
        self.pool2 = Pool2D(size=(2, 2), mode='max', stride=2)
        # Convolutional layer: 6 input channels, 16 output channels, 5x5 kernel, stride 1
        self.conv3 = Conv2D(in_channels=6, out_channels=16, kernel_size=5, stride=1)
        # Pooling layer: 2x2 window, stride 2
        self.pool4 = Pool2D(size=(2, 2), mode='avg', stride=2)
        # Convolutional layer: 16 input channels, 120 output channels, 5x5 kernel
        self.conv5 = Conv2D(in_channels=16, out_channels=120, kernel_size=5, stride=1)
        # Fully connected layer: 120 inputs, 84 outputs
        self.linear6 = nn.Linear(120, 84)
        # Fully connected layer: 84 inputs, num_classes outputs
        self.linear7 = nn.Linear(84, num_classes)

    def forward(self, x):
        # C1: convolution + activation
        output = F.relu(self.conv1(x))
        # S2: pooling
        output = self.pool2(output)
        # C3: convolution + activation
        output = F.relu(self.conv3(output))
        # S4: pooling
        output = self.pool4(output)
        # C5: convolution + activation
        output = F.relu(self.conv5(output))
        # Flatten [B, C, H, W] -> [B, C*H*W] (H = W = 1 here, so two squeezes suffice)
        output = torch.squeeze(output, dim=3)
        output = torch.squeeze(output, dim=2)
        # F6: fully connected layer
        output = F.relu(self.linear6(output))
        # F7: fully connected layer
        output = self.linear7(output)
        return output

Test the LeNet-5 model above by feeding an input of shape [1, 1, 32, 32] into the network and observing how the feature-map shape changes after each layer.


# Use np.random to create a random array as input data
inputs = np.random.randn(*[1, 1, 32, 32])
inputs = inputs.astype('float32')

# Create an instance of Model_LeNet with the desired number of classes
model = Model_LeNet(in_channels=1, num_classes=10)
# Print the model to inspect the sub-layers it contains
print(model)
x = torch.tensor(inputs)
for item in model.children():
    # item is one sub-layer of the LeNet model
    # Inspect the output shape after passing through this sub-layer
    item_shapex = 0
    names = []
    parameter = []
    for name in item.named_parameters():
        names.append(name[0])
        parameter.append(name[1])
        item_shapex += 1
    try:
        x = item(x)
    except:
        # The output of the last convolutional layer must be flattened before the fully connected layer
        x = x.reshape([x.shape[0], -1])
        x = item(x)

    if item_shapex == 2:
        # Print the output shape and parameter shapes of convolutional and fully connected layers,
        # where parameter[0] is the weight w and parameter[1] is the bias b
        print(item, x.shape, parameter[0].shape, parameter[1].shape)
    else:
        # Pooling layers have no parameters
        print(item, x.shape)
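
The rest of this post compares the hand-written model against a LeNet-5 built from torch's own operators, referred to as PyTorch_LeNet, but its definition is missing from this section. A minimal sketch consistent with how it is used later (same layer names as Model_LeNet, so the state_dict transfer below works) might look like this:

class PyTorch_LeNet(nn.Module):
    def __init__(self, in_channels, num_classes=10):
        super(PyTorch_LeNet, self).__init__()
        # Same architecture as Model_LeNet, but built from torch's built-in operators
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=6, kernel_size=5)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1)
        self.pool4 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv5 = nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1)
        self.linear6 = nn.Linear(120, 84)
        self.linear7 = nn.Linear(84, num_classes)

    def forward(self, x):
        output = F.relu(self.conv1(x))
        output = self.pool2(output)
        output = F.relu(self.conv3(output))
        output = self.pool4(output)
        output = F.relu(self.conv5(output))
        # Flatten [B, 120, 1, 1] -> [B, 120]
        output = torch.flatten(output, start_dim=1)
        output = F.relu(self.linear6(output))
        output = self.linear7(output)
        return output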

Compare the running speed of the two networks.

import numpy as np
import time
import torch

# Use np.random to create a random array as test data
inputs = np.random.randn(*[1, 1, 32, 32])
inputs = inputs.astype('float32')
x = torch.tensor(inputs)

# Create an instance of Model_LeNet with the desired number of classes
model = Model_LeNet(in_channels=1, num_classes=10)
# Create an instance of PyTorch_LeNet with the desired number of classes
torch_model = PyTorch_LeNet(in_channels=1, num_classes=10)

# Measure the running speed of Model_LeNet
model_time = 0
for i in range(60):
    start_time = time.time()
    out = model(x)
    end_time = time.time()
    # The first 10 runs are warm-up and are not counted in the final statistics
    if i < 10:
        continue
    model_time += (end_time - start_time)
avg_model_time = model_time / 50
print('Model_LeNet speed:', avg_model_time, 's')

# Measure the running speed of PyTorch_LeNet
torch_model_time = 0
for i in range(60):
    start_time = time.time()
    torch_out = torch_model(x)
    end_time = time.time()
    # The first 10 runs are warm-up and are not counted in the final statistics
    if i < 10:
        continue
    torch_model_time += (end_time - start_time)
avg_torch_model_time = torch_model_time / 50

print('torch_LeNet speed:', avg_torch_model_time, 's')

Load the same weights into both networks and check whether their outputs are consistent.

import numpy as np
import torch

# Use np.random to create a random array as test data
inputs = np.random.randn(*[1, 1, 32, 32])
inputs = inputs.astype('float32')
x = torch.tensor(inputs)

# Create an instance of Model_LeNet with the desired number of classes
model = Model_LeNet(in_channels=1, num_classes=10)
# Get the network's weights
params = model.state_dict()
# The bias of the custom Conv2D operator has shape [out_channels, 1],
# while the bias of torch's Conv2d operator has shape [out_channels],
# so it must be reshaped before being assigned
for key in params:
    if 'bias' in key:
        params[key] = params[key].squeeze()
# Create an instance of PyTorch_LeNet with the desired number of classes
torch_model = PyTorch_LeNet(in_channels=1, num_classes=10)
# Copy Model_LeNet's parameters into the PyTorch_LeNet model so the two stay identical
torch_model.load_state_dict(params)

# Print results with 6 decimal places
torch.set_printoptions(6)
# Compute Model_LeNet's output
output = model(x)
print('Model_LeNet output: ', output)
# Compute PyTorch_LeNet's output
torch_output = torch_model(x)
print('torch_LeNet output: ', torch_output)
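
Rather than eyeballing the two printed tensors, the agreement can also be checked numerically (an added check; with identical weights the two outputs should match up to float32 rounding):

print(torch.allclose(output, torch_output, rtol=1e-4, atol=1e-4))  # typically prints True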

Report the parameter count and per-layer output sizes of the LeNet-5 model with torchsummary.


import torchsummary

model = PyTorch_LeNet(in_channels=1, num_classes=10)
# torchsummary runs its forward pass on the GPU by default, so move the model to CUDA first (requires a GPU)
model = model.cuda()
params_info = torchsummary.summary(model, (1, 32, 32))
print(params_info)
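
As a cross-check on the summary (a hand computation added here), the parameter count of this LeNet-5 variant can be worked out layer by layer:

conv1 = 6 * (1 * 5 * 5) + 6        # 156
conv3 = 16 * (6 * 5 * 5) + 16      # 2,416
conv5 = 120 * (16 * 5 * 5) + 120   # 48,120
linear6 = 120 * 84 + 84            # 10,164
linear7 = 84 * 10 + 10             # 850
print(conv1 + conv3 + conv5 + linear6 + linear7)  # 61,706 parameters in total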

2. Model Training

Train the LeNet-5 network with the cross-entropy loss function and stochastic gradient descent (SGD) as the optimizer.
Use RunnerV3 to train on the training set (6 epochs in the code below) and save the model with the highest dev-set accuracy as the best model.
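
For reference (an added illustration, not part of the original lab), F.cross_entropy expects raw logits and internally combines log-softmax with the negative log-likelihood:

logits_demo = torch.tensor([[2.0, 0.5, 0.1]])
label_demo = torch.tensor([0])
print(F.cross_entropy(logits_demo, label_demo))                        # built-in cross entropy
print(-F.log_softmax(logits_demo, dim=1)[0, label_demo.item()])        # same value computed by hand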

class RunnerV3(object):
    def __init__(self, model, optimizer, loss_fn, metric, **kwargs):
        self.model = model
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.metric = metric  # only used to compute the evaluation metric

        # Record how the evaluation metric changes during training
        self.dev_scores = []

        # Record how the loss changes during training
        self.train_epoch_losses = []  # one loss value per epoch
        self.train_step_losses = []  # one loss value per step
        self.dev_losses = []

        # Record the best metric seen so far
        self.best_score = 0

    def train(self, train_loader, dev_loader=None, **kwargs):
        # Switch the model to training mode
        self.model.train()

        # Number of training epochs; defaults to 0 if not provided
        num_epochs = kwargs.get("num_epochs", 0)
        # Logging frequency; defaults to 100 if not provided
        log_steps = kwargs.get("log_steps", 100)
        # Evaluation frequency
        eval_steps = kwargs.get("eval_steps", 0)

        # Model save path; defaults to "best_model.pdparams" if not provided
        save_path = kwargs.get("save_path", "best_model.pdparams")

        custom_print_log = kwargs.get("custom_print_log", None)

        # Total number of training steps
        num_training_steps = num_epochs * len(train_loader)

        if eval_steps:
            if self.metric is None:
                raise RuntimeError('Error: Metric can not be None!')
            if dev_loader is None:
                raise RuntimeError('Error: dev_loader can not be None!')

        # Number of steps run so far
        global_step = 0

        # Train for num_epochs epochs
        for epoch in range(num_epochs):
            # Accumulate the training-set loss
            total_loss = 0
            for step, data in enumerate(train_loader):
                X, y = data
                # Get the model predictions
                logits = self.model(X)
                loss = self.loss_fn(logits, y)  # mean reduction by default
                total_loss += loss

                # Save the loss of every training step
                self.train_step_losses.append((global_step, loss.item()))

                if log_steps and global_step % log_steps == 0:
                    print(
                        f"[Train] epoch: {epoch}/{num_epochs}, step: {global_step}/{num_training_steps}, loss: {loss.item():.5f}")

                # Back-propagate to compute the gradient of every parameter
                loss.backward()

                if custom_print_log:
                    custom_print_log(self)

                # Update the parameters with mini-batch gradient descent
                self.optimizer.step()
                # Reset the gradients to zero
                self.optimizer.zero_grad()

                # Decide whether to evaluate
                if eval_steps > 0 and global_step > 0 and \
                        (global_step % eval_steps == 0 or global_step == (num_training_steps - 1)):

                    dev_score, dev_loss = self.evaluate(dev_loader, global_step=global_step)
                    print(f"[Evaluate]  dev score: {dev_score:.5f}, dev loss: {dev_loss:.5f}")

                    # Switch the model back to training mode
                    self.model.train()

                    # If the current metric is the best so far, save the model
                    if dev_score > self.best_score:
                        self.save_model(save_path)
                        print(
                            f"[Evaluate] best accuracy performance has been updated: {self.best_score:.5f} --> {dev_score:.5f}")
                        self.best_score = dev_score

                global_step += 1

            # Accumulated training loss of the current epoch
            trn_loss = (total_loss / len(train_loader)).item()
            # Save the epoch-level training loss
            self.train_epoch_losses.append(trn_loss)

        print("[Train] Training done!")

    # Evaluation stage: use 'torch.no_grad()' so no gradients are computed or stored
    @torch.no_grad()
    def evaluate(self, dev_loader, **kwargs):
        assert self.metric is not None

        # Switch the model to evaluation mode
        self.model.eval()

        global_step = kwargs.get("global_step", -1)

        # Accumulate the validation loss
        total_loss = 0

        # Reset the metric
        self.metric.reset()

        # Iterate over every batch of the validation set
        for batch_id, data in enumerate(dev_loader):
            X, y = data

            # Compute the model output
            logits = self.model(X)

            # Compute the loss
            loss = self.loss_fn(logits, y).item()
            # Accumulate the loss
            total_loss += loss

            # Accumulate the metric
            self.metric.update(logits, y)

        dev_loss = (total_loss / len(dev_loader))
        dev_score = self.metric.accumulate()

        # Record the validation loss
        if global_step != -1:
            self.dev_losses.append((global_step, dev_loss))
            self.dev_scores.append(dev_score)

        return dev_score, dev_loss

    # Prediction stage: use 'torch.no_grad()' so no gradients are computed or stored
    @torch.no_grad()
    def predict(self, x, **kwargs):
        # Switch the model to evaluation mode
        self.model.eval()
        # Run the forward pass to get the predictions
        logits = self.model(x)
        return logits

    def save_model(self, save_path):
        torch.save(self.model.state_dict(), save_path)

    def load_model(self, model_path):
        state_dict = torch.load(model_path)
        self.model.load_state_dict(state_dict)
class Accuracy():
    def __init__(self, is_logist=True):
        """
        Input:
           - is_logist: whether outputs are raw logits or already-activated values
        """

        # Number of correctly predicted samples
        self.num_correct = 0
        # Total number of samples
        self.num_count = 0

        self.is_logist = is_logist

    def update(self, outputs, labels):
        """
        Input:
           - outputs: predictions, shape=[N, class_num]
           - labels: ground-truth labels, shape=[N, 1]
        """

        # Binary classification when shape[1] == 1, multi-class when shape[1] > 1
        if outputs.shape[1] == 1:  # binary classification
            outputs = torch.squeeze(outputs, dim=-1)
            if self.is_logist:
                # For logits, predict class 1 when the logit is >= 0
                preds = (outputs >= 0).to(torch.float32)
            else:
                # Otherwise treat outputs as probabilities: class 1 when >= 0.5, else class 0
                preds = (outputs >= 0.5).to(torch.float32)
        else:
            # Multi-class: use 'torch.argmax' to take the index of the largest element as the predicted class
            preds = torch.argmax(outputs, dim=1).int()

        # Number of correct predictions in this batch
        labels = torch.squeeze(labels, dim=-1)
        batch_correct = torch.sum((preds == labels).to(torch.float32)).item()
        batch_count = len(labels)

        # Update num_correct and num_count
        self.num_correct += batch_correct
        self.num_count += batch_count

    def accumulate(self):
        # Compute the overall metric from the accumulated counts
        if self.num_count == 0:
            return 0
        return self.num_correct / self.num_count

    def reset(self):
        # Reset the correct count and the total count
        self.num_correct = 0
        self.num_count = 0

    def name(self):
        return "Accuracy"

Training

import torch.optim as opti
from torch.utils.data import DataLoader

# Learning rate
lr = 0.2
# Batch size
batch_size = 64
# Load the data
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
dev_loader = DataLoader(dev_dataset, batch_size=batch_size)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
# Define the LeNet network
# LeNet-5 implemented with torch's built-in operators
model = PyTorch_LeNet(in_channels=1, num_classes=10)
# Define the optimizer
optimizer = opti.SGD(model.parameters(), lr)
# Define the loss function
loss_fn = F.cross_entropy
# Define the evaluation metric
metric = Accuracy()
# Instantiate the RunnerV3 class with the training configuration
runner = RunnerV3(model, optimizer, loss_fn, metric)
# Start training
log_steps = 15
eval_steps = 15
runner.train(train_loader, dev_loader, num_epochs=6, log_steps=log_steps,
             eval_steps=eval_steps, save_path="best_model.pdparams")

Visualization

import matplotlib.pyplot as plt


# Visualize the losses
def plot(runner, fig_name):
    plt.figure(figsize=(10, 5))

    plt.subplot(1, 2, 1)
    train_items = runner.train_step_losses[::30]
    train_steps = [x[0] for x in train_items]
    train_losses = [x[1] for x in train_items]

    plt.plot(train_steps, train_losses, color='#8E004D', label="Train loss")
    if runner.dev_losses[0][0] != -1:
        dev_steps = [x[0] for x in runner.dev_losses]
        dev_losses = [x[1] for x in runner.dev_losses]
        plt.plot(dev_steps, dev_losses, color='#E20079', linestyle='--', label="Dev loss")
    # Draw the axis labels and legend
    plt.ylabel("loss", fontsize='x-large')
    plt.xlabel("step", fontsize='x-large')
    plt.legend(loc='upper right', fontsize='x-large')

    plt.subplot(1, 2, 2)
    # Plot how the evaluation accuracy changes
    if runner.dev_losses[0][0] != -1:
        plt.plot(dev_steps, runner.dev_scores,
                 color='#E20079', linestyle="--", label="Dev accuracy")
    else:
        plt.plot(list(range(len(runner.dev_scores))), runner.dev_scores,
                 color='#E20079', linestyle="--", label="Dev accuracy")
    # Draw the axis labels and legend
    plt.ylabel("score", fontsize='x-large')
    plt.xlabel("step", fontsize='x-large')
    plt.legend(loc='lower right', fontsize='x-large')

    plt.savefig(fig_name)
    plt.show()


runner.load_model('best_model.pdparams')
plot(runner, 'cnn-loss1.pdf')


runner.load_model('best_model.pdparams')
plot(runner, 'cnn-loss1.pdf')

3. Model Evaluation

# Load the best model
runner.load_model('best_model.pdparams')
# Evaluate the model on the test set
score, loss = runner.evaluate(test_loader)
print("[Test] accuracy/loss: {:.4f}/{:.4f}".format(score, loss))

 

4. Model Prediction

# Take the first batch from the test set
X, label = next(iter(test_loader))
logits = runner.predict(X)
# Multi-class: use softmax to turn the logits into predicted probabilities
pred = F.softmax(logits, dim=1)
# For the sample at index 2 of the batch, take the class with the largest probability
pred_class = torch.argmax(pred[2]).numpy()
label = label[2].numpy()
# Print the true class and the predicted class
print("The true category is {} and the predicted category is {}".format(label, pred_class))
# Visualize the corresponding image
plt.figure(figsize=(2, 2))
image, label = test_set[0][2], test_set[1][2]
image = np.array(image).astype('float32')
image = np.reshape(image, [28, 28])
image = Image.fromarray(image.astype('uint8'), mode='L')
plt.imshow(image)
plt.savefig('cnn-number2.pdf')
