Deep Learning Lab 11 (MNIST Recognition with LeNet)

1. Model Construction

import json
import gzip
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

# Print and inspect the dataset distribution
train_set, dev_set, test_set = json.load(gzip.open('mnist.json.gz'))
train_images, train_labels = train_set[0][:1000], train_set[1][:1000]
dev_images, dev_labels = dev_set[0][:200], dev_set[1][:200]
test_images, test_labels = test_set[0][:200], test_set[1][:200]
train_set, dev_set, test_set = [train_images, train_labels], [dev_images, dev_labels], [test_images, test_labels]
print('Length of train/dev/test set:{}/{}/{}'.format(len(train_set[0]), len(dev_set[0]), len(test_set[0])))

# Visualize a sample from the dataset
image, label = train_set[0][0], train_set[1][0]
image, label = np.array(image).astype('float32'), int(label)

# The raw image is a length-784 row vector; reshape it to a [28, 28] image
image = np.reshape(image*255, [28, 28])
image = Image.fromarray(image.astype('uint8'), mode='L')
print("The number in the picture is {}".format(label))
plt.figure(figsize=(6, 6))
plt.imshow(image, cmap='gray')
plt.show()
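
As a quick check of the class distribution mentioned above, here is a minimal sketch that counts how many of the 1000 retained training samples belong to each digit (it only assumes the train_labels list loaded above):

label_counts = np.bincount(np.array(train_labels, dtype='int64'), minlength=10)
for digit, count in enumerate(label_counts):
    print('digit {}: {} samples'.format(digit, count))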

import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import numpy as np
import random

# Data preprocessing
transform = transforms.Compose([
    transforms.Resize(32),  # resize images to 32x32
    transforms.ToTensor(),  # convert to a tensor scaled to [0, 1]
    transforms.Normalize(mean=[0.5], std=[0.5])  # standardize to roughly [-1, 1]
])


class MNISTDataset(Dataset):
    def __init__(self, dataset, transform=None, mode='train'):
        """
        dataset: tuple of (images, labels)
        transform: 图像预处理
        mode: train/dev/test
        """
        self.dataset = dataset
        self.transform = transform
        self.mode = mode

    def __getitem__(self, idx):
        # Fetch the image and label
        image, label = self.dataset[0][idx], self.dataset[1][idx]

        # Convert the image to a float32 NumPy array
        image = np.array(image).astype('float32')

        # Reshape from (784,) to (28, 28); MNIST images are 28x28 pixels
        image = np.reshape(image, [28, 28])

        # The raw pixel values are in [0, 1] (see the visualization above),
        # so scale to [0, 255] before converting to a uint8 PIL image
        image = Image.fromarray((image * 255).astype('uint8'), mode='L')

        # Apply preprocessing if a transform is provided
        if self.transform:
            image = self.transform(image)

        return image, label

    def __len__(self):
        # Number of samples in the dataset
        return len(self.dataset[0])


# Fix the random seeds
random.seed(0)
torch.manual_seed(0)

# Load the MNIST dataset
# train_set, dev_set, and test_set are assumed to be loaded above (loading code not repeated here)
# each is a pair of (images, labels)

train_dataset = MNISTDataset(dataset=train_set, transform=transform, mode='train')
test_dataset = MNISTDataset(dataset=test_set, transform=transform, mode='test')
dev_dataset = MNISTDataset(dataset=dev_set, transform=transform, mode='dev')

# Batch the data with DataLoader
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
dev_loader = DataLoader(dev_dataset, batch_size=64, shuffle=False)

# Fetch one batch from the DataLoader
for images, labels in train_loader:
    print(images.shape, labels.shape)
    break
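
To confirm that the Resize/ToTensor/Normalize pipeline behaves as described (32x32 tensors with values roughly in [-1, 1]), a small sanity-check sketch on one batch from the loader defined above:

images, labels = next(iter(train_loader))
print('batch shape:', images.shape)  # expected: torch.Size([64, 1, 32, 32])
print('value range: [{:.2f}, {:.2f}]'.format(images.min().item(), images.max().item()))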

import torch
import torch.nn as nn
import torch.nn.functional as F


# Hand-written Pool2D class
class Pool2D(nn.Module):
    def __init__(self, size=(2, 2), mode='max', stride=1):
        super(Pool2D, self).__init__()
        self.mode = mode
        self.h, self.w = size
        self.stride = stride

    def forward(self, x):
        output_h = (x.shape[2] - self.h) // self.stride + 1
        output_w = (x.shape[3] - self.w) // self.stride + 1
        output = torch.zeros([x.shape[0], x.shape[1], output_h, output_w])

        # Pooling: slide the window over the spatial dimensions
        for i in range(output.shape[2]):
            for j in range(output.shape[3]):
                if self.mode == 'max':
                    output[:, :, i, j] = torch.max(
                        x[:, :, self.stride * i:self.stride * i + self.h, self.stride * j:self.stride * j + self.w],
                        dim=3)[0].max(dim=2)[0]
                elif self.mode == 'avg':
                    output[:, :, i, j] = torch.mean(
                        x[:, :, self.stride * i:self.stride * i + self.h, self.stride * j:self.stride * j + self.w],
                        dim=[2, 3])

        return output
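
Before wiring Pool2D into the network, it can be checked against torch.nn's built-in pooling layers. The following is only a sanity-check sketch on a random tensor (square 2x2 windows, as used below); the variable names are illustrative:

x_check = torch.randn(1, 3, 6, 6)
print(torch.allclose(Pool2D(size=(2, 2), mode='max', stride=2)(x_check), nn.MaxPool2d(2, 2)(x_check)))  # expected: True
print(torch.allclose(Pool2D(size=(2, 2), mode='avg', stride=2)(x_check), nn.AvgPool2d(2, 2)(x_check)))  # expected: True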


# Hand-written Conv2D class
class Conv2D(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(Conv2D, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.in_channels = in_channels
        self.out_channels = out_channels

        # Define the convolution weights and bias
        self.weight = nn.Parameter(torch.ones(out_channels, in_channels, kernel_size, kernel_size))
        self.bias = nn.Parameter(torch.zeros(out_channels))

    def single_forward(self, X, weight):
        # Zero padding
        new_X = torch.zeros((X.shape[0], X.shape[1] + 2 * self.padding, X.shape[2] + 2 * self.padding))
        new_X[:, self.padding:X.shape[1] + self.padding, self.padding:X.shape[2] + self.padding] = X
        u, v = weight.shape
        output_h = (new_X.shape[1] - u) // self.stride + 1
        output_w = (new_X.shape[2] - v) // self.stride + 1
        output = torch.zeros((X.shape[0], output_h, output_w))

        # Convolution (cross-correlation) over the padded input
        for i in range(output.shape[1]):
            for j in range(output.shape[2]):
                output[:, i, j] = torch.sum(
                    new_X[:, self.stride * i:self.stride * i + u, self.stride * j:self.stride * j + v] * weight,
                    dim=[1, 2])
        return output

    def forward(self, inputs):
        feature_maps = []

        # Multi-channel convolution: one kernel per output channel
        for p in range(self.out_channels):
            multi_outs = []

            # Convolve each input channel with the corresponding kernel slice
            for i in range(self.in_channels):
                single = self.single_forward(inputs[:, i, :, :], self.weight[p, i, :, :])
                multi_outs.append(single)

            # Sum over input channels and add the bias
            feature_map = torch.sum(torch.stack(multi_outs), dim=0) + self.bias[p]
            feature_maps.append(feature_map)

        # Stack the output feature maps along the channel dimension
        out = torch.stack(feature_maps, dim=1)
        return out
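
Likewise, the hand-written Conv2D can be compared against the functional F.conv2d using its own weights and bias; this is only a sanity-check sketch with illustrative names:

x_check = torch.randn(2, 3, 8, 8)
my_conv = Conv2D(in_channels=3, out_channels=4, kernel_size=3, stride=1, padding=1)
ref = F.conv2d(x_check, my_conv.weight, my_conv.bias, stride=1, padding=1)
print(torch.allclose(my_conv(x_check), ref, atol=1e-5))  # expected: True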


# LeNet model
class Model_LeNet(nn.Module):
    def __init__(self, in_channels, num_classes=10):
        super(Model_LeNet, self).__init__()
        # Convolution layer 1
        self.conv1 = Conv2D(in_channels=in_channels, out_channels=6, kernel_size=5)
        # Max-pooling layer
        self.pool2 = Pool2D((2, 2), mode='max', stride=2)
        # Convolution layer 2
        self.conv3 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
        # Average-pooling layer
        self.pool4 = Pool2D((2, 2), mode='avg', stride=2)
        # Convolution layer 3
        self.conv5 = Conv2D(in_channels=16, out_channels=120, kernel_size=5)
        # Fully connected layer 1
        self.linear6 = nn.Linear(120, 84)
        # Fully connected layer 2
        self.linear7 = nn.Linear(84, num_classes)

    def forward(self, x):
        # C1: conv1 -> ReLU
        output = F.relu(self.conv1(x))
        # S2: pool2
        output = self.pool2(output)
        # C3: conv3 -> ReLU
        output = F.relu(self.conv3(output))
        # S4: pool4
        output = self.pool4(output)
        # C5: conv5 -> ReLU
        output = F.relu(self.conv5(output))
        # Flatten
        output = output.view(output.size(0), -1)
        # F6: linear6 -> ReLU
        output = F.relu(self.linear6(output))
        # F7: linear7 -> class logits
        output = self.linear7(output)
        return output


# Create a random array with np.random as input data
inputs = np.random.randn(1, 1, 32, 32)
inputs = inputs.astype('float32')

# Instantiate the LeNet model: 1 input channel, 10 classes
model = Model_LeNet(in_channels=1, num_classes=10)
# Collect the names of all submodules of the model in list c
c = []
for a, b in model.named_children():
    c.append(a)
print('Names of all submodules in the model:', c)  # print the submodule names

# Convert the data to a tensor
x = torch.tensor(inputs)
# Pass x through each submodule in turn; if a layer fails (e.g. a linear layer
# receiving a 4-D tensor), flatten x and retry with the same layer
for a, item in model.named_children():
    try:
        x = item(x)
    except RuntimeError:
        x = torch.reshape(x, [x.shape[0], -1])
        x = item(x)

    # Collect each layer's parameter names in list d and tensors in list e
    d = []
    e = []
    for b, c in item.named_parameters():
        d.append(b)
        e.append(c)
    # If the layer has two parameters, print its name, output shape, and both parameter shapes
    if len(e) == 2:
        print(a, x.shape, e[0].shape, e[1].shape)
    else:
        # Pooling layers have no parameters
        print(a, x.shape)

# Benchmark the network speed
import time

# Create a random array with np.random as test data
inputs = np.random.randn(1, 1, 32, 32)
inputs = inputs.astype('float32')
x = torch.tensor(inputs)

# Instantiate Model_LeNet with 1 input channel and 10 classes
model = Model_LeNet(in_channels=1, num_classes=10)

# Measure the forward-pass speed of Model_LeNet
model_time = 0
for i in range(60):
    start_time = time.time()
    out = model(x)
    end_time = time.time()
    # The first 10 iterations are warm-up and are not counted
    if i < 10:
        continue
    model_time += (end_time - start_time)
avg_model_time = model_time / 50
print('Model_LeNet speed:', avg_model_time, 's')
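
For reference, the same timing loop can be run against an equivalent model built from torch.nn's native layers. This is only a comparison sketch (the built-in ops run in optimized C++ kernels, so they are expected to be far faster than the Python loops above):

torch_lenet = nn.Sequential(
    nn.Conv2d(1, 6, 5), nn.ReLU(), nn.MaxPool2d(2, 2),
    nn.Conv2d(6, 16, 5), nn.ReLU(), nn.AvgPool2d(2, 2),
    nn.Conv2d(16, 120, 5), nn.ReLU(), nn.Flatten(),
    nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10)
)
torch_time = 0
for i in range(60):
    start_time = time.time()
    out = torch_lenet(x)
    end_time = time.time()
    # same 10-iteration warm-up as above
    if i < 10:
        continue
    torch_time += (end_time - start_time)
print('torch.nn LeNet speed:', torch_time / 50, 's')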

2. Model Training, Evaluation, and Testing

# -*- coding: utf-8 -*-

import numpy as np
import torch
import torch.nn as nn
import torch.optim as opt
import torch.nn.functional as F
from matplotlib import pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets
from torchvision.transforms import Compose, Resize, Normalize, ToTensor
from nndl.Accuracy import Accuracy
from nndl.Runnerv3 import Runnerv3
from PIL import Image

# Define the LeNet network
class PyTorch_LeNet(nn.Module):
    def __init__(self, in_channels, num_classes=10):
        super(PyTorch_LeNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=6, kernel_size=5)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
        self.pool4 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv5 = nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5)
        self.linear6 = nn.Linear(in_features=120, out_features=84)
        self.linear7 = nn.Linear(in_features=84, out_features=num_classes)

    def forward(self, x):
        output = F.relu(self.conv1(x))
        output = self.pool2(output)
        output = F.relu(self.conv3(output))
        output = self.pool4(output)
        output = F.relu(self.conv5(output))
        output = torch.flatten(output, 1)  # flatten to [batch, 120]
        output = F.relu(self.linear6(output))
        output = self.linear7(output)
        return output
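
A quick sanity check (illustrative only) that PyTorch_LeNet produces [batch, num_classes] logits for a 32x32 input:

check_model = PyTorch_LeNet(in_channels=1, num_classes=10)
print(check_model(torch.randn(4, 1, 32, 32)).shape)  # expected: torch.Size([4, 10])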

# Learning rate
lr = 0.1
# Batch size
batch_size = 64

# Download and load the MNIST dataset
train_set = datasets.MNIST(root='./data', train=True, download=True)
test_set = datasets.MNIST(root='./data', train=False, download=True)

# Shrink the dataset and split it into 2000 (train) / 200 (dev) / 200 (test)
train_images, train_labels = train_set.data[:2000], train_set.targets[:2000]
dev_images, dev_labels = train_set.data[2000:2200], train_set.targets[2000:2200]
test_images, test_labels = test_set.data[:200], test_set.targets[:200]

# Build the new train, dev, and test sets
train_set = [train_images, train_labels]
dev_set = [dev_images, dev_labels]
test_set = [test_images, test_labels]

print(f"Length of train/dev/test set: {len(train_set[0])}/{len(dev_set[0])}/{len(test_set[0])}")

# Data preprocessing
transforms = Compose([Resize(32), ToTensor(), Normalize(mean=[0.5], std=[0.5])])

# Custom dataset class
class MNIST_dataset(Dataset):
    def __init__(self, dataset, transforms, mode='train'):
        self.mode = mode
        self.transforms = transforms
        self.dataset = dataset

    def __getitem__(self, idx):
        # Fetch the image and label
        image, label = self.dataset[0][idx], self.dataset[1][idx]
        image = np.array(image).astype('float32')  # convert to a NumPy array
        image = np.reshape(image, [28, 28])  # ensure the shape is (28, 28)
        image = Image.fromarray(image.astype('uint8'), mode='L')  # convert to a PIL image
        image = self.transforms(image)  # apply preprocessing

        return image, label

    def __len__(self):
        return len(self.dataset[0])

# Build the train, dev, and test datasets
train_dataset = MNIST_dataset(dataset=train_set, transforms=transforms, mode='train')
dev_dataset = MNIST_dataset(dataset=dev_set, transforms=transforms, mode='dev')
test_dataset = MNIST_dataset(dataset=test_set, transforms=transforms, mode='test')

# Dataset iterators
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
dev_loader = DataLoader(dev_dataset, batch_size=batch_size)
test_loader = DataLoader(test_dataset, batch_size=batch_size)

# Initialize the model
model = PyTorch_LeNet(in_channels=1, num_classes=10)

# Optimizer
optimizer = opt.SGD(lr=lr, params=model.parameters())

# Loss function
loss_fn = F.cross_entropy

# Evaluation metric
metric = Accuracy(is_logist=True)

# Instantiate the Runnerv3 class and pass in the training configuration
runner = Runnerv3(model, optimizer, loss_fn, metric)

# Start training
log_steps = 15
eval_steps = 15
runner.train(train_loader, dev_loader, num_epochs=10, log_steps=log_steps, eval_steps=eval_steps, save_path="best_model.pdparams")


# Visualization
def plot(runner, fig_name):
    plt.figure(figsize=(10, 5))

    plt.subplot(1, 2, 1)
    train_items = runner.train_step_losses[::30]
    train_steps = [x[0] for x in train_items]
    train_losses = [x[1] for x in train_items]

    plt.plot(train_steps, train_losses, color='#8E004D', label="Train loss")
    if runner.dev_losses[0][0] != -1:
        dev_steps = [x[0] for x in runner.dev_losses]
        dev_losses = [x[1] for x in runner.dev_losses]
        plt.plot(dev_steps, dev_losses, color='#E20079', linestyle='--', label="Dev loss")
    # Axis labels and legend
    plt.ylabel("loss", fontsize='x-large')
    plt.xlabel("step", fontsize='x-large')
    plt.legend(loc='upper right', fontsize='x-large')

    plt.subplot(1, 2, 2)
    # Plot the dev accuracy curve
    if runner.dev_losses[0][0] != -1:
        plt.plot(dev_steps, runner.dev_scores,
                 color='#E20079', linestyle="--", label="Dev accuracy")
    else:
        plt.plot(list(range(len(runner.dev_scores))), runner.dev_scores,
                 color='#E20079', linestyle="--", label="Dev accuracy")
    # Axis labels and legend
    plt.ylabel("score", fontsize='x-large')
    plt.xlabel("step", fontsize='x-large')
    plt.legend(loc='lower right', fontsize='x-large')

    plt.savefig(fig_name)
    plt.show()

plot(runner, 'cnn-loss1.pdf')

# Load the best model
runner.load_model('best_model.pdparams')
# Evaluate the model on the test set
score, loss = runner.evaluate(test_loader)
print("[Test] accuracy/loss: {:.4f}/{:.4f}".format(score, loss))

# Fetch the first batch of the test set
X, label = next(iter(test_loader))
logits = runner.predict(X)
# Multi-class task: compute predicted probabilities with softmax
pred = F.softmax(logits, dim=1)
# Take the class with the highest probability for the sample at index 2 of the batch
pred_class = torch.argmax(pred[2]).numpy()
label = label[2].numpy()
# Print the true and predicted classes
print("The true category is {} and the predicted category is {}".format(label, pred_class))
# Visualize the image
plt.figure(figsize=(2, 2))
image, label = test_set[0][2], test_set[1][2]
image = np.array(image).astype('float32')
image = np.reshape(image, [28, 28])
image = Image.fromarray(image.astype('uint8'), mode='L')
plt.imshow(image)
plt.savefig('cnn-number2.pdf')
