Learning LeNet, with a Code Implementation

This post follows Mu Li (李沐)'s book Dive into Deep Learning.

import torch
from torch import nn

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 10))

The input image is 28×28. It first passes through a convolutional layer with 6 kernels of size 5×5 (and padding 2). Each kernel produces one feature map, so the layer outputs 6 feature maps. The size of each output feature map follows the standard formula:

output feature map size = (input size - kernel size + 2 × padding) / stride + 1
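
Plugging the first convolution into this formula is a quick sanity check. The helper below (conv_output_size, a throwaway function written for this post, not part of the network code) simply evaluates the formula:

def conv_output_size(input_size, kernel_size, padding=0, stride=1):
    # (input - kernel + 2 * padding) // stride + 1
    return (input_size - kernel_size + 2 * padding) // stride + 1

print(conv_output_size(28, 5, padding=2))  # 28: padding=2 preserves the size
print(conv_output_size(28, 5))             # 24: without padding the map shrinks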

AvgPool2d pooling takes the average of the elements in each window and uses that mean as the value at the corresponding position of the output feature map (a small example follows the list below).

Its main effects are:

1. Feature downsampling: it reduces the spatial size of the feature maps, which cuts computation and the number of downstream parameters, helping to control model complexity and reduce overfitting.

2. A degree of translation invariance: when the same feature shifts slightly within the input feature map, the pooled output changes little or not at all.
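
A tiny standalone sketch (separate from the network above) shows the averaging and the downsampling: a 4×4 input becomes a 2×2 output, and each output value is the mean of one 2×2 window:

import torch
from torch import nn

pool = nn.AvgPool2d(kernel_size=2, stride=2)
x = torch.tensor([[[[ 1.,  2.,  3.,  4.],
                    [ 5.,  6.,  7.,  8.],
                    [ 9., 10., 11., 12.],
                    [13., 14., 15., 16.]]]])
print(pool(x))
# tensor([[[[ 3.5000,  5.5000],
#           [11.5000, 13.5000]]]])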

After this pooling layer we have 6 feature maps of size 14×14. The network then applies a second round of convolution and pooling, and finally the fully connected layers produce the 10-class output.

X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__,'output shape: \t',X.shape)
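
Running this shape check prints the size after every layer. Consistent with the formula above, the padded first convolution keeps 28×28, each pooling layer halves the spatial size, and the second (unpadded) convolution shrinks 14×14 to 10×10:

Conv2d output shape:     torch.Size([1, 6, 28, 28])
Sigmoid output shape:    torch.Size([1, 6, 28, 28])
AvgPool2d output shape:  torch.Size([1, 6, 14, 14])
Conv2d output shape:     torch.Size([1, 16, 10, 10])
Sigmoid output shape:    torch.Size([1, 16, 10, 10])
AvgPool2d output shape:  torch.Size([1, 16, 5, 5])
Flatten output shape:    torch.Size([1, 400])
Linear output shape:     torch.Size([1, 120])
Sigmoid output shape:    torch.Size([1, 120])
Linear output shape:     torch.Size([1, 84])
Sigmoid output shape:    torch.Size([1, 84])
Linear output shape:     torch.Size([1, 10])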

The complete implementation is below; the model is trained on the CPU.

import time

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

class Animator:
    """Minimal live plot of training curves (one series per legend entry)."""
    def __init__(self, xlabel='', xlim=None, legend=None):
        self.xlabel = xlabel
        self.xlim = xlim
        self.legend = legend or []
        self.fig, self.ax = plt.subplots()
        # One (x, y) history per curve
        self.xdata = [[] for _ in self.legend]
        self.ydata = [[] for _ in self.legend]

    def add(self, x, ys):
        # ys holds one value per curve; None means "no new point for this curve"
        for i, y in enumerate(ys):
            if y is not None:
                self.xdata[i].append(x)
                self.ydata[i].append(y)
        self.update_plot()

    def update_plot(self):
        self.ax.clear()
        for label, xs, ys in zip(self.legend, self.xdata, self.ydata):
            self.ax.plot(xs, ys, label=label)
        if self.xlim is not None:
            self.ax.set_xlim(*self.xlim)
        self.ax.set_xlabel(self.xlabel)
        if self.legend:
            self.ax.legend()
        self.fig.canvas.draw()
        plt.pause(0.001)

    def show(self):
        plt.show()

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 10)
)

X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape: \t', X.shape)

batch_size = 256

# Define the data transforms for resizing and normalization
transform = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),          # Convert images to tensors
    transforms.Normalize((0.5,), (0.5,))
])
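# Note: Normalize((0.5,), (0.5,)) rescales ToTensor's [0, 1] pixel range to
# [-1, 1] via (x - 0.5) / 0.5; Resize((28, 28)) is a no-op for Fashion-MNIST,
# whose images are already 28x28, but it makes the expected input size explicit.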

# Load the Fashion-MNIST dataset (downloaded to ./data; adjust the root path as needed)
mnist_train = torchvision.datasets.FashionMNIST(
    root='./data', train=True, transform=transform, download=True
)
mnist_test = torchvision.datasets.FashionMNIST(
    root='./data', train=False, transform=transform, download=True
)

# Create data loaders for training and testing
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
device = torch.device("cpu")
# Hyperparameters; train_ch6 below builds its own SGD optimizer and loss function
lr, num_epochs = 0.9, 10
def evaluate_accuracy(net, data_iter, device=None):
    """Compute the model's accuracy on the dataset given by data_iter."""
    if isinstance(net, nn.Module):
        net.eval()  # switch to evaluation mode
        if not device:
            device = next(iter(net.parameters())).device

    # Number of correct predictions and total number of predictions
    correct_predictions = 0
    total_predictions = 0

    with torch.no_grad():
        for X, y in data_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            _, predicted = torch.max(y_hat, 1)
            correct_predictions += (predicted == y).sum().item()
            total_predictions += y.numel()

    accuracy = correct_predictions / total_predictions
    return accuracy

def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    """Train the model on the CPU."""
    def init_weights(m):
        if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weights)
    print('training on', device)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss = nn.CrossEntropyLoss()
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs],
                        legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = time.time(), len(train_iter)
    for epoch in range(num_epochs):
        # Sum of training loss, number of correct predictions, number of examples
        metric = [0.0, 0.0, 0]
        net.train()
        for i, (X, y) in enumerate(train_iter):
            optimizer.zero_grad()
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
            with torch.no_grad():
                metric[0] += l.item() * X.shape[0]
                correct = torch.eq(torch.argmax(y_hat, dim=1), y).float()
                metric[1] += torch.sum(correct).item()
                metric[2] += X.shape[0]
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy(net, test_iter, device)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
          f'test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / (time.time() - timer):.1f} examples/sec '
          f'on {str(device)}')


train_ch6(net, train_iter, test_iter, num_epochs, lr, device)
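
Because the Animator redraws the figure with plt.pause while training runs, one final call keeps the window with the curves open after train_ch6 returns:

plt.show()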
