LeNet-5 for MNIST Classification (Study Notes, Part 4)

1. Analysis

Reference: the LeNet-5 paper (LeCun et al., 1998). The input images in the paper are 1x32x32, while MNIST images are 1x28x28. With two 5x5 convolutions (no padding) and two 2x2 average-pooling layers, a 28x28 input shrinks to 16 feature maps of 4x4, i.e. 16*4*4 = 256 values, which is why the code uses self.hidden1 = nn.Linear(256, 120). For a 1x32x32 input the final feature maps would be 16x5x5 = 400, and the line would instead be self.hidden1 = nn.Linear(400, 120).
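A quick sanity check (a sketch, not part of the original post) traces a dummy tensor through the two convolutional blocks to confirm both flattened sizes:

import torch
import torch.nn as nn

# Feature extractor identical in structure to conv1/conv2 below (ReLU + average pooling).
features = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), nn.ReLU(), nn.AvgPool2d(2, 2),   # 28x28 -> 24x24 -> 12x12
    nn.Conv2d(6, 16, kernel_size=5), nn.ReLU(), nn.AvgPool2d(2, 2),  # 12x12 -> 8x8 -> 4x4
)
print(features(torch.zeros(1, 1, 28, 28)).flatten(1).shape)  # torch.Size([1, 256])
print(features(torch.zeros(1, 1, 32, 32)).flatten(1).shape)  # torch.Size([1, 400])

Alternatively, torchvision.transforms.Pad(2) can pad each MNIST image to 1x32x32 so the network matches the paper's geometry together with nn.Linear(400, 120).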
Python code — training the model:

import torch
import torchvision
import torch.nn as nn
import torch.utils.data as Data

EPOCH = 5
BATCH_SIZE = 50
LR = 0.002
DOWNLOAD_MNIST = True

train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)

test_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST,
)

train_loader = Data.DataLoader(
    dataset=train_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=4,
)

test_loader = Data.DataLoader(
    dataset=test_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=4,
)


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=6,
                kernel_size=(5, 5),
                stride=(1, 1),
                padding=0,
            ),  # 1x28x28 -> 6x24x24
            nn.ReLU(),
            nn.AvgPool2d(
                kernel_size=2, stride=2,
            )  # 6x24x24 -> 6x12x12
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=6,
                out_channels=16,
                kernel_size=(5, 5),
                stride=(1, 1),
                padding=0,
            ),  # 6x12x12 -> 16x8x8
            nn.ReLU(),
            nn.AvgPool2d(
                kernel_size=2, stride=2,
            )  # 16x8x8 -> 16x4x4
        )
        self.flat = nn.Flatten()
        self.hidden1 = nn.Linear(256, 120)  # 16*4*4 = 256 flattened features
        self.hidden2 = nn.Linear(120, 84)
        self.predict = nn.Linear(84, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.flat(x)
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        out = self.predict(x)

        return out


net = CNN()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = net.to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
loss_function = torch.nn.CrossEntropyLoss()


def train(data_loader, model, loss_function, optimizer):
    size = len(data_loader.dataset)
    model.train()
    for batch, (X, y) in enumerate(data_loader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        prediction = model(X)
        loss = loss_function(prediction, y)

        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")


def test(data_loader, model, loss_fn):
    size = len(data_loader.dataset)
    num_batches = len(data_loader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in data_loader:
            X, y = X.to(device), y.to(device)
            prediction = model(X)
            test_loss += loss_fn(prediction, y).item()
            correct += (prediction.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error:\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")


def call_model():
    for t in range(EPOCH):
        print(f"Epoch {t + 1}\n-------------------------------")
        train(train_loader, net, loss_function, optimizer)
        test(test_loader, net, loss_function)

    torch.save(net.state_dict(), 'model.pth')
    print("Saved PyTorch Model State to model.pth")


if __name__ == '__main__':
    call_model()

The original LeNet-5 uses sigmoid or tanh activations, whereas the code here applies nn.ReLU() after each convolutional layer. To stay faithful to the paper, substitute nn.Tanh() or nn.Sigmoid().
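For reference, a tanh version of the first block might look like this (a sketch only; swap in nn.Sigmoid() for the sigmoid variant):

import torch.nn as nn

# Same structure as conv1 above, but with tanh as in the original paper.
conv1 = nn.Sequential(
    nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5),
    nn.Tanh(),
    nn.AvgPool2d(kernel_size=2, stride=2),
)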
Output:

Epoch 1
-------------------------------
loss: 2.306098  [    0/60000]
loss: 0.412156  [ 5000/60000]
loss: 0.412486  [10000/60000]
loss: 0.152816  [15000/60000]
loss: 0.309540  [20000/60000]
loss: 0.100331  [25000/60000]
loss: 0.130987  [30000/60000]
loss: 0.114419  [35000/60000]
loss: 0.151787  [40000/60000]
loss: 0.164818  [45000/60000]
loss: 0.046076  [50000/60000]
loss: 0.023506  [55000/60000]
Test Error: 
Accuracy: 97.9%, Avg loss: 0.065859 

Epoch 2
-------------------------------
loss: 0.085328  [    0/60000]
loss: 0.045330  [ 5000/60000]
loss: 0.114169  [10000/60000]
loss: 0.186654  [15000/60000]
loss: 0.104414  [20000/60000]
loss: 0.014921  [25000/60000]
loss: 0.102247  [30000/60000]
loss: 0.136939  [35000/60000]
loss: 0.111537  [40000/60000]
loss: 0.004142  [45000/60000]
loss: 0.025103  [50000/60000]
loss: 0.085847  [55000/60000]
Test Error: 
Accuracy: 98.3%, Avg loss: 0.063019 

Epoch 3
-------------------------------
loss: 0.062379  [    0/60000]
loss: 0.004411  [ 5000/60000]
loss: 0.028583  [10000/60000]
loss: 0.045782  [15000/60000]
loss: 0.230601  [20000/60000]
loss: 0.026126  [25000/60000]
loss: 0.044285  [30000/60000]
loss: 0.016973  [35000/60000]
loss: 0.015370  [40000/60000]
loss: 0.006403  [45000/60000]
loss: 0.018655  [50000/60000]
loss: 0.017848  [55000/60000]
Test Error: 
Accuracy: 98.7%, Avg loss: 0.042296 

Epoch 4
-------------------------------
loss: 0.045252  [    0/60000]
loss: 0.102186  [ 5000/60000]
loss: 0.030442  [10000/60000]
loss: 0.055944  [15000/60000]
loss: 0.056801  [20000/60000]
loss: 0.057802  [25000/60000]
loss: 0.060436  [30000/60000]
loss: 0.010024  [35000/60000]
loss: 0.012362  [40000/60000]
loss: 0.021393  [45000/60000]
loss: 0.004517  [50000/60000]
loss: 0.002426  [55000/60000]
Test Error: 
Accuracy: 98.7%, Avg loss: 0.040705 

Epoch 5
-------------------------------

2. Loading the model and testing the full test set
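The script below rebuilds the network, reloads the weights saved in step 1 (map_location keeps the checkpoint loadable on a CPU-only machine), and runs every image in the test set through it: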

import torch
import torchvision
import torch.nn as nn
import torch.utils.data as Data

EPOCH = 5
BATCH_SIZE = 50
LR = 0.002
DOWNLOAD_MNIST = True

train_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)

test_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST,
)

train_loader = Data.DataLoader(
    dataset=train_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=4,
)

test_loader = Data.DataLoader(
    dataset=test_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=4,
)


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=6,
                kernel_size=(5, 5),
                stride=(1, 1),
                padding=0,
            ),  # 1x28x28 -> 6x24x24
            nn.ReLU(),
            nn.AvgPool2d(
                kernel_size=2, stride=2,
            )  # 6x24x24 -> 6x12x12
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=6,
                out_channels=16,
                kernel_size=(5, 5),
                stride=(1, 1),
                padding=0,
            ),  # 6x12x12 -> 16x8x8
            nn.ReLU(),
            nn.AvgPool2d(
                kernel_size=2, stride=2,
            )  # 16x8x8 -> 16x4x4
        )
        self.flat = nn.Flatten()
        self.hidden1 = nn.Linear(256, 120)  # 16*4*4 = 256 flattened features
        self.hidden2 = nn.Linear(120, 84)
        self.predict = nn.Linear(84, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.flat(x)
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        out = self.predict(x)
        return out


net = CNN()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = net.to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
loss_function = torch.nn.CrossEntropyLoss()


def train(data_loader, model, loss_function, optimizer):
    size = len(data_loader.dataset)
    model.train()
    for batch, (X, y) in enumerate(data_loader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        prediction = model(X)
        loss = loss_function(prediction, y)

        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")


def test(data_loader, model, loss_fn):
    size = len(data_loader.dataset)
    num_batches = len(data_loader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in data_loader:
            X, y = X.to(device), y.to(device)
            prediction = model(X)
            test_loss += loss_fn(prediction, y).item()
            correct += (prediction.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")


def call_model():
    for t in range(EPOCH):
        print(f"Epoch {t + 1}\n-------------------------------")
        train(train_loader, net, loss_function, optimizer)
        test(test_loader, net, loss_function)

    torch.save(net.state_dict(), 'model.pth')
    print("Saved PyTorch Model State to model.pth")


def load_model():
    model = CNN()
    # map_location keeps the checkpoint loadable on CPU-only machines
    model.load_state_dict(torch.load("model.pth", map_location="cpu"))
    # MNIST labels are the digits 0-9 (not the FashionMNIST class names)
    classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    model.eval()
    with torch.no_grad():
        accuracy = 0
        for (images, labels) in test_loader:
            predict = model(images)
            for i in range(len(predict)):
                predicted, actual = classes[predict[i].argmax(0)], classes[labels[i]]
                print(f'Predicted: "{predicted}", Actual: "{actual}"')
                if predicted == actual:
                    accuracy += 1
        print("accuracy:%.4f" % (accuracy / len(test_data)))
        print('num:%d' % accuracy)


if __name__ == '__main__':
    load_model()

Output:

...
accuracy:0.9885
num:9885
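As a side note, comparing class-name strings is equivalent to comparing argmax indices directly, since the class list maps one-to-one to label indices. A more compact evaluation sketch (assuming the same model and test_loader as above):

import torch

def evaluate(model, data_loader):
    # Count predictions whose argmax index matches the label.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in data_loader:
            correct += (model(images).argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    return correct / total

# Usage: print("accuracy:%.4f" % evaluate(model, test_loader))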