MLP and CNN Model Implementations for Handwritten Digit Recognition

1. Code Implementation

from torchvision import datasets,transforms
from torch.utils.data import DataLoader
import torch.nn as nn
from torch import optim
import os,torch


BATCH_SIZE=64
TEST_BATCH_SIZE=1000

input_size = 28 * 28
hidden_size = 128
output_size = 10

learning_rate = 0.001
num_epochs=10

save_path="./mnist_MLP_model"
if not os.path.exists(save_path):
    os.makedirs(save_path)

# 1. Data preparation
def get_dataload(train=True,batch_size=BATCH_SIZE):
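    # With download=False, the MNIST files must already exist under ./mnist_data;
    # set download=True on the first run to fetch them automatically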
    dataset = datasets.MNIST('./mnist_data', train=train, download=False,
                             transform=transforms.Compose([
                               transforms.ToTensor(),
                               # transforms.Normalize((0.1307,), (0.3081,))
                             ]))
    # Prepare the data iterator (DataLoader)
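    # Note: shuffle=True is typical for training; the test loader is usually left unshuffled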
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    return data_loader

# Build the data loaders
trainset = get_dataload(True,BATCH_SIZE)
testset = get_dataload(False,TEST_BATCH_SIZE)

# 2. Model construction: MLP model
class MLP(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
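        # Flatten the (batch, 1, 28, 28) image tensor to (batch, 784)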
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x

# Build the CNN model
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
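        # Two 2x2 max-pools reduce 28x28 to 7x7, with 64 channels after conv2,
        # so the flattened feature vector fed to fc1 has 64 * 7 * 7 elements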
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.maxpool(x)
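        # Flatten the (batch, 64, 7, 7) feature maps to (batch, 3136)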
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x

# Initialize the model (CNN by default)
model = CNN()

# Alternatively, use the MLP instead:
# model = MLP(input_size, hidden_size, output_size)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# 3. Model training
def train():
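    model.train()  # ensure training mode (test() switches the model to eval mode)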
    for epoch in range(num_epochs):
        running_loss = 0.0
        for i, data in enumerate(trainset):
            inputs, labels = data

            optimizer.zero_grad()

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss / len(trainset):.4f}')
        # 4. Save the model after the final epoch
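        # state_dict() stores only the learned parameters, not the module definition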
        if epoch==num_epochs-1:
            torch.save(model.state_dict(), save_path+"/model.pkl")
            torch.save(optimizer.state_dict(), save_path+"/optimizer.pkl")


# 5. Model evaluation
def test():
    correct = 0
    total = 0
    model.eval()  # set the model to evaluation mode
    with torch.no_grad():
        for data in testset:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print(f'Accuracy on the test set: {(100 * correct / total):.2f}%')


train()
test()
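
Because only the state_dict is saved, the network has to be rebuilt before the weights can be restored. A minimal inference sketch, assuming the CNN class, save_path, and testset defined above and a model.pkl produced by train():

# Rebuild the architecture and load the saved parameters
loaded_model = CNN()
loaded_model.load_state_dict(torch.load(save_path + "/model.pkl"))
loaded_model.eval()

# Predict the label of a single test image
images, labels = next(iter(testset))
with torch.no_grad():
    logits = loaded_model(images[:1])
    predicted = logits.argmax(dim=1)
print(f"Predicted: {predicted.item()}, actual: {labels[0].item()}")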

2. Results

2.1 MLP (Multilayer Perceptron)

Epoch [1/10], Loss: 0.3481
Epoch [2/10], Loss: 0.1575
Epoch [3/10], Loss: 0.1081
Epoch [4/10], Loss: 0.0806
Epoch [5/10], Loss: 0.0637
Epoch [6/10], Loss: 0.0515
Epoch [7/10], Loss: 0.0423
Epoch [8/10], Loss: 0.0340
Epoch [9/10], Loss: 0.0288
Epoch [10/10], Loss: 0.0235
Accuracy on the test set: 97.68%

2.2 CNN (Convolutional Neural Network)

Epoch [1/10], Loss: 0.1832
Epoch [2/10], Loss: 0.0508
Epoch [3/10], Loss: 0.0352
Epoch [4/10], Loss: 0.0272
Epoch [5/10], Loss: 0.0201
Epoch [6/10], Loss: 0.0151
Epoch [7/10], Loss: 0.0123
Epoch [8/10], Loss: 0.0096
Epoch [9/10], Loss: 0.0075
Epoch [10/10], Loss: 0.0084
Accuracy on the test set: 99.21%
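
For reference, the sizes of the two models can be compared by counting trainable parameters. A short sketch, assuming the MLP and CNN classes and the hyperparameters defined above; count_parameters is a helper introduced here for illustration:

def count_parameters(m):
    # Sum the element counts of all trainable tensors
    return sum(p.numel() for p in m.parameters() if p.requires_grad)

print(f"MLP parameters: {count_parameters(MLP(input_size, hidden_size, output_size)):,}")
print(f"CNN parameters: {count_parameters(CNN()):,}")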
