PyTorch 例子 2 —— 手写数字识别（MNIST）

import numpy as np
import matplotlib.pyplot as plt
from IPython import embed
import torch
import torchvision
from torchvision import datasets, transforms
from torch import nn

# Normalization constants: ToTensor() yields values in [0, 1];
# Normalize maps them to roughly [-1, 1] via (x - mean) / std.
mean = 0.5
std = 0.5
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std),
])

# MNIST train/test splits; the training split is downloaded into
# ./data/ on first run (the test split is assumed to be present).
data_train = datasets.MNIST(
    root="./data/", train=True, transform=transform, download=True)
data_test = datasets.MNIST(
    root="./data/", train=False, transform=transform)

data_loader_train = torch.utils.data.DataLoader(
    data_train, batch_size=64, shuffle=True)
data_loader_test = torch.utils.data.DataLoader(
    data_test, batch_size=64, shuffle=True)

# Preview one training batch: tile the images into a single grid,
# convert CHW -> HWC for matplotlib, undo the normalization, and show.
images, labels = next(iter(data_loader_train))
grid = torchvision.utils.make_grid(images)
grid = grid.numpy().transpose(1, 2, 0)
plt.imshow(grid * std + mean)
plt.show()


class Model(nn.Module):
    """Small CNN classifier for 28x28 single-channel MNIST digits.

    Two 3x3 convolutions (1 -> 64 -> 128 channels) followed by a 2x2
    max-pool, then a dropout-regularized MLP head producing 10 logits.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: padding=1 keeps the 28x28 spatial size
        # through both convolutions; the pool halves it to 14x14.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Classifier head over the flattened 128 x 14 x 14 feature map.
        self.dense = nn.Sequential(
            nn.Linear(14 * 14 * 128, 1024),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(1024, 10),
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10).

        Expects x of shape (batch, 1, 28, 28); the hard-coded flatten
        size ties this model to 28x28 inputs.
        """
        features = self.conv1(x)
        flat = features.view(-1, 14 * 14 * 128)
        return self.dense(flat)


model = Model()
optimizer = torch.optim.Adam(model.parameters())
cost = nn.CrossEntropyLoss()

print(model)

num_epochs = 2

for epoch in range(num_epochs):
    running_loss = 0.0
    running_correct = 0
    print(f"Epoch: {epoch + 1}/{num_epochs}")
    print("-" * 10)
    # Dropout must be active while training (and re-enabled after the
    # eval pass at the end of the previous epoch).
    model.train()
    for i, (X_train, y_train) in enumerate(data_loader_train):
        outputs = model(X_train)
        _, pred = torch.max(outputs, 1)
        optimizer.zero_grad()
        loss = cost(outputs, y_train)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        batch_correct = torch.sum(pred == y_train).item()
        running_correct += batch_correct
        if i % 10 == 0:
            # Average loss over batches seen so far (loss.item() is the
            # per-batch mean); the original divided the running loss by
            # the full dataset size, which made early values misleading.
            # Batch accuracy divides by the batch size — the original
            # divided by len(data) == 2 (the [X, y] pair), which was a bug.
            print(
                f"i: {i}, Loss: {running_loss / (i + 1)}, "
                f"train accuracy: {batch_correct / y_train.size(0)}")

    # Evaluation: disable dropout and skip gradient tracking; without
    # model.eval() the test accuracy is computed with dropout active.
    model.eval()
    test_correct = 0
    with torch.no_grad():
        for X_test, y_test in data_loader_test:
            outputs = model(X_test)
            _, pred = torch.max(outputs, 1)
            test_correct += torch.sum(pred == y_test).item()

    print(
        f"Loss: {running_loss / len(data_loader_train)}, "
        f"train accuracy: {running_correct / len(data_train)}, "
        f"test accuracy: {test_correct / len(data_test)}")

训练两个 epoch 后，测试集正确率约为 98%。

©️2020 CSDN 皮肤主题: 大白 设计师:CSDN官方博客 返回首页