Handwritten Digit Recognition

Code Implementation

"""

"""
import torch
import torchvision
# torchvision包的主要功能是实现数据的处理,导入和预览等
from torchvision import datasets
from torchvision import transforms
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn
import pylab


transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, ))])
# First, obtain the handwritten-digit training set and test set
# root specifies where the dataset is stored after downloading
# transform specifies which transformations to apply when loading the data
# train=True loads the training split of the dataset; train=False loads the test split
data_train = datasets.MNIST(
    root="./data/",
    transform=transform,
    train=True,
    download=True
)
data_test = datasets.MNIST(
    root="./data/",
    transform=transform,
    train=False,
    download=True
)
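# A quick, optional sanity check: the standard MNIST split has 60,000 training
# and 10,000 test images, and Normalize((0.5,), (0.5,)) maps the ToTensor
# range [0, 1] to roughly [-1, 1] via (x - 0.5) / 0.5.
print(len(data_train), len(data_test))  # expected: 60000 10000
sample_image, sample_label = data_train[0]
print(sample_image.shape, sample_image.min().item(), sample_image.max().item())
# expected: torch.Size([1, 28, 28]) with values between -1.0 and 1.0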
# Data loading
# dataset specifies which dataset to load
# batch_size sets the number of images in each batch
# shuffle=True randomly shuffles the data before it is batched
data_loader_train = torch.utils.data.DataLoader(
    dataset=data_train,
    batch_size=64,
    shuffle=True
)
data_loader_test = torch.utils.data.DataLoader(
    dataset=data_test,
    batch_size=64,
    shuffle=True
)
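# A quick, optional look at what the loaders yield: with batch_size=64 each
# batch is a (64, 1, 28, 28) tensor of images plus a (64,) tensor of labels.
batch_images, batch_labels = next(iter(data_loader_train))
print(batch_images.shape, batch_labels.shape)
# expected: torch.Size([64, 1, 28, 28]) torch.Size([64])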

# Data preview
def preview():
    images, labels = next(iter(data_loader_train))
    img = torchvision.utils.make_grid(images)
    img = img.numpy().transpose(1, 2, 0)
    std = [0.5, 0.5, 0.5]
    mean = [0.5, 0.5, 0.5]
    img = img*std+mean
    print([labels[i].item() for i in range(len(labels))])
    plt.imshow(img)
    plt.show()

preview()

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = torch.nn.Sequential(torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
                                        torch.nn.ReLU(),
                                        torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
                                        torch.nn.ReLU(),
                                        torch.nn.MaxPool2d(stride=2, kernel_size=2))
        self.dense = torch.nn.Sequential(torch.nn.Linear(14*14*128, 1024),
                                        torch.nn.ReLU(),
                                        torch.nn.Dropout(p=0.5),
                                        torch.nn.Linear(1024, 10))
    def forward(self, x):
        x = self.conv1(x)
        x = x.view(-1, 14*14*128)
        x = self.dense(x)
        return x

model = Model()
print(model)
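# An optional shape check to justify the 14*14*128 flatten size: the padded
# 3x3 convolutions keep the 28x28 spatial size, the 2x2 max-pool halves it to
# 14x14, and conv2 outputs 128 channels, giving 14*14*128 flattened features.
with torch.no_grad():
    dummy = torch.zeros(1, 1, 28, 28)
    print(model.conv1(dummy).shape)  # expected: torch.Size([1, 128, 14, 14])
    print(model(dummy).shape)        # expected: torch.Size([1, 10])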
# Use the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # move all model parameters to the selected device
LOSS_FUNCTION = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())

EPOCH_N = 5

for EPOCH in range(EPOCH_N):

    # Reset the running loss and the count of correct predictions
    running_loss = 0.0
    running_correct = 0

    print(f"-------------{EPOCH}/{EPOCH_N}-------------")
    for data in data_loader_train:
        #取出数据
        X_train, y_train = data
        X_train, y_train = X_train.cuda(), y_train.cuda()
        X_train, y_train = Variable(X_train), Variable(y_train)

        # Forward pass
        outputs = model(X_train)
        _, pred = torch.max(outputs, 1)

        # Compute the loss
        LOSS = LOSS_FUNCTION(outputs, y_train)

        # Zero the gradients before backpropagation
        optimizer.zero_grad()

        # Backward pass
        LOSS.backward()

        # Update the parameters
        optimizer.step()

        running_loss += LOSS.item()
        running_correct += torch.sum(pred == y_train).item()

    # Evaluate on the test set with dropout disabled and no gradient tracking
    model.eval()
    testing_correct = 0
    with torch.no_grad():
        for data in data_loader_test:
            X_test, y_test = data
            # Move the test batch to the same device as the model
            X_test, y_test = X_test.to(device), y_test.to(device)
            outputs = model(X_test)
            _, pred = torch.max(outputs, 1)
            testing_correct += torch.sum(pred == y_test).item()
    print("Loss is: {:.4f}, Train Accuracy is: {:.4f}%, Test Accuracy is: {:.4f}%".format(
        running_loss / len(data_train),
        100 * running_correct / len(data_train),
        100 * testing_correct / len(data_test)))