Training a CNN with PyTorch for MNIST Handwritten Digit Recognition



Environment:
Python 3.6.8
CUDA 10.0


Name: torch
Version: 1.3.0
Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration
Home-page: https://pytorch.org/
Author: PyTorch Team
Author-email: packages@pytorch.org
License: BSD-3
Location: /home/andrew/.local/lib/python3.6/site-packages
Requires: numpy
Required-by: torchvision

Name: torchvision
Version: 0.4.1
Summary: image and video datasets and models for torch deep learning
Home-page: https://github.com/pytorch/vision
Author: PyTorch Core Team
Author-email: soumith@pytorch.org
License: BSD
Location: /home/andrew/.local/lib/python3.6/site-packages
Requires: numpy, torch, six, pillow
Required-by:
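These versions can also be confirmed from inside Python; the following is a minimal check (assuming the packages listed above are installed):

import torch
import torchvision

print(torch.__version__)          # e.g. 1.3.0
print(torchvision.__version__)    # e.g. 0.4.1
print(torch.version.cuda)         # CUDA version the wheel was built against
print(torch.cuda.is_available())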


The full code is attached below:

import torch
from torch import nn,optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
use_cuda = torch.cuda.is_available()
print('use_cuda: ',use_cuda)
use_cuda:  True
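# Note (added sketch, not part of the original post): instead of the use_cuda flag and
# the .cuda() calls used below, newer code usually works with a torch.device object, e.g.
#   device = torch.device('cuda' if use_cuda else 'cpu')
#   model = Cnn(1,10).to(device); img = img.to(device); label = label.to(device)
# The original flag-based style is kept unchanged here.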
torch.manual_seed(1)
batch_size = 128
learning_rate = 1e-2
num_epoches = 10
train_dataset = datasets.MNIST(
    root='./data',
    train=True,
    transform=transforms.ToTensor(),
    download=True
)
test_dataset = datasets.MNIST(
    root='./data',
    train=False,
    transform=transforms.ToTensor()
)

train_loader = DataLoader(train_dataset,batch_size=batch_size,shuffle=True)
test_loader = DataLoader(test_dataset,batch_size=batch_size,shuffle=False)
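# Added sanity check (not part of the original post): pull one batch from the loader
# to confirm the tensor shapes the network will receive.
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)    # expected: torch.Size([128, 1, 28, 28]) torch.Size([128])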
class Cnn(nn.Module):
    def __init__(self,in_dim,n_class):
        super(Cnn,self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim,6,3,stride=1,padding=1),    #1*28*28 -> 6*28*28
            nn.ReLU(True),
            nn.MaxPool2d(2,2),                           #6*28*28 -> 6*14*14
            nn.Conv2d(6,16,5,stride=1,padding=0),        #6*14*14 -> 16*10*10
            nn.ReLU(True),
            nn.MaxPool2d(2,2)                            #16*10*10 -> 16*5*5 (flatten: 400)
        )
        self.fc = nn.Sequential(
            nn.Linear(400,120),
            nn.Linear(120,84),
            nn.Linear(84,n_class)
        )
    def forward(self,x):
        out = self.conv(x)
        out = out.view(out.size(0),400)
        out = self.fc(out)
        return out
    

model = Cnn(1,10).cuda() if use_cuda else Cnn(1,10)
print(model)
Cnn(
  (conv): Sequential(
    (0): Conv2d(1, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (fc): Sequential(
    (0): Linear(in_features=400, out_features=120, bias=True)
    (1): Linear(in_features=120, out_features=84, bias=True)
    (2): Linear(in_features=84, out_features=10, bias=True)
  )
)
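# Added sanity check (not part of the original post): push a dummy batch through the
# conv stack to confirm it really produces 16*5*5 = 400 features before the fc layers.
dummy = torch.zeros(1,1,28,28)
if use_cuda:
    dummy = dummy.cuda()
print(model.conv(dummy).shape)    # expected: torch.Size([1, 16, 5, 5])
print(model(dummy).shape)         # expected: torch.Size([1, 10])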
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),lr=learning_rate)
#Train the network
for epoch in range(num_epoches):
    running_loss = 0.0
    running_acc = 0.0
    for i,data in enumerate(train_loader,1):
        img,label = data
        if use_cuda:
            img = Variable(img).cuda()
            label = Variable(label).cuda()
        else:
            img = Variable(img)
            label = Variable(label)
        #Forward pass
        out = model(img)
        loss = criterion(out,label)
        running_loss += loss.item()*label.size(0)   #accumulate loss weighted by batch size
        _,predicted = torch.max(out,1)
        num_correct = (predicted==label).sum()
        running_acc += num_correct.item()           #accumulate number of correct predictions
        #Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print("Train {} epoch, Loss: {:.6f}, Acc: {:.6f}".format(
        epoch+1,running_loss/(len(train_dataset)),running_acc/(len(train_dataset))))
Train 1 epoch, Loss: 2.285776, Acc: 0.221550
Train 2 epoch, Loss: 1.370810, Acc: 0.636083
Train 3 epoch, Loss: 0.411639, Acc: 0.878833
Train 4 epoch, Loss: 0.294589, Acc: 0.912050
Train 5 epoch, Loss: 0.231720, Acc: 0.930100
Train 6 epoch, Loss: 0.188469, Acc: 0.942767
Train 7 epoch, Loss: 0.158934, Acc: 0.952717
Train 8 epoch, Loss: 0.139244, Acc: 0.958150
Train 9 epoch, Loss: 0.125946, Acc: 0.961917
Train 10 epoch, Loss: 0.115719, Acc: 0.965033
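# Added sketch (not part of the original post): the trained weights can be saved and
# reloaded later via the standard state_dict mechanism; the file name is arbitrary.
torch.save(model.state_dict(), 'mnist_cnn.pth')
# model.load_state_dict(torch.load('mnist_cnn.pth'))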
#Evaluate the model
model.eval()                     #switch to evaluation mode
eval_loss = 0
eval_acc = 0
for data in test_loader:
    img,label = data
    if use_cuda:
        img = img.cuda()
        label = label.cuda()
    with torch.no_grad():        #no gradients needed during evaluation
        out = model(img)
        loss = criterion(out,label)
    eval_loss += loss.item()*label.size(0)
    _,predicted = torch.max(out,1)
    num_correct = (predicted==label).sum()
    eval_acc += num_correct.item()
print("Test Loss: {:.6f},Acc: {:.6f}".format(eval_loss/(len(test_dataset)),eval_acc/(len(test_dataset))))
    
Test Loss: 0.101989,Acc: 0.967800
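As a final check, the trained model can also be used to classify a single test image. The snippet below is a minimal sketch; the index 0 is arbitrary and only illustrates the idea.

img, label = test_dataset[0]
img = img.unsqueeze(0)               # add a batch dimension: 1*1*28*28
if use_cuda:
    img = img.cuda()
with torch.no_grad():
    pred = model(img).argmax(dim=1).item()
print('predicted:', pred, 'ground truth:', label)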
