Pytorch学习(4)神经网络实现MNIST数字识别

网络搭建:搭建一个包含两个隐含层的神经网络,采用ReLU激活函数。

import torch
from torch import nn, optim

class Batch_Net(nn.Module):
    """Fully-connected MNIST classifier with two hidden layers.

    Each hidden layer is Linear -> BatchNorm1d -> ReLU; the output
    layer is a plain Linear producing raw class scores (logits —
    no softmax, so it pairs with CrossEntropyLoss).
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Batch_Net, self).__init__()
        # First hidden layer: affine map, batch-norm, in-place ReLU.
        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, n_hidden_1),
            nn.BatchNorm1d(n_hidden_1),
            nn.ReLU(True),
        )
        # Second hidden layer: same pattern at a narrower width.
        self.layer2 = nn.Sequential(
            nn.Linear(n_hidden_1, n_hidden_2),
            nn.BatchNorm1d(n_hidden_2),
            nn.ReLU(True),
        )
        # Output layer: logits for each of the out_dim classes.
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x):
        """Map a (batch, in_dim) tensor to (batch, out_dim) logits."""
        out = self.layer1(x)
        out = self.layer2(out)
        return self.layer3(out)

训练和测试:

import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
#from network import Batch_Net
import network

# ---- Hyperparameters ----
batch_size = 64
learning_rate = 1e-2
num_epoches = 20

# Convert PIL images to tensors and normalize pixels to roughly [-1, 1].
_mnist_steps = [
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
]
data_tf = transforms.Compose(_mnist_steps)

# MNIST dataset: download the training split; the test split is
# assumed to be present after the first download.
train_dataset = datasets.MNIST(root='./data', train=True,
                               transform=data_tf, download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=data_tf)

# Shuffle only the training data; keep test order fixed.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# 28x28 images flattened to 784 inputs, hidden widths 300 and 100,
# 10 output classes (digits 0-9).
model = network.Batch_Net(28 * 28, 300, 100, 10)

# Loss and optimizer: cross-entropy on logits, plain SGD.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

# train model
train_loss_all = []   # average loss per epoch, for plotting
train_acc_all = []    # average accuracy per epoch, for plotting

print('training...')
model.train()  # ensure BatchNorm layers use batch statistics
for epoch in range(num_epoches):
    loss_sum = 0.0
    acc_sum = 0
    for img, label in train_loader:
        # Flatten (N, 1, 28, 28) images into (N, 784) vectors.
        img = img.view(img.size(0), -1)
        # NOTE: torch.autograd.Variable wrappers removed — tensors
        # carry autograd state directly since PyTorch 0.4.

        # forward
        out = model(img)
        loss = criterion(out, label)

        # Accumulate loss weighted by batch size (last batch may be
        # smaller) and count correct predictions.
        loss_sum += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)
        acc_sum += (pred == label).sum().item()

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Epoch averages — computed once per epoch, not per batch as before.
    train_loss = loss_sum / len(train_dataset)
    train_acc = acc_sum / len(train_dataset)
    train_loss_all.append(train_loss)
    train_acc_all.append(train_acc)

    print('epoch: {}, train Loss: {:.6f}, Acc: {:.6f}'.format(
            epoch + 1, train_loss, train_acc))

print('train done!')

# plot train curve
epoch_axis = np.arange(len(train_loss_all))

# Loss curve (red).
plt.figure()
plt.plot(epoch_axis, train_loss_all, 'r')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()

# Accuracy curve (blue).
plt.figure()
plt.plot(epoch_axis, train_acc_all, 'b')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()

# test model
model.eval()  # switch BatchNorm to its running statistics
eval_loss = 0.0
eval_acc = 0
print('testing...')
# torch.no_grad() replaces the removed Variable(..., volatile=True)
# API: without it, gradients would still be tracked during evaluation,
# wasting memory for no benefit.
with torch.no_grad():
    for img, label in test_loader:
        # Flatten (N, 1, 28, 28) images into (N, 784) vectors.
        img = img.view(img.size(0), -1)

        out = model(img)
        loss = criterion(out, label)
        # Weight by batch size so the final division gives a true mean.
        eval_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)
        eval_acc += (pred == label).sum().item()

print('Test Loss: {:.6f}, Acc: {}'.format(
        eval_loss / len(test_dataset),
        eval_acc / len(test_dataset)))

print('test done!')

训练损失下降曲线: 

训练准确率曲线:  

 

  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值