Building a BP Neural Network with PyTorch to Recognize the MNIST Dataset

1. Introduction to the Dataset

The MNIST dataset (Modified National Institute of Standards and Technology database) is a large database of handwritten digits collected and curated from US National Institute of Standards and Technology data. It contains a training set of 60,000 examples and a test set of 10,000 examples, where each image is 28×28 pixels.
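As a quick sanity check, the dataset can be downloaded and inspected with torchvision (a minimal sketch; it assumes the data is stored under ./data/, as in the code later in this article):

import torchvision

train_set = torchvision.datasets.MNIST(root='./data/', train=True, download=True)
test_set = torchvision.datasets.MNIST(root='./data/', train=False, download=True)
print(train_set.data.shape)  # torch.Size([60000, 28, 28])
print(test_set.data.shape)   # torch.Size([10000, 28, 28])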

2. Importing Dependencies

# coding=gbk
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

3. Building the BP Neural Network

The BP network consists of a 784-node input layer, three hidden layers with 20 nodes each, and a 10-node output layer. The sigmoid activation function is used throughout.
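The implementation below relies on a convenient property of the sigmoid: its derivative can be expressed through its own output,

σ(z) = 1 / (1 + e^(−z)),   σ'(z) = σ(z)(1 − σ(z)),

so for an activation a = σ(z) the derivative is simply a(1 − a). This is why sigmoid_deri below takes the already-activated value rather than the pre-activation.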

# input layer:784 nodes; hidden layer:three hidden layers with 20 nodes in each layer
# output layer:10 nodes
class BP:
    def __init__(self):
        self.input = np.zeros((100, 784))   # 100 samples per round
        self.hidden_layer_1 = np.zeros((100, 20))
        self.hidden_layer_2 = np.zeros((100, 20))
        self.hidden_layer_3 = np.zeros((100, 20))
        self.output_layer = np.zeros((100, 10))
        self.w1 = 2 * np.random.random((784, 20)) - 1   # limit to (-1, 1)
        self.w2 = 2 * np.random.random((20, 20)) - 1
        self.w3 = 2 * np.random.random((20, 20)) - 1
        self.w4 = 2 * np.random.random((20, 10)) - 1
        self.error = np.zeros(10)
        self.learning_rate = 0.1

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_deri(self, x):
        # x is already the sigmoid activation, so sigma'(z) = x * (1 - x)
        return x * (1 - x)

    def forward_prop(self, data, label):   # data: 100 x 784, label: 100 x 10
        self.input = data
        self.hidden_layer_1 = self.sigmoid(np.dot(self.input, self.w1))
        self.hidden_layer_2 = self.sigmoid(np.dot(self.hidden_layer_1, self.w2))
        self.hidden_layer_3 = self.sigmoid(np.dot(self.hidden_layer_2, self.w3))
        self.output_layer = self.sigmoid(np.dot(self.hidden_layer_3, self.w4))
        # error
        self.error = label - self.output_layer
        return self.output_layer

    def backward_prop(self):
        output_diff = self.error * self.sigmoid_deri(self.output_layer)
        hidden_diff_3 = np.dot(output_diff, self.w4.T) * self.sigmoid_deri(self.hidden_layer_3)
        hidden_diff_2 = np.dot(hidden_diff_3, self.w3.T) * self.sigmoid_deri(self.hidden_layer_2)
        hidden_diff_1 = np.dot(hidden_diff_2, self.w2.T) * self.sigmoid_deri(self.hidden_layer_1)
        # update
        self.w4 += self.learning_rate * np.dot(self.hidden_layer_3.T, output_diff)
        self.w3 += self.learning_rate * np.dot(self.hidden_layer_2.T, hidden_diff_3)
        self.w2 += self.learning_rate * np.dot(self.hidden_layer_1.T, hidden_diff_2)
        self.w1 += self.learning_rate * np.dot(self.input.T, hidden_diff_1)
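As a quick smoke test (not part of the original script), one round of forward and backward propagation can be run on random data, reusing the imports from section 2:

nn = BP()
data = np.random.random((100, 784))                 # one fake batch of 100 samples
label = np.eye(10)[np.random.randint(0, 10, 100)]   # random one-hot targets
out = nn.forward_prop(data, label)
print(out.shape)     # (100, 10)
nn.backward_prop()   # w1..w4 are updated in place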

4. Loading the Data

The data can be loaded with the torchvision.datasets.MNIST() function:

torchvision.datasets.MNIST(root: str, train: bool = True, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False)

  • root (string): the root directory of the dataset, where the files MNIST/processed/training.pt and MNIST/processed/test.pt exist
  • train (bool, optional): if True, creates the dataset from training.pt; otherwise from test.pt
  • download (bool, optional): if True, downloads the dataset from the internet and puts it in the root directory; if the dataset is already downloaded, it is not downloaded again
  • transform (callable, optional): a function that takes a PIL image and returns a transformed version
  • target_transform (callable, optional): a function that takes the target and transforms it
# from torchvision load data
def load_data():
    # set download=True on the first run to fetch the data
    datasets_train = torchvision.datasets.MNIST(root='./data/', train=True, transform=transforms.ToTensor(), download=False)
    datasets_test = torchvision.datasets.MNIST(root='./data/', train=False, transform=transforms.ToTensor(), download=False)
    data_train = datasets_train.data
    X_train = data_train.numpy()
    X_test = datasets_test.data.numpy()
    X_train = np.reshape(X_train, (60000, 784))
    X_test = np.reshape(X_test, (10000, 784))
    Y_train = datasets_train.targets.numpy()
    Y_test = datasets_test.targets.numpy()

    real_train_y = np.zeros((60000, 10))
    real_test_y = np.zeros((10000, 10))
    # each y has ten dimensions
    for i in range(60000):
        real_train_y[i, Y_train[i]] = 1
    for i in range(10000):
        real_test_y[i, Y_test[i]] = 1
    index = np.arange(60000)
    np.random.shuffle(index)
    # shuffle train_data
    X_train = X_train[index]
    real_train_y = real_train_y[index]

    # binarize the pixels: any non-zero grey value becomes 1
    X_train = np.int64(X_train > 0)
    X_test = np.int64(X_test > 0)

    return X_train, real_train_y, X_test, real_test_y
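As an aside, the two one-hot loops above can be replaced by a vectorized indexing trick; this is an equivalent alternative, not the original code:

real_train_y = np.eye(10)[Y_train]   # shape (60000, 10)
real_test_y = np.eye(10)[Y_test]     # shape (10000, 10)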

5. Training the Network

def bp_network():
    nn = BP()
    X_train, Y_train, X_test, Y_test = load_data()
    batch_size = 100
    epochs = 6000   # 6000 iterations of 100 samples = 10 full passes over the 60,000 training images
    for epoch in range(epochs):
        # cycle through the shuffled training set, 600 batches per pass
        start = (epoch % 600) * batch_size
        end = start + batch_size
        print(start, end)   # debug output: current batch range
        nn.forward_prop(X_train[start: end], Y_train[start: end])
        nn.backward_prop()

    return nn

6. Testing the Network

def bp_test():
    nn = bp_network()
    correct = 0   # number of correctly classified test samples
    X_train, Y_train, X_test, Y_test = load_data()
    # test: feed each test sample through the trained network
    for i in range(len(X_test)):
        res = nn.forward_prop(X_test[i], Y_test[i])   # np.dot also handles a single 1-D sample
        res = res.tolist()
        index = res.index(max(res))
        if Y_test[i, index] == 1:
            correct += 1

    print('accuracy:', correct / len(Y_test))


if __name__ == '__main__':
    bp_test()

Running the code gives the following prediction accuracy:

accuracy: 0.9291

7. A Concise PyTorch Implementation

For comparison, here is a concise version built from PyTorch's own modules. Note that this implementation uses a small convolutional network rather than a pure fully connected one, which is why it reaches a noticeably higher accuracy.

import torch
import torchvision
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt


n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10

train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=True, download=False,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   # 0.1307 and 0.3081 are the mean and std of the MNIST training set
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])), batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=False,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])), batch_size=batch_size_test, shuffle=True)
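Pulling one batch from the loader confirms the tensor shapes (a quick check, not required for training):

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 1, 28, 28])
print(labels.shape)  # torch.Size([64])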


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
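The 320 in fc1 comes from tracing the shapes: the 28×28 input passes conv1 (5×5 kernel) to give 24×24, is max-pooled to 12×12, passes conv2 to give 8×8, and is pooled to 4×4 with 20 channels, i.e. 20 × 4 × 4 = 320. This can be verified with a dummy tensor before moving the model to the GPU (a sketch, run on CPU):

net = Net()
x = torch.zeros(1, 1, 28, 28)
x = F.max_pool2d(net.conv1(x), 2)
print(x.shape)  # torch.Size([1, 10, 12, 12])
x = F.max_pool2d(net.conv2(x), 2)
print(x.shape)  # torch.Size([1, 20, 4, 4]) -> flattens to 320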


network = Net().cuda()  # move the model to the GPU
optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)

train_losses = []
train_counter = []
test_losses = []
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]


def train(epoch):
    network.train()  # switch to training mode
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()  # zero the gradients
        output = network(data.cuda())
        loss = F.nll_loss(output, target.cuda())
        loss.backward()  # backpropagate
        optimizer.step()  # update the parameters
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data),
                                                                           len(train_loader.dataset),
                                                                           100. * batch_idx / len(train_loader),
                                                                           loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * len(train_loader.dataset)))
            torch.save(network.state_dict(), './model/model_mnist.pth')        # the ./model/ directory must exist
            torch.save(optimizer.state_dict(), './model/optimizer_mnist.pth')


def test():
    network.eval()  # switch to evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data_cuda = data.cuda()
            target_cuda = target.cuda()
            output = network(data_cuda)
            test_loss += F.nll_loss(output, target_cuda, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target_cuda.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
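The pred line above uses the older .data.max(...) idiom; on current PyTorch versions the same prediction inside test() can be written more directly (an equivalent alternative):

pred = output.argmax(dim=1, keepdim=True)  # same result as output.data.max(1, keepdim=True)[1]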


# network.load_state_dict(torch.load('./model/model_mnist.pth'))  # load a saved model
# optimizer.load_state_dict(torch.load('./model/optimizer_mnist.pth'))  # load the saved optimizer state

# test()
for epoch in range(1, n_epochs + 1):
    train(epoch)
    test()
#
# fig = plt.figure()
# plt.plot(train_counter, train_losses, color='blue')
# plt.scatter(test_counter, test_losses, color='red')
# plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
# plt.xlabel('number of training examples seen')
# plt.ylabel('negative log likelihood loss')


# examples = enumerate(test_loader)
# batch_idx, (example_data, example_targets) = next(examples)
# with torch.no_grad():
#     output = network(example_data.cuda())
# fig = plt.figure()
# for i in range(6):
#     plt.subplot(2, 3, i + 1)
#     plt.tight_layout()
#     plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
#     plt.title("Prediction: {}".format(output.data.max(1, keepdim=True)[1][i].item()))
#     plt.xticks([])
#     plt.yticks([])
# plt.show()

The output is:

Test set: Avg. loss: 0.0607, Accuracy: 9806/10000 (98%)

Predictions on sample handwritten digits (figure omitted: six test images with their predicted labels, produced by the commented-out plotting code above).

