This is an example of building a two-layer neural network (one hidden layer) in Python to recognize the MNIST handwritten-digit dataset.
The mnist.py file handles downloading the MNIST dataset (60,000 training images, 10,000 test images). Each image is 28×28 pixels and is flattened into a 1×784 vector to serve as the network input. The hidden layer has 50 neurons, and since this is a ten-way classification task (digits 0-9) the output layer has 10 neurons, so the network architecture is 784-50-10.
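With normalize=True and one_hot_label=True, load_mnist returns the images already flattened and scaled to [0, 1] and the labels as one-hot vectors. A quick shape check (a sketch, assuming the dataset/mnist.py used here keeps its default flatten=True):

from dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
print(x_train.shape)  # (60000, 784): each 28x28 image flattened to 784 values in [0, 1]
print(t_train.shape)  # (60000, 10):  one-hot labels for digits 0-9
print(x_test.shape)   # (10000, 784)
print(t_test.shape)   # (10000, 10)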
The number of training iterations is set to 10,000, the learning rate to 0.1, and the mini-batch size to 100. With 60,000 training images and batches of 100, one epoch is 600 iterations, so 10,000 iterations cover roughly 16 epochs, and accuracy is evaluated once per epoch. The program computes the training accuracy, test accuracy, and training loss, reports them by printing and plotting, and also measures the total run time. The loss does keep decreasing, and training and test accuracy both rise with only a small gap between them, which indicates the network is learning correctly.
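The training script below imports a simpleNet class from SimpleNet.py, which is not reproduced here. For reference, here is a minimal sketch of what such a two-layer net could look like, assuming a sigmoid hidden layer, a softmax output with cross-entropy loss, and a backprop-based gradient method (all assumptions; the actual SimpleNet.py may differ):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def softmax(x):
    x = x - np.max(x, axis=1, keepdims=True)  # shift for numerical stability
    e = np.exp(x)
    return e / np.sum(e, axis=1, keepdims=True)

class simpleNet:
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Small random weights, zero biases
        self.params = {
            'W1': weight_init_std * np.random.randn(input_size, hidden_size),
            'b1': np.zeros(hidden_size),
            'W2': weight_init_std * np.random.randn(hidden_size, output_size),
            'b2': np.zeros(output_size),
        }

    def predict(self, x):
        a1 = np.dot(x, self.params['W1']) + self.params['b1']
        z1 = sigmoid(a1)
        a2 = np.dot(z1, self.params['W2']) + self.params['b2']
        return softmax(a2)

    def loss(self, x, t):
        # Mean cross-entropy over the batch (t is one-hot)
        y = self.predict(x)
        return -np.sum(t * np.log(y + 1e-7)) / x.shape[0]

    def accuracy(self, x, t):
        y = np.argmax(self.predict(x), axis=1)
        return np.mean(y == np.argmax(t, axis=1))

    def gradient(self, x, t):
        # Backpropagation for the two-layer net
        batch_num = x.shape[0]
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']

        # forward pass
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward pass (softmax + cross-entropy gives y - t at the output)
        dy = (y - t) / batch_num
        grads = {}
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        dz1 = np.dot(dy, W2.T)
        da1 = dz1 * z1 * (1.0 - z1)  # derivative of sigmoid
        grads['W1'] = np.dot(x.T, da1)
        grads['b1'] = np.sum(da1, axis=0)
        return grads

Pairing softmax with cross-entropy is what makes the output-layer gradient collapse to the simple y - t term in the backward pass.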
Source code:
import time
import numpy as np
from dataset.mnist import load_mnist
from SimpleNet import simpleNet
import matplotlib.pyplot as plt
start = time.perf_counter()  # start timing the run (time.clock() was removed in Python 3.8)
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
train_loss_list = []
train_acc_list = []
test_acc_list = []
# Hyperparameters
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
iter_per_epoch = max(train_size // batch_size, 1)  # 600 iterations per epoch here
network = simpleNet(input_size=784, hidden_size=50, output_size=10)
for i in range(iters_num):
    # Draw a random mini-batch of training examples
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute gradients by backpropagation (the numerical version below is far slower)
    # grad = network.numerical_gradient(x_batch, t_batch)
    grad = network.gradient(x_batch, t_batch)

    # Update the parameters with SGD
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    # Record the training loss
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Evaluate accuracy on the full train/test sets once per epoch
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
end = time.perf_counter()
print("elapsed time: " + str(end - start) + " s")  # total training time

# Plot the training loss over iterations
x1 = np.arange(len(train_loss_list))
plt.subplot(121)
plt.plot(x1, train_loss_list)
plt.xlabel("iteration")
plt.ylabel("loss")
# Plot training and test accuracy per epoch
markers = {'train': 'o', 'test': 's'}
x2 = np.arange(len(train_acc_list))
plt.subplot(122)
plt.plot(x2, train_acc_list, marker=markers['train'], label='train acc')
plt.plot(x2, test_acc_list, marker=markers['test'], label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
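The commented-out numerical_gradient call in the training loop is the finite-difference version of the gradient; it is far too slow for training but handy for verifying that backprop is implemented correctly. A quick gradient check on a tiny batch (a sketch, assuming simpleNet exposes both methods as the listing suggests):

# Compare backprop gradients against finite differences on a tiny batch
x_small, t_small = x_train[:3], t_train[:3]
grad_backprop = network.gradient(x_small, t_small)
grad_numerical = network.numerical_gradient(x_small, t_small)
for key in ('W1', 'b1', 'W2', 'b2'):
    diff = np.mean(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + " mean |diff|: " + str(diff))  # should be very small if backprop is correct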