import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import sys, os
sys.path.append(os.pardir)
In [3]
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    # If the supervised data is one-hot encoded, convert it to the indices of the correct labels
    if t.size == y.size:
        t = t.argmax(axis=1)

    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
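A quick sanity check (an illustrative sketch, not a cell from the original notebook): for confident correct predictions the loss is small, and the 1e-7 term guards against log(0).

# Hypothetical example: two samples, three classes, one-hot targets
y_demo = np.array([[0.1, 0.8, 0.1],
                   [0.7, 0.2, 0.1]])
t_demo = np.array([[0, 1, 0],
                   [1, 0, 0]])
print(cross_entropy_error(y_demo, t_demo))  # -(log 0.8 + log 0.7) / 2 ≈ 0.290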
In [4]
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_grad(x):
    # Derivative of the sigmoid: σ'(x) = σ(x) * (1 - σ(x))
    return (1.0 - sigmoid(x)) * sigmoid(x)
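A finite-difference spot check (illustrative, not part of the original notebook) confirms that sigmoid_grad matches the numerical derivative:

x0 = 0.5
h = 1e-5
numeric = (sigmoid(x0 + h) - sigmoid(x0 - h)) / (2 * h)
print(np.isclose(sigmoid_grad(x0), numeric))  # True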
In [5]
def softmax(x):
    if x.ndim == 2:
        x = x.T
        x = x - np.max(x, axis=0)
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T

    x = x - np.max(x)  # guard against overflow
    return np.exp(x) / np.sum(np.exp(x))
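Subtracting the row maximum before exponentiating leaves the result unchanged but avoids overflow; each output row is a probability distribution. A quick check (illustrative, not an original cell):

a = np.array([[1000.0, 1001.0, 1002.0],   # logits this large would overflow a naive exp
              [0.3, 2.9, 4.0]])
p = softmax(a)
print(p.sum(axis=1))  # [1. 1.]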
In [6]
def numerical_gradient(f, x):
    h = 1e-4
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2*h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad
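The central difference (f(x+h) - f(x-h)) / 2h approximates each partial derivative to O(h²). For f(x) = x₀² + x₁², the gradient at (3, 4) should be (6, 8) (illustrative sketch, not an original cell):

f = lambda x: np.sum(x ** 2)
print(numerical_gradient(f, np.array([3.0, 4.0])))  # ≈ [6. 8.]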
In [7]
class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        return y

    # x: input data, t: supervised data (labels)
    def loss(self, x, t):
        y = self.predict(x)
        return cross_entropy_error(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: supervised data (labels)
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads

    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = x.shape[0]

        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
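Since gradient implements the backward pass analytically, it is worth verifying against the slow numerical version on a tiny batch before training. This is an illustrative gradient-check sketch (net_chk, x_chk, t_chk are hypothetical names, not from the original notebook):

net_chk = TwoLayerNet(input_size=4, hidden_size=3, output_size=2)
x_chk = np.random.rand(2, 4)
t_chk = np.array([[1, 0], [0, 1]], dtype=np.float64)

grad_num = net_chk.numerical_gradient(x_chk, t_chk)
grad_bp = net_chk.gradient(x_chk, t_chk)
for key in grad_num:
    diff = np.average(np.abs(grad_bp[key] - grad_num[key]))
    print(key, diff)  # each difference should be tiny, around 1e-10 or smaller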
In [8]
try:
    import urllib.request
except ImportError:
    raise ImportError('You should use Python 3.x')
import os.path
import gzip

# Note: yann.lecun.com has become unreliable for direct downloads;
# if the request fails, substitute a mirror hosting the same files.
url_base = 'http://yann.lecun.com/exdb/mnist/'
key_file = {
    'train_img': 'train-images-idx3-ubyte.gz',
    'train_label': 'train-labels-idx1-ubyte.gz',
    'test_img': 't10k-images-idx3-ubyte.gz',
    'test_label': 't10k-labels-idx1-ubyte.gz'
}

dataset_dir = os.path.dirname(os.path.abspath("mnist.pkl"))  # resolves to the current working directory
save_file = dataset_dir + "/mnist.pkl"

train_num = 60000
test_num = 10000
img_dim = (1, 28, 28)
img_size = 784
def _download(file_name):
    file_path = dataset_dir + "/" + file_name

    if os.path.exists(file_path):
        return

    print("Downloading " + file_name + " ... ")
    urllib.request.urlretrieve(url_base + file_name, file_path)
    print("Done")

def download_mnist():
    for v in key_file.values():
        _download(v)

def _load_label(file_name):
    file_path = dataset_dir + "/" + file_name

    print("Converting " + file_name + " to NumPy Array ...")
    with gzip.open(file_path, 'rb') as f:
        labels = np.frombuffer(f.read(), np.uint8, offset=8)
    print("Done")

    return labels

def _load_img(file_name):
    file_path = dataset_dir + "/" + file_name

    print("Converting " + file_name + " to NumPy Array ...")
    with gzip.open(file_path, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=16)
    data = data.reshape(-1, img_size)
    print("Done")

    return data
def _convert_numpy():
    dataset = {}
    dataset['train_img'] = _load_img(key_file['train_img'])
    dataset['train_label'] = _load_label(key_file['train_label'])
    dataset['test_img'] = _load_img(key_file['test_img'])
    dataset['test_label'] = _load_label(key_file['test_label'])

    return dataset

def init_mnist():
    download_mnist()
    dataset = _convert_numpy()
    print("Creating pickle file ...")
    with open(save_file, 'wb') as f:
        pickle.dump(dataset, f, -1)
    print("Done!")

def _change_one_hot_label(X):
    T = np.zeros((X.size, 10))
    for idx, row in enumerate(T):
        row[X[idx]] = 1

    return T
def load_mnist(normalize=True, flatten=True, one_hot_label=False):
    if not os.path.exists(save_file):
        init_mnist()

    with open(save_file, 'rb') as f:
        dataset = pickle.load(f)

    if normalize:
        for key in ('train_img', 'test_img'):
            dataset[key] = dataset[key].astype(np.float32)
            dataset[key] /= 255.0

    if one_hot_label:
        dataset['train_label'] = _change_one_hot_label(dataset['train_label'])
        dataset['test_label'] = _change_one_hot_label(dataset['test_label'])

    if not flatten:
        for key in ('train_img', 'test_img'):
            dataset[key] = dataset[key].reshape(-1, 1, 28, 28)

    return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])

if __name__ == '__main__':
    init_mnist()
Converting train-images-idx3-ubyte.gz to NumPy Array ...
Done
Converting train-labels-idx1-ubyte.gz to NumPy Array ...
Done
Converting t10k-images-idx3-ubyte.gz to NumPy Array ...
Done
Converting t10k-labels-idx1-ubyte.gz to NumPy Array ...
Done
Creating pickle file ...
Done!
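Once the pickle file exists, load_mnist returns NumPy arrays directly. A quick shape check (an illustrative sketch, not an original cell; x_tr etc. are hypothetical names) makes the layout explicit:

(x_tr, t_tr), (x_te, t_te) = load_mnist(normalize=True, one_hot_label=True)
print(x_tr.shape, t_tr.shape)  # (60000, 784) (60000, 10)
print(x_te.shape, t_te.shape)  # (10000, 784) (10000, 10)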
In [16]
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
iters_num = 60000  # with iter_per_epoch = 600, this amounts to 100 epochs
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.007524

train_loss_list = []
train_acc_list = []
test_acc_list = []
flag = 0  # epoch counter, used only for logging

iter_per_epoch = max(train_size / batch_size, 1)
In [17]
for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute the gradient
    # grad = network.numerical_gradient(x_batch, t_batch)  # time-consuming without a GPU, and may contain bugs
    grad = network.gradient(x_batch, t_batch)

    # Update the parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        flag += 1
        print(f"[epoch:{flag}] train acc:{train_acc}, test acc:{test_acc}")
[epoch:1] train acc:0.09736666666666667, test acc:0.0982
[epoch:2] train acc:0.11236666666666667, test acc:0.1135
[epoch:3] train acc:0.11236666666666667, test acc:0.1135
[epoch:4] train acc:0.18013333333333334, test acc:0.1818
[epoch:5] train acc:0.29581666666666667, test acc:0.2994
[epoch:6] train acc:0.3939166666666667, test acc:0.394
[epoch:7] train acc:0.50935, test acc:0.5129
[epoch:8] train acc:0.5595166666666667, test acc:0.5658
[epoch:9] train acc:0.6160666666666667, test acc:0.6224
[epoch:10] train acc:0.6736666666666666, test acc:0.6785
[epoch:11] train acc:0.7101666666666666, test acc:0.7168
[epoch:12] train acc:0.74035, test acc:0.745
[epoch:13] train acc:0.7673166666666666, test acc:0.7714
[epoch:14] train acc:0.7877, test acc:0.7919
[epoch:15] train acc:0.80485, test acc:0.8079
[epoch:16] train acc:0.8182333333333334, test acc:0.8209
[epoch:17] train acc:0.82785, test acc:0.8292
[epoch:18] train acc:0.8348, test acc:0.8375
[epoch:19] train acc:0.8422333333333333, test acc:0.8462
......
[epoch:81] train acc:0.92035, test acc:0.9228
[epoch:82] train acc:0.92095, test acc:0.9228
[epoch:83] train acc:0.92095, test acc:0.9233
[epoch:84] train acc:0.9211, test acc:0.9234
[epoch:85] train acc:0.9218333333333333, test acc:0.924
[epoch:86] train acc:0.9218333333333333, test acc:0.9239
[epoch:87] train acc:0.9222833333333333, test acc:0.9247
[epoch:88] train acc:0.9226, test acc:0.9249
[epoch:89] train acc:0.9228833333333334, test acc:0.9259
[epoch:90] train acc:0.9231833333333334, test acc:0.9257
[epoch:91] train acc:0.9234833333333333, test acc:0.9261
[epoch:92] train acc:0.9237, test acc:0.9266
[epoch:93] train acc:0.9242333333333334, test acc:0.9263
[epoch:94] train acc:0.9243833333333333, test acc:0.9261
[epoch:95] train acc:0.925, test acc:0.926
[epoch:96] train acc:0.9251333333333334, test acc:0.926
[epoch:97] train acc:0.9252666666666667, test acc:0.9273
[epoch:98] train acc:0.9254833333333333, test acc:0.9269
[epoch:99] train acc:0.92595, test acc:0.9268
[epoch:100] train acc:0.9262833333333333, test acc:0.9277
In [31]
# Plot the accuracy curves
markers = {'train': 'o', 'test': 's'}  # defined but unused below
x = np.arange(len(train_acc_list))
plt.style.use("seaborn")  # on matplotlib >= 3.6 this style is named "seaborn-v0_8"
plt.plot(x, train_acc_list, c="blue", label='train')
plt.plot(x, test_acc_list, c="red", label='test', linestyle='-.')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='upper left')
plt.show()
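The per-iteration losses were also recorded in train_loss_list; plotting them (an illustrative addition, not an original cell) shows the same convergence at finer granularity:

plt.plot(np.arange(len(train_loss_list)), train_loss_list, c="green", label='train loss')
plt.xlabel("iterations")
plt.ylabel("loss")
plt.legend(loc='upper right')
plt.show()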