基于深入浅出深度学习中的例子,将书上代码改为python3实现,并提高了一些效率(可能)
首先加载数据,数据来源于书本github
import pickle as cPickle
import gzip
import numpy as np
def load_data():
    """Load the pickled MNIST dataset from 'mnist.pkl.gz' in the working directory.

    Returns the tuple (training_data, validation_data, test_data) exactly as
    stored in the book's pickle file.  encoding='latin1' is required to read
    a Python-2 pickle under Python 3.
    """
    # 'with' guarantees the handle is closed even if unpickling raises
    # (the original open/close pair leaked the file on error).
    with gzip.open('mnist.pkl.gz', 'rb') as f:
        training_data, validation_data, test_data = cPickle.load(f, encoding='latin1')
    return (training_data, validation_data, test_data)
def load_data_wrapper():
    """Reshape the raw MNIST arrays into the formats the network consumes.

    Returns:
        training_data: list of (x, y) pairs, x a (784, 1) float column and
            y a (10, 1) one-hot column vector.
        validation_data, test_data: lists of (x, label) pairs with the
            label kept as a plain integer.

    Fix: the original returned one-shot ``zip`` iterators — a Python-3
    gotcha, exhausted after a single pass.  Materialized lists are returned
    instead; backward-compatible, since callers only iterate them.
    """
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = list(zip(training_inputs, training_results))
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = list(zip(validation_inputs, va_d[1]))
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = list(zip(test_inputs, te_d[1]))
    return (training_data, validation_data, test_data)
def vectorized_result(j, num_classes=10):
    """Return a (num_classes, 1) one-hot column vector with 1.0 at index j.

    Generalized: the class count was hard-coded to 10; it is now a
    parameter defaulting to 10 (MNIST digits), so existing callers are
    unaffected.
    """
    e = np.zeros((num_classes, 1))
    e[j] = 1.0
    return e
前馈神经网络
class Network(object):
    """A fully-connected feedforward network with sigmoid activations,
    trained by mini-batch stochastic gradient descent on a quadratic cost.
    """

    def __init__(self, sizes):
        """sizes: list of layer widths, e.g. [784, 30, 10].

        Weights and biases are initialized from a standard normal
        distribution; the input layer has no bias.
        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One (y, 1) bias column per non-input layer.
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # weights[i] has shape (sizes[i+1], sizes[i]) so a' = sigmoid(w.a + b).
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward_n_backprop(self, x, y=None, isforwardonly=True):
        """Run the forward pass, and optionally backpropagation.

        With isforwardonly=True (default) returns the output activation for
        input column vector x.  With isforwardonly=False also requires the
        target y and returns (nabla_b, nabla_w): per-layer gradients of the
        quadratic cost, shaped like self.biases / self.weights.
        """
        a = x
        if isforwardonly:
            for w, b in zip(self.weights, self.biases):
                a = sigmoid(np.dot(w, a) + b)
            return a
        # Forward pass, caching each weighted input z and activation
        # for the backward pass.
        activations = [a]
        zs = []
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, a) + b
            zs.append(z)
            a = sigmoid(z)
            activations.append(a)
        nabla_b = [np.zeros_like(b) for b in self.biases]
        nabla_w = [np.zeros_like(w) for w in self.weights]
        # Output-layer error for the quadratic cost: (a - y) * sigma'(z).
        delta = (activations[-1] - y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error backwards through the hidden layers.
        for l in range(2, self.num_layers):
            sp = sigmoid_prime(zs[-l])
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train with mini-batch stochastic gradient descent.

        training_data: iterable of (x, y) pairs; eta: learning rate.
        If test_data is given, evaluation accuracy is printed per epoch.
        """
        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)
            print(n_test)
        traindata = list(training_data)
        n = len(traindata)
        for j in range(epochs):
            # Fix: reshuffle every epoch so mini-batches are fresh random
            # samples — the defining property of *stochastic* GD.  The
            # original sliced the same fixed order every epoch.
            np.random.shuffle(traindata)
            mini_batches = [
                traindata[k:k + mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Apply one gradient-descent step using the summed gradients
        over the (x, y) pairs in mini_batch."""
        nabla_b = [np.zeros_like(b) for b in self.biases]
        nabla_w = [np.zeros_like(w) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.feedforward_n_backprop(x, y, False)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # Hoisted loop-invariant step size (was recomputed per parameter list).
        step = eta / len(mini_batch)
        self.weights = [w - step * nw for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - step * nb for b, nb in zip(self.biases, nabla_b)]

    def evaluate(self, test_data):
        """Count test inputs whose argmax network output equals the label."""
        test_results = [(np.argmax(self.feedforward_n_backprop(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
def sigmoid(z):
    """The logistic sigmoid 1 / (1 + e^{-z}), applied elementwise."""
    ez = np.exp(-z)
    return 1.0 / (1.0 + ez)
def sigmoid_prime(z):
    """Derivative of the sigmoid: sigma'(z) = sigma(z) * (1 - sigma(z))."""
    s = sigmoid(z)
    return s * (1.0 - s)
实例:
# Example run: train a 784-30-10 network on MNIST for 30 epochs,
# mini-batch size 10, learning rate 3.0, reporting test-set accuracy
# after every epoch.
train_data,validate_data,test_data = load_data_wrapper()
net=Network([784,30,10])
net.SGD(train_data, 30, 10, 3.0, test_data=test_data)
运行结果:
10000
Epoch 0: 9079 / 10000
Epoch 1: 9277 / 10000
Epoch 2: 9316 / 10000
Epoch 3: 9340 / 10000
Epoch 4: 9367 / 10000
Epoch 5: 9384 / 10000
Epoch 6: 9382 / 10000
Epoch 7: 9421 / 10000
Epoch 8: 9437 / 10000
Epoch 9: 9441 / 10000