Summary
The code in this article walks through building a simple neural network in Python to recognise the handwritten-digit data set. The class is written to be general-purpose: once you have analysed your own problem, it can be modified and applied elsewhere.
Code
import numpy
# scipy.special for the sigmoid function expit()
import scipy.special
import matplotlib.pyplot as plt
# neural network class definition
class neuralNetwork:
    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # set number of nodes in each input, hidden, output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # link weight matrices, wih and who
        # weights inside the arrays are w_i_j, where the link is from node i to node j in the next layer
        # w11 w21
        # w12 w22 etc
        # the two link weight matrices
        # self.wih = (numpy.random.rand(self.hnodes, self.inodes) - 0.5)
        # self.who = (numpy.random.rand(self.onodes, self.hnodes) - 0.5)
        # initialise from a normal distribution: the first argument is the centre of the
        # distribution, the second is the standard deviation, the third is the shape
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
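        # note: pow(n, -0.5) is 1/sqrt(n), so the spread of the initial weights shrinks as the
        # layer grows -- a common heuristic that keeps the summed signals from saturating the sigmoid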
        # learning rate
        self.lr = learningrate
        # activation function is the sigmoid function
        # this builds a one-argument function: it takes x and returns scipy.special.expit(x);
        # later it is called as self.activation_function(...)
        self.activation_function = lambda x: scipy.special.expit(x)
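        # expit(x) = 1 / (1 + exp(-x)); it accepts any real x and its output lies strictly in (0, 1)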
        pass

    # train the neural network
    def train(self, inputs_list, targets_list):
        # convert inputs list to 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)
        # error is the (target - actual)
        output_errors = targets - final_outputs
        # hidden layer error is the output_errors, split by weights, recombined at hidden nodes
        hidden_errors = numpy.dot(self.who.T, output_errors)
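        # both updates below follow the same gradient-descent rule:
        #     delta_W = lr * (E * O * (1 - O)) . O_prev^T
        # where E is the error at the layer, O is that layer's sigmoid output (so O * (1 - O) is
        # the sigmoid derivative) and O_prev is the output of the previous layer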
        # update the weights for the links between the hidden and output layer
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        # update the weights for the links between the input and hidden layers
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))
        pass
    # query the network
    def query(self, inputs_list):
        # convert inputs list to 2d array
        # ndmin specifies the minimum number of dimensions; ndmin=2 forces the result to be a 2-D array
        inputs = numpy.array(inputs_list, ndmin=2).T
        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)
        return final_outputs

# number of input, hidden and output nodes
input_nodes = 784    # 784 input nodes (one per pixel of a 28x28 image)
hidden_nodes = 200   # 200 hidden nodes
output_nodes = 10    # 10 output nodes (one per digit)

# learning rate is 0.1
learning_rate = 0.1

# create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# load the mnist training data CSV file into a list
training_data_file = open("mnist_data/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()  # read the whole file into a list, one record per line
training_data_file.close()

# train the neural network
# epochs is the number of times the training data set is used for training
epochs = 5
for e in range(epochs):
    # go through all records in the training data set
    for record in training_data_list:
        # split the record by the ',' commas
        all_values = record.split(',')
        # scale and shift the inputs
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01  # 784 pixel values, one per input node
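        # the 0-255 pixel values are rescaled into 0.01 to 1.00; the 0.01 offset avoids zero-valued
        # inputs, which would otherwise eliminate the weight update for their links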
        # create the target output values (all 0.01, except the desired label which is 0.99)
        targets = numpy.zeros(output_nodes) + 0.01
        # all_values[0] is the target label for this record
        targets[int(all_values[0])] = 0.99
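        # 0.01 and 0.99 stand in for 0 and 1 because the sigmoid can only approach those extremes;
        # demanding exact 0/1 targets would push the weights towards ever larger values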
        n.train(inputs, targets)
        pass
    pass

# test the neural network
# load the mnist test data CSV file into a list
test_data_file = open("mnist_data/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()

# scorecard for how well the network performs, initially empty
scorecard = []
# go through all the records in the test data set
for record in test_data_list:
    # split the record by the ',' commas
    all_values = record.split(',')
    # correct answer is first value
    correct_label = int(all_values[0])
    print(correct_label, "correct label")
    # scale and shift the inputs
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # query the network
    outputs = n.query(inputs)
    # the index of the highest value corresponds to the label
    label = numpy.argmax(outputs)  # argmax returns the index of the largest value in the array
    print(label, "network's answer")
    # append correct or incorrect to list
    if label == correct_label:
        # network's answer matches correct answer, add 1 to scorecard
        scorecard.append(1)
    else:
        # network's answer doesn't match correct answer, add 0 to scorecard
        scorecard.append(0)
        pass
    pass

# calculate the performance score, the fraction of correct answers
scorecard_array = numpy.asarray(scorecard)  # convert the scorecard list into a numpy array
print("performance = ", scorecard_array.sum() / scorecard_array.size)
Takeaways
- Matrix arithmetic does most of the work during training; pay close attention to the shapes of any two matrices being multiplied (see the sketch below).
- Keep the sigmoid function's domain and range in mind: it accepts any real input and its output lies strictly between 0 and 1.
- The backpropagation-of-error step rewards careful study; in the book's three-layer structure the final output layer also applies the sigmoid in its computation, so its derivative appears in the weight updates.
- Up to a point, running the training data through the network for several epochs improves the recognition rate.
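
As a quick illustration of the first two points, the following minimal sketch (independent of the script above, reusing the same layer sizes purely as an example) prints the matrix shapes at each step of a forward pass and the range of the sigmoid outputs:

import numpy
import scipy.special

inodes, hnodes, onodes = 784, 200, 10      # example layer sizes
wih = numpy.random.normal(0.0, pow(hnodes, -0.5), (hnodes, inodes))
who = numpy.random.normal(0.0, pow(onodes, -0.5), (onodes, hnodes))

inputs = numpy.random.rand(inodes, 1)      # a dummy input column vector
hidden = scipy.special.expit(numpy.dot(wih, inputs))
final = scipy.special.expit(numpy.dot(who, hidden))

print(wih.shape, inputs.shape, hidden.shape)   # (200, 784) x (784, 1) -> (200, 1)
print(who.shape, hidden.shape, final.shape)    # (10, 200) x (200, 1) -> (10, 1)
print(float(final.min()), float(final.max()))  # both values lie strictly between 0 and 1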