花了三天读完了塔里克的《Python神经网络编程》,这是一本真正适合神经网络初学者的书,让我学会了最简单的三层神经网络编程,强烈安利给大伙。下面贴上注释代码记录一下。
import numpy # 数值计算库
import scipy.special # 提供sigmoid函数的库
import matplotlib.pyplot # 绘图库
class neuralNetwork:
    """A minimal 3-layer feed-forward neural network (input -> hidden -> output).

    Weights are trained with plain stochastic gradient descent and a
    sigmoid activation, following Tariq Rashid's *Make Your Own Neural
    Network* reference implementation.
    """

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        """Set up layer sizes, the two weight matrices, the learning rate
        and the activation function.

        inputnodes / hiddennodes / outputnodes -- node counts per layer.
        learningrate -- SGD step size.
        """
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # Initial weights are drawn from N(0, 1/sqrt(fan-in)), the common
        # heuristic of scaling by the number of incoming links per node.
        # FIX: the original used pow(self.hnodes, -0.5) for wih; the fan-in
        # of a hidden node is self.inodes (this also matches the book's code).
        # Shapes matter for the matrix products below:
        #   wih is (hnodes, inodes), who is (onodes, hnodes).
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
        self.lr = learningrate
        # Sigmoid activation; scipy.special.expit is a stable implementation.
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """Run one forward pass and one backpropagation update.

        inputs_list  -- sequence of self.inodes input values.
        targets_list -- sequence of self.onodes target values.
        """
        # Column vectors: (inodes, 1) and (onodes, 1).
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Forward pass.
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # Backpropagation: output error, then error split back across who.
        # hidden_errors must be computed from who BEFORE who is updated.
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Gradient-descent updates; sigmoid'(x) = out * (1 - out).
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward pass only: return the (onodes, 1) output column vector."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
# Network geometry for the MNIST CSV data: each record holds 785 comma-separated
# numbers — the first is the label (the answer itself), the remaining 784 are
# the 28x28 pixel values, so the input layer needs 784 nodes.
input_nodes = 784
# Hidden layer size is a design choice: smaller than the input layer,
# larger than the output layer.
hidden_nodes = 100
output_nodes = 10  # one output node per digit 0-9
learning_rate = 0.3
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# Read the training CSV. `with` closes the file even if readlines() raises
# (the original opened and closed the handle manually).
with open("C:/Users/15186/Desktop/mnist_dataset/mnist_train_100.csv", 'r') as training_data_file:
    training_data_list = training_data_file.readlines()
def training(records=None, net=None, nodes=None):
    """Run one training epoch: feed every CSV record through net.train().

    All parameters default to the module-level globals, so the existing
    bare call `training()` keeps working unchanged.

    records -- iterable of "label,p1,...,p784" CSV lines (default: training_data_list).
    net     -- network with a train(inputs, targets) method (default: n).
    nodes   -- number of output nodes / classes (default: output_nodes).
    """
    if records is None:
        records = training_data_list
    if net is None:
        net = n
    if nodes is None:
        nodes = output_nodes
    for record in records:
        all_values = record.split(',')
        # Convert pixel strings to floats and rescale from [0, 255] into
        # [0.01, 1.0] so no input is exactly zero.
        # FIX: numpy.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
        # is the equivalent replacement.
        inputs = (numpy.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
        # Target vector: 0.01 everywhere except 0.99 at the correct label.
        targets = numpy.zeros(nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        net.train(inputs, targets)
# Number of full passes over the training set.
epochs = 100
# Repeat the whole-dataset training pass epoch by epoch.
for epoch in range(epochs):
    training()
print("训练完成")
# Read the held-out test set.
# FIX: the original had `test_data_file.close` without parentheses, so the
# file handle was never actually closed; `with` guarantees it is.
with open("C:/Users/15186/Desktop/mnist_dataset/mnist_test_10.csv", 'r') as test_data_file:
    test_data_list = test_data_file.readlines()
# Score the network on the test set: 1 per correct prediction, 0 per miss.
scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])  # first field is the true digit
    # FIX: the original divided by 225.0 — a typo for 255.0 — so test inputs
    # were scaled differently from the training inputs.
    # (asfarray also replaced: removed in NumPy 2.0.)
    inputs = (numpy.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    # Predicted digit = index of the strongest output node.
    label = numpy.argmax(outputs)
    scorecard.append(1 if label == correct_label else 0)
print(scorecard)