Python Neural Network Programming, Chapter 2: DIY with Python

Recognising handwritten digits with a neural network:

import numpy
# scipy.special for the sigmoid function expit(), i.e. the S-shaped logistic function
import scipy.special
# library for plotting arrays
import matplotlib.pyplot
# ensure the plots are inside this notebook, not an external window
%matplotlib inline

# neural network class definition
class neuralNetwork:

    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # set number of nodes in each input, hidden, output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes

        # link weight matrices, wih and who
        # weights inside the arrays are w_i_j, where link is from node i to node j in the next layer
        # w11 w21
        # w12 w22 etc
        # numpy.random.normal(loc, scale, size): loc is the mean of the distribution,
        # scale is its standard deviation, and size is the shape of the output array
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))

        # learning rate
        self.lr = learningrate

        # activation function is the sigmoid function
        # (a lambda creates a small function with no name of its own)
        self.activation_function = lambda x: scipy.special.expit(x)

        pass

    # train the neural network
    def train(self, inputs_list, targets_list):
        # convert inputs list to 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T

        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)

        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)

        # output layer error is the (target - actual)
        output_errors = targets - final_outputs
        # hidden layer error is the output_errors, split by weights, recombined at hidden nodes
        hidden_errors = numpy.dot(self.who.T, output_errors)

        # update the weights for the links between the hidden and output layers
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))

        # update the weights for the links between the input and hidden layers
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))

        pass

    # query the neural network
    def query(self, inputs_list):
        # convert inputs list to 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T

        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)

        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)

        return final_outputs
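With the class complete, a quick sanity check on a tiny untrained network can catch wiring mistakes early. The sketch below is only illustrative: the 3/3/3 layer sizes and the 0.3 learning rate are arbitrary, and the outputs hover near 0.5 simply because the weights are still random.

# minimal sanity check on a tiny untrained network (sizes and rate are arbitrary)
tiny = neuralNetwork(3, 3, 3, 0.3)
# weight matrices are shaped (nodes in next layer, nodes in previous layer)
print(tiny.wih.shape, tiny.who.shape)
# query with an arbitrary input; expect values near 0.5 before any training
print(tiny.query([1.0, 0.5, -1.5]))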

# number of input, hidden and output nodes

# 784 input nodes because each handwritten-digit image is 28*28 = 784 pixels
input_nodes = 784

# The number of hidden nodes (100 in the first experiment, 200 here) was not arrived
# at by any scientific method. Choosing a value smaller than the number of input
# nodes forces the network to try to summarise the key features of the input.
# But choosing too few hidden nodes limits the network's capacity, making it hard
# to find enough features or patterns.
# The number of output nodes, 10, should also be kept in mind.
# One point deserves emphasis: for a given problem there is no best method for
# choosing the number of hidden nodes, nor for choosing the number of hidden layers.
# For now, the best approach is to experiment until you find a number that suits
# the problem you are solving; a sketch of such an experiment follows just after
# the learning rate is set below.
hidden_nodes = 200

output_nodes = 10

# learning rate; this too takes repeated experimentation, and with several epochs
# of training 0.1 turns out to be a good value
learning_rate = 0.1
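The comment above on experimenting with hidden-node counts can be made concrete. The sketch below is a hedged template rather than a definitive benchmark: run_once is an invented helper name, it trains for a single pass only, the candidate counts are arbitrary, and it assumes the same mnist_dataset CSV files used in the rest of this listing.

# hedged sketch: score a single training pass for a few hidden-node counts
def run_once(n_hidden):
    net = neuralNetwork(input_nodes, n_hidden, output_nodes, learning_rate)
    # one pass over the training data, with the same scale-and-shift used below
    with open("mnist_dataset/mnist_train.csv", 'r') as f:
        for record in f:
            values = record.split(',')
            x = (numpy.asfarray(values[1:]) / 255.0 * 0.99) + 0.01
            t = numpy.zeros(output_nodes) + 0.01
            t[int(values[0])] = 0.99
            net.train(x, t)
    # score against the test set
    correct, total = 0, 0
    with open("mnist_dataset/mnist_test.csv", 'r') as f:
        for record in f:
            values = record.split(',')
            x = (numpy.asfarray(values[1:]) / 255.0 * 0.99) + 0.01
            correct += int(numpy.argmax(net.query(x)) == int(values[0]))
            total += 1
    return correct / total

for n_hidden in (10, 100, 200):
    print(n_hidden, "hidden nodes ->", run_once(n_hidden))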

# create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# load the mnist training data CSV file into a list
training_data_file = open("mnist_dataset/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
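matplotlib.pyplot was imported at the top but is otherwise unused in this listing. As a quick, optional check that the data loaded sensibly, this sketch (record index 0 is an arbitrary choice) renders the first training record as a 28x28 image.

# peek at one training record: the first value is the label,
# the remaining 784 values are the 28x28 pixel grid
all_values = training_data_list[0].split(',')
image_array = numpy.asfarray(all_values[1:]).reshape((28, 28))
matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')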

# train the neural network

# epochs is the number of times the training data set is used for training
# As with tuning the learning rate, it takes experiments over several different
# epoch counts, plotted out, to visualise the effects. Intuition says that the more
# training you do, the better the performance.
# But too much training is actually counterproductive, because the network overfits
# the training data.
# There is a sweet spot at around 5 or 7 epochs. Beyond that, performance drops,
# probably an effect of overfitting.
# The dip at 6 epochs may just be a bad run in which the network got stuck in a
# poor local minimum during gradient descent.
# In fact, without repeating the experiment many times at each data point, the
# influence of this randomness cannot be averaged out.
# The learning process at the heart of a neural network is stochastic: sometimes it
# works well, sometimes badly.
# Another possible cause is that, for larger epoch counts, the learning rate was set
# too high; with more epochs, reducing the learning rate does give better
# performance. If you intend to explore the gradient descent for longer (more
# epochs), you can afford shorter steps (a smaller learning rate) and find a better
# path overall.
# To choose these parameters properly and scientifically, you would run many
# experiments for every combination of learning rate and epochs, minimising the
# effect of randomness in gradient descent; a sketch of such an experiment follows
# after this training loop.
# Different numbers of hidden nodes and different activation functions are also
# worth trying.
epochs = 5

for e in range(epochs):
    # go through all records in the training data set
    for record in training_data_list:
        # split the record by the ',' commas
        all_values = record.split(',')
        # scale and shift the inputs: the scaled inputs must avoid 0, and the
        # target outputs must avoid 1, values the sigmoid can never quite reach
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # create the target output values (all 0.01, except the desired label which is 0.99)
        targets = numpy.zeros(output_nodes) + 0.01
        # all_values[0] is the target label for this record
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
        pass
    pass
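The comments before the training loop call for running many epoch/learning-rate combinations; the sketch below is a hedged template for that grid. train_and_score is an invented helper name, it re-opens the test CSV itself (the test set is only loaded later in this listing), a full grid over MNIST is slow, and each combination should really be repeated several times and averaged.

# hedged sketch of the grid experiment described above
def train_and_score(n_epochs, lr):
    net = neuralNetwork(input_nodes, hidden_nodes, output_nodes, lr)
    for _ in range(n_epochs):
        for record in training_data_list:
            values = record.split(',')
            x = (numpy.asfarray(values[1:]) / 255.0 * 0.99) + 0.01
            t = numpy.zeros(output_nodes) + 0.01
            t[int(values[0])] = 0.99
            net.train(x, t)
    # fraction of test records classified correctly
    scores = []
    with open("mnist_dataset/mnist_test.csv", 'r') as f:
        for record in f:
            values = record.split(',')
            x = (numpy.asfarray(values[1:]) / 255.0 * 0.99) + 0.01
            scores.append(int(numpy.argmax(net.query(x)) == int(values[0])))
    return sum(scores) / len(scores)

for n_epochs in (1, 5, 7, 10):
    for lr in (0.1, 0.2, 0.3):
        print(n_epochs, "epochs, lr", lr, "->", train_and_score(n_epochs, lr))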

# load the mnist test data CSV file into a list
test_data_file = open("mnist_dataset/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()

# test the neural network

# scorecard for how well the network performs, initially empty
scorecard = []

# go through all the records in the test data set
for record in test_data_list:
    # split the record by the ',' commas
    all_values = record.split(',')
    # correct answer is first value
    correct_label = int(all_values[0])
    # scale and shift the inputs
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # query the network
    outputs = n.query(inputs)
    # the index of the highest value corresponds to the label
    label = numpy.argmax(outputs)
    # append correct or incorrect to list
    if (label == correct_label):
        # network's answer matches correct answer, add 1 to scorecard
        scorecard.append(1)
    else:
        # network's answer doesn't match correct answer, add 0 to scorecard
        scorecard.append(0)
        pass
    pass

# calculate the performance score, the fraction of correct answers
scorecard_array = numpy.asarray(scorecard)
print("performance = ", scorecard_array.sum() / scorecard_array.size)

# performance = 0.9712
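A natural follow-up is to look at what the network gets wrong. This sketch reuses the trained network n and test_data_list from above to find and draw the first misclassified test record; the loop simply mirrors the scoring loop and is illustrative only.

# sketch: display the first test record the trained network misclassifies
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    label = numpy.argmax(n.query(inputs))
    if label != correct_label:
        print("network said", label, "but correct label is", correct_label)
        matplotlib.pyplot.imshow(numpy.asfarray(all_values[1:]).reshape((28, 28)),
                                 cmap='Greys', interpolation='None')
        break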
