Make Your Own Neural Network: a deep dive into the BP (backpropagation) neural network code, with detailed comments, for handwritten digit recognition

This walkthrough annotates the 3-layer network from "Make Your Own Neural Network" and trains it on the MNIST handwritten digit database.

# python notebook for Make Your Own Neural Network
# code for a 3-layer neural network, and code for learning the MNIST dataset
# (c) Tariq Rashid, 2016
# license is GPLv2
import numpy
# scipy.special for the sigmoid function expit()
import scipy.special
# library for plotting arrays
import matplotlib.pyplot
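
As a quick aside, scipy.special.expit() is just the logistic sigmoid 1 / (1 + e^-x); a minimal check (the sample values here are arbitrary):

import numpy
import scipy.special

# expit(x) computes 1 / (1 + exp(-x)) element-wise
x = numpy.array([-2.0, 0.0, 2.0])
print(scipy.special.expit(x))        # [0.11920292 0.5        0.88079708]
print(1.0 / (1.0 + numpy.exp(-x)))   # same values, computed by hand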


# ensure the plots are inside this notebook, not an external window
%matplotlib inline

# neural network class definition
class neuralNetwork:

    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # set number of nodes in each input, hidden, output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes

        # link weight matrices, wih and who
        # weights inside the arrays are w_i_j, where link is from node i to node j in the next layer
        # w11 w21
        # w12 w22 etc
        # input_nodes = 784 = self.inodes
        #   (the dataset is 10000 x 785; the first column is the label,
        #    leaving 784 pixel features per record)
        # hidden_nodes = 200 = self.hnodes
        # output_nodes = 10 = self.onodes, one per digit class
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))  # shape (200, 784)
        # rule of thumb: if a node has n incoming links, its weights are sampled
        # from a range on the order of (-1/sqrt(n), +1/sqrt(n));
        # e.g. with 3 links the weights fall roughly within (-1/sqrt(3), +1/sqrt(3))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))  # shape (10, 200)
        # numpy.random.normal(loc, scale, size) draws a normally distributed array:
        # loc is the mean, scale the standard deviation, size the shape;
        # pow(self.hnodes, -0.5) is self.hnodes to the power -0.5, i.e. 1/sqrt(hnodes)
        # learning rate
        self.lr = learningrate

        # activation function is the sigmoid function
        self.activation_function = lambda x: scipy.special.expit(x)

        pass

    # train the neural network
    def train(self, inputs_list, targets_list):
        # convert inputs list to 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T    # transpose the input row vector into a column vector (784 feature inputs)
        targets = numpy.array(targets_list, ndmin=2).T

        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # matrix product: self.wih is 200x784 and inputs is 784x1, giving a 200x1 matrix
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # the hidden layer's combined inputs must pass through the sigmoid activation
        # before entering the hidden-to-output part of the network; these activations
        # then combine with who to produce the output layer's inputs
        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        #  self.who is 10x200 and hidden_outputs is 200x1, giving a 10x1 output,
        #  which is then compared against the target to correct the weights
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)
        # the sigmoid activation produces the final output
        # output layer error is the (target - actual); the targets look like:
        # [0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.99 0.01]
        # [0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.99]
        # with training, one of the outputs moves toward 0.99 and the rest toward 0.01
        output_errors = targets - final_outputs
        # hidden layer error is the output_errors, split by weights, recombined at hidden nodes
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # error backpropagation: hidden_errors = who.T dot output_errors
        # update the weights for the links between the hidden and output layers
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)),
                                        numpy.transpose(hidden_outputs))
        # the output-layer errors correct the weights of the hidden-to-output links (who)
        # numpy.transpose(hidden_outputs) turns the column vector back into a row
        # gradient descent update: new_Wjk = old_Wjk - lr * dE/dWjk, where
        # dE/dWjk = -(targets - Ok) * Ok * (1 - Ok) dot Oj^T
        # (Ok is the output of layer k, the output layer; Oj is the output of layer j,
        # the hidden layer) -- exactly the expression added to self.who above
        # update the weights for the links between the input and hidden layers
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
                                        numpy.transpose(inputs))
        # the hidden-layer errors correct the input-to-hidden weights (wih),
        # following the same update rule as above
        pass

    # query the neural network
    def query(self, inputs_list):
        # convert inputs list to 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T

        # calculate signals into hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)

        # calculate signals into final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs = self.activation_function(final_inputs)

        return final_outputs
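
Before wiring in real data, a quick sanity check of the matrix shapes is useful. A minimal sketch, using the tiny 3-3-3 network from the book (the input values are arbitrary; only the shapes matter):

# hypothetical shape check on a small 3-input, 3-hidden, 3-output network
nn = neuralNetwork(3, 3, 3, 0.3)
out = nn.query([1.0, 0.5, -1.5])
print(out.shape)   # (3, 1): a column vector with one sigmoid output per output node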


# number of input, hidden and output nodes
input_nodes = 784
# the dataset is 10000 x 785; the first column is the digit label, leaving 784 pixel features
hidden_nodes = 200
output_nodes = 10

# learning rate
learning_rate = 0.1

# create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# load the mnist training data CSV file into a list
training_data_file = open(r"C:\Users\Administrator\Desktop\mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
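
To see what one record looks like before preprocessing, print the start of the first line; assuming the CSV layout described above (label first, then 784 pixel values):

# each line is one image: the label, then 784 comma-separated pixel values in 0-255
print(training_data_list[0][:40])   # e.g. "5,0,0,0,0,0,0,0,0,0,..." (the digit varies)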
# train the neural network

# epochs is the number of times the training data set is used for training
epochs = 5
# preprocess each record, then feed it into the network for training
for e in range(epochs):
    # go through all records in the training data set
    for record in training_data_list:
        # split the record by the ',' commas
        all_values = record.split(',')  # all_values is a list, so all_values[0] below retrieves the label in the first column
        # scale and shift the inputs
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # asfarray converts the value strings to floats (it also handles multi-dimensional arrays);
        # the rescale to 0.01-1.00 avoids zero inputs, which would kill the weight updates
        # create the target output values (all 0.01, except the desired label which is 0.99)
        targets = numpy.zeros(output_nodes) + 0.01  # printed, this is [0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01]
        # all_values[0] is the target label for this record
        targets[int(all_values[0])] = 0.99
        # whatever class the record belongs to, int(all_values[0]) gives its numeric index;
        # that position is set to 0.99 while every other position stays 0.01, e.g.:
        # [0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.99 0.01]   (label 8)
        # [0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.01 0.99]   (label 9)
        n.train(inputs, targets)
        pass
    pass
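
To make the target encoding concrete, here is the same construction run once in isolation, for a hypothetical label of 6:

# standalone demo of the target encoding used inside the loop above
demo_targets = numpy.zeros(output_nodes) + 0.01
demo_targets[6] = 0.99    # pretend the record's label was 6
print(demo_targets)       # [0.01 0.01 0.01 0.01 0.01 0.01 0.99 0.01 0.01 0.01]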
# load the mnist test data CSV file into a list
test_data_file = open(r"C:\Users\Administrator\Desktop\mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# test the neural network

# scorecard for how well the network performs, initially empty
scorecard = []

# go through all the records in the test data set
for record in test_data_list:
    # split the record by the ',' commas
    all_values = record.split(',')
    # before the split each record is one string: "6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,..."
    # after the split it is a list: ['6', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', ...]
    # correct answer is first value
    correct_label = int(all_values[0])
    # scale and shift the inputs
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # apply the same scaling as for the training samples, keeping the treatment consistent
    # query the network
    outputs = n.query(inputs)
    # feed the test data into the trained network to classify it
    # the index of the highest value corresponds to the label
    label = numpy.argmax(outputs)
    # argmax finds the position of the largest value in the output matrix,
    # i.e. which class the network considers most likely; for example:
    # a = [[1, 4, 3]]
    # label = numpy.argmax(a)
    # here argmax returns 1, the index of the maximum value (4)
    # append correct or incorrect to list
    if (label == correct_label):
        # network's answer matches correct answer, add 1 to scorecard
        scorecard.append(1)
        # if the classification is correct, append a 1 to the scorecard;
        # the fraction of 1s at the end is the accuracy
    else:
        # network's answer doesn't match correct answer, add 0 to scorecard
        scorecard.append(0)
        pass

    pass

# calculate the performance score, the fraction of correct answers
scorecard_array = numpy.asarray(scorecard)
print("performance = ", scorecard_array.sum() / scorecard_array.size)