First Neural Network (Complete Version)

This post explains in detail how to build a first neural network from scratch, covering the basic concepts of neural networks, model construction, the loss function, and the optimization step. Through a worked example, readers will see how a neural network operates and implement a simple neural network model themselves.
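Before diving into the code, it may help to summarise the weight-update step that the train() method below implements. Writing I for the input vector, O_h and O_o for the hidden and final outputs of the sigmoid σ(x) = 1/(1+e^{-x}), t for the target vector and α for the learning rate (these symbols are introduced here only for exposition; they correspond to inputs, hidden_outputs, final_outputs, targets and self.lr in the code), the errors and updates are, roughly:

    E_o = t - O_o,            E_h = W_{ho}^{T} E_o
    \Delta W_{ho} = \alpha \, (E_o \odot O_o \odot (1 - O_o)) \, O_h^{T}
    \Delta W_{ih} = \alpha \, (E_h \odot O_h \odot (1 - O_h)) \, I^{T}

Here ⊙ denotes element-wise multiplication, and the factor O ⊙ (1 - O) is the derivative of the sigmoid, so each update moves the weights a small step (scaled by α) in the direction that reduces the error signal at that layer.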
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 10:49:20 2019

@author: txx
"""


# neural network class definition
import numpy
import scipy.special
import matplotlib.pyplot
class neuralNetwork:
    
    # initialize the neural network
    def __init__(self,inputnodes,hiddennodes,outputnodes,
                 learningrate):
        
        #set number of nodes in each input,hidden,output layer
        self.inodes=inputnodes
        self.hnodes=hiddennodes
        self.onodes=outputnodes
        
        
        # link weight matrices, wih and who
        # weights inside the arrays are w_i_j,where link is from
        # node i to node j in the next layer
        # w11 w21
        # w12 w22 etc
        self.wih=numpy.random.normal(0.0,pow(self.hnodes,-0.5),
                                     (self.hnodes,self.inodes))
        self.who=numpy.random.normal(0.0,pow(self.onodes,-0.5),
                                     (self.onodes,self.hnodes))
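        # each weight is drawn from a normal distribution centred on 0.0 with a
        # standard deviation of 1/sqrt(number of incoming links) - pow(nodes, -0.5) -
        # which keeps the initial signals inside the sigmoid's sensitive range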
        
        
        # learning rate
        self.lr=learningrate
        
        # activation function is the sigmoid function
        self.activation_function=lambda x: scipy.special.expit(x)
        
        pass
    
    
    # train the neural network
    def train(self,inputs_list,targets_list):
        
        #convert inputs list to 2d array
        inputs=numpy.array(inputs_list,ndmin=2).T
        targets=numpy.array(targets_list,ndmin=2).T
        
        # calculate signals into hidden layer
        hidden_inputs=numpy.dot(self.wih,inputs)
        # calculate the signals emerging from hidden layer
        hidden_outputs=self.activation_function(hidden_inputs)
        
        #calculate signals into final output layer
        final_inputs=numpy.dot(self.who,hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs=self.activation_function(final_inputs)
        
        
        # output layer error is the (target - actual)
        output_errors=targets - final_outputs
        # hidden layer error is the output_errors, split by weights,
        # recombined at hidden nodes
        hidden_errors=numpy.dot(self.who.T,output_errors)
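        # implemented as who.T dotted with the output errors, so each hidden node
        # receives a share of the error proportional to its outgoing link weights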
        
        
        # update the weights for the links between the hidden
        # and output layers
        self.who += self.lr * numpy.dot((output_errors *
            final_outputs * (1.0 - final_outputs)),
            numpy.transpose(hidden_outputs))
        
        # update the weights for the links between the input and hidden layers
        self.wih += self.lr * numpy.dot((hidden_errors *
            hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))
        pass
    
    
    
    # query the neural network
    def query(self,inputs_list):
        # convert inputs list to 2d array
        inputs=numpy.array(inputs_list,ndmin=2).T
        
        # calculate signals into hidden layer
        hidden_inputs=numpy.dot(self.wih,inputs)
        #calculate the signals emerging from hidden layer
        hidden_outputs=self.activation_function(hidden_inputs)
        #calculate signals into final output layer
        final_inputs=numpy.dot(self.who,hidden_outputs)
        # calculate the signals emerging from final output layer
        final_outputs=self.activation_function(final_inputs)
        
        return final_outputs
        
    
# number of input, hidden and output nodes
# 784 = 28*28 pixel values per image; 10 output nodes, one per digit 0-9
input_nodes=784
hidden_nodes=100
output_nodes=10

# learning rate
learning_rate=0.1

# create instance of neural network
n=neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate)

# load the mnist training data CSV file into a list
training_data_file=open("D:/anaconda/mnist_dataset/mnist_train.csv",'r')
training_data_list=training_data_file.readlines()
training_data_file.close()
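# each line of the CSV is one record: the label (0-9) followed by 784 pixel values (0-255)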

# train the neural network
# epochs is the number of times the training data set is used for training
epochs=5

for e in range(epochs):
    # go through all records in the training data set
    for record in training_data_list:
        # split the record by the ',' commas
        all_values=record.split(',')
        # scale and shift the 0-255 pixel values into the range 0.01 to 1.00
        inputs=(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01
        
        targets=numpy.zeros(output_nodes)+0.01
        # all_values[0] is the target label for this record
        targets[int(all_values[0])]=0.99
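        # targets use 0.01 and 0.99 rather than 0 and 1 because the sigmoid can
        # never actually reach 0 or 1; extreme targets would only drive the
        # weights towards saturation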
        n.train(inputs,targets)
        pass
    pass

test_data_file=open("D:/anaconda/mnist_dataset/mnist_test.csv",'r')
test_data_list=test_data_file.readlines()
test_data_file.close()

# test the neural network

# scorecard for how well the network performs,initially empty
scorecard=[]

# go through all records in the test data set
for record in test_data_list:
    # split the record by the ',' commas
    all_values=record.split(',')
    # correct answer is first value
    correct_label=int(all_values[0])
    #print(correct_label,"correct label")
    # scale and shift the 0-255 pixel values into the range 0.01 to 1.00
    inputs=(numpy.asfarray(all_values[1:])/255.0*0.99)+0.01
    
    # query the network
    outputs=n.query(inputs)
    # the index of the highest value corresponds to the label
    label=numpy.argmax(outputs)
    print(label,"network's answer")
    # append correct or incorrect to list
    if(label==correct_label):
        scorecard.append(1)
    else:
        scorecard.append(0)
        pass
    pass




# display the first test record: print its label and show the 28x28 image
all_values=test_data_list[0].split(',')
print(all_values[0])

image_array=numpy.asfarray(all_values[1:]).reshape((28,28))
matplotlib.pyplot.imshow(image_array,cmap='Greys',interpolation='None')
matplotlib.pyplot.show()



print(scorecard)

# calculate the performance score,the fraction of correct answers
scorecard_array=numpy.asarray(scorecard)
print("performance =",scorecard_array.sum() / scorecard_array.size)