Writing a Neural Network by Hand

A quick post to record a neural network I wrote by hand, following up on the previous post's derivation of backpropagation for convolutional neural networks. This one is a plain three-layer network, and the task is handwritten digit recognition (MNIST). The code is messy and unorganized, but it runs as-is... I was too lazy to tidy it up while testing, so it's quite redundant; I'll optimize it later. Posted to commemorate a simple bug that took me three days to find. Carelessness kills!
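For reference, the per-example gradients accumulated in the code below are the standard backpropagation equations for a two-layer sigmoid network with quadratic cost (this summary is my own shorthand; the previous post has the full derivation):

$$\delta^{(2)} = (a^{(2)} - y) \odot \sigma'(z^{(2)}), \qquad \delta^{(1)} = (W^{(2)})^{\top} \delta^{(2)} \odot \sigma'(z^{(1)})$$

$$\frac{\partial C}{\partial W^{(2)}} = \delta^{(2)} (a^{(1)})^{\top}, \quad \frac{\partial C}{\partial b^{(2)}} = \delta^{(2)}, \quad \frac{\partial C}{\partial W^{(1)}} = \delta^{(1)} x^{\top}, \quad \frac{\partial C}{\partial b^{(1)}} = \delta^{(1)}$$

In the code, the temporaries a, b, c, d correspond to $\delta^{(2)}$, $\delta^{(1)}$, $\partial C/\partial W^{(2)}$ and $\partial C/\partial W^{(1)}$ respectively.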

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.utils import shuffle

def sigmoid(z):
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    return sigmoid(z)*(1-sigmoid(z))

def normalise_images(imgs, dist):
    # Standardize imgs with the mean/std of dist, so the test set can be
    # normalized with the training set's statistics.
    std = np.std(dist)
    mean = np.mean(dist)
    return (imgs - mean) / std

def Loss(Out, Label):
    # Quadratic (MSE) cost averaged over the batch: Out is a list of
    # (10, 1) network outputs, Label the matching one-hot labels.
    loss = 0
    for out, label in zip(Out, Label):
        loss += np.sum((out.flatten() - label.flatten()) ** 2)
    return loss/(2*len(Out))

def Testing(x_test, y_test, W_and_B):
    W1 = W_and_B[0]
    W2 = W_and_B[1]
    B1 = W_and_B[2]
    B2 = W_and_B[3]
    
    acc = 0
    Len = len(x_test)
    for x,y in zip(x_test, y_test):
        x = x.reshape([784,1])
        y = y.reshape([10,1])
        Hide = np.dot(W1, x) + B1
        Hide_s = sigmoid(Hide)
        Output = np.dot(W2, Hide_s) + B2
        Output_s = sigmoid(Output)
        a = np.argmax(Output_s)
        b = np.argmax(y)
        if a==b:
            acc+=1
    acc = acc/Len
    print('Accuracy: %f' % acc)
    
mnist = input_data.read_data_sets("mnist/", one_hot=True)
x_train, y_train = mnist.train.images,mnist.train.labels  
x_test, y_test = mnist.test.images, mnist.test.labels
x_train = normalise_images(x_train, x_train)
x_test = normalise_images(x_test, x_train)

batch_size = 10
num_input = 784 
num_hide = 30
num_classes = 10
lr = 3  # learning rate
EPOCHS = 10

W1 = np.random.randn(num_hide, num_input)
B1 = np.random.randn(num_hide,1)
W2 = np.random.randn(num_classes, num_hide)
B2 = np.random.randn(num_classes,1)

LOSS = list()

for i in range(EPOCHS):
    num_examples = len(x_train)
    x_train,y_train = shuffle(x_train,y_train)
    for offset in range(0,num_examples,batch_size):
        end = offset+batch_size
        batch_x,batch_y = x_train[offset:end],y_train[offset:end]

        length = len(batch_x)
        tmp = 1.0/length
        Out_list = list()
        
        delta_1 = np.zeros(B2.shape)
        delta_2 = np.zeros(B1.shape)
        delta_W1 = np.zeros(W1.shape)
        delta_W2 = np.zeros(W2.shape)

        for x,y in zip(batch_x, batch_y):  
            x = x.reshape([784,1])
            y = y.reshape([10,1])

            Hide = np.dot(W1, x) + B1
            Hide_s = sigmoid(Hide)
            Output = np.dot(W2, Hide_s) + B2
            Output_s = sigmoid(Output)
            
            Out_list.append(Output_s)

            a = (Output_s-y)*sigmoid_prime(Output)  # output-layer error; gradient of B2
            b = np.dot(W2.T, a)*sigmoid_prime(Hide)  # hidden-layer error; gradient of B1
            c = np.dot(a, Hide_s.T)  # gradient of W2
            d = np.dot(b, x.T)  # gradient of W1

            delta_1 += a
            delta_2 += b
            delta_W2 += c
            delta_W1 += d

        W1 = W1 - lr*tmp*delta_W1
        W2 = W2 - lr*tmp*delta_W2
        B1 = B1 - lr*tmp*delta_2
        B2 = B2 - lr*tmp*delta_1

        loss = Loss(Out_list, batch_y)
        LOSS.append(loss)
        
    print('EPOCH %d......' %i)
        

plt.plot(LOSS)
plt.show()

W_and_B = [W1, W2, B1, B2]
Testing(x_train, y_train, W_and_B)  # training-set accuracy
Testing(x_test, y_test, W_and_B)  # test-set accuracy

[Figure: training loss curve over mini-batches, from plt.plot(LOSS)]
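Since the per-example loop above is the redundant part I promised to optimize, here is a minimal vectorized sketch of the same mini-batch update, processing the whole batch as one matrix (the function vectorized_update is my own name, not part of the original script; it reuses sigmoid and sigmoid_prime from above):

def vectorized_update(batch_X, batch_Y, W1, W2, B1, B2, lr):
    # batch_X: (batch, 784), batch_Y: (batch, 10); the parameters have
    # the same shapes as in the script above and are updated in place.
    X = batch_X.T                                # (784, batch)
    Y = batch_Y.T                                # (10, batch)
    m = X.shape[1]

    Hide = np.dot(W1, X) + B1                    # B1 broadcasts over columns
    Hide_s = sigmoid(Hide)
    Output = np.dot(W2, Hide_s) + B2
    Output_s = sigmoid(Output)

    a = (Output_s - Y) * sigmoid_prime(Output)   # output-layer errors, all examples at once
    b = np.dot(W2.T, a) * sigmoid_prime(Hide)    # hidden-layer errors

    W2 -= (lr / m) * np.dot(a, Hide_s.T)         # the matmul sums over the batch
    W1 -= (lr / m) * np.dot(b, X.T)
    B2 -= (lr / m) * a.sum(axis=1, keepdims=True)
    B1 -= (lr / m) * b.sum(axis=1, keepdims=True)

Calling vectorized_update(batch_x, batch_y, W1, W2, B1, B2, lr) inside the offset loop should produce the same updates as the per-example version, just without the Python-level loop.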
Next, a version written by an expert (it closely follows network.py from Michael Nielsen's Neural Networks and Deep Learning) that I modified slightly: a general multi-layer neural network.

import random
import numpy as np

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist/", one_hot=True)

x_train, y_train = mnist.train.images,mnist.train.labels  
x_test, y_test = mnist.test.images, mnist.test.labels

def turning_data(x_data, y_data):
    # Pair each flattened image ((784, 1) column) with its one-hot label
    # ((10, 1) column), the sample format Network.SGD expects.
    Data = list()
    for x,y in zip(x_data, y_data):
        x = x.reshape([784,1])
        y = y.reshape([10,1])
        tmp = [x,y]
        Data.append(tmp)
    return Data

training_data = turning_data(x_train, y_train)
test_data = turning_data(x_test, y_test)

class Network(object):

    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        if test_data: n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x] # list to store all the activations, layer by layer
        zs = [] # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        test_results = [(np.argmax(self.feedforward(x)), np.argmax(y)) for (x, y) in test_data]
        return sum([int(x == y) for (x, y) in test_results])

    def cost_derivative(self, output_activations, y):
        return (output_activations-y)

def sigmoid(z):
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    return sigmoid(z)*(1-sigmoid(z))

net = Network([784, 30, 10])
net.SGD(training_data, 10, 10, 3.0, test_data=test_data)
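One caveat for running either version today: tensorflow.examples.tutorials.mnist only ships with TensorFlow 1.x. On TensorFlow 2.x, a minimal substitute loader (my sketch, assuming tf.keras is available; load_mnist_one_hot is a hypothetical helper name) could look like:

import numpy as np
import tensorflow as tf

def load_mnist_one_hot():
    # Flattened, [0, 1]-scaled images plus one-hot labels, matching the
    # shapes that input_data.read_data_sets(..., one_hot=True) produced.
    (x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()
    x_tr = x_tr.reshape(-1, 784).astype(np.float32) / 255.0
    x_te = x_te.reshape(-1, 784).astype(np.float32) / 255.0
    y_tr = np.eye(10, dtype=np.float32)[y_tr]    # one-hot encode labels
    y_te = np.eye(10, dtype=np.float32)[y_te]
    return x_tr, y_tr, x_te, y_te

x_train, y_train, x_test, y_test = load_mnist_one_hot()

Note that this keeps all 60,000 training images, whereas read_data_sets carved out 5,000 of them as a validation split.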

