Neural Networks II (Neural Network)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'wlc'
__mtime__ = '2017/9/04'
"""
import numpy as np
import random

class Network(object):
    def __init__(self, sizes):  # sizes: the number of neurons in each layer, e.g. [3, 2, 4]
        self.num_layers = len(sizes)  # number of layers
        self.sizes = sizes
        # np.random.randn draws from a standard normal (Gaussian) distribution, mean 0 and variance 1.
        # The input layer has no biases, so biases are only created for sizes[1:].
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # zip pairs up adjacent layer sizes, e.g. zip([1, 2], [2, 3, 4]) -> [(1, 2), (2, 3)].
        # sizes[:-1] is every element except the last; sizes[1:] is every element except the first.
        self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
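        # For Network([3, 2, 4]) this produces weight matrices of shapes (2, 3) and (4, 2)
        # and bias vectors of shapes (2, 1) and (4, 1); weights[l][j][k] is the weight
        # from neuron k in layer l to neuron j in layer l + 1.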

    def feedforward(self, a):  # repeatedly apply a' = sigmoid(w·a + b), layer by layer

        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a  # the output activation vector
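        # E.g. with Network([2, 3, 1]), an input of shape (2, 1) passes through a
        # (3, 1) hidden activation and comes back as a (1, 1) output.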
    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives \partial C_x /
        \partial a for the output activations."""
        return (output_activations - y)

    def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):

        if test_data: n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)  # shuffle the training data at the start of each epoch
            mini_batches = [training_data[k:k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)
                            ]  # slice the shuffled data into consecutive mini-batches
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            # report once per epoch, after every mini-batch has been applied
            if test_data:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test
                ))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]

        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)  # partial derivatives of the cost w.r.t. the weights and biases
            # accumulate the per-example gradients; their average over the mini-batch
            # stands in for the gradient over the full training set (the essence of SGD)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [  # gradient-descent update rule for the weights
                          w - (eta / len(mini_batch)) * nw
                          for w, nw in zip(self.weights, nabla_w)
                          ]
        self.biases = [  # and the matching update for the biases
                         b - (eta / len(mini_batch)) * nb
                         for b, nb in zip(self.biases, nabla_b)
                         ]
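        # In symbols, for a mini-batch of size m:
        #   w -> w - (eta / m) * sum_x (dC_x / dw)
        #   b -> b - (eta / m) * sum_x (dC_x / db)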

    def evaluate(self, test_data):
        # for handwritten-digit recognition the network returns a 10-dimensional
        # vector; the index of its largest component is the predicted class
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)  # count of correct predictions on the test set
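        # Note: the comparison above assumes each y in test_data is an integer class
        # label (unlike the vector-valued training targets), as in the MNIST loader
        # this code is usually paired with.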

    def backprop(self, x, y):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # forward pass
        activation = x
        activations = [x]  # stores all the activations, layer by layer
        zs = []  # stores all the weighted inputs z, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        # (by analogy: for y = x**2, delta_y = 2x * delta_x; so at the output layer
        # the error is the cost derivative times the derivative of the activation)
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])  # output-layer error
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)
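    # The backward pass above implements the standard backpropagation equations:
    #   delta^L = (a^L - y) * sigmoid'(z^L)                      (output layer)
    #   delta^l = (w^{l+1}.T @ delta^{l+1}) * sigmoid'(z^l)      (earlier layers)
    #   dC/db^l = delta^l        dC/dw^l = delta^l @ (a^{l-1}).T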

nn = Network([2, 3, 1])
print("# layer-to-layer connection weights of a [2, 3, 1] network")
print(nn.weights)  # each row of an array holds the weights from every neuron in the current layer into one neuron of the next layer
print("# Biases")
print(nn.biases)


#### Miscellaneous functions
def sigmoid(z):
    """The sigmoid function."""
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z) * (1 - sigmoid(z))
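
To see the whole pipeline run end to end, here is a minimal training sketch on a hand-built XOR-style dataset. The dataset, hyperparameters, and repetition count below are illustrative choices, not part of the original code, and a network this small will usually, though not always, manage to fit XOR.

# Minimal end-to-end sketch (illustrative; dataset and hyperparameters are assumptions).
# Inputs are (2, 1) column vectors and targets are (1, 1) column vectors,
# matching the shapes that feedforward and backprop expect.
xor_data = [
    (np.array([[0.0], [0.0]]), np.array([[0.0]])),
    (np.array([[0.0], [1.0]]), np.array([[1.0]])),
    (np.array([[1.0], [0.0]]), np.array([[1.0]])),
    (np.array([[1.0], [1.0]]), np.array([[0.0]])),
]
net = Network([2, 3, 1])
net.SGD(xor_data * 250, epochs=30, mini_batch_size=10, eta=3.0)
for x, _ in xor_data:
    print(x.ravel(), "->", net.feedforward(x).ravel())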

  

Reposted from: https://www.cnblogs.com/wlc297984368/p/7479986.html
