Implementation of a deep neural network --- Python

This post implements a small fully connected network with sigmoid
activations, trained by mini-batch stochastic gradient descent with
backpropagation, following the reference at the end.

import numpy as np
import random


def sigmoid(z):
    """The sigmoid activation function."""
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    """Derivative of the sigmoid: sigmoid(z) * (1 - sigmoid(z))."""
    return sigmoid(z) * (1.0 - sigmoid(z))
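
# Optional sanity check (illustrative, not in the original listing):
# sigmoid_prime should agree with a centered finite difference of
# sigmoid; the step size and tolerance here are arbitrary choices.
_z = np.linspace(-4.0, 4.0, 9)
_h = 1e-5
assert np.allclose(sigmoid_prime(_z),
                   (sigmoid(_z + _h) - sigmoid(_z - _h)) / (2.0 * _h),
                   atol=1e-8)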


class Net(object):
    def __init__(self, sizes):
        # sizes[i] is the number of neurons in layer i, e.g. Net([2, 3, 3, 1])
        # builds a network with a 2-neuron input layer and a 1-neuron output.
        self.layer_num = len(sizes)
        self.sizes = sizes
        # One bias column vector per non-input layer.
        self.bias = [np.random.randn(y, 1) for y in sizes[1:]]
        # weights[i] has shape (sizes[i+1], sizes[i]): it maps the
        # activations of layer i to the weighted inputs of layer i+1.
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]
    

    def feedforward(self, a):
        """Return the network's output for input a (a plain Python list)."""
        a = np.array([a]).transpose()  # turn the input into a column vector
        for b, w in zip(self.bias, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a


    def SGD(self, training_data, epochs, mini_batch_size, eta):
        """Train with mini-batch stochastic gradient descent.

        training_data is a list of (x, y) pairs and eta is the learning rate.
        """
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k:k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if j % 100 == 0:
                print('epoch {0} complete..'.format(j))
    
    def update_mini_batch(self, mini_batch, eta):
        # Accumulate the gradient over the mini-batch, then take one step:
        # w <- w - (eta / m) * sum(dC/dw), and likewise for the biases.
        nabla_b = [np.zeros(b.shape) for b in self.bias]
        nabla_w = [np.zeros(w.shape) for w in self.weights]

        for x, y in mini_batch:
            delta_b, delta_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_w)]

        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.bias = [b - (eta / len(mini_batch)) * nb
                     for b, nb in zip(self.bias, nabla_b)]


    def backprop(self, x, y):
        """Return (nabla_b, nabla_w), the layer-by-layer gradient of the
        quadratic cost for a single training example (x, y)."""
        nabla_b = [np.zeros(b.shape) for b in self.bias]
        nabla_w = [np.zeros(w.shape) for w in self.weights]

        # forward pass: store every weighted input z and activation
        activation = np.array([x]).transpose()  # input as a column vector
        activations = [activation]
        zs = []
        for b, w in zip(self.bias, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)

        # backward pass: output-layer error delta = dC/da * sigma'(z),
        # then propagate it back through the transposed weight matrices
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.layer_num):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)

    def cost_derivative(self, output_activations, y):
        # dC/da for the quadratic cost C = 0.5 * ||a - y||^2
        return (output_activations - y)
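
# A quick shape check (illustrative, not in the original listing): for
# Net([2, 3, 3, 1]) each weight matrix should map one layer's
# activations to the next layer's weighted inputs.
_net = Net([2, 3, 3, 1])
print([w.shape for w in _net.weights])  # [(3, 2), (3, 3), (1, 3)]
print([b.shape for b in _net.bias])     # [(3, 1), (3, 1), (1, 1)]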

xx = Net([2, 3, 3, 1])
traindata = [([1, 1], 3), ([1, 0], 2), ([0, 0], 0), ([0, 1], 1),
             ([1, 1], 3), ([1, 0], 2), ([0, 0], 0), ([0, 1], 1)]
xx.SGD(traindata, 100, 4, 0.5)
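
A finite-difference gradient check is a standard way to validate a
backprop implementation, so here is a minimal sketch (not part of the
original listing): nudge a single weight, measure the change in the
quadratic cost 0.5 * ||a - y||^2, and compare that slope against the
analytic gradient from backprop. The grad_check helper and its index
arguments are hypothetical names chosen for this illustration.

def grad_check(net, x, y, l=0, i=0, j=0, eps=1e-5):
    # Analytic gradient from backprop for weight (i, j) of layer l.
    _, nabla_w = net.backprop(x, y)

    def cost():
        a = net.feedforward(x)
        return 0.5 * float(np.sum((a - y) ** 2))

    # Numerical gradient via a centered difference on the same weight.
    old = net.weights[l][i, j]
    net.weights[l][i, j] = old + eps
    c_plus = cost()
    net.weights[l][i, j] = old - eps
    c_minus = cost()
    net.weights[l][i, j] = old  # restore the original weight
    return (c_plus - c_minus) / (2.0 * eps), nabla_w[l][i, j]

print(grad_check(xx, [1, 0], 2))  # the two values should nearly agree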


--------------------analysis----------------------
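
The demo targets 0, 1, 2 and 3 lie outside the (0, 1) range of the
sigmoid output layer, so the network can at best saturate toward 1 on
the larger labels; scaling the targets into (0, 1), or using a linear
output unit, would let the fit actually converge.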



--------------------reference------------------------

Michael Nielsen, Neural Networks and Deep Learning,
http://neuralnetworksanddeeplearning.com/
