Machine Learning - Algorithms - Neural Networks - BP

References:

http://wenku.baidu.com/view/7c38bb1b964bcf84b9d57b7d.html

http://wenku.baidu.com/view/e98dbbd433d4b14e8524680e.html

The two documents above are only so-so; some parts of them are wrong. I followed their worked example, but for the underlying theory I relied on the one below.

Here is a document devoted specifically to BP: http://wenku.baidu.com/view/00b75f5202768e9951e738de.html

Most of what you find online only covers the theory; a few posts sketch the procedure, but very few actually implement it. Where there is code, it is usually MATLAB calling built-in toolbox functions; I could not find an implementation written out from scratch.

In the end I hunted around for material and implemented it myself, in Python. Readers who work in other languages should still be able to follow it.

#-*- coding:utf-8 -*-
'''
Created on Aug 25, 2013

@author: blacklaw
@ref:    http://wenku.baidu.com/view/e98dbbd433d4b14e8524680e.html
'''
'''
    WingLength FeelerLength Class AimValue(0.9 -> 'Apf' 0.1 -> 'Af')
'''
DATA = [[1.78, 1.14, 'Apf', 0.9], 
        [1.96, 1.18, 'Apf', 0.9], 
        [1.86, 1.2, 'Apf', 0.9], 
        [1.72, 1.24, 'Af', 0.1], 
        [2.0, 1.26, 'Apf', 0.9], 
        [2.0, 1.28, 'Apf', 0.9], 
        
        [1.96, 1.3, 'Apf', 0.9], 
        [1.74, 1.36, 'Af', 0.1], 
        [1.64, 1.38, 'Af', 0.1], 
        [1.82, 1.38, 'Af', 0.1], 
        [1.9, 1.38, 'Af', 0.1], 
        [1.7, 1.4, 'Af', 0.1], 
        
        [1.82, 1.48, 'Af', 0.1], 
        [1.82, 1.54, 'Af', 0.1], 
        [2.08, 1.56, 'Af', 0.1]]
Eta = 0.1  # learning rate

import random
from math import e  # the sigmoid only needs the constant e

# Activation-function interface
class Inspirator():
    def inspire(self, x):
        return 0

# Sigmoid activation: f(x) = 1 / (1 + e^-x)
class SigmoidInspirator(Inspirator):
    def inspire(self, x):
        return 1.0 / (1 + e**-x)

# A single neuron: holds its buffered inputs, activation function, bias
# ("threshold") input, and links ("sons") to the neurons of the next layer.
class Nerve():
    def __init__(self):
        self.clear_inputs()
        self.clear_sons()
        self.inspirator = SigmoidInspirator()
        self.init_threshold()
        self.threshold_weight = -0.5  # weight of the constant bias input
        self.sons_weight = {}         # weight of the link to each son
        self.last_out = 0             # output of the most recent forward pass
        self.delta = 0                # error term used by backpropagation
    
    def set_threshold_w(self, w):
        self.threshold_weight = w
        return self
    
    def init_threshold(self):
        self.threshold = 1
        
    def input(self, x, weight = 0.5):
        self.inputs.append({'x':x, 'w':weight, 'o':0})
    
    def add_sons(self, sons, weights = 0.5):
        # connect this neuron to one or more neurons of the next layer,
        # giving each link an initial weight (DEFAULT_WEIGHT if none is supplied)
        DEFAULT_WEIGHT = 0.5
        if not isinstance(sons, list):
            sons = [sons]
        if not isinstance(weights, list):
            weights = [weights]
        for i, son in enumerate(sons):
            self.sons.append(son)
            try: self.sons_weight[son] = weights[i]
            except IndexError: self.sons_weight[son] = DEFAULT_WEIGHT
    
    def calc_out(self):
        # weighted sum of all buffered inputs plus the bias term, passed through the activation
        return self.inspirator.inspire(
                    sum([input['x'] * input['w'] for input in self.inputs] \
                        + [self.threshold * self.threshold_weight])
                )

    def calc_delta(self, sons_delta_sum):
        # delta = o * (1 - o) * back-propagated error; o * (1 - o) is the sigmoid derivative
        return self.last_out * (1 - self.last_out) * sons_delta_sum
      
    def clear_inputs(self):
        self.inputs = []
    
    def clear_sons(self):
        self.sons = []
    
    def refresh_delta(self):
        # back-propagated delta: the sigmoid derivative times the weighted sum of the sons' deltas
        sons_delta_sum = sum([self.sons_weight[son] * son.get_delta() for son in self.sons])
        self.set_delta(self.calc_delta(sons_delta_sum))
    
    def refresh_weights(self):
        # delta rule: each outgoing weight changes by Eta * (son's delta) * (this neuron's output);
        # the bias weight changes by Eta * (own delta) * 1, since the bias input is the constant 1
        for son in self.sons:
            d_weight = son.get_delta() * Eta * self.last_out
            self.sons_weight[son] += d_weight
        self.threshold_weight += Eta * 1 * self.get_delta()
            
    def output(self):
        # forward pass for this neuron: compute the output, remember it,
        # and feed it (together with the link weight) into every son
        out = self.calc_out()
        self.last_out = out
        for son in self.sons:
            weight = self.sons_weight[son]
            son.input(out, weight)
        return out
    
    def set_delta(self, delta):
        self.delta = delta
    
    def get_delta(self):
        return self.delta
    
    def info(self):
        return "Hash:%s\nInputs:%s\nLast_out:%s\nThreshold:%s W:%s\nSons:%s\nSons_weight:%s" % \
                   (hash(self),
                    self.inputs,
                    self.last_out,
                    self.threshold,
                    self.threshold_weight,
                    [hash(son) for son in self.sons],
                    [{hash(key) : value} for key, value in self.sons_weight.items()]
                    )

# Input-layer node: it has no incoming weights and simply forwards the raw feature value.
class InputNerve(Nerve):
    def __init__(self):
        Nerve.__init__(self)
        self.inputs = 0
    
    def input(self, x):
        self.inputs = x
        
    def calc_out(self):
        return self.inputs
    
# Builds and drives a three-layer network: input layer, one hidden layer, one output neuron.
class NervesManager():
    def __init__(self, nerve_count_list):
        self.inputs = []
        self.hiddens = []
        self.outputs = []
        self.create(nerve_count_list)
        
    def create(self, nerve_count_list):
        # create the input and hidden neurons (counts come from nerve_count_list)
        for i in range(nerve_count_list[0]):
            self.inputs.append(InputNerve())
        for i in range(nerve_count_list[1]):
            self.hiddens.append(Nerve())
        self.outputs.append(Nerve())
        # fully connect input -> hidden and hidden -> output with random initial weights
        for nerve in self.inputs:
            nerve.add_sons(self.hiddens, [random.random() for i in range(len(self.hiddens))])
        for nerve in self.hiddens:
            nerve.add_sons(self.outputs, [random.random() for i in range(len(self.outputs))])
    
    def train(self, value_list, T):
        # one training step: forward pass, then back-propagate the error T - O
        O = self.test(value_list)
        nerves = self.inputs + self.hiddens + self.outputs
        # set the output neuron's delta directly from the error
        self.outputs[0].set_delta(self.outputs[0].calc_delta(T - O))
        # propagate deltas back through the hidden and input layers
        for nerve in self.hiddens + self.inputs:
            nerve.refresh_delta()
        # finally adjust every connection weight with the delta rule
        for nerve in nerves:
            nerve.refresh_weights()
        return O, T
    
    def test(self, value_list):
        # forward pass only: feed the feature values in, fire every neuron in
        # layer order, then clear the buffered inputs and return the output value
        nerves = self.inputs + self.hiddens + self.outputs
        for i, input_nerve in enumerate(self.inputs):
            input_nerve.input(value_list[i])
        for nerve in nerves:
            nerve.output()
        for nerve in nerves:
            nerve.clear_inputs()
        return self.outputs[0].last_out
           
if __name__ == "__main__":
    '''
        input nodes:   2
        hidden nodes:  2
        output nodes:  1 
    '''
    manager = NervesManager([2, 2, 1])
    # train BP: up to 2000 epochs over the 15 samples, stopping early once the
    # mean squared error over one epoch drops below 0.005
    for i in range(2000):
        avr_sum = 0
        print('******** train **********')
        for line in DATA:
            O, T = manager.train([line[0], line[1]], line[3])
            avr_sum += (O - T)**2
            print(O, T)
        avr = avr_sum / len(DATA)
        print(avr)
        if avr < 0.005:
            break
    # test BP: print the network output next to the target value for every sample
    print("********* test **********")
    for line in DATA:
        print(manager.test([line[0], line[1]]), line[3])
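
The key BP-specific piece in the listing is calc_delta, which scales the back-propagated error by last_out * (1 - last_out). That factor is exactly the derivative of the sigmoid used in SigmoidInspirator. A tiny standalone check (not part of the program above, just a sanity test) compares the analytic factor against a centered finite difference:

# Standalone sanity check: the sigmoid derivative equals o * (1 - o),
# the factor used in Nerve.calc_delta.
from math import exp

def sigmoid(x):
    return 1.0 / (1.0 + exp(-x))

for x in [-2.0, -0.5, 0.0, 1.0, 3.0]:
    o = sigmoid(x)
    analytic = o * (1 - o)                                    # the factor BP uses
    numeric = (sigmoid(x + 1e-6) - sigmoid(x - 1e-6)) / 2e-6  # finite difference
    print(x, analytic, numeric)                               # last two columns should match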

The implementation is object-oriented, built around two main classes: Nerve and NervesManager.
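
As a quick usage sketch (the wing and feeler values below are hypothetical, chosen only to show the call), a trained manager classifies a new sample by thresholding its output at 0.5, since the targets were encoded as 0.9 for 'Apf' and 0.1 for 'Af':

# Assumes `manager` is the trained NervesManager from the __main__ block above.
wing_length, feeler_length = 1.85, 1.25   # hypothetical measurements
out = manager.test([wing_length, feeler_length])
label = 'Apf' if out > 0.5 else 'Af'      # targets: 0.9 -> 'Apf', 0.1 -> 'Af'
print(out, label)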

Here are some results. I trained for 2000 × 15 iterations with Eta set to 0.1; convergence is still fairly slow, and on a few runs it got stuck in a local minimum, which was frustrating. I will write a few more posts on improvements to BP later.

********* test **********
0.994341785362 0.9
0.999736065837 0.9
0.989063612563 0.9
0.163572725812 0.1
0.999218216462 0.9
0.997095492527 0.9
0.730657889775 0.9
0.134026079144 0.1
0.114695534729 0.1
0.142601445274 0.1
0.157811418673 0.1
0.119199019357 0.1
0.120173144422 0.1
0.108409846864 0.1
0.143414585379 0.1
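
About the slow convergence and local minima mentioned above: one common BP improvement (not used in the code in this post, only a preview of the kind of change a later post could cover) is a momentum term, where each weight update also carries a fraction of the previous update. A minimal sketch, assuming the Nerve class and Eta from the listing are in scope, with an illustrative Alpha value:

# Sketch only: a momentum variant of Nerve.refresh_weights.
Alpha = 0.8  # momentum coefficient (illustrative value)

class MomentumNerve(Nerve):
    def __init__(self):
        Nerve.__init__(self)
        self.prev_d_weight = {}  # last update applied to each outgoing weight

    def refresh_weights(self):
        for son in self.sons:
            d_weight = son.get_delta() * Eta * self.last_out \
                       + Alpha * self.prev_d_weight.get(son, 0)
            self.sons_weight[son] += d_weight
            self.prev_d_weight[son] = d_weight
        self.threshold_weight += Eta * 1 * self.get_delta()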




