BP Neural Network -- A Detailed Tutorial

A detailed tutorial for learning BP (backpropagation) neural networks: http://blog.csdn.net/accumulate_zhang/article/details/53265766

Two-layer neural network (a single weight matrix syn0 mapping the inputs directly to a sigmoid output)
import numpy as np


# sigmoid activation function; with deriv=True it returns the derivative,
# assuming x is already a sigmoid output (sigmoid'(z) = sigmoid(z)*(1-sigmoid(z)))
def nonlin(x,deriv=False):
	if deriv:
		return x*(1-x)
	return 1/(1+np.exp(-x))
	
# input dataset
X=np.array([[0,0,1],
            [0,1,1],
            [1,0,1],
            [1,1,1]])
						
# output dataset (each label equals the first input column)
y=np.array([[0,0,1,1]]).T

# seed random numbers to make calculation deterministic (just a good practice)
np.random.seed(1)

# initialize weights randomly with mean 0
syn0=2*np.random.random((3,1))-1

for iter in range(10000):
	
	# forward propagation
	l0=X
	l1=nonlin(np.dot(l0,syn0))
	
	# how much did we miss?
	l1_error=y-l1
	
	# multiply how much we missed by the 
	#slope of the sigmoid at the values in l1
	l1_delta=l1_error*nonlin(l1,True)

	# update weights
	syn0+=np.dot(l0.T,l1_delta)

print("Output After Training:")
print l1


#Output After Training:
#[[ 0.00966449]
# [ 0.00786506]
# [ 0.99358898]
# [ 0.99211957]]
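Why nonlin(l1, True) can work on l1 directly: the sigmoid derivative satisfies sigmoid'(z) = sigmoid(z)*(1-sigmoid(z)), so the slope can be computed from the activation that is already stored in l1, without keeping the pre-activation around. Below is a minimal sketch that checks this identity numerically with a central difference; the variable names and the eps value are only for illustration and are not part of the tutorial code.

import numpy as np

def sigmoid(z):
	return 1/(1+np.exp(-z))

# central-difference check that d/dz sigmoid(z) == sigmoid(z)*(1-sigmoid(z)),
# which is the identity nonlin(l1, True) relies on
z=np.array([-2.0,-0.5,0.0,0.5,2.0])
eps=1e-6
numeric=(sigmoid(z+eps)-sigmoid(z-eps))/(2*eps)
analytic=sigmoid(z)*(1-sigmoid(z))
print(np.max(np.abs(numeric-analytic)))   # prints a value very close to 0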


Three-layer neural network (one hidden layer of 4 sigmoid units, syn0, followed by a sigmoid output layer, syn1)
import numpy as np

def nonlin(x,deriv=False):
	if deriv:
		return x*(1-x)
	return 1/(1+np.exp(-x))
	
X=np.array([[0,0,1],
            [0,1,1],
            [1,0,1],
            [1,1,1]])
						 
y=np.array([[0],
            [1],
            [1],
            [0]])
					
np.random.seed(1)

# randomly initialize our weights with mean 0
syn0=2*np.random.random((3,4))-1
syn1=2*np.random.random((4,1))-1

for j in range(60000):
	# Feed forward through layers 0, 1, and 2
	l0=X
	l1=nonlin(np.dot(l0,syn0))
	l2=nonlin(np.dot(l1,syn1))
	
	# how much did we miss the target value?
	l2_error=y-l2
	if(j%10000 == 0):
		print("Error: " + str(np.mean(np.abs(l2_error))))
		
	#in what direction is the target value?
	# were we really sure? if so, don't change too much.
	l2_delta=l2_error*nonlin(l2,deriv=True)
	
	# how much did each l1 value contribute to the l2 error (according to the weights)?
	l1_error=l2_delta.dot(syn1.T)
	
	# in what direction is the target l1?
	# were we really sure? if so, don't change too much.
	l1_delta=l1_error*nonlin(l1,deriv=True)
	
	syn1+=l1.T.dot(l2_delta)
	syn0+=l0.T.dot(l1_delta)

print("Output After Training:")
print l2
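
Once the loop finishes, the trained weight matrices syn0 and syn1 can be reused to predict on an unseen input by repeating the same forward pass. A minimal sketch, assuming the training code above has already run in the same session; the input [1,0,0] is only an illustrative example and was not part of the training set.

# forward pass with the trained weights
new_input=np.array([[1,0,0]])
hidden=nonlin(np.dot(new_input,syn0))
prediction=nonlin(np.dot(hidden,syn1))
print(prediction)   # a value between 0 and 1; values near 1 mean the network predicts class 1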



