A small Python program for verifying an Autoencoder

After two days of tinkering, I finally wrote my first piece of Python code.

I used to try to piece code together by copying from other people, but in practice that approach doesn't work. Maybe it was the cold I caught over the last couple of days, or maybe something else, but patched-together code muddles your thinking and has no unified logic, and debugging it becomes a matter of feeling your way across the river stone by stone, which is even more exhausting. So whenever you can write the code yourself, write it yourself.


This code verifies the feature-extraction ability of an Autoencoder.

Strictly speaking, though, the code is closer to a two-layer BP network, just with Input = Output.
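
For reference, this is my reading of the math the code below implements: the standard squared-error backpropagation update for sigmoid units, written with the same variable names as the code (⊙ denotes element-wise multiplication):

    Z1 = X·W1 + B1,      A1 = sigmoid(Z1)
    Z2 = A1·W2 + B2,     A2 = sigmoid(Z2)
    delta2 = -(Y - A2) ⊙ A2 ⊙ (1 - A2)
    delta1 = (delta2·W2ᵀ) ⊙ A1 ⊙ (1 - A1)
    Jw = (1/M)·Σ‖Y - A2‖²    (the reconstruction error printed during training)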


The code draws on this blog post, http://blog.sina.com.cn/s/blog_88e2dbbf0101o41y.html, as well as this introductory article by a well-known expert: http://blog.sina.com.cn/s/blog_593af2a70101endk.html





import numpy as np

class myautoencoder():
    '''A simple version of an autoencoder.
    X--------input matrix
    Y--------output (target) matrix
    indim----number of nodes in the input layer
    hdim-----number of nodes in the hidden layer
    Z1 = X*W1 + B1
    A1 = sigmoid(Z1)
    Z2 = A1*W2 + B2
    A2 = sigmoid(Z2)
    '''

    def __init__(self, X, Y, indim, hdim):
        # training samples
        self.X = X  # input X [[sample1],[sample2],[sample3]]
        self.Y = Y
        # number of samples
        self.M = len(self.X)
        self.indim = indim
        self.hdim = hdim
        # initialize the weights randomly and the biases to 0
        r = np.sqrt(6.) / np.sqrt((self.indim * 2) + 1.)
        self.W1 = np.random.random_sample((self.indim, self.hdim)) * 2 * r - r
        self.W2 = np.random.random_sample((self.hdim, self.indim)) * 2 * r - r
        self.B1 = np.zeros(self.hdim)
        self.B2 = np.zeros(self.indim)
        # activations, deltas and gradients (placeholders; overwritten during training)
        self.A1 = np.zeros((self.M, self.hdim))
        self.Z1 = np.zeros((self.M, self.hdim))
        self.dW1 = np.zeros((self.indim, self.hdim))
        self.dB1 = np.zeros((self.hdim))
        self.dB2 = np.zeros((self.indim))
        self.dW2 = np.zeros((self.hdim, self.indim))
        self.A2 = np.zeros((self.M, self.indim))
        self.Z2 = np.zeros((self.M, self.indim))
        self.delta2 = np.zeros((self.indim, 1))
        self.delta1 = np.zeros((self.hdim, 1))
        # value of the cost function
        self.Jw = 0.0
        # learning rate
        self.alpha = 1.2
        # number of iteration steps
        self.steps = 30000

    def _sigmoid(self, la):
        '''
        compute the sigmoid function for an array
        of arbitrary shape and size
        '''
        return 1. / (1. + np.exp(-la))

    def backpropalgrithom(self):
        # clear the accumulated values
        self.Jw -= self.Jw
        self.dB1 -= self.dB1
        self.dB2 -= self.dB2
        self.dW1 -= self.dW1
        self.dW2 -= self.dW2
        # forward pass (over all M samples at once)
        # pre-sigmoid activation at the hidden layer
        self.Z1 = np.dot(self.X, self.W1) + np.tile(self.B1, (self.M, 1))
        # sigmoid activation of the hidden layer
        self.A1 = self._sigmoid(self.Z1)
        # pre-sigmoid activation of the output layer
        self.Z2 = np.dot(self.A1, self.W2) + np.tile(self.B2, (self.M, 1))
        # sigmoid activation of the output layer
        self.A2 = self._sigmoid(self.Z2)
        # back propagation
        self.delta2 = -(self.Y - self.A2) * (self.A2 * (1 - self.A2))
        self.Jw += ((self.Y - self.A2) * (self.Y - self.A2)).sum() / self.M
        self.delta1 = np.dot(self.W2, self.delta2.T).T * (self.A1 * (1 - self.A1))
        # calculate dW and dB
        # The [:,:,np.newaxis] broadcast multiplication is very handy and very important.
        # Take shapes [4x2] and [4x4] as an example, where the leading 4 is the number of
        # samples (equal to the input dimension in that example): what we actually want are
        # 4 products of a [2x1] matrix with a [1x4] matrix, i.e. 4 matrices of shape [2x4],
        # and broadcasting gives us exactly that (see the small demo after the class).
        self.dW2 += (self.A1[:, :, np.newaxis] * self.delta2[:, np.newaxis, :]).sum(0)
        self.dW1 += (self.X[:, :, np.newaxis] * self.delta1[:, np.newaxis, :]).sum(0)
        self.dB1 += self.delta1.sum(0)
        self.dB2 += self.delta2.sum(0)
        # update the weights
        self.W1 -= (self.alpha / self.M) * self.dW1
        self.W2 -= (self.alpha / self.M) * self.dW2
        self.B1 -= (self.alpha / self.M) * self.dB1
        self.B2 -= (self.alpha / self.M) * self.dB2

    def plainautoencoder(self, steps=None):
        if steps is None:
            steps = self.steps
        for i in range(steps):
            self.backpropalgrithom()
            if i % 100 == 0:
                print("step:%d" % i, "Jw=%f" % self.Jw)

    def validateautoencoder(self):
        # pre-sigmoid activation at the hidden layer
        self.Z1 = np.dot(self.X, self.W1) + np.tile(self.B1, (self.M, 1))
        # sigmoid activation of the hidden layer
        self.A1 = self._sigmoid(self.Z1)
        # pre-sigmoid activation of the output layer
        self.Z2 = np.dot(self.A1, self.W2) + np.tile(self.B2, (self.M, 1))
        # sigmoid activation of the output layer
        self.A2 = self._sigmoid(self.Z2)
        print('Input Value:')
        print(self.X)
        print("Layer Hidden:")
        print(self.A1)
        print("Output Layer:")
        print(self.A2)
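
As an aside, the [:,:,np.newaxis] broadcasting trick used for dW1 and dW2 above can be checked against an explicit loop with a small standalone sketch (the shapes 4, 2 and 4 below are only illustrative, matching the 4-sample, 2-hidden-unit, 4-input case mentioned in the comments):

import numpy as np

A1 = np.random.random_sample((4, 2))      # hidden activations, one row per sample
delta2 = np.random.random_sample((4, 4))  # output-layer deltas, one row per sample

# broadcasting: (4,2,1) * (4,1,4) -> (4,2,4), then sum over the sample axis -> (2,4)
dW2_broadcast = (A1[:, :, np.newaxis] * delta2[:, np.newaxis, :]).sum(0)

# the same accumulation written as an explicit loop of per-sample outer products
dW2_loop = np.zeros((2, 4))
for m in range(4):
    dW2_loop += np.outer(A1[m], delta2[m])

print(np.allclose(dW2_broadcast, dW2_loop))  # expected: True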

if __name__ == '__main__':
    #x = np.array([[0,0,0,1], [0,0,1,0], [0,1,0,0], [1,0,0,0]])
    #ae = myautoencoder(x, x, 4, 2)
    x = np.eye(8)
    ae = myautoencoder(x, x, 8, 3)
    ae.plainautoencoder(10000)
    ae.validateautoencoder()
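
If you run this, then after enough iterations A2 typically reproduces the 8x8 identity input fairly closely, and each of the 8 one-hot inputs tends to be assigned a roughly binary pattern over the 3 hidden units, which is the classic 8-3-8 encoder illustration of feature extraction. A quick way to eyeball the learned code, using only attributes that validateautoencoder already fills in (how close to binary it gets depends on the run):

# after ae.validateautoencoder(), ae.A1 holds the hidden activations;
# rounding them usually shows an (approximately) distinct 3-bit code per input
print(np.round(ae.A1))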




Reposted from: https://www.cnblogs.com/sunrisebest/p/3495826.html
