python有没有卷积神经网络库_python实现卷积神经网络

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from numpy import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mgimg
import math
import gParam
import copy
import scipy.signal as signal

13

# Creates a uniform random ndarray with values in [a, b) and shape *args.
def rand_arr(a, b, *args):
    """Return an ndarray of shape ``args`` with uniform values in [a, b).

    NOTE(review): the RNG is re-seeded with 0 on every call, so repeated
    calls with the same shape return identical arrays. That matches the
    original code (deterministic weight init); kept as-is.
    """
    np.random.seed(0)
    return np.random.rand(*args) * (b - a) + a

# Simple CNN: one convolution layer, one mean-pooling layer, one
# fully-connected layer, and a softmax output layer.
class Ccnn:
    def __init__(self, cLyNum, pLyNum, fLyNum, oLyNum):
        """Build the network.

        cLyNum -- number of convolution feature maps
        pLyNum -- number of pooling feature maps
        fLyNum -- number of fully-connected neurons
        oLyNum -- number of output classes
        Layer sizes (C_SIZE, F_NUM, P_SIZE) come from the gParam module.
        """
        self.cLyNum = cLyNum
        self.pLyNum = pLyNum
        self.fLyNum = fLyNum
        self.oLyNum = oLyNum
        self.pSize = gParam.P_SIZE
        self.yita = 0.01  # learning rate
        self.cLyBias = rand_arr(-0.1, 0.1, 1, cLyNum)
        self.fLyBias = rand_arr(-0.1, 0.1, 1, fLyNum)
        self.kernel_c = zeros((gParam.C_SIZE, gParam.C_SIZE, cLyNum))
        self.kernel_f = zeros((gParam.F_NUM, gParam.F_NUM, fLyNum))
        for i in range(cLyNum):
            self.kernel_c[:, :, i] = rand_arr(-0.1, 0.1, gParam.C_SIZE, gParam.C_SIZE)
        for i in range(fLyNum):
            self.kernel_f[:, :, i] = rand_arr(-0.1, 0.1, gParam.F_NUM, gParam.F_NUM)
        # Mean-pooling mask: every entry 1/pSize^2 so a windowed sum is a mean.
        self.pooling_a = ones((self.pSize, self.pSize)) / (self.pSize ** 2)
        self.weight_f = rand_arr(-0.1, 0.1, pLyNum, fLyNum)
        self.weight_output = rand_arr(-0.1, 0.1, fLyNum, oLyNum)

    def read_pic_data(self, path, i):
        """Read image file ``path + str(i) + gParam.FILE_TYPE`` as a float array.

        Raises Exception when the file cannot be opened.
        """
        data = np.array([])
        full_path = path + '%d' % i + gParam.FILE_TYPE
        try:
            data = mgimg.imread(full_path)  # returns an ndarray
            # Original wrote the C-style cast "(double)(data)"; in Python that
            # is simply a call to numpy's double().
            data = double(data)
        except IOError:
            raise Exception('open file error in read_pic_data():', full_path)
        return data

    def read_label(self, path):
        """Read one label per line from a text file; returns a list of strings.

        Raises Exception when the file cannot be opened.
        """
        ylab = []
        try:
            # with-block closes the file even if iteration raises
            # (original leaked the handle on a mid-read error).
            with open(path, 'r') as fobj:
                for line in fobj:
                    ylab.append(line.strip())
        except IOError:
            raise Exception('open file error in read_label():', path)
        return ylab

    # Convolution layer: "valid" 2-D cross-correlation of data with kernel.
    def convolution(self, data, kernel):
        """Return the valid cross-correlation map of ``data`` with ``kernel``."""
        data_row, data_col = shape(data)
        kernel_row, kernel_col = shape(kernel)
        m = data_row - kernel_row
        n = data_col - kernel_col
        state = zeros((m + 1, n + 1))
        for i in range(m + 1):
            for j in range(n + 1):
                temp = multiply(data[i:i + kernel_row, j:j + kernel_col], kernel)
                state[i][j] = temp.sum()
        return state

    # Pooling layer: non-overlapping mean pooling using the mask pooling_a.
    def pooling(self, data, pooling_a):
        """Mean-pool ``data`` with non-overlapping windows shaped like ``pooling_a``.

        Bug fixes vs the original: '/' gave float dimensions under Python 3,
        and the window slice was 1x1 (broadcast against the mask), so the
        "pooling" only sampled the top-left pixel of each window.
        """
        data_r, data_c = shape(data)
        p_r, p_c = shape(pooling_a)
        r0 = data_r // p_r  # integer division: number of whole windows per axis
        c0 = data_c // p_c
        state = zeros((r0, c0))
        for i in range(r0):
            for j in range(c0):
                # full p_r x p_c window weighted by the averaging mask
                temp = multiply(data[p_r * i:p_r * (i + 1), p_c * j:p_c * (j + 1)],
                                pooling_a)
                state[i][j] = temp.sum()
        return state

    # Fully-connected layer feed-forward.
    def convolution_f1(self, state_p1, kernel_f1, weight_f1):
        """Combine pooled maps with the FC weights, then convolve per neuron.

        state_p1  -- pooled feature maps, shape (m_p, n_p, pLyNum)
        kernel_f1 -- per-neuron kernels, shape (m_k, n_k, fLyNum)
        weight_f1 -- pooling->FC weights, shape (pLyNum, fLyNum)
        Returns (state_f1, state_f1_temp):
          state_f1_temp -- weighted sum of pooled maps per neuron (pre-conv)
          state_f1      -- state_f1_temp convolved with each neuron's kernel
        """
        n_p0, n_f = shape(weight_f1)        # n_p0 feature maps, n_f FC neurons
        m_p, n_p, pCnt = shape(state_p1)    # 3-D stack of pooled maps
        m_k_f1, n_k_f1, fCnt = shape(kernel_f1)
        state_f1_temp = zeros((m_p, n_p, n_f))
        state_f1 = zeros((m_p - m_k_f1 + 1, n_p - n_k_f1 + 1, n_f))
        for n in range(n_f):
            count = 0
            for m in range(n_p0):
                count = count + state_p1[:, :, m] * weight_f1[m][n]
            state_f1_temp[:, :, n] = count
            state_f1[:, :, n] = self.convolution(state_f1_temp[:, :, n],
                                                 kernel_f1[:, :, n])
        return state_f1, state_f1_temp

    # Softmax output layer.
    def softmax_layer(self, state_f1):
        """Return softmax class probabilities, shape (1, oLyNum).

        NOTE(review): state_f1 is used here as a flat vector of length
        fLyNum (dotted with weight_output) -- confirm against the caller.
        """
        output = zeros((1, self.oLyNum))
        t1 = (exp(np.dot(state_f1, self.weight_output))).sum()
        for i in range(self.oLyNum):
            t0 = exp(np.dot(state_f1, self.weight_output[:, i]))
            output[:, i] = t0 / t1
        return output

    # Backpropagation: update all weights and biases in place.
    def cnn_upweight(self, err_cost, ylab, train_data, state_c1,
                     state_s1, state_f1, state_f1_temp, output):
        """One gradient step from a single training sample.

        err_cost  -- unused here (kept for interface compatibility)
        ylab      -- integer class label
        train_data-- input image (square, side m_data)
        state_c1/state_s1 -- conv / pooled activations from the forward pass
        state_f1 / state_f1_temp -- FC activations (see convolution_f1)
        output    -- softmax probabilities, shape (1, oLyNum)
        """
        m_data, n_data = shape(train_data)
        # one-hot encode the label
        label = zeros((1, self.oLyNum))
        label[:, ylab] = 1
        delta_layer_output = output - label  # softmax + cross-entropy gradient
        weight_output_temp = copy.deepcopy(self.weight_output)
        delta_weight_output_temp = zeros((self.fLyNum, self.oLyNum))
        # --- update weight_output ---
        for n in range(self.oLyNum):
            delta_weight_output_temp[:, n] = delta_layer_output[:, n] * state_f1
        weight_output_temp = weight_output_temp - self.yita * delta_weight_output_temp

        # --- update fLyBias and kernel_f (tanh derivative = 1 - tanh^2) ---
        delta_layer_f1 = zeros((1, self.fLyNum))
        delta_bias_f1 = zeros((1, self.fLyNum))
        delta_kernel_f1_temp = zeros(shape(state_f1_temp))
        kernel_f_temp = copy.deepcopy(self.kernel_f)
        for n in range(self.fLyNum):
            count = 0
            for m in range(self.oLyNum):
                count = count + delta_layer_output[:, m] * self.weight_output[n, m]
            delta_layer_f1[:, n] = np.dot(count, (1 - np.tanh(state_f1[:, n]) ** 2))
            delta_bias_f1[:, n] = delta_layer_f1[:, n]
            delta_kernel_f1_temp[:, :, n] = delta_layer_f1[:, n] * state_f1_temp[:, :, n]
        self.fLyBias = self.fLyBias - self.yita * delta_bias_f1
        kernel_f_temp = kernel_f_temp - self.yita * delta_kernel_f1_temp

        # --- update weight_f ---
        delta_layer_f1_temp = zeros((gParam.F_NUM, gParam.F_NUM, self.fLyNum))
        delta_weight_f1_temp = zeros(shape(self.weight_f))
        weight_f1_temp = copy.deepcopy(self.weight_f)
        for n in range(self.fLyNum):
            delta_layer_f1_temp[:, :, n] = delta_layer_f1[:, n] * self.kernel_f[:, :, n]
        for n in range(self.pLyNum):
            for m in range(self.fLyNum):
                temp = delta_layer_f1_temp[:, :, m] * state_s1[:, :, n]
                delta_weight_f1_temp[n, m] = temp.sum()
        weight_f1_temp = weight_f1_temp - self.yita * delta_weight_f1_temp

        # --- update cLyBias ---
        n_delta_c = m_data - gParam.C_SIZE + 1
        delta_layer_p = zeros((gParam.F_NUM, gParam.F_NUM, self.pLyNum))
        delta_layer_c = zeros((n_delta_c, n_delta_c, self.pLyNum))
        delta_bias_c = zeros((1, self.cLyNum))
        for n in range(self.pLyNum):
            count = 0
            for m in range(self.fLyNum):
                count = count + delta_layer_f1_temp[:, :, m] * self.weight_f[n, m]
            delta_layer_p[:, :, n] = count
            # upsample through the 2x2 mean-pooling (Kronecker with ones/4),
            # then apply the tanh derivative of the conv activations.
            # NOTE(review): the 2x2 factor assumes gParam.P_SIZE == 2 -- confirm.
            delta_layer_c[:, :, n] = np.kron(delta_layer_p[:, :, n], ones((2, 2)) / 4) \
                * (1 - np.tanh(state_c1[:, :, n]) ** 2)
            delta_bias_c[:, n] = delta_layer_c[:, :, n].sum()
        self.cLyBias = self.cLyBias - self.yita * delta_bias_c

        # --- update kernel_c ---
        delta_kernel_c1_temp = zeros(shape(self.kernel_c))
        for n in range(self.cLyNum):
            # The original built two 90-degree rotations with
            # map(list, zip(*x[::1])); "[::1]" was a no-op typo for "[::-1]"
            # and map() objects break convolve2d under Python 3. The intent
            # is a 180-degree rotation before and after the valid convolution.
            delta_rot = np.rot90(delta_layer_c[:, :, n], 2)
            temp = signal.convolve2d(train_data, delta_rot, 'valid')
            delta_kernel_c1_temp[:, :, n] = np.rot90(temp, 2)
        self.kernel_c = self.kernel_c - self.yita * delta_kernel_c1_temp
        self.weight_f = weight_f1_temp
        self.kernel_f = kernel_f_temp
        self.weight_output = weight_output_temp

    # Prediction was never implemented in the original source.
    def cnn_predict(self, data):
        """Placeholder: the original returns None without computing anything."""
        return

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值