Convolutional Neural Networks (3): A Simple CNN Implementation (Partial Python Source Code)

Reposted from:

Convolutional Neural Networks (3): A Simple CNN Implementation (Partial Python Source Code) - xuanyuansen's column - CSDN.NET blog
http://blog.csdn.net/xuanyuansen/article/details/41924377


Last weekend I put together a simple convolutional neural network in Python. It contains only one convolution layer and one max-pooling layer, and the multilayer neural network after the pooling layer uses a softmax output. The experimental input is again the MNIST images. With 10 feature maps, the convolution and pooling results are shown below (they are produced by the plotting code in the __main__ block at the end of the listing).
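
As a quick sanity check on the shape arithmetic (assuming 28x28 MNIST inputs, 10 feature maps, 5x5 filters and 2x2 max-pooling, matching the configuration above), the flattened dimension fed into the MLP works out as follows; this snippet is only an illustration and mirrors the out_Di computation in simpleCNN:

# Shape arithmetic only: "valid" convolution followed by non-overlapping 2x2 max-pooling.
# Sizes assumed here: 28x28 MNIST input, k=10 feature maps, 5x5 filters.
row = col = 28
k = 10
filter_size = 5
pooling_size = 2

conv_row = row - filter_size + 1        # 24 ("valid" convolution shrinks each side by filter_size-1)
conv_col = col - filter_size + 1        # 24
pool_row = conv_row // pooling_size     # 12
pool_col = conv_col // pooling_size     # 12
out_Di = k * pool_row * pool_col        # 1440 values flattened into the softmax MLP
print(out_Di)                           # 1440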



Part of the source code is shown below:

#coding=utf-8
'''
Created on 2014-11-30
@author: Wangliaofan
'''
import numpy
import struct
import matplotlib.pyplot as plt
import math
import random
import copy
#test
from BasicMultilayerNeuralNetwork import BMNN2


def sigmoid(inX):
    if 1.0+numpy.exp(-inX)== 0.0:
        return 999999999.999999999
    return 1.0/(1.0+numpy.exp(-inX))
def difsigmoid(inX):
    return sigmoid(inX)*(1.0-sigmoid(inX))
def tangenth(inX):
    return (1.0*math.exp(inX)-1.0*math.exp(-inX))/(1.0*math.exp(inX)+1.0*math.exp(-inX))

def cnn_conv(in_image, filter_map,B,type_func='sigmoid'):
    #in_image[num,feature map,row,col]=>in_image[Irow,Icol]
    #features map[k filter,row,col]
    #type_func['sigmoid','tangenth']
    #out_feature[k filter,Irow-row+1,Icol-col+1]
    shape_image=numpy.shape(in_image)#[row,col]
    #print "shape_image",shape_image
    shape_filter=numpy.shape(filter_map)#[k filter,row,col]
    if shape_filter[1]>shape_image[0] or shape_filter[2]>shape_image[1]:
        raise Exception
    shape_out=(shape_filter[0],shape_image[0]-shape_filter[1]+1,shape_image[1]-shape_filter[2]+1)
    out_feature=numpy.zeros(shape_out)
    k,m,n=numpy.shape(out_feature)
    for k_idx in range(0,k):
        #rotate 180 to calculate conv
        c_filter=numpy.rot90(filter_map[k_idx,:,:], 2)
        for r_idx in range(0,m):
            for c_idx in range(0,n):
                #conv_temp=numpy.zeros((shape_filter[1],shape_filter[2]))
                conv_temp=numpy.dot(in_image[r_idx:r_idx+shape_filter[1],c_idx:c_idx+shape_filter[2]],c_filter)
                sum_temp=numpy.sum(conv_temp)
                if type_func=='sigmoid':
                    out_feature[k_idx,r_idx,c_idx]=sigmoid(sum_temp+B[k_idx])
                elif type_func=='tangenth':
                    out_feature[k_idx,r_idx,c_idx]=tangenth(sum_temp+B[k_idx])
                else:
                    raise Exception
    return out_feature

def cnn_maxpooling(out_feature,pooling_size=2,type_pooling="max"):
    k,row,col=numpy.shape(out_feature)
    max_index_Matirx=numpy.zeros((k,row,col))
    out_row=int(numpy.floor(row/pooling_size))
    out_col=int(numpy.floor(col/pooling_size))
    out_pooling=numpy.zeros((k,out_row,out_col))
    for k_idx in range(0,k):
        for r_idx in range(0,out_row):
            for c_idx in range(0,out_col):
                temp_matrix=out_feature[k_idx,pooling_size*r_idx:pooling_size*r_idx+pooling_size,pooling_size*c_idx:pooling_size*c_idx+pooling_size]
                out_pooling[k_idx,r_idx,c_idx]=numpy.amax(temp_matrix)
                max_index=numpy.argmax(temp_matrix)
                #print max_index
                #print max_index/pooling_size,max_index%pooling_size
                max_index_Matirx[k_idx,pooling_size*r_idx+max_index/pooling_size,pooling_size*c_idx+max_index%pooling_size]=1
    return out_pooling,max_index_Matirx

def poolwithfunc(in_pooling,W,B,type_func='sigmoid'):
    k,row,col=numpy.shape(in_pooling)
    out_pooling=numpy.zeros((k,row,col))
    for k_idx in range(0,k):
        for r_idx in range(0,row):
            for c_idx in range(0,col):
                out_pooling[k_idx,r_idx,c_idx]=sigmoid(W[k_idx]*in_pooling[k_idx,r_idx,c_idx]+B[k_idx])
    return out_pooling
#out_feature is the output of conv
def backErrorfromPoolToConv(theta,max_index_Matirx,out_feature,pooling_size=2):
    k1,row,col=numpy.shape(out_feature)
    error_conv=numpy.zeros((k1,row,col))
    k2,theta_row,theta_col=numpy.shape(theta)
    if k1!=k2:
        raise Exception
    for idx_k in range(0,k1):
        for idx_row in range( 0, row):
            for idx_col in range( 0, col):
                error_conv[idx_k,idx_row,idx_col]=\
                    max_index_Matirx[idx_k,idx_row,idx_col]*\
                    float(theta[idx_k,idx_row/pooling_size,idx_col/pooling_size])*\
                    difsigmoid(out_feature[idx_k,idx_row,idx_col])
    return error_conv

def backErrorfromConvToInput(theta,inputImage):
    k1,row,col=numpy.shape(theta)
    #print "theta",k1,row,col
    i_row,i_col=numpy.shape(inputImage)
    if row>i_row or col> i_col:
        raise Exception
    filter_row=i_row-row+1
    filter_col=i_col-col+1
    detaW=numpy.zeros((k1,filter_row,filter_col))
    #the same as 'valid' conv in MATLAB
    for k_idx in range(0,k1):
        for idx_row in range(0,filter_row):
            for idx_col in range(0,filter_col):
                subInputMatrix=inputImage[idx_row:idx_row+row,idx_col:idx_col+col]
                #print "subInputMatrix",numpy.shape(subInputMatrix)
                #rotate theta 180
                #print numpy.shape(theta)
                theta_rotate=numpy.rot90(theta[k_idx,:,:], 2)
                #print "theta_rotate",theta_rotate
                dotMatrix=numpy.dot(subInputMatrix,theta_rotate)
                detaW[k_idx,idx_row,idx_col]=numpy.sum(dotMatrix)
    detaB=numpy.zeros((k1,1))
    for k_idx in range(0,k1):
        detaB[k_idx]=numpy.sum(theta[k_idx,:,:])
    return detaW,detaB

def loadMNISTimage(absFilePathandName,datanum=60000):
    images=open(absFilePathandName,'rb')
    buf=images.read()
    index=0
    magic, numImages , numRows , numColumns = struct.unpack_from('>IIII' , buf , index)
    print magic, numImages , numRows , numColumns
    index += struct.calcsize('>IIII')
    if magic != 2051:
        raise Exception
    datasize=int(784*datanum)
    datablock=">"+str(datasize)+"B"
    #nextmatrix=struct.unpack_from('>47040000B' ,buf, index)
    nextmatrix=struct.unpack_from(datablock ,buf, index)
    nextmatrix=numpy.array(nextmatrix)/255.0
    #nextmatrix=nextmatrix.reshape(numImages,numRows,numColumns)
    #nextmatrix=nextmatrix.reshape(datanum,1,numRows*numColumns)
    nextmatrix=nextmatrix.reshape(datanum,1,numRows,numColumns)
    return nextmatrix, numImages

def loadMNISTlabels(absFilePathandName,datanum=60000):
    labels=open(absFilePathandName,'rb')
    buf=labels.read()
    index=0
    magic, numLabels  = struct.unpack_from('>II' , buf , index)
    print magic, numLabels
    index += struct.calcsize('>II')
    if magic != 2049:
        raise Exception

    datablock=">"+str(datanum)+"B"
    #nextmatrix=struct.unpack_from('>60000B' ,buf, index)
    nextmatrix=struct.unpack_from(datablock ,buf, index)
    nextmatrix=numpy.array(nextmatrix)
    return nextmatrix, numLabels

def simpleCNN(numofFilter,filter_size,pooling_size=2,maxIter=1000,imageNum=500):
    decayRate=0.01
    MNISTimage,num1=loadMNISTimage("F:\Machine Learning\UFLDL\data\common\\train-images-idx3-ubyte",imageNum)
    print num1
    row,col=numpy.shape(MNISTimage[0,0,:,:])
    out_Di=numofFilter*((row-filter_size+1)/pooling_size)*((col-filter_size+1)/pooling_size)
    MLP=BMNN2.MuiltilayerANN(1,[128],out_Di,10,maxIter)
    MLP.setTrainDataNum(imageNum)
    MLP.loadtrainlabel("F:\Machine Learning\UFLDL\data\common\\train-labels-idx1-ubyte")
    MLP.initialweights()
    #MLP.printWeightMatrix()
    rng = numpy.random.RandomState(23455)
    W_shp = (numofFilter, filter_size, filter_size)
    W_bound = numpy.sqrt(numofFilter * filter_size * filter_size)
    W_k=rng.uniform(low=-1.0 / W_bound,high=1.0 / W_bound,size=W_shp)
    B_shp = (numofFilter,)
    B= numpy.asarray(rng.uniform(low=-.5, high=.5, size=B_shp))
    cIter=0
    while cIter<maxIter:
        cIter += 1
        ImageNum=random.randint(0,imageNum-1)
        conv_out_map=cnn_conv(MNISTimage[ImageNum,0,:,:], W_k, B,"sigmoid")
        out_pooling,max_index_Matrix=cnn_maxpooling(conv_out_map,2,"max")
        pool_shape = numpy.shape(out_pooling)
        MLP_input=out_pooling.reshape(1,1,out_Di)
        #print numpy.shape(MLP_input)
        DetaW,DetaB,temperror=MLP.backwardPropogation(MLP_input,ImageNum)
        if cIter%50 ==0 :
            print cIter,"Temp error: ",temperror
        #print numpy.shape(MLP.Theta[MLP.Nl-2])
        #print numpy.shape(MLP.Ztemp[0])
        #print numpy.shape(MLP.weightMatrix[0])
        theta_pool=MLP.Theta[MLP.Nl-2]*MLP.weightMatrix[0].transpose()
        #print numpy.shape(theta_pool)
        #print "theta_pool",theta_pool
        temp=numpy.zeros((1,1,out_Di))
        temp[0,:,:]=theta_pool
        back_theta_pool=temp.reshape(pool_shape)
        #print "back_theta_pool",numpy.shape(back_theta_pool)
        #print "back_theta_pool",back_theta_pool
        error_conv=backErrorfromPoolToConv(back_theta_pool,max_index_Matrix,conv_out_map,2)
        #print "error_conv",numpy.shape(error_conv)
        #print error_conv
        conv_DetaW,conv_DetaB=backErrorfromConvToInput(error_conv,MNISTimage[ImageNum,0,:,:])
        #print "W_k",W_k
        #print "conv_DetaW",conv_DetaW
        #print "conv_DetaB",conv_DetaB
        temp=W_k- decayRate*conv_DetaW
        W_k=copy.deepcopy(temp)
        #print "W_k",W_k
        #apply the gradient step to the conv bias; flatten conv_DetaB from (k,1) to (k,) to match B
        temp = B - decayRate*conv_DetaB.flatten()
        B=copy.deepcopy(temp)
        #print "B",B
        MLP.updatePara(DetaW, DetaB, 1)
    return W_k,B,MLP
def getTrainAccuracy(numofFilter,filter_size,pooling_size,ImageNum,W_k,B,MLP):
    MNISTimage,num1=loadMNISTimage("F:\Machine Learning\UFLDL\data\common\\train-images-idx3-ubyte",ImageNum)
    MLP.setTrainDataNum(ImageNum)
    MLP.loadtrainlabel("F:\Machine Learning\UFLDL\data\common\\train-labels-idx1-ubyte")
    #MNISTlabel,num2=loadMNISTimage("F:\Machine Learning\UFLDL\data\common\\train-images-idx3-ubyte",ImageNum)
    row,col=numpy.shape(MNISTimage[0,0,:,:])
    iteration=0
    out_Di=numofFilter*((row-filter_size+1)/pooling_size)*((col-filter_size+1)/pooling_size)
    accuracycount=0
    while iteration<ImageNum:
        conv_out_map=cnn_conv(MNISTimage[iteration,0,:,:], W_k, B,"sigmoid")
        out_pooling,max_index_Matrix=cnn_maxpooling(conv_out_map,2,"max")
        #pool_shape = numpy.shape(out_pooling)
        MLP_input=out_pooling.reshape(1,1,out_Di)
        Atemp,Ztemp,errorsum=MLP.forwardPropogation(MLP_input,iteration)
        TrainPredict=Atemp[MLP.Nl-2]
        #print TrainPredict
        Plist=TrainPredict.tolist()
        LabelPredict=Plist[0].index(max(Plist[0]))
        #print "LabelPredict",LabelPredict
        #print "trainLabel",MLP.trainlabel[iteration]
        if int(LabelPredict) == int(MLP.trainlabel[iteration]):
            accuracycount += 1
        iteration += 1
        if iteration%50 ==0 :
            print iteration
    print "accuracy:", float(accuracycount)/float(ImageNum)
    return  float(accuracycount)/float(ImageNum)

if __name__ == '__main__':
    MNISTimage,num1=loadMNISTimage("F:\Machine Learning\UFLDL\data\common\\train-images-idx3-ubyte",1)
    MNISTlabel,num2=loadMNISTlabels("F:\Machine Learning\UFLDL\data\common\\train-labels-idx1-ubyte",1)
    fig1 = plt.figure("convolution")
    k=10
    filter_size=5
    rng = numpy.random.RandomState(23455)
    w_shp = (k, filter_size, filter_size)
    w_bound = numpy.sqrt(k * filter_size * filter_size)
    w_k=rng.uniform(low=-1.0 / w_bound,high=1.0 / w_bound,size=w_shp)
    B_shp = (k,)
    B= numpy.asarray(rng.uniform(low=-.5, high=.5, size=B_shp))
    #print B
    out_map=cnn_conv(MNISTimage[0,0,:,:], w_k, B,"sigmoid")
    for idx in range(0,10):
        plotwindow = fig1.add_subplot(2,5,idx+1)
        plt.imshow(out_map[idx,:,:], cmap='gray')
    #plt.show()
    fig2 = plt.figure("max-pooling")
    out_pooling,max_index=cnn_maxpooling(out_map)
    for idx in range(0,10):
        plotwindow = fig2.add_subplot(2,5,idx+1)
        plt.imshow(out_pooling[idx,:,:], cmap='gray')

    W_pool_shp = (k,)
    W_pool= numpy.asarray(rng.uniform(low=-1, high=1, size=W_pool_shp))
    B_pool_shp = (k,)
    B_pool= numpy.asarray(rng.uniform(low=-.5, high=.5, size=B_pool_shp))
    fig3 = plt.figure("pooling")
    pooling=poolwithfunc(out_pooling, W_pool, B_pool)
    for idx in range(0,10):
        plotwindow = fig3.add_subplot(2,5,idx+1)
        plt.imshow(pooling[idx,:,:], cmap='gray')
    #plt.show()

    W_k,B,MLP=simpleCNN(5,5,2,2000,10000)
    #MLP.printWeightMatrix()
    accu=getTrainAccuracy(5,5,2,4000,W_k,B,MLP)
    print accu
    pass
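
A note on the backward pass in the listing above: cnn_maxpooling records, in max_index_Matirx, which position won each pooling window, and backErrorfromPoolToConv routes the error from the pooled layer back only to those winning positions (additionally scaled by difsigmoid of the convolution output). The following is a minimal standalone sketch of that routing idea on a made-up 4x4 feature map; the difsigmoid factor is left out here:

# Routing the pooled-layer error back through 2x2 max-pooling:
# only the position that won each pooling window receives the upstream error.
import numpy

feature = numpy.array([[1., 3., 0., 2.],
                       [4., 2., 1., 5.],
                       [0., 1., 2., 0.],
                       [3., 0., 1., 1.]])
upstream = numpy.array([[0.5, -0.2],
                        [0.1,  0.3]])   # error arriving at the 2x2 pooled layer
pool = 2
routed = numpy.zeros_like(feature)      # error pushed back to the conv layer
for r in range(feature.shape[0] // pool):
    for c in range(feature.shape[1] // pool):
        window = feature[pool*r:pool*r+pool, pool*c:pool*c+pool]
        i = numpy.argmax(window)        # flat index of the max inside this window
        # i//pool and i%pool convert the flat index back to row/col within the window
        routed[pool*r + i // pool, pool*c + i % pool] = upstream[r, c]
print(routed)
# only the four per-window maxima (4, 5, 3 and 2) receive a non-zero error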
