深度的卷积神经网络CNN(MNIST数据集示例)

        前面篇幅介绍的全连接层和简单卷积神经网络,层数都比较少,现在来加多几层,对于加深了的深度神经网络,其实就是我们常说的深度学习,类似搭积木,只需要叠加层即可。现在来构建一个深度的CNN,这里使用的卷积层全是3x3的小型滤波器,特点是随着层的加深,通道数在变大,然后经过池化层逐渐减小中间数据的空间大小。
我们还是使用MNIST手写数据集来实现这个深度CNN,网络结构如下:

输入图片-->Conv-->ReLU-->Conv-->ReLU-->Pool-->Conv-->ReLU-->Conv-->ReLU-->Pool-->Conv-->ReLU-->Conv-->ReLU-->Pool-->Affine-->ReLU-->Dropout-->Affine-->Dropout-->Softmax-->分类输出

deepconv.py

import numpy as np
import pickle
from collections import OrderedDict
from common.layers import *

class DeepConvNet:
    '''Deep convolutional network built from small 3x3 filters.

    Layer structure (6 conv layers + 3 pooling layers; dropout after the
    fully-connected layers):
      conv-relu-conv-relu-pool-       [16 filters]
      conv-relu-conv-relu-pool-       [32 filters]
      conv-relu-conv-relu-pool-       [64 filters]
      affine-relu-dropout-affine-dropout-softmax

    The channel count grows with depth (16 -> 32 -> 64) while each 2x2
    pooling layer halves the spatial size of the intermediate data.
    '''

    # Indices into self.layers of the layers that own weights/biases
    # (the 6 Convolution layers and the 2 Affine layers).  Defined once
    # so gradient() and load_params() cannot drift out of sync.
    PARAM_LAYER_IDXS = (0, 2, 5, 7, 10, 12, 15, 18)

    def __init__(self, input_dim=(1, 28, 28),
                 conv1={'filterNum':16,'filterSize':3,'pad':1,'stride':1},
                 conv2={'filterNum':16,'filterSize':3,'pad':1,'stride':1},
                 conv3={'filterNum':32,'filterSize':3,'pad':1,'stride':1},
                 # pad=2 (not 1) grows the 14x14 map to 16x16 so the two
                 # remaining 2x2 pools end at 4x4, matching W7's 64*4*4 fan-in.
                 conv4={'filterNum':32,'filterSize':3,'pad':2,'stride':1},
                 conv5={'filterNum':64,'filterSize':3,'pad':1,'stride':1},
                 conv6={'filterNum':64,'filterSize':3,'pad':1,'stride':1},
                 hiddenSize=50, outputSize=10):
        # NOTE(review): the conv* defaults are mutable dicts.  They are never
        # mutated here, but callers overriding them should pass fresh dicts.
        #
        # Fan-in of each weighted layer (in_channels * filter_h * filter_w for
        # the conv layers).  Hard-coded for the default configuration above:
        # update this array if filter sizes/counts are changed.
        pre_n=np.array([1*3*3,16*3*3,16*3*3,32*3*3,32*3*3,64*3*3,64*4*4,hiddenSize])
        # He initialization scale sqrt(2/fan_in) — appropriate because every
        # activation in this network is ReLU.
        weight_inits=np.sqrt(2.0/pre_n)

        self.params={}
        pre_channel_n=input_dim[0]  # channel count; updated after every conv layer
        for i,conv in enumerate([conv1,conv2,conv3,conv4,conv5,conv6]):
            self.params['W'+str(i+1)]=weight_inits[i]*np.random.randn(conv['filterNum'],pre_channel_n,conv['filterSize'],conv['filterSize'])
            self.params['b'+str(i+1)]=np.zeros(conv['filterNum'])
            pre_channel_n=conv['filterNum']
        # Fully-connected layers: flattened 64x4x4 feature map -> hidden -> output.
        self.params['W7']=weight_inits[6]*np.random.randn(64*4*4,hiddenSize)
        self.params['b7']=np.zeros(hiddenSize)
        self.params['W8']=weight_inits[7]*np.random.randn(hiddenSize,outputSize)
        self.params['b8']=np.zeros(outputSize)

        # Build the 21 layers.  The order here must stay consistent with
        # PARAM_LAYER_IDXS above.
        self.layers=[]
        self.layers.append(Convolution(self.params['W1'],self.params['b1'],conv1['stride'],conv1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'],self.params['b2'],conv2['stride'],conv2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2,pool_w=2,stride=2))
        self.layers.append(Convolution(self.params['W3'],self.params['b3'],conv3['stride'],conv3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'],self.params['b4'],conv4['stride'],conv4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2,pool_w=2,stride=2))
        self.layers.append(Convolution(self.params['W5'],self.params['b5'],conv5['stride'],conv5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'],self.params['b6'],conv6['stride'],conv6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2,pool_w=2,stride=2))
        self.layers.append(Affine(self.params['W7'],self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'],self.params['b8']))
        self.layers.append(Dropout(0.5))

        self.last_layer=SoftmaxWithLoss()

    def predict(self,x,train_flg=False):
        """Run a forward pass and return the raw class scores.

        train_flg switches the Dropout layers between training mode
        (random unit dropping) and inference mode.
        """
        for layer in self.layers:
            if isinstance(layer,Dropout):  # only Dropout needs the training flag
                x=layer.forward(x,train_flg)
            else:
                x=layer.forward(x)
        return x

    def loss(self,x,t):
        """Return the softmax cross-entropy loss for batch x / labels t (training mode)."""
        y=self.predict(x,train_flg=True)
        return self.last_layer.forward(y,t)

    def accuracy(self,x,t,batch_size=100):
        """Return classification accuracy, evaluated in mini-batches.

        NOTE(review): any trailing partial batch (x.shape[0] % batch_size
        samples) is skipped, while the denominator is still x.shape[0].
        """
        if t.ndim!=1:t=np.argmax(t,axis=1)  # one-hot labels -> class indices
        acc=0.0
        for i in range(int(x.shape[0]/batch_size)):
            tx=x[i*batch_size:(i+1)*batch_size]
            tt=t[i*batch_size:(i+1)*batch_size]
            y=self.predict(tx,train_flg=False)
            y=np.argmax(y,axis=1)
            acc+=np.sum(y==tt)
        return acc/x.shape[0]

    def gradient(self,x,t):
        """Backpropagate and return gradients as {'W1':...,'b1':...,...,'W8':...,'b8':...}."""
        #forward
        self.loss(x,t)
        #backward
        dout=self.last_layer.backward(1)
        for layer in reversed(self.layers):
            dout=layer.backward(dout)
        # Collect gradients from the layers that own parameters.
        grads={}
        for i,layer_i in enumerate(self.PARAM_LAYER_IDXS):
            grads['W'+str(i+1)]=self.layers[layer_i].dW
            grads['b'+str(i+1)]=self.layers[layer_i].db
        return grads

    def save_params(self,fname='params.pkl'):
        """Pickle a shallow copy of the parameter dict to *fname*."""
        with open(fname,'wb') as f:
            pickle.dump(dict(self.params),f)

    def load_params(self,fname='params.pkl'):
        """Load parameters from *fname* and push them into the layers.

        SECURITY: pickle.load can execute arbitrary code — only load
        parameter files from trusted sources.
        """
        with open(fname,'rb') as f:
            params=pickle.load(f)
        for k,v in params.items():
            self.params[k]=v
        for i,layer_i in enumerate(self.PARAM_LAYER_IDXS):
            self.layers[layer_i].W=self.params['W'+str(i+1)]
            self.layers[layer_i].b=self.params['b'+str(i+1)]

接下来训练网络、测试精度并保存学习后的参数,代码如下:

import numpy as np
from dataset.mnist import load_mnist
from deepconv import DeepConvNet
from common.trainer import Trainer

# Load the MNIST dataset, keeping the (1, 28, 28) image shape for the conv layers.
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

# Build the deep CNN and train it with Adam for 20 epochs.
net = DeepConvNet()
trainer = Trainer(net, x_train, t_train, x_test, t_test,
                  epochs=20, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# Persist the learned weights/biases so they can be reloaded later.
net.save_params('DeepCNN_Params.pkl')
print('保存参数成功!')

        上面代码的训练大概花费了6~7个小时(本人电脑配置一般),接下来我们直接加载训练完成后保存的权重偏置参数pkl文件,看看这个深度CNN的精度能达到多少,并查看20张没有被正确识别的数字图片有什么特征。

import numpy as np
import matplotlib.pyplot as plt
from deepconv import DeepConvNet
from dataset.mnist import load_mnist

# Load the MNIST dataset.
# Shapes: ((60000, 1, 28, 28), (60000,)) ((10000, 1, 28, 28), (10000,))
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

# Rebuild the deep CNN and restore the trained weights/biases from disk.
network = DeepConvNet()
network.load_params('DeepCNN_Params.pkl')

print('正在计算识别的精度......')
batch = 100
predicted_labels = []   # one (100,) prediction array per batch
correct = 0.0
for b in range(x_test.shape[0] // batch):
    imgs = x_test[b * batch:(b + 1) * batch]     # images (100, 1, 28, 28)
    labels = t_test[b * batch:(b + 1) * batch]   # ground-truth labels (100,)
    scores = network.predict(imgs, train_flg=False)
    preds = np.argmax(scores, axis=1)            # best-scoring class per image
    predicted_labels.append(preds)
    correct += np.sum(preds == labels)           # accumulate correct predictions
print('测试的精度:' + str(correct / x_test.shape[0]))

# Flatten the per-batch predictions into one (10000,) array.
predicted_labels = np.array(predicted_labels)
predicted_labels = predicted_labels.flatten()

# Show the first 20 misclassified digits in a 4x5 grid.
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.2, wspace=0.2)
errors = {}
panel = 1
for idx, is_correct in enumerate(predicted_labels == t_test):
    if is_correct:
        continue
    ax = fig.add_subplot(4, 5, panel, xticks=[], yticks=[])
    ax.imshow(x_test[idx].reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
    errors[panel] = (t_test[idx], predicted_labels[idx])  # (true label, predicted)
    panel += 1
    if panel > 20:
        break
print("识别错误的数字对:{0}".format(errors))
plt.show()

正在计算识别的精度......
测试的精度:0.9929
识别错误的数字对:{1: (4, 2), 2: (6, 0), 3: (8, 9), 4: (7, 9), 5: (8, 2), 6: (5, 3), 7: (7, 9), 8: (7, 9), 9: (4, 2), 10: (8, 9), 11: (6, 5), 12: (7, 1), 13: (4, 6), 14: (7, 2), 15: (9, 4), 16: (4, 9), 17: (7, 3), 18: (7, 9), 19: (4, 9), 20: (7, 9)}

        我们发现精度又提高了,基本在99.3%左右,有时会达到99.4%以上,这已经是很不错的识别精度了。观察那些没有被正确识别的数字图片,其实有时连我们自己也有点分辨不清楚,比如第二个,正确数字是6,却容易看成是0——可见剩余的识别错误大多是连人眼也容易混淆的样本,说明深度网络的识别能力已经非常接近人的水平。

ValueError: shapes (100,576) and (1024,50) not aligned: 576 (dim 1) != 1024 (dim 0)
这个错误是dot点积运算时形状不匹配导致的:我曾把conv4 = {'filterNum':32, 'filterSize':3, 'pad':2, 'stride':1}中的pad填充误写成了1,这样第三次池化后的特征图是3x3(展平后为100x576)而不是4x4(展平后为100x1024),与全连接层权重W7的形状(1024,50)无法对齐;pad=2会把14x14扩成16x16,经过后续两次池化后恰好得到4x4。

DeepCNN_Params.pkl文件下载地址

评论 7
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

寅恪光潜

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值