使用深度学习的inception卷积神经网络训练CIFAR10数据集代码实现

inception网络结构

（此处为 Inception 模块结构示意图：多个并行卷积分支与池化分支在通道维度上拼接）

  • 在同一层内使用多种尺寸的卷积核（1×1、3×3、5×5）和池化对输入并行处理，再把各分支输出在通道维度上拼接起来
  • 这样做的好处是：1×1 卷积可以先压缩通道从而降低计算量，而多尺寸卷积核可以同时提取不同尺度的特征

代码实现

# cifar-10-batches-py
import tensorflow as tf
import os
import pickle
import numpy as np

# Fix the graph-level random seed so weight init and shuffling are reproducible.
tf.set_random_seed(1)

# Locate the extracted CIFAR-10 "python version" batch files.
cifar_dir = './cifar-10-batches-py'
print(os.listdir(cifar_dir))

# Five training batches (data_batch_1..data_batch_5) and one test batch.
train_filenames = [os.path.join(cifar_dir,'data_batch_%d'%i) for i in range(1,6)]
test_filenames = [os.path.join(cifar_dir,'test_batch')]


# 数据载入
def load_data(filename):
    """Read one CIFAR-10 batch file and return its (data, labels) pair.

    The pickle holds a dict keyed by byte strings; b'data' is the image
    array (presumably N x 3072 uint8 — matches the placeholder shape used
    later) and b'labels' the matching label list.
    """
    with open(filename, 'rb') as fh:
        batch = pickle.load(fh, encoding='bytes')
    return batch[b'data'], batch[b'labels']


# 构建数据处理类
class CifarData:
    """Mini-batch provider over one or more CIFAR-10 batch files."""

    def __init__(self, filenames, need_shuffle):
        """Load every file, scale pixels to [-1, 1], optionally shuffle.

        filenames    -- list of CIFAR-10 pickle file paths
        need_shuffle -- True for training data (reshuffle on every epoch)
        """
        data_chunks, label_chunks = [], []
        for path in filenames:
            chunk, chunk_labels = load_data(path)
            data_chunks.append(chunk)
            label_chunks.append(chunk_labels)

        # uint8 pixels in [0, 255] -> floats in [-1, 1]
        self._data = np.vstack(data_chunks) / 127.5 - 1
        self._labels = np.hstack(label_chunks)
        self._index = 0
        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        if self._need_shuffle:
            self.shuffle_data()

    def shuffle_data(self):
        """Apply one random permutation to data and labels in lockstep."""
        order = np.random.permutation(self._num_examples)
        self._data, self._labels = self._data[order], self._labels[order]

    def next_batch(self, batch_size):
        """Return the next (data, labels) slice of size batch_size.

        When the data is exhausted, a shuffling provider reshuffles and
        restarts from the top; a non-shuffling provider raises instead.
        Also raises if batch_size exceeds the total number of examples.
        """
        stop = self._index + batch_size
        if stop > self._num_examples:
            if not self._need_shuffle:
                raise Exception('没有过大的样本')
            self.shuffle_data()
            self._index = 0
            stop = batch_size
        if stop > self._num_examples:
            raise Exception('尺寸过大')

        batch_data = self._data[self._index:stop]
        batch_labels = self._labels[self._index:stop]
        self._index = stop
        return batch_data, batch_labels


# Instantiate the providers: shuffle the training set, keep test order fixed.
train_data = CifarData(train_filenames,True)
test_data = CifarData(test_filenames,False)


def inception_block(x, output_for_path, name):
    """One Inception module: parallel 1x1 / 3x3 / 5x5 convolutions plus a
    2x2 max-pool branch, concatenated along the channel axis.

    x               -- NHWC input tensor
    output_for_path -- three channel counts, one per conv branch
    name            -- variable scope holding this block's weights
    """
    with tf.variable_scope(name):
        branch1 = tf.layers.conv2d(x, output_for_path[0], (1, 1), (1, 1),
                                   padding='same', activation=tf.nn.relu,
                                   name='conv1_1')
        branch3 = tf.layers.conv2d(x, output_for_path[1], (3, 3), (1, 1),
                                   padding='same', activation=tf.nn.relu,
                                   name='conv3_3')
        branch5 = tf.layers.conv2d(x, output_for_path[2], (5, 5), (1, 1),
                                   padding='same', activation=tf.nn.relu,
                                   name='conv5_5')
        pooled = tf.layers.max_pooling2d(x, (2, 2), (2, 2),
                                         name='max_pooling')

    # The stride-2 pool halves H and W; zero-pad it back to the input's
    # spatial size so all four branches can be concatenated.
    pooled_hw = pooled.get_shape().as_list()[1:]
    input_hw = x.get_shape().as_list()[1:]
    pad_h = (input_hw[0] - pooled_hw[0]) // 2
    pad_w = (input_hw[1] - pooled_hw[1]) // 2

    pooled_padded = tf.pad(pooled,
                           [[0, 0],
                            [pad_h, pad_h],
                            [pad_w, pad_w],
                            [0, 0]])

    # Stack the branch outputs along the channel dimension (NHWC -> axis 3).
    return tf.concat([branch1, branch3, branch5, pooled_padded], axis=3)


# --- Model definition ---
# Inputs: flattened 3072-value CIFAR images and integer class labels.
X = tf.placeholder(tf.float32,shape=[None,3072])
Y = tf.placeholder(tf.int64,shape=[None])
# Reshape to N,3,32,32 then transpose to NHWC, the layout tf.layers expects.
X_img = tf.reshape(X,[-1,3,32,32])
X_img = tf.transpose(X_img,perm=[0,2,3,1])

# Stem: one plain 3x3 conv + 2x2 max-pool before the inception stages.
conv1_1 = tf.layers.conv2d(X_img,32,(3,3),(1,1),padding='same',activation=tf.nn.relu,name='conv1_1')
pooling1 = tf.layers.max_pooling2d(conv1_1,(2,2),(2,2))

# Stage 2: two inception blocks (16 channels per conv branch), then pool.
incption2a = inception_block(pooling1,[16,16,16],name='inception2a')
incption2b = inception_block(incption2a,[16,16,16],name='inception2b')
pooling2 = tf.layers.max_pooling2d(incption2b,(2,2),(2,2))

# Stage 3: same structure as stage 2.
incption3a = inception_block(pooling2,[16,16,16],name='inception3a')
incption3b = inception_block(incption3a,[16,16,16],name='inception3b')
pooling3 = tf.layers.max_pooling2d(incption3b,(2,2),(2,2))

flatten = tf.layers.flatten(pooling3)

# Final 10-way linear classifier producing raw logits (softmax is in the loss).
y_ = tf.layers.dense(flatten,10)

# Cross-entropy loss on the raw logits against integer labels.
loss = tf.losses.sparse_softmax_cross_entropy(logits=y_,labels=Y)

# Predicted class = argmax over the 10 logits.
predict = tf.argmax(y_,1)
# Fraction of correct predictions in the batch.
accuracy = tf.reduce_mean(tf.cast(tf.equal(predict,Y),dtype=tf.float32))

# Adam optimizer with learning rate 0.001.
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)

# Hyperparameters.
batch_size = 20
train_steps = 10000
test_steps = 100

# Launch the graph and run the training loop.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(train_steps):
        # One optimization step on a fresh training mini-batch.
        x_train,y_train = train_data.next_batch(batch_size)
        los,acc,_ = sess.run([loss,accuracy,train_op],feed_dict={X:x_train,Y:y_train})
        if (i+1) % 500 == 0:
            print('批次',i+1)
            print('代价',los)
            print('准确率',acc)

        # Periodically evaluate on a fresh pass over the test set.
        if (i+1) % 5000 == 0:
            test_data = CifarData(test_filenames,False)
            all_acc = []
            for j in range(test_steps):
                x_test,y_test = test_data.next_batch(batch_size)
                accs = sess.run([accuracy],feed_dict={X:x_test,Y:y_test})
                all_acc.append(accs)
            # BUG FIX: the original ran sess.run(tf.reduce_mean(all_acc)) here,
            # which adds new nodes to the default graph on EVERY evaluation —
            # a slow graph-growth memory leak. Averaging the already-fetched
            # Python floats with NumPy yields the same number without touching
            # the graph.
            print('测试集准确率',np.mean(all_acc))

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值