Deep Learning with CIFAR-10 ⑥ — 10-Class Classification (cifar_10_inception)

This code is similar to the previous posts; only the model differs. Here an Inception-style model is used. For detailed comments, see Deep Learning with CIFAR-10 ④ — 10-Class Classification (cifar_10_Alexnet) or ⑤; they are the same.

import tensorflow as tf
import os
import pickle
import numpy as np

cifar_dir = '../../datas/cifar-10-batches-py'

# Load one pickled CIFAR-10 batch file and return (data, labels).
def load_data(filename):
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
        return data[b'data'], data[b'labels']

# Wraps the CIFAR-10 batch files: loads them, scales pixels, and serves
# (optionally shuffled) mini-batches.
class Cifar_Data:

    def __init__(self,filenames,need_shuffle):
        all_data = []
        all_labels = []

        for filename in filenames:
            data , labels = load_data(filename)
            all_data.append(data)
            all_labels.append(labels)

        self.data = np.vstack(all_data)
        # Scale pixel values from [0, 255] to [-1, 1].
        self.data = self.data / 127.5 - 1
        self.labels = np.hstack(all_labels)

        print(self.data.shape)
        print(self.labels.shape)

        self.num_examples = self.data.shape[0]
        self.indicator = 0
        self.need_shuffle = need_shuffle

        if self.need_shuffle:
            self.shuffle_data()

    # Shuffle data and labels with the same random permutation.
    def shuffle_data(self):
        p = np.random.permutation(self.num_examples)
        self.data = self.data[p]
        self.labels = self.labels[p]

    # Return the next mini-batch, reshuffling once the data is exhausted.
    def next_batch(self, batch_size):
        end_indicator = self.indicator + batch_size
        if end_indicator > self.num_examples:
            if self.need_shuffle:
                self.shuffle_data()
                self.indicator = 0
                end_indicator = batch_size
            else:
                raise Exception('no more examples')

        if end_indicator > self.num_examples:
            raise Exception('batch size is larger than the number of examples')

        batch_data = self.data[self.indicator:end_indicator]
        batch_labels = self.labels[self.indicator:end_indicator]
        self.indicator = end_indicator
        return batch_data,batch_labels

train_filenames = [os.path.join(cifar_dir, 'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(cifar_dir,'test_batch')]

train_data = Cifar_Data(train_filenames,True)
test_data = Cifar_Data(test_filenames,False)
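
# Illustrative sanity check (not in the original post): peek at one mini-batch
# to confirm shapes. Each row is a flattened 32x32x3 image scaled to [-1, 1].
# Note this advances the dataset's internal indicator by 4 examples.
_demo_x, _demo_y = train_data.next_batch(4)
print(_demo_x.shape, _demo_y.shape)  # expected: (4, 3072) (4,)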

# Inception block: three parallel convolutions plus a max-pooling branch.
def inception_block(x, output_path):
    conv1_1 = tf.layers.conv2d(x, output_path[0], (1, 1), padding='same', activation=tf.nn.relu)
    conv3_3 = tf.layers.conv2d(x, output_path[1], (3, 3), padding='same', activation=tf.nn.relu)
    conv5_5 = tf.layers.conv2d(x, output_path[2], (5, 5), padding='same', activation=tf.nn.relu)
    max_pooling = tf.layers.max_pooling2d(x, (2, 2), (2, 2), padding='same')

    # Get the spatial dimensions of the pooled output and of the input.
    max_pooling_shape = max_pooling.get_shape().as_list()[1:]
    input_shape = x.get_shape().as_list()[1:]

    # Compute the width/height padding needed to restore the input size,
    # e.g. a 16x16 input pooled to 8x8 needs (16 - 8) // 2 = 4 on each side.
    w_shape = (input_shape[0] - max_pooling_shape[0]) // 2
    h_shape = (input_shape[1] - max_pooling_shape[1]) // 2

    # Pad the pooled feature map back to the input's spatial size so it
    # matches the three convolution branches.
    pad_pooling = tf.pad(max_pooling, [
        [0, 0], [w_shape, w_shape], [h_shape, h_shape], [0, 0]
    ])

    # Concatenate the four branches along the channel axis.
    concat_layers = tf.concat([conv1_1, conv3_3, conv5_5, pad_pooling], axis=3)

    return concat_layers
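
# Illustrative check (an assumption, not from the original post): for an input
# with C channels and output_path = [16, 16, 16], the block yields
# 16 + 16 + 16 + C output channels at the input's spatial size, because the
# stride-2 max-pool branch is padded back up to the input's height and width.
_check = inception_block(tf.zeros([1, 8, 8, 4]), [16, 16, 16])
print(_check.get_shape())  # expected: (1, 8, 8, 52)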

# Placeholders: flattened 32x32x3 images and integer class labels.
x = tf.placeholder(tf.float32, [None, 3072])
x_img = tf.reshape(x, [-1, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])

# One plain convolution layer followed by max pooling.
conv1 = tf.layers.conv2d(x_img, 32, (3, 3), padding='same', activation=tf.nn.relu)
pooling1 = tf.layers.max_pooling2d(conv1, (2, 2), (2, 2), padding='same')

# Apply the Inception block twice, then pool.
inception_2a = inception_block(pooling1, [16, 16, 16])
inception_2b = inception_block(inception_2a, [16, 16, 16])
pooling2 = tf.layers.max_pooling2d(inception_2b, (2, 2), (2, 2), padding='same')

# Apply the Inception block twice more, then pool.
inception_3a = inception_block(pooling2, [16, 16, 16])
inception_3b = inception_block(inception_3a, [16, 16, 16])
pooling3 = tf.layers.max_pooling2d(inception_3b, (2, 2), (2, 2), padding='same')

# Flatten the feature map into a vector.
flatten = tf.layers.flatten(pooling3)
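
# Shape trace (illustrative, not in the original post): channel counts grow at
# each concat because the padded pooling branch carries the input's channels
# through unchanged.
print(pooling1.get_shape())      # (?, 16, 16, 32)
print(inception_2b.get_shape())  # (?, 16, 16, 128) = 3*16 + (3*16 + 32)
print(pooling3.get_shape())      # (?, 4, 4, 224)
print(flatten.get_shape())       # (?, 3584) = 4 * 4 * 224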

# Fully connected layer producing the 10 class logits.
a = tf.layers.dense(flatten, 10)

cost = tf.losses.sparse_softmax_cross_entropy(y, a)

optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

pre = tf.argmax(a, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(pre, y), tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Mini-batch size.
batch_size = 50

# The network is fairly deep, so only 500 training steps are used here.
for i in range(1, 501):
    batch_x, batch_y = train_data.next_batch(batch_size)

    # Fetch into `acc` so the logits tensor `a` is not shadowed.
    c, acc, _ = sess.run([cost, accuracy, optimizer], feed_dict={x: batch_x, y: batch_y})

    if i % 100 == 0:
        print(i, c, acc)

    if i % 250 == 0:
        # Every 250 steps, average accuracy over 100 fresh test batches.
        all_acc = []
        for k in range(1, 101):
            batch_x1, batch_y1 = test_data.next_batch(batch_size)
            a1 = sess.run(accuracy, feed_dict={x: batch_x1, y: batch_y1})
            all_acc.append(a1)
        print(np.mean(all_acc))

Training output:

100 1.9033884 0.3
200 1.6314828 0.38
0.54
300 1.6455951 0.44
400 1.5996035 0.42
500 1.6855372 0.44
0.5
Because the network is fairly deep and only a small number of training steps were used here, the accuracy is relatively low.