Deep Learning on CIFAR-10, Part 8: Improving Accuracy with Data Augmentation (87%)

This code builds on the earlier CIFAR-10 posts and optimizes the input data (random flips, brightness adjustment, contrast changes, per-image whitening, batch-norm use of mean/variance statistics, checkpoint saving, and so on) to improve accuracy.

import tensorflow as tf
import numpy as np
import os
import pickle
CIFAR_DIR = "../../datas/cifar-10-batches-py"
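# Each CIFAR-10 batch file is a pickled dict: b'data' is a (10000, 3072) uint8
# array (each row stores the 1024 red, 1024 green and 1024 blue pixel values of
# one 32x32 image) and b'labels' is a list of 10000 integers in [0, 9].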
def load_data(filename):
    with open(filename,'rb') as f:
        data = pickle.load(f,encoding='bytes')
        return data[b'data'],data[b'labels']

class CifarData:
    def __init__(self,filenames,need_shuffle):
        all_data = []
        all_label = []
        for filename in filenames:
            data,labels = load_data(filename)
            all_data.append(data)
            all_label.append(labels)
        self._data = np.vstack(all_data)
        self._labels = np.hstack(all_label)

        self.start = 0
        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        if self._need_shuffle:
            self._shuffle_data()
    def _shuffle_data(self):
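        # one shared permutation keeps each image aligned with its label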
        p = np.random.permutation(self._num_examples)
        self._data = self._data[p]
        self._labels = self._labels[p]
    def next_batch(self,batch_size):
        end = self.start + batch_size
        if end > self._num_examples:
            if self._need_shuffle:
                self._shuffle_data()
                self.start = 0
                end = batch_size
            else:
                raise Exception('no more examples to fetch')
        if end > self._num_examples:
            raise Exception('batch size is larger than the number of examples')
        batch_data = self._data[self.start:end]
        batch_labels = self._labels[self.start:end]
        self.start = end
        return batch_data,batch_labels
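
# next_batch wraps around with a reshuffle once the training data are
# exhausted; without shuffling (test data) it raises when the epoch ends.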
train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]

train_data = CifarData(train_filenames, True)
test_data = CifarData(test_filenames, False)

batch_size = 20
x = tf.placeholder(tf.float32, [None, 3072])

y = tf.placeholder(tf.int64, [None])
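# Each row of x is one image stored channel-first; reshape to (N, 3, 32, 32)
# and transpose to channel-last (N, 32, 32, 3), the layout the tf.image and
# tf.layers ops below expect.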
x_image = tf.reshape(x,[-1,3,32,32])
x_image = tf.transpose(x_image, perm=[0, 2, 3, 1])

x_image_arr = tf.split(x_image,num_or_size_splits=batch_size,axis=0)
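# tf.split yields exactly batch_size tensors of shape (1, 32, 32, 3), so every
# feed of x must contain exactly batch_size images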

result_x_image_arr = []

# Data augmentation: apply random per-image transforms one image at a time
for x_single_image in x_image_arr:
    x_single_image = tf.reshape(x_single_image,[32,32,3])
    # random horizontal flip
    data_aug_1 = tf.image.random_flip_left_right(x_single_image)
    # random brightness adjustment
    data_aug_2 = tf.image.random_brightness(data_aug_1,max_delta=63)
    # random contrast adjustment
    data_aug_3 = tf.image.random_contrast(data_aug_2,lower=0.2,upper=1.8)
    # per-image whitening (zero mean, unit variance)
    data_aug_4 = tf.image.per_image_standardization(data_aug_3)
    x_single_image = tf.reshape(data_aug_4,[1,32,32,3])
    result_x_image_arr.append(x_single_image)

# Concatenate the augmented single images back into one batch
result_x_images = tf.concat(result_x_image_arr,axis=0)

# Rescale to roughly [-1, 1]
normal_result_x_images = result_x_images / 127.5 - 1
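# Note: per_image_standardization above already gives each image zero mean and
# unit variance, so this extra 255-range rescale mostly just shifts and shrinks
# the already-standardized values.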

# Convolution block: conv2d + batch normalization + activation
def conv_wrapper(inputs,name,is_training,output_channel,kernel_size=(3,3),
                 activation=tf.nn.relu,padding='same'):
    with tf.name_scope(name):
        conv2d = tf.layers.conv2d(inputs,output_channel,kernel_size,padding=padding,
                                  activation=None,name=name + '/conv2d',
                                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))
        # truncated_normal_initializer draws from a normal distribution with the
        # given mean and standard deviation; samples more than two standard
        # deviations from the mean are discarded and redrawn.
        bn = tf.layers.batch_normalization(conv2d,training=is_training)
        # training tells the batch-normalization layer whether it is in training
        # mode (update and use the batch mean/variance) or inference mode (use
        # the accumulated moving statistics).
        return activation(bn)

# Pooling block: 2x2 max-pooling with stride 2
def pooling_wrapper(inputs,name):
    return tf.layers.max_pooling2d(inputs,(2,2),(2,2),name=name)
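
# NOTE: is_training is passed as a literal True to every conv block below, so
# batch norm keeps using batch statistics even during the test evaluation;
# feeding a tf.placeholder(tf.bool) would be the cleaner way to switch modes.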

conv1_1 = conv_wrapper(normal_result_x_images,'conv1_1',True,64)
conv1_2 = conv_wrapper(conv1_1,'conv1_2',True,64)
conv1_3 = conv_wrapper(conv1_2,'conv1_3',True,64)
pooling1 = pooling_wrapper(conv1_3,'pool1')

conv2_1 = conv_wrapper(pooling1, 'conv2_1',True,128)
conv2_2 = conv_wrapper(conv2_1, 'conv2_2',True,128)
conv2_3 = conv_wrapper(conv2_2, 'conv2_3',True,128)
pooling2 = pooling_wrapper(conv2_3, 'pool2')

conv3_1 = conv_wrapper(pooling2, 'conv3_1',True,256)
conv3_2 = conv_wrapper(conv3_1, 'conv3_2',True,256)
conv3_3 = conv_wrapper(conv3_2, 'conv3_3',True,256)
pooling3 = pooling_wrapper(conv3_3, 'pool3')
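
# Each pooling halves the spatial size: 32 -> 16 -> 8 -> 4, so pooling3 has
# shape (N, 4, 4, 256) and flattens to 4*4*256 = 4096 features.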

# Flatten
flatten = tf.layers.flatten(pooling3)

# Fully connected output layer (10 class logits)
y_ = tf.layers.dense(flatten, 10)

# Loss: softmax cross-entropy against the integer labels
loss = tf.losses.sparse_softmax_cross_entropy(labels=y,logits=y_)

# Optional: L2 weight decay on the conv/dense kernels (see the sketch below)
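# A minimal sketch (assumed scale 1e-4, applied to every conv/dense kernel),
# left commented out so it does not alter this run:
# l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()
#                if 'kernel' in v.name])
# loss = loss + 1e-4 * l2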
pred = tf.argmax(y_,1)
corr = tf.equal(pred,y)
accur = tf.reduce_mean(tf.cast(corr,tf.float64))
with tf.name_scope('train_op'):
    # batch norm's moving mean/variance are refreshed through UPDATE_OPS,
    # which must run together with each training step
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

LOG_DIR = '.'
run_label = 'run_vgg_tensorboard'
run_dir = os.path.join(LOG_DIR,run_label)
if not os.path.exists(run_dir):
    os.mkdir(run_dir)
train_log_dir = os.path.join(run_dir,'train')
test_log_dir = os.path.join(run_dir,'test')
if not os.path.exists(train_log_dir):
    os.mkdir(train_log_dir)
if not os.path.exists(test_log_dir):
    os.mkdir(test_log_dir)
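
# The train/test log directories are meant for TensorBoard; a minimal sketch of
# wiring them up (not used in this run) could be:
# loss_summary = tf.summary.scalar('loss', loss)
# merged_summary = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(train_log_dir, tf.get_default_graph())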

# A checkpoint consists of a .data file (parameter values), an .index file (variable index) and a .meta file (graph metadata)
model_dir = os.path.join(run_dir,'model')
if not os.path.exists(model_dir):
    os.mkdir(model_dir)

saver = tf.train.Saver()    # saves snapshots of the model parameters
model_name = 'ckp-20000'
model_path = os.path.join(model_dir,model_name)

output_model_every_steps = 500
train_steps = 20000
test_steps = 100
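# each evaluation covers test_steps * batch_size = 2000 of the 10000 test images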
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    if os.path.exists(model_path+'.index'):
        saver.restore(sess,model_path)  # initialize the session with the parameters stored at model_path
        print('model restored from %s' % model_path)
    else:
        print('model %s does not exist' % model_path)


    for i in range(train_steps):
        batch_data, batch_labels = train_data.next_batch(batch_size)
        loss_val, acc_val, _ = sess.run([loss, accur, train_op],
                                        feed_dict={x: batch_data,y: batch_labels})
        if (i + 1) % 500 == 0:
            print('[Train] Step: %d, loss: %4.5f, acc: %4.5f'% (i + 1, loss_val, acc_val))

        if (i + 1) % 5000 == 0:
            test_data = CifarData(test_filenames, False)
            all_test_acc_val = []
            for j in range(test_steps):
                test_batch_data, test_batch_labels = test_data.next_batch(batch_size)
                test_acc_val = sess.run(accur,feed_dict={x: test_batch_data, y: test_batch_labels})
                all_test_acc_val.append(test_acc_val)
            test_acc = np.mean(all_test_acc_val)
            print('[Test ] Step: %d, acc: %4.5f' % (i + 1, test_acc))
        if (i+1) % output_model_every_steps == 0:
            saver.save(sess, os.path.join(model_dir, 'ckp-%05d' % (i+1)))
            print('model saved to ckp-%05d' % (i+1))
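
Training log: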
[Train] Step: 500, loss: 2.33453, acc: 0.20000
model saved to ckp-00500
[Train] Step: 1000, loss: 1.54013, acc: 0.50000
model saved to ckp-01000
[Train] Step: 1500, loss: 1.47036, acc: 0.50000
model saved to ckp-01500
[Train] Step: 2000, loss: 1.26717, acc: 0.45000
model saved to ckp-02000
[Train] Step: 2500, loss: 1.49031, acc: 0.50000
model saved to ckp-02500
[Train] Step: 3000, loss: 1.16324, acc: 0.55000
model saved to ckp-03000
[Train] Step: 3500, loss: 0.81570, acc: 0.80000
model saved to ckp-03500
[Train] Step: 4000, loss: 0.94220, acc: 0.70000
model saved to ckp-04000
[Train] Step: 4500, loss: 0.70749, acc: 0.75000
model saved to ckp-04500
[Train] Step: 5000, loss: 0.57232, acc: 0.90000
[Test ] Step: 5000, acc: 0.73700
model saved to ckp-05000
[Train] Step: 5500, loss: 0.59342, acc: 0.75000
model saved to ckp-05500
[Train] Step: 6000, loss: 0.35936, acc: 0.90000
model saved to ckp-06000
[Train] Step: 6500, loss: 0.27314, acc: 0.95000
model saved to ckp-06500
[Train] Step: 7000, loss: 0.66962, acc: 0.70000
model saved to ckp-07000
[Train] Step: 7500, loss: 0.59381, acc: 0.85000
model saved to ckp-07500
[Train] Step: 8000, loss: 0.40683, acc: 0.80000
model saved to ckp-08000
[Train] Step: 8500, loss: 0.19584, acc: 0.95000
model saved to ckp-08500
[Train] Step: 9000, loss: 0.63560, acc: 0.80000
model saved to ckp-09000
[Train] Step: 9500, loss: 0.22437, acc: 0.95000
model saved to ckp-09500
[Train] Step: 10000, loss: 0.53473, acc: 0.75000
[Test ] Step: 10000, acc: 0.79150
model saved to ckp-10000
[Train] Step: 10500, loss: 0.60545, acc: 0.85000
model saved to ckp-10500
[Train] Step: 11000, loss: 0.64660, acc: 0.75000
model saved to ckp-11000
[Train] Step: 11500, loss: 0.52264, acc: 0.85000
model saved to ckp-11500
[Train] Step: 12000, loss: 0.34272, acc: 0.85000
model saved to ckp-12000
[Train] Step: 12500, loss: 0.72353, acc: 0.75000
model saved to ckp-12500
[Train] Step: 13000, loss: 0.25910, acc: 0.90000
model saved to ckp-13000
[Train] Step: 13500, loss: 0.99427, acc: 0.65000
model saved to ckp-13500
[Train] Step: 14000, loss: 0.55493, acc: 0.85000
model saved to ckp-14000
[Train] Step: 14500, loss: 0.26968, acc: 0.95000
model saved to ckp-14500
[Train] Step: 15000, loss: 0.87950, acc: 0.70000
[Test ] Step: 15000, acc: 0.82050
model saved to ckp-15000
[Train] Step: 15500, loss: 0.39943, acc: 0.85000
model saved to ckp-15500
[Train] Step: 16000, loss: 0.63854, acc: 0.70000
model saved to ckp-16000
[Train] Step: 16500, loss: 0.17351, acc: 0.90000
model saved to ckp-16500
[Train] Step: 17000, loss: 0.67718, acc: 0.80000
model saved to ckp-17000
[Train] Step: 17500, loss: 0.08306, acc: 1.00000
model saved to ckp-17500
[Train] Step: 18000, loss: 0.11991, acc: 1.00000
model saved to ckp-18000
[Train] Step: 18500, loss: 0.41899, acc: 0.85000
model saved to ckp-18500
[Train] Step: 19000, loss: 0.18777, acc: 0.95000
model saved to ckp-19000
[Train] Step: 19500, loss: 0.43061, acc: 0.85000
model saved to ckp-19500
[Train] Step: 20000, loss: 0.31199, acc: 0.75000
[Test ] Step: 20000, acc: 0.84200
