# 深度学习之神经网络图像分类cifar10数据集 — neural-network image classification on the CIFAR-10 dataset

import tensorflow as tf
import numpy as np
import os
import pickle


# 数据载入参数
def load_data(filename):
    """Read one CIFAR-10 python-format batch file.

    Returns a (data, labels) pair taken from the pickled dict's
    b'data' and b'labels' entries (keys are bytes because the
    batches were pickled under Python 2, hence encoding='bytes').
    """
    with open(filename, 'rb') as fh:
        batch = pickle.load(fh, encoding='bytes')
    return batch[b'data'], batch[b'labels']


# 建立数据处理类
# 方便从cifar10取出图片,打乱顺序等操作。
class CifarData:
    """Batch iterator over CIFAR-10 samples.

    Loads one or more batch files, normalizes pixels, and serves
    mini-batches; optionally reshuffles at every epoch boundary.
    """

    def __init__(self, filenames, need_shuffle):
        """Load all batch files into one array pair.

        filenames    -- list of CIFAR-10 batch file paths
        need_shuffle -- True for training (reshuffle each epoch),
                        False for test (fixed order, finite pass)
        """
        all_data = []
        all_labels = []
        for filename in filenames:
            data, label = load_data(filename)
            all_data.append(data)
            all_labels.append(label)

        self._data = np.vstack(all_data)
        # Scale uint8 pixels from [0, 255] to [-1, 1].
        self._data = self._data / 127.5 - 1
        self._labels = np.hstack(all_labels)
        self._num_examples = self._data.shape[0]
        # Fixed misspelled attribute name (was `_need_shauffle`).
        self._need_shuffle = need_shuffle
        self._start_index = 0
        if self._need_shuffle:
            self.shuffle_data()

    def shuffle_data(self):
        """Apply one random permutation to data and labels in lockstep."""
        perm = np.random.permutation(self._num_examples)
        self._data = self._data[perm]
        self._labels = self._labels[perm]

    def next_batch(self, batch_size):
        """Return the next (batch_data, batch_labels) slice.

        Raises Exception when the data is exhausted and shuffling is
        disabled, or when batch_size exceeds the whole dataset.
        """
        end_index = self._start_index + batch_size
        if end_index > self._num_examples:
            if self._need_shuffle:
                # Epoch finished: reshuffle and restart from the top.
                self.shuffle_data()
                self._start_index = 0
                end_index = batch_size
            else:
                raise Exception('没有过多的样本')
        if end_index > self._num_examples:
            # Even a fresh epoch cannot satisfy this batch_size.
            raise Exception('尺寸过大')

        batch_data = self._data[self._start_index:end_index]
        batch_labels = self._labels[self._start_index:end_index]
        self._start_index = end_index
        return batch_data, batch_labels


# Directory containing the extracted CIFAR-10 python-version batches.
cifar_dir = './cifar-10-batches-py'
print(os.listdir(cifar_dir))
# Training files data_batch_1 .. data_batch_5, plus the single test batch.
train_filenames = [os.path.join(cifar_dir,'data_batch_%d'%i)for i in range(1,6)]
test_filenames = [os.path.join(cifar_dir,'test_batch')]

# Instantiate the datasets: shuffle training data, keep test order fixed.
train_data = CifarData(train_filenames,True)
test_data = CifarData(test_filenames,False)

# ----- Model definition (TensorFlow 1.x static graph) -----
# X: flattened 32x32x3 images (3072 floats); Y: integer class labels.
X = tf.placeholder(dtype=tf.float32, shape=[None, 3072])
Y = tf.placeholder(tf.int64)

# Fully connected trunk 3072 -> 400 -> 120 -> 84 with ReLU activations.
net = X
for width in (400, 120, 84):
    net = tf.layers.dense(net, width, activation=tf.nn.relu)

# Logits over the 10 CIFAR-10 classes.
y_ = tf.layers.dense(net, 10)

# Softmax cross-entropy against sparse integer labels.
loss = tf.losses.sparse_softmax_cross_entropy(labels=Y, logits=y_)

# Predicted class and batch accuracy.
predict = tf.argmax(y_, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(Y, predict), dtype=tf.float32))

with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

# Hyper-parameters
batch_size = 20
train_steps = 10000
test_steps = 100

# ----- Training loop -----
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(train_steps):
        x_train, y_train = train_data.next_batch(batch_size)
        c, acc, _ = sess.run([loss, accuracy, train_op],
                             feed_dict={X: x_train, Y: y_train})
        if (i + 1) % 500 == 0:
            print('代价:', c)
            print('准确率: ', acc)
        if (i + 1) % 5000 == 0:
            # Periodic evaluation on the held-out test set; recreating
            # CifarData resets the (unshuffled) iterator to the start.
            test_data = CifarData(test_filenames, False)
            acc_all = []
            for j in range(test_steps):
                x_test, yh_test = test_data.next_batch(batch_size)
                acc_val = sess.run(accuracy, feed_dict={X: x_test, Y: yh_test})
                acc_all.append(acc_val)
            # BUG FIX: the original ran sess.run(tf.reduce_mean(acc_all)),
            # which adds a new reduce_mean node to the graph on every
            # evaluation (unbounded graph growth). Average on the host.
            print('测试准确率:', np.mean(acc_all))






  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值