CIFAR-10: VGGNet Implementation (TensorFlow 1.x)

import tensorflow as tf
import os
import pickle
import numpy as np
CIFAR_DIR='cifar-10-python/cifar-10-batches-py'
print(os.listdir(CIFAR_DIR))
def load_data(filename):
    """read data from file"""
    with open(filename,'rb') as f:
        data=pickle.load(f,encoding='bytes')
        return data[b'data'],data[b'labels']
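For reference, each CIFAR-10 batch file unpickles to a dict whose b'data' entry is a uint8 array of shape (10000, 3072) and whose b'labels' entry is a list of 10000 integers in [0, 9]. A quick sanity check (the variable names below are only illustrative):

# Illustrative sanity check of one batch file.
sample_data, sample_labels = load_data(os.path.join(CIFAR_DIR, 'data_batch_1'))
print(sample_data.shape, sample_data.dtype)                        # (10000, 3072) uint8
print(len(sample_labels), min(sample_labels), max(sample_labels))  # 10000 0 9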

class CifarData:
    def __init__(self,filenames,need_shuffle):
        all_data=[]
        all_labels=[]
        for filename in filenames:
            data,labels=load_data(filename)
            all_data.append(data)
            all_labels.append(labels)
        self._data=np.vstack(all_data)
        self._data=self._data/127.5-1 # scale uint8 pixels to [-1,1]
        self._labels=np.hstack(all_labels)
        self._num_examples=self._data.shape[0]
        self._need_shuffle=need_shuffle
        self._indicator=0
        if self._need_shuffle:
            self._shuffle_data()
    def _shuffle_data(self):
        p=np.random.permutation(self._num_examples)
        self._data=self._data[p]
        self._labels=self._labels[p]
    def next_batch(self,batch_size):
        """return batch_size examples as a batch"""
        end_indicator=self._indicator+batch_size
        if end_indicator>self._num_examples:
            if self._need_shuffle:
                self._shuffle_data()
                self._indicator=0
                end_indicator=batch_size
            else:
                raise Exception("have no more example")
        if end_indicator>self._num_examples:
            raise Exception("batch size is larger than all example")
        batch_data=self._data[self._indicator:end_indicator]
        batch_labels=self._labels[self._indicator:end_indicator]
        self._indicator=end_indicator
        return batch_data,batch_labels
train_filenames=[os.path.join(CIFAR_DIR,'data_batch_%d'%i) for i in range(1,6)]
test_filenames=[os.path.join(CIFAR_DIR,'test_batch')]

train_data=CifarData(train_filenames,True)
test_data=CifarData(test_filenames,False)
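train_data now iterates over the 50,000 training images and test_data over the 10,000 test images, with pixels already scaled to [-1, 1]. A quick look at one batch (a throwaway CifarData is used here so the training iterator is not disturbed; names are illustrative):

# Illustrative: inspect one batch from a throwaway loader.
demo_data, demo_labels = CifarData(test_filenames, False).next_batch(4)
print(demo_data.shape, demo_labels.shape)   # (4, 3072) (4,)
print(demo_data.min(), demo_data.max())     # values lie in [-1, 1]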
#[None,3072]
x=tf.placeholder(tf.float32,[None,3072])
y=tf.placeholder(tf.int64,[None])
# reshape each flat 3072-vector to 3x32x32 (channel-major), then move channels last (NHWC)
x_image=tf.reshape(x,[-1,3,32,32])
x_images=tf.transpose(x_image,perm=[0,2,3,1])
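CIFAR-10 stores every image as 1024 red values, then 1024 green, then 1024 blue (each plane row-major), so reshaping to (3, 32, 32) and transposing with perm=[0,2,3,1] produces the channels-last layout that tf.layers.conv2d expects by default. A small numpy sketch of that conversion (purely illustrative):

# Illustrative numpy view of the reshape + transpose above, for one fake image.
flat = np.arange(3072)              # stands in for one flattened CIFAR image
chw = flat.reshape(3, 32, 32)       # [R plane, G plane, B plane]
hwc = chw.transpose(1, 2, 0)        # channels last, shape (32, 32, 3)
print(hwc[0, 0])                    # [0 1024 2048]: R, G, B of the top-left pixel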
# conv block 1: feature maps (the output images of the convolution)
conv1_1=tf.layers.conv2d(x_images,
                       32,
                       (3,3),
                       padding='same',
                       activation=tf.nn.relu,
                       name='conv1_1'
                       )
conv1_2=tf.layers.conv2d(conv1_1,
                       32,
                       (3,3),
                       padding='same',
                       activation=tf.nn.relu,
                       name='conv1_2'
                       )

#16*16
pooling1=tf.layers.max_pooling2d(conv1_2,
                                 (2,2),#kernel size
                                 (2,2),#stride
                                 name='pool1')

conv2_1=tf.layers.conv2d(pooling1,
                       32,
                       (3,3),
                       padding='same',
                       activation=tf.nn.relu,
                       name='conv2_1'
                       )
conv2_2=tf.layers.conv2d(conv2_1,
                       32,
                       (3,3),
                       padding='same',
                       activation=tf.nn.relu,
                       name='conv2_2'
                       )

#8*8
pooling2=tf.layers.max_pooling2d(conv2_2,
                                 (2,2),#kernel size
                                 (2,2),#stride
                                 name='pool2')

conv3_1=tf.layers.conv2d(pooling2,
                       32,
                       (3,3),
                       padding='same',
                       activation=tf.nn.relu,
                       name='conv3_1'
                       )

conv3_2=tf.layers.conv2d(conv3_1,
                       32,
                       (3,3),
                       padding='same',
                       activation=tf.nn.relu,
                       name='conv3_2'
                       )
#4*4*32
pooling3=tf.layers.max_pooling2d(conv3_2,
                                 (2,2),#kernel size
                                 (2,2),#stride
                                 name='pool3')
#[None,4*4*32]
flatten=tf.layers.flatten(pooling3)
y_=tf.layers.dense(flatten,10) # 10 logits because CIFAR-10 has 10 classes
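For bookkeeping: each 2x2/stride-2 pooling halves the spatial size, so the feature maps go 32x32 -> 16x16 -> 8x8 -> 4x4 while keeping 32 channels, and flatten therefore emits 4*4*32 = 512 features per image before the final dense layer. The arithmetic, spelled out:

# Illustrative shape bookkeeping for the network above.
spatial = 32 // 2 // 2 // 2        # three 2x2 max-pools with stride 2 -> 4
flat_dim = spatial * spatial * 32  # 4 * 4 * 32 = 512 features fed into the dense layer
print(spatial, flat_dim)           # 4 512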

# cross-entropy loss
loss=tf.losses.sparse_softmax_cross_entropy(labels=y,logits=y_)
# labels: integer class ids, one-hot encoded internally
# logits: unnormalized scores, softmax is applied internally
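Concretely, for a single example the loss is the negative log of the softmax probability assigned to the true class. A small numpy sketch with made-up numbers:

# Illustrative: sparse softmax cross-entropy for one example, in plain numpy.
logits_example = np.array([2.0, 0.5, -1.0])   # made-up scores for 3 classes
label_example = 0                             # made-up true class id
probs = np.exp(logits_example) / np.sum(np.exp(logits_example))
print(-np.log(probs[label_example]))          # ~0.24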

predict=tf.argmax(y_,1)
correct_prediction=tf.equal(predict,y) # tf.equal compares element-wise and returns booleans
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float64))
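Accuracy is simply the fraction of batch positions where the arg-max prediction matches the label, e.g.:

# Illustrative accuracy computation in plain numpy.
pred_demo = np.array([3, 1, 5, 1])    # made-up arg-max predictions
label_demo = np.array([3, 1, 2, 0])   # made-up ground-truth labels
print(np.mean(pred_demo == label_demo))  # 0.5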

with tf.name_scope('train_op'):
    train_op=tf.train.AdamOptimizer(1e-3).minimize(loss)
    # train_op = tf.train.AdadeltaOptimizer(0.4).minimize(loss)

init=tf.global_variables_initializer()
batch_size=20
train_steps=100000
test_steps=100
sess = tf.InteractiveSession() # interactive session
sess.run(init)
for i in range(train_steps):
    batch_data,batch_labels=train_data.next_batch(batch_size)
    loss_val,acc_val,_=sess.run([loss,accuracy,train_op],feed_dict={x:batch_data,y:batch_labels})
    if (i+1) %500==0:
        print('[train] step:%d,loss:%4.5f,acc:%4.5f'%(i+1,loss_val,acc_val))
    if (i+1)%5000==0:
        test_data=CifarData(test_filenames,True)
        all_test_acc_val=[]
        for j in range(test_steps):
            test_batch_data,test_batch_labels=test_data.next_batch(batch_size)
            test_acc_val=sess.run(accuracy,
                feed_dict={x:test_batch_data,y:test_batch_labels})
            all_test_acc_val.append(test_acc_val)
        test_acc=np.mean(all_test_acc_val)
        print('[test] step:%d,acc:%4.5f'%(i+1,test_acc))
