CIFAR-10: ResNet Implementation

import tensorflow as tf
import os
import pickle
import numpy as np
CIFAR_DIR='cifar-10-python/cifar-10-batches-py'
print(os.listdir(CIFAR_DIR))
def load_data(filename):
    """read data from file"""
    with open(filename,'rb') as f:
        data=pickle.load(f,encoding='bytes')
        return data[b'data'],data[b'labels']
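Each batch file unpickles to a dict whose b'data' entry is a uint8 array of shape (10000, 3072) and whose b'labels' entry is a list of 10000 integers in [0, 9]. A quick inspection sketch, assuming the standard python-version batch files:

data, labels = load_data(os.path.join(CIFAR_DIR, 'data_batch_1'))
print(data.shape, data.dtype)   # (10000, 3072) uint8
print(len(labels), labels[:5])  # 10000 labels, e.g. [6, 9, 9, 4, 1]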

class CifarData:
    def __init__(self,filenames,need_shuffle):
        all_data=[]
        all_labels=[]
        for filename in filenames:
            data,labels=load_data(filename)
            all_data.append(data)
            all_labels.append(labels)
        self._data=np.vstack(all_data)
        self._data=self._data/127.5-1  # normalize pixel values to [-1, 1]
        self._labels=np.hstack(all_labels)
        self._num_examples=self._data.shape[0]
        self._need_shuffle=need_shuffle
        self._indicator=0
        if self._need_shuffle:
            self._shuffle_data()
    def _shuffle_data(self):
        p=np.random.permutation(self._num_examples)
        self._data=self._data[p]
        self._labels=self._labels[p]
    def next_batch(self,batch_size):
        """return batch_size examples as a batch"""
        end_indicator=self._indicator+batch_size
        if end_indicator>self._num_examples:
            if self._need_shuffle:
                self._shuffle_data()
                self._indicator=0
                end_indicator=batch_size
            else:
                raise Exception("have no more example")
        if end_indicator>self._num_examples:
            raise Exception("batch size is larger than all example")
        batch_data=self._data[self._indicator:end_indicator]
        batch_labels=self._labels[self._indicator:end_indicator]
        self._indicator=end_indicator
        return batch_data,batch_labels
train_filenames=[os.path.join(CIFAR_DIR,'data_batch_%d'%i) for i in range(1,6)]
test_filenames=[os.path.join(CIFAR_DIR,'test_batch')]
train_data=CifarData(train_filenames,True)
test_data=CifarData(test_filenames,False)
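Dividing by 127.5 and subtracting 1 maps raw uint8 pixel values from [0, 255] into [-1, 1], centering the inputs around zero. A standalone numpy check of the mapping:

import numpy as np
pixels = np.array([0.0, 127.5, 255.0])
print(pixels / 127.5 - 1)  # [-1.  0.  1.]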

def residual_block(x,output_channel):
    """Residual block with an optional downsampling, zero-padded shortcut."""
    input_channel=x.get_shape().as_list()[-1]
    if input_channel*2==output_channel:
        increase_dim=True  # double the channel count
        strides=(2,2)  # downsample spatially
    elif input_channel==output_channel:
        increase_dim=False  # keep the channel count
        strides=(1,1)  # no downsampling
    else:
        raise Exception("input channel doesn't match output channel")
    conv1=tf.layers.conv2d(x,
                           output_channel,
                           (3,3),
                           strides=strides,
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv1')
    conv2 = tf.layers.conv2d(conv1,
                             output_channel,
                             (3, 3),
                             strides=(1,1),
                             padding='same',
                             activation=tf.nn.relu,
                             name='conv2')
    if increase_dim:
        # [None, width, height, channel] -> [None, width/2, height/2, channel*2]
        pooled_x=tf.layers.average_pooling2d(x,
                                             (2,2),
                                             (2,2),
                                             padding='valid')
        padded_x=tf.pad(pooled_x,
                        [[0,0],
                         [0,0],
                         [0,0],
                         [input_channel//2,input_channel//2]])
    else:
        padded_x=x
    output_x=conv2+padded_x
    return output_x
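When a block doubles the channel count, the shortcut cannot be an identity: x has twice the spatial size and half the channels of conv2. The code uses the zero-padding shortcut (option A of the ResNet paper): 2x2 average pooling halves width and height, and tf.pad appends input_channel//2 zero channels on each side of the channel axis, doubling the depth. A standalone numpy sketch of the shape arithmetic, with illustrative values not taken from the script:

import numpy as np
input_channel = 32
pooled = np.zeros((1, 16, 16, input_channel))  # x after 2x2 average pooling
pad = input_channel // 2
padded = np.pad(pooled, [(0, 0), (0, 0), (0, 0), (pad, pad)])
print(padded.shape)  # (1, 16, 16, 64): channels doubled to match conv2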
def res_net(x,num_residual_blocks,num_filter_base,class_num):
    """Residual network implementation.

    num_residual_blocks: number of residual blocks per stage, e.g. [3,4,6,3]
    num_filter_base: channel count after the initial convolution
    class_num: number of output classes (10 for CIFAR-10)
    """
    num_subsampling=len(num_residual_blocks)  # number of stages; the first block of every stage after stage 0 downsamples
    layers=[]  # stores the output of every layer
    #x:[None,width,height,channel] -> [width,height,channel]
    input_size=x.get_shape().as_list()[1:]
    # first pass x through a plain convolution layer
    with tf.variable_scope('conv0'):
        conv0=tf.layers.conv2d(x,
                               num_filter_base,
                               (3,3),
                               strides=(1,1),
                               padding='same',
                               activation=tf.nn.relu,
                               name='conv0')
        layers.append(conv0)
    for sample_id in range(num_subsampling):
        for i in range(num_residual_blocks[sample_id]):
            with tf.variable_scope("conv%d_%d"%(sample_id,i)):
                conv=residual_block(
                    layers[-1],
                    num_filter_base*(2**sample_id),  # double the channel count at each stage
                )
                layers.append(conv)
    # sanity-check that the final feature map has the expected shape
    multiplier=2**(num_subsampling-1)
    assert layers[-1].get_shape().as_list()[1:]==[input_size[0]//multiplier,  # feature-map height
                                                  input_size[1]//multiplier,  # feature-map width
                                                  num_filter_base*multiplier]  # feature-map channels
    with tf.variable_scope('fc'):
        # global average pooling: like ordinary pooling with kernel_size equal
        # to the feature-map size; layers[-1] is [None, width, height, channel]
        global_pool=tf.reduce_mean(layers[-1],[1,2])  # average over the spatial axes (1 and 2), collapsing each feature map to one value
        logits=tf.layers.dense(global_pool,class_num)  # pre-softmax values are called logits
        layers.append(logits)
    return layers[-1]
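For the configuration used below, res_net(x_images, [2, 3, 2], 32, 10) on 32x32x3 inputs, the shapes work out as follows (a trace derived from the code above; the script does not print it):

# conv0:              (None, 32, 32, 32)
# conv0_0..conv0_1:   (None, 32, 32, 32)  stage 0: channels already match, no downsampling
# conv1_0..conv1_2:   (None, 16, 16, 64)  stage 1: first block downsamples, doubles channels
# conv2_0..conv2_1:   (None, 8, 8, 128)   stage 2: same again
# global_pool:        (None, 128)
# logits:             (None, 10)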


#[None,3072]
x=tf.placeholder(tf.float32,[None,3072])
y=tf.placeholder(tf.int64,[None])
# each CIFAR-10 row is channel-major: reshape to [N, 3, 32, 32], then move channels last
x_image=tf.reshape(x,[-1,3,32,32])
x_images=tf.transpose(x_image,perm=[0,2,3,1])
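CIFAR-10 stores each image as 3072 bytes in channel-major order (1024 red values, then green, then blue), so a row is first reshaped to [N, 3, 32, 32] and then transposed to the NHWC layout that tf.layers.conv2d expects by default. A standalone numpy illustration of the same rearrangement:

import numpy as np
flat = np.arange(3 * 32 * 32)  # stand-in for one CIFAR-10 row
chw = flat.reshape(3, 32, 32)  # channel, height, width: as stored on disk
hwc = chw.transpose(1, 2, 0)   # height, width, channel: NHWC per image
print(hwc.shape)  # (32, 32, 3)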



y_=res_net(x_images,[2,3,2],32,10)

# cross-entropy loss
loss=tf.losses.sparse_softmax_cross_entropy(labels=y,logits=y_)
# internally: labels -> one-hot, logits -> softmax

predict=tf.argmax(y_,1)
correct_prediction=tf.equal(predict,y)  # elementwise comparison, returns booleans
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float64))
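Accuracy is the fraction of batch elements whose argmax over the logits matches the integer label; the booleans are cast to float so their mean can be taken. The same computation in numpy, with made-up values:

import numpy as np
y_hat = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
print((np.argmax(y_hat, 1) == labels).mean())  # 2 of 3 correct -> 0.666...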

with tf.name_scope('train_op'):
    train_op=tf.train.AdamOptimizer(1e-3).minimize(loss)
    # train_op = tf.train.AdadeltaOptimizer(0.4).minimize(loss)

init=tf.global_variables_initializer()
batch_size=20
train_steps=100000
test_steps=100
sess = tf.InteractiveSession()  # interactive session
sess.run(init)
for i in range(train_steps):
    batch_data,batch_labels=train_data.next_batch(batch_size)
    loss_val,acc_val,_=sess.run([loss,accuracy,train_op],feed_dict={x:batch_data,y:batch_labels})
    if (i+1) %500==0:
        print('[train] step:%d,loss:%4.5f,acc:%4.5f'%(i+1,loss_val,acc_val))
    if (i+1)%5000==0:
        test_data=CifarData(test_filenames,True)  # re-created with shuffle on so next_batch can wrap around
        all_test_acc_val=[]
        for j in range(test_steps):
            test_batch_data,test_batch_labels=test_data.next_batch(batch_size)
            test_acc_val=sess.run(accuracy,
                feed_dict={x:test_batch_data,y:test_batch_labels})
            all_test_acc_val.append(test_acc_val)
        test_acc=np.mean(all_test_acc_val)
        print('[test] step:%d,acc:%4.5f'%(i+1,test_acc))
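Note that each evaluation above looks at only test_steps * batch_size = 2000 of the 10,000 test images. A minimal sketch of a full-test-set pass, reusing the script's tensors and assuming the standard 10,000-image test set:

full_test_data = CifarData(test_filenames, False)
all_acc = []
for _ in range(10000 // batch_size):
    d, l = full_test_data.next_batch(batch_size)
    all_acc.append(sess.run(accuracy, feed_dict={x: d, y: l}))
print('[full test] acc: %4.5f' % np.mean(all_acc))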