ResNet18 Implemented in TensorFlow 1.x

This program was written independently, following the Peking University course 《人工智能实践:Tensorflow笔记》 (available on Bilibili). It targets TensorFlow 1.x; under TensorFlow 2.x it can be run through the following compatibility shim:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

Details:
1. CIFAR-10 dataset: 50,000 images for training, 10,000 for testing
2. Batch normalization (population statistics are tracked with a moving average during training and used at test time)
3. Data augmentation (per-image standardization + zero-padding on all sides + random crop + random horizontal flip)
4. Piecewise-constant learning-rate decay (initial 0.1, divided by 10 at steps 36,000 and 54,000; see the sketch after this list)
5. Exponential moving average over all trainable parameters
Final test-set accuracy reaches 90.16%.
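
A minimal standalone sketch of the schedule in item 4 (the boundaries and values mirror the BOUNDARIES and LEARNING_RATES constants below; run it in a fresh graph):

import tensorflow as tf

global_step = tf.Variable(0, trainable=False)
lr = tf.train.piecewise_constant(global_step, boundaries=[36000, 54000], values=[0.1, 0.01, 0.001])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for s in [0, 40000, 60000]:
        sess.run(tf.assign(global_step, s))
        print(s, sess.run(lr))  # prints 0.1, then 0.01, then 0.001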

import tensorflow as tf
import numpy as np
import os

IMAGE_SIZE = 32 #input image size
IMAGE_CHANNELS = 3 #input image depth (number of channels)
KERNEL_SIZE = 3 #convolution kernel size (the same for every conv layer)
KERNEL_NUM = [64, 128, 256, 512] #number of kernels in each stage of residual blocks
DEPTH = [[64, 64], [64, 128], [128, 256], [256, 512]] #input depth of each residual block in each stage
BLOCK_LIST = [2, 2, 2, 2] #number of residual blocks in each stage
OUTPUT_SIZE = 10 #number of output nodes of the fully connected layer, i.e. the number of classes

LEARNING_RATES = [0.1,0.01,0.001] #learning-rate values
BOUNDARIES = [36000,54000] #steps at which the learning rate decays
MOVING_AVERAGE_DECAY = 0.99 #decay rate of the exponential moving average
BATCH_SIZE = 32 #batch size
STEPS = 64000 #number of training steps
TEST_SIZE = 10000 #number of test images
MODEL_SAVE_PATH = "./resnet18_model/" #directory where the model is saved
MODEL_NAME = 'resnet18_model' #model file name

## Forward pass: build the network structure (the computation graph)

#convolution helper
def conv2d(x, shape, strides=1):
    w = tf.get_variable("w",shape,initializer=tf.truncated_normal_initializer(stddev=0.1)) #truncated normal: resamples values more than 2 stddevs from the mean
    return tf.nn.conv2d(x,w,strides = [1,strides,strides,1],padding="SAME")
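
#e.g. within a variable scope, conv2d(x, [3,3,64,128], strides=2) maps [N,32,32,64] to [N,16,16,128] (SAME padding)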

#batch-normalization helper
#during training, tf.nn.moments computes the mean and variance of the batch, the population statistics are updated on every iteration, and tf.nn.batch_normalization applies the normalization
#the with tf.control_dependencies(...) block forces TensorFlow to update the population statistics before performing the normalization
#at test time, the mean and variance come from the moving-average estimates accumulated during training
def batch_normalization(x, depth, is_training):
    gamma = tf.get_variable("gamma",[depth],initializer=tf.ones_initializer)
    beta = tf.get_variable("beta",[depth],initializer=tf.zeros_initializer)
    pop_mean = tf.get_variable("mean",[depth],initializer=tf.zeros_initializer,trainable=False)
    pop_variance = tf.get_variable("variance",[depth],initializer=tf.ones_initializer,trainable=False)
    if is_training:
        batch_mean, batch_variance = tf.nn.moments(x,[0, 1, 2],keep_dims=False)
        decay = 0.99
        train_mean = tf.assign(pop_mean,pop_mean*decay + batch_mean*(1 - decay))
        train_variance = tf.assign(pop_variance,pop_variance*decay + batch_variance*(1 - decay))
        with tf.control_dependencies([train_mean,train_variance]):
            return tf.nn.batch_normalization(x,batch_mean,batch_variance,beta,gamma,1e-3)
    else:
        return tf.nn.batch_normalization(x,pop_mean,pop_variance,beta,gamma,1e-3)
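
#note: is_training here is a plain Python bool fixed at graph-construction time (not a placeholder),
#so the training and test graphs are built separately; test() below builds its own tf.Graph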

#basic residual block
def basicblock(x, depth, kernel_num, is_training, strides=1, residual_path=False):
    residual = x #the shortcut branch starts as the input itself, residual = x
    #pass the input through conv, BN and ReLU layers to compute F(x)
    with tf.variable_scope('1'):
        c1 = conv2d(x,[KERNEL_SIZE,KERNEL_SIZE,depth,kernel_num],strides)
        b1 = batch_normalization(c1,kernel_num,is_training)
        a1 = tf.nn.relu(b1)
    
    with tf.variable_scope('2'):
        c2 = conv2d(a1,[KERNEL_SIZE,KERNEL_SIZE,kernel_num,kernel_num])
        b2 = batch_normalization(c2,kernel_num,is_training)

    #when residual_path is True, downsample the input with a 1x1 convolution so that x matches the shape of F(x) and the two can be added
    if residual_path:
        with tf.variable_scope('3'):
            down_c1 = conv2d(x,[1,1,depth,kernel_num],strides)
            residual = batch_normalization(down_c1,kernel_num,is_training)

    a2 = tf.nn.relu(b2 + residual) #the output is the sum of the two branches, F(x)+x or F(x)+Wx, passed through the activation
    return a2
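
#e.g. basicblock(x, 64, 128, is_training, strides=2, residual_path=True) maps [N,32,32,64]
#to [N,16,16,128]; the 1x1 projection keeps the shortcut addable to F(x)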

#fully connected helper
def fc(x, shape):
    w = tf.get_variable("w",shape,initializer=tf.truncated_normal_initializer(stddev=0.1)) #truncated normal: resamples values more than 2 stddevs from the mean
    b = tf.get_variable("b",shape[1],initializer=tf.constant_initializer(0.0))
    return tf.matmul(x,w)+b

#forward-pass function
#initializes the weights of every layer and defines the computation (the graph only declares the operations; no real computation happens here)
def forward(x, is_training=True):
    with tf.variable_scope('c1'):
        conv1 = conv2d(x,[KERNEL_SIZE,KERNEL_SIZE,IMAGE_CHANNELS,KERNEL_NUM[0]])
        bn1 = batch_normalization(conv1,KERNEL_NUM[0],is_training)
        relu1 = tf.nn.relu(bn1)
    
    block = relu1
    name = [['block1c1', 'block1c2'], ['block2c1', 'block2c2'], ['block3c1', 'block3c2'], ['block4c1', 'block4c2']]
    for block_id in range(len(BLOCK_LIST)):  # which stage
        for layer_id in range(BLOCK_LIST[block_id]):  # which block within the stage
            with tf.variable_scope(name[block_id][layer_id]):
                if block_id != 0 and layer_id == 0:  # downsample at the entry of every stage except the first
                    block = basicblock(block,DEPTH[block_id][layer_id],KERNEL_NUM[block_id],is_training,strides=2,residual_path=True)
                else:
                    block = basicblock(block,DEPTH[block_id][layer_id],KERNEL_NUM[block_id],is_training)
    pool = tf.nn.avg_pool(block,ksize=[1,4,4,1],strides=[1,1,1,1],padding="VALID") #global average pooling (the feature map is 4x4 here)

    pool_shape = pool.get_shape().as_list() #get the size of each dimension
    nodes = pool_shape[1]* pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool,[pool_shape[0],nodes]) #flatten into a vector for the fully connected layer
    
    with tf.variable_scope('f1'):
        y = fc(reshaped,[nodes,OUTPUT_SIZE])
        
    return y
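
#shape trace for a CIFAR-10 input (N = batch size):
#  [N,32,32,3] -> c1 [N,32,32,64] -> stage1 [N,32,32,64] -> stage2 [N,16,16,128]
#  -> stage3 [N,8,8,256] -> stage4 [N,4,4,512] -> 4x4 avg pool [N,1,1,512] -> fc [N,10]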

## Backward pass: define the optimization and run the computation (the session)

#load the dataset
def get_images():
    cifar10 = tf.keras.datasets.cifar10
    (x0, y0), (x1, y1) = cifar10.load_data()
    #scale pixel values to [0, 1]
    x_train, x_test = x0 / 255.0, x1 / 255.0
    #convert the labels to one-hot
    y_train, y_test = np.zeros((y0.shape[0],10)), np.zeros((y1.shape[0],10))
    for i in list(range(y0.shape[0])):
        n = y0[i,0]
        y_train[i,n] = 1
    for i in list(range(y1.shape[0])):
        n = y1[i,0]
        y_test[i,n] = 1
    
    return x_train, y_train, x_test, y_test
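
#note: the one-hot loops above are equivalent to the vectorized form
#  y_train, y_test = np.eye(10)[y0[:, 0]], np.eye(10)[y1[:, 0]]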

#backward / training function
#defines the loss and the training procedure, uses a piecewise-constant learning rate and a moving average, and runs the session (via a with block)
def backward(x_train, y_train):
    #draw shuffled batches from the training set
    input_queue = tf.train.slice_input_producer([x_train, y_train], num_epochs=None, shuffle=True, capacity=BATCH_SIZE*10)
    x_batch, y_batch = tf.train.batch(input_queue, batch_size=BATCH_SIZE, num_threads=2, capacity=BATCH_SIZE*10, allow_smaller_final_batch=False)
    
    #data augmentation (original paper: 4 pixels are padded on each side, and a 32×32 crop is randomly sampled from the padded image or its horizontal flip)
    x_batch = tf.image.per_image_standardization(x_batch)
    x_batch = tf.image.resize_image_with_crop_or_pad(x_batch, 36, 36) #zero-pad to 36x36
    x_batch = tf.random_crop(x_batch, [BATCH_SIZE, 32, 32, 3]) #random 32x32 crop
    x_batch = tf.image.random_flip_left_right(x_batch) #random horizontal flip
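    #note: standardization runs before padding, so the zero border equals each image's
    #(post-standardization) mean; recent TF 1.x releases accept 4-D batches for these image ops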
 
    #placeholders for the inputs and labels, similar to declaring variables
    x = tf.placeholder(tf.float32,[
        BATCH_SIZE,
        IMAGE_SIZE,
        IMAGE_SIZE,
        IMAGE_CHANNELS
    ])
    y_ = tf.placeholder(tf.float32,[None,OUTPUT_SIZE])

    y = forward(x)
    global_step = tf.Variable(0, trainable = False) #step counter, marked as non-trainable
    
    #loss function
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem
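    #since y_ is one-hot, this sparse cross-entropy is equivalent to
    #tf.nn.softmax_cross_entropy_with_logits_v2(logits=y, labels=y_)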

    #accuracy
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 
    
    #piecewise-constant learning-rate decay (original paper: We start with a learning rate of 0.1, divide it by 10 at 32k and 48k iterations, and terminate training at 64k iterations)
    learning_rate = tf.train.piecewise_constant(global_step, boundaries=BOUNDARIES, values=LEARNING_RATES)
    
    #training op
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    
    #exponential moving average of the trainable variables (note: the original paper instead uses a weight decay of 0.0001 and momentum of 0.9; here plain SGD plus an EMA of the weights is used)
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    ema_op = ema.apply(tf.trainable_variables())
    #combine train_step and ema_op into a single training node train_op
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name="train")
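    #at test time, ema.variables_to_restore() (used in test() below) maps each variable to its
    #shadow (averaged) value, so the averaged weights are restored instead of the raw ones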
        
    saver = tf.train.Saver(max_to_keep=None) #instantiate a saver that keeps every checkpoint
    
    #the session
    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init_op)
        
        #resume from a checkpoint: if a saved model exists, restore the session and continue training from the most recent checkpoint
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess,ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)    
        #training loop
        for i in range(STEPS):
            xs, ys = sess.run([x_batch, y_batch])
            _, loss_value, train_accuracy, step = sess.run([train_op, loss, accuracy, global_step], feed_dict={x:xs, y_: ys})
            if step == 1 or step % 500 ==0: #every 500 steps, print the loss and training accuracy and save the model
                print("After %d training step(s), losses = %f, train accuracy = %f. " %(step, loss_value, train_accuracy))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH,MODEL_NAME),global_step = global_step,write_meta_graph=False)
        coord.request_stop()
        coord.join(threads)

#The test program rebuilds the network graph and computes the accuracy on the test set

def test(x_test, y_test):
    with tf.Graph().as_default() as g:
        #input_queue = tf.train.slice_input_producer([x_test, y_test], num_epochs=None, shuffle=True, capacity=BATCH_SIZE*10)
        #x_batch, y_batch = tf.train.batch(input_queue, batch_size=BATCH_SIZE, num_threads=2, capacity=BATCH_SIZE*10, allow_smaller_final_batch=False)
        #optional: generate low-resolution images (nearest-neighbor downsampling, bicubic upsampling)
        #xs = tf.image.resize_images(x_test, [16, 16], method=1)
        #xs = tf.image.resize_images(xs, [32, 32], method=2)
        xs = tf.image.per_image_standardization(x_test)

        x = tf.placeholder(tf.float32,[TEST_SIZE,
                                      IMAGE_SIZE,
                                      IMAGE_SIZE,
                                      IMAGE_CHANNELS])
        y_ = tf.placeholder(tf.float32,[None,OUTPUT_SIZE])
        y = forward(x,is_training=False)
        
        ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        
        #compute the accuracy
        correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                for i in range(int(STEPS/500)): #evaluate every checkpoint saved at 500-step intervals
                    global_step = str(500*(i+1))
                    path = ckpt.model_checkpoint_path.replace(step, global_step)
                    saver.restore(sess, path)
                    
                    #coord = tf.train.Coordinator()
                    #threads = tf.train.start_queue_runners(sess=sess, coord=coord) 
                    xx = sess.run(xs)
                    test_accuracy = sess.run(accuracy,feed_dict = {x: xx, y_: y_test})
                    print("After %s training step(s), test accuracy = %f"%(global_step, test_accuracy))

                #coord.request_stop()
                #coord.join(threads)
            else:
                print("No checkpoint file found")
                return

## Main program
def main():
    tf.logging.set_verbosity(tf.logging.ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL']='3' #suppress warnings and GPU log messages
    
    x_train, y_train, x_test, y_test = get_images()
    backward(x_train, y_train)
    test(x_test, y_test)

#Make the script both importable and executable: it can be imported into another module or run on its own
if __name__ == "__main__":
    main()