DenseNet训练CIFAR-10数据集(TensorFlow)
![在这里插入图片描述](https://i-blog.csdnimg.cn/blog_migrate/054a383d19bbe6e61467844e5be5edff.jpeg)
train.py
#导入需要的库
import tensorflow as tf
from tflearn.layers.conv import global_avg_pool
from tensorflow.contrib.layers import batch_norm, flatten
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.framework import arg_scope
from cifar10 import *
#卷积层的数量基数
growth_k = 24
# dense block与transition block的个数
nb_block = 2
#初始学习率
init_learning_rate = 1e-4
#优化器ep值
epsilon = 1e-4
#dropout比率
dropout_rate = 0.2
#动量值
nesterov_momentum = 0.9
#每批次数据个数
batch_size = 64
#遍历所有数据的迭代次数
#batch_size * iteration = data_set_number
iteration = 782
#每迭代10次测试一次
test_iteration = 10
#总的epoch,共遍历300次数据集
total_epochs = 300
#图像大小
image_size = 32
#图像通道数
img_channels = 3
#数据集总的类别
class_num = 10
#卷积层计算
def conv_layer(input, filter, kernel, stride=1, layer_name="conv"):
#定义命名空间
with tf.name_scope(layer_name):
#给定输入,卷积核数量以及尺寸等进行计算
network = tf.layers.conv2d(inputs=input, use_bias=False, filters=filter, kernel_size=kernel, strides=stride, padding='SAME')
return network
def Global_Average_Pooling(x, stride=1):
"""
width = np.shape(x)[1]
height = np.shape(x)[2]
pool_size = [width, height]
return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
不使用tflearn操作全局平均池化
"""
#对卷积层特征图做全局平均池化
#可能需要安装h5py和curses
return global_avg_pool(x, name='Global_avg_pooling')
#对特征层批量归一化
def Batch_Normalization(x, training, scope):
with arg_scope([batch_norm],
scope=scope,
updates_collections=None,
decay=0.9,
center=True,
scale=True,
zero_debias_moving_mean=True) :
#training为True执行第一个lambda,否则执行第二个lambda
return tf.cond(training,
lambda : batch_norm(inputs=x, is_training=training, reuse=None),
lambda : batch_norm(inputs=x, is_training=training, reuse=True))
#使用dropout防止过拟合
def Drop_out(x, rate, training) :
return tf.layers.dropout(inputs=x, rate=rate, training=training)
#relu激活函数
def Relu(x):
return tf.nn.relu(x)
#平均池化
def Average_pooling(x, pool_size=[2,2], stride=2, padding='VALID'):
return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride, padding=padding)
#最大池化
def Max_Pooling(x, pool_size=[3,3], stride=2, padding='VALID'):
return tf.layers.max_pooling2d(inputs=x, pool_size=pool_size, strides=stride, padding=padding)
#拼接tensor
def Concatenation(layers) :
return tf.concat(layers, axis=3)
#dense block
def Linear(x) :
return tf.layers.dense(inputs=x, units=class_num, name='linear')
#测试
def Evaluate(sess):
#设置初始值
test_acc = 0.0
test_loss = 0.0
test_pre_index = 0
add = 1000
for it in range(test_iteration):
test_batch_x = test_x[test_pre_index: test_pre_index + add]
test_batch_y = test_y[test_pre_index: test_pre_index + add]
test_pre_index = test_pre_index + add
test_feed_dict = {
x: test_batch_x,
label: test_batch_y,
learning_rate: epoch_learning_rate,
training_flag: False
}
loss_, acc_ = sess.run([cost, accuracy], feed_dict=test_feed_dict)
test_loss += loss_ / 10.0
test_acc += acc_ / 10.0
summary = tf.Summary(value=[tf.Summary.Value(ta