1. CIFAR, a classic dataset for image recognition
CIFAR is a collection of 32x32x3 color images.
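As a quick sanity check on those dimensions, the sketch below loads CIFAR-10 through tf.keras.datasets (an assumption: this requires a TensorFlow build that bundles Keras) and prints the array shapes.

import tensorflow as tf

# CIFAR-10: 50,000 training and 10,000 test images, 32x32 pixels, 3 channels.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
print(x_train.shape)  # (50000, 32, 32, 3)
print(y_train.shape)  # (50000, 1) -- integer class labels in [0, 9]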
2. Introduction to convolutional neural networks
3. Common building blocks of convolutional neural networks
3.1 Convolutional layer
The depth of a convolutional layer's output equals the number of filters in that layer.
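To make this concrete, here is a minimal TF 1.x sketch (the shapes are chosen purely for illustration): 16 filters of size 5x5 applied to a 3-channel input produce an output of depth 16, the number of filters.

import tensorflow as tf

# A batch of one 32x32 RGB image.
images = tf.placeholder(tf.float32, [1, 32, 32, 3])
# 16 filters, each 5x5, spanning all 3 input channels.
filters = tf.get_variable(
    "filters", [5, 5, 3, 16],
    initializer=tf.truncated_normal_initializer(stddev=0.1))
conv = tf.nn.conv2d(images, filters, strides=[1, 1, 1, 1], padding="SAME")
print(conv.get_shape())  # (1, 32, 32, 16): output depth = number of filters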
3.2 Pooling layer
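Pooling shrinks the spatial dimensions of a feature map without changing its depth. A minimal sketch of the 2x2 max pooling used in the appendix code:

import tensorflow as tf

feature_map = tf.placeholder(tf.float32, [1, 28, 28, 32])
# 2x2 max pooling with stride 2 halves the height and width; depth is unchanged.
pooled = tf.nn.max_pool(feature_map, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding="SAME")
print(pooled.get_shape())  # (1, 14, 14, 32)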
4. Classic convolutional neural network models
How to structure a convolutional neural network
How to configure the parameters of convolutional and pooling layers
5. Transfer learning with convolutional networks
Transfer learning can be summarized as taking a model that already works well (both its network architecture and its trained parameters) and modifying, extending, or trimming it to build your own network. The benefit is a shorter model-building time and far fewer parameters to train.
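As a minimal TF 1.x sketch of the idea: the conv layer below mirrors layer1-conv1 from the appendix, while the 5-class head and the checkpoint directory are assumptions for illustration. Only the pretrained convolutional variables are restored; the fresh head is trained from scratch.

import tensorflow as tf

def conv_base(images):
    # Rebuild only the convolutional part of the pretrained network, using the
    # same variable scope and shapes as conv_mnist_inference.py.
    with tf.variable_scope("layer1-conv1"):
        w = tf.get_variable("weight", [5, 5, 1, 32],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.relu(tf.nn.bias_add(
            tf.nn.conv2d(images, w, strides=[1, 1, 1, 1], padding="SAME"), b))
    return tf.nn.max_pool(conv, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding="SAME")

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
base = conv_base(images)  # None x 14 x 14 x 32

# New task-specific head (hypothetical: 5 target classes), trained from scratch.
with tf.variable_scope("new-head"):
    flat = tf.reshape(base, [-1, 14 * 14 * 32])
    w = tf.get_variable("weight", [14 * 14 * 32, 5],
                        initializer=tf.truncated_normal_initializer(stddev=0.1))
    logits = tf.matmul(flat, w)

# Restore only the pretrained variables; "new-head" keeps its random init.
# Assumes a checkpoint produced by conv_mnist_train.py exists under ./conv/.
pretrained = [v for v in tf.global_variables() if v.name.startswith("layer1")]
saver = tf.train.Saver(pretrained)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    saver.restore(sess, tf.train.latest_checkpoint("./conv/"))

To freeze the reused layers entirely, pass only the new head's variables to the optimizer via the var_list argument of minimize.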
Appendix
conv_mnist_inference.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
"""
Forward propagation of a convolutional neural network based on the LeNet-5 model.
"""
import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10

IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10

# Size and depth of the first convolutional layer.
CONV1_DEEP = 32
CONV1_SIZE = 5
# Size and depth of the second convolutional layer.
CONV2_DEEP = 64
CONV2_SIZE = 5
# Number of nodes in the fully connected layer.
FC_SIZE = 512


# Forward propagation of the network. Note the distinction between
# training and testing: the `train` flag controls dropout.
def inference(input_tensor, train, regularizer):
    # Declare the variables of the first (convolutional) layer and implement
    # its forward pass. The input is a raw 28*28*1 MNIST image; with all-zero
    # padding the output is 28*28*32.
    with tf.variable_scope("layer1-conv1"):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        # Filters of side length 5 and depth 32, stride 1, all-zero padding.
        conv1 = tf.nn.conv2d(
            input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding="SAME")
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Forward pass of the second layer, a max pooling layer: filter side
    # length 2, all-zero padding, stride 2. Its input is the previous layer's
    # output, 28*28*32; its output is 14*14*32. Pooling does not change depth.
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(
            relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    # Declare the variables of the third (convolutional) layer and implement
    # its forward pass. Input 14*14*32, output 14*14*64.
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        # Filters of side length 5 and depth 64, stride 1, all-zero padding.
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding="SAME")
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Forward pass of the fourth (pooling) layer, analogous to the second
    # layer: input 14*14*64, output 7*7*64.
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(
            relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    # Convert the fourth layer's output into the input format of the fifth
    # (fully connected) layer: the 7*7*64 tensor must be flattened into a vector.
    pool_shape = pool2.get_shape().as_list()
    # Length of the flattened vector: the product of the feature map's height,
    # width, and depth. pool_shape[0] is the number of examples in one batch.
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # Reshape the fourth layer's output into a batch of vectors.
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Declare the variables of the fifth (fully connected) layer and implement
    # its forward pass. Input length 7*7*64 = 3136, output length 512.
    with tf.variable_scope("layer5-fc1"):
        fc1_weights = tf.get_variable(
            "weight", [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized.
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
            "bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        # During training, dropout randomly zeroes part of the outputs to
        # reduce overfitting; it is generally used only on fully connected layers.
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Declare the variables of the sixth (fully connected) layer and implement
    # its forward pass. Input length 512, output length 10; the final
    # classification is obtained by applying softmax to these logits.
    with tf.variable_scope("layer6-fc2"):
        fc2_weights = tf.get_variable(
            "weight", [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
            "bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        # No activation function here; the loss op applies softmax itself.
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit
conv_mnist_train.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import conv_mnist_inference

# Network configuration.
BATCH_SIZE = 100                # batch size
LEARNING_RATE_BASE = 0.8        # base learning rate
LEARNING_RATE_DECAY = 0.99      # learning rate decay
REGULARIZATION_RATE = 0.0001    # regularization strength
TRAINING_STEPS = 30000          # number of training steps
MOVING_AVERAGE_DECAY = 0.99     # moving average decay rate

# Path for saving the model.
MODEL_SAVE_PATH = "./conv/"
MODEL_NAME = "model.ckpt"


def train(mnist):
    # Adjust the input format: the network expects a four-dimensional tensor.
    x = tf.placeholder(dtype=tf.float32,
                       shape=[BATCH_SIZE,
                              conv_mnist_inference.IMAGE_SIZE,
                              conv_mnist_inference.IMAGE_SIZE,
                              conv_mnist_inference.NUM_CHANNELS],
                       name="x-input")
    y_ = tf.placeholder(tf.float32, [None, conv_mnist_inference.OUTPUT_NODE], name='y-out')
    # L2 regularization term added to the loss.
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # train=True enables dropout during training.
    y = conv_mnist_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving average operation,
    # and the training process.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Exponential decay:
    # lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY^(global_step / decay_steps)
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    # Each pass over the data must update the network parameters via
    # backpropagation...
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # ...and also update the moving average of every parameter. Either
    # tf.group or tf.control_dependencies can combine the two operations:
    # train_op = tf.group(train_step, variable_averages_op, name='train')
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # Training loop.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Reshape the flat 784-dimensional vectors into 4-D image tensors
            # with NumPy; calling tf.reshape here would add new nodes to the
            # graph on every iteration.
            reshape_xs = np.reshape(xs, [BATCH_SIZE,
                                         conv_mnist_inference.IMAGE_SIZE,
                                         conv_mnist_inference.IMAGE_SIZE,
                                         conv_mnist_inference.NUM_CHANNELS])
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: reshape_xs, y_: ys})
            if i % 10 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                # Save the model.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets("./MNIST", one_hot=True)
    train(mnist)


if __name__ == "__main__":
    tf.app.run()