1. Basic definitions
Fully connected NN: every neuron is connected to every neuron in the adjacent layers; the inputs are features and the outputs are predictions.
Number of parameters = (nodes in previous layer × nodes in next layer + nodes in next layer), i.e. the total count of weights w and biases b.
With so many parameters to optimize, the model overfits easily, so in practice features are usually extracted first and only then fed to the fully connected network.
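As a quick sanity check on that formula (a minimal sketch, not from the original post): a fully connected layer mapping MNIST's 784 raw pixels to 512 hidden nodes already needs about 400k parameters.

# Parameter count of one fully connected layer: prev_nodes*next_nodes + next_nodes (w plus b)
def fc_param_count(prev_nodes, next_nodes):
    return prev_nodes * next_nodes + next_nodes

print(fc_param_count(784, 10))   # 7850: 784 pixels straight to 10 logits
print(fc_param_count(784, 512))  # 401920: why FC networks on raw pixels blow up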
Convolution: slide a square kernel across every position of the image; at each position, multiply every pixel value in the covered region by the corresponding kernel weight, sum the products, and add a bias.
Output side length (no padding) = (input side length − kernel side length + 1) / stride
Padding (zero padding): SAME or VALID
SAME: output length = ⌈input length / stride⌉
VALID: output length = ⌈(input length − kernel length + 1) / stride⌉
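Plugging numbers into the two formulas (a small sketch; conv_output_length is a hypothetical helper, not a TensorFlow API):

import math

def conv_output_length(input_len, kernel_len, stride, padding):
    if padding == "SAME":
        # SAME: zero-pad so only the stride shrinks the output
        return math.ceil(input_len / stride)
    # VALID: no padding
    return math.ceil((input_len - kernel_len + 1) / stride)

print(conv_output_length(28, 5, 1, "SAME"))   # 28: a 5x5 kernel with SAME padding keeps 28x28
print(conv_output_length(28, 5, 1, "VALID"))  # 24
print(conv_output_length(28, 5, 2, "SAME"))   # 14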
def conv2d(x, w):
    return tf.nn.conv2d(x,                     # input description: [batch_size, 5, 5, 1]
                        w,                     # kernel description: [3, 3, 1, 16]
                        strides=[1, 1, 1, 1],  # kernel sliding stride
                        padding="SAME")        # padding scheme
Pooling: reduces the number of features. Max pooling (max_pool) extracts image texture; average pooling (avg_pool) preserves background features.
def max_pool_2x2(x):
    return tf.nn.max_pool(x,                     # input description
                          ksize=[1, 2, 2, 1],    # pooling kernel description
                          strides=[1, 2, 2, 1],  # sliding stride
                          padding="SAME")        # zero padding
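A quick shape check of the two helpers above (a sketch, assuming TensorFlow 1.x and the conv2d/max_pool_2x2 definitions just given):

x = tf.placeholder(tf.float32, [1, 28, 28, 1])
w = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
conv = conv2d(x, w)
pool = max_pool_2x2(conv)
print(conv.get_shape())  # (1, 28, 28, 32): SAME padding with stride 1 keeps 28x28
print(pool.get_shape())  # (1, 14, 14, 32): a 2x2 pool with stride 2 halves height and width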
Dropout: during training, discard a fraction of the neurons from the network with a given probability; at inference time the discarded neurons are reconnected.
fc1 = tf.nn.dropout(fc1, 0.5)
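Here 0.5 is the keep probability: each neuron's output survives with probability 0.5 and is scaled by 1/0.5 so the expected activation is unchanged. The forward-pass code below toggles dropout with an `if train:` switch; an equally common TF1 pattern (a sketch, not from the original post) is a keep_prob placeholder:

keep_prob = tf.placeholder(tf.float32)   # feed 0.5 while training, 1.0 at inference
fc1 = tf.nn.dropout(fc1, keep_prob)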
2. Example
Take the LeNet-5 network and the MNIST handwritten-digit data from the previous post as the example. In the figure, the left side is the structure from the original paper, which is modified into the structure on the right to suit our needs.
Code:
mnist_lenet5_forward.py
#coding:utf-8
import tensorflow as tf

IMAGE_SIZE = 28        # image size
NUM_CHANNELS = 1       # number of channels
CONV1_SIZE = 5         # first conv layer: 5*5*1*32
CONV1_KERNEL_NUM = 32
CONV2_SIZE = 5         # second conv layer: 5*5*32*64
CONV2_KERNEL_NUM = 64
FC_SIZE = 512          # fully connected layer: 512 nodes
OUTPUT_NODE = 10       # ten labels, 0~9

#1. Define the network parameters w and b, and the forward propagation
def get_weight(shape, regularizer):
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1), dtype=tf.float32)
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.zeros(shape=shape))
    return b

# Convolution: takes the input and the kernel
def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")

# Max pooling: takes the input
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

# Forward propagation
def forward(x, train, regularizer):
    # first conv layer: 5*5*1*32
    conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer)  # weights of the first conv layer
    conv1_b = get_bias([CONV1_KERNEL_NUM])  # 32
    conv1 = conv2d(x, conv1_w)
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))
    pool1 = max_pool_2x2(relu1)
    # second conv layer: 5*5*32*64
    conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM], regularizer)  # weights of the second conv layer
    conv2_b = get_bias([CONV2_KERNEL_NUM])  # 64
    conv2 = conv2d(pool1, conv2_w)
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
    pool2 = max_pool_2x2(relu2)
    # flatten to [batch, 7*7*64]
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
    # fully connected layer: 512 nodes
    fc1_w = get_weight([nodes, FC_SIZE], regularizer)
    fc1_b = get_bias([FC_SIZE])
    fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
    if train:
        fc1 = tf.nn.dropout(fc1, 0.5)
    fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)
    fc2_b = get_bias([OUTPUT_NODE])
    y = tf.matmul(fc1, fc2_w) + fc2_b
    return y
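For a 28x28x1 input the shapes flow as: conv1 (SAME) -> 28x28x32, pool1 -> 14x14x32, conv2 (SAME) -> 14x14x64, pool2 -> 7x7x64, flatten -> 3136, fc1 -> 512, fc2 -> 10. A minimal check of the final shape (a hypothetical snippet, assuming the file above is importable):

import tensorflow as tf
import mnist_lenet5_forward as fw

x = tf.placeholder(tf.float32, [1, fw.IMAGE_SIZE, fw.IMAGE_SIZE, fw.NUM_CHANNELS])
y = fw.forward(x, False, None)   # train=False: no dropout; no regularizer
print(y.get_shape())             # (1, 10): one logit per digit class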
mnist_lenet5_backward.py
#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_lenet5_forward
import os
import numpy as np

STEPS = 50000
BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.1      # initial learning rate
LEARNING_RATE_DECAY = 0.99    # learning-rate decay rate
REGULARIZER = 0.0001          # regularization weight
MOVING_AVERAGE_DECAY = 0.99   # moving-average decay
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"

def backward(mnist):
    # the input of a CNN must be 4-dimensional
    x = tf.placeholder(tf.float32,
                       [BATCH_SIZE,                          # images per batch
                        mnist_lenet5_forward.IMAGE_SIZE,     # image height
                        mnist_lenet5_forward.IMAGE_SIZE,     # image width
                        mnist_lenet5_forward.NUM_CHANNELS])  # number of channels
    y_ = tf.placeholder(tf.float32, shape=(None, mnist_lenet5_forward.OUTPUT_NODE))
    y = mnist_lenet5_forward.forward(x, True, REGULARIZER)  # forward propagation
    # counter for how many batches have been run; starts at 0 and is not trainable
    global_step = tf.Variable(0, trainable=False)
    # exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    # loss function
    loss_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    loss_cem = tf.reduce_mean(loss_ce)
    loss_total = loss_cem + tf.add_n(tf.get_collection('losses'))
    # backpropagation method, with regularization
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_total, global_step=global_step)
    # moving average
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    # after backprop updates the parameters, update each parameter's moving average;
    # the following performs both operations in one step
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name="train")
    # once the moving-average model is set up, simply run(train_op) at each training step
    # saver
    saver = tf.train.Saver()
    # run the session
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            #global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, [BATCH_SIZE,
                                          mnist_lenet5_forward.IMAGE_SIZE,
                                          mnist_lenet5_forward.IMAGE_SIZE,
                                          mnist_lenet5_forward.NUM_CHANNELS])
            _, loss_value, step = sess.run([train_op, loss_total, global_step], feed_dict={x: reshaped_xs, y_: ys})
            if i % 1000 == 0:
                print('after ' + str(step) + ' steps, loss is ' + str(loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main():
    mnist = input_data.read_data_sets('./data/', one_hot=True)
    backward(mnist)

if __name__ == "__main__":
    main()
mnist_lenet_test.py and mnist_app.py are exactly the same as in the previous post.
Results: my old laptop runs too slowly; after an entire afternoon it had not yet finished 1000 iterations. I'll paste the partial result for now and let it keep running.