TensorFlow 1.x, Part 4

LeNet-5 implementation

  • Convolution is an effective way to extract image features; a convolution kernel can be thought of as a different way of looking at the image (a quick feature-map size check follows this list).
  • LeNet-5 implementation
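The implementation below is split into three files: forward.py defines the network, backward.py trains it (importing forward), and test.py evaluates it (importing both). As a warm-up, here is a minimal, illustrative sketch (not part of the model code) of the feature-map sizes the layer definitions must agree on: with SAME padding a convolution keeps the spatial size (output = ceil(input / stride)), and each 2x2/stride-2 max pool halves it.

# Illustrative sanity check of LeNet-5 feature-map sizes, assuming the
# hyperparameters defined in forward.py below.
import math

def conv_same_out(size, stride=1):
    # SAME padding: output = ceil(input / stride)
    return math.ceil(size / stride)

size = 28                        # MNIST input, 28x28x1
size = conv_same_out(size)       # conv1 (5x5, stride 1, SAME) -> 28
size //= 2                       # pool1 (2x2, stride 2)       -> 14
size = conv_same_out(size)       # conv2 (5x5, stride 1, SAME) -> 14
size //= 2                       # pool2 (2x2, stride 2)       -> 7
print(size * size * 64)          # 3136 flattened features feed fc1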
# forward.py
import tensorflow as tf
# Network hyperparameters
# Image size and channel count the network accepts
IMAGE_SIZE = 28
NUM_CHANNELS = 1
# Size and number of kernels in the first convolutional layer
CONV1_SIZE = 5
CONV1_KERNEL_NUM = 32
# Size and number of kernels in the second convolutional layer
CONV2_SIZE = 5
CONV2_KERNEL_NUM = 64
# Number of neurons in the third layer (fully connected)
FC_SIZE = 512
# Number of neurons in the fourth layer (fully connected output)
OUTPUT_NODE = 10
# Weight initialization helper
def get_weight(shape,regularizer):
    # Truncated normal: draws more than 2 standard deviations from the mean
    # are redrawn; stddev sets the standard deviation
    w = tf.Variable(tf.truncated_normal(shape,stddev=0.1))
    # L2 regularization: penalizing large weights keeps the model from
    # fitting random noise in the training data
    if regularizer is not None:
        tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b
'''
TensorFlow's convolution op:
tf.nn.conv2d(
            input,    # [batch, 5 (rows), 5 (cols), 1 (channels)]
            filter,   # [3 (size), 3, 1 (channels), 16 (number of kernels)]
            strides,  # [1 (fixed), 1 (row stride), 1 (column stride), 1 (fixed)]
            padding = 'VALID' or 'SAME')
    1) Input description: batch is how many images are fed in at once, then
each image's resolution (e.g. 5 rows by 5 columns) and its channel count:
1 for grayscale, 3 for RGB color.
    2) Kernel description: the kernel's row and column resolution, its channel
count, and how many kernels are used. In the example above the kernels are
3x3 and single-channel, and there are 16 of them. The kernel channel count is
determined by the input's channel count and must equal it, hence 1 here.
Using 16 kernels means the output feature map has depth 16, i.e. the output
has 16 channels.
    3) Stride description: the second element is the horizontal stride and the
third is the vertical stride; the first and last elements are fixed at 1.
Here both strides are 1.
    4) Padding: 'VALID' in this example. Note that it is given as a string.
'''
def conv2d(x,w):
    return tf.nn.conv2d(x,w,[1,1,1,1],'SAME')
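# A quick shape check for the wrapper above (illustrative, left commented out
# so the module stays importable): with SAME padding and stride 1 the 28x28
# spatial size is preserved and the output depth becomes the kernel count.
# x_demo = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
# w_demo = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], None)
# print(conv2d(x_demo, w_demo).get_shape())  # (?, 28, 28, 32)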
'''
TensorFlow's pooling ops (max_pool shown; tf.nn.avg_pool has the same signature):
pool = tf.nn.max_pool(
            input,    # [batch, 28, 28, 3]
            ksize,    # [1, 2, 2, 1]
            strides,  # [1, 2, 2, 1]
            padding = 'SAME')
    1) Input description: batch images per step, their row and column
resolution, and the number of input channels.
    2) Pooling window description: only the row and column resolution are
given; the first and last elements are fixed at 1.
    3) Stride description: only the horizontal and vertical strides are
given; the first and last elements are fixed at 1.
    4) Padding: either zero-padded 'SAME' or unpadded 'VALID'.
'''
def max_pool_2x2(x):
    return tf.nn.max_pool(x,[1,2,2,1],[1,2,2,1],'SAME')
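# Shape check (illustrative): a 2x2 window with stride 2 and SAME padding
# halves each spatial dimension.
# print(max_pool_2x2(tf.zeros([1, 28, 28, 32])).get_shape())  # (1, 14, 14, 32)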

def forward(x,train,regularizer):
    conv1_w = get_weight([CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_KERNEL_NUM],regularizer)
    conv1_b = get_bias([CONV1_KERNEL_NUM])
    conv1 = conv2d(x,conv1_w)
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_b))
    pool1 = max_pool_2x2(relu1)

    # The second conv layer takes pool1 as input, so its kernels must have
    # CONV1_KERNEL_NUM input channels
    conv2_w = get_weight([CONV2_SIZE,CONV2_SIZE,CONV1_KERNEL_NUM,CONV2_KERNEL_NUM],regularizer)
    conv2_b = get_bias([CONV2_KERNEL_NUM])
    conv2 = conv2d(pool1,conv2_w)
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_b))
    pool2 = max_pool_2x2(relu2)

    # get_shape returns the tensor's static dimensions; as_list() converts
    # them to a Python list
    pool_shape = pool2.get_shape().as_list()
    # pool_shape[0] is the batch size; the rest are rows, cols and depth
    nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]
    # Flatten pool2 into [batch, nodes] to feed the fully connected layers
    reshaped = tf.reshape(pool2,[pool_shape[0],nodes])

    fc1_w = get_weight([nodes,FC_SIZE],regularizer)
    fc1_b = get_bias([FC_SIZE])
    fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_w)+fc1_b)
    # Dropout only during training; in TF 1.x the second argument is keep_prob
    if train:
        fc1 = tf.nn.dropout(fc1,0.5)

    fc2_w = get_weight([FC_SIZE,OUTPUT_NODE],regularizer)
    fc2_b = get_bias([OUTPUT_NODE])
    y = tf.matmul(fc1,fc2_w)+fc2_b
    return y
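For reference, these are the shapes flowing through forward with a batch of 100 MNIST images, derived from the SAME-padding and pooling rules above:

x        [100, 28, 28, 1]
conv1    [100, 28, 28, 32]
pool1    [100, 14, 14, 32]
conv2    [100, 14, 14, 64]
pool2    [100, 7, 7, 64]
reshaped [100, 3136]
fc1      [100, 512]
y        [100, 10]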
# backward.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import forward
import os
import numpy as np

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.005
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"

def backward(mnist):
    x = tf.placeholder(tf.float32,[
        BATCH_SIZE,
        forward.IMAGE_SIZE,
        forward.IMAGE_SIZE,
        forward.NUM_CHANNELS
    ])
    y_ = tf.placeholder(tf.float32,[None,forward.OUTPUT_NODE])
    y = forward.forward(x,True,REGULARIZER)
    global_step = tf.Variable(0,trainable=False)

    '''
    sparse_softmax_cross_entropy_with_logits(
                _sentinel=None,
                labels=None,
                logits=None,
                name=None)
    logits is the output of the network's last layer, with shape
    [batch_size, num_classes]. In the sparse version, labels holds each
    example's true class index, with shape [batch_size]; that is why
    tf.argmax(y_,1) below converts the one-hot labels to indices first.
    The op applies softmax to the logits to get a per-example probability
    vector, then computes the cross entropy against the labels and returns
    a vector of per-example losses.
    '''
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))
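    # ce has shape [BATCH_SIZE] (one loss per example); cem averages it to a
    # scalar, and the 'losses' collection holds the L2 terms registered in
    # forward.get_weight, so loss = cross entropy + weight decay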

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True
        )
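    # With staircase=True the rate decays in discrete steps, once per epoch:
    #   learning_rate = LEARNING_RATE_BASE *
    #                   LEARNING_RATE_DECAY ** (global_step // decay_steps)
    # With the default split, mnist.train.num_examples is 55000, so
    # decay_steps = 550 and after 5500 steps (10 epochs) the rate is
    # 0.005 * 0.99**10 ≈ 0.00452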

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step,ema_op]):
        train_op = tf.no_op('train')
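    # Each trainable variable gets a shadow copy updated as
    #   shadow = decay * shadow + (1 - decay) * variable
    # (since num_updates is supplied, the effective decay is
    # min(MOVING_AVERAGE_DECAY, (1 + global_step) / (10 + global_step))).
    # Grouping train_step and ema_op under one no-op lets a single
    # sess.run(train_op) perform both the gradient step and the EMA update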

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess,ckpt.model_checkpoint_path)
        for i in range(STEPS):
            xs,ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs,(
                BATCH_SIZE,
                forward.IMAGE_SIZE,
                forward.IMAGE_SIZE,
                forward.NUM_CHANNELS
                                     ))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            if i % 100 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
if __name__ == '__main__':
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    backward(mnist)
# test.py
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import forward
import backward
import numpy as np

def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32,[
            mnist.test.num_examples,
            forward.IMAGE_SIZE,
            forward.IMAGE_SIZE,
            forward.NUM_CHANNELS
        ])
        y_ = tf.placeholder(tf.float32,[None,forward.OUTPUT_NODE])
        y = forward.forward(x,False,backward.REGULARIZER)

        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
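        # variables_to_restore() maps each variable's moving-average (shadow)
        # name to the variable itself, so restoring loads the smoothed EMA
        # weights saved during training instead of the raw final weights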

        correct_prediction = tf.equal(tf.argmax(y_,1),tf.argmax(y,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                reshaped_x = np.reshape(mnist.test.images, (
                    mnist.test.num_examples,
                    forward.IMAGE_SIZE,
                    forward.IMAGE_SIZE,
                    forward.NUM_CHANNELS))
                accuracy_score = sess.run(accuracy, feed_dict={x: reshaped_x, y_: mnist.test.labels})
                print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
            else:
                print('No checkpoint file found')
                return
if __name__ == '__main__':
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    test(mnist)
