TensorFlow MNIST dataset CNN demo

The program has two parts:
inference defines the CNN's forward pass
train reduces the loss step by step via gradient descent

# -*- coding: utf-8 -*-
# @Time    : 2018/3/31 15:08
# @Author  : timothy
'''
    MNIST handwritten-digit recognition with a CNN
    Input: 28*28*1
    Conv layer: 5*5 kernel, depth 32, stride 1, zero padding    28*28*1  -> 28*28*32
    Pooling layer: 2*2 filter, stride 2                         28*28*32 -> 14*14*32
    Conv layer: 5*5 kernel, depth 64, stride 1, zero padding    14*14*32 -> 14*14*64
    Pooling layer: 2*2 filter, stride 2                         14*14*64 -> 7*7*64
    Fully connected layer: 512 nodes                            7*7*64   -> 512
    Fully connected layer: 10 nodes                             512      -> 10
    Output: softmax
'''
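# A quick sanity check of the shapes listed above (added here, not in the original post):
# with padding='SAME', output size = ceil(input_size / stride), so
#   conv (stride 1):  28 -> ceil(28 / 1) = 28
#   pool (stride 2):  28 -> ceil(28 / 2) = 14, then 14 -> 7
# flattened size fed to the first fully connected layer: 7 * 7 * 64 = 3136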

import tensorflow as tf


INPUT_NODE = 784
OUTPUT_NODE = 10

IMAGE_SIZE = 28
NUM_CHANNELS = 1  # number of channels; MNIST images are grayscale, so 1
NUM_LABELS = 10

# conv layer 1
CONV1_DEEP = 32
CONV1_SIZE = 5

# conv layer 2
CONV2_DEEP = 64
CONV2_SIZE = 5

# number of nodes in the fully connected layer
FC_SIZE = 512


# forward pass
def inference(input_tensor, train, regularizer):
    # conv layer 1: 28*28*1 -> 28*28*32
    # the variable scope keeps the 'weight' and 'biases' names from clashing with later layers
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable('weight', [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable('biases', [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        # the first and last stride entries must be 1; the middle two are the strides along
        # height and width; padding='SAME' means zero padding
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # pooling layer 1: 28*28*32 -> 14*14*32
    with tf.variable_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
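        # ksize = [1, height, width, 1]: the 2*2 pooling window is applied per example and per channel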

    # conv layer 2: 14*14*32 -> 14*14*64
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable('weight', [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable('biases', [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        # same stride and padding conventions as conv layer 1
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # pooling layer 2: 14*14*64 -> 7*7*64
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # flatten the [7, 7, 64] pooling output into a vector
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
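    # for this network pool_shape is [batch, 7, 7, 64], so nodes = 7 * 7 * 64 = 3136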

    # fully connected layer 1
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable('weight', [nodes, FC_SIZE],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        # only the fully connected weights are L2-regularized
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('biases', [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)  # dropout at training time to reduce overfitting

    # fully connected layer 2
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable('weight', [FC_SIZE, NUM_LABELS],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('biases', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
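
Before wiring up training, the forward pass can be smoke-tested on its own. The snippet below is not part of the original post; it assumes the code above is saved as mnist_cnn_inference.py (the name the training script imports) and simply checks that one dummy image produces a (1, 10) logit tensor:

import numpy as np
import tensorflow as tf
import mnist_cnn_inference as mci

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [1, mci.IMAGE_SIZE, mci.IMAGE_SIZE, mci.NUM_CHANNELS])
    logit = mci.inference(x, False, None)  # no dropout, no regularization
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(logit, feed_dict={x: np.zeros((1, 28, 28, 1), dtype=np.float32)})
        print(out.shape)  # expected: (1, 10)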
# -*- coding: utf-8 -*-
# @Time    : 2018/4/1 13:14
# @Author  : timothy


import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_cnn_inference as mci
import numpy as np


# constants
BATCH_SIZE = 100   # a batch size of 1 gives stochastic gradient descent; the full dataset gives batch gradient descent
LEARNING_RATE_BASE = 0.8  # base learning rate (quite aggressive for a CNN; smaller values such as 0.01 are more common)
LEARNING_RATE_DECAY = 0.99  # decay rate of the learning rate
REGULARIZATION_RATE = 0.0001  # weight of the regularization term in the loss
TRAINING_STEP = 5000  # number of training steps
MOVING_AVERAGE_DECAY = 0.99  # decay rate for the moving averages


# training
def train(mnist):
    x = tf.placeholder(tf.float32, [BATCH_SIZE, mci.IMAGE_SIZE, mci.IMAGE_SIZE, mci.NUM_CHANNELS], name='x-input')  # input images
    y_ = tf.placeholder(tf.float32, [None, mci.OUTPUT_NODE], name='y-input')  # image labels 0-9

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)  # L2 regularization
    y = mci.inference(x, True, regularizer)  # forward pass; train=True enables dropout in fc1
    global_step = tf.Variable(0, trainable=False)  # step counter; excluded from training
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # loss function
    # sparse_softmax_cross_entropy_with_logits expects integer class indices,
    # so the one-hot labels are converted with tf.argmax
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_, 1), logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)  # mean cross entropy over the current batch

    # total loss = cross entropy + the L2 terms collected in the 'losses' collection
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # exponentially decaying learning rate; decay_steps is the number of batches per epoch
    decay_steps = mnist.train.num_examples // BATCH_SIZE
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, decay_steps, LEARNING_RATE_DECAY)
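    # exponential_decay implements (per the TF 1.x docs, staircase=False by default):
    #   decayed_lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps)
    # e.g. after one epoch of 55000 / 100 = 550 steps: 0.8 * 0.99 ** 1 = 0.792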

    # one gradient-descent step; passing global_step makes minimize() increment it
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # group the gradient step and the moving-average update into a single train op
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # accuracy: fraction of examples whose predicted class matches the label
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # run the training
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        # prepare a fixed test batch
        x_test, y_test = mnist.test.next_batch(BATCH_SIZE)
        x_test_reshaped = np.reshape(x_test, (BATCH_SIZE, mci.IMAGE_SIZE, mci.IMAGE_SIZE, mci.NUM_CHANNELS))
        test_feed = {x: x_test_reshaped, y_: y_test}
        # training loop
        for i in range(TRAINING_STEP):
            # fetch a fresh batch of BATCH_SIZE images for every step
            # (the original fetched a single batch before the loop and reused it)
            x_train, y_train = mnist.train.next_batch(BATCH_SIZE)
            x_train_reshaped = np.reshape(x_train, (BATCH_SIZE, mci.IMAGE_SIZE, mci.IMAGE_SIZE, mci.NUM_CHANNELS))
            session.run(train_op, feed_dict={x: x_train_reshaped, y_: y_train})
            # evaluate on the test batch every 100 steps
            if i % 100 == 0:
                test_acc = session.run(accuracy, feed_dict=test_feed)
                print('after %d training steps, test accuracy is %g' % (i, test_acc))


def main():
    mnist = input_data.read_data_sets('resource/', one_hot=True)
    train(mnist)


if __name__ == '__main__':
    main()
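
One loose end: the script maintains moving averages of all trainable variables but never reads them back. In the usual TF 1.x pattern the shadow (averaged) values are loaded at evaluation time. A minimal sketch, reusing the constants above and assuming a checkpoint was saved with tf.train.Saver during training (the checkpoint path below is hypothetical):

import tensorflow as tf
import mnist_cnn_inference as mci

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [BATCH_SIZE, mci.IMAGE_SIZE, mci.IMAGE_SIZE, mci.NUM_CHANNELS])
    y = mci.inference(x, False, None)  # rebuild the graph without dropout or regularization
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    # variables_to_restore() maps each variable name to its shadow (averaged) counterpart
    saver = tf.train.Saver(variable_averages.variables_to_restore())
    with tf.Session() as sess:
        saver.restore(sess, 'resource/model.ckpt')  # hypothetical checkpoint path
        # ...evaluate the accuracy op with the restored averaged weights...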