11. CNN Visualization

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100  # batch size
n_batch = mnist.train.num_examples // batch_size  # total number of batches per epoch

def variable_summary(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)

# Initialize weights
def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

# Initialize biases
def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)

# Convolutional layer
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')

# Pooling layer
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
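
# Shape bookkeeping: with padding='SAME' and stride 1, conv2d preserves the
# spatial size (28x28 stays 28x28), while each 2x2 max-pool halves it
# (28x28 -> 14x14 -> 7x7), which is why fc1 below expects 7*7*64 inputs.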

with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')  # rows, columns; 28*28 = 784 columns
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('x_image'):
        x_image = tf.reshape(x, [-1,28,28,1], name='x_image')  # reshape x into a 4D tensor [batch, in_height, in_width, in_channels]

# Initialize the weights and biases of the first convolutional layer
with tf.name_scope('conv1'):
    with tf.name_scope('W_conv1'):
        W_conv1 = weight_variable([5,5,1,32], name='W_conv1')
    with tf.name_scope('b_conv1'):
        b_conv1 = bias_variable([32], name='b_conv1')
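
    # Note: variable_summary (defined above) is never called in this listing,
    # so only the scalar summaries for cross_entropy and accuracy reach
    # TensorBoard. To chart weight/bias statistics too, it could be invoked
    # here, for example:
    #     variable_summary(W_conv1)
    #     variable_summary(b_conv1)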

    # Convolve x_image with the weight tensor, add the bias, and apply the ReLU activation
    with tf.name_scope('conv2d_1'):
        conv2d_1 = conv2d(x_image, W_conv1) + b_conv1
    with tf.name_scope('relu'):
        h_conv1 = tf.nn.relu(conv2d_1)
    with tf.name_scope('h_pool1'):
        h_pool1 = max_pool_2x2(h_conv1)

# Initialize the weights and biases of the second convolutional layer
with tf.name_scope('conv2'):
    with tf.name_scope('W_conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')
    with tf.name_scope('b_conv2'):
        b_conv2 = bias_variable([64], name='b_conv2')

    with tf.name_scope('conv2d_2'):
        conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2
    with tf.name_scope('relu'):
        h_conv2 = tf.nn.relu(conv2d_2)
    with tf.name_scope('h_pool2'):
        h_pool2 = max_pool_2x2(h_conv2)  # apply max pooling

# Initialize the weights and biases of the first fully connected layer
with tf.name_scope('fc1'):
    with tf.name_scope('W_fc1'):
        W_fc1 = weight_variable([7*7*64, 1024], name='W_fc1')
    with tf.name_scope('b_fc1'):
        b_fc1 = bias_variable([1024], name='b_fc1')

    # Flatten the output of pooling layer 2 into one dimension; -1 stands for the batch size
    with tf.name_scope('h_pool2_flat'):
        h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64], name='h_pool2_flat')
    # Compute the output of the first fully connected layer
    with tf.name_scope('wx_plus_b1'):
        wx_plus_b1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
    with tf.name_scope('h_fc1'):
        h_fc1 = tf.nn.relu(wx_plus_b1)  # output of the first fully connected layer

    # keep_prob is the probability that each neuron's output is kept (dropout)
    with tf.name_scope('keep_prob'):
        keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    with tf.name_scope('h_fc1_drop'):
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name='h_fc1_drop')

# Initialize the second fully connected layer
with tf.name_scope('fc2'):
    with tf.name_scope('W_fc2'):
        W_fc2 = weight_variable([1024, 10], name='W_fc2')
    with tf.name_scope('b_fc2'):
        b_fc2 = bias_variable([10], name='b_fc2')
    with tf.name_scope('wx_plus_b2'):
        wx_plus_b2 = tf.matmul(h_fc1_drop,W_fc2) + b_fc2
    with tf.name_scope('prediction'):
        prediction = tf.nn.softmax(wx_plus_b2, name='prediction')  # convert logits to probabilities

with tf.name_scope('cross_entropy'):
    # softmax_cross_entropy_with_logits_v2 expects unscaled logits, so pass
    # wx_plus_b2 here rather than the already-softmaxed prediction
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=wx_plus_b2))
    tf.summary.scalar('cross_entropy', cross_entropy)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

merged = tf.summary.merge_all()  # collect every summary op defined above into a single op

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('logs/train', sess.graph)
    test_writer  = tf.summary.FileWriter('logs/test',  sess.graph)
    for epoch in range(1001):  # each "epoch" here is one training iteration (one batch)
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
        summary = sess.run(merged, feed_dict={x:batch_xs, y:batch_ys, keep_prob:1.0})
        train_writer.add_summary(summary, epoch)

        batch_xs, batch_ys = mnist.test.next_batch(batch_size)
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        test_writer.add_summary(summary, epoch)

        if epoch % 100 == 0:
            test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
            train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images[:10000], y: mnist.train.labels[:10000], keep_prob: 1.0})
            # print the iteration number and the corresponding accuracies
            print("Iter " + str(epoch) + ",Testing Accuracy " + str(test_acc)+ ",Training Accuracy " + str(train_acc))

Sample output from a training run:

Iter 0,Testing Accuracy 0.1196,Training Accuracy 0.1218
Iter 100,Testing Accuracy 0.6168,Training Accuracy 0.6058
Iter 200,Testing Accuracy 0.8017,Training Accuracy 0.8009
Iter 300,Testing Accuracy 0.8394,Training Accuracy 0.8407
Iter 400,Testing Accuracy 0.8514,Training Accuracy 0.8499
Iter 500,Testing Accuracy 0.8582,Training Accuracy 0.8592
Iter 600,Testing Accuracy 0.8608,Training Accuracy 0.8589
Iter 700,Testing Accuracy 0.8655,Training Accuracy 0.8654
Iter 800,Testing Accuracy 0.9509,Training Accuracy 0.9467
Iter 900,Testing Accuracy 0.9571,Training Accuracy 0.9524
Iter 1000,Testing Accuracy 0.9604,Training Accuracy 0.9605
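
With the summary files written to logs/train and logs/test, the recorded
scalars and the computation graph can be inspected in TensorBoard by running,
for example:

tensorboard --logdir=logs

and then opening the URL it prints (http://localhost:6006 by default) in a browser.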

For any convolutional network, the essential components are:
(1) Input layer: feeds the data into the network
(2) Convolutional layer: extracts features from the input using the given kernels, producing one feature map per kernel
(3) Pooling layer: reduces the dimensionality of the data, shrinking the feature maps
(4) Fully connected layer: recombines the extracted features and produces the output
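
As a minimal standalone sketch of how those pieces determine the tensor shapes
in the model above (the variable names here are illustrative, not from the listing):

import tensorflow as tf

x_image = tf.placeholder(tf.float32, [None, 28, 28, 1])  # input layer
W1 = tf.zeros([5, 5, 1, 32])                             # 32 kernels of size 5x5
h1 = tf.nn.max_pool(tf.nn.conv2d(x_image, W1, strides=[1,1,1,1], padding='SAME'),
                    ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
print(h1.shape)  # (?, 14, 14, 32): conv keeps 28x28, pooling halves it
W2 = tf.zeros([5, 5, 32, 64])
h2 = tf.nn.max_pool(tf.nn.conv2d(h1, W2, strides=[1,1,1,1], padding='SAME'),
                    ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
print(h2.shape)  # (?, 7, 7, 64): flattened to 7*7*64 = 3136 for the fc layer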
