Tensorflow深度学习笔记(四)--CNN手写数字识别可视化

数据集:MNIST
模型:CNN;卷积层数:2;全连接层数:3,全连接层Dropout Rate:0.8;激活函数:Relu
损失函数:交叉熵
Optimizer:AdamOptimizer
可视化工具:tensorboard


先看结果:
训练21epoch,测试集accuracy如下:

Iter 0, Testing Accuracy= 0.9455
Iter 1, Testing Accuracy= 0.9692
Iter 2, Testing Accuracy= 0.9775
Iter 3, Testing Accuracy= 0.9818
Iter 4, Testing Accuracy= 0.9825
Iter 5, Testing Accuracy= 0.9843
Iter 6, Testing Accuracy= 0.9856
Iter 7, Testing Accuracy= 0.9875
Iter 8, Testing Accuracy= 0.9871
Iter 9, Testing Accuracy= 0.989
Iter 10, Testing Accuracy= 0.9902
Iter 11, Testing Accuracy= 0.9895
Iter 12, Testing Accuracy= 0.99
Iter 13, Testing Accuracy= 0.9915
Iter 14, Testing Accuracy= 0.9904
Iter 15, Testing Accuracy= 0.9913
Iter 16, Testing Accuracy= 0.9917
Iter 17, Testing Accuracy= 0.9918
Iter 18, Testing Accuracy= 0.9916
Iter 19, Testing Accuracy= 0.9908
Iter 20, Testing Accuracy= 0.9918


tensorboard结果展示

loss

Accuracy
卷积层1 Weight变化
conv1Weight
卷积层1 Bias变化
卷积层1 Bias变化
全连接层1 Weight变化
全连接层1 Weight变化
全连接层1 Bias变化
全连接层1 Bias变化
Graph
Graph
embedding
embedding


附代码:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.tensorboard.plugins import projector

# Load the MNIST dataset (downloads to MNIST_data/ if absent; labels one-hot).
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

# Mini-batch size and the number of batches per epoch.
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Number of test images visualized in the embedding projector.
image_num = 10000
# Output root for projector files — adjust to your environment.
DIR = "F:/dl/"

# Embedding tensor: the first `image_num` test images, frozen (not trained).
embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]),
                        trainable=False, name='embedding')

def variable_summary(var):
    """Attach mean/stddev/max/min scalar summaries to `var` for TensorBoard.

    Args:
        var: a TensorFlow tensor (typically a weight or bias variable).
    """
    with tf.name_scope('summary'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        # Standard deviation computed manually: sqrt(E[(x - mean)^2]).
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))

with tf.name_scope('input'):
    # Input placeholders: flattened 28x28 grayscale images and one-hot labels.
    x = tf.placeholder(tf.float32, shape=[None, 784], name='x-input')
    y = tf.placeholder(tf.float32, shape=[None, 10], name='y-input')
    # Reshape to NHWC (batch, 28, 28, 1) for the convolutional layers.
    x_image = tf.reshape(x, shape=[-1, 28, 28, 1], name='x_image')

with tf.name_scope('input_reshape'):
    # Log up to 10 input images to TensorBoard for visual inspection.
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('input', image_shaped_input, 10)

def weight_variable(shape, name):
    """Create a weight variable initialized from a truncated normal (stddev=0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name):
    """Create a bias variable initialized to the constant 0.1 (avoids dead ReLUs)."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)

def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (output keeps spatial size)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves each spatial dim)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

with tf.name_scope('conv1'):
    # First conv layer: 5x5 kernels, 1 input channel -> 32 feature maps.
    W_conv1 = weight_variable([5, 5, 1, 32], name='W_conv1')
    B_conv1 = bias_variable([32], name='B_conv1')
    variable_summary(W_conv1)
    variable_summary(B_conv1)
    # Convolution + ReLU + 2x2 max pooling (28x28 -> 14x14).
    conv2d_1 = conv2d(x_image, W_conv1) + B_conv1
    h_conv_1 = tf.nn.relu(conv2d_1, name='h_conv2d_1')
    h_pool_1 = max_pool_2x2(h_conv_1)

with tf.name_scope('conv2'):
    # Second conv layer: 5x5 kernels, 32 -> 64 feature maps.
    W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')
    B_conv2 = bias_variable([64], name='B_conv2')
    variable_summary(W_conv2)
    variable_summary(B_conv2)

    # Convolution + ReLU + 2x2 max pooling (14x14 -> 7x7).
    conv2d_2 = conv2d(h_pool_1, W_conv2) + B_conv2
    h_conv_2 = tf.nn.relu(conv2d_2, name='h_conv_2')
    h_pool_2 = max_pool_2x2(h_conv_2)

    # Flatten the pooled feature maps to a vector for the dense layers.
    h_pool2_vec = tf.reshape(h_pool_2, [-1, 7 * 7 * 64])

# Dropout keep probability (fed as 0.8 during training, 1.0 at evaluation).
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

with tf.name_scope('fc1'):
    # First fully connected layer: 7*7*64 -> 1024.
    W_fc_1 = weight_variable([7 * 7 * 64, 1024], name='W_fc_1')
    B_fc_1 = bias_variable([1024], name='b_fc_1')
    variable_summary(W_fc_1)
    variable_summary(B_fc_1)

    # ReLU activation followed by dropout.
    h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_vec, W_fc_1) + B_fc_1)
    h_fc1_drop = tf.nn.dropout(h_fc_1, keep_prob=keep_prob)

with tf.name_scope('fc2'):
    # Second fully connected layer: 1024 -> 200.
    W_fc_2 = weight_variable([1024, 200], name='W_fc_2')
    B_fc_2 = bias_variable([200], name='B_fc_2')
    variable_summary(W_fc_2)
    variable_summary(B_fc_2)

    # ReLU activation followed by dropout.
    h_fc_2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc_2) + B_fc_2)
    h_fc2_drop = tf.nn.dropout(h_fc_2, keep_prob=keep_prob)

with tf.name_scope('fc3'):
    # Output layer: 200 -> 10 class scores, softmax to class probabilities.
    W_fc_3 = weight_variable([200, 10], name='W_fc_3')
    B_fc_3 = bias_variable([10], name='B_fc_3')
    variable_summary(W_fc_3)
    variable_summary(B_fc_3)

    # `prediction` holds probabilities (used for accuracy); the loss should be
    # computed from the pre-softmax logits, not from this tensor.
    prediction = tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc_3) + B_fc_3)

with tf.name_scope('loss'):
    # Cross-entropy loss. softmax_cross_entropy_with_logits expects RAW
    # logits; the original passed `prediction` (already softmaxed), which
    # applies softmax twice and distorts the gradients. Recompute the
    # pre-softmax scores from the fc3 parameters instead.
    logits = tf.matmul(h_fc2_drop, W_fc_3) + B_fc_3
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    tf.summary.scalar('loss', cross_entropy)
    # Fixed learning rate held in a (non-trainable by convention) variable.
    lr = tf.Variable(0.0001, dtype=tf.float32)
    train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

with tf.name_scope('accuracy'):
    # Fraction of samples whose argmax prediction matches the one-hot label.
    predict_result = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(predict_result, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

# Single op that evaluates every summary registered above.
merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/test', sess.graph)

    # Write the projector metadata file: one class label per line, aligned
    # with the rows of the `embedding` variable.
    if tf.gfile.Exists(DIR + 'projector/projector1/metadata.tsv'):
        tf.gfile.DeleteRecursively(DIR + 'projector/projector1/metadata.tsv')
    with open(DIR + 'projector/projector1/metadata.tsv', 'w') as f:
        labels = sess.run(tf.argmax(mnist.test.labels[:], 1))
        for i in range(image_num):
            f.write(str(labels[i]) + '\n')

    # Configure the TensorBoard embedding projector (metadata + sprite image).
    projector_writer = tf.summary.FileWriter(DIR + 'projector/projector1', sess.graph)
    saver = tf.train.Saver()
    config = projector.ProjectorConfig()
    embed = config.embeddings.add()
    embed.tensor_name = embedding.name
    embed.metadata_path = DIR + 'projector/projector1/metadata.tsv'
    embed.sprite.image_path = DIR + 'projector/data/mnist_10k_sprite.png'
    embed.sprite.single_image_dim.extend([28, 28])
    projector.visualize_embeddings(projector_writer, config)

    # Train for 21 epochs; log run metadata and the last batch's summaries
    # once per epoch, then evaluate on the full test set (dropout disabled).
    for epoch in range(21):
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        for batch in range(n_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            summary, _ = sess.run([merged, train_step],
                                  feed_dict={x: batch_x, y: batch_y, keep_prob: 0.8},
                                  options=run_options, run_metadata=run_metadata)
        projector_writer.add_run_metadata(run_metadata, 'step%03d' % epoch)
        projector_writer.add_summary(summary, epoch)
        test_acc = sess.run(accuracy,
                            feed_dict={x: mnist.test.images,
                                       y: mnist.test.labels,
                                       keep_prob: 1.0})
        print("Iter " + str(epoch) + ", Testing Accuracy= " + str(test_acc))

    # Persist the trained model so the projector can load the embedding.
    saver.save(sess, DIR + 'projector/projector1/a_model.ckpt')
    projector_writer.close()

附:本人第一篇博客,还不太会用……后面会补上之前写的BP神经网络,以及后面会学习的RNN,GAN,RL。时间允许的话也会更一些ML内容,欢迎小伙伴一起交流。



  • 0
    点赞
  • 13
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值