TensorBoard dimensionality-reduced distribution plots: visualizing CNN data with TensorBoard

1. CNN_my_test.py

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
print('Data OK')
print(mnist.train.images[0].shape)

def weight_initializer(shape):
    initializer = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initializer)

def biases_initializer(shape):
    initializer = tf.constant(0.1, shape=shape)
    return tf.Variable(initializer)

x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('input', x_image, 1)

# Conv layer 1: 5x5 kernels, 1 input channel -> 32 channels, then 2x2 max pooling.
wc1 = weight_initializer([5, 5, 1, 32])
bc1 = biases_initializer([32])
hc1 = tf.nn.relu(tf.nn.conv2d(x_image, wc1, strides=[1, 1, 1, 1], padding='SAME') + bc1)
pool_hc1 = tf.nn.max_pool(hc1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Conv layer 2: 5x5 kernels, 32 -> 64 channels, again followed by 2x2 max pooling.
wc2 = weight_initializer([5, 5, 32, 64])
bc2 = biases_initializer([64])
hc2 = tf.nn.relu(tf.nn.conv2d(pool_hc1, wc2, strides=[1, 1, 1, 1], padding='SAME') + bc2)
pool_hc2 = tf.nn.max_pool(hc2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Fully connected layer: flatten the 7x7x64 feature maps into 1024 units.
wd1 = weight_initializer([7*7*64, 1024])
bd1 = biases_initializer([1024])
hc2_flat = tf.reshape(pool_hc2, [-1, 7*7*64])
hd1 = tf.nn.relu(tf.matmul(hc2_flat, wd1) + bd1)
hd1_dp = tf.nn.dropout(hd1, keep_prob=0.7)  # note: this fixed keep_prob also stays active at evaluation time

# Output layer: keep the raw logits separate, because
# softmax_cross_entropy_with_logits applies softmax internally.
wd2 = weight_initializer([1024, 10])
bd2 = biases_initializer([10])
y_logits = tf.matmul(hd1_dp, wd2) + bd2
y_conv = tf.nn.softmax(y_logits)

cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=y_logits, labels=y))
tf.summary.scalar('cross_entropy', cross_entropy)  # summary names may not contain spaces

train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy)

corr = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
acc = tf.reduce_mean(tf.cast(corr, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

merged = tf.summary.merge_all()
log_dir = './log'
train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)

for i in range(2000):
    batch = mnist.train.next_batch(50)
    if i % 100 != 0:
        # Ordinary step: run the optimizer and collect the merged summaries in one call.
        summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y: batch[1]})
        train_writer.add_summary(summary, i)
    else:
        # Every 100 steps: report accuracy and trace run metadata (timings, memory).
        train_accuracy = acc.eval(session=sess, feed_dict={x: batch[0], y: batch[1]})
        test_accuracy = acc.eval(session=sess, feed_dict={x: mnist.test.images[0:50], y: mnist.test.labels[0:50]})
        print('train_acc: %.5f, test_acc: %.5f' % (train_accuracy, test_accuracy))
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        summary, _ = sess.run([merged, train_step],
                              feed_dict={x: batch[0], y: batch[1]},
                              options=run_options, run_metadata=run_metadata)
        train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
        train_writer.add_summary(summary, i)

print('Training complete!!')
train_writer.close()
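Once the script has finished, the event files under ./log can be inspected by launching TensorBoard (assuming it was installed alongside TensorFlow 1.x) with `tensorboard --logdir=./log` and opening the printed URL, typically http://localhost:6006, in a browser.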

The script breaks down into three parts.

1. Record each variable you want to track with the matching summary function (a histogram variant is sketched after this part).

Images:

tf.summary.image('input', x_image, 1)

Scalar values, plotted as curves in TensorBoard's SCALARS tab:

tf.summary.scalar('cross_entropy', cross_entropy)
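The same pattern extends to weight distributions. A minimal sketch, assuming the wc1 and bc1 variables defined in the script above; TensorBoard renders these under the DISTRIBUTIONS and HISTOGRAMS tabs:

tf.summary.histogram('wc1', wc1)
tf.summary.histogram('bc1', bc1)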

2. Create the object that merges all recorded summaries, plus a writer bound to the log-file path:

merged = tf.summary.merge_all()
log_dir = './log'
train_writer = tf.summary.FileWriter(log_dir + '/train', sess.graph)
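A second writer can keep evaluation summaries separate from training ones. A minimal sketch, assuming the same log_dir, merged op, and placeholders as above; TensorBoard overlays ./log/train and ./log/test as separate runs:

test_writer = tf.summary.FileWriter(log_dir + '/test')
# inside the training loop, at step i:
summary = sess.run(merged, feed_dict={x: mnist.test.images[0:50], y: mnist.test.labels[0:50]})
test_writer.add_summary(summary, i)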

3. Write the summaries out while training:

summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y: batch[1]})
train_writer.add_summary(summary, i)
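The dimensionality-reduced distribution plot mentioned in the title comes from TensorBoard's embedding projector, which reduces a saved tensor with PCA or t-SNE. A minimal sketch, assuming TensorFlow 1.x with tf.contrib available; the variable name embedding_var and the checkpoint path are illustrative, not part of the script above:

from tensorflow.contrib.tensorboard.plugins import projector

# Tensor whose rows the projector will reduce and plot, e.g. fc-layer activations.
embedding_var = tf.Variable(tf.zeros([1024, 10]), name='embedding')
saver = tf.train.Saver([embedding_var])

config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
projector.visualize_embeddings(train_writer, config)  # writes projector_config.pbtxt to the log dir

# After training, save a checkpoint so the projector can load the tensor:
# saver.save(sess, log_dir + '/train/model.ckpt', global_step=i)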
