Understanding TensorBoard usage at a glance


This example uses the MNIST dataset and walks through the main TensorBoard features: image, histogram, and scalar summaries, graph visualization, and side-by-side comparison of runs with different hyperparameters. The code targets TensorFlow 1.x (it relies on tf.contrib, tf.placeholder, and tf.Session, all removed in 2.x).

import tensorflow as tf

LOGDIR = '../data_set'

mnist = tf.contrib.learn.datasets.mnist.read_data_sets(train_dir=LOGDIR, one_hot=True)
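
# Quick shape check: with the standard read_data_sets split there are
# 55000 train / 5000 validation / 10000 test examples; images are flattened
# 28*28 = 784 vectors and labels are one-hot vectors of length 10.
print('train images:', mnist.train.images.shape)  # (55000, 784)
print('train labels:', mnist.train.labels.shape)  # (55000, 10)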


def conv_layer(inputs, size_in, size_out, name_scope='conv'):
    # A 5x5 convolution + ReLU + 2x2 max-pooling block. The name scope groups
    # these ops into a single collapsible node in TensorBoard's graph view.
    with tf.name_scope(name_scope):
        w = tf.Variable(tf.truncated_normal([5, 5, size_in, size_out], stddev=0.1), name='W')
        b = tf.Variable(tf.constant(0.1, shape=[size_out]), name='B')
        conv = tf.nn.conv2d(inputs, w, strides=[1, 1, 1, 1], padding='SAME')
        act = tf.nn.relu(conv + b)

        # Record weight/bias/activation distributions for the Histograms tab.
        tf.summary.histogram('weights', w)
        tf.summary.histogram('biases', b)
        tf.summary.histogram('activations', act)

        # 2x2 max-pooling halves the spatial resolution (28x28 -> 14x14 here).
        return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def fc_layer(inputs, size_in, size_out, name_scope='fc'):
    # A fully connected layer with a ReLU activation.
    with tf.name_scope(name_scope):
        w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name='W')
        b = tf.Variable(tf.constant(0.1, shape=[size_out]), name='B')
        act = tf.nn.relu(tf.matmul(inputs, w) + b)
        # Record weight/bias/activation distributions for the Histograms tab.
        tf.summary.histogram('weights', w)
        tf.summary.histogram('biases', b)
        tf.summary.histogram('activations', act)
        return act


def model(learning_rate, iter_num):
    # Build and train one run; resetting the default graph keeps each
    # hyperparameter run independent of the previous ones.
    tf.reset_default_graph()
    sess = tf.Session()

    x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    # Log up to 5 of the input images per summary step (Images tab).
    tf.summary.image('input', x_image, 5)

    y = tf.placeholder(tf.float32, shape=[None, 10], name='label')

    # conv_layer already max-pools once (28x28 -> 14x14); the second pool
    # below brings it to 7x7, matching the 7 * 7 * 64 flatten size.
    conv1 = conv_layer(x_image, 1, 64, name_scope='conv')
    conv_out = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    flattened = tf.reshape(conv_out, [-1, 7 * 7 * 64])
    # Note: fc_layer applies a ReLU, so these "logits" are clipped at zero;
    # a linear output layer is more standard before softmax cross-entropy.
    y_hat = fc_layer(flattened, 7 * 7 * 64, 10, name_scope='fc')

    # Group the loss, training, and accuracy ops under their own name scopes.
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_hat, labels=y))
        tf.summary.scalar('loss', loss)
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    # Merge every summary op defined above into a single op.
    summ = tf.summary.merge_all()
    sess.run(tf.global_variables_initializer())
    # One log directory per hyperparameter combination, so TensorBoard can
    # show the runs side by side.
    tenboard_dir = './logs/fourth/' + 'lr_' + str(learning_rate) + '_iter_num_' + str(iter_num)

    # The FileWriter saves summaries (and the graph, added below) to that directory.
    writer = tf.summary.FileWriter(tenboard_dir)
    writer.add_graph(sess.graph)

    for i in range(iter_num):
        batch = mnist.train.next_batch(100)

        sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})
        # Every 5 steps, evaluate the merged summaries and write them out,
        # tagged with the current step number.
        if i % 5 == 0:
            train_accuracy, s = sess.run([accuracy, summ], feed_dict={x: batch[0], y: batch[1]})
            writer.add_summary(s, i)
            print("step %5d: train accuracy %.2f" % (i, train_accuracy))


def main():
    # Sweep two learning rates and two iteration counts: four runs in total,
    # each logged to its own subdirectory under ./logs/fourth/.
    for learning_rate in [1E-4, 1E-3]:
        for iter_num in [100, 200]:
            model(learning_rate, iter_num)


if __name__ == '__main__':
    main()
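
After training, point TensorBoard at the parent log directory; each run's subdirectory appears as its own selectable curve, so the four hyperparameter combinations can be compared directly:

tensorboard --logdir=./logs/fourth

Then open the address TensorBoard prints (http://localhost:6006 by default) in a browser.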

Selected result screenshots (TensorBoard scalar, histogram, and graph views; images not reproduced here).
