[TensorFlow] Beginner's Learning Notes (6): A Quick TensorBoard Tutorial and Model Saving

Model saving

tf.train.Saver()

The Saver class adds ops to save and restore variables to and from checkpoints. It also provides convenience methods to run these ops.

There are two important methods.

saver.save() writes the model's variables from a given session to save_path; if you also pass a global_step, the iteration count is appended to the checkpoint filename.

As for restore(), I think the best way to understand the restore operation is to view it simply as a form of data initialization: the variables in the current session are initialized with the values that were saved from the earlier session.
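A minimal sketch of the save/restore round trip (the checkpoint path /tmp/demo.ckpt and the variable v are made up for illustration):

import tensorflow as tf

v = tf.Variable(42.0, name='v')
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Passing global_step appends the step count to the filename,
    # producing e.g. /tmp/demo.ckpt-1000
    save_path = saver.save(sess, '/tmp/demo.ckpt', global_step=1000)

with tf.Session() as sess:
    # No initializer needed here: restore() assigns the saved
    # values directly into the current session's variables.
    saver.restore(sess, save_path)
    print(sess.run(v))  # 42.0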

# -*- coding: UTF-8 -*-

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)

learning_rate = 0.001
batch_size = 100
display_step = 1
model_path = "../tmp/model.ckpt"

n_hidden_1 = 256
n_hidden_2 = 256
n_input = 784
n_classes = 10

x = tf.placeholder(tf.float32,[None,n_input])
y = tf.placeholder(tf.float32,[None,n_classes])

weights = {
    'h1':tf.Variable(tf.random_normal([n_input,n_hidden_1])),
    'h2':tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),
    'out':tf.Variable(tf.random_normal([n_hidden_2,n_classes]))
}

biases = {
    'b1':tf.Variable(tf.random_normal([n_hidden_1])),
    'b2':tf.Variable(tf.random_normal([n_hidden_2])),
    'out':tf.Variable(tf.random_normal([n_classes]))
}

# Build the model
def multilayer_perceptron(x,weights,biases):
    # Hidden layer 1 with ReLU activation
    layer_1 = tf.add(tf.matmul(x,weights['h1']),biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer 2 with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1,weights['h2']),biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2,weights['out'])+biases['out']
    return out_layer

pred = multilayer_perceptron(x,weights,biases)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

init = tf.global_variables_initializer()

saver = tf.train.Saver()

print "Starting 1st session..."
if __name__ == '__main__':
    with tf.Session() as sess:
        #init variables
        sess.run(init)

        for epoch in range(3):
            avg_cost = 0
            total_batch = int(mnist.train.num_examples/batch_size)
            #loop
            for i in range(total_batch):
                batch_x,batch_y = mnist.train.next_batch(batch_size)
                _,c = sess.run([optimizer,cost],feed_dict={
                    x:batch_x,
                    y:batch_y
                })

                avg_cost += c/total_batch

            if epoch % display_step == 0:
                print "Epoch:", '%04d' % (epoch + 1), "cost=", \
                    "{:.9f}".format(avg_cost)

        print "First Optimization Finished!"

        # Test model
        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        # Calculate accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})

        # Save model weights to disk
        save_path = saver.save(sess, model_path)
        print "Model saved in file: %s" % save_path

    # Running a new session...

    with tf.Session() as sess:
        sess.run(init)

        # The best way to understand restore() is as a data-initialization
        # step: it overwrites the freshly initialized variables with the
        # values saved in the checkpoint (restore() itself returns None).
        saver.restore(sess, model_path)
        print("Model restored from file: %s" % model_path)

        for epoch in range(7):
            avg_cost = 0
            total_batch = int(mnist.train.num_examples/batch_size)
            #loop
            for i in range(total_batch):
                batch_x,batch_y = mnist.train.next_batch(batch_size)
                _,c = sess.run([optimizer,cost],feed_dict={
                    x:batch_x,
                    y:batch_y
                })

                avg_cost += c/total_batch

            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch + 1), "cost=",
                      "{:.9f}".format(avg_cost))
        print("Second Optimization Finished!")

        # Test model
        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        # Calculate accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print "Accuracy:", accuracy.eval(
            {x: mnist.test.images, y: mnist.test.labels})

TensorBoard

tf.summary.scalar() records a variable you want displayed in TensorBoard. Every summary is itself an op, so once the scalar summaries are defined, merge them all into a single combined op with tf.summary.merge_all().

Inside the session's training loop, run this merged summary op alongside the training ops.

summary_writer = tf.summary.FileWriter(logs_path,graph=tf.get_default_graph())
creates a writer that saves the summaries (and the graph) to the log directory, where TensorBoard can later read them.

Then, inside the loop, add each summary returned by run() to the event file with summary_writer.add_summary().
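A minimal sketch of that workflow, assuming a made-up log directory /tmp/demo_logs and a dummy variable loss_value standing in for a real loss:

import tensorflow as tf

loss_value = tf.Variable(1.0, name='loss_value')
train_op = tf.assign(loss_value, loss_value * 0.9)  # stand-in for a real training op

tf.summary.scalar('loss_value', loss_value)  # a summary is an op, too
merged_summary_op = tf.summary.merge_all()   # combine all summaries into one op

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter('/tmp/demo_logs',
                                           graph=tf.get_default_graph())
    for step in range(100):
        _, summary = sess.run([train_op, merged_summary_op])
        summary_writer.add_summary(summary, step)  # one data point per step
    summary_writer.close()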

Finally, in a terminal, run tensorboard --logdir=../tmp/tensorflow_logs

and open http://localhost:6006/ in your web browser.

Basic model

# -*- coding: UTF-8 -*-

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)

learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
logs_path = '../tmp/tensorflow_logs/example'

x = tf.placeholder(tf.float32,[None,784],name='InputData')
y = tf.placeholder(tf.float32,[None,10],name='LabelData')

w = tf.Variable(tf.zeros([784,10]),name='Weights')
b = tf.Variable(tf.zeros([10]),name='Bias')

with tf.name_scope('Model'):
    pred = tf.nn.softmax(tf.matmul(x,w)+b)

with tf.name_scope('Loss'):
    cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))

with tf.name_scope('SGD'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

with tf.name_scope('Accuracy'):
    acc = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
    acc = tf.reduce_mean(tf.cast(acc,tf.float32))

init = tf.global_variables_initializer()

tf.summary.scalar("loss",cost)

tf.summary.scalar("accuracy",acc)

merged_summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(init)

    summary_writer = tf.summary.FileWriter(logs_path,graph=tf.get_default_graph())

    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples/batch_size)
        #loop
        for i in range(total_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)

            _,c,summary = sess.run([optimizer,cost,merged_summary_op],
                                   feed_dict={x:batch_xs,y:batch_ys})

            summary_writer.add_summary(summary, epoch * total_batch + i)

            avg_cost+=c/total_batch


        if (epoch + 1) % display_step == 0:
            print "Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost)

    print "Optimization Finished!"

    # Test model
    # Calculate accuracy
    print "Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})

    print "Run the command line:\n" \
          "--> tensorboard --logdir=/tmp/tensorflow_logs " \
          "\nThen open http://127.0.0.0:6006/ into your web browser"

An upgraded TensorBoard example

# -*- coding: UTF-8 -*-

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1
logs_path = '../tmp/tensorflow_logs/example2'

# Network Parameters
n_hidden_1 = 20 # 1st layer number of features
n_hidden_2 = 40 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')

# Use tf.summary.scalar to record scalar values
# Use tf.summary.histogram to record histograms of tensor values
# (TensorBoard's "Distributions" tab is built from the same histogram
#  summaries; there is no separate tf.summary.distribution op)
# Use tf.summary.image to record image data
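# As a hypothetical illustration of tf.summary.image (not used in this
# script): reshape the flat 784-dim input back to 28x28x1 so TensorBoard
# can render a few sample digits:
#   tf.summary.image('input', tf.reshape(x, [-1, 28, 28, 1]), max_outputs=3)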

# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Create a summary to visualize the first layer ReLU activation
    tf.summary.histogram("relu1", layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Create another summary to visualize the second layer ReLU activation
    tf.summary.histogram("relu2", layer_2)
    # Output layer
    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
    return out_layer

# Store layers weight & bias
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),
    'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3')
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
    'b3': tf.Variable(tf.random_normal([n_classes]), name='b3')
}

# Encapsulating all ops into scopes, making Tensorboard's Graph
# Visualization more convenient
with tf.name_scope('Model'):
    # Build model
    pred = multilayer_perceptron(x, weights, biases)

with tf.name_scope('Loss'):
    # Softmax Cross entropy (cost function)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

with tf.name_scope('SGD'):
    # Gradient Descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Op to calculate every variable gradient
    grads = tf.gradients(loss, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)

with tf.name_scope('Accuracy'):
    # Accuracy
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Create a summary to monitor cost tensor
tf.summary.scalar("loss", loss)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", acc)
# Create summaries to visualize weights
for var in tf.trainable_variables():
    tf.summary.histogram(var.name, var)
# Summarize all gradients
for grad, var in grads:
    tf.summary.histogram(var.name + '/gradient', grad)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # op to write logs to Tensorboard
    summary_writer = tf.summary.FileWriter(logs_path,
                                            graph=tf.get_default_graph())

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop), cost op (to get loss value)
            # and summary nodes
            _, c, summary = sess.run([apply_grads, loss, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    # Calculate accuracy
    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Run the command line:\n" \
          "--> tensorboard --logdir=/tmp/tensorflow_logs " \
          "\nThen open http://0.0.0.0:6006/ into your web browser")