TensorFlow Learning Diary 17

1. Layers

Analysis:

"""
This tutorial will introduce how to combine TFLearn and TensorFlow, using
TFLearn layer wrappers with a regular TensorFlow graph.
"""
from __future__ import print_function

import tensorflow as tf
import tflearn

# --------------------------------------
# High-Level API: Using TFLearn wrappers
# --------------------------------------

# Using MNIST Dataset
import tflearn.datasets.mnist as mnist

mnist_data = mnist.read_data_sets(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():
    # Placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='linear')

    # Defining other ops using TensorFlow
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        batch_size = 128
        for epoch in range(2):  # 2 epochs
            avg_cost = 0.
            total_batch = int(mnist_data.train.num_examples / batch_size)
            for i in range(total_batch):
                batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
                _, cost = sess.run([optimizer, loss],
                                   feed_dict={X: batch_xs, Y: batch_ys})
                avg_cost += cost / total_batch
                if i % 20 == 0:
                    print("Epoch:", '%03d' % (epoch + 1), "Step:", '%03d' % i,
                          "Loss:", str(cost))


2. Trainer

Analysis:

"""
This tutorial will introduce how to combine TFLearn and TensorFlow, using
the TFLearn Trainer with regular TensorFlow expressions.
"""

import tensorflow as tf
import tflearn

# ----------------------------
# Utils: Using TFLearn Trainer
# ----------------------------

# Loading MNIST complete dataset
import tflearn.datasets.mnist as mnist
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)

# Define a DNN using TensorFlow
with tf.Graph().as_default():

    # Model variables
    X = tf.placeholder("float", [None, 784])
    Y = tf.placeholder("float", [None, 10])

    W1 = tf.Variable(tf.random_normal([784, 256]))
    W2 = tf.Variable(tf.random_normal([256, 256]))
    W3 = tf.Variable(tf.random_normal([256, 10]))
    b1 = tf.Variable(tf.random_normal([256]))
    b2 = tf.Variable(tf.random_normal([256]))
    b3 = tf.Variable(tf.random_normal([10]))

    # Multilayer perceptron
    def dnn(x):
        x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))
        x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))
        x = tf.add(tf.matmul(x, W3), b3)
        return x

    net = dnn(X)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32),
        name='acc')

    # Using TFLearn Trainer
    # Define a training op (op for backprop; only one is needed in this model)
    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                              metric=accuracy, batch_size=128)

    # Create Trainer, providing all training ops. Tensorboard logs are stored
    # in /tmp/tflearn_logs/. The verbosity level can be raised for more
    # detailed logs about gradients, variables, etc.
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
    # Training for 10 epochs.
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY},
                n_epoch=10, show_metric=True)
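
Once fit() returns, the trained weights live in the Trainer's managed session, so the graph's accuracy op can be evaluated directly. A minimal sketch (assuming the `trainer.session` attribute, as exposed in the TFLearn source; not part of the original example):

    # Evaluate the accuracy op on the test set via the Trainer's session.
    test_acc = trainer.session.run(accuracy, feed_dict={X: testX, Y: testY})
    print("Test accuracy:", test_acc)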


3. Built-in Ops

Analysis:

"""
This tutorial will introduce how to combine TFLearn built-in ops with any
TensorFlow graph.
"""
from __future__ import division, print_function, absolute_import

import tensorflow as tf
import tflearn

# ----------------------------------
# Using TFLearn built-in ops example
# ----------------------------------

# Using MNIST Dataset
import tflearn.datasets.mnist as mnist
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():

    # Model variables
    X = tf.placeholder("float", [None, 784])
    Y = tf.placeholder("float", [None, 10])

    W1 = tf.Variable(tf.random_normal([784, 256]))
    W2 = tf.Variable(tf.random_normal([256, 256]))
    W3 = tf.Variable(tf.random_normal([256, 10]))
    b1 = tf.Variable(tf.random_normal([256]))
    b2 = tf.Variable(tf.random_normal([256]))
    b3 = tf.Variable(tf.random_normal([10]))

    # Multilayer perceptron
    def dnn(x):
        # Using TFLearn PReLU activations ops
        x = tflearn.prelu(tf.add(tf.matmul(x, W1), b1))
        tflearn.summaries.monitor_activation(x) # Monitor activation
        x = tflearn.prelu(tf.add(tf.matmul(x, W2), b2))
        tflearn.summaries.monitor_activation(x) # Monitor activation
        x = tf.nn.softmax(tf.add(tf.matmul(x, W3), b3))
        return x

    net = dnn(X)

    # Using objective ops from TFLearn to compute crossentropy
    loss = tflearn.categorical_crossentropy(net, Y)

    # Using metric ops from TFLearn to compute accuracy
    acc = tflearn.metrics.accuracy_op(net, Y)

    # Using TFLearn SGD Optimizer class
    optimizer = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=200)
    # Because of lr decay, it is required to first build the Optimizer with
    # the step tensor that will monitor training step.
    # (Note: When using the TFLearn estimator wrappers, the build step is
    # self-managed, so passing the `Optimizer` class above as the `DNN`
    # optimizer arg is enough.)
    step = tflearn.variable("step", initializer='zeros', shape=[])
    optimizer.build(step_tensor=step)
    optim_tensor = optimizer.get_tensor()

    # Using TFLearn Trainer
    # Define a training op (op for backprop; only one is needed in this model)
    trainop = tflearn.TrainOp(loss=loss, optimizer=optim_tensor,
                              metric=acc, batch_size=128,
                              step_tensor=step)

    # Create Trainer, providing all training ops. Tensorboard logs are stored
    # in /tmp/tflearn_logs/. The verbosity level can be raised for more
    # detailed logs about gradients, variables, etc.
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=0)
    # Training for 10 epochs.
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY},
                n_epoch=10, show_metric=True)
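
For reference, the schedule that `tflearn.SGD` builds from `lr_decay` and `decay_step` corresponds to TensorFlow's exponential-decay op applied to the step tensor. A minimal sketch of the raw equivalent (the `decayed_lr` name is illustrative):

    # Equivalent schedule in plain TensorFlow (sketch):
    # lr(step) = 0.1 * 0.96 ** (step / 200)
    decayed_lr = tf.train.exponential_decay(learning_rate=0.1,
                                            global_step=step,
                                            decay_steps=200,
                                            decay_rate=0.96)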


4. Summaries

Analysis:

"""
This example introduces the use of TFLearn functions to easily summarize
variables into tensorboard.
"""

import tensorflow as tf
import tflearn

# Loading MNIST dataset
import tflearn.datasets.mnist as mnist
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)

# Define a DNN using TensorFlow
with tf.Graph().as_default():

    # Model variables
    X = tf.placeholder("float", [None, 784])
    Y = tf.placeholder("float", [None, 10])

    # Multilayer perceptron, with `tanh` activation monitoring
    def dnn(x):
        with tf.name_scope('Layer1'):
            W1 = tf.Variable(tf.random_normal([784, 256]), name="W1")
            b1 = tf.Variable(tf.random_normal([256]), name="b1")
            x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))
            # Add this `tanh` op to the activations collection for monitoring
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, x)
            # Add weights regularizer (Regul. summary automatically added)
            tflearn.add_weights_regularizer(W1, 'L2', weight_decay=0.001)

        with tf.name_scope('Layer2'):
            W2 = tf.Variable(tf.random_normal([256, 256]), name="W2")
            b2 = tf.Variable(tf.random_normal([256]), name="b2")
            x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))
            # Add this `tanh` op to the activations collection for monitoring
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, x)
            # Add weights regularizer (Regul. summary automatically added)
            tflearn.add_weights_regularizer(W2, 'L2', weight_decay=0.001)

        with tf.name_scope('Layer3'):
            W3 = tf.Variable(tf.random_normal([256, 10]), name="W3")
            b3 = tf.Variable(tf.random_normal([10]), name="b3")
            x = tf.add(tf.matmul(x, W3), b3)

        return x

    net = dnn(X)
        
    with tf.name_scope('Summaries'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32),
            name="acc")

    # Construct two variables to add as additional "validation monitors".
    # These variables are evaluated each time validation happens (e.g. at a
    # snapshot), and the results are summarized and output to the tensorboard
    # events file, together with the accuracy and loss plots.
    #
    # Here, we generate a dummy variable given by the sum over the current
    # network tensor, and a constant variable.  In practice, the validation
    # monitor may present useful information, like confusion matrix
    # entries, or an AUC metric.
    with tf.name_scope('CustomMonitor'):
        test_var = tf.reduce_sum(tf.cast(net, tf.float32), name="test_var")
        test_const = tf.constant(32.0, name="custom_constant")
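        # A validation monitor can be any scalar tensor. A sketch of a more
        # informative one (illustrative names, not in the original example):
        # the number of correct predictions, via a confusion matrix. It could
        # be appended to `validation_monitors` below.
        cm = tf.confusion_matrix(tf.argmax(Y, 1), tf.argmax(net, 1),
                                 num_classes=10)
        correct_count = tf.reduce_sum(tf.diag_part(cm), name="correct_count")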

    # Define a train op
    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                              validation_monitors=[test_var, test_const],
                              metric=accuracy, batch_size=128)

    # Tensorboard logs stored in /tmp/tflearn_logs/. Using verbose level 2.
    trainer = tflearn.Trainer(train_ops=trainop,
                              tensorboard_dir='/tmp/tflearn_logs/',
                              tensorboard_verbose=2)
    # Training for 10 epochs.
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY},
                n_epoch=10, show_metric=True, run_id='Summaries_example')

    # Run the following command to start tensorboard:
    # >> tensorboard --logdir=/tmp/tflearn_logs/
    # Navigate with your web browser to http://0.0.0.0:6006/


5. Variables

Analysis:

"""
This example introduces the use of TFLearn variables to easily implement
TensorFlow variables with custom initialization and regularization.
"""

import tensorflow as tf
import tflearn
import tflearn.variables as va

# Loading MNIST dataset
import tflearn.datasets.mnist as mnist
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)

# Define a DNN using TensorFlow
with tf.Graph().as_default():

    # Model variables
    X = tf.placeholder("float", [None, 784])
    Y = tf.placeholder("float", [None, 10])

    # Multilayer perceptron
    def dnn(x):
        with tf.variable_scope('Layer1'):
            # Creating variable using TFLearn
            W1 = va.variable(name='W', shape=[784, 256],
                             initializer='uniform_scaling',
                             regularizer='L2')
            b1 = va.variable(name='b', shape=[256])
            x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))

        with tf.variable_scope('Layer2'):
            W2 = va.variable(name='W', shape=[256, 256],
                             initializer='uniform_scaling',
                             regularizer='L2')
            b2 = va.variable(name='b', shape=[256])
            x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))

        with tf.variable_scope('Layer3'):
            W3 = va.variable(name='W', shape=[256, 10],
                             initializer='uniform_scaling')
            b3 = va.variable(name='b', shape=[10])
            x = tf.add(tf.matmul(x, W3), b3)

        return x

    net = dnn(X)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1)), tf.float32),
        name='acc')

    # Define a train op
    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer,
                              metric=accuracy, batch_size=128)

    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=3,
                              tensorboard_dir='/tmp/tflearn_logs/')
    # Training for 10 epochs.
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY},
                n_epoch=10, show_metric=True, run_id='Variables_example')
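
The 'L2' regularizers requested above are registered by TFLearn in TensorFlow's standard regularization-losses collection (an assumption based on TFLearn's variables module). A sketch of folding them into the objective explicitly (`total_loss` is an illustrative name, not part of the original example):

    # Fold collected L2 penalties into the objective (sketch; assumes the
    # penalties are in tf.GraphKeys.REGULARIZATION_LOSSES).
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if reg_losses:
        total_loss = loss + tf.add_n(reg_losses)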


References:

[1] TFLearn Examples: https://github.com/tflearn/tflearn/tree/master/examples
