TensorFlow 1.x (2): How to Simplify Code with tf.layers and tf.contrib

tf.layers and tf.contrib provide higher-level wrappers around common building blocks; learning to use them keeps us from reinventing the wheel during development.

First, here is a multilayer perceptron implemented purely with low-level ops:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
 
 
def fc(input_size, hidden_size, layer_cnt, input_var):
    """
    全连接层实现
    :param input_size: 输入维度
    :param hidden_size: 输出维度
    :param layer_cnt: 第几层
    :param input_var: 输入张量
    :return: 输出张量
    """
    w = tf.get_variable(name="w" + str(layer_cnt), shape=[input_size, hidden_size],
                        initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
    b = tf.get_variable(name="b" + str(layer_cnt), shape=[1, hidden_size],
                        initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)
    output = tf.matmul(input_var, w) + b
    return output
 
 
def activate(weights, func_name, name=None):
    """
    激活函数实现
    :param weights: 激活前输入
    :param func_name: str类型,激活函数名
    :param name: name
    :return: 激活后输出
    """
    func_name = func_name.lower()
    if func_name == 'sigmoid':
        return tf.nn.sigmoid(weights, name=name)
    elif func_name == 'softmax':
        return tf.nn.softmax(weights, name=name)
    elif func_name == 'relu':
        return tf.nn.relu(weights, name=name)
    elif func_name == 'tanh':
        return tf.nn.tanh(weights, name=name)
    elif func_name == 'elu':
        return tf.nn.elu(weights, name=name)
    elif func_name == 'none':
        return weights
    else:
        return tf.nn.relu(weights, name=name)
 
 
def dnn(cur_in, layer_size, activate_fuc):
    """
    多层感知机实现
    :param cur_in: 输入张量
    :param layer_size: 模型每层维度
    :param activate_fuc: 模型每层激活函数
    :return: 输出张量
    """
    for layer_cnt in range(len(layer_size) - 1):
        if layer_cnt == 0:
            output = fc(input_size=layer_size[layer_cnt], hidden_size=layer_size[layer_cnt + 1],
                        layer_cnt=layer_cnt, input_var=cur_in)
        else:
            output = fc(input_size=layer_size[layer_cnt], hidden_size=layer_size[layer_cnt + 1],
                        layer_cnt=layer_cnt, input_var=output)
        output = activate(output, func_name=activate_fuc[layer_cnt])
    return output
 
 
# Define the data: x_data has shape [200, 1]
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
noise = np.random.normal(0, 0.02, size=x_data.shape)
y_data = np.square(x_data) + noise
 
# Placeholders for the inputs
x = tf.placeholder(dtype=tf.float32, shape=[None, 1])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
 
# Define the model output
y_pred = dnn(x, layer_size=[1, 10, 1], activate_fuc=["tanh", "tanh"])
 
# Define the loss function (MSE)
loss = tf.reduce_mean(tf.square(y_pred - y))
# Define the optimizer
opt = tf.train.AdamOptimizer(learning_rate=0.01)
# Minimize the target loss
train_step = opt.minimize(loss)
with tf.Session() as sess:
    # Initialize all global variables
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        # Run one training step
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
        # Print the loss every 50 epochs
        if (i + 1) % 50 == 0:
            print("epoch", i, "loss", sess.run(loss, feed_dict={x: x_data, y: y_data}))
    # Plot the predictions against the labels
    plt.scatter(x_data, y_data)
    prediction = sess.run(y_pred, feed_dict={x: x_data, y: y_data})
    plt.plot(x_data, prediction, "r", lw=5)
    plt.show()
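
As an aside, the if/elif ladder in activate() can be collapsed into a dictionary lookup. A minimal sketch of that refactoring (the ACTIVATIONS table and its name are my own, not part of the original code):

# Sketch: replace the if/elif dispatch in activate() with a dict lookup.
# The ACTIVATIONS name is hypothetical; behavior matches the original.
ACTIVATIONS = {
    "sigmoid": tf.nn.sigmoid,
    "softmax": tf.nn.softmax,
    "relu": tf.nn.relu,
    "tanh": tf.nn.tanh,
    "elu": tf.nn.elu,
    "none": None,
}

def activate(weights, func_name, name=None):
    # Unknown names fall back to relu, as in the original else branch.
    fn = ACTIVATIONS.get(func_name.lower(), tf.nn.relu)
    return weights if fn is None else fn(weights, name=name)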

Now we simplify the code using Dense from tf.layers together with the loss function and optimizer from tf.contrib:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.contrib.losses import mean_squared_error
from tensorflow.contrib.opt import AdamWOptimizer


def dnn(cur_in, layer_size, activate_fuc):
    """
    多层感知机实现
    :param cur_in: 输入张量
    :param layer_size: 模型每层维度
    :param activate_fuc: 模型每层激活函数
    :return: 输出张量
    """
    for layer_cnt in range(len(layer_size) - 1):
        if layer_cnt == 0:
            output = tf.layers.Dense(layer_size[layer_cnt + 1], activation=activate_fuc[layer_cnt])(cur_in)
        else:
            output = tf.layers.Dense(layer_size[layer_cnt + 1], activation=activate_fuc[layer_cnt])(output)
    return output


# Define the data: x_data has shape [200, 1]
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
noise = np.random.normal(0, 0.02, size=x_data.shape)
y_data = np.square(x_data) + noise

# Placeholders for the inputs
x = tf.placeholder(dtype=tf.float32, shape=[None, 1])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])

# Define the model output
y_pred = dnn(x, layer_size=[1, 10, 1], activate_fuc=["tanh", "tanh"])

# Define the loss function (MSE)
loss = mean_squared_error(y_pred, y)
# Define the optimizer (with weight_decay=0, AdamW reduces to plain Adam)
opt = AdamWOptimizer(learning_rate=0.01, weight_decay=0)
# Minimize the target loss
train_step = opt.minimize(loss)
with tf.Session() as sess:
    # Initialize all global variables
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        # Run one training step
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
        # Print the loss every 50 epochs
        if (i + 1) % 50 == 0:
            print("epoch", i, "loss", sess.run(loss, feed_dict={x: x_data, y: y_data}))
    # Plot the predictions against the labels
    plt.scatter(x_data, y_data)
    prediction = sess.run(y_pred, feed_dict={x: x_data, y: y_data})
    plt.plot(x_data, prediction, "r", lw=5)
    plt.show()
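
One caveat: tf.contrib was removed entirely in TensorFlow 2.x, so the contrib imports above only work on 1.x. The same simplification is available with core TF 1.x APIs alone; a minimal sketch, reusing the x and y placeholders defined above:

# Core TF 1.x only, no tf.contrib: tf.layers.dense is the functional
# counterpart of tf.layers.Dense, and tf.losses ships an MSE loss.
hidden = tf.layers.dense(x, 10, activation=tf.nn.tanh)
y_pred = tf.layers.dense(hidden, 1, activation=tf.nn.tanh)
loss = tf.losses.mean_squared_error(labels=y, predictions=y_pred)
train_step = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)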

The fit produced by the simplified code: [figure omitted: scatter of the training data with the fitted curve in red]
