TensorFlow 1.0 in Practice (Fully Connected Networks, the Dataset API, and Custom Estimators)

Part 1: Implementing a Fully Connected Network in TF 1.0

1. The TF 1.0 computation graph

hidden_units = [100, 100]  # two fully connected hidden layers, 100 units each
class_num = 10             # number of classes

# placeholders for the input batch
x = tf.placeholder(tf.float32, [None, 28 * 28])  # flattened input images
y = tf.placeholder(tf.int64, [None])             # integer labels

input_for_next_layer = x
for hidden_unit in hidden_units:  # hidden layers
    input_for_next_layer = tf.layers.dense(input_for_next_layer,
                                           hidden_unit,
                                           activation=tf.nn.relu)
# output layer: no activation, raw logits
logits = tf.layers.dense(input_for_next_layer,
                         class_num)

# last_hidden_output * W(logits) -> softmax -> prob
# What tf.losses.sparse_softmax_cross_entropy does:
"""
 1. logit -> softmax -> prob
 2. labels -> one_hot
 3. calculate cross entropy
"""

# loss
loss = tf.losses.sparse_softmax_cross_entropy(labels = y,
                                              logits = logits)
# accuracy
prediction = tf.argmax(logits, 1)
correct_prediction = tf.equal(prediction, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
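
To see what the comment above means in code, here is a minimal sketch (a hypothetical addition reusing the y, logits and class_num tensors defined above) of the computation that tf.losses.sparse_softmax_cross_entropy performs; in practice the built-in op should be preferred because it is more numerically stable:

y_one_hot = tf.one_hot(y, depth=class_num)   # labels -> one_hot
probs = tf.nn.softmax(logits)                # logits -> softmax -> prob
manual_loss = tf.reduce_mean(                # cross entropy, averaged over the batch
    -tf.reduce_sum(y_one_hot * tf.log(probs + 1e-8), axis=1))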

2. Training the model in a Session

# session

init = tf.global_variables_initializer()
batch_size = 20
epochs = 10
train_steps_per_epoch = x_train.shape[0] // batch_size
#valid_steps = x_valid.shape[0] // batch_size

def eval_with_sess(sess, x, y, accuracy, x_valid_scaled, y_valid, batch_size):
    eval_steps = x_valid_scaled.shape[0] // batch_size
    eval_accuracies = []
    for step in range(eval_steps):
        batch_data = x_valid_scaled[step * batch_size : (step+1) * batch_size]
        batch_label = y_valid[step * batch_size : (step+1) * batch_size]
        accuracy_val = sess.run(accuracy,
                                feed_dict = {
                                    x: batch_data,
                                    y: batch_label
                                })
        eval_accuracies.append(accuracy_val)
    return np.mean(eval_accuracies)

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(epochs):
        for step in range(train_steps_per_epoch):
            batch_data = x_train_scaled[
                step * batch_size : (step+1) * batch_size]
            batch_label = y_train[
                step * batch_size : (step+1) * batch_size]
            loss_val, accuracy_val, _ = sess.run(
                [loss, accuracy, train_op],
                feed_dict = {
                    x: batch_data,
                    y: batch_label
                })
            print('\r[Train] epoch: %d, step: %d, loss: %3.5f, accuracy: %2.2f' % (
                epoch, step, loss_val, accuracy_val), end="")
        valid_accuracy = eval_with_sess(sess, x, y, accuracy,
                                        x_valid_scaled, y_valid,
                                        batch_size)
        print("\t[Valid] acc: %2.2f" % (valid_accuracy))

Full code for Part 1:

import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
    
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)

fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]


# x = (x - u) / std

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# x_train: [None, 28, 28] -> [None, 784]
x_train_scaled = scaler.fit_transform(
    x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)
x_valid_scaled = scaler.transform(
    x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)
x_test_scaled = scaler.transform(
    x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)
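# Note: reshape(-1, 1) treats every pixel as a single sample, so StandardScaler
# learns one global mean/std over all pixels; the result is then reshaped back
# to [None, 784] so each row is a flattened 28x28 image.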

hidden_units = [100, 100]  # two fully connected hidden layers, 100 units each
class_num = 10

x = tf.placeholder(tf.float32, [None, 28 * 28])  # flattened input images
y = tf.placeholder(tf.int64, [None])             # integer labels

input_for_next_layer = x
for hidden_unit in hidden_units:
    input_for_next_layer = tf.layers.dense(input_for_next_layer,
                                           hidden_unit,
                                           activation=tf.nn.relu)
# output layer: no activation, raw logits
logits = tf.layers.dense(input_for_next_layer,
                         class_num)
# last_hidden_output * W(logits) -> softmax -> prob
# What tf.losses.sparse_softmax_cross_entropy does:
# 1. logit -> softmax -> prob
# 2. labels -> one_hot
# 3. calculate cross entropy

# loss
loss = tf.losses.sparse_softmax_cross_entropy(labels = y,
                                              logits = logits)
# accuracy
prediction = tf.argmax(logits, 1)
correct_prediction = tf.equal(prediction, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)


# session

init = tf.global_variables_initializer()
batch_size = 20
epochs = 10
train_steps_per_epoch = x_train.shape[0] // batch_size
#valid_steps = x_valid.shape[0] // batch_size

def eval_with_sess(sess, x, y, accuracy, x_valid_scaled, y_valid, batch_size):
    eval_steps = x_valid_scaled.shape[0] // batch_size
    eval_accuracies = []
    for step in range(eval_steps):
        batch_data = x_valid_scaled[step * batch_size : (step+1) * batch_size]
        batch_label = y_valid[step * batch_size : (step+1) * batch_size]
        accuracy_val = sess.run(accuracy,
                                feed_dict = {
                                    x: batch_data,
                                    y: batch_label
                                })
        eval_accuracies.append(accuracy_val)
    return np.mean(eval_accuracies)

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(epochs):
        for step in range(train_steps_per_epoch):
            batch_data = x_train_scaled[
                step * batch_size : (step+1) * batch_size]
            batch_label = y_train[
                step * batch_size : (step+1) * batch_size]
            loss_val, accuracy_val, _ = sess.run(
                [loss, accuracy, train_op],
                feed_dict = {
                    x: batch_data,
                    y: batch_label
                })
            print('\r[Train] epoch: %d, step: %d, loss: %3.5f, accuracy: %2.2f' % (
                epoch, step, loss_val, accuracy_val), end="")
        valid_accuracy = eval_with_sess(sess, x, y, accuracy,
                                        x_valid_scaled, y_valid,
                                        batch_size)
        print("\t[Valid] acc: %2.2f" % (valid_accuracy))

Part 2: Using the Dataset API in TF 1.0

1. Using make_one_shot_iterator:

Characteristics: the iterator is initialized automatically and cannot be re-initialized.

import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]

print(x_valid.shape, y_valid.shape)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
# x = (x - u) / std

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# x_train: [None, 28, 28] -> [None, 784]
x_train_scaled = scaler.fit_transform(
    x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)
x_valid_scaled = scaler.transform(
    x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)
x_test_scaled = scaler.transform(
    x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)

y_train = np.asarray(y_train, dtype = np.int64)
y_valid = np.asarray(y_valid, dtype = np.int64)
y_test = np.asarray(y_test, dtype = np.int64)
def make_dataset(images, labels, epochs, batch_size, shuffle = True):
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if shuffle:
        dataset = dataset.shuffle(10000)
    dataset = dataset.repeat(epochs).batch(batch_size)
    return dataset
batch_size = 20
epochs = 10
dataset = make_dataset(x_train_scaled, y_train,
                       epochs = epochs,
                       batch_size = batch_size)
# make_one_shot_iterator:
# 1. auto initialization
# 2. can't be re-initialized (use make_initializable_iterator for that)
dataset_iter = dataset.make_one_shot_iterator()
x, y = dataset_iter.get_next()  # get the next batch
with tf.Session() as sess:
    x_val, y_val = sess.run([x, y])
    print(x_val.shape)
    print(y_val.shape)

hidden_units = [100, 100]
class_num = 10

input_for_next_layer = x
for hidden_unit in hidden_units:
    input_for_next_layer = tf.layers.dense(input_for_next_layer,
                                           hidden_unit,
                                           activation=tf.nn.relu)
logits = tf.layers.dense(input_for_next_layer,
                         class_num)
# last_hidden_output * W(logits) -> softmax -> prob
# 1. logit -> softmax -> prob
# 2. labels -> one_hot
# 3. calculate cross entropy
loss = tf.losses.sparse_softmax_cross_entropy(labels = y,
                                              logits = logits)
# get accuracy.
prediction = tf.argmax(logits, 1)
correct_prediction = tf.equal(prediction, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

# session
init = tf.global_variables_initializer()
train_steps_per_epoch = x_train.shape[0] // batch_size

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(epochs):
        for step in range(train_steps_per_epoch):
            loss_val, accuracy_val, _ = sess.run(
                [loss, accuracy, train_op])
            print('\r[Train] epoch: %d, step: %d, loss: %3.5f, accuracy: %2.2f' % (
                epoch, step, loss_val, accuracy_val), end="")

2. Using make_initializable_iterator:

Characteristics: the iterator can be re-initialized, e.g. to feed different data through placeholders.

import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]

print(x_valid.shape, y_valid.shape)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)

# x = (x - u) / std

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# x_train: [None, 28, 28] -> [None, 784]
x_train_scaled = scaler.fit_transform(
    x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)
x_valid_scaled = scaler.transform(
    x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)
x_test_scaled = scaler.transform(
    x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28 * 28)

y_train = np.asarray(y_train, dtype = np.int64)
y_valid = np.asarray(y_valid, dtype = np.int64)
y_test = np.asarray(y_test, dtype = np.int64)

def make_dataset(images, labels, epochs, batch_size, shuffle = True):
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if shuffle:
        dataset = dataset.shuffle(10000)
    dataset = dataset.repeat(epochs).batch(batch_size)
    return dataset

batch_size = 20
epochs = 10

images_placeholder = tf.placeholder(tf.float32, [None, 28 * 28])
labels_placeholder = tf.placeholder(tf.int64, (None,))

dataset = make_dataset(images_placeholder, labels_placeholder,
                       epochs = epochs,
                       batch_size = batch_size)

dataset_iter = dataset.make_initializable_iterator()
x, y = dataset_iter.get_next()
with tf.Session() as sess:
    sess.run(dataset_iter.initializer,
             feed_dict = {
                 images_placeholder: x_train_scaled,
                 labels_placeholder: y_train
             })
    x_val, y_val = sess.run([x, y])
    print(x_val.shape)
    print(y_val.shape)
    sess.run(dataset_iter.initializer,
             feed_dict = {
                 images_placeholder: x_valid_scaled,
                 labels_placeholder: y_valid,
             })
    x_val, y_val = sess.run([x, y])
    print(x_val.shape)
    print(y_val.shape)
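
Because the same iterator can be re-initialized, a single iterator can serve both the training and the validation phase. A minimal sketch of how this would look (assuming a loss / train_op / accuracy graph like the one in the make_one_shot_iterator example has been built on top of x and y, and that train_steps_per_epoch is defined as in that example; this is not part of the original script):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # training phase: point the iterator at the training data
    sess.run(dataset_iter.initializer,
             feed_dict = {images_placeholder: x_train_scaled,
                          labels_placeholder: y_train})
    for step in range(train_steps_per_epoch):
        loss_val, _ = sess.run([loss, train_op])
    # evaluation phase: re-initialize the same iterator with the validation data
    sess.run(dataset_iter.initializer,
             feed_dict = {images_placeholder: x_valid_scaled,
                          labels_placeholder: y_valid})
    accuracy_val = sess.run(accuracy)  # evaluates one validation batch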

Part 3: A Custom Estimator in TF 1.0

import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

# https://storage.googleapis.com/tf-datasets/titanic/train.csv
# https://storage.googleapis.com/tf-datasets/titanic/eval.csv
train_file = "./data/titanic/train.csv"
eval_file = "./data/titanic/eval.csv"

train_df = pd.read_csv(train_file)
eval_df = pd.read_csv(eval_file)

print(train_df.head())
print(eval_df.head())

y_train = train_df.pop('survived')
y_eval = eval_df.pop('survived')

print(train_df.head())
print(eval_df.head())
print(y_train.head())
print(y_eval.head())

categorical_columns = ['sex', 'n_siblings_spouses', 'parch', 'class',
                       'deck', 'embark_town', 'alone']
numeric_columns = ['age', 'fare']

feature_columns = []
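# one-hot encode each categorical column from its vocabulary via indicator_column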
for categorical_column in categorical_columns:
    vocab = train_df[categorical_column].unique()
    print(categorical_column, vocab)
    feature_columns.append(
        tf.feature_column.indicator_column(
            tf.feature_column.categorical_column_with_vocabulary_list(
                categorical_column, vocab)))

for numeric_column in numeric_columns:
    feature_columns.append(
        tf.feature_column.numeric_column(
            numeric_column, dtype=tf.float32))

def make_dataset(data_df, label_df, epochs = 10, shuffle = True,
                 batch_size = 32):
    dataset = tf.data.Dataset.from_tensor_slices(
        (dict(data_df), label_df))
    if shuffle:
        dataset = dataset.shuffle(10000)
    dataset = dataset.repeat(epochs).batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
# build the custom estimator
output_dir = "customized_estimator"
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

def model_fn(features, labels, mode, params):
    # model runtime state: Train, Eval, Predict
    
    input_for_next_layer = tf.feature_column.input_layer(
        features, params['feature_columns'])
    for n_unit in params['hidden_units']:
        input_for_next_layer = tf.layers.dense(input_for_next_layer,
                                               units = n_unit,
                                               activation = tf.nn.relu)
    logits = tf.layers.dense(input_for_next_layer,
                             params['n_classes'],
                             activation = None)
    predicted_classes = tf.argmax(logits, 1)  # vector of length n, one class id per example
    
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            "class_ids": predicted_classes[:, tf.newaxis], #扩展出来一个维度,变成n*1的矩阵
            "probabilities": tf.nn.softmax(logits),
            "logits": logits
        }
        return tf.estimator.EstimatorSpec(mode,
                                          predictions = predictions)
    
    loss = tf.losses.sparse_softmax_cross_entropy(labels = labels,
                                                  logits = logits)
    accuracy = tf.metrics.accuracy(labels = labels,  # running (accumulated) accuracy
                                   predictions = predicted_classes,
                                   name = "acc_op")
    metrics = {"accuracy": accuracy}
    
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss = loss,
                                          eval_metric_ops = metrics)
    optimizer = tf.train.AdamOptimizer()
    
    train_op = optimizer.minimize(
        loss, global_step = tf.train.get_global_step())
    
    if mode == tf.estimator.ModeKeys.TRAIN:
        return tf.estimator.EstimatorSpec(mode, loss = loss,
                                          train_op = train_op)

estimator = tf.estimator.Estimator(
    model_fn = model_fn,
    model_dir = output_dir,
    params = {
        "feature_columns": feature_columns,
        "hidden_units": [100, 100],
        "n_classes": 2
    })
estimator.train(input_fn = lambda : make_dataset(
    train_df, y_train, epochs = 100))

estimator.evaluate(lambda : make_dataset(
    eval_df, y_eval, epochs = 1))
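
Because model_fn also handles tf.estimator.ModeKeys.PREDICT, the trained estimator can produce per-example predictions. A minimal usage sketch (hypothetical, not part of the original script; when the input_fn returns a (features, labels) tuple, predict() simply ignores the labels):

predictions = estimator.predict(input_fn = lambda : make_dataset(
    eval_df, y_eval, epochs = 1, shuffle = False))
for pred in list(predictions)[:5]:
    print(pred["class_ids"], pred["probabilities"])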

 
