TensorFlow 2.0 Notes (5): Keras High-Level APIs

Keras != tf.keras: the standalone Keras library is not the same thing as tf.keras, the Keras implementation bundled with TensorFlow. These notes use tf.keras, which ships the following submodules:

  • datasets
  • layers
  • losses
  • metrics
  • optimizers
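A quick sketch of where each submodule lives (a minimal example; every name below is part of the public tf.keras namespace):

import tensorflow as tf

(x, y), _ = tf.keras.datasets.mnist.load_data()                 # datasets
fc = tf.keras.layers.Dense(10)                                  # layers
ce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)  # losses
acc = tf.keras.metrics.Accuracy()                               # metrics
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)              # optimizers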

Metrics

  • update_state
  • result().numpy()
  • reset_states
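A minimal standalone sketch of this lifecycle, before the full training script below:

import tensorflow as tf

m = tf.keras.metrics.Accuracy()
m.update_state([1, 2, 3], [1, 2, 0])  # accumulate: 2 of 3 predictions correct
print(m.result().numpy())             # 0.6666667
m.reset_states()                      # clear the accumulated state
print(m.result().numpy())             # 0.0
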
import os
import tensorflow as tf
import tensorflow_datasets as tfds

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']


def normalize(images, labels):
    images = tf.cast(images, tf.float32)
    images /= 255
    return images, labels


print("datasets", train_dataset.map(normalize))
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)

num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples

BATCH_SIZE = 128
train_dataset = train_dataset.shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation=tf.nn.relu),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(32, activation=tf.nn.relu),
    tf.keras.layers.Dense(10)
])

model.build(input_shape=[None, 28*28])
model.summary()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)


def main():
    acc_meter = tf.keras.metrics.Accuracy()
    loss_meter = tf.keras.metrics.Mean()
    for epoch in range(5):
        for step, (x,y) in enumerate(train_dataset):
            x = tf.reshape(x, [-1, 28*28])

            with tf.GradientTape() as tape:
                logits = model(x)
                y_ = tf.one_hot(y, depth=10)
                #loss_mse = tf.reduce_mean(tf.losses.MSE(y_, logits))
                loss_ce = tf.reduce_mean(tf.losses.categorical_crossentropy(y_, logits, from_logits=True))
                loss_meter.update_state(loss_ce)

            grads = tape.gradient(loss_ce, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 500 == 0:
                # test
                # total_correct = 0
                # total_num = 0
                for x_test, y_test in test_dataset:
                    x_test = tf.reshape(x_test, [-1, 28 * 28])
                    logits = model(x_test)
                    prob = tf.nn.softmax(logits, axis=1)
                    pred = tf.argmax(prob, axis=1)
                    # correct = tf.equal(pred, y_test)
                    # total_correct += tf.reduce_sum(tf.cast(correct, dtype=tf.int32)).numpy()
                    # total_num += x_test.shape[0]
                    acc_meter.update_state(y_test, pred)

                print(epoch, step, 'loss:', loss_meter.result().numpy(),
                      'Evaluate Acc:', acc_meter.result().numpy())
                loss_meter.reset_states()
                acc_meter.reset_states()


if __name__ == '__main__':
    main()
  • Compile
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
  • Fit
model.fit(train_dataset, epochs=10, validation_data=test_dataset, validation_freq=2)
  • Evaluate
model.evaluate(test_dataset)
  • Predict
sample = next(iter(test_dataset))
x = sample[0]
y = sample[1] # one-hot
pred = model.predict(x) # [b, 10]
# convert back to number 
y = tf.argmax(y, axis=1)
pred = tf.argmax(pred, axis=1)

print(pred)
print(y)
import os
import tensorflow as tf
import tensorflow_datasets as tfds

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']


def normalize(images, labels):
    images = tf.cast(images, tf.float32)
    images /= 255.
    images = tf.reshape(images, [28*28])
    labels = tf.one_hot(labels, depth=10)
    return images, labels


train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)

num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples

BATCH_SIZE = 100
# no .repeat() needed: model.fit(epochs=10) below already makes 10 passes over the data
train_dataset = train_dataset.shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation=tf.nn.relu),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(32, activation=tf.nn.relu),
    tf.keras.layers.Dense(10)
])

model.build(input_shape=[None, 28*28])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_dataset, epochs=10, validation_data=test_dataset, validation_freq=2)

model.evaluate(test_dataset)

sample = next(iter(test_dataset))
x = sample[0]
y = sample[1] # one-hot
pred = model.predict(x) # [b, 10]
# convert back to number
y = tf.argmax(y, axis=1)
pred = tf.argmax(pred, axis=1)

print(pred)
print(y)

Custom Networks

  • keras.Sequential
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation=tf.nn.relu),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(32, activation=tf.nn.relu),
    tf.keras.layers.Dense(10)
])
model.build(input_shape=[None, 28*28])
model.summary()

model.trainable_variables  # list of all trainable weight tensors
model(x)                   # forward pass; calling the model invokes model.call(x)
  • keras.layers.Layer
  • keras.Model
    • Inherit from keras.layers.Layer / keras.Model
    • __init__
    • call
    • Model: compile/fit/evaluate
class MyDense(tf.keras.layers.Layer):

    def __init__(self, inp_dim, outp_dim):
        super(MyDense, self).__init__()

        self.kernel = self.add_weight('w', [inp_dim, outp_dim])
        self.bias = self.add_weight('b', [outp_dim])

    def call(self, inputs, training=None):
        out = inputs @ self.kernel + self.bias

        return out
class MyModel(tf.keras.Model):

    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = MyDense(28*28, 256)
        self.fc2 = MyDense(256, 128)
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)

    def call(self, inputs, training=None, mask=None):
        x = self.fc1(inputs)
        x = tf.nn.relu(x)
        x = self.fc2(x)
        x = tf.nn.relu(x)
        x = self.fc3(x)
        x = tf.nn.relu(x)
        x = self.fc4(x)
        x = tf.nn.relu(x)
        x = self.fc5(x)

        return x
import os
import tensorflow as tf
import tensorflow_datasets as tfds

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']


def normalize(images, labels):
    images = tf.cast(images, tf.float32)
    images /= 255.
    images = tf.reshape(images, [28*28])
    labels = tf.one_hot(labels, depth=10)
    return images, labels


train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)

num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples

BATCH_SIZE = 100
train_dataset = (train_dataset.shuffle(num_train_examples)
                 .batch(BATCH_SIZE)
                 .prefetch(tf.data.experimental.AUTOTUNE))
test_dataset = test_dataset.batch(BATCH_SIZE)


class MyDense(tf.keras.layers.Layer):

    def __init__(self, inp_dim, outp_dim):
        super(MyDense, self).__init__()

        self.kernel = self.add_weight('w', [inp_dim, outp_dim])
        self.bias = self.add_weight('b', [outp_dim])

    def call(self, inputs, training=None):
        out = inputs @ self.kernel + self.bias

        return out


class MyModel(tf.keras.Model):

    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = MyDense(28*28, 256)
        self.fc2 = MyDense(256, 128)
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)

    def call(self, inputs, training=None, mask=None):
        x = self.fc1(inputs)
        x = tf.nn.relu(x)
        x = self.fc2(x)
        x = tf.nn.relu(x)
        x = self.fc3(x)
        x = tf.nn.relu(x)
        x = self.fc4(x)
        x = tf.nn.relu(x)
        x = self.fc5(x)

        return x


model = MyModel()
model.build(input_shape=[None, 28*28])
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_dataset, epochs=2, validation_data=test_dataset, validation_freq=1)

model.evaluate(test_dataset)

sample = next(iter(test_dataset))
x = sample[0]
y = sample[1] # one-hot
pred = model.predict(x) # [b, 10]
# convert back to number
y = tf.argmax(y, axis=1)
pred = tf.argmax(pred, axis=1)

print(pred)
print(y)

Saving and Loading Models

  • save/load weights
#save the weights
model.save_weights('weights.ckpt')
print('save weights')
del model

model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation=tf.nn.relu),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(32, activation=tf.nn.relu),
    tf.keras.layers.Dense(10)
])

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
#restore the weights
model.load_weights('weights.ckpt')
print('loaded weights!')
model.evaluate(test_dataset)

  • save/load entire model
model.save('model.h5')
print('saved total model.')
del model

print('load model from file')
model = tf.keras.models.load_model('model.h5')
model.evaluate(test_dataset)
  • saved_model
tf.saved_model.save(model, 'saved_model/')
imported = tf.saved_model.load('saved_model/')
f = imported.signatures["serving_default"]
print(f(x=tf.ones([1, 28*28])))
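To check which signatures the exported SavedModel actually exposes (a minimal sketch; the exact keys depend on how the model was exported):

print(list(imported.signatures.keys()))  # typically ['serving_default']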

Example

import os
import tensorflow as tf

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

BATCH_SIZE = 128


def preprocess(images, labels):
    images = 2 * tf.cast(images, tf.float32) / 255. - 1.  # scale pixels to [-1, 1]
    labels = tf.cast(labels, tf.float32)  # labels are already one-hot; keep them float for the loss
    return images, labels


class MyDense(tf.keras.layers.Layer):

    def __init__(self, inp_dim, outp_dim):
        super(MyDense, self).__init__()

        self.kernel = self.add_weight('w', [inp_dim, outp_dim])
        self.bias = self.add_weight('b', [outp_dim])

    def call(self, inputs, training=None):
        out = inputs @ self.kernel + self.bias

        return out


class MyModel(tf.keras.Model):

    def __init__(self):
        super(MyModel, self).__init__()
        self.fc1 = MyDense(32*32*3, 256)
        self.fc2 = MyDense(256, 128)
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)

    def call(self, inputs, training=None, mask=None):
        x = tf.reshape(inputs, [-1, 32*32*3])
        x = self.fc1(x)
        x = tf.nn.relu(x)
        x = self.fc2(x)
        x = tf.nn.relu(x)
        x = self.fc3(x)
        x = tf.nn.relu(x)
        x = self.fc4(x)
        x = tf.nn.relu(x)
        x = self.fc5(x)

        return x


(x, y), (x_val, y_val) = tf.keras.datasets.cifar10.load_data()
y = tf.squeeze(y)
y_val = tf.squeeze(y_val)
y = tf.one_hot(y, depth=10)
y_val = tf.one_hot(y_val, depth=10)
print("datasets:", x.shape, y.shape, x_val.shape, y_val.shape, x.min(), x.max())

train_data = tf.data.Dataset.from_tensor_slices((x, y))
train_data = train_data.map(preprocess).shuffle(50000).batch(BATCH_SIZE)
test_data = tf.data.Dataset.from_tensor_slices((x_val, y_val))
test_data = test_data.map(preprocess).batch(BATCH_SIZE)

model = MyModel()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.fit(train_data, epochs=15, validation_data=test_data, validation_freq=1)