Eager execution

What’s eager execution?
Unlike TensorFlow 1.x, the new version no longer requires session.run() to inspect a variable's contents, which makes debugging much more convenient.

# Setup and basic usage
from __future__ import absolute_import, division, print_function, unicode_literals
import os

import tensorflow as tf

import cProfile
tf.executing_eagerly()
True
x = [[2.]]
m = tf.matmul(x, x)
print("numpy method:",m.numpy())  
# tf.Tensor.numpy method returns the object's value as a NumPy ndarray
print("row method:  ",m)
print("hello, {}".format(m))
numpy method: [[4.]]
raw method:   tf.Tensor([[4.]], shape=(1, 1), dtype=float32)
hello, [[4.]]

numpy <—> tensor

1. numpy->tensor

counter = tf.constant([0, 1])
print(counter)
tf.Tensor([0 1], shape=(2,), dtype=int32)
counter = tf.convert_to_tensor([[1,2,3],[4,5,6]])
print(counter)
tf.Tensor(
[[1 2 3]
 [4 5 6]], shape=(2, 3), dtype=int32)

2. tensor->numpy

num = counter.numpy()
print(num, type(num))
[[1 2 3]
 [4 5 6]] <class 'numpy.ndarray'>

By default a tf.GradientTape is non-persistent: it can compute only one gradient, and any subsequent call raises a RuntimeError.

w = tf.Variable([[1.0]])
with tf.GradientTape() as tape:
  loss = w * w

grad = tape.gradient(loss, w)
print(grad)
tf.Tensor([[2.]], shape=(1, 1), dtype=float32)
grad = tape.gradient(grad, w)
#ERROR!!! GradientTape.gradient can only be called once on non-persistent tapes.
---------------------------------------------------------------------------

RuntimeError                              Traceback (most recent call last)

<ipython-input-21-dcd2b2c4c6bf> in <module>()
      1 
----> 2 grad = tape.gradient(grad, w)


S:\Anaconda\application\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\backprop.py in gradient(self, target, sources, output_gradients, unconnected_gradients)
    963     """
    964     if self._tape is None:
--> 965       raise RuntimeError("GradientTape.gradient can only be called once on "
    966                          "non-persistent tapes.")
    967     if self._recording:


RuntimeError: GradientTape.gradient can only be called once on non-persistent tapes.
grad2 = tape.gradient(loss, w)
#ERROR!!! GradientTape.gradient can only be called once on non-persistent tapes.
---------------------------------------------------------------------------

RuntimeError                              Traceback (most recent call last)

<ipython-input-22-6c773ce286dc> in <module>()
----> 1 grad2 = tape.gradient(loss, w)


S:\Anaconda\application\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\backprop.py in gradient(self, target, sources, output_gradients, unconnected_gradients)
    963     """
    964     if self._tape is None:
--> 965       raise RuntimeError("GradientTape.gradient can only be called once on "
    966                          "non-persistent tapes.")
    967     if self._recording:


RuntimeError: GradientTape.gradient can only be called once on non-persistent tapes.
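If you need several gradients from the same computation, create the tape with persistent=True and delete it when you are done. A minimal sketch (not part of the original notebook):

w = tf.Variable([[1.0]])
with tf.GradientTape(persistent=True) as tape:
  loss = w * w

# Both calls succeed on a persistent tape.
print(tape.gradient(loss, w))  # tf.Tensor([[2.]], shape=(1, 1), dtype=float32)
print(tape.gradient(loss, w))  # a second call also works
del tape  # release the resources held by the persistent tape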

Train a model

1. Build the dataset

  • from_tensor_slices: takes a tensor that holds multiple samples and slices it along the first dimension, so each slice becomes one sample.
  • from_tensors: treats the given tensors as a single sample; it is often paired with interleave to read samples from several files (one sample per file) at the same time.
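A quick comparison of the two constructors (an illustrative sketch, not from the original notebook):

t = tf.constant([[1, 2], [3, 4]])
# from_tensors: the whole tensor becomes a single dataset element of shape (2, 2).
print(tf.data.Dataset.from_tensors(t))
# from_tensor_slices: slicing along the first axis yields two elements of shape (2,).
print(tf.data.Dataset.from_tensor_slices(t))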

# Fetch and format the mnist data
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
#
dataset = tf.data.Dataset.from_tensor_slices(
  (tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32),
   tf.cast(mnist_labels,tf.int64)))
dataset = dataset.shuffle(1000).batch(32)

2. Build a simple model

Models are usually assembled from the Keras layers library: Dense, Conv2D, BatchNormalization, MaxPooling, and so on.

# Build the model
mnist_model = tf.keras.Sequential([
  tf.keras.layers.Conv2D(16,[3,3], activation='relu',
                         input_shape=(None, None, 1)),
  tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
  tf.keras.layers.GlobalAveragePooling2D(),
  tf.keras.layers.Dense(10)
])

Because eager execution is enabled, you can call the model at any point to inspect its output:

for images,labels in dataset.take(1):
  print("Logits: ", mnist_model(images[0:1]).numpy())
Logits:  [[-0.02026599  0.01537856  0.03644642  0.02598679 -0.01940615 -0.00344082
   0.00128668  0.00131152 -0.02102163  0.0017579 ]]

3. 璁粌妯″瀷

  • 瀹氫箟鎹熷け鍑芥暟(tf.keras.loss.XXX)
  • 瀹氫箟浼樺寲鍣? 锛坱f.keras.optimizers.XXX)(Adam, SGD)
  • 瀹氫箟璁粌姝ラ 锛坒it / custom training loop)
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

loss_history = []
def train_step(images, labels):
  with tf.GradientTape() as tape:
    logits = mnist_model(images, training=True)
    
    # Add asserts to check the shape of the output.
    tf.debugging.assert_equal(logits.shape, (32, 10))
    
    loss_value = loss_object(labels, logits)

  loss_history.append(loss_value.numpy().mean())
  grads = tape.gradient(loss_value, mnist_model.trainable_variables)
  optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))
def train(epochs):
  for epoch in range(epochs):
    for (batch, (images, labels)) in enumerate(dataset):
      train_step(images, labels)
    print('Epoch {} finished'.format(epoch))
train(epochs=3)
Epoch 0 finished
Epoch 1 finished
Epoch 2 finished
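For comparison, the same training can be run through the high-level Keras API instead of a custom loop. A minimal sketch (assuming the dataset and mnist_model defined above):

mnist_model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
mnist_model.fit(dataset, epochs=3)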

4. Plot the loss

import matplotlib.pyplot as plt

plt.plot(loss_history)
plt.xlabel('Batch #')
plt.ylabel('Loss [entropy]')
Text(0, 0.5, 'Loss [entropy]')
plt.show()

[Figure: plot of loss_history (x: Batch #, y: Loss [entropy])]

Object-oriented metrics

tf.keras.metrics are stored as objects. Update a metric by passing new data to the callable, and retrieve the result with the object's result() method, for example:
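A minimal sketch using tf.keras.metrics.Mean (the running means shown in the comments follow from the inputs fed so far):

m = tf.keras.metrics.Mean("test")
m(0)
m(5)
print(m.result())  # 2.5, the mean of 0 and 5
m([8, 9])
print(m.result())  # 5.5, the mean of 0, 5, 8 and 9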
