![1bf7ab96b6920b1a26cdeeb9cb05d12e.png](https://i-blog.csdnimg.cn/blog_migrate/adf8a0ac505a9c4724fd93927848a557.jpeg)
自动求导机制:
梯度求解利器:tf.GradientTape
GradientTape是eager模式下计算梯度用的,而eager模式是TensorFlow2.0的默认模式。
![9c2921b76f0f9875a7006a01fb848e21.png](https://i-blog.csdnimg.cn/blog_migrate/7c26ee7852c63281bbe4d5cd554c8225.png)
x = tf.constant(3.0)
with tf.GradientTape() as tape:
    # Constants are not tracked automatically; watch() opts x in.
    tape.watch(x)
    y = x * x
# dy/dx = 2*x = 2*3 = 6
dy_dx = tape.gradient(y, x)
![e2c9089620cba3cd87d851b313012498.png](https://i-blog.csdnimg.cn/blog_migrate/14e0dff25572b180393c31b5c13c0b31.jpeg)
![6cae700b2c8147b434af8ac3de08a1f3.png](https://i-blog.csdnimg.cn/blog_migrate/5a552983ef0b9cc4444f8a0d44f21022.jpeg)
![e5e9a9078bf311d9e85e128b63f04cb1.png](https://i-blog.csdnimg.cn/blog_migrate/3bd0b0893b7f8a919330b29f31714899.jpeg)
详细看下apply_gradients:
![ab7513d62078067aafc3c7176e66eb05.png](https://i-blog.csdnimg.cn/blog_migrate/e2fc6dca4ebed0887e6ece968d7fac58.png)
案例1、模型自动求导
构建模型(神经网络的前向传播) --> 定义损失函数 --> 定义优化函数 --> 定义tape --> 模型得到预测值 --> 前向传播得到loss --> 反向传播 --> 用优化函数将计算出来的梯度更新到变量上面去。
class MyModel(tf.keras.Model):
    """Two-layer classifier: Dense(32, relu) hidden layer -> Dense(num_classes) logits."""

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_model')
        self.num_classes = num_classes
        # Layers used by the forward pass.
        self.dense_1 = tf.keras.layers.Dense(32, activation='relu')  # hidden layer
        self.dense_2 = tf.keras.layers.Dense(num_classes)            # output layer (logits)

    def call(self, inputs):
        # Forward pass through the layers defined in __init__.
        hidden = self.dense_1(inputs)
        return self.dense_2(hidden)
import numpy as np

# Toy 10-class problem: 1000 samples, 32 features, one-hot-style targets.
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))

model = MyModel(num_classes=10)
# The model emits logits (no activation on the last layer), hence from_logits=True.
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()

# Record the forward pass so the tape can differentiate through it.
with tf.GradientTape() as tape:
    predictions = model(data)
    loss = loss_object(labels, predictions)

# Back-propagate, then apply one optimizer step to the trainable variables.
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
案例2:使用GradientTape自定义训练模型
class MyModel(tf.keras.Model):
    """Subclassed Keras model: one relu hidden layer and a logit output layer."""

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_model')
        self.num_classes = num_classes
        # Layers needed by call().
        self.dense_1 = tf.keras.layers.Dense(32, activation='relu')
        self.dense_2 = tf.keras.layers.Dense(num_classes)

    def call(self, inputs):
        # Forward propagation using the layers created in __init__.
        features = self.dense_1(inputs)
        return self.dense_2(features)
import numpy as np

data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))

model = MyModel(num_classes=10)

# Instantiate an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function. The model's output layer has no activation,
# so it emits logits: from_logits=True is required for a numerically
# correct (and stable) cross-entropy.
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

epochs = 3
for epoch in range(epochs):
    print('Start of epoch %d' % (epoch,))
    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Open a GradientTape to record the operations run during the
        # forward pass, which enables auto-differentiation.
        with tf.GradientTape() as tape:
            # Forward pass; operations the model applies to its inputs
            # are recorded on the tape.
            logits = model(x_batch_train, training=True)  # predictions for this minibatch
            # Loss value for this minibatch.
            loss_value = loss_fn(y_batch_train, logits)
        # Gradients of the loss w.r.t. every trainable variable.
        grads = tape.gradient(loss_value, model.trainable_weights)
        # One step of gradient descent: update the variables to minimize the loss.
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        # Log every 200 batches.
        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * batch_size))
案例3:使用GradientTape自定义训练模型进阶(加入评估函数)
让我们将metric添加到组合中。下面可以在从头开始编写的训练循环中随时使用内置指标(或编写的自定义指标)。流程如下:
- 在循环开始时初始化metrics
- metric.update_state():每batch之后更新
- metric.result():需要显示metrics的当前值时调用
- metric.reset_states():需要清除metrics状态时重置(通常在每个epoch的结尾)
class MyModel(tf.keras.Model):
    """Subclassed classifier used by the metric-tracking training loop below."""

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_model')
        self.num_classes = num_classes
        # Layers used during the forward pass.
        self.dense_1 = tf.keras.layers.Dense(32, activation='relu')
        self.dense_2 = tf.keras.layers.Dense(num_classes)

    def call(self, inputs):
        # Forward propagation through the layers defined in __init__.
        out = self.dense_1(inputs)
        out = self.dense_2(out)
        return out
import numpy as np

# Random train / validation / test splits for the demo.
x_train = np.random.random((1000, 32))
y_train = np.random.random((1000, 10))
x_val = np.random.random((200, 32))
y_val = np.random.random((200, 10))
x_test = np.random.random((200, 32))
y_test = np.random.random((200, 10))

# Optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
# Loss: the model outputs logits, so from_logits=True.
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

# Metrics, one instance per split so their states stay independent.
train_acc_metric = tf.keras.metrics.CategoricalAccuracy()
val_acc_metric = tf.keras.metrics.CategoricalAccuracy()

# Training dataset: shuffle then batch.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Validation dataset: batching only, no shuffle needed.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
进行几个epoch运行训练循环:
model = MyModel(num_classes=10)

epochs = 3
for epoch in range(epochs):
    print('Start of epoch %d' % (epoch,))

    # Training: iterate over the mini-batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # One batch: record the forward pass, then back-propagate.
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        # Update the training metric with this batch's results.
        train_acc_metric(y_batch_train, logits)

    # Display the training metric at the end of each epoch...
    train_acc = train_acc_metric.result()
    print('Training acc over epoch: %s' % (float(train_acc),))
    # ...and reset it so the next epoch starts from a clean state.
    train_acc_metric.reset_states()

    # Run a validation pass at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val)
        # Update the validation metric.
        val_acc_metric(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    print('Validation acc: %s' % (float(val_acc),))
    val_acc_metric.reset_states()
# A test-set evaluation would follow the same pattern as the validation pass.
鸢尾花自定义模型:
import tensorflow as tf
from sklearn import datasets  # dataset loader
import numpy as np

# Load iris: 150 samples, 4 features, 3 classes.
iris = datasets.load_iris()
data = iris.data
labels = iris.target

# Shuffle features and labels together. Neural networks fit strongly;
# if batches always appear in the same order the model may effectively
# memorize that ordering, hurting generalization.
data = np.concatenate((data, labels.reshape(150, 1)), axis=-1)
np.random.shuffle(data)

# Split the shuffled array back into features and labels.
X = data[:, :4]
Y = data[:, -1]
#子类模型
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel,self).__init__()
self.D1 = tf.keras.layers.Dense(32,activation='relu')
self.D2 = tf.keras.layers.Dense(32,activation='relu')
self.D3 = tf.keras.layers.Dense(3,activation='softmax')
def call(self,inputs):
x = self.D1(inputs)
x = self.D2(x)
x = self.D3(x)
return x
model = MyModel()

# 2. Loss function. The model's final layer already applies softmax, so it
#    outputs probabilities, not logits — from_logits must be False here.
#    (Passing probabilities with from_logits=True soft-maxes them a second
#    time and silently computes the wrong loss.)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)

# 3. Optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

# Training dataset: shuffle then batch.
batch_size = 32
train_dataset = tf.data.Dataset.from_tensor_slices((X, Y))
train_dataset = train_dataset.shuffle(buffer_size=64).batch(batch_size)

# Accuracy metric for integer (sparse) labels.
train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
for epoch in range(epochs):
for step,(x_batch_train,y_batch_train) in enumerate(train_dataset):
#定义tape计算梯度
with tf.GradientTape() as tape:
# 运行该模型的前向传播。 模型应用于其输入的操作将记录在GradientTape上。
logits = model(x_batch_train) # 这个minibatch的预测值
# 计算这个minibatch的损失值
loss_value = loss_object(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# 更新训练集的metrics
train_acc_metric(y_batch_train, logits)
train_acc = train_acc_metric.result()
print('Training acc over epoch: %s' % (float(train_acc),))