import tensorflow as tf

w = tf.Variable([[1.0]])
with tf.GradientTape() as tape:  # the tape records operations on watched variables
    loss = w * w
grad = tape.gradient(loss, w)  # compute the gradient dloss/dw
print(grad)  # => tf.Tensor([[2.]], shape=(1, 1), dtype=float32)
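A related note not covered by the snippet above: by default the tape only tracks trainable tf.Variable objects, so gradients with respect to a plain tf.constant come back as None unless the tensor is registered with tape.watch. A minimal sketch of that case, reusing the same dloss/dw example:

import tensorflow as tf

x = tf.constant([[1.0]])
with tf.GradientTape() as tape:
    tape.watch(x)        # constants are not watched automatically
    loss = x * x
grad = tape.gradient(loss, x)  # dloss/dx = 2x
print(grad)  # tf.Tensor([[2.]], shape=(1, 1), dtype=float32)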