# Reference: https://www.jianshu.com/p/f893cb703b6b — on the use and understanding of
# tf.stop_gradient.
# With self.q_target = tf.stop_gradient(q_target), an op (node) in the TensorFlow
# computation graph is turned into the constant self.q_target, so back-propagation
# of the loss gradient no longer flows into the target net.
# TF1-style graph-mode setup: use the compat.v1 API with v2 behavior disabled.
import tensorflow.compat.v1 as tf
import tensorflow as tf2
tf.disable_v2_behavior()
import numpy as np
# Fix both the TF and NumPy seeds so the demo output is reproducible.
tf2.random.set_seed(1)
np.random.seed(1)
# Redundant with disable_v2_behavior() above, but harmless: graph mode needs eager off.
tf.compat.v1.disable_eager_execution()
# Build a tiny linear model y = w*x + b in graph mode and inspect d(loss)/dw for
# the loss f = y - y_target. Since d(y - y_target)/dw = x, feeding x = 5 must
# print [5.0].
session = tf.Session()
x_input = tf.placeholder(tf.float32, name='x_input')
y_target = tf.placeholder(tf.float32, name='y_input')
w = tf.Variable(2.0, name='weight')
b = tf.Variable(1.0, name='biases')
# Operator form of tf.add(tf.multiply(x_input, w), b) — same Mul/Add ops.
y = x_input * w + b
grad_op = tf.gradients(y - y_target, w)
session.run(tf.global_variables_initializer())
in1 = np.array([5.0])
feed = {x_input: in1, y_target: np.array([10.0])}
result = session.run(grad_op, feed_dict=feed)
print(result)  # [5.0]
# Without stop_gradient: f2 = y - 2*y = -y, so d(f2)/dw = -x → prints [-5.0].
# The gradient DOES flow through y1 back to w.
y1 = 2 * y
grad_f2 = tf.gradients(y - y1, w)
out2 = session.run(grad_f2, feed_dict={x_input: in1})
print(out2)  # [-5.0]
# With stop_gradient: 3*y is treated as a constant during backprop, so the -3x
# contribution vanishes and d(f3)/dw = x → prints [5.0] again.
frozen = tf.stop_gradient(3 * y)
grad_f3 = tf.gradients(y - frozen, w)
out3 = session.run(grad_f3, feed_dict={x_input: in1})
print(out3)  # [5.0]
# Same gradient cut achieved via a placeholder: first evaluate y3 = 4*y to a
# concrete number (4 * 11 = 44), then feed that number back in as plain data.
# Gradients cannot flow through fed values, so d(y - fed)/dw = x → [5.0].
quadruple_y = 4 * y
fed_target = tf.placeholder(tf.float32, name='y4_input')
grad_f4 = tf.gradients(y - fed_target, w)
quad_val = session.run(quadruple_y, feed_dict={x_input: in1})
print(quad_val)  # [44.0]
out4 = session.run(grad_f4, feed_dict={x_input: in1, fed_target: quad_val})
print(out4)  # [5.0]
# Product-rule demo. With y1 frozen, d(y * stop_gradient(y1))/dw = y1 * dy/dw
# = (2wx + 2b) * x, which at w=2, b=1, x=5 gives 22 * 5 = 110.
grad_frozen = tf.gradients(y * tf.stop_gradient(y1), w)
print(session.run(grad_frozen, feed_dict={x_input: in1}))  # [110.0]
# Without freezing, y * y1 = 2*y^2, so d/dw = 4*y*x = 4 * 11 * 5 = 220.
grad_full = tf.gradients(y * y1, w)
print(session.run(grad_full, feed_dict={x_input: in1}))  # [220.0]
session.close()