Actor Critic是强化学习中的一种结合体,结合了以值为基础(如Q-learning)和以动作概率为基础(如policy gradient)的算法
Actor Critic的 actor前身就是policy gradient, 能够在连续动作中选择合适的动作
critic的前身就是Q-learning,或其他function approximation方法,能进行单步更新
actor基于概率选行为,critic基于actor的行为评判行为的得分,actor根据critic的评分修改选行为的概率
优点: 可以进行单步更新,比传统的Policy Gradient要快.
缺点: 取决于Critic的价值判断,但是Critic难收敛,再加上Actor的更新,就更难收敛
后来 Google DeepMind 提出了 Actor Critic 的升级版 Deep Deterministic Policy Gradient (DDPG), 它融合了 DQN 的优势, 缓解了收敛难的问题
实战
actor修改行为时就像蒙着眼睛一直向前开车,critic就是那个扶方向盘改变actor开车方向的
""" Actor-Critic using TD-error as the Advantage, Reinforcement Learning. The cart pole example. Policy is oscillated. View more on my tutorial page: https://morvanzhou.github.io/tutorials/ Using: tensorflow 1.0 gym 0.8.0 """ import numpy as np import tensorflow as tf import gym np.random.seed(2) tf.set_random_seed(2) # reproducible # Superparameters OUTPUT_GRAPH = False MAX_EPISODE = 3000 DISPLAY_REWARD_THRESHOLD = 200 # renders environment if total episode reward is greater then this threshold MAX_EP_STEPS = 1000 # maximum time step in one episode RENDER = False # rendering wastes time GAMMA = 0.9 # reward discount in TD error LR_A = 0.001 # learning rate for actor LR_C = 0.01 # learning rate for critic env = gym.make('CartPole-v0') env.seed(1) # reproducible env = env.unwrapped N_F = env.observation_space.shape[0] N_A = env.action_space.n class Actor(object): def __init__(self, sess, n_features, n_actions, lr=0.001): self.sess = sess self.s = tf.placeholder(tf.float32, [1, n_features], "state") self.a = tf.placeholder(tf.int32, None, "act") self.td_error = tf.placeholder(tf.float32, None, "td_error") # TD_error with tf.variable_scope('Actor'): l1 = tf.layers.dense( inputs=self.s, units=20, # number of hidden units activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='l1' ) self.acts_prob = tf.layers.dense( inputs=l1, units=n_actions, # output units activation=tf.nn.softmax, # get action probabilities kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='acts_prob' ) with tf.variable_scope('exp_v'): log_prob = tf.log(self.acts_prob[0, self.a]) self.exp_v = tf.reduce_mean(log_prob * self.td_error) # advantage (TD_error) guided loss with tf.variable_scope('train'): self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v) # minimize(-exp_v) = maximize(exp_v) def learn(self, s, a, td): s = 
s[np.newaxis, :] feed_dict = {self.s: s, self.a: a, self.td_error: td} _, exp_v = self.sess.run([self.train_op, self.exp_v], feed_dict) return exp_v def choose_action(self, s): s = s[np.newaxis, :] probs = self.sess.run(self.acts_prob, {self.s: s}) # get probabilities for all actions return np.random.choice(np.arange(probs.shape[1]), p=probs.ravel()) # return a int class Critic(object): def __init__(self, sess, n_features, lr=0.01): self.sess = sess self.s = tf.placeholder(tf.float32, [1, n_features], "state") self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next") self.r = tf.placeholder(tf.float32, None, 'r') with tf.variable_scope('Critic'): l1 = tf.layers.dense( inputs=self.s, units=20, # number of hidden units activation=tf.nn.relu, # None # have to be linear to make sure the convergence of actor. # But linear approximator seems hardly learns the correct Q. kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='l1' ) self.v = tf.layers.dense( inputs=l1, units=1, # output units activation=None, kernel_initializer=tf.random_normal_initializer(0., .1), # weights bias_initializer=tf.constant_initializer(0.1), # biases name='V' ) with tf.variable_scope('squared_TD_error'): self.td_error = self.r + GAMMA * self.v_ - self.v self.loss = tf.square(self.td_error) # TD_error = (r+gamma*V_next) - V_eval with tf.variable_scope('train'): self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss) def learn(self, s, r, s_): s, s_ = s[np.newaxis, :], s_[np.newaxis, :] v_ = self.sess.run(self.v, {self.s: s_}) td_error, _ = self.sess.run([self.td_error, self.train_op], {self.s: s, self.v_: v_, self.r: r}) return td_error sess = tf.Session() actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A) critic = Critic(sess, n_features=N_F, lr=LR_C) # we need a good teacher, so the teacher should learn faster than the actor sess.run(tf.global_variables_initializer()) if OUTPUT_GRAPH: 
tf.summary.FileWriter("logs/", sess.graph) for i_episode in range(MAX_EPISODE): s = env.reset() t = 0 track_r = [] while True: if RENDER: env.render() a = actor.choose_action(s) s_, r, done, info = env.step(a) if done: r = -20 track_r.append(r) td_error = critic.learn(s, r, s_) # gradient = grad[r + gamma * V(s_) - V(s)] actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error] s = s_ t += 1 if done or t >= MAX_EP_STEPS: ep_rs_sum = sum(track_r) if 'running_reward' not in globals(): running_reward = ep_rs_sum else: running_reward = running_reward * 0.95 + ep_rs_sum * 0.05 if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True # rendering print("episode:", i_episode, " reward:", int(running_reward)) break