import gym
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Configuration parameters for the whole setup
seed = 42
gamma = 0.99  # Discount factor for past rewards
max_steps_per_episode = 10000
env = gym.make("CartPole-v0")  # Create the environment
env.seed(seed)
eps = np.finfo(np.float32).eps.item()  # Smallest number such that 1.0 + eps != 1.0
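
# The training loop below calls `model(state)` and samples from `num_actions`
# outputs; both come from the model-building step. A minimal two-headed
# actor-critic network consistent with those calls is sketched here (the
# hidden-layer width of 128 is an assumption of this sketch):
num_inputs = 4    # CartPole observation: cart position/velocity, pole angle/velocity
num_actions = 2   # push the cart to the left or to the right
num_hidden = 128

inputs = layers.Input(shape=(num_inputs,))
common = layers.Dense(num_hidden, activation="relu")(inputs)
action = layers.Dense(num_actions, activation="softmax")(common)  # actor head
critic = layers.Dense(1)(common)                                  # critic head

model = keras.Model(inputs=inputs, outputs=[action, critic])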
optimizer = keras.optimizers.Adam(learning_rate=0.01)
huber_loss = keras.losses.Huber()
action_probs_history = []
critic_value_history = []
rewards_history = []
running_reward = 0
episode_count = 0

while True:  # Run until solved
    state = env.reset()
    episode_reward = 0
    with tf.GradientTape() as tape:
        for timestep in range(1, max_steps_per_episode):
            # env.render(); Adding this line would show the attempts
            # of the agent in a pop up window.

            state = tf.convert_to_tensor(state)
            state = tf.expand_dims(state, 0)

            # Predict action probabilities and estimated future rewards
            # from environment state
            action_probs, critic_value = model(state)
            critic_value_history.append(critic_value[0, 0])

            # Sample action from action probability distribution
            action = np.random.choice(num_actions, p=np.squeeze(action_probs))
            action_probs_history.append(tf.math.log(action_probs[0, action]))

            # Apply the sampled action in our environment
            state, reward, done, _ = env.step(action)
            rewards_history.append(reward)
            episode_reward += reward

            if done:
                break

        # Update running reward to check condition for solving
        running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward
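
        # (The running reward is an exponential moving average: each finished
        # episode contributes 5% of its reward and the previous average keeps
        # 95% of the weight, so a single lucky episode cannot end training.)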
        # Calculate expected value from rewards
        # - At each timestep what was the total reward received after that timestep
        # - Rewards in the past are discounted by multiplying them with gamma
        # - These are the labels for our critic
        returns = []
        discounted_sum = 0
        for r in rewards_history[::-1]:
            discounted_sum = r + gamma * discounted_sum
            returns.insert(0, discounted_sum)

        # Normalize
        returns = np.array(returns)
        returns = (returns - np.mean(returns)) / (np.std(returns) + eps)
        returns = returns.tolist()
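
        # Quick sanity check of the loop above (illustrative numbers, not from
        # a real episode): with rewards [1.0, 1.0, 1.0] and gamma = 0.99 it
        # yields returns ≈ [2.9701, 1.99, 1.0] before normalization. Each entry
        # is that step's reward plus the discounted sum of all later rewards.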
        # Calculating loss values to update our network
        history = zip(action_probs_history, critic_value_history, returns)
        actor_losses = []
        critic_losses = []
        for log_prob, value, ret in history:
            # At this point in history, the critic estimated that we would get a
            # total reward = `value` in the future. We took an action with log
            # probability of `log_prob` and ended up receiving a total reward = `ret`.
            # The actor must be updated so that it predicts an action that leads to
            # high rewards (compared to critic's estimate) with high probability.
            diff = ret - value
            actor_losses.append(-log_prob * diff)  # actor loss

            # The critic must be updated so that it predicts a better estimate of
            # the future rewards.
            critic_losses.append(
                huber_loss(tf.expand_dims(value, 0), tf.expand_dims(ret, 0))
            )
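
            # Sign check with toy numbers (illustrative only): if the critic
            # predicted value = 6.0 but the actual return was ret = 10.0, then
            # diff = +4.0, and minimizing -log_prob * diff pushes log_prob up,
            # making the sampled action more likely in similar states.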
        # Backpropagation
        loss_value = sum(actor_losses) + sum(critic_losses)
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Clear the loss and reward history
        action_probs_history.clear()
        critic_value_history.clear()
        rewards_history.clear()

    # Log details
    episode_count += 1
    if episode_count % 10 == 0:
        template = "running reward: {:.2f} at episode {}"
        print(template.format(running_reward, episode_count))

    if running_reward > 195:  # Condition to consider the task solved
        print("Solved at episode {}!".format(episode_count))
        break
WARNING:tensorflow:Layer dense is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.
If you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.
To change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.
running reward: 12.18 at episode 10
running reward: 16.44 at episode 20
running reward: 15.52 at episode 30
running reward: 14.82 at episode 40
running reward: 13.60 at episode 50
running reward: 12.37 at episode 60
running reward: 11.90 at episode 70
running reward: 12.10 at episode 80
running reward: 12.14 at episode 90
running reward: 12.66 at episode 100
running reward: 14.01 at episode 110
running reward: 12.67 at episode 120
running reward: 14.34 at episode 130
running reward: 25.74 at episode 140
running reward: 28.25 at episode 150
running reward: 33.08 at episode 160
running reward: 48.51 at episode 170
running reward: 57.25 at episode 180
running reward: 75.87 at episode 190