Reinforcement Learning in Practice (3) || Value Iteration, Part 1
$$Q(s,a) = \sum_{s'} p(s' \mid s, a)\,\bigl[r(s, a, s') + \gamma\, V(s')\bigr]$$
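For intuition, a small worked example (the counts and values here are made up for illustration): suppose that from state $s$ under action $a$ the agent has observed next state $s_1$ three times and $s_2$ once, with rewards $r(s,a,s_1)=0$, $r(s,a,s_2)=1$, current estimates $V(s_1)=0.5$, $V(s_2)=0$, and $\gamma=0.9$. Then

$$Q(s,a) = \tfrac{3}{4}\,(0 + 0.9 \times 0.5) + \tfrac{1}{4}\,(1 + 0.9 \times 0) = 0.3375 + 0.25 = 0.5875$$

This is exactly the estimate that calc_action_value computes below from transition counts.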
import gym
import collections
from tensorboardX import SummaryWriter
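# NOTE (assumption): this listing uses the pre-0.26 gym API, where reset()
# returns only the observation and step() returns a 4-tuple; a sketch for
# the newer 5-tuple API is given after the listing.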
ENV_NAME = "FrozenLake-v1"
# ENV_NAME = "FrozenLake8x8-v1"  # uncomment for the larger 8x8 version
GAMMA = 0.9
TEST_EPISODES = 20
class Agent:
    def __init__(self):
        self.env = gym.make(ENV_NAME)  # environment the agent explores
        self.state = self.env.reset()  # initial state
        # {(s, a, s_): r, ...} -- immediate reward of each observed transition
        self.rewards = collections.defaultdict(float)
        # {(s, a): Counter({s_: count, ...}), ...} -- how often each next state
        # followed (s, a); used to estimate transition probabilities
        self.transits = collections.defaultdict(collections.Counter)
        # {s: V(s), ...} -- state values, defaulting to 0.0
        self.values = collections.defaultdict(float)
    def play_n_random_steps(self, count):
        # take `count` random steps (steps, not episodes) to gather statistics
        for _ in range(count):
            action = self.env.action_space.sample()  # random action
            new_state, reward, is_done, _ = self.env.step(action)
            # store the immediate reward under key (state, action, new_state)
            self.rewards[(self.state, action, new_state)] = reward
            # count how often new_state follows (state, action); the lake is
            # slippery, so several next states are possible for the same pair
            self.transits[(self.state, action)][new_state] += 1
            # reset on episode end, otherwise continue from the new state
            self.state = self.env.reset() if is_done else new_state
    # estimate Q(s, a) from the collected statistics
    def calc_action_value(self, state, action):
        # counts of every next state observed after (state, action)
        target_counts = self.transits[(state, action)]
        # total number of transitions recorded for (state, action)
        total = sum(target_counts.values())
        # Q(s, a) = sum over s' of p(s'|s,a) * (r + GAMMA * V(s'))
        action_value = 0.0
        # tgt_state is a next state, count is how often it occurred
        for tgt_state, count in target_counts.items():
            reward = self.rewards[(state, action, tgt_state)]
            val = reward + GAMMA * self.values[tgt_state]  # V defaults to 0
            action_value += (count / total) * val
        return action_value  # the estimated Q(s, a)
    def select_action(self, state):
        best_action, best_value = None, None
        for action in range(self.env.action_space.n):  # try every action
            # Q-value of this action in the given state
            action_value = self.calc_action_value(state, action)
            if best_value is None or best_value < action_value:
                best_value = action_value
                best_action = action
        # return the action with the largest Q-value; that maximum Q is
        # exactly the state value V(s)
        return best_action
    def play_episode(self, env):  # play one episode greedily w.r.t. Q
        total_reward = 0.0
        state = env.reset()
        while True:
            action = self.select_action(state)  # action with the largest Q
            new_state, reward, is_done, _ = env.step(action)
            self.rewards[(state, action, new_state)] = reward  # record reward
            self.transits[(state, action)][new_state] += 1  # record transition
            total_reward += reward
            if is_done:
                break
            state = new_state  # move to the next state and keep playing
        return total_reward  # total reward of this episode
    def value_iteration(self):
        # brute-force loop: compute Q(s, a) for every state-action pair
        for state in range(self.env.observation_space.n):
            state_values = [
                self.calc_action_value(state, action)
                for action in range(self.env.action_space.n)
            ]
            # store V(s) = max over actions of Q(s, a)
            self.values[state] = max(state_values)
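# Each call to value_iteration() above is a single sweep over all states of
# the empirical model built from transition counts; the loop below alternates
# random exploration, one sweep, and greedy evaluation until the policy
# solves the environment.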
if __name__ == "__main__":
    test_env = gym.make(ENV_NAME)
    agent = Agent()
    writer = SummaryWriter(comment="-v-iteration")

    iter_no = 0
    best_reward = 0.0
    while True:
        iter_no += 1
        # 100 random steps (not 100 episodes): keeps the earlier statistics
        # and refreshes them with new random transitions on every iteration
        agent.play_n_random_steps(100)
        agent.value_iteration()  # recompute V(s) for every state
        reward = 0.0
        for _ in range(TEST_EPISODES):  # play TEST_EPISODES greedy episodes
            reward += agent.play_episode(test_env)
        reward /= TEST_EPISODES  # average reward over TEST_EPISODES episodes
        writer.add_scalar("reward", reward, iter_no)
        if reward > best_reward:
            print("Best reward updated %.3f -> %.3f" % (
                best_reward, reward))
            best_reward = reward
        if reward > 0.80:  # stop once the average reward exceeds 0.8
            print("Solved in %d iterations!" % iter_no)
            break
    writer.close()
    # play one more episode with rendering to visualize the learned policy
    env = gym.make(ENV_NAME)
    s = env.reset()
    env.render()
    while True:
        s_, r, is_done, _ = env.step(agent.select_action(s))
        env.render()
        if is_done:
            break
        s = s_
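The listing assumes gym < 0.26. Under gym >= 0.26 (or gymnasium), reset() returns (obs, info) and step() returns a 5-tuple, so the exploration loop would need to change. A minimal sketch under that assumption (verify against your installed version):

import gym  # or: import gymnasium as gym

env = gym.make("FrozenLake-v1")
state, _ = env.reset()  # reset() -> (obs, info)
for _ in range(100):
    action = env.action_space.sample()
    new_state, reward, terminated, truncated, _ = env.step(action)
    # an episode ends when either flag is set
    if terminated or truncated:
        state, _ = env.reset()
    else:
        state = new_state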