Relevant knowledge structure:
Reference:
https://oneraynyday.github.io/ml/2018/05/24/Reinforcement-Learning-Monte-Carlo/#first-visit-monte-carlo
In this reproduction, the Blackjack environment from gym is used.
Part 1. Environment test: play n games with a random policy
import gym
env = gym.make('Blackjack-v0')
# print(env.observation_space)
# print(env.action_space)
# Play n episodes with a random policy
def random_play(n):
    for i_episode in range(n):
        state = env.reset()  # reset to a random initial state
        while True:
            print(state)  # current state
            action = env.action_space.sample()  # pick an action at random
            # Feedback from the environment: (state, reward, done, info)
            state, reward, done, info = env.step(action)
            # Episode over: show the result
            if done:
                print('End game! Reward: ', reward)
                print('You won :)\n') if reward > 0 else print('You lost :(\n')
                break
    return
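For example, the environment test can be run for a few episodes as follows (the episode count is arbitrary; each printed state is a tuple of the player's current sum, the dealer's face-up card, and whether the player holds a usable ace):

# Smoke-test the environment with 3 random episodes; output varies from run to run
random_play(3)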
Part 2. Monte Carlo prediction: given a policy, learn its state-value function
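For reference, the quantity computed for each visited state in the code below is the discounted return, and the Monte Carlo estimate of the state value is the average of these returns over many sampled episodes:

$$G_t = R_{t+1} + \gamma R_{t+2} + \cdots + \gamma^{T-t-1} R_T, \qquad V(s) \approx \frac{1}{N(s)} \sum_{k=1}^{N(s)} G_k(s)$$

where $N(s)$ is the number of (every-visit) occurrences of state $s$ across the sampled episodes.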
import numpy as np

def MC_prediction(max_hit, discount, i):
    # Initialization
    V = []                    # stores V(St)
    player_trajectory = []    # trajectory of this episode
    returns = []              # Returns
    # Initialize the state for this episode
    state = env.reset()
    while True:
        # Step 1: in the current state, choose an action according to the policy
        # (hit while the player's sum is below max_hit, otherwise stick)
        action = 1
        if state[0] >= max_hit:
            action = 0
        # Step 2: take the action, get feedback from the environment,
        # and store the sample (current state, action, reward)
        next_state, reward, done, info = env.step(action)
        player_trajectory.append((state, action, reward))  # episode sequence (S0, A0, R1)
        # Step 3: update the state
        state = next_state
        # This yields the episode sequence: S0 A0 R1 S1 A1 R2 ... S(T-1) A(T-1) RT
        if done:
            break
    # print("Episode %d finished, trajectory:" % i, player_trajectory)
    G = 0
    states, actions, rewards = zip(*player_trajectory)
    discounts = np.array([discount ** k for k in range(len(rewards) + 1)])  # vector of discount factors
    # Every-visit MC prediction
    for j, state in enumerate(states):
        # print("Estimating state", j, state)
        G = sum(rewards[j:] * discounts[:-(1 + j)])
        returns.append((state, G))
    # Unpack the per-episode (state, return) samples
    state, v = zip(*returns)
    return state, v
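A minimal sketch of how this per-episode function could be driven to estimate V is shown below. It assumes MC_prediction returns the episode's states and returns as completed above; the helper name estimate_V and the episode count are illustrative, not from the original post.

from collections import defaultdict

def estimate_V(n_episodes, max_hit=18, discount=1.0):
    # Accumulate every-visit returns per state across episodes,
    # then average them to get the Monte Carlo estimate of V(s)
    returns_sum = defaultdict(float)
    returns_count = defaultdict(int)
    for i in range(n_episodes):
        states, Gs = MC_prediction(max_hit, discount, i)
        for s, G in zip(states, Gs):
            returns_sum[s] += G
            returns_count[s] += 1
    # V(s) is the mean return observed after visits to s
    return {s: returns_sum[s] / returns_count[s] for s in returns_sum}

# Example: estimate V for the "hit below 18" policy
# V = estimate_V(50000, max_hit=18, discount=1.0)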