Reinforcement Learning Notes (3)

  • Implement the treasure hunter's movement policy with tabular Q-learning (the update rule is sketched right below this list)
  • The output is shown below the code
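
The script below implements tabular Q-learning for a 1-D treasure hunt: the explorer 'o' starts at the far-left cell, the treasure 'T' sits at the far right, and the only reward (+1) is given for reaching the treasure. Actions are chosen ε-greedily, and each step applies the update performed on q_table.loc[S, A] in the code:

    Q(S, A) ← Q(S, A) + ALPHA * (R + LAMBDA * max_a Q(S_, a) - Q(S, A))

where the discounted term is dropped when S_ is terminal.
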
import numpy as np
import pandas as pd
import time

# Seed the RNG so the run is reproducible
np.random.seed(2)

# Length of the 1-D world (the treasure sits at the rightmost cell)
N_STATES = 6

# Actions available to the explorer
ACTIONS = ['left', 'right']

# Probability of acting greedily (exploitation)
EPSILON = 0.9

# Learning rate
ALPHA = 0.1

# Discount factor
LAMBDA = 0.9

# Maximum number of episodes
MAX_EPISODES = 15

# Delay per move (animation refresh time)
FRESH_TIME = 0.1

# Build the Q-table
def build_q_table(n_states, actions):
    # Initialize the table to zeros; every state has 2 actions to choose from
    table = pd.DataFrame(
        np.zeros((n_states, len(actions))),
        columns=actions
    )

    # Show the table
    print(table)

    # Return the table
    return table

# Choose an action
def choose_action(state, q_table):
    # Pull the row of the Q-table for the current state
    state_actions = q_table.iloc[state, :]

    # Explore: act randomly 10% of the time (also whenever this state is still unexplored)
    if (np.random.uniform() > EPSILON) or (state_actions == 0).all():
        # Random action
        action_name = np.random.choice(ACTIONS)
    # Exploit: pick the greedy action 90% of the time
    else:
        # Greedy action
        action_name = state_actions.idxmax()

    # Return the chosen action
    return action_name

# Get feedback from the environment
def get_env_feedback(S, A):
    # Move right
    if A == 'right':
        # Reached the treasure (terminal state)
        if S == N_STATES - 2:
            S_ = 'terminal'
            R = 1
        # Not at the treasure yet
        else:
            S_ = S + 1
            R = 0
    # Move left
    else:
        # Already at the left wall: stay put
        if S == 0:
            S_ = S
            R = 0
        # Otherwise step left
        else:
            S_ = S - 1
            R = 0

    # Return the next state and the reward
    return S_, R

# Update and render the environment
def update_env(S, episode, step_counter):
    # The environment is a 1-D strip '-----T' with the treasure at the right end
    env_list = ['-'] * (N_STATES - 1) + ['T']

    # Terminal state reached
    if S == 'terminal':
        interaction = 'Episode %s: total_steps = %s' % (episode + 1, step_counter)
        # Show the episode summary
        print('\r{}'.format(interaction), end='')

        # Pause
        time.sleep(2)
        # print('\r                              ', end='')

    # Not terminal yet
    else:
        # Mark the explorer's current position
        env_list[S] = 'o'
        interaction = ''.join(env_list)
        print('\r{}'.format(interaction), end='')

        # Pause
        time.sleep(FRESH_TIME)

# Q-learning main loop
def rl():
    # Build the Q-table
    q_table = build_q_table(N_STATES, ACTIONS)

    # Loop over episodes
    for episode in range(MAX_EPISODES):
        # Reset the step counter
        step_counter = 0
        # Reset the explorer to the leftmost position
        S = 0
        # Reset the terminal flag
        is_terminated = False
        # Render the initial environment
        update_env(S, episode, step_counter)

        # Step until the terminal state is reached
        while not is_terminated:
            A = choose_action(S, q_table)
            S_, R = get_env_feedback(S, A)

            # Q estimate: the current value of Q(S, A)
            q_predict = q_table.loc[S, A]

            # Next state is not terminal
            if S_ != 'terminal':
                # TD target: reward plus discounted best value of the next state
                q_target = R + LAMBDA * q_table.iloc[S_, :].max()

            # Next state is terminal
            else:
                # TD target: just the reward, no future value
                q_target = R

                # End this episode
                is_terminated = True

            # Q-learning update: move Q(S, A) toward the TD target
            q_table.loc[S, A] += ALPHA * (q_target - q_predict)

            # Move to the next state
            S = S_

            # Render the environment
            update_env(S, episode, step_counter + 1)

            # Increment the step counter
            step_counter += 1

        # Show the Q-table after this episode
        print('\r\nQ-table:')
        print(q_table)

    # Return the learned Q-table
    return q_table

# Entry point
if __name__ == "__main__":
    q_table = rl()
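
Once rl() returns, the greedy policy can be read off the learned table by taking the best action in every row. A minimal sketch, assuming the script above has already been run (print_policy is an illustrative helper, not part of the original code):

# Sketch: derive the greedy policy from a learned Q-table
def print_policy(q_table):
    # idxmax(axis=1) gives, for each state (row), the column label with the largest Q-value
    greedy_actions = q_table.idxmax(axis=1)
    for state, action in greedy_actions.items():
        print('state {}: move {}'.format(state, action))

# Usage after training:
# print_policy(q_table)

With the final table shown below, every non-terminal state prefers 'right'; the last row stays all zero because no action is ever taken from the treasure cell, so its idxmax is not meaningful.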
    
  • Output
   left  right
0   0.0    0.0
1   0.0    0.0
2   0.0    0.0
3   0.0    0.0
4   0.0    0.0
5   0.0    0.0

Episode 1: total_steps = 38
Q-table:
   left  right
0   0.0    0.0
1   0.0    0.0
2   0.0    0.0
3   0.0    0.0
4   0.0    0.1
5   0.0    0.0

Episode 2: total_steps = 28
Q-table:
       left     right
0  0.000000  0.000000
1  0.000000  0.000000
2  0.000000  0.001539
3  0.000073  0.017100
4  0.000810  0.190000
5  0.000000  0.000000

Episode 3: total_steps = 6
Q-table:
       left     right
0  0.000000  0.000000
1  0.000000  0.000139
2  0.000000  0.002924
3  0.000073  0.032490
4  0.000810  0.271000
5  0.000000  0.000000

Episode 4: total_steps = 7
Q-table:
       left     right
0  0.000000  0.000012
1  0.000000  0.000612
2  0.000035  0.005556
3  0.000073  0.053631
4  0.000810  0.343900
5  0.000000  0.000000

Episode 5: total_steps = 6
Q-table:
       left     right
0  0.000001  0.000066
1  0.000000  0.001051
2  0.000035  0.009827
3  0.000073  0.079219
4  0.000810  0.409510
5  0.000000  0.000000

Episode 6: total_steps = 5
Q-table:
       left     right
0  0.000001  0.000154
1  0.000000  0.001830
2  0.000035  0.015974
3  0.000073  0.108153
4  0.000810  0.468559
5  0.000000  0.000000

Episode 7: total_steps = 5
Q-table:
       left     right
0  0.000001  0.000304
1  0.000000  0.003085
2  0.000035  0.024110
3  0.000073  0.139508
4  0.000810  0.521703
5  0.000000  0.000000

Episode 8: total_steps = 7
Q-table:
       left     right
0  0.000001  0.000773
1  0.000050  0.004946
2  0.000035  0.034255
3  0.000073  0.172510
4  0.000810  0.569533
5  0.000000  0.000000

Episode 9: total_steps = 5
Q-table:
       left     right
0  0.000001  0.001141
1  0.000050  0.007535
2  0.000035  0.046355
3  0.000073  0.206517
4  0.000810  0.612580
5  0.000000  0.000000

Episode 10: total_steps = 5
Q-table:
       left     right
0  0.000001  0.001705
1  0.000050  0.010953
2  0.000035  0.060306
3  0.000073  0.240998
4  0.000810  0.651322
5  0.000000  0.000000

Episode 11: total_steps = 7
Q-table:
       left     right
0  0.000001  0.003254
1  0.000271  0.015285
2  0.000035  0.075966
3  0.000073  0.275517
4  0.000810  0.686189
5  0.000000  0.000000

Episode 12: total_steps = 5
Q-table:
       left     right
0  0.000001  0.004305
1  0.000271  0.020594
2  0.000035  0.093166
3  0.000073  0.309722
4  0.000810  0.717570
5  0.000000  0.000000

Episode 13: total_steps = 7
Q-table:
       left     right
0  0.000001  0.005728
1  0.000271  0.032612
2  0.002454  0.111724
3  0.000073  0.343331
4  0.000810  0.745813
5  0.000000  0.000000

Episode 14: total_steps = 5
Q-table:
       left     right
0  0.000001  0.008090
1  0.000271  0.039406
2  0.002454  0.131451
3  0.000073  0.376121
4  0.000810  0.771232
5  0.000000  0.000000

Episode 15: total_steps = 5
Q-table:
       left     right
0  0.000001  0.010827
1  0.000271  0.047296
2  0.002454  0.152157
3  0.000073  0.407920
4  0.000810  0.794109
5  0.000000  0.000000