7.1 Off-policy n-step Sarsa: Code Implementation

Algorithm pseudocode: (the pseudocode figure from the original post is omitted here)
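Since the figure is not reproduced, these are the quantities the implementation below computes at each update step tau; they correspond to the off-policy n-step Sarsa update with importance sampling (Sutton & Barto, Chapter 7):

\[
\rho \leftarrow \prod_{i=\tau+1}^{\min(\tau+n-1,\;T-1)} \frac{\pi(A_i \mid S_i)}{b(A_i \mid S_i)}
\]
\[
G \leftarrow \sum_{i=\tau+1}^{\min(\tau+n,\;T)} \gamma^{\,i-\tau-1} R_i, \qquad
\text{if } \tau+n < T:\quad G \leftarrow G + \gamma^{n}\, Q(S_{\tau+n}, A_{\tau+n})
\]
\[
Q(S_\tau, A_\tau) \leftarrow Q(S_\tau, A_\tau) + \alpha\,\rho\,\bigl[ G - Q(S_\tau, A_\tau) \bigr]
\]

Here \(\pi\) is the (near-greedy) target policy, \(b\) is the more exploratory behaviour policy that actually selects the actions, and \(T\) is the terminal time step of the episode.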
1) maze_env.py (the maze environment reuses MorvanZhou's code directly)

import numpy as np
import time
import sys
if sys.version_info.major == 2:
    import Tkinter as tk
else:
    import tkinter as tk


UNIT = 40   # pixels
MAZE_H = 4  # grid height
MAZE_W = 4  # grid width


class Maze(tk.Tk, object):
    def __init__(self):
        super(Maze, self).__init__()
        self.action_space = ['u', 'd', 'r', 'l']  # order matches step(): 0 up, 1 down, 2 right, 3 left
        self.n_actions = len(self.action_space)
        self.title('maze')
        self.geometry('{0}x{1}'.format(MAZE_W * UNIT, MAZE_H * UNIT))  # geometry string is width x height
        self._build_maze()

    def _build_maze(self):
        self.canvas = tk.Canvas(self, bg='white',
                           height=MAZE_H * UNIT,
                           width=MAZE_W * UNIT)

        # create grids
        for c in range(0, MAZE_W * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
            self.canvas.create_line(x0, y0, x1, y1)
        for r in range(0, MAZE_H * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
            self.canvas.create_line(x0, y0, x1, y1)

        # create origin
        origin = np.array([20, 20])

        # hell
        hell1_center = origin + np.array([UNIT * 2, UNIT])
        self.hell1 = self.canvas.create_rectangle(
            hell1_center[0] - 15, hell1_center[1] - 15,
            hell1_center[0] + 15, hell1_center[1] + 15,
            fill='black')
        # hell
        hell2_center = origin + np.array([UNIT, UNIT * 2])
        self.hell2 = self.canvas.create_rectangle(
            hell2_center[0] - 15, hell2_center[1] - 15,
            hell2_center[0] + 15, hell2_center[1] + 15,
            fill='black')

        # create oval
        oval_center = origin + UNIT * 2
        self.oval = self.canvas.create_oval(
            oval_center[0] - 15, oval_center[1] - 15,
            oval_center[0] + 15, oval_center[1] + 15,
            fill='yellow')

        # create red rect
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')

        # pack all
        self.canvas.pack()

    def reset(self):
        self.update()
        time.sleep(0.5)
        self.canvas.delete(self.rect)
        origin = np.array([20, 20])
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')
        # return observation
        return self.canvas.coords(self.rect)

    def step(self, action):
        s = self.canvas.coords(self.rect)
        base_action = np.array([0, 0])
        if action == 0:   # up
            if s[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 1:   # down
            if s[1] < (MAZE_H - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 2:   # right
            if s[0] < (MAZE_W - 1) * UNIT:
                base_action[0] += UNIT
        elif action == 3:   # left
            if s[0] > UNIT:
                base_action[0] -= UNIT

        self.canvas.move(self.rect, base_action[0], base_action[1])  # move agent

        s_ = self.canvas.coords(self.rect)  # next state

        # reward function
        if s_ == self.canvas.coords(self.oval):
            reward = 1
            done = True
            s_ = 'terminal'
        elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
            reward = -1
            done = True
            s_ = 'terminal'
        else:
            reward = 0
            done = False

        return s_, reward, done

    def render(self):
        time.sleep(0.1)
        self.update()

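The environment can be sanity-checked on its own before adding the learner. Below is a minimal sketch (not part of the original post) that drives the maze with random actions for a few episodes:

import numpy as np
from maze_env import Maze

if __name__ == '__main__':
    env = Maze()
    for episode in range(3):
        state = env.reset()
        done = False
        while not done:
            env.render()
            action = np.random.randint(env.n_actions)   # random action: 0 up, 1 down, 2 right, 3 left
            state, reward, done = env.step(action)      # step returns (next_state, reward, done)
    env.destroy()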
2) Off-policy n-step Sarsa implementation

import numpy as np
import pandas as pd
from maze_env import Maze


class OffSarsaN(object):
    # n-step Off-policy Learning by Importance Sampling
    def __init__(self, action_space):
        self.nA = action_space                  # number of actions
        self.actions = list(range(action_space))
        # Q-table: one row per visited state (keyed by its string), one column per action
        self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)

    def check_state_exist(self, s):
        # lazily add a row of zeros for states that have not been visited yet
        # (pd.concat replaces DataFrame.append, which was removed in pandas 2.0)
        if s not in self.q_table.index:
            new_row = pd.Series([0.0] * len(self.actions),
                                index=self.q_table.columns,
                                name=s)
            self.q_table = pd.concat([self.q_table, new_row.to_frame().T])

    def target_policy(self, s):
        # target policy: epsilon-greedy with a small epsilon (close to greedy)
        self.check_state_exist(s)
        A = self.target_policy_probs(s)
        return np.random.choice(range(self.nA), p=A)

    def target_policy_probs(self, s, epsilon=.1):
        # action probabilities under the near-greedy target policy
        A = np.ones(self.nA, dtype=float) * epsilon / self.nA
        best_action = np.argmax(self.q_table.loc[s, :])
        A[best_action] += (1.0 - epsilon)
        return A

    def behaviour_policy(self, s):
        # behaviour policy: epsilon-greedy with a larger epsilon, so it explores more than the target policy
        self.check_state_exist(s)
        A = self.behaviour_policy_probs(s)
        return np.random.choice(range(self.nA), p=A)

    def behaviour_policy_probs(self, s, epsilon=.3):
        # action probabilities under the exploratory behaviour policy
        A = np.ones(self.nA, dtype=float) * epsilon / self.nA
        best_action = np.argmax(self.q_table.loc[s, :])
        A[best_action] += (1.0 - epsilon)
        return A


if __name__ == '__main__':
    env = Maze()
    action_space = env.n_actions
    RL = OffSarsaN(action_space)

    n = 3          # number of steps in the n-step return
    gamma = 0.9    # discount factor
    alpha = 0.01   # learning rate (step size)

    for episode in range(100):
        # per-episode buffers: buffer_s[t] = S_t, buffer_a[t] = A_t, buffer_r[t] = R_{t+1}
        buffer_s = []
        buffer_a = []
        buffer_r = []
        state = env.reset()
        # actions are always drawn from the behaviour policy; the target policy only enters through rho
        action = RL.behaviour_policy(str(state))

        buffer_s.append(str(state))
        buffer_a.append(action)

        T = 10000  # placeholder for T = "infinity" until the terminal step is observed
        t = 0

        while True:
            if t < T:
                env.render()
                state_, reward, done = env.step(action)
                buffer_s.append(str(state_))
                buffer_r.append(reward)

                if state_ == 'terminal':
                    T = t + 1
                else:
                    action_ = RL.behaviour_policy(str(state_))
                    buffer_a.append(action_)
                    action = action_

            tao = t - n + 1  # tau: index of the (state, action) pair being updated

            if tao >= 0:
                # importance sampling ratio over the actions A_{tau+1} ... A_{min(tau+n-1, T-1)}
                rho = 1
                for i in range(tao+1, min(tao+n, T)):
                    rho *= RL.target_policy_probs(buffer_s[i])[buffer_a[i]] /\
                           RL.behaviour_policy_probs(buffer_s[i])[buffer_a[i]]
                # n-step return: G = sum_{i=tau+1}^{min(tau+n, T)} gamma^(i-tau-1) * R_i
                # (buffer_r[i-1] holds R_i, since rewards are stored starting from R_1)
                G = 0
                for i in range(tao+1, min(tao+n, T)+1):
                    G += gamma**(i-tao-1) * buffer_r[i-1]

                # bootstrap with Q(S_{tau+n}, A_{tau+n}) if the episode has not terminated within n steps
                if tao+n < T:
                    G += gamma**n * RL.q_table.loc[buffer_s[tao+n], buffer_a[tao+n]]

                # off-policy n-step Sarsa update, scaled by the importance ratio rho
                RL.q_table.loc[buffer_s[tao], buffer_a[tao]] += \
                    alpha * rho * (G - RL.q_table.loc[buffer_s[tao], buffer_a[tao]])

            if tao == T-1:
                break

            t += 1

    print('game over')
    env.destroy()
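For intuition about the importance weights produced by the two policies above (epsilon = 0.1 for the target policy, epsilon = 0.3 for the behaviour policy, four actions), here is a small back-of-the-envelope check; all the variable names below are mine and not part of the original code:

n_actions = 4
eps_pi, eps_b = 0.1, 0.3                        # target / behaviour epsilons used above
pi_greedy = eps_pi / n_actions + (1 - eps_pi)   # 0.925: target prob. of the greedy action
b_greedy = eps_b / n_actions + (1 - eps_b)      # 0.775: behaviour prob. of the greedy action
pi_other = eps_pi / n_actions                   # 0.025: target prob. of each non-greedy action
b_other = eps_b / n_actions                     # 0.075: behaviour prob. of each non-greedy action
print(pi_greedy / b_greedy)                     # ~1.19: greedy steps are up-weighted
print(pi_other / b_other)                       # ~0.33: exploratory steps are down-weighted

So rho multiplies in roughly 1.19 for every near-greedy step and roughly 0.33 for every exploratory step inside the n-step window, which is how the update corrects for the data being generated by the behaviour policy rather than the target policy.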