[Reinforcement Learning from Scratch] Training Gym's rocket lunar lander with soft Q-learning


More code on my Gitee homepage: https://gitee.com/GZHzzz
CSDN blog homepage: https://blog.csdn.net/gzhzzaa

Preface

This post trains an agent to land the rocket in Gym's LunarLander-v2 environment. Despite the title, the agent below (SQLAgent) implements soft Q-learning with a small PyTorch network: the hard max in the Q-learning target is replaced by a temperature-smoothed logsumexp. The code assumes the classic Gym API (roughly gym 0.25 or earlier, with Box2D installed), where env.reset() returns only the observation and env.seed() is still available.

Algorithm flow

  • The little rocket needs to land precisely and gently (i.e. slowly) on the landing pad! A quick way to inspect the environment it flies in is sketched right below.


[Figure: algorithm flow diagram]
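Before diving into the agent, it helps to know what the network sees and outputs. Here is a minimal inspection sketch, assuming the classic Gym API used throughout this post (gym with Box2D installed); the interpretation of the 8 observation dimensions and 4 discrete actions follows the LunarLander-v2 documentation.

import gym

env = gym.make('LunarLander-v2')
# 8-dimensional observation: x, y, x-velocity, y-velocity, angle, angular velocity,
# left-leg contact flag, right-leg contact flag
print(env.observation_space)
# 4 discrete actions: 0 = do nothing, 1 = fire left engine, 2 = fire main engine, 3 = fire right engine
print(env.action_space)
print(env.spec.reward_threshold)  # 200.0: the average score at which the task counts as solved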

show me code, no bb

import sys
import logging
import itertools
import copy

import numpy as np
np.random.seed(0)
import pandas as pd
import gym
import matplotlib.pyplot as plt
import torch
torch.manual_seed(0)
import torch.nn as nn
import torch.optim as optim
import torch.distributions as distributions

logging.basicConfig(level=logging.DEBUG,
        format='%(asctime)s [%(levelname)s] %(message)s',
        stream=sys.stdout, datefmt='%H:%M:%S')
     
env = gym.make('LunarLander-v2')
env.seed(0)
for key in vars(env):
    logging.info('%s: %s', key, vars(env)[key])
for key in vars(env.spec):
    logging.info('%s: %s', key, vars(env.spec)[key])
class DQNReplayer:
    def __init__(self, capacity):
        self.memory = pd.DataFrame(index=range(capacity),
                columns=['state', 'action', 'reward', 'next_state', 'done'])
        self.i = 0
        self.count = 0
        self.capacity = capacity

    def store(self, *args):
        self.memory.loc[self.i] = args
        self.i = (self.i + 1) % self.capacity
        self.count = min(self.count + 1, self.capacity)

    def sample(self, size):
        indices = np.random.choice(self.count, size=size)
        return (np.stack(self.memory.loc[indices, field]) for field in
                self.memory.columns)
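DQNReplayer is a simple ring buffer backed by a pandas DataFrame: store() overwrites the oldest row once capacity is reached, and sample() draws a random minibatch (with replacement) and stacks each column into an array. A minimal smoke test with dummy transitions (it reuses the numpy import above; the capacity and values are arbitrary):

replayer = DQNReplayer(capacity=100)
for _ in range(5):
    state, next_state = np.zeros(8), np.ones(8)       # dummy 8-dim LunarLander states
    replayer.store(state, 0, 1.0, next_state, False)  # (state, action, reward, next_state, done)
states, actions, rewards, next_states, dones = replayer.sample(4)
print(states.shape, actions.shape, rewards.shape)     # (4, 8) (4,) (4,)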
class SQLAgent:
    def __init__(self, env):
        self.action_n = env.action_space.n
        self.gamma = 0.99

        self.replayer = DQNReplayer(10000)

        self.alpha = 0.02

        self.evaluate_net = self.build_net(
                input_size=env.observation_space.shape[0],
                hidden_sizes=[256, 256], output_size=self.action_n)
        self.optimizer = optim.Adam(self.evaluate_net.parameters(), lr=3e-4)
        self.loss = nn.MSELoss()

    def build_net(self, input_size, hidden_sizes, output_size):
        layers = []
        for in_size, out_size in zip(
                [input_size,] + hidden_sizes, hidden_sizes + [output_size,]):
            layers.append(nn.Linear(in_size, out_size))
            layers.append(nn.ReLU())
        layers = layers[:-1]  # drop the trailing ReLU so the output layer is linear
        model = nn.Sequential(*layers)
        return model

    def reset(self, mode=None):
        self.mode = mode
        if self.mode == 'train':
            self.trajectory = []
            self.target_net = copy.deepcopy(self.evaluate_net)

    def step(self, observation, reward, done):
        state_tensor = torch.as_tensor(observation,
                dtype=torch.float).squeeze(0)
        # Boltzmann (softmax) policy: pi(a|s) = exp((Q(s,a) - V(s)) / alpha)
        q_div_alpha_tensor = self.evaluate_net(state_tensor) / self.alpha
        v_div_alpha_tensor = torch.logsumexp(q_div_alpha_tensor, dim=-1,
                keepdim=True)
        prob_tensor = (q_div_alpha_tensor - v_div_alpha_tensor).exp()
        action_tensor = distributions.Categorical(prob_tensor).sample()
        action = action_tensor.item()
        if self.mode == 'train':
            self.trajectory += [observation, reward, done, action]
            if len(self.trajectory) >= 8:
                # the last two buffered steps form one transition
                # (state, action, reward, next_state, done)
                state, _, _, act, next_state, reward, done, _ = \
                        self.trajectory[-8:]
                self.replayer.store(state, act, reward, next_state, done)
            if self.replayer.count >= 500:
                self.learn()
        return action

    def close(self):
        pass

    def learn(self):
        # replay
        states, actions, rewards, next_states, dones = \
                self.replayer.sample(128) # replay transitions
        state_tensor = torch.as_tensor(states, dtype=torch.float)
        action_tensor = torch.as_tensor(actions, dtype=torch.long)
        reward_tensor = torch.as_tensor(rewards, dtype=torch.float)
        next_state_tensor = torch.as_tensor(next_states, dtype=torch.float)
        done_tensor = torch.as_tensor(dones, dtype=torch.float)

        # train: soft Bellman target r + gamma * (1 - done) * V(s'),
        # with V(s') = alpha * logsumexp(Q(s', .) / alpha)
        next_q_tensor = self.target_net(next_state_tensor)
        next_v_tensor = self.alpha * torch.logsumexp(next_q_tensor / self.alpha, dim=-1)
        target_tensor = reward_tensor + self.gamma * (1. - done_tensor) * next_v_tensor
        pred_tensor = self.evaluate_net(state_tensor)
        q_tensor = pred_tensor.gather(1, action_tensor.unsqueeze(1)).squeeze(1)
        loss_tensor = self.loss(q_tensor, target_tensor.detach())
        self.optimizer.zero_grad()
        loss_tensor.backward()
        self.optimizer.step()
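Although the title says Q-learning, SQLAgent is really a (deep) soft Q-learning agent: the hard max over next-state Q-values is replaced by the soft value V(s) = alpha * log sum_a exp(Q(s, a) / alpha), and actions are sampled from the matching Boltzmann policy pi(a|s) = exp((Q(s, a) - V(s)) / alpha). As the temperature alpha shrinks, the soft value tends to the ordinary max and the update approaches standard DQN-style Q-learning. A tiny sanity check with a toy Q vector, using the same torch.logsumexp call as learn():

q = torch.tensor([1.0, 2.0, 3.0])          # toy Q-values for 3 actions
for alpha in (1.0, 0.1, 0.01):
    v_soft = alpha * torch.logsumexp(q / alpha, dim=-1)
    print(alpha, round(v_soft.item(), 4))  # tends to max(q) = 3.0 as alpha -> 0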


agent = SQLAgent(env)
# interaction loop (used for both training and testing below)
def play_episode(env, agent, max_episode_steps=None, mode=None, render=False):
    observation, reward, done = env.reset(), 0., False
    agent.reset(mode=mode)
    episode_reward, elapsed_steps = 0., 0
    while True:
        # the agent receives the reward/done produced by its previous action
        action = agent.step(observation, reward, done)
        if render:
            env.render()
        if done:
            break
        observation, reward, done, _ = env.step(action)
        episode_reward += reward
        elapsed_steps += 1
        if max_episode_steps and elapsed_steps >= max_episode_steps:
            break
    agent.close()
    return episode_reward, elapsed_steps


logging.info('==== train ====')
episode_rewards = []
for episode in itertools.count():
    episode_reward, elapsed_steps = play_episode(env.unwrapped, agent,
            max_episode_steps=env._max_episode_steps, render=True, mode='train')
    episode_rewards.append(episode_reward)
    logging.debug('train episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
    if np.mean(episode_rewards[-10:]) > 250:
        break
plt.plot(episode_rewards)
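The raw reward curve is quite noisy, so the stopping rule above (mean reward of the last 10 episodes above 250) is easier to see on a smoothed curve. A minimal smoothing sketch (the window of 10 mirrors the stopping rule; plt.show() is needed when running as a plain script rather than in a notebook):

window = 10
smoothed = [np.mean(episode_rewards[max(0, i - window + 1):i + 1])
        for i in range(len(episode_rewards))]
plt.plot(episode_rewards, label='episode reward')
plt.plot(smoothed, label='10-episode moving average')
plt.xlabel('episode')
plt.ylabel('reward')
plt.legend()
plt.show()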


logging.info('==== test ====')
episode_rewards = []
for episode in range(100):
    episode_reward, elapsed_steps = play_episode(env, agent, render=True)
    episode_rewards.append(episode_reward)
    logging.debug('test episode %d: reward = %.2f, steps = %d',
            episode, episode_reward, elapsed_steps)
logging.info('average episode reward = %.2f ± %.2f',
        np.mean(episode_rewards), np.std(episode_rewards))
  • I have run all of this code myself, you know! ╰(°▽°)╯

Results

[Figure: training results]

Final words

Ten years spent sharpening one sword; let's keep pushing forward together!
More code on my Gitee homepage: https://gitee.com/GZHzzz
CSDN blog homepage: https://blog.csdn.net/gzhzzaa

  • Fighting!

Classic agent models implemented in PyTorch
Classic reinforcement learning papers

while True:
	Go life


Thanks for your likes and comments! (❁´◡`❁)
