Reinforcement Learning in Practice: The Cross-Entropy Method

  • Core idea: discard the bad episodes
  • Steps:
    • Run N episodes with the current model and the environment
    • Compute the total reward of each episode and determine a reward boundary. Usually this is a percentile of all the rewards, e.g. the 50th or 70th percentile (see the short sketch after this list)
    • Discard every episode whose total reward falls below the boundary
    • Train on the remaining "elite" episodes, using the observations as inputs and the actions that were taken as the desired outputs
    • Repeat the steps above
  • Limitations:
    • For training, episodes have to be finite, and preferably short
    • The total rewards of the episodes must vary enough to separate good episodes from bad ones
    • There is no intermediate indication of whether the agent is succeeding or failing
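
To make the boundary step concrete, here is a minimal sketch of the percentile filtering, using made-up reward values purely for illustration:

import numpy as np

# hypothetical total rewards of one batch of 6 episodes (illustrative values only)
rewards = [12.0, 35.0, 8.0, 50.0, 22.0, 41.0]

# the 70th percentile of the batch rewards becomes the reward boundary (about 38.0 here)
reward_bound = np.percentile(rewards, 70)

# keep only the "elite" episodes whose total reward reaches the boundary
elite = [r for r in rewards if r >= reward_bound]
print(reward_bound, elite)  # roughly: 38.0 [50.0, 41.0]
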
The code:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:tiger
# datetime:2021/9/11 1:33 PM

import gym
import torch
import numpy as np
from torch import nn
from collections import namedtuple
from tensorboardX import SummaryWriter
import torch.optim as optim

HIDDEN_SIZE = 128  # number of hidden units in the hidden layer
BATCH_SIZE = 20  # number of episodes per batch
PERCENTILE = 70  # filtering percentile: episodes below the 70th percentile of batch rewards are discarded

class Net(nn.Module):
    def __init__(self, obs_size, hidden_size, n_actions):
        super(Net, self).__init__()
        # Build the network. It outputs raw action scores (logits) with no softmax:
        # nn.CrossEntropyLoss applies log-softmax internally during training, and
        # softmax is applied explicitly only when sampling actions in iterate_batches.
        self.net = nn.Sequential(
            nn.Linear(obs_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, n_actions)
        )

    # forward pass, invoked via __call__
    def forward(self, x):
        return self.net(x)

# A single episode: stores the undiscounted total reward and the collection of EpisodeSteps
Episode = namedtuple('Episode', field_names=['reward', 'steps'])

# A single step the agent took in an episode: stores the observation from the environment and the action the agent performed.
EpisodeStep = namedtuple('EpisodeStep', field_names=['observation', 'action'])

# Takes the environment, the network, and the number of episodes to generate per iteration
def iterate_batches(env, net, batch_size):
    batch = []  # accumulates the current batch (a list of Episode instances)
    episode_reward = 0.0  # cumulative reward of the current episode
    episode_steps = []   # list of steps of the current episode
    obs = env.reset()  # reset the environment and obtain the first observation
    sm = nn.Softmax(dim=1)  # softmax layer used to turn the network output into probabilities
    # environment loop
    while True:
        obs_v = torch.FloatTensor([obs])  # convert the observation into a tensor; for CartPole it is 1x4: a batch of 1 observation with 4 components
        act_probs_v = sm(net(obs_v))  # turn the network's action scores into a probability distribution
        act_probs = act_probs_v.data.numpy()[0]  # action probabilities for the first (and only) sample in the batch
        action = np.random.choice(len(act_probs), p=act_probs)  # sample an action from that distribution; do not drop the p= argument, otherwise sampling is uniform and training may not converge
        next_obs, reward, is_done, _ = env.step(action)  # execute the sampled action in the environment
        episode_reward += reward  # add the reward the environment just returned to the episode total
        episode_steps.append(EpisodeStep(observation=obs, action=action))  # record this step in episode_steps
        # when the episode is over, is_done becomes True and the block below runs
        if is_done:
            batch.append(Episode(reward=episode_reward, steps=episode_steps))  # add the finished episode to the batch
            episode_reward = 0.0  # reset the cumulative reward
            episode_steps = []  # clear the step list
            next_obs = env.reset()  # reset the environment
            if len(batch) == batch_size:  # once the batch holds batch_size episodes, yield it and clear the buffer
                yield batch
                batch = []
        obs = next_obs  # the next observation becomes the input of the next loop iteration

# Filters out the episodes that do not meet the reward boundary
def filter_batch(batch, percentile):
    rewards = list(map(lambda s: s.reward, batch))  # total rewards of all episodes in the batch
    reward_bound = np.percentile(rewards, percentile)  # reward boundary at the given percentile
    reward_mean = float(np.mean(rewards))  # mean reward of the batch (used only for monitoring)

    train_obs = []  # observations used for training
    train_act = []  # actions used for training
    # go through the episodes of the batch one by one
    for example in batch:
        if example.reward < reward_bound:
            continue
        # extend appends all values of another sequence to the end of a list; here it collects every observation of each episode that passes the filter
        train_obs.extend(map(lambda step: step.observation, example.steps))
        # likewise collect every action of the qualifying episodes
        train_act.extend(map(lambda step: step.action, example.steps))
    train_obs_v = torch.FloatTensor(train_obs)  # convert the list to a tensor
    train_act_v = torch.LongTensor(train_act)  # convert the list to a tensor
    return train_obs_v, train_act_v, reward_bound, reward_mean  # elite observations and actions, plus the reward boundary and mean reward of the batch

if __name__ == '__main__':
    env = gym.make("CartPole-v0")
    obs_size = env.observation_space.shape[0]
    n_actions = env.action_space.n
    net = Net(obs_size, HIDDEN_SIZE, n_actions)
    objective = nn.CrossEntropyLoss()
    optimizer = optim.Adam(params=net.parameters(), lr=0.01)  # use Adam as the optimizer
    writer = SummaryWriter(comment="-cartpole")
    # iterate_batches is a generator (as defined above), so enumerate yields an iteration number together with a batch of batch_size episodes
    for iter_no, batch in enumerate(iterate_batches(env, net, BATCH_SIZE)):
        obs_v, act_v, reward_b, reward_m = filter_batch(batch, PERCENTILE)
        optimizer.zero_grad()
        action_scores_v = net(obs_v)
        loss_v = objective(action_scores_v, act_v)
        loss_v.backward()
        optimizer.step()
        print("%d: loss=%.3f, reward_mean=%.1f, reward_bound=%.1f" % (iter_no, loss_v.item(), reward_m, reward_b))
        writer.add_scalar("loss", loss_v.item(), iter_no)
        writer.add_scalar("reward_bound", reward_b, iter_no)
        writer.add_scalar("reward_mean", reward_m, iter_no)
        if reward_m > 199:
            print("Solved!")
            break
    writer.close()

Training output:
0: loss=0.691, reward_mean=22.8, reward_bound=26.3
1: loss=0.685, reward_mean=25.1, reward_bound=28.3
2: loss=0.671, reward_mean=26.8, reward_bound=27.5
3: loss=0.665, reward_mean=35.0, reward_bound=30.3
4: loss=0.646, reward_mean=34.9, reward_bound=34.6
5: loss=0.643, reward_mean=46.4, reward_bound=54.3
6: loss=0.630, reward_mean=42.5, reward_bound=47.3
7: loss=0.625, reward_mean=63.3, reward_bound=81.2
8: loss=0.614, reward_mean=46.5, reward_bound=45.6
9: loss=0.602, reward_mean=47.5, reward_bound=54.9
10: loss=0.617, reward_mean=60.9, reward_bound=66.2
11: loss=0.622, reward_mean=56.8, reward_bound=74.4
12: loss=0.584, reward_mean=62.5, reward_bound=82.8
13: loss=0.586, reward_mean=65.5, reward_bound=76.4
14: loss=0.584, reward_mean=73.8, reward_bound=88.2
15: loss=0.576, reward_mean=69.0, reward_bound=87.9
16: loss=0.576, reward_mean=83.0, reward_bound=90.5
17: loss=0.573, reward_mean=79.7, reward_bound=82.9
18: loss=0.570, reward_mean=73.7, reward_bound=85.9
19: loss=0.577, reward_mean=73.0, reward_bound=80.4
20: loss=0.570, reward_mean=88.9, reward_bound=90.2
21: loss=0.555, reward_mean=85.8, reward_bound=94.5
22: loss=0.567, reward_mean=114.1, reward_bound=126.3
23: loss=0.556, reward_mean=120.5, reward_bound=137.4
24: loss=0.558, reward_mean=138.1, reward_bound=161.3
25: loss=0.560, reward_mean=156.2, reward_bound=200.0
26: loss=0.566, reward_mean=164.2, reward_bound=200.0
27: loss=0.551, reward_mean=160.5, reward_bound=200.0
28: loss=0.552, reward_mean=184.5, reward_bound=200.0
29: loss=0.556, reward_mean=177.6, reward_bound=200.0
30: loss=0.546, reward_mean=186.8, reward_bound=200.0
31: loss=0.547, reward_mean=184.9, reward_bound=200.0
32: loss=0.542, reward_mean=186.2, reward_bound=200.0
33: loss=0.546, reward_mean=194.2, reward_bound=200.0
34: loss=0.547, reward_mean=196.9, reward_bound=200.0
35: loss=0.547, reward_mean=197.5, reward_bound=200.0
36: loss=0.549, reward_mean=194.4, reward_bound=200.0
37: loss=0.537, reward_mean=194.1, reward_bound=200.0
38: loss=0.536, reward_mean=200.0, reward_bound=200.0
Solved!
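
Note that the listing above targets the pre-0.26 gym API, where env.reset() returns only the observation and env.step() returns four values. If you run it with gym >= 0.26 or gymnasium, the interaction loop needs a small adaptation; the sketch below (random actions, only to show the changed call signatures) assumes one of those newer versions:

import gym

env = gym.make("CartPole-v0")
obs, _ = env.reset()  # newer API: reset() returns (observation, info)
done = False
total_reward = 0.0
while not done:
    action = env.action_space.sample()  # random action, just to demonstrate the loop shape
    obs, reward, terminated, truncated, _ = env.step(action)  # step() now returns five values
    total_reward += reward
    done = terminated or truncated  # an episode ends when either flag is set
print(total_reward)

In the training code this means adjusting the two env.reset() calls and the env.step(action) call accordingly, with is_done = terminated or truncated.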
