REINFORCE Algorithm Implementation
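
REINFORCE is the classic Monte Carlo policy-gradient algorithm: roll out a full episode with the current stochastic policy, compute the discounted return G_t = r_t + GAMMA * r_{t+1} + GAMMA^2 * r_{t+2} + ... for every time step, then do gradient ascent on sum_t G_t * log pi(a_t | s_t). In code this means minimizing the loss L = -sum_t G_t * log pi(a_t | s_t), which is exactly what the training loop below does on CartPole-v0.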

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: tiger
# datetime: 2021/9/18 5:11 PM


import gym
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch import nn
from torch.distributions import Categorical
from torch import finfo

# Hyperparameters
GAMMA = 0.9  # discount factor used when computing cumulative returns
LR = 0.001  # learning rate


eps = finfo(torch.float32).eps  # tiny constant to avoid division by zero when standardizing returns

# Define the agent (policy network)
class Agent(nn.Module):
    def __init__(self):
        super(Agent, self).__init__()
        # CartPole-v0 observations are 4-dimensional and there are 2 discrete
        # actions, so the network maps a state to a softmax over the 2 actions
        self.net = nn.Sequential(nn.Linear(4, 32), nn.ReLU(),
                                 nn.Linear(32, 32), nn.ReLU(),
                                 nn.Linear(32, 2), nn.Softmax(dim=1))

    # Forward pass
    def forward(self, state):
        return self.net(state)
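
    # A quick shape check (a sketch, not part of the training loop): a batch of
    # CartPole states has shape (N, 4) and the output is (N, 2) with rows summing to 1.
    #   agent = Agent()
    #   probs = agent(torch.zeros(1, 4))  # tensor of shape (1, 2)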

# Define the trainer
class Trainer:
    def __init__(self, env):
        self.env = env
        self.agent = Agent()
        self.opt = torch.optim.Adam(self.agent.parameters(), lr=LR)  # optimizer over the policy parameters

    def __call__(self):  # training loop
        reward_plt = []
        for episode in range(1000):  # train for 1000 episodes
            state = self.env.reset()  # reset the environment to get the initial state
            traj = [[], [], []]  # holds one episode's states, actions and rewards
            log_probs = []  # holds the log-probabilities of the chosen actions
            while True:
                action, log_prob = self.__select_action(state)  # sample an action and get its log-probability
                log_probs.append(log_prob)
                next_state, reward, done, _ = self.env.step(action)  # step the environment
                traj[0].append(state)
                traj[1].append(action)
                traj[2].append(reward)
                state = next_state
                # done == True means the episode has ended
                if done:
                    break
            returns = []  # the discounted return G_t for every time step
            r = 0
            rewards = 0  # total (undiscounted) reward of the episode
            for reward in traj[2][::-1]:  # walk the rewards backwards: G_t = r_t + GAMMA * G_{t+1}
                r = r * GAMMA + reward
                rewards += reward
                returns.insert(0, r)
            reward_plt.append(rewards)
            returns = torch.tensor(returns)
            # standardize the returns; this acts as a simple baseline and reduces gradient variance
            returns = (returns - returns.mean()) / (returns.std() + eps)
            loss = 0  # accumulate the loss
            for r, log_prob in zip(returns, log_probs):
                loss += -r * log_prob  # REINFORCE loss: -G_t * log pi(a_t|s_t)
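            # An equivalent vectorized form of the loop above (a sketch; the
            # math is identical):
            #   loss = -(returns * torch.stack(log_probs)).sum()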
            # update the policy parameters
            self.opt.zero_grad()
            loss.backward()
            self.opt.step()
        plt.plot(reward_plt)
        plt.show()

    # sample an action from the current stochastic policy
    def __select_action(self, state):
        state = torch.from_numpy(np.expand_dims(state, 0)).float()  # convert the ndarray state to a batched tensor
        act_probs = self.agent(state)[0]  # the probability of each action
        dist = Categorical(act_probs)  # build a categorical distribution over the actions
        action = dist.sample()  # sample an action from it
        return action.item(), dist.log_prob(action)  # return the action and its log-probability
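
    # For intuition (a sketch with made-up numbers): Categorical(torch.tensor([0.7, 0.3]))
    # samples action 0 with probability 0.7, and log_prob(action) returns the log of the
    # chosen action's probability while keeping the graph needed for backprop.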

    # evaluation: run one episode, always picking the most probable action
    def test(self):
        state = self.env.reset()
        rewards = 0
        while True:
            self.env.render()
            state = torch.from_numpy(np.expand_dims(state, 0)).float()
            action_prob = self.agent(state)
            action = torch.argmax(action_prob).item()
            next_state, reward, done, _ = self.env.step(action)
            rewards += reward
            state = next_state
            if done:
                break
        print(rewards)


if __name__ == '__main__':
    env = gym.make("CartPole-v0")
    trainer = Trainer(env)
    trainer()
    trainer.test()
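
The listing uses the classic Gym API that was current in 2021. Under gymnasium (or gym >= 0.26) the two environment calls change shape: reset() also returns an info dict, and step() splits done into terminated and truncated, so the call sites would become roughly:

state, _ = env.reset()
next_state, reward, terminated, truncated, _ = env.step(action)
done = terminated or truncated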
