强化学习训练营-Task3-PPO算法实战练习-20240507

R4 PPO(近端策略优化)算法讲解

1、策略(Policy):训练一个输出最优动作的策略模型;

2、近端(Proximal):对代理(surrogate)目标函数做剪裁,限制每次策略更新的幅度;

3、优化(Optimization):通过优化这个剪裁后的代理目标函数来更新策略。这三点合在一起,就是"近端策略优化"这个算法名称的由来。
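
下面用一个极简的数值例子示意"剪裁"的作用(仅为示意,这里假设eps_clip取0.2、优势全为正,与后文简化版代码中的超参不完全相同):

import torch

eps_clip = 0.2
ratio = torch.tensor([0.5, 1.0, 1.5])      #新旧策略输出同一动作的概率之比
advantage = torch.tensor([1.0, 1.0, 1.0])  #假设优势都为正

surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1 - eps_clip, 1 + eps_clip) * advantage
loss = -torch.min(surr1, surr2)  #取两者较小的,限制单次更新的幅度
print(loss)  #tensor([-0.5000, -1.0000, -1.2000]),ratio=1.5被剪裁到1.2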

R4.1 由浅入深,简化版ppo(100行代码)

#导入gym和torch相关包
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical

#Hyperparameters
learning_rate = 0.0005 #学习率
gamma         = 0.98   #折扣因子
lmbda         = 0.95   #GAE中的lambda,用于平衡优势估计的偏差和方差
eps_clip      = 0.1    #剪裁范围,限制新旧策略比值在(1-eps_clip, 1+eps_clip)内
K_epoch       = 3      #每批交互数据重复训练的轮数
T_horizon     = 20     #每次采样最多交互的步数

#定义PPO架构
class PPO(nn.Module):
    def __init__(self):
        super(PPO, self).__init__()
        self.data = [] #用来存储交互数据
        
        self.fc1   = nn.Linear(4,256) #倒立摆的观测是4维向量,环境简单,这里仅用一个线性层提取特征
        self.fc_pi = nn.Linear(256,2) #policy函数(输出action)的全连接层
        self.fc_v  = nn.Linear(256,1) #value函数(输出v)的全连接层
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate) #优化器

    #policy函数
    #输入观测值x
    #输出动作空间上的概率分布,供采样选择action
    def pi(self, x, softmax_dim = 0): 
        x = F.relu(self.fc1(x))
        x = self.fc_pi(x)
        prob = F.softmax(x, dim=softmax_dim)
        return prob
    
    #value函数
    #输入观测值x
    #输出状态x下的value预测值(对未来累计回报的估计),作为policy函数更新时的基线参考
    def v(self, x):
        x = F.relu(self.fc1(x))
        v = self.fc_v(x)
        return v
    
    #把交互数据存入buffer
    def put_data(self, transition):
        self.data.append(transition)
        
    #把数据形成batch,训练模型时需要一个一个batch输入模型
    def make_batch(self):
        s_lst, a_lst, r_lst, s_prime_lst, prob_a_lst, done_lst = [], [], [], [], [], []
        for transition in self.data:
            s, a, r, s_prime, prob_a, done = transition
            
            s_lst.append(s)
            a_lst.append([a])
            r_lst.append([r])
            s_prime_lst.append(s_prime)
            prob_a_lst.append([prob_a])
            done_mask = 0 if done else 1
            done_lst.append([done_mask])
            
        s,a,r,s_prime,done_mask, prob_a = torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
                                          torch.tensor(r_lst), torch.tensor(s_prime_lst, dtype=torch.float), \
                                          torch.tensor(done_lst, dtype=torch.float), torch.tensor(prob_a_lst)
        self.data = []
        return s, a, r, s_prime, done_mask, prob_a
    
    
    #训练模型
    
    def train_net(self):
        #make batch 数据,喂给模型
        s, a, r, s_prime, done_mask, prob_a = self.make_batch()

        for i in range(K_epoch): #K_epoch:训练多少个epoch
            #计算td_error 误差,value模型的优化目标就是尽量减少td_error
            td_target = r + gamma * self.v(s_prime) * done_mask
            delta = td_target - self.v(s)
            delta = delta.detach().numpy()

            #计算advantage:
            #即当前策略比一般策略(baseline)要好多少
            #policy的优化目标就是让当前策略比baseline尽量好,但是每次更新时又不能偏离太多,所以后面会有个clip
            advantage_lst = []
            advantage = 0.0
            for delta_t in delta[::-1]:
                advantage = gamma * lmbda * advantage + delta_t[0]
                advantage_lst.append([advantage])
            advantage_lst.reverse()
            advantage = torch.tensor(advantage_lst, dtype=torch.float)

            #计算ratio(新旧策略输出同一动作的概率之比),防止单次更新偏离太多
            pi = self.pi(s, softmax_dim=1)
            pi_a = pi.gather(1,a)
            ratio = torch.exp(torch.log(pi_a) - torch.log(prob_a))  # a/b == exp(log(a)-log(b))

            #通过clip 保证ratio在(1-eps_clip, 1+eps_clip)范围内
            surr1 = ratio * advantage
            surr2 = torch.clamp(ratio, 1-eps_clip, 1+eps_clip) * advantage
            #这里简化ppo,把policy loss和value loss放在一起计算
            loss = -torch.min(surr1, surr2) + F.smooth_l1_loss(self.v(s) , td_target.detach())

            #梯度优化
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()

#主函数:简化版ppo。每个回合内先交互至多T_horizon步就停下来训练一次,然后继续交互,直到回合结束;外层一共循环10000个回合
def main():
    #创建倒立摆环境
    env = gym.make('CartPole-v1')
    model = PPO()
    score = 0.0
    print_interval = 20

    #主循环
    for n_epi in range(10000):
        s = env.reset()
        done = False
        while not done:
            for t in range(T_horizon):
                #由当前policy模型输出动作的概率分布,并从中采样一个action
                prob = model.pi(torch.from_numpy(s).float())
                m = Categorical(prob)
                a = m.sample().item()
                #用采样到的action与环境交互
                s_prime, r, done, info = env.step(a)

                #存储交互数据,等待训练
                model.put_data((s, a, r/100.0, s_prime, prob[a].item(), done))
                s = s_prime

                score += r
                if done:
                    break

            #模型训练
            model.train_net()

        #每隔print_interval个回合打印一次平均得分
        if n_epi%print_interval==0 and n_epi!=0:
            print("# of episode :{}, avg score : {:.1f}".format(n_epi, score/print_interval))
            score = 0.0

    env.close()

if __name__ == '__main__':
    main()
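
注:上面的代码基于旧版gym接口(env.reset()只返回观测,env.step()返回4个值)。如果你安装的是gym>=0.26或gymnasium,接口有变化,可以参考下面的示意做少量修改(仅为兼容性示意):

#新版gym/gymnasium接口示意
s, _ = env.reset()                                      #reset返回(obs, info)
s_prime, r, terminated, truncated, info = env.step(a)   #step返回5个值
done = terminated or truncated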

R4.2 openai版本ppo算法实践 (训练超级玛丽)

通过上面简化版的ppo玩倒立摆,你已经对ppo有了初步认知,但这种做法还有很多可以改进的地方:比如模型太简单,像超级玛丽这种观测值是图像的场景,简单的线性变换显然不够用;再比如策略模型和value模型应该分开优化各自的损失,因为policy loss和value loss在很多情况下数值差别很大,合在一起时数值小的那个往往得不到有效优化。接下来介绍openai官方版本(Spinning Up)的ppo是怎么实现的。

#3.然后我们来设计ppo算法,实现马里奥通关
#3.1 先创建游戏环境,包括:
#    a.组合定义action
#    b.重定义reward
#    c.堆叠帧(frame stacking,把连续4帧画面叠成一个观测)
#    d.预处理输入的图像
    
#导入相关包
import gym_super_mario_bros
from gym.spaces import Box
from gym import Wrapper
from nes_py.wrappers import JoypadSpace  # 旧版nes_py中叫BinarySpaceToDiscreteSpaceEnv
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT, RIGHT_ONLY
import cv2
import numpy as np
import subprocess as sp

class Monitor:
    def __init__(self, width, height, saved_path):

        self.command = ["ffmpeg", "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", "{}X{}".format(width, height),
                        "-pix_fmt", "rgb24", "-r", "80", "-i", "-", "-an", "-vcodec", "mpeg4", saved_path]
        try:
            self.pipe = sp.Popen(self.command, stdin=sp.PIPE, stderr=sp.PIPE)
        except FileNotFoundError:
            pass

    def record(self, image_array):
        self.pipe.stdin.write(image_array.tostring())


def process_frame(frame):
    if frame is not None:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (84, 84))[None, :, :] / 255.
        return frame
    else:
        return np.zeros((1, 84, 84))


class CustomReward(Wrapper):
    def __init__(self, env=None, monitor=None):
        super(CustomReward, self).__init__(env)
        self.observation_space = Box(low=0, high=255, shape=(1, 84, 84))
        self.curr_score = 0
        if monitor:
            self.monitor = monitor
        else:
            self.monitor = None

    def step(self, action):
        state, reward, done, info = self.env.step(action)
        if self.monitor:
            self.monitor.record(state)
        state = process_frame(state)
        reward += (info["score"] - self.curr_score) / 40.
        self.curr_score = info["score"]
        if done:
            if info["flag_get"]:
                reward += 50
            else:
                reward -= 50
        return state, reward / 10., done, info

    def reset(self):
        self.curr_score = 0
        return process_frame(self.env.reset())


class CustomSkipFrame(Wrapper):
    def __init__(self, env, skip=4):
        super(CustomSkipFrame, self).__init__(env)
        self.observation_space = Box(low=0, high=255, shape=(4, 84, 84))
        self.skip = skip

    def step(self, action):
        total_reward = 0
        states = []
        state, reward, done, info = self.env.step(action)
        for i in range(self.skip):
            if not done:
                state, reward, done, info = self.env.step(action)
                total_reward += reward
                states.append(state)
            else:
                states.append(state)
        states = np.concatenate(states, 0)[None, :, :, :]
        return states.astype(np.float32), reward, done, info

    def reset(self):
        state = self.env.reset()
        states = np.concatenate([state for _ in range(self.skip)], 0)[None, :, :, :]
        return states.astype(np.float32)


def create_train_env(world, stage, action_type, output_path=None):
    env = gym_super_mario_bros.make("SuperMarioBros-{}-{}-v0".format(world, stage))
    if output_path:
        monitor = Monitor(256, 240, output_path)
    else:
        monitor = None
    if action_type == "right":
        actions = RIGHT_ONLY
    elif action_type == "simple":
        actions = SIMPLE_MOVEMENT
    else:
        actions = COMPLEX_MOVEMENT
    env = JoypadSpace(env, actions)
    env = CustomReward(env, monitor)
    env = CustomSkipFrame(env)
    return env, env.observation_space.shape[0], len(actions)
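
环境封装好之后,可以先做一个简单的检查,确认观测形状和动作数量符合预期(假设gym_super_mario_bros、nes_py已正确安装;SIMPLE_MOVEMENT包含7个组合动作):

env, num_inputs, num_actions = create_train_env(1, 1, "simple")
obs = env.reset()
print(obs.shape)                #预期(1, 4, 84, 84):4帧堆叠的84x84灰度图
print(num_inputs, num_actions)  #预期 4 7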
    
#5.创建ppo算法
#5.1创建ac
import numpy as np
import scipy.signal
from gym.spaces import Box, Discrete

import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
import torch.nn.functional as F

def combined_shape(length, shape=None):
    if shape is None:
        return (length,)
    return (length, shape) if np.isscalar(shape) else (length, *shape)

#定义通用cnn model类
class cnn_model(nn.Module):
    def __init__(self, num_inputs, num_out, activation=nn.ReLU):
        super(cnn_model, self).__init__()
        self.conv1 = nn.Conv2d(num_inputs, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.lstm = nn.Linear(32 * 6 * 6, 512)  # 注意:虽然沿用了lstm这个名字,实际只是一个全连接层
        # self.critic_linear = nn.Linear(512, 1)
        # self.actor_linear = nn.Linear(512, num_actions)
        self.fc_out = nn.Linear(512, num_out)
        self._initialize_weights()

    def _initialize_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                # nn.init.kaiming_uniform_(module.weight)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.LSTMCell):
                nn.init.constant_(module.bias_ih, 0)
                nn.init.constant_(module.bias_hh, 0)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.lstm(x))
        out = self.fc_out(x)
        return out
    
#utils:
def count_vars(module):
    return sum([np.prod(p.shape) for p in module.parameters()])

def discount_cumsum(x, discount):
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

class userActor(nn.Module):

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        self.logits_net = cnn_model(obs_dim, act_dim, activation=activation)
        print(self.logits_net)

    def forward(self, obs, act=None):
        pi = Categorical(logits=self.logits_net(obs))
        logp_a = None
        if act is not None:
            logp_a = pi.log_prob(act)
        return pi, logp_a
    
class userCritic(nn.Module):

    def __init__(self, obs_dim, hidden_sizes, activation):
        super().__init__()
        self.v_net = cnn_model(obs_dim, 1, activation=activation)#cnn_net([obs_dim] + list(hidden_sizes) + [1], activation)
        print(self.v_net)

    def forward(self, obs):
        return torch.squeeze(self.v_net(obs), -1) # Critical to ensure v has right shape.
    
#5.3定义ppo
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
import gym
import time
import scipy.signal
# import spinup.algos.pytorch.ppo.core as core
# from core_1 import Actor, Critic
from core import userCritic, userActor
from env import create_train_env
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs

device = torch.device('cuda')

class PPOBuffer:
    """
    A buffer for storing trajectories experienced by a PPO agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.
    """

    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
        self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size

    def store(self, obs, act, rew, val, logp):
        """
        Append one timestep of agent-environment interaction to the buffer.
        """
        assert self.ptr < self.max_size     # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.ptr += 1

    def finish_path(self, last_val=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """

        path_slice = slice(self.path_start_idx, self.ptr)
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = discount_cumsum(deltas, self.gamma * self.lam)

        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = discount_cumsum(rews, self.gamma)[:-1]

        self.path_start_idx = self.ptr

    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.
        """
        assert self.ptr == self.max_size    # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the next two lines implement the advantage normalization trick
        adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
        self.adv_buf = (self.adv_buf - adv_mean) / adv_std
        data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,
                    adv=self.adv_buf, logp=self.logp_buf)
        #data.to(device)
        return {k: torch.as_tensor(v, dtype=torch.float32).to(device) for k,v in data.items()}

def ppo(env_fn, actor=nn.Module, critic=nn.Module, ac_kwargs=dict(), seed=0,
        steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,
        vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=1000,
        target_kl=0.01, logger_kwargs=dict(), save_freq=10):
    # Special function to avoid certain slowdowns from PyTorch + MPI combo.
    setup_pytorch_for_mpi()

    # Set up logger and save configuration
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())

    # Random seed
    seed += 10000 * proc_id()
    torch.manual_seed(seed)
    np.random.seed(seed)

    # Instantiate environment
    env = env_fn()
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.n
    
    # Create actor-critic module
    ac_pi = actor(obs_dim[0], act_dim, hidden_sizes=[64, 64], activation=nn.Tanh)  # env.observation_space, env.action_space, nn.ReLU)
    ac_v = critic(obs_dim[0], hidden_sizes=[64, 64], activation=nn.Tanh)  # env.observation_space, nn.ReLU)

    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    ac_pi.to(device)
    ac_v.to(device)

    # Sync params across processes
    sync_params(ac_pi)
    sync_params(ac_v)

    # Count variables
    def count_vars(module):
        return sum([np.prod(p.shape) for p in module.parameters()])
    var_counts = tuple(count_vars(module) for module in [ac_pi, ac_v])
    logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)

    # Set up experience buffer
    local_steps_per_epoch = int(steps_per_epoch / num_procs())
    buf = PPOBuffer(obs_dim, env.action_space.shape, local_steps_per_epoch, gamma, lam)
    
    # Set up function for computing PPO policy loss
    def compute_loss_pi(data):
        obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']

        # Policy loss
        pi, logp = ac_pi(obs, act)
        ratio = torch.exp(logp - logp_old)
        clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * adv
        loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()

        # Useful extra info
        approx_kl = (logp_old - logp).mean().item()
        ent = pi.entropy().mean().item()
        clipped = ratio.gt(1+clip_ratio) | ratio.lt(1-clip_ratio)
        clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
        pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)

        return loss_pi, pi_info

    # Set up function for computing value loss
    def compute_loss_v(data):
        obs, ret = data['obs'], data['ret']
        return ((ac_v(obs) - ret)**2).mean()

    # Set up optimizers for policy and value function
    pi_optimizer = Adam(ac_pi.parameters(), lr=pi_lr)
    vf_optimizer = Adam(ac_v.parameters(), lr=vf_lr)

    # Set up model saving
    logger.setup_pytorch_saver(ac_pi)
    
    def update():
        data = buf.get()

        pi_l_old, pi_info_old = compute_loss_pi(data)
        pi_l_old = pi_l_old.item()
        v_l_old = compute_loss_v(data).item()

        # Train policy with multiple steps of gradient descent
        for i in range(train_pi_iters):
            pi_optimizer.zero_grad()
            loss_pi, pi_info = compute_loss_pi(data)
            kl = mpi_avg(pi_info['kl'])
            if kl > 1.5 * target_kl:
                logger.log('Early stopping at step %d due to reaching max kl.'%i)
                break
            loss_pi.backward()
            mpi_avg_grads(ac_pi)    # average grads across MPI processes
            pi_optimizer.step()

        logger.store(StopIter=i)

        # Value function learning
        for i in range(train_v_iters):
            vf_optimizer.zero_grad()
            loss_v = compute_loss_v(data)
            loss_v.backward()
            mpi_avg_grads(ac_v)    # average grads across MPI processes
            vf_optimizer.step()

        # Log changes from update
        kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']
        logger.store(LossPi=pi_l_old, LossV=v_l_old,
                     KL=kl, Entropy=ent, ClipFrac=cf,
                     DeltaLossPi=(loss_pi.item() - pi_l_old),
                     DeltaLossV=(loss_v.item() - v_l_old))

    # Prepare for interaction with environment
    start_time = time.time()
    o, ep_ret, ep_len = env.reset(), 0, 0

    # Main loop: collect experience in env and update/log each epoch
    for epoch in range(epochs):
        for t in range(local_steps_per_epoch):
            # a, v, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
            with torch.no_grad():
                rr = torch.from_numpy(o.copy()).float().to(device)#.unsqueeze(0)
                pi, _ = ac_pi(rr, None)
                a = pi.sample()
                # logp_a = self.pi._log_prob_from_distribution(pi, a)
                logp = pi.log_prob(a)#.sum(axis=-1)
                v = ac_v(torch.as_tensor(o, dtype=torch.float32).to(device))

            next_o, r, d, _ = env.step(a.cpu().numpy().item())
            ep_ret += r
            ep_len += 1

            # save and log
            buf.store(o, a.cpu().numpy(), r, v.cpu().numpy(), logp.cpu().numpy())
            logger.store(VVals=v.cpu().numpy())

            # Update obs (critical!)
            o = next_o

            timeout = ep_len == max_ep_len
            terminal = d #or timeout
            epoch_ended = t==local_steps_per_epoch-1

            if terminal or epoch_ended:
                if epoch_ended and not(terminal):
                    print('Warning: trajectory cut off by epoch at %d steps.'%ep_len, flush=True)
                # if trajectory didn't reach terminal state, bootstrap value target
                if epoch_ended:
                    print('epoch_end')
                    # _, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
                    with torch.no_grad():
                        v =ac_v(torch.from_numpy(o).float().to(device)).cpu().numpy()
                else:
                    print('epret :',ep_ret)
                    v = 0
                buf.finish_path(v)
                if terminal:
                    # only save EpRet / EpLen if trajectory finished
                    logger.store(EpRet=ep_ret, EpLen=ep_len)
                o, ep_ret, ep_len = env.reset(), 0, 0


        # Save model
        if (epoch % save_freq == 0) or (epoch == epochs-1):
            logger.save_state({'env': env}, None)

        # Perform PPO update!
        update()

        # Log info about epoch
        logger.log_tabular('Epoch', epoch)
        logger.log_tabular('EpRet', with_min_and_max=True)
        logger.log_tabular('EpLen', average_only=True)
        logger.log_tabular('VVals', with_min_and_max=True)
        logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)
        logger.log_tabular('LossPi', average_only=True)
        logger.log_tabular('LossV', average_only=True)
        logger.log_tabular('DeltaLossPi', average_only=True)
        logger.log_tabular('DeltaLossV', average_only=True)
        logger.log_tabular('Entropy', average_only=True)
        logger.log_tabular('KL', average_only=True)
        logger.log_tabular('ClipFrac', average_only=True)
        logger.log_tabular('StopIter', average_only=True)
        logger.log_tabular('Time', time.time()-start_time)
        logger.dump_tabular()

if __name__ == '__main__':
    
    hid_sizes = 128
    gamma = 0.999
    seed = 0
    steps = 10000
    epochs = 150
    cpu = 1
    exp_name = "ppo"
    
    
    import os
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    mpi_fork(cpu)  # run parallel code with mpi

    from spinup.utils.run_utils import setup_logger_kwargs
    logger_kwargs = setup_logger_kwargs(exp_name, seed)
    # from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
    env_fn = lambda : create_train_env(1,1,'complex')
    # env_fn = SubprocVecEnv([])
    # env_fn = lambda : JoypadSpace(gym_super_mario_bros.make("SuperMarioBros-{}-{}-v0".format(1, 1)), gym_super_mario_bros.actions.COMPLEX_MOVEMENT)
    ppo(env_fn, actor=userActor, critic=userCritic,#core.MLPActorCritic, #gym.make(args.env)
        ac_kwargs=dict(hidden_sizes=hid_sizes), gamma=gamma,
        seed=seed, steps_per_epoch=steps, epochs=epochs,
        logger_kwargs=logger_kwargs, clip_ratio=0.2, pi_lr=0.001, vf_lr=0.001)

#6.查看训练结果
!pwd
%matplotlib inline
!python -m spinup.run plot /root/lele/spinningup/spinningup/data/ppo/ppo_s0

R4.2.1 设计DL模型

#导入相关包
import numpy as np
import scipy.signal
from gym.spaces import Box, Discrete

import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
import torch.nn.functional as F

#定义通用cnn model作为base model
class cnn_model(nn.Module):
    def __init__(self, num_inputs, num_out, activation=nn.ReLU): 
        super(cnn_model, self).__init__()
        self.conv1 = nn.Conv2d(num_inputs, 32, 3, stride=2, padding=1) #卷积层
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)  #卷积层
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)  #卷积层
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)  #卷积层
        self.liner = nn.Linear(32 * 6 * 6, 512) #线性层
        self.fc_out = nn.Linear(512, num_out)   #输出层
        self._initialize_weights() #模型权重初始化

    #对模型参数进行初始化,合理的初始化对训练的收敛起到非常好的作用
    def _initialize_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.LSTMCell):
                nn.init.constant_(module.bias_ih, 0)
                nn.init.constant_(module.bias_hh, 0)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = x.view(x.size(0), -1) #把卷积输出的高维特征拉平成一维向量
        x = F.relu(self.liner(x))
        out = self.fc_out(x)
        return out
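
可以用一个随机张量快速验证网络的输入输出形状:84x84的输入经过4次stride=2的卷积后变成6x6,所以线性层的输入维度是32*6*6(以下仅为形状检查示意,4和7分别假设为堆叠帧数和动作数):

model = cnn_model(num_inputs=4, num_out=7)  #4帧输入,7个输出(以SIMPLE_MOVEMENT的动作数为例)
x = torch.randn(1, 4, 84, 84)               #batch=1的假观测
print(model(x).shape)                       #预期torch.Size([1, 7]);空间尺寸变化:84->42->21->11->6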

R4.2.2 设计ppo的ac模型(Actor/Critic)

Actor负责policy,输出具体要执行的action;Critic负责预测该状态下的value,为action的更新提供指引。训练时我们通过Critic的输出来更新Actor,而Critic本身则由Reward指引更新。

#定义actor,负责输出action的概率分布,对应简化版的pi函数
class userActor(nn.Module):

    def __init__(self, obs_dim, act_dim, activation):
        super().__init__()
        self.logits_net = cnn_model(obs_dim, act_dim, activation=activation) #定义策略模型,输出为动作空间大小
        print(self.logits_net)

    #计算策略分布,以及给定action时的log概率
    def forward(self, obs, act=None):
        pi = Categorical(logits=self.logits_net(obs))
        logp_a = None
        if act is not None:
            logp_a = pi.log_prob(act)
        return pi, logp_a
        
#定义Critic,对应简化版的v函数。训练时我们通过Critic的指引来更新Actor,而Critic本身由Reward指引更新
class userCritic(nn.Module):

    def __init__(self, obs_dim, activation):
        super().__init__()
        self.v_net = cnn_model(obs_dim, 1, activation=activation) #输出为1,因为输出为当前策略的value预测值(一维)
        print(self.v_net)

    def forward(self, obs):
        return torch.squeeze(self.v_net(obs), -1) # Critical to ensure v has right shape.
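
Actor和Critic的前向用法示意(假设观测是4帧84x84的图像、动作空间大小为7,仅为演示张量形状):

actor = userActor(obs_dim=4, act_dim=7, activation=nn.Tanh)
critic = userCritic(obs_dim=4, activation=nn.Tanh)

obs = torch.randn(2, 4, 84, 84)   #batch=2的假观测
pi, _ = actor(obs)                #pi是Categorical分布
a = pi.sample()                   #采样动作,shape (2,)
_, logp_a = actor(obs, a)         #动作对应的log概率,shape (2,)
v = critic(obs)                   #状态价值预测,shape (2,)
print(a.shape, logp_a.shape, v.shape)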

R4.2.3 定义ppo 算法

import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
import gym
import time
import scipy.signal
#from core import userCritic, userActor
from env import create_train_env
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs

#指定使用gpu
device = torch.device('cuda')

#utils:
def count_vars(module):
    return sum([np.prod(p.shape) for p in module.parameters()])

def discount_cumsum(x, discount):
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

def combined_shape(length, shape=None):
    if shape is None:
        return (length,)
    return (length, shape) if np.isscalar(shape) else (length, *shape)
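
discount_cumsum用scipy的lfilter一次性算出折扣累加和,等价于从后往前的循环累加,下面用一个小例子验证(仅为示意):

x = np.array([1., 1., 1.], dtype=np.float32)
print(discount_cumsum(x, 0.9))  #约为[2.71, 1.9, 1.0]

#等价的显式写法
out, running = [], 0.0
for v in x[::-1]:
    running = v + 0.9 * running
    out.append(running)
print(out[::-1])                #约为[2.71, 1.9, 1.0]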

R4.2.3.1 定义PPOBuffer,用来存储交互数据,提供给模型训练使用

class PPOBuffer:
    """
    A buffer for storing trajectories experienced by a PPO agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.
    """

    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
        self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size

    def store(self, obs, act, rew, val, logp):
        """
        Append one timestep of agent-environment interaction to the buffer.
        """
        assert self.ptr < self.max_size     # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.ptr += 1

    def finish_path(self, last_val=0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """

        path_slice = slice(self.path_start_idx, self.ptr)
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)

        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = discount_cumsum(deltas, self.gamma * self.lam)

        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = discount_cumsum(rews, self.gamma)[:-1]

        self.path_start_idx = self.ptr

    def get(self): #取数据用于训练
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.
        """
        assert self.ptr == self.max_size    # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the next two lines implement the advantage normalization trick
        adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
        self.adv_buf = (self.adv_buf - adv_mean) / adv_std
        data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,
                    adv=self.adv_buf, logp=self.logp_buf)
        
        return {k: torch.as_tensor(v, dtype=torch.float32).to(device) for k,v in data.items()}
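
PPOBuffer的基本用法示意:用随手填的假数据装满一个容量为3的buffer,轨迹结束时调用finish_path回溯计算GAE优势和rewards-to-go(get()内部用到mpi_statistics_scalar,需要装好spinup/mpi4py后才能调用):

buf = PPOBuffer(obs_dim=(4, 84, 84), act_dim=(), size=3, gamma=0.99, lam=0.95)
for _ in range(3):
    obs = np.random.randn(4, 84, 84).astype(np.float32)  #假观测
    buf.store(obs, act=1, rew=1.0, val=0.5, logp=-0.7)    #假动作/奖励/价值/log概率
buf.finish_path(last_val=0)        #轨迹结束,回溯计算优势和回报
print(buf.adv_buf, buf.ret_buf)    #buffer装满后即可调用buf.get()取出归一化后的训练数据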

R4.2.3.2 定义ppo算法及更新策略

#定义policy模型(actor即前面定义的userActor)
ac_pi = actor(4, act_dim, activation=nn.Tanh)  # 输入为观测的channel数(4帧堆叠),输出为action空间大小(SIMPLE_MOVEMENT为7,COMPLEX_MOVEMENT为12),激活函数使用Tanh

#定义value模型(critic即前面定义的userCritic)
ac_v = critic(4, activation=nn.Tanh)  # 输入为观测的channel数(4帧堆叠),激活函数使用Tanh

# Set up function for computing PPO policy loss
def compute_loss_pi(data):
    obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']

    # Policy loss
    pi, logp = ac_pi(obs, act) #计算action的概率分布
    ratio = torch.exp(logp - logp_old) #计算新老策略的差异大小
    clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * adv #clip 新老策略的更新范围在(1-clip_ratio, 1+clip_ratio)内
    loss_pi = -(torch.min(ratio * adv, clip_adv)).mean() #计算最终的policy loss 使adv(优势)更明显的方向做梯度更新

    return loss_pi

# Set up function for computing value loss
def compute_loss_v(data):
    obs, ret = data['obs'], data['ret']
    return ((ac_v(obs) - ret)**2).mean() #真实值和预测值做均方差loss 使v模型预测更接近真实值

# Set up optimizers for policy and value function
pi_optimizer = Adam(ac_pi.parameters(), lr=pi_lr)
vf_optimizer = Adam(ac_v.parameters(), lr=vf_lr)

#update 模型参数(训练)
def update():
    data = buf.get() #读取训练数据

    # Train policy with multiple steps of gradient descent
    for i in range(train_pi_iters):
        pi_optimizer.zero_grad()
        loss_pi = compute_loss_pi(data)  #注意:本节精简后的compute_loss_pi只返回loss
        loss_pi.backward()
        pi_optimizer.step()

    # Value function learning
    for i in range(train_v_iters):
        vf_optimizer.zero_grad()
        loss_v = compute_loss_v(data)
        loss_v.backward()
        vf_optimizer.step()
        
# Prepare for interaction with environment
o, ep_ret, ep_len = env.reset(), 0, 0
# Main loop: collect experience in env and update/log each epoch
for epoch in range(1000):
    for t in range(2000):
        with torch.no_grad(): #收集数据过程,不做参数更新
            rr = torch.from_numpy(o.copy()).float().to(device) #数据转换
            pi, _ = ac_pi(rr, None) # 计算pi
            a = pi.sample() #取出action用于环境交互
            logp = pi.log_prob(a) #取出a对应的log概率存起来,训练时用来计算新旧策略的比值,防止单次更新步幅过大
            v = ac_v(torch.as_tensor(o, dtype=torch.float32).to(device)) # 计算v存起来给pi模型提供指引

        next_o, r, d, _ = env.step(a.cpu().numpy().item()) #在游戏中执行模型输出的action
        ep_ret += r
        ep_len += 1

        # save and log
        buf.store(o, a.cpu().numpy(), r, v.cpu().numpy(), logp.cpu().numpy())

        # Update obs (critical!)
        o = next_o

        #下面是对回合结束时特殊情况的处理:比如本轮采样步数用完但游戏还没结束,需要用value模型对当前观测再估计一次来bootstrap;而小人game over时直接用0。初学者可以先略过(max_ep_len、local_steps_per_epoch、logger等变量在后面的完整版ppo函数中定义)
        timeout = ep_len == max_ep_len
        terminal = d 
        epoch_ended = t==local_steps_per_epoch-1

        if terminal or epoch_ended:
            if epoch_ended and not(terminal):
                print('Warning: trajectory cut off by epoch at %d steps.'%ep_len, flush=True)
            # if trajectory didn't reach terminal state, bootstrap value target
            if epoch_ended:
                print('epoch_end')
                with torch.no_grad():
                    v =ac_v(torch.from_numpy(o).float().to(device)).cpu().numpy()
            else:
                print('epret :',ep_ret)
                v = 0
            buf.finish_path(v)
            if terminal:
                # only save EpRet / EpLen if trajectory finished
                logger.store(EpRet=ep_ret, EpLen=ep_len)
            o, ep_ret, ep_len = env.reset(), 0, 0

    # Perform PPO update!
    update()
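
补充一点:为了精简,上面省略了完整版里的KL early stopping。其思路是用新旧log概率之差的均值近似KL散度,若超过1.5*target_kl就提前结束本轮策略更新,避免策略偏离太远(数值仅为示意):

logp_old = torch.tensor([-1.0, -1.2, -0.8])
logp_new = torch.tensor([-0.9, -1.5, -0.8])
approx_kl = (logp_old - logp_new).mean().item()  #近似KL散度
print(approx_kl)  #约0.067,完整版中若大于1.5*target_kl则break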

R4.2.3.3 PPO算法完整代码(添加log记录、mpi多进程)

#ppo函数完整代码
def ppo(env_fn, actor=nn.Module, critic=nn.Module, ac_kwargs=dict(), seed=0,
        steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,
        vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=1000,
        target_kl=0.01, logger_kwargs=dict(), save_freq=10):
    # Special function to avoid certain slowdowns from PyTorch + MPI combo.
    setup_pytorch_for_mpi()

    # Set up logger and save configuration
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())

    # Random seed
    seed += 10000 * proc_id()
    torch.manual_seed(seed)
    np.random.seed(seed)

    # Instantiate environment
    env = env_fn()
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.n
    
    # Create actor-critic module
    ac_pi = actor(obs_dim[0], act_dim, hidden_sizes=[64, 64], activation=nn.Tanh)  # env.observation_space, env.action_space, nn.ReLU)
    ac_v = critic(obs_dim[0], hidden_sizes=[64, 64], activation=nn.Tanh)  # env.observation_space, nn.ReLU)

    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    ac_pi.to(device)
    ac_v.to(device)

    # Sync params across processes
    sync_params(ac_pi)
    sync_params(ac_v)

    # Count variables
    def count_vars(module):
        return sum([np.prod(p.shape) for p in module.parameters()])
    var_counts = tuple(count_vars(module) for module in [ac_pi, ac_v])
    logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)

    # Set up experience buffer
    local_steps_per_epoch = int(steps_per_epoch / num_procs())
    buf = PPOBuffer(obs_dim, env.action_space.shape, local_steps_per_epoch, gamma, lam)
    
    # Set up function for computing PPO policy loss
    def compute_loss_pi(data):
        obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']

        # Policy loss
        pi, logp = ac_pi(obs, act)
        ratio = torch.exp(logp - logp_old)
        clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * adv
        loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()

        # Useful extra info
        approx_kl = (logp_old - logp).mean().item()
        ent = pi.entropy().mean().item()
        clipped = ratio.gt(1+clip_ratio) | ratio.lt(1-clip_ratio)
        clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
        pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)

        return loss_pi, pi_info

    # Set up function for computing value loss
    def compute_loss_v(data):
        obs, ret = data['obs'], data['ret']
        return ((ac_v(obs) - ret)**2).mean()

    # Set up optimizers for policy and value function
    pi_optimizer = Adam(ac_pi.parameters(), lr=pi_lr)
    vf_optimizer = Adam(ac_v.parameters(), lr=vf_lr)

    # Set up model saving
    logger.setup_pytorch_saver(ac_pi)
    
    def update():
        data = buf.get()

        pi_l_old, pi_info_old = compute_loss_pi(data)
        pi_l_old = pi_l_old.item()
        v_l_old = compute_loss_v(data).item()

        # Train policy with multiple steps of gradient descent
        for i in range(train_pi_iters):
            pi_optimizer.zero_grad()
            loss_pi, pi_info = compute_loss_pi(data)
            kl = mpi_avg(pi_info['kl'])
            if kl > 1.5 * target_kl:
                logger.log('Early stopping at step %d due to reaching max kl.'%i)
                break
            loss_pi.backward()
            mpi_avg_grads(ac_pi)    # average grads across MPI processes
            pi_optimizer.step()

        logger.store(StopIter=i)

        # Value function learning
        for i in range(train_v_iters):
            vf_optimizer.zero_grad()
            loss_v = compute_loss_v(data)
            loss_v.backward()
            mpi_avg_grads(ac_v)    # average grads across MPI processes
            vf_optimizer.step()

        # Log changes from update
        kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']
        logger.store(LossPi=pi_l_old, LossV=v_l_old,
                     KL=kl, Entropy=ent, ClipFrac=cf,
                     DeltaLossPi=(loss_pi.item() - pi_l_old),
                     DeltaLossV=(loss_v.item() - v_l_old))

    # Prepare for interaction with environment
    start_time = time.time()
    o, ep_ret, ep_len = env.reset(), 0, 0

    # Main loop: collect experience in env and update/log each epoch
    for epoch in range(epochs):
        for t in range(local_steps_per_epoch):
            # a, v, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
            with torch.no_grad():
                rr = torch.from_numpy(o.copy()).float().to(device)#.unsqueeze(0)
                pi, _ = ac_pi(rr, None)
                a = pi.sample()
                # logp_a = self.pi._log_prob_from_distribution(pi, a)
                logp = pi.log_prob(a)#.sum(axis=-1)
                v = ac_v(torch.as_tensor(o, dtype=torch.float32).to(device))

            next_o, r, d, _ = env.step(a.cpu().numpy().item())
            ep_ret += r
            ep_len += 1

            # save and log
            buf.store(o, a.cpu().numpy(), r, v.cpu().numpy(), logp.cpu().numpy())
            logger.store(VVals=v.cpu().numpy())

            # Update obs (critical!)
            o = next_o

            timeout = ep_len == max_ep_len
            terminal = d #or timeout
            epoch_ended = t==local_steps_per_epoch-1

            if terminal or epoch_ended:
                if epoch_ended and not(terminal):
                    print('Warning: trajectory cut off by epoch at %d steps.'%ep_len, flush=True)
                # if trajectory didn't reach terminal state, bootstrap value target
                if epoch_ended:
                    print('epoch_end')
                    # _, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
                    with torch.no_grad():
                        v =ac_v(torch.from_numpy(o).float().to(device)).cpu().numpy()
                else:
                    print('epret :',ep_ret)
                    v = 0
                buf.finish_path(v)
                if terminal:
                    # only save EpRet / EpLen if trajectory finished
                    logger.store(EpRet=ep_ret, EpLen=ep_len)
                o, ep_ret, ep_len = env.reset(), 0, 0


        # Save model
        if (epoch % save_freq == 0) or (epoch == epochs-1):
            logger.save_state({'env': env}, None)

        # Perform PPO update!
        update()

        # Log info about epoch
        logger.log_tabular('Epoch', epoch)
        logger.log_tabular('EpRet', with_min_and_max=True)
        logger.log_tabular('EpLen', average_only=True)
        logger.log_tabular('VVals', with_min_and_max=True)
        logger.log_tabular('TotalEnvInteracts', (epoch+1)*steps_per_epoch)
        logger.log_tabular('LossPi', average_only=True)
        logger.log_tabular('LossV', average_only=True)
        logger.log_tabular('DeltaLossPi', average_only=True)
        logger.log_tabular('DeltaLossV', average_only=True)
        logger.log_tabular('Entropy', average_only=True)
        logger.log_tabular('KL', average_only=True)
        logger.log_tabular('ClipFrac', average_only=True)
        logger.log_tabular('StopIter', average_only=True)
        logger.log_tabular('Time', time.time()-start_time)
        logger.dump_tabular()

R4.2.3.4 主函数

if __name__ == '__main__':
    
    hid_sizes = 128
    gamma = 0.999
    seed = 0
    steps = 10000
    epochs = 150
    cpu = 1
    exp_name = "ppo"
    
    
    import os
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    mpi_fork(cpu)  # run parallel code with mpi

    from spinup.utils.run_utils import setup_logger_kwargs
    logger_kwargs = setup_logger_kwargs(exp_name, seed)
    env_fn = lambda : create_train_env(1,1,'complex')
    ppo(env_fn, actor=userActor, critic=userCritic,
        ac_kwargs=dict(hidden_sizes=hid_sizes), gamma=gamma,
        seed=seed, steps_per_epoch=steps, epochs=epochs,
        logger_kwargs=logger_kwargs, clip_ratio=0.2, pi_lr=0.001, vf_lr=0.001)

R4.2.3.5 查看训练过程指标

#用spinup自带的plot工具画出训练曲线
!pwd
%matplotlib inline
!python -m spinup.run plot /root/lele/spinningup/spinningup/data/ppo/ppo_s0
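
如果不方便使用spinup自带的plot工具,也可以直接用pandas读取日志目录下的progress.txt自行画图(以下为示意,路径按你的实际输出目录修改;列名遵循spinup的log_tabular命名约定):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('/root/lele/spinningup/spinningup/data/ppo/ppo_s0/progress.txt', sep='\t')
plt.plot(df['TotalEnvInteracts'], df['AverageEpRet'])
plt.xlabel('TotalEnvInteracts')
plt.ylabel('AverageEpRet')
plt.show()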

R4.2.3.6 加载训练好的模型并在游戏中运行

#加载模型运行需要的包(假设与前文在同一工程下,env.py中定义了create_train_env)
import os.path as osp
import time
import torch
from spinup.utils.logx import EpochLogger
from env import create_train_env

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def load_pytorch_policy(fpath, itr='', deterministic=False):
    """ Load a pytorch policy saved with Spinning Up Logger."""

    fname = osp.join(fpath, 'pyt_save', 'model' + itr + '.pt')
    print('\n\nLoading from %s.\n\n' % fname)

    model = torch.load(fname) #加载训练好的模型

    # make function for producing an action given a single state
    def get_action(x):
        with torch.no_grad():
            x = torch.as_tensor(x, dtype=torch.float32)
            pi, _ = model(x.to(device), None)
            action = pi.sample()
        return action.cpu()

    return get_action


def run_policy(env, get_action, max_ep_len=None, num_episodes=100, render=True):
    assert env is not None, \
        "Environment not found!\n\n It looks like the environment wasn't saved, " + \
        "and we can't run the agent in it. :( \n\n Check out the readthedocs " + \
        "page on Experiment Outputs for how to handle this situation."

    logger = EpochLogger()

    o, r, d, ep_ret, ep_len, n = env.reset(), 0, False, 0, 0, 0
    while n < num_episodes:
        if render:
            env.render()
            time.sleep(1e-3)

        a = get_action(o)
        o, r, d, _ = env.step(a.numpy().item())
        ep_ret += r
        ep_len += 1

        if d or (ep_len == max_ep_len):
            logger.store(EpRet=ep_ret, EpLen=ep_len)
            print('Episode %d \t EpRet %.3f \t EpLen %d' % (n, ep_ret, ep_len))
            o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
            n += 1

    logger.log_tabular('EpRet', with_min_and_max=True)
    logger.log_tabular('EpLen', average_only=True)
    logger.dump_tabular()
    
if __name__ == '__main__':
    
    #这里根据你自己的spinningup输出路径来修改
    fpath = r'/root/lele/spinningup/spinningup/data/ppo/ppo_s0/'
    episodes = 100
    store_true = False
    
    env = create_train_env(1, 1, 'complex')
    get_action = load_pytorch_policy(fpath)#itr='_50'
    run_policy(env, get_action, 0, episodes, store_true)
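
如果想实时观看游戏画面,把run_policy的render参数设为True即可;如果想把运行过程录成视频,可以利用create_train_env的output_path参数,它会启用前面定义的Monitor,通过ffmpeg把画面写成视频文件(需要系统装有ffmpeg,文件名mario_eval.mp4只是示例):

env = create_train_env(1, 1, 'complex', output_path='mario_eval.mp4')
get_action = load_pytorch_policy(fpath)
run_policy(env, get_action, 0, num_episodes=5, render=False)
#运行结束后会在当前目录得到mario_eval.mp4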

注:完整代码见:https://github.com/gaoxiaos/Supermariobros-PPO-pytorch.git

R5 强化学习的近况&挑战

R5.1强化学习的近况

强化学习一直被学术界认为是通往通用智能的大门,这个领域的论文数量每年都在快速增长,各大AI会议也把强化学习的讨论放在了重要位置:比如世界人工智能大会的主论坛、IJCAI今年在清华平台举办的麻将AI大赛,NeurIPS更是把四个竞赛赛道全部放在了强化学习领域。强化学习面临的问题也还有很多。比如数据采样的难度:由于数据来源于交互,模型的学习速度依赖于采样速度,如何在有限的交互步数下取得更好的成绩,就成了业内模型创新的一个方向。再如很多场景无法明确给出奖励函数,这时如何让模型模仿专家、达到专业水平,各方面的研究都在进行。算法层面,自从PPO出现并被广泛采用后,已经很长时间没有出现质的飞跃,这一块也非常值得进一步探索。如果你也有兴趣,欢迎添加微信入群,和其他小伙伴一起交流探索,打比赛拿奖金。

R5.2强化学习的挑战

通过上面的学习,我们来挑战一个有一定难度的新游戏"大鱼吃小鱼"。该游戏是openai新推出的"程序化随机生成"环境Procgen中非常有代表性的一个场景。为了解决网络模型往往只是"记住"怎么走、而非真正理解怎么走的问题,openai推出了Procgen Benchmark来评估模型的泛化性能:每次reset时游戏布局都是随机生成的,比如大鱼吃小鱼里,每次开场小鱼的分布和行为都是随机的,这就要求你控制的agent真正理解周围环境,才能做出正确判断、吃到更多小鱼。 竞赛直达:
