RLlib: Custom Model Code Examples

Built-in networks

RLlib's built-in models can provide:

  • fully connected networks
  • an LSTM wrapper
  • an attention-network wrapper

All of these can be enabled through the model config.
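
For reference, here is a minimal sketch of such a model config (the keys are standard RLlib model-config keys; the values are only illustrative):

config = {
    "model": {
        # built-in fully connected network
        "fcnet_hiddens": [256, 256],
        "fcnet_activation": "tanh",
        # optionally wrap the model with an LSTM ...
        "use_lstm": True,
        "lstm_cell_size": 256,
        # ... or with an attention (GTrXL) wrapper instead
        # "use_attention": True,
    },
}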

Official example: a custom fully connected network

The network below defines a fully connected model with torch and accounts for:

  • whether there is a final fc (fully connected) layer;
  • whether the user-requested num_outputs is 0;
  • whether the hidden layers are shared with the value function.

The code is fairly long, but it stays compatible with many of the model config parameters. It is easier to first go through what each parameter means and what a built-in model is expected to support, and then read the code.
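
For orientation, these are the model_config keys the model below actually reads, with their usual defaults (defaults can differ slightly between RLlib versions, so treat the values as indicative):

model_config = {
    "fcnet_hiddens": [256, 256],       # hidden layer sizes of the core fc net
    "fcnet_activation": "tanh",        # activation of the core fc net
    "post_fcnet_hiddens": [],          # optional extra fc layers after the core net
    "post_fcnet_activation": "relu",   # activation of the post layers
    "no_final_linear": False,          # skip the final linear layer of size num_outputs
    "vf_share_layers": False,          # share hidden layers with the value function
    "free_log_std": False,             # free-floating log-std for DiagGaussian outputs
}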

import logging
import numpy as np
import gymnasium as gym

from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.misc import SlimFC, AppendBiasLayer, normc_initializer
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import Dict, TensorType, List, ModelConfigDict

torch, nn = try_import_torch()

logger = logging.getLogger(__name__)


class fullyconnectednetwork(TorchModelV2, nn.Module):
    """通用的全连接神经网络模型.."""

    def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        num_outputs: int,
        model_config: ModelConfigDict,
        name: str,
    ):
        TorchModelV2.__init__(
            self, obs_space, action_space, num_outputs, model_config, name
        )
        nn.Module.__init__(self)
        # Read the model hyperparameters from the config:
        # fcnet_hiddens: hidden layer sizes of the core fully connected net, e.g. [256, 256]
        # post_fcnet_hiddens: sizes of the final ("post") fc layers, empty by default
        hiddens = list(model_config.get("fcnet_hiddens", [])) + list(
            model_config.get("post_fcnet_hiddens", [])
        )
        # Activation function for the hidden layers.
        activation = model_config.get("fcnet_activation")
        if not model_config.get("fcnet_hiddens", []):
            activation = model_config.get("post_fcnet_activation")
        # Whether to skip the final linear layer that resizes the hidden output to
        # `num_outputs`. If True, the last hidden layer should already match num_outputs.
        no_final_linear = model_config.get("no_final_linear")
        # Whether the hidden layers are shared with the value function.
        self.vf_share_layers = model_config.get("vf_share_layers")
        # For a DiagGaussian action distribution, make the second half of the model
        # output free-floating bias variables instead of state-dependent ones. Only
        # effective when the default fully connected net is used.
        self.free_log_std = model_config.get("free_log_std")
        # Generate free-floating bias variables for the second half of the outputs.
        # If free_log_std is on, num_outputs must be even; only the first half of the
        # outputs stays state-dependent.
        if self.free_log_std:
            assert num_outputs % 2 == 0, (
                "num_outputs must be divisible by two",
                num_outputs,
            )
            num_outputs = num_outputs // 2
        # Build the layers.
        layers = []
        prev_layer_size = int(np.prod(obs_space.shape))  # output size of the previous layer
        self._logits = None
        # Create the first n-1 hidden layers (everything in `hiddens` except the last entry).
        for size in hiddens[:-1]:
            layers.append(
                SlimFC(
                    in_size=prev_layer_size,
                    out_size=size,
                    initializer=normc_initializer(1.0),
                    activation_fn=activation,
                )
            )
            prev_layer_size = size

        # The last layer is adjusted to be of size num_outputs, but it is still a
        # layer with an activation. This branch is taken when no_final_linear is
        # True and num_outputs > 0.
        if no_final_linear and num_outputs:
            layers.append(
                SlimFC(
                    in_size=prev_layer_size,
                    out_size=num_outputs,
                    initializer=normc_initializer(1.0),
                    activation_fn=activation,
                )
            )
            prev_layer_size = num_outputs
        # Otherwise (no_final_linear is False, or num_outputs is 0): finish the
        # layers with the provided sizes (`hiddens`), plus - iff num_outputs > 0 -
        # a final linear layer of size num_outputs.
        else:
            if len(hiddens) > 0:
                layers.append(
                    SlimFC(
                        in_size=prev_layer_size,
                        out_size=hiddens[-1],
                        initializer=normc_initializer(1.0),
                        activation_fn=activation,
                    )
                )
                prev_layer_size = hiddens[-1]
            if num_outputs:
                self._logits = SlimFC(
                    in_size=prev_layer_size,
                    out_size=num_outputs,
                    initializer=normc_initializer(0.01),
                    activation_fn=None,
                )
            else:
                self.num_outputs = ([int(np.prod(obs_space.shape))] + hiddens[-1:])[-1]

        # Layer to add the log std vars to the state-dependent means.
        # If free_log_std is on and a logits layer exists, append the free-floating
        # log-std variables to the state-dependent means.
        if self.free_log_std and self._logits:
            self._append_free_log_std = AppendBiasLayer(num_outputs)
        # Assemble the hidden layers.
        self._hidden_layers = nn.Sequential(*layers)
        # If the hidden layers are not shared with the value function, build a
        # separate value network.
        self._value_branch_separate = None
        if not self.vf_share_layers:
            # Build a parallel set of hidden layers for the value net.
            prev_vf_layer_size = int(np.prod(obs_space.shape))
            vf_layers = []
            for size in hiddens:
                vf_layers.append(
                    SlimFC(
                        in_size=prev_vf_layer_size,
                        out_size=size,
                        activation_fn=activation,
                        initializer=normc_initializer(1.0),
                    )
                )
                prev_vf_layer_size = size
            self._value_branch_separate = nn.Sequential(*vf_layers)
        # Output layer of the value function.
        self._value_branch = SlimFC(
            in_size=prev_layer_size,
            out_size=1,
            initializer=normc_initializer(0.01),
            activation_fn=None,
        )
        # Holds the current "base" output (before the logits layer).
        self._features = None
        # Holds the last flat input, in case the value branch is separate.
        self._last_flat_in = None

    @override(TorchModelV2)
    def forward(
        self,
        input_dict: Dict[str, TensorType],
        state: List[TensorType],
        seq_lens: TensorType,
    ) -> (TensorType, List[TensorType]):
        # Extract the (flattened) observation tensor.
        obs = input_dict["obs_flat"].float()
        self._last_flat_in = obs.reshape(obs.shape[0], -1)
        # Pass the flattened observations through the hidden layers and cache the
        # result in self._features (the forward pass of the hidden layers).
        self._features = self._hidden_layers(self._last_flat_in)
        # If a logits layer exists, apply it to the hidden output; otherwise use the
        # hidden output directly. For discrete action spaces, the logits parameterize
        # the per-action probability distribution.
        logits = self._logits(self._features) if self._logits else self._features
        # With free_log_std, append the free-floating log-std variables to the logits
        # (typically the state-dependent means of a continuous action distribution).
        if self.free_log_std:
            logits = self._append_free_log_std(logits)
        return logits, state

    @override(TorchModelV2)
    def value_function(self) -> TensorType:
        # _features must not be None, i.e. forward() has to have been called first;
        # otherwise an AssertionError is raised.
        assert self._features is not None, "must call forward() first"
        # If a separate value branch exists, the value function has its own hidden
        # layers; otherwise it shares the hidden layers with the policy network.
        if self._value_branch_separate:
            # Separate branch: pass the cached flat input through the value net's own
            # hidden layers, then through the value output layer; squeeze the trailing
            # size-1 dimension.
            out = self._value_branch(
                self._value_branch_separate(self._last_flat_in)
            ).squeeze(1)
        else:
            # Shared layers: feed the cached hidden output (self._features) directly
            # into the value output layer; squeeze the trailing size-1 dimension.
            out = self._value_branch(self._features).squeeze(1)
        return out

To test a custom model you can use the code below; you only need to change the custom model class that gets registered.

"""
Test script for a custom model.
"""

import argparse
from pathlib import Path
import os

import ray
from ray import air, tune

from ray.rllib.models import ModelCatalog
from ray.rllib.utils.framework import try_import_tf
from ray.tune.registry import get_trainable_cls

from my_torch_model import fullyconnectednetwork

tf1, tf, tfv = try_import_tf()

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run", type=str, default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "torch"],
    default="torch",
    help="The DL framework specifier.",
)
parser.add_argument("--stop-iters", type=int, default=100)
parser.add_argument(
    "--input-files",
    type=str,
    default=os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "../tests/data/cartpole/small.json"
    ),
)

if __name__ == "__main__":
    ray.init()
    args = parser.parse_args()

    # Bazel makes it hard to find files specified in `args` (and `data`).
    # Look for them here.
    if not os.path.exists(args.input_files):
        # This script runs in the ray/rllib/examples dir.
        rllib_dir = Path(__file__).parent.parent
        input_dir = rllib_dir.absolute().joinpath(args.input_files)
        args.input_files = str(input_dir)

    # Register the custom model class under a name; the same name is used below
    # in the training config's "custom_model" field.
    ModelCatalog.register_custom_model("custom_loss", fullyconnectednetwork)

    config = (
        get_trainable_cls(args.run)
        .get_default_config()
        # TODO (Kourosh): This example needs to be migrated to the new RLModule/Learner
        #  API. Users should just inherit the Learner and extend the loss_fn.
        # .experimental(_enable_new_api_stack=False)
        .environment("CartPole-v1")
        .framework(args.framework)
        .rl_module(_enable_rl_module_api=False)
        .rollouts(num_rollout_workers=3)
        .training(
            model={
                "custom_model": "custom_loss",
            },
            _enable_learner_api=False,
        )
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
    )

    stop = {
        "training_iteration": args.stop_iters,
    }

    tuner = tune.Tuner(
        args.run,
        param_space=config,
        run_config=air.RunConfig(stop=stop, verbose=1),
    )
    results = tuner.fit()

The model above is fairly complex, so here is a simpler official example: a model with shared weights.

In this model, a single layer is shared across models, which is one way to implement the parameter sharing used by MAPPO; see the Multi-agent part of the Algorithms documentation for details. A sketch of registering this model for several policies follows the class definition below.

This code does not handle the various preset parameters, so it cannot be driven by the model config; you have to edit its parameters by hand.

import numpy as np

from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch

torch, nn = try_import_torch()

TF2_GLOBAL_SHARED_LAYER = None

TORCH_GLOBAL_SHARED_LAYER = None

if torch:
    # The global, shared layer to be used by both models.
    TORCH_GLOBAL_SHARED_LAYER = SlimFC(
        64,
        64,
        activation_fn=nn.ReLU,
        initializer=torch.nn.init.xavier_uniform_,
    )


class TorchSharedWeightsModel(TorchModelV2, nn.Module):
    """Example of weight sharing between two different TorchModelV2s.

    The shared (single) layer is simply defined outside of the two Models,
    then used by both Models in their forward pass.
    """

    def __init__(
        self, observation_space, action_space, num_outputs, model_config, name
    ):
        TorchModelV2.__init__(
            self, observation_space, action_space, num_outputs, model_config, name
        )
        nn.Module.__init__(self)

        # Non-shared initial layer.
        self.first_layer = SlimFC(
            int(np.prod(observation_space.shape)),
            64,
            activation_fn=nn.ReLU,
            initializer=torch.nn.init.xavier_uniform_,
        )

        # Non-shared final layer.
        self.last_layer = SlimFC(
            64,
            self.num_outputs,
            activation_fn=None,
            initializer=torch.nn.init.xavier_uniform_,
        )
        self.vf = SlimFC(
            64,
            1,
            activation_fn=None,
            initializer=torch.nn.init.xavier_uniform_,
        )
        self._global_shared_layer = TORCH_GLOBAL_SHARED_LAYER
        self._output = None

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        out = self.first_layer(input_dict["obs"])
        self._output = self._global_shared_layer(out)
        model_out = self.last_layer(self._output)
        return model_out, []

    @override(ModelV2)
    def value_function(self):
        assert self._output is not None, "must call forward first!"
        return torch.reshape(self.vf(self._output), [-1])
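
As mentioned above, the point of the module-level TORCH_GLOBAL_SHARED_LAYER is that several policies can share it. A minimal sketch of wiring this up (the environment name, policy ids and registered model name are made up here, and the multi-agent environment itself is assumed to already be registered):

from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.policy import PolicySpec

ModelCatalog.register_custom_model("shared_weights_model", TorchSharedWeightsModel)

config = (
    PPOConfig()
    .environment("my_multi_agent_env")  # assumed: an already registered multi-agent env
    .framework("torch")
    .multi_agent(
        # Both policies build their networks from the same custom model class,
        # so they end up sharing the single global layer defined at module level.
        policies={
            "policy_0": PolicySpec(config={"model": {"custom_model": "shared_weights_model"}}),
            "policy_1": PolicySpec(config={"model": {"custom_model": "shared_weights_model"}}),
        },
        # assumes integer agent ids
        policy_mapping_fn=lambda agent_id, *args, **kwargs: (
            "policy_0" if agent_id % 2 == 0 else "policy_1"
        ),
    )
)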

A network with one branch per action

To define a branched network, the code above was adapted as follows.

Network structure:

input -- fc layer -- fc layer -- action 1
                  -- fc layer -- action 2

Network code:

import numpy as np

from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch

torch, nn = try_import_torch()

TF2_GLOBAL_SHARED_LAYER = None

TORCH_GLOBAL_SHARED_LAYER = None

############################################# my model #########################################

class myTorchSharedWeightsModel(TorchModelV2, nn.Module):
    """两个不同 TorchModelV2 之间的权重共享示例。
    共享(单个)层只是在两个模型之外定义,
    然后由两个模型在前向传播中使用。
    """

    def __init__(
            self, observation_space, action_space, num_outputs, model_config, name
    ):
        TorchModelV2.__init__(
            self, observation_space, action_space, num_outputs, model_config, name
        )
        nn.Module.__init__(self)
        hidden_size = 64
        # Non-shared initial layer.
        self.first_layer = SlimFC(int(np.prod(observation_space.shape)), hidden_size, activation_fn=nn.ReLU,
                                  initializer=torch.nn.init.xavier_uniform_, )
        # self.second_layer = SlimFC(hidden_size, hidden_size, activation_fn=nn.ReLU,
        #                            initializer=torch.nn.init.xavier_uniform_, )
        # branch layers
        self.branch_layers = nn.ModuleList([
            SlimFC(hidden_size, 32, activation_fn=nn.ReLU, initializer=torch.nn.init.xavier_uniform_)
            for _ in range(num_outputs)
        ])
        # Non-shared final layer.
        self.last_layers = nn.ModuleList([
            SlimFC(32, 1, activation_fn=None, initializer=torch.nn.init.xavier_uniform_)
            for _ in range(num_outputs)
        ])
        # value function
        self.vf = SlimFC(32 * num_outputs, 1, activation_fn=None, initializer=torch.nn.init.xavier_uniform_, )
        # The global, shared layer to be used by both models.
        # TORCH_GLOBAL_SHARED_LAYER = SlimFC(64, 64, activation_fn=nn.ReLU, initializer=torch.nn.init.xavier_uniform_, )
        # self._global_shared_layer = TORCH_GLOBAL_SHARED_LAYER
        self._global_shared_layer = None
        self._output = None

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        out = self.first_layer(input_dict["obs"])
        out_list = [branch_layer(out) for branch_layer in self.branch_layers]
        self._output = torch.cat(out_list, dim=1)
        model_out = [last_layer(out_) for last_layer, out_ in zip(self.last_layers, out_list)]
        return torch.cat(model_out, dim=1), []

    @override(ModelV2)
    def value_function(self):
        assert self._output is not None, "must call forward first!"
        return torch.reshape(self.vf(self._output), [-1])
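
A quick sanity check of the branched model with made-up spaces and batch sizes (everything below is illustrative and assumes the class and the `torch` import from above are in scope):

import gymnasium as gym

obs_space = gym.spaces.Box(-1.0, 1.0, (8,))
act_space = gym.spaces.Box(-1.0, 1.0, (2,))
# One branch per output dimension (num_outputs=2 here).
model = myTorchSharedWeightsModel(obs_space, act_space, 2, {}, "branched")

obs = torch.rand(4, 8)  # a batch of 4 flat observations
logits, _ = model.forward({"obs": obs}, [], None)
print(logits.shape)                  # torch.Size([4, 2])
print(model.value_function().shape)  # torch.Size([4])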