Isaac Lab Robot Training, Part 1

For reference only:

1 How to train in Isaac Lab (using rsl_rl as an example)

Location: source/standalone/workflows/rsl_rl
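Before walking through the script line by line, a typical invocation looks roughly like the following. This is only a sketch: Isaac-Cartpole-v0 stands in for whatever task name is registered in your installation, and the isaaclab.sh launcher is assumed to sit at the repository root.

# train headless with 64 parallel environments
./isaaclab.sh -p source/standalone/workflows/rsl_rl/train.py --task Isaac-Cartpole-v0 --num_envs 64 --headless

# additionally record a 200-step video every 2000 steps
./isaaclab.sh -p source/standalone/workflows/rsl_rl/train.py --task Isaac-Cartpole-v0 --num_envs 64 --headless --video --video_length 200 --video_interval 2000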

# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with RSL-RL."""

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from omni.isaac.lab.app import AppLauncher

# local imports
import cli_args  # isort: skip


# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
# --video: boolean flag; video recording is disabled by default
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
# record 200 steps of video by default
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
# record a video every 2000 steps by default
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
# number of environments to simulate; no default here, so it falls back to the task configuration
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
# task name
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# random seed for the environment, to make training reproducible:
# the same seed produces the same random sequence on every run
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# maximum number of training iterations
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")



# append RSL-RL cli arguments
# adds the RSL-RL-specific command-line options to the ArgumentParser
cli_args.add_rsl_rl_args(parser)

# append AppLauncher cli args
# simulator launch arguments (e.g. headless mode)
AppLauncher.add_app_launcher_args(parser)
# parse the known arguments; anything unrecognized is collected for Hydra
args_cli, hydra_args = parser.parse_known_args()

# always enable cameras to record video
# if the video flag was passed, the cameras must be enabled
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
# sys.argv[0] is usually the script name (e.g. train.py), while hydra_args are the Hydra-specific
# arguments returned by parse_known_args(); rebuilding sys.argv this way avoids conflicts
# between argparse and Hydra (a command-line override example follows the script)
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
# configured with the parsed command-line arguments
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym  # RL environment toolkit
import os  # OS interaction: working directory, creating folders, manipulating file paths, etc.
import torch  # PyTorch
from datetime import datetime  # date and time, used for time-stamped log directories

from rsl_rl.runners import OnPolicyRunner  # the RSL-RL on-policy training runner

from omni.isaac.lab.envs import DirectRLEnvCfg, ManagerBasedRLEnvCfg  # direct and manager-based environment config classes
from omni.isaac.lab.utils.dict import print_dict  # pretty-print a dictionary
from omni.isaac.lab.utils.io import dump_pickle, dump_yaml  # serialize objects to pickle / YAML files

import omni.isaac.lab_tasks  # noqa: F401
from omni.isaac.lab_tasks.utils import get_checkpoint_path  # checkpoint path lookup
from omni.isaac.lab_tasks.utils.hydra import hydra_task_config  # Hydra configuration-management decorator
# wrappers that adapt the vectorized environment to the RSL-RL interface
from omni.isaac.lab_tasks.utils.wrappers.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper

torch.backends.cuda.matmul.allow_tf32 = True  # allow TF32 to speed up matrix multiplication
torch.backends.cudnn.allow_tf32 = True  # allow TF32 in cuDNN as well
torch.backends.cudnn.deterministic = False  # favour speed over bit-wise determinism
torch.backends.cudnn.benchmark = False

# the decorator resolves the registered Hydra configuration for the task and passes it to main()
@hydra_task_config(args_cli.task, "rsl_rl_cfg_entry_point")
# main() receives the environment configuration and the agent configuration
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlOnPolicyRunnerCfg):
    """Train with RSL-RL agent."""
    # override configurations with non-hydra CLI arguments
    # update the agent configuration with the command-line arguments
    agent_cfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli)
    # number of environments: use the CLI value if given, otherwise keep the config default
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    # same override logic for the maximum number of iterations
    agent_cfg.max_iterations = (
        args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg.max_iterations
    )

    # set the environment seed
    # note: certain randomizations occur in the environment initialization so we set the seed here
    # so that runs with the same seed are reproducible
    env_cfg.seed = agent_cfg.seed
    
    #---------------------------------------------------------------
    # specify directory for logging experiments
    # root directory for the log files
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    # convert to an absolute path
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs: {time-stamp}_{run_name}
    log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    # if the agent configuration defines run_name, it is appended to the log directory name
    if agent_cfg.run_name:
        log_dir += f"_{agent_cfg.run_name}"
    log_dir = os.path.join(log_root_path, log_dir)
    #---------------------------------------------------------------
    # create isaac environment
    # key step: create the environment from the registered task name and the environment config;
    # switch the render mode to rgb_array when video recording is requested
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    #----------------------------------------------------------------
    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            # folder where the videos are written
            "video_folder": os.path.join(log_dir, "videos", "train"),
            # function that decides when to record: a video is triggered every args_cli.video_interval steps
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            # length of each recorded clip, in steps
            "video_length": args_cli.video_length,
            # disable the wrapper's own logger
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        # wrap the environment with gymnasium's video recorder
        env = gym.wrappers.RecordVideo(env, **video_kwargs)
    #---------------------------------------------------------------------
    # wrap around environment for rsl-rl
    # custom wrapper that adapts the environment to the interface expected by the rsl-rl framework
    env = RslRlVecEnvWrapper(env)
    
    #---------------------------------------------------------------------
    # create runner from rsl-rl
    # key step: the rsl-rl training runner
    # arguments: the environment, the agent config as a dict, the log directory, and the training device
    runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
    # write git state to logs
    runner.add_git_repo_to_log(__file__)
    # save resume path before creating a new log_dir
    # keep the resume path so that interrupted training can be continued from the last checkpoint
    if agent_cfg.resume:
        # get path to previous checkpoint
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)
        print(f"[INFO]: Loading model checkpoint from: {resume_path}")
        # load previously trained model
        runner.load(resume_path)
    #---------------------------------------------------------------------
    # dump the configuration into log-directory
    # serialize the configuration objects and save them to files
    # environment configuration
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    # agent configuration
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg)
    #---------------------------------------------------------------------

    # run training
    # arguments: the maximum number of iterations, and whether episodes start at random lengths
    runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)
    
    #--------------------------------------------------------------------
    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
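Two practical notes on the script above. First, because sys.argv is rebuilt for Hydra, configuration fields can also be overridden directly on the command line in addition to the argparse flags; the exact keys depend on the task's registered configuration, so the names below (env.scene.num_envs, agent.max_iterations) are illustrative rather than a guaranteed interface:

./isaaclab.sh -p source/standalone/workflows/rsl_rl/train.py --task Isaac-Cartpole-v0 --headless env.scene.num_envs=64 agent.max_iterations=500

Second, following the paths used in the script, a run leaves a log tree roughly like this (checkpoint file names are written by the rsl-rl runner and may differ between versions):

logs/rsl_rl/<experiment_name>/<YYYY-MM-DD_HH-MM-SS>_<run_name>/
    params/          # env.yaml, agent.yaml, env.pkl, agent.pkl dumped above
    videos/train/    # only present when --video is passed
    model_*.pt       # checkpoints saved by the rsl-rl runner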

2 How to play (run a trained policy) in Isaac Lab (using rsl_rl as an example)

Importing the libraries, creating the environment, and parsing the command-line arguments are essentially the same as in training.

The differences are the following:

    # load previously trained model
    ppo_runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
    # load the checkpoint into the runner
    ppo_runner.load(resume_path)

    # obtain the trained policy for inference
    # the policy runs on the specified device
    policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)

    # export policy to onnx/jit
    # export the model into an "exported" folder next to the loaded checkpoint
    export_model_dir = os.path.join(os.path.dirname(resume_path), "exported")
    export_policy_as_jit(
        ppo_runner.alg.actor_critic, ppo_runner.obs_normalizer, path=export_model_dir, filename="policy.pt"
    )
    # arguments: the PPO actor-critic network, the observation normalizer, the export path, and the file name
    export_policy_as_onnx(
        ppo_runner.alg.actor_critic, normalizer=ppo_runner.obs_normalizer, path=export_model_dir, filename="policy.onnx"
    )
    #---------------------------------------------------------
    # reset environment
    # get the initial observations from the environment
    obs, _ = env.get_observations()
    timestep = 0
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode (no gradients)
        with torch.inference_mode():
            # agent stepping
            # policy network: observations in, actions out
            actions = policy(obs)
            # env stepping
            # apply the actions to the environment and get the next observations;
            # the discarded return values are the rewards, dones, and extra info
            obs, _, _, _ = env.step(actions)
        if args_cli.video:
            timestep += 1
            # Exit the play loop after recording one video
            if timestep == args_cli.video_length:
                break

    # close the simulator
    env.close()
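A typical invocation of the play script, again only a sketch with a placeholder task name:

./isaaclab.sh -p source/standalone/workflows/rsl_rl/play.py --task Isaac-Cartpole-v0 --num_envs 32

Because the play script exports the policy as TorchScript (policy.pt) and ONNX (policy.onnx), the trained network can also be reused outside Isaac Lab. A minimal sketch, assuming the placeholder checkpoint path and a 4-dimensional observation just for illustration:

# load the exported TorchScript policy and run one forward pass
import torch

policy = torch.jit.load("logs/rsl_rl/<experiment_name>/<run>/exported/policy.pt")
policy.eval()
obs = torch.zeros(1, 4)  # one environment, 4-dimensional observation (placeholder size)
with torch.inference_mode():
    actions = policy(obs)  # observations in, actions out
print(actions.shape)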
