isaacgym (legged_gym) Notes (1): Adding Your Own Robot Model and Training It
Preface
This post is part of a series of learning notes on isaacgym. It assumes the isaacgym environment is already installed and that the bundled examples run successfully.
1. Get your robot's URDF file
I use the GO2 robot from Unitree Robotics as the example; download the GO2 URDF files from Unitree's official website. Once downloaded, copy its resources/robots/go2 folder into the legged_gym/resources/robots/ directory.
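As a quick sanity check that the files landed where the config below expects them, you can resolve the path with legged_gym's LEGGED_GYM_ROOT_DIR constant (a minimal sketch; adjust if your folder layout differs):

```python
import os
from legged_gym import LEGGED_GYM_ROOT_DIR  # repo root, defined in legged_gym/__init__.py

# the asset.file entry in the config below resolves to this path
urdf_path = os.path.join(LEGGED_GYM_ROOT_DIR, "resources", "robots", "go2", "urdf", "go2.urdf")
print(urdf_path, os.path.isfile(urdf_path))  # should print True if the copy succeeded
```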
2. Configure the learning environment for the GO2
2.1 Create the config file
Create a go2 folder under legged_gym/legged_gym/envs/, then add a configuration file named go2_config.py inside it:
```python
from legged_gym.envs.base.legged_robot_config import LeggedRobotCfg, LeggedRobotCfgPPO

class GO2RoughCfg( LeggedRobotCfg ):
    class init_state( LeggedRobotCfg.init_state ):
        pos = [0.0, 0.0, 0.42] # x,y,z [m]
        default_joint_angles = { # = target angles [rad] when action = 0.0
            'FL_hip_joint': 0.1,   # [rad]
            'RL_hip_joint': 0.1,   # [rad]
            'FR_hip_joint': -0.1,  # [rad]
            'RR_hip_joint': -0.1,  # [rad]

            'FL_thigh_joint': 0.8, # [rad]
            'RL_thigh_joint': 1.,  # [rad]
            'FR_thigh_joint': 0.8, # [rad]
            'RR_thigh_joint': 1.,  # [rad]

            'FL_calf_joint': -1.5, # [rad]
            'RL_calf_joint': -1.5, # [rad]
            'FR_calf_joint': -1.5, # [rad]
            'RR_calf_joint': -1.5, # [rad]
        }

    class control( LeggedRobotCfg.control ):
        # PD Drive parameters:
        control_type = 'P'
        stiffness = {'joint': 20.}  # [N*m/rad]
        damping = {'joint': 0.5}    # [N*m*s/rad]
        # action scale: target angle = actionScale * action + defaultAngle
        action_scale = 0.25
        # decimation: Number of control action updates @ sim DT per policy DT
        decimation = 4

    class asset( LeggedRobotCfg.asset ):
        file = '{LEGGED_GYM_ROOT_DIR}/resources/robots/go2/urdf/go2.urdf'
        name = "go2"
        foot_name = "foot"
        penalize_contacts_on = ["thigh", "calf"]
        terminate_after_contacts_on = ["base"]
        self_collisions = 1 # 1 to disable, 0 to enable...bitwise filter

    class rewards( LeggedRobotCfg.rewards ):
        soft_dof_pos_limit = 0.9
        base_height_target = 0.25
        class scales( LeggedRobotCfg.rewards.scales ):
            torques = -0.0002
            dof_pos_limits = -10.0

class GO2RoughCfgPPO( LeggedRobotCfgPPO ):
    class algorithm( LeggedRobotCfgPPO.algorithm ):
        entropy_coef = 0.01
    class runner( LeggedRobotCfgPPO.runner ):
        run_name = ''
        experiment_name = 'rough_go2'
```
In this config file, the parts to focus on are the asset class, which points at your URDF and names the links used for contact penalties and termination, and the runner class, whose experiment_name determines the log directory.
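For context on the control section: with control_type = 'P', legged_gym turns each policy action into a joint torque with a PD law inside LeggedRobot._compute_torques, and decimation = 4 means this torque update runs four times per policy step. A minimal per-joint sketch of that computation (the real implementation is batched PyTorch and clips to torque limits; names here are illustrative):

```python
def compute_torque(action, default_angle, q, qd,
                   action_scale=0.25, stiffness=20.0, damping=0.5):
    """Simplified per-joint version of legged_gym's 'P' control mode.

    action: raw policy output for one joint
    q, qd: current joint position [rad] and velocity [rad/s]
    """
    target_angle = action_scale * action + default_angle  # target angle = actionScale * action + defaultAngle
    return stiffness * (target_angle - q) - damping * qd  # PD law

# e.g. FL_thigh_joint at its default pose with a small positive action:
print(compute_torque(action=0.2, default_angle=0.8, q=0.8, qd=0.0))  # 1.0 N*m
```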
2.2 Add it to the task_registry
Open legged_gym/legged_gym/envs/__init__.py and add the two lines marked # added:
```python
from legged_gym import LEGGED_GYM_ROOT_DIR, LEGGED_GYM_ENVS_DIR
from legged_gym.envs.a1.a1_config import A1RoughCfg, A1RoughCfgPPO
from .base.legged_robot import LeggedRobot
from .anymal_c.anymal import Anymal
from .anymal_c.mixed_terrains.anymal_c_rough_config import AnymalCRoughCfg, AnymalCRoughCfgPPO
from .anymal_c.flat.anymal_c_flat_config import AnymalCFlatCfg, AnymalCFlatCfgPPO
from .anymal_b.anymal_b_config import AnymalBRoughCfg, AnymalBRoughCfgPPO
from .cassie.cassie import Cassie
from .cassie.cassie_config import CassieRoughCfg, CassieRoughCfgPPO
from .a1.a1_config import A1RoughCfg, A1RoughCfgPPO
from .go2.go2_config import GO2RoughCfg, GO2RoughCfgPPO # added

import os

from legged_gym.utils.task_registry import task_registry

task_registry.register( "anymal_c_rough", Anymal, AnymalCRoughCfg(), AnymalCRoughCfgPPO() )
task_registry.register( "anymal_c_flat", Anymal, AnymalCFlatCfg(), AnymalCFlatCfgPPO() )
task_registry.register( "anymal_b", Anymal, AnymalBRoughCfg(), AnymalBRoughCfgPPO() )
task_registry.register( "a1", LeggedRobot, A1RoughCfg(), A1RoughCfgPPO() )
task_registry.register( "cassie", Cassie, CassieRoughCfg(), CassieRoughCfgPPO() )
task_registry.register( "go2", LeggedRobot, GO2RoughCfg(), GO2RoughCfgPPO() ) # added
```
Only the two lines marked with the # added comment are new; everything else is already in the file.
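If you want to confirm the registration took effect, a hypothetical one-off check is to import the envs package (which executes the register() calls above) and inspect the registry's task table:

```python
# hypothetical sanity check; task_registry stores registered task classes in a dict
from legged_gym import envs  # importing the package runs the task_registry.register() calls
from legged_gym.utils.task_registry import task_registry

print("go2" in task_registry.task_classes)  # should print True
```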
With that in place, you can train your robot (run from the legged_gym/legged_gym/scripts/ directory):

```bash
python train.py --task=go2 --num_envs=3000 --headless
```
To check the trained policy, run:

```bash
python play.py --task=go2
```

By default, play.py loads the latest checkpoint from logs/rough_go2/ (the experiment_name set above).
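play.py can also export the trained actor as a TorchScript file for deployment (via its EXPORT_POLICY switch). A hedged sketch of loading such an export; the path and sizes below are assumptions based on the default rough-terrain config (num_observations = 235, num_actions = 12):

```python
import torch

# assumed default export location: logs/<experiment_name>/exported/policies/policy_1.pt
policy = torch.jit.load("logs/rough_go2/exported/policies/policy_1.pt")

obs = torch.zeros(1, 235)  # 235 = default rough-terrain observation size in LeggedRobotCfg
with torch.no_grad():
    actions = policy(obs)
print(actions.shape)  # expect torch.Size([1, 12]): one target per actuated joint
```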
Summary
Overall, legged_gym is quite friendly for getting started quickly: adding a new robot takes little more than a config file and a single task_registry entry.