isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/humanoid.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Mujoco Humanoid robot."""
from __future__ import annotations
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
HUMANOID_CFG = ArticulationCfg(
prim_path="{ENV_REGEX_NS}/Robot",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Humanoid/humanoid_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=None,
max_depenetration_velocity=10.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
copy_from_source=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 1.34),
joint_pos={".*": 0.0},
),
actuators={
"body": ImplicitActuatorCfg(
joint_names_expr=[".*"],
stiffness={
".*_waist.*": 20.0,
".*_upper_arm.*": 10.0,
"pelvis": 10.0,
".*_lower_arm": 2.0,
".*_thigh:0": 10.0,
".*_thigh:1": 20.0,
".*_thigh:2": 10.0,
".*_shin": 5.0,
".*_foot.*": 2.0,
},
damping={
".*_waist.*": 5.0,
".*_upper_arm.*": 5.0,
"pelvis": 5.0,
".*_lower_arm": 1.0,
".*_thigh:0": 5.0,
".*_thigh:1": 5.0,
".*_thigh:2": 5.0,
".*_shin": 0.1,
".*_foot.*": 1.0,
},
),
},
)
"""Configuration for the Mujoco Humanoid robot."""
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/ant.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the Mujoco Ant robot."""
from __future__ import annotations
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
ANT_CFG = ArticulationCfg(
prim_path="{ENV_REGEX_NS}/Robot",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Ant/ant_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=10.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
copy_from_source=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.5),
joint_pos={
".*_leg": 0.0,
"front_left_foot": 0.785398, # 45 degrees
"front_right_foot": -0.785398,
"left_back_foot": -0.785398,
"right_back_foot": 0.785398,
},
),
actuators={
"body": ImplicitActuatorCfg(
joint_names_expr=[".*"],
stiffness=0.0,
damping=0.0,
),
},
)
"""Configuration for the Mujoco Ant robot."""
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets/quadcopter.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the quadcopters"""
from __future__ import annotations
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
##
# Configuration
##
CRAZYFLIE_CFG = ArticulationCfg(
prim_path="{ENV_REGEX_NS}/Robot",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Crazyflie/cf2x.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=10.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
copy_from_source=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.5),
joint_pos={
".*": 0.0,
},
joint_vel={
"m1_joint": 200.0,
"m2_joint": -200.0,
"m3_joint": 200.0,
"m4_joint": -200.0,
},
),
actuators={
"dummy": ImplicitActuatorCfg(
joint_names_expr=[".*"],
stiffness=0.0,
damping=0.0,
),
},
)
"""Configuration for the Crazyflie quadcopter."""
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/sensors/test_tiled_camera.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True, enable_cameras=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import copy
import numpy as np
import random
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.replicator.core as rep
from omni.isaac.core.prims import GeometryPrim, RigidPrim
from pxr import Gf, UsdGeom
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.sensors.camera import TiledCamera, TiledCameraCfg
from omni.isaac.lab.utils.timer import Timer
class TestTiledCamera(unittest.TestCase):
"""Test for USD tiled Camera sensor."""
def setUp(self):
"""Create a blank new stage for each test."""
self.camera_cfg = TiledCameraCfg(
height=128,
width=256,
offset=TiledCameraCfg.OffsetCfg(pos=(0.0, 0.0, 4.0), rot=(0.0, 0.0, 1.0, 0.0), convention="ros"),
prim_path="/World/Camera",
update_period=0,
data_types=["rgb", "depth"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
)
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.01
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=self.dt)
self.sim: sim_utils.SimulationContext = sim_utils.SimulationContext(sim_cfg)
# populate scene
self._populate_scene()
# load stage
stage_utils.update_stage()
def tearDown(self):
"""Stops simulator after each test."""
# close all the opened viewport from before.
rep.vp_manager.destroy_hydra_textures("Replicator")
# stop simulation
# note: cannot use self.sim.stop() since it does one render step after stopping!! This doesn't make sense :(
self.sim._timeline.stop()
# clear the stage
self.sim.clear_all_callbacks()
self.sim.clear_instance()
"""
Tests
"""
def test_single_camera_init(self):
"""Test single camera initialization."""
# Create camera
camera = TiledCamera(self.camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[0].GetPath().pathString, self.camera_cfg.prim_path)
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
        # Check that buffers exist and have correct shapes
self.assertEqual(camera.data.pos_w.shape, (1, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (1, 4))
self.assertEqual(camera.data.quat_w_world.shape, (1, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (1, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (1, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for im_type, im_data in camera.data.output.to_dict().items():
if im_type == "rgb":
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width, 3))
else:
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width, 1))
self.assertGreater(im_data.mean().item(), 0.0)
del camera
def test_multi_camera_init(self):
"""Test multi-camera initialization."""
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
# Create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.prim_path = "/World/Origin_.*/CameraSensor"
camera = TiledCamera(camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[1].GetPath().pathString, "/World/Origin_01/CameraSensor")
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
        # Check that buffers exist and have correct shapes
self.assertEqual(camera.data.pos_w.shape, (2, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (2, 4))
self.assertEqual(camera.data.quat_w_world.shape, (2, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (2, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (2, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for im_type, im_data in camera.data.output.to_dict().items():
if im_type == "rgb":
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 3))
else:
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 1))
self.assertGreater(im_data[0].mean().item(), 0.0)
self.assertGreater(im_data[1].mean().item(), 0.0)
del camera
def test_rgb_only_camera(self):
"""Test initialization with only RGB."""
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
# Create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.data_types = ["rgb"]
camera_cfg.prim_path = "/World/Origin_.*/CameraSensor"
camera = TiledCamera(camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[1].GetPath().pathString, "/World/Origin_01/CameraSensor")
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
self.assertListEqual(list(camera.data.output.keys()), ["rgb"])
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
        # Check that buffers exist and have correct shapes
self.assertEqual(camera.data.pos_w.shape, (2, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (2, 4))
self.assertEqual(camera.data.quat_w_world.shape, (2, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (2, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (2, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for _, im_data in camera.data.output.to_dict().items():
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 3))
self.assertGreater(im_data[0].mean().item(), 0.0)
self.assertGreater(im_data[1].mean().item(), 0.0)
del camera
def test_depth_only_camera(self):
"""Test initialization with only depth."""
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
# Create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.data_types = ["depth"]
camera_cfg.prim_path = "/World/Origin_.*/CameraSensor"
camera = TiledCamera(camera_cfg)
# Check simulation parameter is set correctly
self.assertTrue(self.sim.has_rtx_sensors())
# Play sim
self.sim.reset()
# Check if camera is initialized
self.assertTrue(camera._is_initialized)
# Check if camera prim is set correctly and that it is a camera prim
self.assertEqual(camera._sensor_prims[1].GetPath().pathString, "/World/Origin_01/CameraSensor")
self.assertIsInstance(camera._sensor_prims[0], UsdGeom.Camera)
self.assertListEqual(list(camera.data.output.keys()), ["depth"])
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
        # Check that buffers exist and have correct shapes
self.assertEqual(camera.data.pos_w.shape, (2, 3))
self.assertEqual(camera.data.quat_w_ros.shape, (2, 4))
self.assertEqual(camera.data.quat_w_world.shape, (2, 4))
self.assertEqual(camera.data.quat_w_opengl.shape, (2, 4))
self.assertEqual(camera.data.intrinsic_matrices.shape, (2, 3, 3))
self.assertEqual(camera.data.image_shape, (self.camera_cfg.height, self.camera_cfg.width))
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# check image data
for _, im_data in camera.data.output.to_dict().items():
self.assertEqual(im_data.shape, (2, self.camera_cfg.height, self.camera_cfg.width, 1))
self.assertGreater(im_data[0].mean().item(), 0.0)
self.assertGreater(im_data[1].mean().item(), 0.0)
del camera
def test_throughput(self):
"""Test tiled camera throughput."""
# create camera
camera_cfg = copy.deepcopy(self.camera_cfg)
camera_cfg.height = 480
camera_cfg.width = 640
camera = TiledCamera(camera_cfg)
# Play simulator
self.sim.reset()
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(5):
self.sim.step()
# Simulate physics
for _ in range(5):
# perform rendering
self.sim.step()
# update camera
with Timer(f"Time taken for updating camera with shape {camera.image_shape}"):
camera.update(self.dt)
# Check image data
for im_type, im_data in camera.data.output.to_dict().items():
if im_type == "rgb":
self.assertEqual(im_data.shape, (1, camera_cfg.height, camera_cfg.width, 3))
else:
self.assertEqual(im_data.shape, (1, camera_cfg.height, camera_cfg.width, 1))
self.assertGreater(im_data.mean().item(), 0.0)
del camera
"""
Helper functions.
"""
@staticmethod
def _populate_scene():
"""Add prims to the scene."""
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.SphereLightCfg()
cfg.func("/World/Light/GreySphere", cfg, translation=(4.5, 3.5, 10.0))
cfg.func("/World/Light/WhiteSphere", cfg, translation=(-4.5, 3.5, 10.0))
# Random objects
random.seed(0)
for i in range(10):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
# create prim
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = prim_utils.create_prim(
f"/World/Objects/Obj_{i:02d}",
prim_type,
translation=position,
scale=(0.25, 0.25, 0.25),
semantic_label=prim_type,
)
# cast to geom prim
geom_prim = getattr(UsdGeom, prim_type)(prim)
# set random color
color = Gf.Vec3f(random.random(), random.random(), random.random())
geom_prim.CreateDisplayColorAttr()
geom_prim.GetDisplayColorAttr().Set([color])
# add rigid properties
GeometryPrim(f"/World/Objects/Obj_{i:02d}", collision=True)
RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0)
if __name__ == "__main__":
run_tests()
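Condensed from the tests above, a minimal hedged sketch of exercising the sensor outside the unittest harness (it assumes the same AppLauncher start-up with enable_cameras=True as at the top of this file):

# illustrative only: spawn one tiled camera, step the simulation, read the RGB buffer
stage_utils.create_new_stage()
sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01))
cfg = TiledCameraCfg(
    height=128,
    width=256,
    prim_path="/World/Camera",
    data_types=["rgb"],
    spawn=sim_utils.PinholeCameraCfg(),
)
camera = TiledCamera(cfg)
sim.reset()
for _ in range(10):
    sim.step()
    camera.update(0.01)
rgb = camera.data.output["rgb"]  # shape: (1, 128, 256, 3)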
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/envs/test_base_env.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# Can set this to False to see the GUI for debugging
HEADLESS = True
# launch omniverse app
app_launcher = AppLauncher(headless=HEADLESS)
simulation_app = app_launcher.app
"""Rest everything follows."""
import torch
import unittest
import omni.usd
from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.utils import configclass
@configclass
class EmptyActionsCfg:
"""Action specifications for the environment."""
pass
@configclass
class EmptySceneCfg(InteractiveSceneCfg):
"""Configuration for an empty scene."""
pass
def get_empty_base_env_cfg(device: str = "cuda:0", num_envs: int = 1, env_spacing: float = 1.0):
"""Generate base environment config based on device"""
@configclass
class EmptyEnvCfg(ManagerBasedEnvCfg):
"""Configuration for the empty test environment."""
# Scene settings
scene: EmptySceneCfg = EmptySceneCfg(num_envs=num_envs, env_spacing=env_spacing)
# Basic settings
actions: EmptyActionsCfg = EmptyActionsCfg()
def __post_init__(self):
"""Post initialization."""
# step settings
self.decimation = 4 # env step every 4 sim steps: 200Hz / 4 = 50Hz
# simulation settings
self.sim.dt = 0.005 # sim step every 5ms: 200Hz
# pass device down from test
self.sim.device = device
return EmptyEnvCfg()
class TestBaseEnv(unittest.TestCase):
"""Test for base env class"""
"""
Tests
"""
def test_initialization(self):
for device in ("cuda:0", "cpu"):
with self.subTest(device=device):
# create a new stage
omni.usd.get_context().new_stage()
# create environment
env = ManagerBasedEnv(cfg=get_empty_base_env_cfg(device=device))
# check size of action manager terms
self.assertEqual(env.action_manager.total_action_dim, 0)
self.assertEqual(len(env.action_manager.active_terms), 0)
self.assertEqual(len(env.action_manager.action_term_dim), 0)
# check size of observation manager terms
self.assertEqual(len(env.observation_manager.active_terms), 0)
self.assertEqual(len(env.observation_manager.group_obs_dim), 0)
self.assertEqual(len(env.observation_manager.group_obs_term_dim), 0)
self.assertEqual(len(env.observation_manager.group_obs_concatenate), 0)
# create actions of correct size (1,0)
act = torch.randn_like(env.action_manager.action)
# step environment to verify setup
for _ in range(2):
obs, ext = env.step(action=act)
# close the environment
env.close()
if __name__ == "__main__":
run_tests()
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/deps/isaacsim/check_floating_base_made_fixed.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script demonstrates how to make a floating robot fixed in Isaac Sim."""
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.kit import SimulationApp
# add argparse arguments
parser = argparse.ArgumentParser(
description="This script shows the issue in Isaac Sim with making a floating robot fixed."
)
parser.add_argument("--headless", action="store_true", help="Run in headless mode.")
parser.add_argument("--fix-base", action="store_true", help="Whether to fix the base of the robot.")
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
simulation_app = SimulationApp({"headless": args_cli.headless})
"""Rest everything follows."""
import torch
import carb
import omni.isaac.core.utils.nucleus as nucleus_utils
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.kit.commands
import omni.physx
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.carb import set_carb_setting
from omni.isaac.core.utils.viewports import set_camera_view
from omni.isaac.core.world import World
from pxr import PhysxSchema, UsdPhysics
# check nucleus connection
if nucleus_utils.get_assets_root_path() is None:
msg = (
"Unable to perform Nucleus login on Omniverse. Assets root path is not set.\n"
"\tPlease check: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html#omniverse-nucleus"
)
carb.log_error(msg)
raise RuntimeError(msg)
ISAAC_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac"
"""Path to the `Isaac` directory on the NVIDIA Nucleus Server."""
ISAACLAB_NUCLEUS_DIR = f"{nucleus_utils.get_assets_root_path()}/Isaac/Samples/Orbit"
"""Path to the `Isaac/Samples/Orbit` directory on the NVIDIA Nucleus Server."""
"""
Main
"""
def main():
"""Spawns the ANYmal robot and makes it fixed."""
# Load kit helper
world = World(physics_dt=0.005, rendering_dt=0.005, backend="torch", device="cpu")
# Set main camera
set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Enable hydra scene-graph instancing
# this is needed to visualize the scene when flatcache is enabled
set_carb_setting(world._settings, "/persistent/omnihydra/useSceneGraphInstancing", True)
# Spawn things into stage
# Ground-plane
world.scene.add_default_ground_plane(prim_path="/World/defaultGroundPlane", z_position=0.0)
# Lights-1
prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0))
# Lights-2
prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0))
# -- Robot
# resolve asset
usd_path = f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd"
root_prim_path = "/World/Robot/base"
# add asset
print("Loading robot from: ", usd_path)
prim_utils.create_prim(
"/World/Robot",
usd_path=usd_path,
translation=(0.0, 0.0, 0.6),
)
# create fixed joint
if args_cli.fix_base:
# get all necessary information
stage = stage_utils.get_current_stage()
root_prim = stage.GetPrimAtPath(root_prim_path)
parent_prim = root_prim.GetParent()
# here we assume that the root prim is a rigid body
# there is no clear way to deal with situation where the root prim is not a rigid body but has articulation api
# in that case, it is unclear how to get the link to the first link in the tree
if not root_prim.HasAPI(UsdPhysics.RigidBodyAPI):
raise RuntimeError("The root prim does not have the RigidBodyAPI applied.")
# create fixed joint
omni.kit.commands.execute(
"CreateJointCommand",
stage=stage,
joint_type="Fixed",
from_prim=None,
to_prim=root_prim,
)
# move the root to the parent if this is a rigid body
# having a fixed joint on a rigid body makes physx treat it as a part of the maximal coordinate tree
        # if we put the joint on the parent, the physx parser treats it as a fixed-base articulation
# get parent prim
parent_prim = root_prim.GetParent()
# apply api to parent
UsdPhysics.ArticulationRootAPI.Apply(parent_prim)
PhysxSchema.PhysxArticulationAPI.Apply(parent_prim)
# copy the attributes
# -- usd attributes
root_usd_articulation_api = UsdPhysics.ArticulationRootAPI(root_prim)
for attr_name in root_usd_articulation_api.GetSchemaAttributeNames():
attr = root_prim.GetAttribute(attr_name)
parent_prim.GetAttribute(attr_name).Set(attr.Get())
# -- physx attributes
root_physx_articulation_api = PhysxSchema.PhysxArticulationAPI(root_prim)
for attr_name in root_physx_articulation_api.GetSchemaAttributeNames():
attr = root_prim.GetAttribute(attr_name)
parent_prim.GetAttribute(attr_name).Set(attr.Get())
# remove api from root
root_prim.RemoveAPI(UsdPhysics.ArticulationRootAPI)
root_prim.RemoveAPI(PhysxSchema.PhysxArticulationAPI)
# rename root path to parent path
root_prim_path = parent_prim.GetPath().pathString
# Setup robot
robot_view = ArticulationView(root_prim_path, name="ANYMAL")
world.scene.add(robot_view)
# Play the simulator
world.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# dummy actions
# actions = torch.zeros(robot.count, robot.num_actions, device=robot.device)
init_root_pos_w, init_root_quat_w = robot_view.get_world_poses()
# Define simulation stepping
sim_dt = world.get_physics_dt()
# episode counter
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# If simulation is stopped, then exit.
if world.is_stopped():
break
# If simulation is paused, then skip.
if not world.is_playing():
world.step(render=False)
continue
# do reset
if count % 20 == 0:
# reset
sim_time = 0.0
count = 0
# reset root state
root_pos_w = init_root_pos_w.clone()
root_pos_w[:, :2] += torch.rand_like(root_pos_w[:, :2]) * 0.5
robot_view.set_world_poses(root_pos_w, init_root_quat_w)
# print if it is fixed base
print("Fixed base: ", robot_view._physics_view.shared_metatype.fixed_base)
print("Moving base to: ", root_pos_w[0].cpu().numpy())
print("-" * 50)
# apply random joint actions
actions = torch.rand_like(robot_view.get_joint_positions()) * 0.001
robot_view.set_joint_efforts(actions)
# perform step
world.step()
# update sim-time
sim_time += sim_dt
count += 1
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/performance/test_robot_load_performance.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
from __future__ import annotations
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
import unittest
import omni
from omni.isaac.cloner import GridCloner
from omni.isaac.lab_assets import ANYMAL_D_CFG, CARTPOLE_CFG
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.sim import build_simulation_context
from omni.isaac.lab.utils.timer import Timer
class TestRobotLoadPerformance(unittest.TestCase):
"""Test robot load performance."""
"""
Tests
"""
def test_robot_load_performance(self):
"""Test robot load time."""
test_configs = {
"Cartpole": {"robot_cfg": CARTPOLE_CFG, "expected_load_time": 10.0},
"Anymal_D": {"robot_cfg": ANYMAL_D_CFG, "expected_load_time": 40.0},
}
        for robot_name, test_config in test_configs.items():
for device in ("cuda:0", "cpu"):
                with self.subTest(robot=robot_name, device=device):
with build_simulation_context(device=device) as sim:
cloner = GridCloner(spacing=2)
target_paths = cloner.generate_paths("/World/Robots", 4096)
omni.usd.get_context().get_stage().DefinePrim(target_paths[0], "Xform")
_ = cloner.clone(
source_prim_path=target_paths[0],
prim_paths=target_paths,
replicate_physics=False,
copy_from_source=True,
)
with Timer(f"{test_config[0]} load time for device {device}") as timer:
robot = Articulation( # noqa: F841
test_config[1]["robot_cfg"].replace(prim_path="/World/Robots_.*/Robot")
)
sim.reset()
elapsed_time = timer.time_elapsed
                        self.assertLessEqual(elapsed_time, test_config["expected_load_time"])
if __name__ == "__main__":
run_tests()
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/performance/test_kit_startup_performance.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
from __future__ import annotations
import time
import unittest
from omni.isaac.lab.app import run_tests
class TestKitStartUpPerformance(unittest.TestCase):
"""Test kit startup performance."""
def test_kit_start_up_time(self):
"""Test kit start-up time."""
from omni.isaac.lab.app import AppLauncher
start_time = time.time()
self.app_launcher = AppLauncher(headless=True).app
end_time = time.time()
elapsed_time = end_time - start_time
self.assertLessEqual(elapsed_time, 8.0)
if __name__ == "__main__":
run_tests()
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/terrains/test_terrain_importer.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
simulation_app = AppLauncher(headless=True).app
"""Rest everything follows."""
import numpy as np
import torch
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.kit.commands
from omni.isaac.cloner import GridCloner
from omni.isaac.core.materials import PhysicsMaterial, PreviewSurface
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import GeometryPrim, RigidPrim, RigidPrimView
import omni.isaac.lab.terrains as terrain_gen
from omni.isaac.lab.sim import SimulationContext, build_simulation_context
from omni.isaac.lab.terrains import TerrainImporter, TerrainImporterCfg
from omni.isaac.lab.terrains.config.rough import ROUGH_TERRAINS_CFG
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
class TestTerrainImporter(unittest.TestCase):
"""Test the terrain importer for different ground and procedural terrains."""
def test_grid_clone_env_origins(self):
"""Tests that env origins are consistent when computed using the TerrainImporter and IsaacSim GridCloner."""
# iterate over different number of environments and environment spacing
for device in ("cuda:0", "cpu"):
for env_spacing in [1.0, 4.325, 8.0]:
for num_envs in [1, 4, 125, 379, 1024]:
with self.subTest(num_envs=num_envs, env_spacing=env_spacing):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
# create terrain importer
terrain_importer_cfg = TerrainImporterCfg(
num_envs=num_envs,
env_spacing=env_spacing,
prim_path="/World/ground",
terrain_type="plane", # for flat ground, origins are in grid
terrain_generator=None,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# obtain env origins using terrain importer
terrain_importer_origins = terrain_importer.env_origins
# obtain env origins using grid cloner
grid_cloner_origins = self._obtain_grid_cloner_env_origins(
num_envs, env_spacing, device=sim.device
)
# check if the env origins are the same
torch.testing.assert_close(
terrain_importer_origins, grid_cloner_origins, rtol=1e-5, atol=1e-5
)
def test_terrain_generation(self) -> None:
"""Generates assorted terrains and tests that the resulting mesh has the correct size."""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as _:
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
max_init_terrain_level=None,
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG.replace(curriculum=True),
num_envs=1,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# check mesh exists
mesh = terrain_importer.meshes["terrain"]
self.assertIsNotNone(mesh)
# calculate expected size from config
cfg = terrain_importer.cfg.terrain_generator
self.assertIsNotNone(cfg)
expectedSizeX = cfg.size[0] * cfg.num_rows + 2 * cfg.border_width
expectedSizeY = cfg.size[1] * cfg.num_cols + 2 * cfg.border_width
# get size from mesh bounds
bounds = mesh.bounds
actualSize = abs(bounds[1] - bounds[0])
self.assertAlmostEqual(actualSize[0], expectedSizeX)
self.assertAlmostEqual(actualSize[1], expectedSizeY)
def test_plane(self) -> None:
"""Generates a plane and tests that the resulting mesh has the correct size."""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as _:
expectedSizeX = 2.0e6
expectedSizeY = 2.0e6
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
num_envs=1,
env_spacing=1.0,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# check mesh exists
mesh = terrain_importer.meshes["terrain"]
self.assertIsNotNone(mesh)
# get size from mesh bounds
bounds = mesh.bounds
actualSize = abs(bounds[1] - bounds[0])
self.assertAlmostEqual(actualSize[0], expectedSizeX)
self.assertAlmostEqual(actualSize[1], expectedSizeY)
def test_usd(self) -> None:
"""Imports terrain from a usd and tests that the resulting mesh has the correct size."""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as _:
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="usd",
usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd",
num_envs=1,
env_spacing=1.0,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# check mesh exists
mesh = terrain_importer.meshes["terrain"]
self.assertIsNotNone(mesh)
# expect values from USD file
expectedSizeX = 96
expectedSizeY = 96
# get size from mesh bounds
bounds = mesh.bounds
actualSize = abs(bounds[1] - bounds[0])
self.assertAlmostEqual(actualSize[0], expectedSizeX)
self.assertAlmostEqual(actualSize[1], expectedSizeY)
def test_ball_drop(self) -> None:
"""Generates assorted terrains and spheres. Tests that spheres fall onto terrain and do not pass through it"""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
self._populate_scene(geom_sphere=False, sim=sim)
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
sim.reset()
# Initialize the ball views for physics simulation
ball_view.initialize()
# Play simulator
for _ in range(500):
sim.step(render=False)
                # Balls may retain a small non-zero velocity (< ~0.2) if they roll on the terrain.
                # If balls fall through the terrain, the velocity is much higher (~82.0).
max_velocity_z = torch.max(torch.abs(ball_view.get_linear_velocities()[:, 2]))
self.assertLessEqual(max_velocity_z.item(), 0.5)
def test_ball_drop_geom_sphere(self) -> None:
"""Generates assorted terrains and geom sepheres. Tests that spheres fall onto terrain and do not pass through it"""
for device in ("cuda:0", "cpu"):
with build_simulation_context(device=device, auto_add_lighting=True) as sim:
                self._populate_scene(geom_sphere=True, sim=sim)
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
sim.reset()
# Initialize the ball views for physics simulation
ball_view.initialize()
# Play simulator
for _ in range(500):
sim.step(render=False)
                # Balls may retain a small non-zero velocity (< ~0.2) if they roll on the terrain.
                # If balls fall through the terrain, the velocity is much higher (~82.0).
max_velocity_z = torch.max(torch.abs(ball_view.get_linear_velocities()[:, 2]))
self.assertLessEqual(max_velocity_z.item(), 0.5)
"""
Helper functions.
"""
@staticmethod
def _obtain_grid_cloner_env_origins(num_envs: int, env_spacing: float, device: str) -> torch.Tensor:
"""Obtain the env origins generated by IsaacSim GridCloner (grid_cloner.py)."""
# create grid cloner
cloner = GridCloner(spacing=env_spacing)
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_envs)
prim_utils.define_prim("/World/envs/env_0")
# clone envs using grid cloner
env_origins = cloner.clone(
source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True
)
# return as tensor
return torch.tensor(env_origins, dtype=torch.float32, device=device)
def _populate_scene(self, sim: SimulationContext, num_balls: int = 2048, geom_sphere: bool = False):
"""Create a scene with terrain and randomly spawned balls.
        The spawned balls are either USD geom spheres or USD meshes. We check against both to make sure
        that USD-shape and USD-mesh collisions work as expected.
"""
# Handler for terrains importing
terrain_importer_cfg = terrain_gen.TerrainImporterCfg(
prim_path="/World/ground",
max_init_terrain_level=None,
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG.replace(curriculum=True),
num_envs=num_balls,
)
terrain_importer = TerrainImporter(terrain_importer_cfg)
# Create interface to clone the scene
cloner = GridCloner(spacing=2.0)
cloner.define_base_env("/World/envs")
# Everything under the namespace "/World/envs/env_0" will be cloned
prim_utils.define_prim(prim_path="/World/envs/env_0", prim_type="Xform")
# Define the scene
# -- Ball
if geom_sphere:
# -- Ball physics
_ = DynamicSphere(
prim_path="/World/envs/env_0/ball", translation=np.array([0.0, 0.0, 5.0]), mass=0.5, radius=0.25
)
else:
# -- Ball geometry
            sphere_prim_path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")[1]
            prim_utils.move_prim(sphere_prim_path, "/World/envs/env_0/ball")
# -- Ball physics
RigidPrim(prim_path="/World/envs/env_0/ball", mass=0.5, scale=(0.5, 0.5, 0.5), translation=(0.0, 0.0, 0.5))
GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
# -- Ball material
sphere_geom = GeometryPrim(prim_path="/World/envs/env_0/ball", collision=True)
visual_material = PreviewSurface(prim_path="/World/Looks/ballColorMaterial", color=np.asarray([0.0, 0.0, 1.0]))
physics_material = PhysicsMaterial(
prim_path="/World/Looks/ballPhysicsMaterial",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sphere_geom.set_collision_approximation("convexHull")
sphere_geom.apply_visual_material(visual_material)
sphere_geom.apply_physics_material(physics_material)
# Clone the scene
cloner.define_base_env("/World/envs")
envs_prim_paths = cloner.generate_paths("/World/envs/env", num_paths=num_balls)
cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=envs_prim_paths, replicate_physics=True)
physics_scene_path = sim.get_physics_context().prim_path
cloner.filter_collisions(
physics_scene_path, "/World/collisions", prim_paths=envs_prim_paths, global_paths=["/World/ground"]
)
# Set ball positions over terrain origins
# Create a view over all the balls
ball_view = RigidPrimView("/World/envs/env_.*/ball", reset_xform_properties=False)
# cache initial state of the balls
        ball_initial_positions = terrain_importer.env_origins.clone()
ball_initial_positions[:, 2] += 5.0
# set initial poses
# note: setting here writes to USD :)
ball_view.set_world_poses(positions=ball_initial_positions)
if __name__ == "__main__":
run_tests()
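Condensed from the tests above, a hedged sketch of the simplest import path (the num_envs and spacing values are arbitrary examples):

# illustrative only: flat ground with grid-spaced environment origins
terrain_cfg = terrain_gen.TerrainImporterCfg(
    prim_path="/World/ground",
    terrain_type="plane",
    num_envs=64,
    env_spacing=2.0,
)
terrain = TerrainImporter(terrain_cfg)
origins = terrain.env_origins  # (64, 3) tensor of per-environment origins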
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/utils/test_math.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
import unittest
from math import pi as PI
"""Launch Isaac Sim Simulator first.
This is only needed because of warp dependency.
"""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app in headless mode
simulation_app = AppLauncher(headless=True).app
import omni.isaac.lab.utils.math as math_utils
class TestMathUtilities(unittest.TestCase):
"""Test fixture for checking math utilities in Isaac Lab."""
def test_is_identity_pose(self):
"""Test is_identity_pose method."""
identity_pos_one_row = torch.zeros(3)
identity_rot_one_row = torch.tensor((1.0, 0.0, 0.0, 0.0))
self.assertTrue(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row))
identity_pos_one_row[0] = 1.0
identity_rot_one_row[1] = 1.0
self.assertFalse(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row))
identity_pos_multi_row = torch.zeros(3, 3)
identity_rot_multi_row = torch.zeros(3, 4)
identity_rot_multi_row[:, 0] = 1.0
self.assertTrue(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row))
identity_pos_multi_row[0, 0] = 1.0
identity_rot_multi_row[0, 1] = 1.0
self.assertFalse(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row))
def test_axis_angle_from_quat(self):
"""Test axis_angle_from_quat method."""
# Quaternions of the form (2,4) and (2,2,4)
quats = [
torch.Tensor([[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]]),
torch.Tensor([
[[1.0, 0.0, 0.0, 0.0], [0.8418536, 0.142006, 0.0, 0.5206887]],
[[1.0, 0.0, 0.0, 0.0], [0.9850375, 0.0995007, 0.0995007, 0.0995007]],
]),
]
# Angles of the form (2,3) and (2,2,3)
angles = [
torch.Tensor([[0.0, 0.0, 0.0], [0.3, 0.0, 1.1]]),
torch.Tensor([[[0.0, 0.0, 0.0], [0.3, 0.0, 1.1]], [[0.0, 0.0, 0.0], [0.2, 0.2, 0.2]]]),
]
for quat, angle in zip(quats, angles):
with self.subTest(quat=quat, angle=angle):
torch.testing.assert_close(math_utils.axis_angle_from_quat(quat), angle)
def test_axis_angle_from_quat_approximation(self):
"""Test the Taylor approximation from axis_angle_from_quat method.
This test checks for unstable conversions where theta is very small.
"""
# Generate a small rotation quaternion
# Small angle
theta = torch.Tensor([0.0000001])
        # Arbitrary normalized rotation axis (x, y, z)
axis = [-0.302286, 0.205494, -0.930803]
# Generate quaternion
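        # i.e. q = (cos(theta/2), a_x*sin(theta/2), a_y*sin(theta/2), a_z*sin(theta/2)) for a rotation of theta about unit axis a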
qw = torch.cos(theta / 2)
quat_vect = [qw] + [d * torch.sin(theta / 2) for d in axis]
quaternion = torch.tensor(quat_vect, dtype=torch.float32)
# Convert quaternion to axis-angle
axis_angle_computed = math_utils.axis_angle_from_quat(quaternion)
# Expected axis-angle representation
axis_angle_expected = torch.tensor([theta * d for d in axis], dtype=torch.float32)
# Assert that the computed values are close to the expected values
torch.testing.assert_close(axis_angle_computed, axis_angle_expected)
def test_quat_error_magnitude(self):
"""Test quat_error_magnitude method."""
# Define test cases
# Each tuple contains: q1, q2, expected error
test_cases = [
# No rotation
(torch.Tensor([1, 0, 0, 0]), torch.Tensor([1, 0, 0, 0]), torch.Tensor([0.0])),
# PI/2 rotation
(torch.Tensor([1.0, 0, 0.0, 0]), torch.Tensor([0.7071068, 0.7071068, 0, 0]), torch.Tensor([PI / 2])),
# PI rotation
(torch.Tensor([1.0, 0, 0.0, 0]), torch.Tensor([0.0, 0.0, 1.0, 0]), torch.Tensor([PI])),
]
# Test higher dimension (batched) inputs
q1_list = torch.stack([t[0] for t in test_cases], dim=0)
q2_list = torch.stack([t[1] for t in test_cases], dim=0)
expected_diff_list = torch.stack([t[2] for t in test_cases], dim=0).flatten()
test_cases += [(q1_list, q2_list, expected_diff_list)]
# Iterate over test cases
for q1, q2, expected_diff in test_cases:
with self.subTest(q1=q1, q2=q2):
# Compute the error
q12_diff = math_utils.quat_error_magnitude(q1, q2)
# Check that the error is close to the expected value
if len(q1.shape) > 1:
torch.testing.assert_close(q12_diff, expected_diff)
else:
self.assertAlmostEqual(q12_diff.item(), expected_diff.item(), places=5)
def test_quat_unique(self):
"""Test quat_unique method."""
# Define test cases
quats = math_utils.random_orientation(num=1024, device="cpu")
# Test positive real quaternion
pos_real_quats = math_utils.quat_unique(quats)
# Test that the real part is positive
self.assertTrue(torch.all(pos_real_quats[:, 0] > 0).item())
non_pos_indices = quats[:, 0] < 0
# Check imaginary part have sign flipped if real part is negative
torch.testing.assert_close(pos_real_quats[non_pos_indices], -quats[non_pos_indices])
torch.testing.assert_close(pos_real_quats[~non_pos_indices], quats[~non_pos_indices])
def test_quat_mul_with_quat_unique(self):
"""Test quat_mul method with different quaternions.
This test checks that the quaternion multiplication is consistent when using positive real quaternions
and regular quaternions. It makes sure that the result is the same regardless of the input quaternion sign
(i.e. q and -q are same quaternion in the context of rotations).
"""
quats_1 = math_utils.random_orientation(num=1024, device="cpu")
quats_2 = math_utils.random_orientation(num=1024, device="cpu")
# Make quats positive real
quats_1_pos_real = math_utils.quat_unique(quats_1)
quats_2_pos_real = math_utils.quat_unique(quats_2)
# Option 1: Direct computation on quaternions
quat_result_1 = math_utils.quat_mul(quats_1, math_utils.quat_conjugate(quats_2))
quat_result_1 = math_utils.quat_unique(quat_result_1)
# Option 2: Computation on positive real quaternions
quat_result_2 = math_utils.quat_mul(quats_1_pos_real, math_utils.quat_conjugate(quats_2_pos_real))
quat_result_2 = math_utils.quat_unique(quat_result_2)
# Option 3: Mixed computation
quat_result_3 = math_utils.quat_mul(quats_1, math_utils.quat_conjugate(quats_2_pos_real))
quat_result_3 = math_utils.quat_unique(quat_result_3)
# Check that the result is close to the expected value
torch.testing.assert_close(quat_result_1, quat_result_2)
torch.testing.assert_close(quat_result_2, quat_result_3)
torch.testing.assert_close(quat_result_3, quat_result_1)
def test_quat_error_mag_with_quat_unique(self):
"""Test quat_error_magnitude method with positive real quaternions."""
quats_1 = math_utils.random_orientation(num=1024, device="cpu")
quats_2 = math_utils.random_orientation(num=1024, device="cpu")
# Make quats positive real
quats_1_pos_real = math_utils.quat_unique(quats_1)
quats_2_pos_real = math_utils.quat_unique(quats_2)
# Compute the error
error_1 = math_utils.quat_error_magnitude(quats_1, quats_2)
error_2 = math_utils.quat_error_magnitude(quats_1_pos_real, quats_2_pos_real)
error_3 = math_utils.quat_error_magnitude(quats_1, quats_2_pos_real)
error_4 = math_utils.quat_error_magnitude(quats_1_pos_real, quats_2)
# Check that the error is close to the expected value
torch.testing.assert_close(error_1, error_2)
torch.testing.assert_close(error_2, error_3)
torch.testing.assert_close(error_3, error_4)
torch.testing.assert_close(error_4, error_1)
if __name__ == "__main__":
run_tests()
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/test/utils/test_assets.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
from omni.isaac.lab.app import AppLauncher, run_tests
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import unittest
import omni.isaac.lab.utils.assets as assets_utils
class TestAssetsUtils(unittest.TestCase):
"""Test cases for the assets utility functions."""
def test_nucleus_connection(self):
"""Test checking the Nucleus connection."""
# check nucleus connection
self.assertIsNotNone(assets_utils.NUCLEUS_ASSET_ROOT_DIR)
def test_check_file_path_nucleus(self):
"""Test checking a file path on the Nucleus server."""
# robot file path
usd_path = f"{assets_utils.ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd"
# check file path
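        # note: check_file_path returns 0 if the path cannot be resolved, 1 for a local file, and 2 for a Nucleus file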
self.assertEqual(assets_utils.check_file_path(usd_path), 2)
def test_check_file_path_invalid(self):
"""Test checking an invalid file path."""
# robot file path
usd_path = f"{assets_utils.ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_xyz.usd"
# check file path
self.assertEqual(assets_utils.check_file_path(usd_path), 0)
if __name__ == "__main__":
run_tests()
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/config/extension.toml
[package]
# Note: Semantic Versioning is used: https://semver.org/
version = "0.17.11"
# Description
title = "Isaac Lab framework for Robot Learning"
description="Extension providing main framework interfaces and abstractions for robot learning."
readme = "docs/README.md"
repository = "https://github.com/isaac-sim/IsaacLab"
category = "robotics"
keywords = ["kit", "robotics", "learning", "ai"]
[dependencies]
"omni.isaac.core" = {}
"omni.replicator.core" = {}
[[python.module]]
name = "omni.isaac.lab"
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/__init__.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Package containing the core framework."""
import os
import toml
# Conveniences to other module directories via relative paths
ISAACLAB_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""
ISAACLAB_METADATA = toml.load(os.path.join(ISAACLAB_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""
# Configure the module-level variables
__version__ = ISAACLAB_METADATA["package"]["version"]
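A hedged usage note: once the extension is importable, the parsed version is available directly on the package.

# illustrative only
import omni.isaac.lab
print(omni.isaac.lab.__version__)  # "0.17.11", per the extension.toml above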
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/camera/tiled_camera.py
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import math
import numpy as np
import torch
from collections.abc import Sequence
from tensordict import TensorDict
from typing import TYPE_CHECKING, Any
import omni.usd
import warp as wp
from omni.isaac.core.prims import XFormPrimView
from pxr import UsdGeom
from omni.isaac.lab.utils.warp.kernels import reshape_tiled_image
from ..sensor_base import SensorBase
from .camera import Camera
if TYPE_CHECKING:
from .camera_cfg import TiledCameraCfg
class TiledCamera(Camera):
r"""The tiled rendering camera sensor for acquiring RGB and depth data.
This class wraps over the `UsdGeom Camera`_ for providing a consistent API for acquiring visual data.
It ensures that the camera follows the ROS convention for the coordinate system.
The following sensor types are supported:
- ``"rgb"``: A rendered color image.
- ``"depth"``: An image containing the distance to camera optical center.
    .. _UsdGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html
"""
cfg: TiledCameraCfg
"""The configuration parameters."""
SUPPORTED_TYPES: set[str] = {"rgb", "depth"}
"""The set of sensor types that are supported."""
def __init__(self, cfg: TiledCameraCfg):
"""Initializes the tiled camera sensor.
Args:
cfg: The configuration parameters.
Raises:
RuntimeError: If no camera prim is found at the given path.
ValueError: If the provided data types are not supported by the camera.
"""
super().__init__(cfg)
def __del__(self):
"""Unsubscribes from callbacks and detach from the replicator registry."""
SensorBase.__del__(self)
self._annotator.detach(self.render_product_paths)
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
# message for class
return (
f"Tiled Camera @ '{self.cfg.prim_path}': \n"
f"\tdata types : {self.data.output.sorted_keys} \n"
f"\tupdate period (s): {self.cfg.update_period}\n"
f"\tshape : {self.image_shape}\n"
f"\tnumber of sensors : {self._view.count}"
)
"""
Operations
"""
def reset(self, env_ids: Sequence[int] | None = None):
if not self._is_initialized:
raise RuntimeError(
"TiledCamera could not be initialized. Please ensure --enable_cameras is used to enable rendering."
)
# reset the timestamps
SensorBase.reset(self, env_ids)
if env_ids is None:
env_ids = self._ALL_INDICES
# Reset the frame count
self._frame[env_ids] = 0
"""
Implementation.
"""
def _initialize_impl(self):
"""Initializes the sensor handles and internal buffers.
This function creates handles and registers the provided data types with the replicator registry to
be able to access the data from the sensor. It also initializes the internal buffers to store the data.
Raises:
RuntimeError: If the number of camera prims in the view does not match the number of environments.
RuntimeError: If replicator was not found.
"""
try:
import omni.replicator.core as rep
except ModuleNotFoundError:
raise RuntimeError(
"Replicator was not found for rendering. Please use --enable_cameras to enable rendering."
)
# Initialize parent class
SensorBase._initialize_impl(self)
# Create a view for the sensor
self._view = XFormPrimView(self.cfg.prim_path, reset_xform_properties=False)
self._view.initialize()
# Check that sizes are correct
if self._view.count != self._num_envs:
raise RuntimeError(
f"Number of camera prims in the view ({self._view.count}) does not match"
f" the number of environments ({self._num_envs})."
)
# Create all env_ids buffer
self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long)
# Create frame count buffer
self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long)
# Obtain current stage
stage = omni.usd.get_context().get_stage()
# Convert all encapsulated prims to Camera
for cam_prim_path in self._view.prim_paths:
# Get camera prim
cam_prim = stage.GetPrimAtPath(cam_prim_path)
# Check if prim is a camera
if not cam_prim.IsA(UsdGeom.Camera):
raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.")
# Add to list
sensor_prim = UsdGeom.Camera(cam_prim)
self._sensor_prims.append(sensor_prim)
rep.orchestrator._orchestrator._is_started = True
sensor = rep.create.tiled_sensor(
cameras=self._view.prim_paths,
camera_resolution=[self.image_shape[1], self.image_shape[0]],
tiled_resolution=self._tiled_image_shape(),
output_types=self.cfg.data_types,
)
render_prod_path = rep.create.render_product(camera=sensor, resolution=self._tiled_image_shape())
if not isinstance(render_prod_path, str):
render_prod_path = render_prod_path.path
self._render_product_paths = [render_prod_path]
self._annotator = rep.AnnotatorRegistry.get_annotator("RtxSensorGpu", device=self.device, do_array_copy=False)
self._annotator.attach(self._render_product_paths)
# Create internal buffers
self._create_buffers()
def _create_annotator_data(self):
raise RuntimeError("Annotator data is not available for the tiled camera sensor.")
    def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.Tensor, dict | None]:
raise RuntimeError("Annotator data is not available for the tiled camera sensor.")
def _update_buffers_impl(self, env_ids: Sequence[int]):
# Increment frame count
self._frame[env_ids] += 1
# Extract the flattened image buffer
tiled_data_buffer = self._annotator.get_data()
if isinstance(tiled_data_buffer, np.ndarray):
tiled_data_buffer = wp.array(tiled_data_buffer, device=self.device)
else:
tiled_data_buffer = tiled_data_buffer.to(device=self.device)
# The offset is needed when the buffer contains rgb and depth (the buffer has RGB data first and then depth)
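        # e.g. for N cameras at HxW with both types requested, the first N*H*W*3 buffer elements are RGB and depth starts right after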
offset = self._data.output["rgb"].numel() if "rgb" in self.cfg.data_types else 0
for data_type in self.cfg.data_types:
wp.launch(
kernel=reshape_tiled_image,
dim=(self._view.count, self.cfg.height, self.cfg.width),
inputs=[
tiled_data_buffer,
wp.from_torch(self._data.output[data_type]), # zero-copy alias
*list(self._data.output[data_type].shape[1:]), # height, width, num_channels
self._tiling_grid_shape()[0], # num_tiles_x
offset if data_type == "depth" else 0,
],
device=self.device,
)
"""
Private Helpers
"""
def _tiled_image_shape(self) -> tuple[int, int]:
"""A tuple containing the dimension of the tiled image."""
cols, rows = self._tiling_grid_shape()
return (self.cfg.width * cols, self.cfg.height * rows)
def _tiling_grid_shape(self) -> tuple[int, int]:
"""A tuple containing the tiling grid dimension."""
cols = round(math.sqrt(self._view.count))
rows = math.ceil(self._view.count / cols)
return (cols, rows)
def _check_supported_data_types(self, cfg: TiledCameraCfg):
"""Checks if the data types are supported by the camera."""
if not set(cfg.data_types).issubset(TiledCamera.SUPPORTED_TYPES):
raise ValueError(
f"The TiledCamera class only supports the following types {TiledCamera.SUPPORTED_TYPES} but the"
f" following where provided: {cfg.data_types}"
)
def _create_buffers(self):
"""Create buffers for storing data."""
# create the data object
# -- pose of the cameras
self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device)
self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device)
self._update_poses(self._ALL_INDICES)
# -- intrinsic matrix
self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device)
self._update_intrinsic_matrices(self._ALL_INDICES)
self._data.image_shape = self.image_shape
# -- output data
data_dict = dict()
if "rgb" in self.cfg.data_types:
data_dict["rgb"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device
).contiguous()
if "depth" in self.cfg.data_types:
data_dict["depth"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device
).contiguous()
self._data.output = TensorDict(data_dict, batch_size=self._view.count, device=self.device)
"""
Internal simulation callbacks.
"""
def _invalidate_initialize_callback(self, event):
"""Invalidates the scene elements."""
# call parent
super()._invalidate_initialize_callback(event)
# set all existing views to None to invalidate them
self._view = None
| 9,888 |
Python
| 38.875 | 118 | 0.620044 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/camera/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module for camera wrapper around USD camera prim."""
from .camera import Camera
from .camera_cfg import CameraCfg, TiledCameraCfg
from .camera_data import CameraData
from .tiled_camera import TiledCamera
from .utils import * # noqa: F401, F403
| 380 |
Python
| 28.30769 | 60 | 0.765789 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/contact_sensor/contact_sensor_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.markers import VisualizationMarkersCfg
from omni.isaac.lab.markers.config import CONTACT_SENSOR_MARKER_CFG
from omni.isaac.lab.utils import configclass
from ..sensor_base_cfg import SensorBaseCfg
from .contact_sensor import ContactSensor
@configclass
class ContactSensorCfg(SensorBaseCfg):
"""Configuration for the contact sensor."""
class_type: type = ContactSensor
track_pose: bool = False
"""Whether to track the pose of the sensor's origin. Defaults to False."""
track_air_time: bool = False
"""Whether to track the air/contact time of the bodies (time between contacts). Defaults to False."""
force_threshold: float = 1.0
"""The threshold on the norm of the contact force that determines whether two bodies are in collision or not.
This value is only used for tracking the mode duration (the time in contact or in air),
if :attr:`track_air_time` is True.
"""
filter_prim_paths_expr: list[str] = list()
"""The list of primitive paths (or expressions) to filter contacts with. Defaults to an empty list, in which case
no filtering is applied.
The contact sensor allows reporting contacts between the primitive specified with :attr:`prim_path` and
other primitives in the scene. For instance, in a scene containing a robot, a ground plane and an object,
you can obtain individual contact reports of the base of the robot with the ground plane and the object.
.. note::
The expression in the list can contain the environment namespace regex ``{ENV_REGEX_NS}`` which
will be replaced with the environment namespace.
Example: ``{ENV_REGEX_NS}/Object`` will be replaced with ``/World/envs/env_.*/Object``.
"""
visualizer_cfg: VisualizationMarkersCfg = CONTACT_SENSOR_MARKER_CFG.replace(prim_path="/Visuals/ContactSensor")
"""The configuration object for the visualization markers. Defaults to CONTACT_SENSOR_MARKER_CFG.
.. note::
This attribute is only used when debug visualization is enabled.
"""
| 2,183 |
Python
| 38.70909 | 117 | 0.726981 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package for environment definitions.
Environments define the interface between the agent and the simulation.
In the simplest case, the environment provides the agent with the current
observations and executes the actions provided by the agent. However, the
environment can also provide additional information such as the current
reward, done flag, and information about the current episode.
Based on these, there are two types of environments:
* :class:`ManagerBasedEnv`: The manager-based workflow base environment which
only provides the agent with the
current observations and executes the actions provided by the agent.
* :class:`ManagerBasedRLEnv`: The manager-based workflow RL task environment which
besides the functionality of
the base environment also provides additional Markov Decision Process (MDP)
related information such as the current reward, done flag, and information.
In addition, RL task environments can use the direct workflow implementation:
* :class:`DirectRLEnv`: The direct workflow RL task environment which provides implementations
for implementing scene setup, computing dones, performing resets, and computing
reward and observation.
"""
from . import mdp, ui
from .base_env_cfg import ManagerBasedEnvCfg, ViewerCfg
from .direct_rl_env import DirectRLEnv
from .manager_based_env import ManagerBasedEnv
from .manager_based_rl_env import ManagerBasedRLEnv
from .rl_env_cfg import DirectRLEnvCfg, ManagerBasedRLEnvCfg
from .types import VecEnvObs, VecEnvStepReturn
| 1,648 |
Python
| 41.28205 | 94 | 0.808252 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_rl_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import builtins
import gymnasium as gym
import inspect
import math
import numpy as np
import torch
import weakref
from abc import abstractmethod
from collections.abc import Sequence
from typing import Any, ClassVar
import omni.isaac.core.utils.torch as torch_utils
import omni.kit.app
from omni.isaac.version import get_version
from omni.isaac.lab.envs.types import VecEnvObs, VecEnvStepReturn
from omni.isaac.lab.managers import EventManager
from omni.isaac.lab.scene import InteractiveScene
from omni.isaac.lab.sim import SimulationContext
from omni.isaac.lab.utils.noise import NoiseModel
from omni.isaac.lab.utils.timer import Timer
from .rl_env_cfg import DirectRLEnvCfg
from .ui import ViewportCameraController
class DirectRLEnv(gym.Env):
"""The superclass for the direct workflow reinforcement learning-based environments.
This class implements the core functionality for reinforcement learning-based
environments. It is designed to be used with any RL library. The class is designed
to be used with vectorized environments, i.e., the environment is expected to be run
in parallel with multiple sub-environments.
While the environment itself is implemented as a vectorized environment, we do not
inherit from :class:`gym.vector.VectorEnv`. This is mainly because the class adds
various methods (for wait and asynchronous updates) which are not required.
Additionally, each RL library typically has its own definition for a vectorized
environment. Thus, to reduce complexity, we directly use the :class:`gym.Env` over
here and leave it up to library-defined wrappers to take care of wrapping this
environment for their agents.
Note:
For vectorized environments, it is recommended to **only** call the :meth:`reset`
method once before the first call to :meth:`step`, i.e. after the environment is created.
After that, the :meth:`step` function handles the reset of terminated sub-environments.
This is because the simulator does not support resetting individual sub-environments
in a vectorized environment.
"""
is_vector_env: ClassVar[bool] = True
"""Whether the environment is a vectorized environment."""
metadata: ClassVar[dict[str, Any]] = {
"render_modes": [None, "human", "rgb_array"],
"isaac_sim_version": get_version(),
}
"""Metadata for the environment."""
def __init__(self, cfg: DirectRLEnvCfg, render_mode: str | None = None, **kwargs):
"""Initialize the environment.
Args:
cfg: The configuration object for the environment.
Raises:
RuntimeError: If a simulation context already exists. The environment must always create one
since it configures the simulation context and controls the simulation.
"""
# store inputs to class
self.cfg = cfg
# store the render mode
self.render_mode = render_mode
# initialize internal variables
self._is_closed = False
# create a simulation context to control the simulator
if SimulationContext.instance() is None:
self.sim: SimulationContext = SimulationContext(self.cfg.sim)
else:
raise RuntimeError("Simulation context already exists. Cannot create a new one.")
# print useful information
print("[INFO]: Base environment:")
print(f"\tEnvironment device : {self.device}")
print(f"\tPhysics step-size : {self.physics_dt}")
print(f"\tRendering step-size : {self.physics_dt * self.cfg.sim.substeps}")
print(f"\tEnvironment step-size : {self.step_dt}")
print(f"\tPhysics GPU pipeline : {self.cfg.sim.use_gpu_pipeline}")
print(f"\tPhysics GPU simulation: {self.cfg.sim.physx.use_gpu}")
# generate scene
with Timer("[INFO]: Time taken for scene creation"):
self.scene = InteractiveScene(self.cfg.scene)
self._setup_scene()
print("[INFO]: Scene manager: ", self.scene)
# set up camera viewport controller
# viewport is not available in other rendering modes so the function will throw a warning
# FIXME: This needs to be fixed in the future when we unify the UI functionalities even for
# non-rendering modes.
if self.sim.render_mode >= self.sim.RenderMode.PARTIAL_RENDERING:
self.viewport_camera_controller = ViewportCameraController(self, self.cfg.viewer)
else:
self.viewport_camera_controller = None
# play the simulator to activate physics handles
# note: this activates the physics simulation view that exposes TensorAPIs
# note: when started in extension mode, first call sim.reset_async() and then initialize the managers
if builtins.ISAAC_LAUNCHED_FROM_TERMINAL is False:
print("[INFO]: Starting the simulation. This may take a few seconds. Please wait...")
with Timer("[INFO]: Time taken for simulation start"):
self.sim.reset()
# -- event manager used for randomization
if self.cfg.events:
self.event_manager = EventManager(self.cfg.events, self)
print("[INFO] Event Manager: ", self.event_manager)
# make sure torch is running on the correct device
if "cuda" in self.device:
torch.cuda.set_device(self.device)
# check if debug visualization has been implemented by the environment
source_code = inspect.getsource(self._set_debug_vis_impl)
self.has_debug_vis_implementation = "NotImplementedError" not in source_code
self._debug_vis_handle = None
# extend UI elements
# we need to do this here after all the managers are initialized
# this is because they dictate the sensors and commands right now
if self.sim.has_gui() and self.cfg.ui_window_class_type is not None:
self._window = self.cfg.ui_window_class_type(self, window_name="IsaacLab")
else:
# if no window, then we don't need to store the window
self._window = None
# allocate dictionary to store metrics
self.extras = {}
# initialize data and constants
# -- counter for curriculum
self.common_step_counter = 0
# -- init buffers
self.episode_length_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
self.reset_terminated = torch.zeros(self.num_envs, device=self.device, dtype=torch.bool)
self.reset_time_outs = torch.zeros_like(self.reset_terminated)
self.reset_buf = torch.zeros(self.num_envs, dtype=torch.bool, device=self.sim.device)
self.actions = torch.zeros(self.num_envs, self.cfg.num_actions, device=self.sim.device)
# setup the action and observation spaces for Gym
self._configure_gym_env_spaces()
# -- noise cfg for adding action and observation noise
if self.cfg.action_noise_model:
self._action_noise_model: NoiseModel = self.cfg.action_noise_model.class_type(
self.num_envs, self.cfg.action_noise_model, self.device
)
if self.cfg.observation_noise_model:
self._observation_noise_model: NoiseModel = self.cfg.observation_noise_model.class_type(
self.num_envs, self.cfg.observation_noise_model, self.device
)
# perform events at the start of the simulation
if self.cfg.events:
if "startup" in self.event_manager.available_modes:
self.event_manager.apply(mode="startup")
# print the environment information
print("[INFO]: Completed setting up the environment...")
def __del__(self):
"""Cleanup for the environment."""
self.close()
"""
Properties.
"""
@property
def num_envs(self) -> int:
"""The number of instances of the environment that are running."""
return self.scene.num_envs
@property
def physics_dt(self) -> float:
"""The physics time-step (in s).
This is the smallest time-step at which the physics simulation is stepped.
"""
return self.cfg.sim.dt
@property
def step_dt(self) -> float:
"""The environment stepping time-step (in s).
This is the time-step at which the environment steps forward.
"""
return self.cfg.sim.dt * self.cfg.decimation
@property
def device(self):
"""The device on which the environment is running."""
return self.sim.device
@property
def max_episode_length_s(self) -> float:
"""Maximum episode length in seconds."""
return self.cfg.episode_length_s
@property
def max_episode_length(self):
"""The maximum episode length in steps adjusted from s."""
return math.ceil(self.max_episode_length_s / (self.cfg.sim.dt * self.cfg.decimation))
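# For instance (an illustrative sketch): with ``episode_length_s = 10.0``,
# ``sim.dt = 0.01`` and ``decimation = 10``, the environment steps at
# ``step_dt = 0.1`` s and ``max_episode_length = ceil(10.0 / 0.1) = 100`` steps.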
"""
Operations.
"""
def reset(self, seed: int | None = None, options: dict[str, Any] | None = None) -> tuple[VecEnvObs, dict]:
"""Resets all the environments and returns observations.
Args:
seed: The seed to use for randomization. Defaults to None, in which case the seed is not set.
options: Additional information to specify how the environment is reset. Defaults to None.
Note:
This argument is used for compatibility with Gymnasium environment definition.
Returns:
A tuple containing the observations and extras.
"""
# set the seed
if seed is not None:
self.seed(seed)
# reset state of scene
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
self._reset_idx(indices)
obs = self._get_observations()
# return observations
return obs, self.extras
def step(self, action: torch.Tensor) -> VecEnvStepReturn:
"""Execute one time-step of the environment's dynamics.
The environment steps forward at a fixed time-step, while the physics simulation is
decimated at a lower time-step. This is to ensure that the simulation is stable. These two
time-steps can be configured independently using the :attr:`DirectRLEnvCfg.decimation` (number of
simulation steps per environment step) and the :attr:`DirectRLEnvCfg.physics_dt` (physics time-step).
Based on these parameters, the environment time-step is computed as the product of the two.
Args:
action: The actions to apply on the environment. Shape is (num_envs, action_dim).
Returns:
A tuple containing the observations, rewards, resets (terminated and truncated) and extras.
"""
# add action noise
if self.cfg.action_noise_model:
action = self._action_noise_model.apply(action.clone())
# process actions
self._pre_physics_step(action)
# perform physics stepping
for _ in range(self.cfg.decimation):
# set actions into buffers
self._apply_action()
# set actions into simulator
self.scene.write_data_to_sim()
# simulate
self.sim.step(render=False)
# update buffers at sim dt
self.scene.update(dt=self.physics_dt)
# perform rendering if gui is enabled
if self.sim.has_gui() or self.sim.has_rtx_sensors():
self.sim.render()
# post-step:
# -- update env counters (used for curriculum generation)
self.episode_length_buf += 1 # step in current episode (per env)
self.common_step_counter += 1 # total step (common for all envs)
self.reset_terminated[:], self.reset_time_outs[:] = self._get_dones()
self.reset_buf = self.reset_terminated | self.reset_time_outs
self.reward_buf = self._get_rewards()
# -- reset envs that terminated/timed-out and log the episode information
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self._reset_idx(reset_env_ids)
# post-step: step interval event
if self.cfg.events:
if "interval" in self.event_manager.available_modes:
self.event_manager.apply(mode="interval", dt=self.step_dt)
# update observations
self.obs_buf = self._get_observations()
# add observation noise
if self.cfg.observation_noise_model:
self.obs_buf["policy"] = self._observation_noise_model.apply(self.obs_buf["policy"])
# return observations, rewards, resets and extras
return self.obs_buf, self.reward_buf, self.reset_terminated, self.reset_time_outs, self.extras
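# A minimal interaction sketch (illustrative only; ``MyTaskEnv`` is a hypothetical
# subclass implementing the abstract methods below, not part of this module):
#
#   env = MyTaskEnv(cfg=my_cfg)
#   obs, extras = env.reset(seed=42)
#   for _ in range(1000):
#       actions = torch.zeros(env.num_envs, env.cfg.num_actions, device=env.device)
#       obs, rew, terminated, truncated, extras = env.step(actions)
#   env.close()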
@staticmethod
def seed(seed: int = -1) -> int:
"""Set the seed for the environment.
Args:
seed: The seed for random generator. Defaults to -1.
Returns:
The seed used for random generator.
"""
# set seed for replicator
try:
import omni.replicator.core as rep
rep.set_global_seed(seed)
except ModuleNotFoundError:
pass
# set seed for torch and other libraries
return torch_utils.set_seed(seed)
def render(self, recompute: bool = False) -> np.ndarray | None:
"""Run rendering without stepping through the physics.
By convention, if mode is:
- **human**: Render to the current display and return nothing. Usually for human consumption.
- **rgb_array**: Return a numpy.ndarray with shape (x, y, 3), representing RGB values for an
x-by-y pixel image, suitable for turning into a video.
Args:
recompute: Whether to force a render even if the simulator has already rendered the scene.
Defaults to False.
Returns:
The rendered image as a numpy array if mode is "rgb_array". Otherwise, returns None.
Raises:
RuntimeError: If mode is set to "rgb_data" and simulation render mode does not support it.
In this case, the simulation render mode must be set to ``RenderMode.PARTIAL_RENDERING``
or ``RenderMode.FULL_RENDERING``.
NotImplementedError: If an unsupported rendering mode is specified.
"""
# run a rendering step of the simulator
# if we have rtx sensors, we do not need to render again since rendering already happens during stepping
if not self.sim.has_rtx_sensors() and not recompute:
self.sim.render()
# decide the rendering mode
if self.render_mode == "human" or self.render_mode is None:
return None
elif self.render_mode == "rgb_array":
# check if any render could have happened
if self.sim.render_mode.value < self.sim.RenderMode.PARTIAL_RENDERING.value:
raise RuntimeError(
f"Cannot render '{self.render_mode}' when the simulation render mode is"
f" '{self.sim.render_mode.name}'. Please set the simulation render mode to:"
f"'{self.sim.RenderMode.PARTIAL_RENDERING.name}' or '{self.sim.RenderMode.FULL_RENDERING.name}'."
" If running headless, make sure --enable_cameras is set."
)
# create the annotator if it does not exist
if not hasattr(self, "_rgb_annotator"):
import omni.replicator.core as rep
# create render product
self._render_product = rep.create.render_product(
self.cfg.viewer.cam_prim_path, self.cfg.viewer.resolution
)
# create rgb annotator -- used to read data from the render product
self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu")
self._rgb_annotator.attach([self._render_product])
# obtain the rgb data
rgb_data = self._rgb_annotator.get_data()
# convert to numpy array
rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape)
# return the rgb data
# note: initially the renderer is warming up and returns empty data
if rgb_data.size == 0:
return np.zeros((self.cfg.viewer.resolution[1], self.cfg.viewer.resolution[0], 3), dtype=np.uint8)
else:
return rgb_data[:, :, :3]
else:
raise NotImplementedError(
f"Render mode '{self.render_mode}' is not supported. Please use: {self.metadata['render_modes']}."
)
def close(self):
"""Cleanup for the environment."""
if not self._is_closed:
if self.cfg.events:
del self.event_manager
del self.scene
if self.viewport_camera_controller is not None:
del self.viewport_camera_controller
# clear callbacks and instance
self.sim.clear_all_callbacks()
self.sim.clear_instance()
# destroy the window
if self._window is not None:
self._window = None
# update closing status
self._is_closed = True
def set_debug_vis(self, debug_vis: bool) -> bool:
"""Toggles the environment debug visualization.
Args:
debug_vis: Whether to visualize the environment debug visualization.
Returns:
Whether the debug visualization was successfully set. False if the environment
does not support debug visualization.
"""
# check if debug visualization is supported
if not self.has_debug_vis_implementation:
return False
# toggle debug visualization objects
self._set_debug_vis_impl(debug_vis)
# toggle debug visualization handles
if debug_vis:
# create a subscriber for the post update event if it doesn't exist
if self._debug_vis_handle is None:
app_interface = omni.kit.app.get_app_interface()
self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop(
lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event)
)
else:
# remove the subscriber if it exists
if self._debug_vis_handle is not None:
self._debug_vis_handle.unsubscribe()
self._debug_vis_handle = None
# return success
return True
"""
Helper functions.
"""
def _configure_gym_env_spaces(self):
"""Configure the action and observation spaces for the Gym environment."""
# observation space (unbounded since we don't impose any limits)
self.num_actions = self.cfg.num_actions
self.num_observations = self.cfg.num_observations
self.num_states = self.cfg.num_states
# set up spaces
self.single_observation_space = gym.spaces.Dict()
self.single_observation_space["policy"] = gym.spaces.Box(
low=-np.inf, high=np.inf, shape=(self.num_observations,)
)
self.single_action_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.num_actions,))
# batch the spaces for vectorized environments
self.observation_space = gym.vector.utils.batch_space(self.single_observation_space["policy"], self.num_envs)
self.action_space = gym.vector.utils.batch_space(self.single_action_space, self.num_envs)
if self.num_states > 0:
self.single_observation_space["critic"] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.num_states,))
self.state_space = gym.vector.utils.batch_space(self.single_observation_space["critic"], self.num_envs)
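# For example (a hedged illustration): with ``num_observations = 48``,
# ``num_actions = 12`` and ``num_envs = 4096``, the policy observation space is
# ``Box(-inf, inf, (48,))`` and the batched action space is ``Box(-inf, inf, (4096, 12))``.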
def _reset_idx(self, env_ids: Sequence[int]):
"""Reset environments based on specified indices.
Args:
env_ids: List of environment ids which must be reset
"""
self.scene.reset(env_ids)
# apply events such as randomizations for environments that need a reset
if self.cfg.events:
if "reset" in self.event_manager.available_modes:
self.event_manager.apply(env_ids=env_ids, mode="reset")
if self.cfg.action_noise_model:
self._action_noise_model.reset(env_ids)
if self.cfg.observation_noise_model:
self._observation_noise_model.reset(env_ids)
# reset the episode length buffer
self.episode_length_buf[env_ids] = 0
# this can be done through configs as well
def _setup_scene(self):
pass
def _set_debug_vis_impl(self, debug_vis: bool):
"""Set debug visualization into visualization objects.
This function is responsible for creating the visualization objects if they don't exist
and input ``debug_vis`` is True. If the visualization objects exist, the function should
set their visibility into the stage.
"""
raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.")
@abstractmethod
def _pre_physics_step(self, actions: torch.Tensor):
raise NotImplementedError
@abstractmethod
def _apply_action(self):
raise NotImplementedError
@abstractmethod
def _get_observations(self) -> VecEnvObs:
raise NotImplementedError
def _get_states(self) -> VecEnvObs | None:
return None
@abstractmethod
def _get_rewards(self) -> torch.Tensor:
raise NotImplementedError
@abstractmethod
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
| 21,989 |
Python
| 40.885714 | 120 | 0.636682 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/types.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import Dict
VecEnvObs = Dict[str, torch.Tensor | Dict[str, torch.Tensor]]
"""Observation returned by the environment.
The observations are stored in a dictionary. The keys are the group to which the observations belong.
This is useful for various setups such as reinforcement learning with asymmetric actor-critic or
multi-agent learning. For non-learning paradigms, this may include observations for different components
of a system.
Within each group, the observations can be stored either as a dictionary with keys as the names of each
observation term in the group, or a single tensor obtained from concatenating all the observation terms.
For example, for asymmetric actor-critic, the observation for the actor and the critic can be accessed
using the keys ``"policy"`` and ``"critic"`` respectively.
Note:
By default, most learning frameworks deal with default and privileged observations in different ways.
This handling must be taken care of by the wrapper around the :class:`ManagerBasedRLEnv` instance.
For included frameworks (RSL-RL, RL-Games, skrl), the observations must have the key "policy". In case,
the key "critic" is also present, then the critic observations are taken from the "critic" group.
Otherwise, they are the same as the "policy" group.
"""
VecEnvStepReturn = tuple[VecEnvObs, torch.Tensor, torch.Tensor, torch.Tensor, Dict]
"""The environment signals processed at the end of each step.
The tuple contains batched information for each sub-environment. The information is stored in the following order:
1. **Observations**: The observations from the environment.
2. **Rewards**: The rewards from the environment.
3. **Terminated Dones**: Whether the environment reached a terminal state, such as task success or the robot falling.
4. **Timeout Dones**: Whether the environment reached a timeout state, such as reaching the maximum episode length.
5. **Extras**: A dictionary containing additional information from the environment.
"""
| 2,171 |
Python
| 47.266666 | 117 | 0.77614 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/rl_env_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.noise.noise_cfg import NoiseModelCfg
from .base_env_cfg import ManagerBasedEnvCfg, ViewerCfg
from .ui import BaseEnvWindow, ManagerBasedRLEnvWindow
@configclass
class ManagerBasedRLEnvCfg(ManagerBasedEnvCfg):
"""Configuration for a reinforcement learning environment with the manager-based workflow."""
# ui settings
ui_window_class_type: type | None = ManagerBasedRLEnvWindow
# general settings
is_finite_horizon: bool = False
"""Whether the learning task is treated as a finite or infinite horizon problem for the agent.
Defaults to False, which means the task is treated as an infinite horizon problem.
This flag handles the subtleties of finite and infinite horizon tasks:
* **Finite horizon**: no penalty or bootstrapping value is required by the agent for
running out of time. However, the environment still needs to terminate the episode after the
time limit is reached.
* **Infinite horizon**: the agent needs to bootstrap the value of the state at the end of the episode.
This is done by sending a time-limit (or truncated) done signal to the agent, which triggers this
bootstrapping calculation.
If True, then the environment is treated as a finite horizon problem and no time-out (or truncated) done signal
is sent to the agent. If False, then the environment is treated as an infinite horizon problem and a time-out
(or truncated) done signal is sent to the agent.
Note:
The base :class:`ManagerBasedRLEnv` class does not use this flag directly. It is used by the environment
wrappers to determine what type of done signal to send to the corresponding learning agent.
"""
episode_length_s: float = MISSING
"""Duration of an episode (in seconds).
Based on the decimation rate and physics time step, the episode length is calculated as:
.. code-block:: python
episode_length_steps = ceil(episode_length_s / (decimation_rate * physics_time_step))
For example, if the decimation rate is 10, the physics time step is 0.01, and the episode length is 10 seconds,
then the episode length in steps is 100.
"""
# environment settings
rewards: object = MISSING
"""Reward settings.
Please refer to the :class:`omni.isaac.lab.managers.RewardManager` class for more details.
"""
terminations: object = MISSING
"""Termination settings.
Please refer to the :class:`omni.isaac.lab.managers.TerminationManager` class for more details.
"""
curriculum: object = MISSING
"""Curriculum settings.
Please refer to the :class:`omni.isaac.lab.managers.CurriculumManager` class for more details.
"""
commands: object = MISSING
"""Command settings.
Please refer to the :class:`omni.isaac.lab.managers.CommandManager` class for more details.
"""
@configclass
class DirectRLEnvCfg(ManagerBasedEnvCfg):
"""Configuration for a reinforcement learning environment with the direct workflow."""
# simulation settings
viewer: ViewerCfg = ViewerCfg()
"""Viewer configuration. Default is ViewerCfg()."""
sim: SimulationCfg = SimulationCfg()
"""Physics simulation configuration. Default is SimulationCfg()."""
# ui settings
ui_window_class_type: type | None = BaseEnvWindow
"""The class type of the UI window. Default is None.
If None, then no UI window is created.
Note:
If you want to make your own UI window, you can create a class that inherits from
from :class:`omni.isaac.lab.envs.ui.base_env_window.BaseEnvWindow`. Then, you can set
this attribute to your class type.
"""
# general settings
decimation: int = MISSING
"""Number of control action updates @ sim dt per policy dt.
For instance, if the simulation dt is 0.01s and the policy dt is 0.1s, then the decimation is 10.
This means that the control action is updated every 10 simulation steps.
"""
# environment settings
scene: InteractiveSceneCfg = MISSING
"""Scene settings.
Please refer to the :class:`omni.isaac.lab.scene.InteractiveSceneCfg` class for more details.
"""
# general settings
is_finite_horizon: bool = False
"""Whether the learning task is treated as a finite or infinite horizon problem for the agent.
Defaults to False, which means the task is treated as an infinite horizon problem.
This flag handles the subtleties of finite and infinite horizon tasks:
* **Finite horizon**: no penalty or bootstrapping value is required by the agent for
running out of time. However, the environment still needs to terminate the episode after the
time limit is reached.
* **Infinite horizon**: the agent needs to bootstrap the value of the state at the end of the episode.
This is done by sending a time-limit (or truncated) done signal to the agent, which triggers this
bootstrapping calculation.
If True, then the environment is treated as a finite horizon problem and no time-out (or truncated) done signal
is sent to the agent. If False, then the environment is treated as an infinite horizon problem and a time-out
(or truncated) done signal is sent to the agent.
Note:
The base :class:`ManagerBasedRLEnv` class does not use this flag directly. It is used by the environment
wrappers to determine what type of done signal to send to the corresponding learning agent.
"""
episode_length_s: float = MISSING
"""Duration of an episode (in seconds).
Based on the decimation rate and physics time step, the episode length is calculated as:
.. code-block:: python
episode_length_steps = ceil(episode_length_s / (decimation_rate * physics_time_step))
For example, if the decimation rate is 10, the physics time step is 0.01, and the episode length is 10 seconds,
then the episode length in steps is 100.
"""
num_observations: int = MISSING
"""The size of the observation for each environment."""
num_states: int = 0
"""The size of the state-space for each environment. Default is 0.
This is used for asymmetric actor-critic and defines the observation space for the critic.
"""
num_actions: int = MISSING
"""The size of the action space for each environment."""
events: object = None
"""Settings for specifying domain randomization terms during training.
Please refer to the :class:`omni.isaac.lab.managers.EventManager` class for more details.
"""
action_noise_model: NoiseModelCfg | None = None
"""Settings for adding noise to the action buffer.
Please refer to the :class:`omni.isaac.lab.utils.noise.NoiseModel` class for more details.
"""
observation_noise_model: NoiseModelCfg | None = None
"""Settings for adding noise to the observation buffer.
Please refer to the :class:`omni.isaac.lab.utils.noise.NoiseModel` class for more details.
"""
| 7,302 |
Python
| 38.475675 | 115 | 0.718022 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/ui/manager_based_rl_env_window.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from typing import TYPE_CHECKING
from .base_env_window import BaseEnvWindow
if TYPE_CHECKING:
from ..manager_based_rl_env import ManagerBasedRLEnv
class ManagerBasedRLEnvWindow(BaseEnvWindow):
"""Window manager for the RL environment.
On top of the basic environment window, this class adds controls for the RL environment.
This includes visualization of the command manager.
"""
def __init__(self, env: ManagerBasedRLEnv, window_name: str = "IsaacLab"):
"""Initialize the window.
Args:
env: The environment object.
window_name: The name of the window. Defaults to "IsaacLab".
"""
# initialize base window
super().__init__(env, window_name)
# add custom UI elements
with self.ui_window_elements["main_vstack"]:
with self.ui_window_elements["debug_frame"]:
with self.ui_window_elements["debug_vstack"]:
self._create_debug_vis_ui_element("commands", self.env.command_manager)
self._create_debug_vis_ui_element("actions", self.env.action_manager)
| 1,287 |
Python
| 32.02564 | 92 | 0.660451 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/events.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to enable different events.
Events include anything related to altering the simulation state. This includes changing the physics
materials, applying external forces, and resetting the state of the asset.
The functions can be passed to the :class:`omni.isaac.lab.managers.EventTermCfg` object to enable
the event introduced by the function.
"""
from __future__ import annotations
import torch
import warnings
from typing import TYPE_CHECKING, Literal
import carb
import omni.isaac.lab.sim as sim_utils
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.actuators import ImplicitActuator
from omni.isaac.lab.assets import Articulation, RigidObject
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.terrains import TerrainImporter
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedEnv
def randomize_rigid_body_material(
env: ManagerBasedEnv,
env_ids: torch.Tensor | None,
static_friction_range: tuple[float, float],
dynamic_friction_range: tuple[float, float],
restitution_range: tuple[float, float],
num_buckets: int,
asset_cfg: SceneEntityCfg,
):
"""Randomize the physics materials on all geometries of the asset.
This function creates a set of physics materials with random static friction, dynamic friction, and restitution
values. The number of materials is specified by ``num_buckets``. The materials are generated by sampling
uniform random values from the given ranges.
The material properties are then assigned to the geometries of the asset. The assignment is done by
creating a random integer tensor of shape (num_instances, max_num_shapes) where ``num_instances``
is the number of assets spawned and ``max_num_shapes`` is the maximum number of shapes in the asset (over
all bodies). The integer values are used as indices to select the material properties from the
material buckets.
.. attention::
This function uses CPU tensors to assign the material properties. It is recommended to use this function
only during the initialization of the environment. Otherwise, it may lead to a significant performance
overhead.
.. note::
PhysX only allows 64000 unique physics materials in the scene. If the number of materials exceeds this
limit, the simulation will crash.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
if not isinstance(asset, (RigidObject, Articulation)):
raise ValueError(
f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{asset_cfg.name}'"
f" with type: '{type(asset)}'."
)
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device="cpu")
else:
env_ids = env_ids.cpu()
# retrieve material buffer
materials = asset.root_physx_view.get_material_properties()
# sample material properties from the given ranges
material_samples = torch.zeros(materials[env_ids].shape)
material_samples[..., 0].uniform_(*static_friction_range)
material_samples[..., 1].uniform_(*dynamic_friction_range)
material_samples[..., 2].uniform_(*restitution_range)
# create uniform range tensor for bucketing
lo = torch.tensor([static_friction_range[0], dynamic_friction_range[0], restitution_range[0]], device="cpu")
hi = torch.tensor([static_friction_range[1], dynamic_friction_range[1], restitution_range[1]], device="cpu")
# to avoid 64k material limit in physx, we bucket materials by binning randomized material properties
# into buckets based on the number of buckets specified
for d in range(3):
buckets = torch.tensor([(hi[d] - lo[d]) * i / num_buckets + lo[d] for i in range(num_buckets)], device="cpu")
material_samples[..., d] = buckets[torch.searchsorted(buckets, material_samples[..., d].contiguous()) - 1]
# update material buffer with new samples
if isinstance(asset, Articulation) and asset_cfg.body_ids != slice(None):
# obtain number of shapes per body (needed for indexing the material properties correctly)
# note: this is a workaround since the Articulation does not provide a direct way to obtain the number of shapes
# per body. We use the physics simulation view to obtain the number of shapes per body.
num_shapes_per_body = []
for link_path in asset.root_physx_view.link_paths[0]:
link_physx_view = asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore
num_shapes_per_body.append(link_physx_view.max_shapes)
# sample material properties from the given ranges
for body_id in asset_cfg.body_ids:
# start index of shape
start_idx = sum(num_shapes_per_body[:body_id])
# end index of shape
end_idx = start_idx + num_shapes_per_body[body_id]
# assign the new materials
# material ids are of shape: num_env_ids x num_shapes
# material_buckets are of shape: num_buckets x 3
materials[env_ids, start_idx:end_idx] = material_samples[:, start_idx:end_idx]
else:
materials[env_ids] = material_samples
# apply to simulation
asset.root_physx_view.set_material_properties(materials, env_ids)
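# Example usage (an illustrative sketch; ``EventTermCfg`` comes from the managers
# sub-package mentioned in the module docstring, and the ranges are assumptions):
#
#   from omni.isaac.lab.managers import EventTermCfg
#
#   physics_material = EventTermCfg(
#       func=randomize_rigid_body_material,
#       mode="startup",
#       params={
#           "asset_cfg": SceneEntityCfg("robot"),
#           "static_friction_range": (0.8, 1.2),
#           "dynamic_friction_range": (0.6, 1.0),
#           "restitution_range": (0.0, 0.2),
#           "num_buckets": 64,
#       },
#   )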
def add_body_mass(
env: ManagerBasedEnv,
env_ids: torch.Tensor | None,
mass_distribution_params: tuple[float, float],
asset_cfg: SceneEntityCfg,
):
"""Randomize the mass of the bodies by adding a random value sampled from the given range.
.. tip::
This function uses CPU tensors to assign the body masses. It is recommended to use this function
only during the initialization of the environment.
.. deprecated:: v0.4
This function is deprecated. Please use :func:`randomize_rigid_body_mass` with ``operation="add"`` instead.
"""
msg = "Event term 'add_body_mass' is deprecated. Please use 'randomize_rigid_body_mass' with operation='add'."
warnings.warn(msg, DeprecationWarning)
carb.log_warn(msg)
# call the new function
randomize_rigid_body_mass(
env, env_ids, asset_cfg, mass_distribution_params, operation="add", distribution="uniform"
)
def randomize_rigid_body_mass(
env: ManagerBasedEnv,
env_ids: torch.Tensor | None,
asset_cfg: SceneEntityCfg,
mass_distribution_params: tuple[float, float],
operation: Literal["add", "scale", "abs"],
distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
):
"""Randomize the mass of the bodies by adding, scaling, or setting random values.
This function allows randomizing the mass of the bodies of the asset. The function samples random values from the
given distribution parameters and adds, scales, or sets the values into the physics simulation based on the operation.
.. tip::
This function uses CPU tensors to assign the body masses. It is recommended to use this function
only during the initialization of the environment.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device="cpu")
else:
env_ids = env_ids.cpu()
# resolve body indices
if asset_cfg.body_ids == slice(None):
body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu")
else:
body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu")
# get the current masses of the bodies (num_assets, num_bodies)
masses = asset.root_physx_view.get_masses()
# apply randomization on default values
masses[env_ids[:, None], body_ids] = asset.data.default_mass[env_ids[:, None], body_ids].clone()
# sample from the given range
# note: we modify the masses in-place for all environments
# however, the setter takes care that only the masses of the specified environments are modified
masses = _randomize_prop_by_op(
masses, mass_distribution_params, env_ids, body_ids, operation=operation, distribution=distribution
)
# set the mass into the physics simulation
asset.root_physx_view.set_masses(masses, env_ids)
def randomize_physics_scene_gravity(
env: ManagerBasedEnv,
env_ids: torch.Tensor | None,
gravity_distribution_params: tuple[list[float], list[float]],
operation: Literal["add", "scale", "abs"],
distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
):
"""Randomize gravity by adding, scaling, or setting random values.
This function allows randomizing gravity of the physics scene. The function samples random values from the
given distribution parameters and adds, scales, or sets the values into the physics simulation based on the operation.
.. attention::
This function applies the same gravity to all the environments.
.. tip::
This function uses CPU tensors to assign gravity.
"""
# get the current gravity
gravity = torch.tensor(env.sim.cfg.gravity, device="cpu").unsqueeze(0)
dist_param_0 = torch.tensor(gravity_distribution_params[0], device="cpu")
dist_param_1 = torch.tensor(gravity_distribution_params[1], device="cpu")
gravity = _randomize_prop_by_op(
gravity,
(dist_param_0, dist_param_1),
None,
slice(None),
operation=operation,
distribution=distribution,
)[0]
# set the gravity into the physics simulation
sim_utils.SimulationContext.instance().physics_sim_view.set_gravity(carb.Float3(gravity[0], gravity[1], gravity[2]))
def randomize_actuator_gains(
env: ManagerBasedEnv,
env_ids: torch.Tensor | None,
asset_cfg: SceneEntityCfg,
stiffness_distribution_params: tuple[float, float] | None = None,
damping_distribution_params: tuple[float, float] | None = None,
operation: Literal["add", "scale", "abs"] = "abs",
distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
):
"""Randomize the actuator gains in an articulation by adding, scaling, or setting random values.
This function allows randomizing the actuator stiffness and damping gains.
The function samples random values from the given distribution parameters and applies the operation to the joint properties.
It then sets the values into the actuator models. If the distribution parameters are not provided for a particular property,
the function does not modify the property.
.. tip::
For implicit actuators, this function uses CPU tensors to assign the actuator gains into the simulation.
In such cases, it is recommended to use this function only during the initialization of the environment.
Raises:
NotImplementedError: If the joint indices are in explicit motor mode. This operation is currently
not supported for explicit actuator models.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device=asset.device)
# resolve joint indices
if asset_cfg.joint_ids == slice(None):
joint_ids_list = range(asset.num_joints)
joint_ids = slice(None) # for optimization purposes
else:
joint_ids_list = asset_cfg.joint_ids
joint_ids = torch.tensor(asset_cfg.joint_ids, dtype=torch.int, device=asset.device)
# check if none of the joint indices are in explicit motor mode
for joint_index in joint_ids_list:
for act_name, actuator in asset.actuators.items():
# if joint indices are a slice (i.e., all joints are captured) or the joint index is in the actuator
if actuator.joint_indices == slice(None) or joint_index in actuator.joint_indices:
if not isinstance(actuator, ImplicitActuator):
raise NotImplementedError(
"Event term 'randomize_actuator_stiffness_and_damping' is performed on asset"
f" '{asset_cfg.name}' on the joint '{asset.joint_names[joint_index]}' ('{joint_index}') which"
f" uses an explicit actuator model '{act_name}<{actuator.__class__.__name__}>'. This operation"
" is currently not supported for explicit actuator models."
)
# sample joint properties from the given ranges and set into the physics simulation
# -- stiffness
if stiffness_distribution_params is not None:
stiffness = asset.data.default_joint_stiffness.to(asset.device).clone()
stiffness = _randomize_prop_by_op(
stiffness, stiffness_distribution_params, env_ids, joint_ids, operation=operation, distribution=distribution
)[env_ids][:, joint_ids]
asset.write_joint_stiffness_to_sim(stiffness, joint_ids=joint_ids, env_ids=env_ids)
# -- damping
if damping_distribution_params is not None:
damping = asset.data.default_joint_damping.to(asset.device).clone()
damping = _randomize_prop_by_op(
damping, damping_distribution_params, env_ids, joint_ids, operation=operation, distribution=distribution
)[env_ids][:, joint_ids]
asset.write_joint_damping_to_sim(damping, joint_ids=joint_ids, env_ids=env_ids)
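# Example usage (an illustrative sketch; the values are assumptions):
#
#   actuator_gains = EventTermCfg(
#       func=randomize_actuator_gains,
#       mode="reset",
#       params={
#           "asset_cfg": SceneEntityCfg("robot", joint_names=[".*"]),
#           "stiffness_distribution_params": (0.75, 1.25),
#           "damping_distribution_params": (0.75, 1.25),
#           "operation": "scale",
#       },
#   )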
def randomize_joint_parameters(
env: ManagerBasedEnv,
env_ids: torch.Tensor | None,
asset_cfg: SceneEntityCfg,
friction_distribution_params: tuple[float, float] | None = None,
armature_distribution_params: tuple[float, float] | None = None,
lower_limit_distribution_params: tuple[float, float] | None = None,
upper_limit_distribution_params: tuple[float, float] | None = None,
operation: Literal["add", "scale", "abs"] = "abs",
distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
):
"""Randomize the joint parameters of an articulation by adding, scaling, or setting random values.
This function allows randomizing the joint parameters of the asset.
These correspond to the physics engine joint properties that affect the joint behavior.
The function samples random values from the given distribution parameters and applies the operation to the joint properties.
It then sets the values into the physics simulation. If the distribution parameters are not provided for a
particular property, the function does not modify the property.
.. tip::
This function uses CPU tensors to assign the joint properties. It is recommended to use this function
only during the initialization of the environment.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device=asset.device)
# resolve joint indices
if asset_cfg.joint_ids == slice(None):
joint_ids = slice(None) # for optimization purposes
else:
joint_ids = torch.tensor(asset_cfg.joint_ids, dtype=torch.int, device=asset.device)
# sample joint properties from the given ranges and set into the physics simulation
# -- friction
if friction_distribution_params is not None:
friction = asset.data.default_joint_friction.to(asset.device).clone()
friction = _randomize_prop_by_op(
friction, friction_distribution_params, env_ids, joint_ids, operation=operation, distribution=distribution
)[env_ids][:, joint_ids]
asset.write_joint_friction_to_sim(friction, joint_ids=joint_ids, env_ids=env_ids)
# -- armature
if armature_distribution_params is not None:
armature = asset.data.default_joint_armature.to(asset.device).clone()
armature = _randomize_prop_by_op(
armature, armature_distribution_params, env_ids, joint_ids, operation=operation, distribution=distribution
)[env_ids][:, joint_ids]
asset.write_joint_armature_to_sim(armature, joint_ids=joint_ids, env_ids=env_ids)
# -- dof limits
if lower_limit_distribution_params is not None or upper_limit_distribution_params is not None:
dof_limits = asset.data.default_joint_limits.to(asset.device).clone()
if lower_limit_distribution_params is not None:
lower_limits = dof_limits[..., 0]
lower_limits = _randomize_prop_by_op(
lower_limits,
lower_limit_distribution_params,
env_ids,
joint_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, joint_ids]
dof_limits[env_ids[:, None], joint_ids, 0] = lower_limits
if upper_limit_distribution_params is not None:
upper_limits = dof_limits[..., 1]
upper_limits = _randomize_prop_by_op(
upper_limits,
upper_limit_distribution_params,
env_ids,
joint_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, joint_ids]
dof_limits[env_ids[:, None], joint_ids, 1] = upper_limits
if (dof_limits[env_ids[:, None], joint_ids, 0] > dof_limits[env_ids[:, None], joint_ids, 1]).any():
raise ValueError(
"Randomization term 'randomize_joint_parameters' is setting lower joint limits that are greater than"
" upper joint limits."
)
asset.write_joint_limits_to_sim(dof_limits[env_ids][:, joint_ids], joint_ids=joint_ids, env_ids=env_ids)
def randomize_fixed_tendon_parameters(
env: ManagerBasedEnv,
env_ids: torch.Tensor | None,
asset_cfg: SceneEntityCfg,
stiffness_distribution_params: tuple[float, float] | None = None,
damping_distribution_params: tuple[float, float] | None = None,
limit_stiffness_distribution_params: tuple[float, float] | None = None,
lower_limit_distribution_params: tuple[float, float] | None = None,
upper_limit_distribution_params: tuple[float, float] | None = None,
rest_length_distribution_params: tuple[float, float] | None = None,
offset_distribution_params: tuple[float, float] | None = None,
operation: Literal["add", "scale", "abs"] = "abs",
distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform",
):
"""Randomize the fixed tendon parameters of an articulation by adding, scaling, or setting random values.
This function allows randomizing the fixed tendon parameters of the asset.
These correspond to the physics engine tendon properties that affect the joint behavior.
The function samples random values from the given distribution parameters and applies the operation to the tendon properties.
It then sets the values into the physics simulation. If the distribution parameters are not provided for a
particular property, the function does not modify the property.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device=asset.device)
# resolve joint indices
if asset_cfg.fixed_tendon_ids == slice(None):
fixed_tendon_ids = slice(None) # for optimization purposes
else:
fixed_tendon_ids = torch.tensor(asset_cfg.fixed_tendon_ids, dtype=torch.int, device=asset.device)
# sample tendon properties from the given ranges and set into the physics simulation
# -- stiffness
if stiffness_distribution_params is not None:
stiffness = asset.data.default_fixed_tendon_stiffness.clone()
stiffness = _randomize_prop_by_op(
stiffness,
stiffness_distribution_params,
env_ids,
fixed_tendon_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, fixed_tendon_ids]
asset.set_fixed_tendon_stiffness(stiffness, fixed_tendon_ids, env_ids)
# -- damping
if damping_distribution_params is not None:
damping = asset.data.default_fixed_tendon_damping.clone()
damping = _randomize_prop_by_op(
damping,
damping_distribution_params,
env_ids,
fixed_tendon_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, fixed_tendon_ids]
asset.set_fixed_tendon_damping(damping, fixed_tendon_ids, env_ids)
# -- limit stiffness
if limit_stiffness_distribution_params is not None:
limit_stiffness = asset.data.default_fixed_tendon_limit_stiffness.clone()
limit_stiffness = _randomize_prop_by_op(
limit_stiffness,
limit_stiffness_distribution_params,
env_ids,
fixed_tendon_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, fixed_tendon_ids]
asset.set_fixed_tendon_limit_stiffness(limit_stiffness, fixed_tendon_ids, env_ids)
# -- limits
if lower_limit_distribution_params is not None or upper_limit_distribution_params is not None:
limit = asset.data.default_fixed_tendon_limit.clone()
# -- lower limit
if lower_limit_distribution_params is not None:
lower_limit = limit[..., 0]
lower_limit = _randomize_prop_by_op(
lower_limit,
lower_limit_distribution_params,
env_ids,
fixed_tendon_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, fixed_tendon_ids]
limit[env_ids[:, None], fixed_tendon_ids, 0] = lower_limit
# -- upper limit
if upper_limit_distribution_params is not None:
upper_limit = limit[..., 1]
upper_limit = _randomize_prop_by_op(
upper_limit,
upper_limit_distribution_params,
env_ids,
fixed_tendon_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, fixed_tendon_ids]
limit[env_ids[:, None], fixed_tendon_ids, 1] = upper_limit
if (limit[env_ids[:, None], fixed_tendon_ids, 0] > limit[env_ids[:, None], fixed_tendon_ids, 1]).any():
raise ValueError(
"Randomization term 'randomize_fixed_tendon_parameters' is setting lower tendon limits that are greater"
" than upper tendon limits."
)
asset.set_fixed_tendon_limit(limit, fixed_tendon_ids, env_ids)
# -- rest length
if rest_length_distribution_params is not None:
rest_length = asset.data.default_fixed_tendon_rest_length.clone()
rest_length = _randomize_prop_by_op(
rest_length,
rest_length_distribution_params,
env_ids,
fixed_tendon_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, fixed_tendon_ids]
asset.set_fixed_tendon_rest_length(rest_length, fixed_tendon_ids, env_ids)
# -- offset
if offset_distribution_params is not None:
offset = asset.data.default_fixed_tendon_offset.clone()
offset = _randomize_prop_by_op(
offset,
offset_distribution_params,
env_ids,
fixed_tendon_ids,
operation=operation,
distribution=distribution,
)[env_ids][:, fixed_tendon_ids]
asset.set_fixed_tendon_offset(offset, fixed_tendon_ids, env_ids)
asset.write_fixed_tendon_properties_to_sim(fixed_tendon_ids, env_ids)
def apply_external_force_torque(
env: ManagerBasedEnv,
env_ids: torch.Tensor,
force_range: tuple[float, float],
torque_range: tuple[float, float],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Randomize the external forces and torques applied to the bodies.
This function creates a set of random forces and torques sampled from the given ranges. The number of forces
and torques is equal to the number of bodies times the number of environments. The forces and torques are
applied to the bodies by calling ``asset.set_external_force_and_torque``. The forces and torques are only
applied when ``asset.write_data_to_sim()`` is called in the environment.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device=asset.device)
# resolve number of bodies
num_bodies = len(asset_cfg.body_ids) if isinstance(asset_cfg.body_ids, list) else asset.num_bodies
# sample random forces and torques
size = (len(env_ids), num_bodies, 3)
forces = math_utils.sample_uniform(*force_range, size, asset.device)
torques = math_utils.sample_uniform(*torque_range, size, asset.device)
# set the forces and torques into the buffers
# note: these are only applied when you call: `asset.write_data_to_sim()`
asset.set_external_force_and_torque(forces, torques, env_ids=env_ids, body_ids=asset_cfg.body_ids)
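# For example (illustrative values only): small random pushes on a base link can be
# expressed with ``force_range=(-5.0, 5.0)``, ``torque_range=(-1.0, 1.0)`` and
# ``asset_cfg=SceneEntityCfg("robot", body_names="base")``.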
def push_by_setting_velocity(
env: ManagerBasedEnv,
env_ids: torch.Tensor,
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Push the asset by setting the root velocity to a random value within the given ranges.
This creates an effect similar to pushing the asset with a random impulse that changes the asset's velocity.
It samples the root velocity from the given ranges and sets the velocity into the physics simulation.
The function takes a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary
are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form ``(min, max)``.
If the dictionary does not contain a key, the velocity is set to zero for that axis.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# velocities
vel_w = asset.data.root_vel_w[env_ids]
# sample random velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
vel_w[:] = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], vel_w.shape, device=asset.device)
# set the velocities into the physics simulation
asset.write_root_velocity_to_sim(vel_w, env_ids=env_ids)
def reset_root_state_uniform(
env: ManagerBasedEnv,
env_ids: torch.Tensor,
pose_range: dict[str, tuple[float, float]],
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the asset root state to a random position and velocity uniformly within the given ranges.
This function randomizes the root position and velocity of the asset.
* It samples the root position from the given ranges and adds them to the default root position, before setting
them into the physics simulation.
* It samples the root orientation from the given ranges and sets them into the physics simulation.
* It samples the root velocity from the given ranges and sets them into the physics simulation.
The function takes a dictionary of pose and velocity ranges for each axis and rotation. The keys of the
dictionary are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form
``(min, max)``. If the dictionary does not contain a key, the position or velocity is set to zero for that axis.
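    For example, the following parameters (values are illustrative) randomize the planar pose and yaw on
    reset, while resetting all velocities to their default values:

    .. code-block:: python

        params = {
            "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
            "velocity_range": {},
        }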
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# get default root state
root_states = asset.data.default_root_state[env_ids].clone()
# poses
range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3]
orientations = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5])
# velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
velocities = root_states[:, 7:13] + rand_samples
# set into the physics simulation
asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids)
asset.write_root_velocity_to_sim(velocities, env_ids=env_ids)
def reset_root_state_with_random_orientation(
env: ManagerBasedEnv,
env_ids: torch.Tensor,
pose_range: dict[str, tuple[float, float]],
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the asset root position and velocities sampled randomly within the given ranges
and the asset root orientation sampled randomly from the SO(3).
This function randomizes the root position and velocity of the asset.
* It samples the root position from the given ranges and adds them to the default root position, before setting
them into the physics simulation.
    * It samples the root orientation uniformly from SO(3) and sets it into the physics simulation.
* It samples the root velocity from the given ranges and sets them into the physics simulation.
The function takes a dictionary of position and velocity ranges for each axis and rotation:
* :attr:`pose_range` - a dictionary of position ranges for each axis. The keys of the dictionary are ``x``,
      ``y``, and ``z``. The orientation is sampled uniformly from SO(3).
* :attr:`velocity_range` - a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary
are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``.
The values are tuples of the form ``(min, max)``. If the dictionary does not contain a particular key,
the position is set to zero for that axis.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
# get default root state
root_states = asset.data.default_root_state[env_ids].clone()
# poses
range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device)
positions = root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples
orientations = math_utils.random_orientation(len(env_ids), device=asset.device)
# velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
velocities = root_states[:, 7:13] + rand_samples
# set into the physics simulation
asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids)
asset.write_root_velocity_to_sim(velocities, env_ids=env_ids)
def reset_root_state_from_terrain(
env: ManagerBasedEnv,
env_ids: torch.Tensor,
pose_range: dict[str, tuple[float, float]],
velocity_range: dict[str, tuple[float, float]],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the asset root state by sampling a random valid pose from the terrain.
    This function samples a random valid pose (based on the terrain's flat patches) and sets the root state
of the asset to this position. The function also samples random velocities from the given ranges and sets them
into the physics simulation.
The function takes a dictionary of position and velocity ranges for each axis and rotation:
* :attr:`pose_range` - a dictionary of pose ranges for each axis. The keys of the dictionary are ``roll``,
``pitch``, and ``yaw``. The position is sampled from the flat patches of the terrain.
* :attr:`velocity_range` - a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary
are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``.
The values are tuples of the form ``(min, max)``. If the dictionary does not contain a particular key,
the position is set to zero for that axis.
Note:
The function expects the terrain to have valid flat patches under the key "init_pos". The flat patches
are used to sample the random pose for the robot.
Raises:
ValueError: If the terrain does not have valid flat patches under the key "init_pos".
"""
# access the used quantities (to enable type-hinting)
asset: RigidObject | Articulation = env.scene[asset_cfg.name]
terrain: TerrainImporter = env.scene.terrain
# obtain all flat patches corresponding to the valid poses
valid_positions: torch.Tensor = terrain.flat_patches.get("init_pos")
if valid_positions is None:
raise ValueError(
"The event term 'reset_root_state_from_terrain' requires valid flat patches under 'init_pos'."
f" Found: {list(terrain.flat_patches.keys())}"
)
# sample random valid poses
ids = torch.randint(0, valid_positions.shape[2], size=(len(env_ids),), device=env.device)
positions = valid_positions[terrain.terrain_levels[env_ids], terrain.terrain_types[env_ids], ids]
positions += asset.data.default_root_state[env_ids, :3]
# sample random orientations
range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device)
# convert to quaternions
orientations = math_utils.quat_from_euler_xyz(rand_samples[:, 0], rand_samples[:, 1], rand_samples[:, 2])
# sample random velocities
range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]]
ranges = torch.tensor(range_list, device=asset.device)
rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=asset.device)
    velocities = asset.data.default_root_state[env_ids, 7:13] + rand_samples
# set into the physics simulation
asset.write_root_pose_to_sim(torch.cat([positions, orientations], dim=-1), env_ids=env_ids)
asset.write_root_velocity_to_sim(velocities, env_ids=env_ids)
def reset_joints_by_scale(
env: ManagerBasedEnv,
env_ids: torch.Tensor,
position_range: tuple[float, float],
velocity_range: tuple[float, float],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the robot joints by scaling the default position and velocity by the given ranges.
This function samples random values from the given ranges and scales the default joint positions and velocities
by these values. The scaled values are then set into the physics simulation.
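    For example, ``position_range=(0.9, 1.1)`` scales each default joint position by a random factor in
    ``[0.9, 1.1]``. A minimal sketch (``EventTerm`` is assumed to alias
    :class:`omni.isaac.lab.managers.EventTermCfg`):

    .. code-block:: python

        reset_robot_joints = EventTerm(
            func=reset_joints_by_scale,
            mode="reset",
            params={"position_range": (0.9, 1.1), "velocity_range": (0.0, 0.0)},
        )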
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# get default joint state
joint_pos = asset.data.default_joint_pos[env_ids].clone()
joint_vel = asset.data.default_joint_vel[env_ids].clone()
# scale these values randomly
joint_pos *= math_utils.sample_uniform(*position_range, joint_pos.shape, joint_pos.device)
joint_vel *= math_utils.sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device)
# clamp joint pos to limits
joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids]
joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1])
# clamp joint vel to limits
joint_vel_limits = asset.data.soft_joint_vel_limits[env_ids]
joint_vel = joint_vel.clamp_(-joint_vel_limits, joint_vel_limits)
# set into the physics simulation
asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
def reset_joints_by_offset(
env: ManagerBasedEnv,
env_ids: torch.Tensor,
position_range: tuple[float, float],
velocity_range: tuple[float, float],
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
):
"""Reset the robot joints with offsets around the default position and velocity by the given ranges.
This function samples random values from the given ranges and biases the default joint positions and velocities
by these values. The biased values are then set into the physics simulation.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# get default joint state
joint_pos = asset.data.default_joint_pos[env_ids].clone()
joint_vel = asset.data.default_joint_vel[env_ids].clone()
# bias these values randomly
joint_pos += math_utils.sample_uniform(*position_range, joint_pos.shape, joint_pos.device)
joint_vel += math_utils.sample_uniform(*velocity_range, joint_vel.shape, joint_vel.device)
# clamp joint pos to limits
joint_pos_limits = asset.data.soft_joint_pos_limits[env_ids]
joint_pos = joint_pos.clamp_(joint_pos_limits[..., 0], joint_pos_limits[..., 1])
# clamp joint vel to limits
joint_vel_limits = asset.data.soft_joint_vel_limits[env_ids]
joint_vel = joint_vel.clamp_(-joint_vel_limits, joint_vel_limits)
# set into the physics simulation
asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
def reset_scene_to_default(env: ManagerBasedEnv, env_ids: torch.Tensor):
"""Reset the scene to the default state specified in the scene configuration."""
# rigid bodies
for rigid_object in env.scene.rigid_objects.values():
# obtain default and deal with the offset for env origins
default_root_state = rigid_object.data.default_root_state[env_ids].clone()
default_root_state[:, 0:3] += env.scene.env_origins[env_ids]
# set into the physics simulation
rigid_object.write_root_state_to_sim(default_root_state, env_ids=env_ids)
# articulations
for articulation_asset in env.scene.articulations.values():
# obtain default and deal with the offset for env origins
default_root_state = articulation_asset.data.default_root_state[env_ids].clone()
default_root_state[:, 0:3] += env.scene.env_origins[env_ids]
# set into the physics simulation
articulation_asset.write_root_state_to_sim(default_root_state, env_ids=env_ids)
# obtain default joint positions
default_joint_pos = articulation_asset.data.default_joint_pos[env_ids].clone()
default_joint_vel = articulation_asset.data.default_joint_vel[env_ids].clone()
# set into the physics simulation
articulation_asset.write_joint_state_to_sim(default_joint_pos, default_joint_vel, env_ids=env_ids)
"""
Internal helper functions.
"""
def _randomize_prop_by_op(
data: torch.Tensor,
distribution_parameters: tuple[float, float],
dim_0_ids: torch.Tensor | None,
dim_1_ids: torch.Tensor | slice,
operation: Literal["add", "scale", "abs"],
distribution: Literal["uniform", "log_uniform", "gaussian"],
) -> torch.Tensor:
"""Perform data randomization based on the given operation and distribution.
Args:
data: The data tensor to be randomized. Shape is (dim_0, dim_1).
distribution_parameters: The parameters for the distribution to sample values from.
dim_0_ids: The indices of the first dimension to randomize.
dim_1_ids: The indices of the second dimension to randomize.
operation: The operation to perform on the data. Options: 'add', 'scale', 'abs'.
        distribution: The distribution to sample the random values from. Options: 'uniform', 'log_uniform', 'gaussian'.
Returns:
The data tensor after randomization. Shape is (dim_0, dim_1).
Raises:
NotImplementedError: If the operation or distribution is not supported.
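    Example:
        A tiny illustrative call that scales two joints of the first environment in-place:

        .. code-block:: python

            data = torch.ones(4, 3)  # (num_envs, num_joints)
            data = _randomize_prop_by_op(
                data, (0.9, 1.1), torch.tensor([0]), torch.tensor([0, 2]),
                operation="scale", distribution="uniform",
            )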
"""
# resolve shape
# -- dim 0
if dim_0_ids is None:
n_dim_0 = data.shape[0]
dim_0_ids = slice(None)
else:
n_dim_0 = len(dim_0_ids)
dim_0_ids = dim_0_ids[:, None]
# -- dim 1
if isinstance(dim_1_ids, slice):
n_dim_1 = data.shape[1]
else:
n_dim_1 = len(dim_1_ids)
# resolve the distribution
if distribution == "uniform":
dist_fn = math_utils.sample_uniform
elif distribution == "log_uniform":
dist_fn = math_utils.sample_log_uniform
elif distribution == "gaussian":
dist_fn = math_utils.sample_gaussian
else:
raise NotImplementedError(
f"Unknown distribution: '{distribution}' for joint properties randomization."
" Please use 'uniform', 'log_uniform', 'gaussian'."
)
# perform the operation
if operation == "add":
data[dim_0_ids, dim_1_ids] += dist_fn(*distribution_parameters, (n_dim_0, n_dim_1), device=data.device)
elif operation == "scale":
data[dim_0_ids, dim_1_ids] *= dist_fn(*distribution_parameters, (n_dim_0, n_dim_1), device=data.device)
elif operation == "abs":
data[dim_0_ids, dim_1_ids] = dist_fn(*distribution_parameters, (n_dim_0, n_dim_1), device=data.device)
else:
raise NotImplementedError(
f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'scale', or 'abs'."
)
return data
| 42,465 |
Python
| 45.975664 | 129 | 0.676086 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/terminations.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Common functions that can be used to activate certain terminations.
The functions can be passed to the :class:`omni.isaac.lab.managers.TerminationTermCfg` object to enable
the termination introduced by the function.
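For example, a minimal sketch (term names are illustrative; the functions are typically referenced
through the environment's ``mdp`` module):

.. code-block:: python

    from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm

    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    base_height = DoneTerm(func=mdp.root_height_below_minimum, params={"minimum_height": 0.3})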
"""
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.lab.assets import Articulation, RigidObject
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.sensors import ContactSensor
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab.managers.command_manager import CommandTerm
"""
MDP terminations.
"""
def time_out(env: ManagerBasedRLEnv) -> torch.Tensor:
"""Terminate the episode when the episode length exceeds the maximum episode length."""
return env.episode_length_buf >= env.max_episode_length
def command_resample(env: ManagerBasedRLEnv, command_name: str, num_resamples: int = 1) -> torch.Tensor:
"""Terminate the episode based on the total number of times commands have been re-sampled.
    This makes the effective episode length variable, since it depends on how the commands are
    sampled. It is useful in situations where delayed rewards are used :cite:`rudin2022advanced`.
"""
command: CommandTerm = env.command_manager.get_term(command_name)
return torch.logical_and((command.time_left <= env.step_dt), (command.command_counter == num_resamples))
"""
Root terminations.
"""
def bad_orientation(
env: ManagerBasedRLEnv, limit_angle: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's orientation is too far from the desired orientation limits.
This is computed by checking the angle between the projected gravity vector and the z-axis.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return torch.acos(-asset.data.projected_gravity_b[:, 2]).abs() > limit_angle
def root_height_below_minimum(
env: ManagerBasedRLEnv, minimum_height: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's root height is below the minimum height.
Note:
This is currently only supported for flat terrains, i.e. the minimum height is in the world frame.
"""
# extract the used quantities (to enable type-hinting)
asset: RigidObject = env.scene[asset_cfg.name]
return asset.data.root_pos_w[:, 2] < minimum_height
"""
Joint terminations.
"""
def joint_pos_out_of_limit(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Terminate when the asset's joint positions are outside of the soft joint limits."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute any violations
    out_of_upper_limits = asset.data.joint_pos > asset.data.soft_joint_pos_limits[..., 1]
    out_of_lower_limits = asset.data.joint_pos < asset.data.soft_joint_pos_limits[..., 0]
    out_of_limits = torch.logical_or(out_of_upper_limits, out_of_lower_limits)
    # select the configured joints before reducing over the joint dimension
    return torch.any(out_of_limits[:, asset_cfg.joint_ids], dim=1)
def joint_pos_out_of_manual_limit(
env: ManagerBasedRLEnv, bounds: tuple[float, float], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's joint positions are outside of the configured bounds.
Note:
This function is similar to :func:`joint_pos_out_of_limit` but allows the user to specify the bounds manually.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
if asset_cfg.joint_ids is None:
asset_cfg.joint_ids = slice(None)
# compute any violations
out_of_upper_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] > bounds[1], dim=1)
out_of_lower_limits = torch.any(asset.data.joint_pos[:, asset_cfg.joint_ids] < bounds[0], dim=1)
return torch.logical_or(out_of_upper_limits, out_of_lower_limits)
def joint_vel_out_of_limit(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
"""Terminate when the asset's joint velocities are outside of the soft joint limits."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute any violations
limits = asset.data.soft_joint_vel_limits
return torch.any(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) > limits[:, asset_cfg.joint_ids], dim=1)
def joint_vel_out_of_manual_limit(
env: ManagerBasedRLEnv, max_velocity: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when the asset's joint velocities are outside the provided limits."""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
# compute any violations
return torch.any(torch.abs(asset.data.joint_vel[:, asset_cfg.joint_ids]) > max_velocity, dim=1)
def joint_effort_out_of_limit(
env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Terminate when effort applied on the asset's joints are outside of the soft joint limits.
In the actuators, the applied torque are the efforts applied on the joints. These are computed by clipping
the computed torques to the joint limits. Hence, we check if the computed torques are equal to the applied
torques.
"""
# extract the used quantities (to enable type-hinting)
asset: Articulation = env.scene[asset_cfg.name]
    # a joint effort is out of limit when the computed torque had to be clipped, i.e. when the
    # applied torque differs from the computed torque
    out_of_limits = ~torch.isclose(
        asset.data.computed_torque[:, asset_cfg.joint_ids], asset.data.applied_torque[:, asset_cfg.joint_ids]
    )
return torch.any(out_of_limits, dim=1)
"""
Contact sensor.
"""
def illegal_contact(env: ManagerBasedRLEnv, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
"""Terminate when the contact force on the sensor exceeds the force threshold."""
# extract the used quantities (to enable type-hinting)
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
net_contact_forces = contact_sensor.data.net_forces_w_history
# check if any contact force exceeds the threshold
return torch.any(
torch.max(torch.norm(net_contact_forces[:, :, sensor_cfg.body_ids], dim=-1), dim=1)[0] > threshold, dim=1
)
| 6,680 |
Python
| 41.018868 | 120 | 0.722754 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions_to_limits.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
import carb
import omni.isaac.lab.utils.math as math_utils
import omni.isaac.lab.utils.string as string_utils
from omni.isaac.lab.assets.articulation import Articulation
from omni.isaac.lab.managers.action_manager import ActionTerm
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedEnv
from . import actions_cfg
class JointPositionToLimitsAction(ActionTerm):
"""Joint position action term that scales the input actions to the joint limits and applies them to the
articulation's joints.
This class is similar to the :class:`JointPositionAction` class. However, it performs additional
re-scaling of input actions to the actuator joint position limits.
While processing the actions, it performs the following operations:
1. Apply scaling to the raw actions based on :attr:`actions_cfg.JointPositionToLimitsActionCfg.scale`.
2. Clip the scaled actions to the range [-1, 1] and re-scale them to the joint limits if
:attr:`actions_cfg.JointPositionToLimitsActionCfg.rescale_to_limits` is set to True.
The processed actions are then sent as position commands to the articulation's joints.
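    A minimal configuration sketch (field values are illustrative), assuming the corresponding
    :class:`~omni.isaac.lab.envs.mdp.actions.actions_cfg.JointPositionToLimitsActionCfg`:

    .. code-block:: python

        joint_pos = JointPositionToLimitsActionCfg(
            asset_name="robot",
            joint_names=[".*"],
            scale=1.0,
            rescale_to_limits=True,
        )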
"""
cfg: actions_cfg.JointPositionToLimitsActionCfg
"""The configuration of the action term."""
_asset: Articulation
"""The articulation asset on which the action term is applied."""
_scale: torch.Tensor | float
"""The scaling factor applied to the input action."""
def __init__(self, cfg: actions_cfg.JointPositionToLimitsActionCfg, env: ManagerBasedEnv):
# initialize the action term
super().__init__(cfg, env)
# resolve the joints over which the action term is applied
self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names)
self._num_joints = len(self._joint_ids)
# log the resolved joint names for debugging
carb.log_info(
f"Resolved joint names for the action term {self.__class__.__name__}:"
f" {self._joint_names} [{self._joint_ids}]"
)
# Avoid indexing across all joints for efficiency
if self._num_joints == self._asset.num_joints:
self._joint_ids = slice(None)
# create tensors for raw and processed actions
self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
self._processed_actions = torch.zeros_like(self.raw_actions)
# parse scale
if isinstance(cfg.scale, (float, int)):
self._scale = float(cfg.scale)
elif isinstance(cfg.scale, dict):
self._scale = torch.ones(self.num_envs, self.action_dim, device=self.device)
# resolve the dictionary config
index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.scale, self._joint_names)
self._scale[:, index_list] = torch.tensor(value_list, device=self.device)
else:
raise ValueError(f"Unsupported scale type: {type(cfg.scale)}. Supported types are float and dict.")
"""
Properties.
"""
@property
def action_dim(self) -> int:
return self._num_joints
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self._processed_actions
"""
Operations.
"""
def process_actions(self, actions: torch.Tensor):
# store the raw actions
self._raw_actions[:] = actions
# apply affine transformations
self._processed_actions = self._raw_actions * self._scale
# rescale the position targets if configured
# this is useful when the input actions are in the range [-1, 1]
if self.cfg.rescale_to_limits:
# clip to [-1, 1]
actions = self._processed_actions.clamp(-1.0, 1.0)
# rescale within the joint limits
actions = math_utils.unscale_transform(
actions,
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 0],
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 1],
)
self._processed_actions[:] = actions[:]
def apply_actions(self):
# set position targets
self._asset.set_joint_position_target(self.processed_actions, joint_ids=self._joint_ids)
def reset(self, env_ids: Sequence[int] | None = None) -> None:
self._raw_actions[env_ids] = 0.0
class EMAJointPositionToLimitsAction(JointPositionToLimitsAction):
r"""Joint action term that applies exponential moving average (EMA) over the processed actions as the
articulation's joints position commands.
Exponential moving average (EMA) is a type of moving average that gives more weight to the most recent data points.
This action term applies the processed actions as moving average position action commands.
The moving average is computed as:
.. math::
\text{applied action} = \alpha \times \text{processed actions} + (1 - \alpha) \times \text{previous applied action}
    where :math:`\alpha` is the weight for the moving average, :math:`\text{processed actions}` are the
    processed actions, and :math:`\text{previous applied action}` is the previous target that was applied to
    the articulation's joints.
In the trivial case where the weight is 1.0, the action term behaves exactly like
the :class:`JointPositionToLimitsAction` class.
On reset, the previous action is initialized to the current joint positions of the articulation's joints.
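    For example, with a scalar weight :math:`\alpha = 0.1`, a previous applied action of :math:`0.0` and a
    new processed action of :math:`1.0`, the applied action is :math:`0.1 \times 1.0 + 0.9 \times 0.0 = 0.1`,
    so the commanded targets approach new set-points gradually.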
"""
cfg: actions_cfg.EMAJointPositionToLimitsActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: actions_cfg.EMAJointPositionToLimitsActionCfg, env: ManagerBasedEnv):
# initialize the action term
super().__init__(cfg, env)
# parse and save the moving average weight
if isinstance(cfg.alpha, float):
# check that the weight is in the valid range
if not 0.0 <= cfg.alpha <= 1.0:
raise ValueError(f"Moving average weight must be in the range [0, 1]. Got {cfg.alpha}.")
self._alpha = cfg.alpha
elif isinstance(cfg.alpha, dict):
self._alpha = torch.ones((env.num_envs, self.action_dim), device=self.device)
# resolve the dictionary config
index_list, names_list, value_list = string_utils.resolve_matching_names_values(
cfg.alpha, self._joint_names
)
# check that the weights are in the valid range
for name, value in zip(names_list, value_list):
if not 0.0 <= value <= 1.0:
raise ValueError(
f"Moving average weight must be in the range [0, 1]. Got {value} for joint {name}."
)
self._alpha[:, index_list] = torch.tensor(value_list, device=self.device)
else:
raise ValueError(
f"Unsupported moving average weight type: {type(cfg.alpha)}. Supported types are float and dict."
)
# initialize the previous targets
self._prev_applied_actions = torch.zeros_like(self.processed_actions)
def reset(self, env_ids: Sequence[int] | None = None) -> None:
# check if specific environment ids are provided
if env_ids is None:
env_ids = slice(None)
super().reset(env_ids)
# reset history to current joint positions
self._prev_applied_actions[env_ids, :] = self._asset.data.joint_pos[env_ids, self._joint_ids]
def process_actions(self, actions: torch.Tensor):
# apply affine transformations
super().process_actions(actions)
# set position targets as moving average
ema_actions = self._alpha * self._processed_actions
ema_actions += (1.0 - self._alpha) * self._prev_applied_actions
# clamp the targets
self._processed_actions[:] = torch.clamp(
ema_actions,
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 0],
self._asset.data.soft_joint_pos_limits[:, self._joint_ids, 1],
)
# update previous targets
self._prev_applied_actions[:] = self._processed_actions[:]
| 8,538 |
Python
| 40.857843 | 123 | 0.651792 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/converters/mesh_converter.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import asyncio
import os
import omni
import omni.kit.commands
import omni.usd
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Usd, UsdGeom, UsdPhysics, UsdUtils
from omni.isaac.lab.sim.converters.asset_converter_base import AssetConverterBase
from omni.isaac.lab.sim.converters.mesh_converter_cfg import MeshConverterCfg
from omni.isaac.lab.sim.schemas import schemas
from omni.isaac.lab.sim.utils import export_prim_to_file
class MeshConverter(AssetConverterBase):
"""Converter for a mesh file in OBJ / STL / FBX format to a USD file.
This class wraps around the `omni.kit.asset_converter`_ extension to provide a lazy implementation
for mesh to USD conversion. It stores the output USD file in an instanceable format since that is
what is typically used in all learning related applications.
To make the asset instanceable, we must follow a certain structure dictated by how USD scene-graph
instancing and physics work. The rigid body component must be added to each instance and not the
referenced asset (i.e. the prototype prim itself). This is because the rigid body component defines
properties that are specific to each instance and cannot be shared under the referenced asset. For
more information, please check the `documentation <https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/rigid-bodies.html#instancing-rigid-bodies>`_.
Due to the above, we follow the following structure:
* ``{prim_path}`` - The root prim that is an Xform with the rigid body and mass APIs if configured.
* ``{prim_path}/geometry`` - The prim that contains the mesh and optionally the materials if configured.
If instancing is enabled, this prim will be an instanceable reference to the prototype prim.
.. _omni.kit.asset_converter: https://docs.omniverse.nvidia.com/extensions/latest/ext_asset-converter.html
.. caution::
When converting STL files, Z-up convention is assumed, even though this is not the default for many CAD
export programs. Asset orientation convention can either be modified directly in the CAD program's export
process or an offset can be added within the config in Isaac Lab.
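    A minimal usage sketch (paths are illustrative):

    .. code-block:: python

        from omni.isaac.lab.sim.converters import MeshConverter, MeshConverterCfg

        cfg = MeshConverterCfg(
            asset_path="/path/to/mesh.obj",
            usd_dir="/path/to/output",
            make_instanceable=True,
        )
        converter = MeshConverter(cfg)
        print(converter.usd_path)  # location of the generated USD file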
"""
cfg: MeshConverterCfg
"""The configuration instance for mesh to USD conversion."""
def __init__(self, cfg: MeshConverterCfg):
"""Initializes the class.
Args:
cfg: The configuration instance for mesh to USD conversion.
"""
super().__init__(cfg=cfg)
"""
Implementation specific methods.
"""
def _convert_asset(self, cfg: MeshConverterCfg):
"""Generate USD from OBJ, STL or FBX.
It stores the asset in the following format:
        /file_name (default prim)
          |- /geometry <- Made instanceable if requested
            |- /Looks
            |- /mesh
Args:
cfg: The configuration for conversion of mesh to USD.
Raises:
RuntimeError: If the conversion using the Omniverse asset converter fails.
"""
# resolve mesh name and format
mesh_file_basename, mesh_file_format = os.path.basename(cfg.asset_path).split(".")
mesh_file_format = mesh_file_format.lower()
# Convert USD
asyncio.get_event_loop().run_until_complete(
self._convert_mesh_to_usd(
in_file=cfg.asset_path, out_file=self.usd_path, prim_path=f"/{mesh_file_basename}"
)
)
# Open converted USD stage
# note: This opens a new stage and does not use the stage created earlier by the user
# create a new stage
stage = Usd.Stage.Open(self.usd_path)
# add USD to stage cache
stage_id = UsdUtils.StageCache.Get().Insert(stage)
# Get the default prim (which is the root prim) -- "/{mesh_file_basename}"
xform_prim = stage.GetDefaultPrim()
geom_prim = stage.GetPrimAtPath(f"/{mesh_file_basename}/geometry")
# Move all meshes to underneath new Xform
for child_mesh_prim in geom_prim.GetChildren():
if child_mesh_prim.GetTypeName() == "Mesh":
# Apply collider properties to mesh
if cfg.collision_props is not None:
# -- Collision approximation to mesh
# TODO: https://github.com/isaac-orbit/orbit/issues/163 Move this to a new Schema
mesh_collision_api = UsdPhysics.MeshCollisionAPI.Apply(child_mesh_prim)
mesh_collision_api.GetApproximationAttr().Set(cfg.collision_approximation)
# -- Collider properties such as offset, scale, etc.
schemas.define_collision_properties(
prim_path=child_mesh_prim.GetPath(), cfg=cfg.collision_props, stage=stage
)
# Delete the old Xform and make the new Xform the default prim
stage.SetDefaultPrim(xform_prim)
# Handle instanceable
# Create a new Xform prim that will be the prototype prim
if cfg.make_instanceable:
# Export Xform to a file so we can reference it from all instances
export_prim_to_file(
path=os.path.join(self.usd_dir, self.usd_instanceable_meshes_path),
source_prim_path=geom_prim.GetPath(),
stage=stage,
)
# Delete the original prim that will now be a reference
geom_prim_path = geom_prim.GetPath().pathString
omni.kit.commands.execute("DeletePrims", paths=[geom_prim_path], stage=stage)
# Update references to exported Xform and make it instanceable
geom_undef_prim = stage.DefinePrim(geom_prim_path)
geom_undef_prim.GetReferences().AddReference(self.usd_instanceable_meshes_path, primPath=geom_prim_path)
geom_undef_prim.SetInstanceable(True)
# Apply mass and rigid body properties after everything else
# Properties are applied to the top level prim to avoid the case where all instances of this
# asset unintentionally share the same rigid body properties
# apply mass properties
if cfg.mass_props is not None:
schemas.define_mass_properties(prim_path=xform_prim.GetPath(), cfg=cfg.mass_props, stage=stage)
# apply rigid body properties
if cfg.rigid_props is not None:
schemas.define_rigid_body_properties(prim_path=xform_prim.GetPath(), cfg=cfg.rigid_props, stage=stage)
# Save changes to USD stage
stage.Save()
if stage_id is not None:
UsdUtils.StageCache.Get().Erase(stage_id)
"""
Helper methods.
"""
@staticmethod
async def _convert_mesh_to_usd(
in_file: str, out_file: str, prim_path: str = "/World", load_materials: bool = True
) -> bool:
"""Convert mesh from supported file types to USD.
This function uses the Omniverse Asset Converter extension to convert a mesh file to USD.
It is an asynchronous function and should be called using `asyncio.get_event_loop().run_until_complete()`.
The converted asset is stored in the USD format in the specified output file.
The USD file has Y-up axis and is scaled to meters.
The asset hierarchy is arranged as follows:
.. code-block:: none
prim_path (default prim)
|- /geometry/Looks
|- /geometry/mesh
Args:
in_file: The file to convert.
out_file: The path to store the output file.
prim_path: The prim path of the mesh.
load_materials: Set to True to enable attaching materials defined in the input file
to the generated USD mesh. Defaults to True.
Returns:
True if the conversion succeeds.
"""
enable_extension("omni.kit.asset_converter")
enable_extension("omni.usd.metrics.assembler")
import omni.kit.asset_converter
import omni.usd
from omni.metrics.assembler.core import get_metrics_assembler_interface
# Create converter context
converter_context = omni.kit.asset_converter.AssetConverterContext()
# Set up converter settings
# Don't import/export materials
converter_context.ignore_materials = not load_materials
converter_context.ignore_animations = True
converter_context.ignore_camera = True
converter_context.ignore_light = True
# Merge all meshes into one
converter_context.merge_all_meshes = True
        # Sets world units to meters; this will also scale the asset if it's a centimeter model.
# This does not work right now :(, so we need to scale the mesh manually
converter_context.use_meter_as_world_unit = True
converter_context.baking_scales = True
# Uses double precision for all transform ops.
converter_context.use_double_precision_to_usd_transform_op = True
# Create converter task
instance = omni.kit.asset_converter.get_instance()
out_file_non_metric = out_file.replace(".usd", "_non_metric.usd")
task = instance.create_converter_task(in_file, out_file_non_metric, None, converter_context)
# Start conversion task and wait for it to finish
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break
temp_stage = Usd.Stage.CreateInMemory()
UsdGeom.SetStageUpAxis(temp_stage, UsdGeom.Tokens.z)
UsdGeom.SetStageMetersPerUnit(temp_stage, 1.0)
UsdPhysics.SetStageKilogramsPerUnit(temp_stage, 1.0)
base_prim = temp_stage.DefinePrim(prim_path, "Xform")
prim = temp_stage.DefinePrim(f"{prim_path}/geometry", "Xform")
prim.GetReferences().AddReference(out_file_non_metric)
cache = UsdUtils.StageCache.Get()
cache.Insert(temp_stage)
stage_id = cache.GetId(temp_stage).ToLongInt()
get_metrics_assembler_interface().resolve_stage(stage_id)
temp_stage.SetDefaultPrim(base_prim)
temp_stage.Export(out_file)
return success
| 10,403 |
Python
| 44.038961 | 164 | 0.657118 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/utils/noise/noise_model.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from . import noise_cfg
def constant_noise(data: torch.Tensor, cfg: noise_cfg.ConstantNoiseCfg) -> torch.Tensor:
"""Constant noise."""
if cfg.operation == "add":
return data + cfg.bias
elif cfg.operation == "scale":
return data * cfg.bias
elif cfg.operation == "abs":
return torch.zeros_like(data) + cfg.bias
else:
raise ValueError(f"Unknown operation in noise: {cfg.operation}")
def uniform_noise(data: torch.Tensor, cfg: noise_cfg.UniformNoiseCfg) -> torch.Tensor:
"""Uniform noise."""
if cfg.operation == "add":
return data + torch.rand_like(data) * (cfg.n_max - cfg.n_min) + cfg.n_min
elif cfg.operation == "scale":
return data * (torch.rand_like(data) * (cfg.n_max - cfg.n_min) + cfg.n_min)
elif cfg.operation == "abs":
return torch.rand_like(data) * (cfg.n_max - cfg.n_min) + cfg.n_min
else:
raise ValueError(f"Unknown operation in noise: {cfg.operation}")
def gaussian_noise(data: torch.Tensor, cfg: noise_cfg.GaussianNoiseCfg) -> torch.Tensor:
"""Gaussian noise."""
if cfg.operation == "add":
return data + cfg.mean + cfg.std * torch.randn_like(data)
elif cfg.operation == "scale":
return data * (cfg.mean + cfg.std * torch.randn_like(data))
elif cfg.operation == "abs":
return cfg.mean + cfg.std * torch.randn_like(data)
else:
raise ValueError(f"Unknown operation in noise: {cfg.operation}")
class NoiseModel:
"""Base class for noise models."""
def __init__(self, num_envs: int, noise_model_cfg: noise_cfg.NoiseModelCfg):
"""Initialize the noise model.
Args:
num_envs: The number of environments.
noise_model_cfg: The noise configuration to use.
"""
self._num_envs = num_envs
self._noise_model_cfg = noise_model_cfg
def apply(self, data: torch.Tensor) -> torch.Tensor:
r"""Apply the noise to the data.
Args:
data: The data to apply the noise to, which is a tensor of shape (num_envs, \*data_shape).
"""
return self._noise_model_cfg.noise_cfg.func(data, self._noise_model_cfg.noise_cfg)
def reset(self, env_ids: Sequence[int]):
"""Reset the noise model.
This method can be implemented by derived classes to reset the noise model.
This is useful when implementing temporal noise models such as random walk.
Args:
env_ids: The environment ids to reset the noise model for.
"""
pass
class NoiseModelWithAdditiveBias(NoiseModel):
"""Noise model with an additive bias.
    The bias term is sampled from the specified distribution on reset.
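    A minimal sketch using the configuration classes from :mod:`noise_cfg` (values are illustrative):

    .. code-block:: python

        cfg = NoiseModelWithAdditiveBiasCfg(
            noise_cfg=GaussianNoiseCfg(std=0.01),
            bias_noise_cfg=GaussianNoiseCfg(std=0.05),
        )
        model = NoiseModelWithAdditiveBias(num_envs=16, noise_model_cfg=cfg, device="cpu")
        noisy = model.apply(torch.zeros(16, 3))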
"""
def __init__(self, num_envs: int, noise_model_cfg: noise_cfg.NoiseModelWithAdditiveBiasCfg, device: str):
super().__init__(num_envs, noise_model_cfg)
self._device = device
self._bias_noise_cfg = noise_model_cfg.bias_noise_cfg
self._bias = torch.zeros((num_envs, 1), device=self._device)
def apply(self, data: torch.Tensor) -> torch.Tensor:
r"""Apply the noise + bias.
Args:
data: The data to apply the noise to, which is a tensor of shape (num_envs, \*data_shape).
"""
return super().apply(data) + self._bias
def reset(self, env_ids: Sequence[int]):
"""Reset the noise model.
This method resets the bias term for the specified environments.
Args:
env_ids: The environment ids to reset the noise model for.
"""
self._bias[env_ids] = self._bias_noise_cfg.func(self._bias[env_ids], self._bias_noise_cfg)
| 3,932 |
Python
| 33.2 | 109 | 0.631994 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/utils/noise/noise_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from collections.abc import Callable
from dataclasses import MISSING
from typing import Literal
from omni.isaac.lab.utils import configclass
from . import noise_model
@configclass
class NoiseCfg:
"""Base configuration for a noise term."""
func: Callable[[torch.Tensor, NoiseCfg], torch.Tensor] = MISSING
"""The function to be called for applying the noise.
Note:
The shape of the input and output tensors must be the same.
"""
operation: Literal["add", "scale", "abs"] = "add"
"""The operation to apply the noise on the data. Defaults to "add"."""
@configclass
class ConstantNoiseCfg(NoiseCfg):
"""Configuration for an additive constant noise term."""
func = noise_model.constant_noise
bias: torch.Tensor | float = 0.0
"""The bias to add. Defaults to 0.0."""
# Backward compatibility
ConstantBiasNoiseCfg = ConstantNoiseCfg
@configclass
class UniformNoiseCfg(NoiseCfg):
"""Configuration for a additive uniform noise term."""
func = noise_model.uniform_noise
n_min: torch.Tensor | float = -1.0
"""The minimum value of the noise. Defaults to -1.0."""
n_max: torch.Tensor | float = 1.0
"""The maximum value of the noise. Defaults to 1.0."""
# Backward compatibility
AdditiveUniformNoiseCfg = UniformNoiseCfg
@configclass
class GaussianNoiseCfg(NoiseCfg):
"""Configuration for an additive gaussian noise term."""
func = noise_model.gaussian_noise
mean: torch.Tensor | float = 0.0
"""The mean of the noise. Defaults to 0.0."""
std: torch.Tensor | float = 1.0
"""The standard deviation of the noise. Defaults to 1.0."""
# Backward compatibility
AdditiveGaussianNoiseCfg = GaussianNoiseCfg
@configclass
class NoiseModelCfg:
"""Configuration for a noise model."""
class_type: type = noise_model.NoiseModel
"""The class type of the noise model."""
noise_cfg: NoiseCfg = MISSING
"""The noise configuration to use."""
@configclass
class NoiseModelWithAdditiveBiasCfg(NoiseModelCfg):
"""Configuration for an additive gaussian noise with bias model."""
class_type: type = noise_model.NoiseModelWithAdditiveBias
bias_noise_cfg: NoiseCfg = MISSING
| 2,373 |
Python
| 23.729166 | 74 | 0.707122 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab/omni/isaac/lab/assets/articulation/articulation_data.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
from dataclasses import dataclass
from ..rigid_object import RigidObjectData
@dataclass
class ArticulationData(RigidObjectData):
"""Data container for an articulation."""
##
# Properties.
##
joint_names: list[str] = None
"""Joint names in the order parsed by the simulation view."""
##
# Default states.
##
default_joint_pos: torch.Tensor = None
"""Default joint positions of all joints. Shape is (num_instances, num_joints)."""
default_joint_vel: torch.Tensor = None
"""Default joint velocities of all joints. Shape is (num_instances, num_joints)."""
##
# Joint states <- From simulation.
##
joint_pos: torch.Tensor = None
"""Joint positions of all joints. Shape is (num_instances, num_joints)."""
joint_vel: torch.Tensor = None
"""Joint velocities of all joints. Shape is (num_instances, num_joints)."""
joint_acc: torch.Tensor = None
"""Joint acceleration of all joints. Shape is (num_instances, num_joints)."""
##
# Joint commands -- Set into simulation.
##
joint_pos_target: torch.Tensor = None
"""Joint position targets commanded by the user. Shape is (num_instances, num_joints).
For an implicit actuator model, the targets are directly set into the simulation.
For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`),
which are then set into the simulation.
"""
joint_vel_target: torch.Tensor = None
"""Joint velocity targets commanded by the user. Shape is (num_instances, num_joints).
For an implicit actuator model, the targets are directly set into the simulation.
For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`),
which are then set into the simulation.
"""
joint_effort_target: torch.Tensor = None
"""Joint effort targets commanded by the user. Shape is (num_instances, num_joints).
For an implicit actuator model, the targets are directly set into the simulation.
For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`),
which are then set into the simulation.
"""
##
# Joint properties.
##
joint_stiffness: torch.Tensor = None
"""Joint stiffness provided to simulation. Shape is (num_instances, num_joints)."""
joint_damping: torch.Tensor = None
"""Joint damping provided to simulation. Shape is (num_instances, num_joints)."""
joint_armature: torch.Tensor = None
"""Joint armature provided to simulation. Shape is (num_instances, num_joints)."""
joint_friction: torch.Tensor = None
"""Joint friction provided to simulation. Shape is (num_instances, num_joints)."""
joint_limits: torch.Tensor = None
"""Joint limits provided to simulation. Shape is (num_instances, num_joints, 2)."""
##
# Default joint properties
##
default_joint_stiffness: torch.Tensor = None
"""Default joint stiffness of all joints. Shape is (num_instances, num_joints)."""
default_joint_damping: torch.Tensor = None
"""Default joint damping of all joints. Shape is (num_instances, num_joints)."""
default_joint_armature: torch.Tensor = None
"""Default joint armature of all joints. Shape is (num_instances, num_joints)."""
default_joint_friction: torch.Tensor = None
"""Default joint friction of all joints. Shape is (num_instances, num_joints)."""
default_joint_limits: torch.Tensor = None
"""Default joint limits of all joints. Shape is (num_instances, num_joints, 2)."""
##
# Joint commands -- Explicit actuators.
##
computed_torque: torch.Tensor = None
"""Joint torques computed from the actuator model (before clipping). Shape is (num_instances, num_joints).
    This quantity is the raw torque output from the actuator model, before any clipping is applied.
It is exposed for users who want to inspect the computations inside the actuator model.
For instance, to penalize the learning agent for a difference between the computed and applied torques.
Note: The torques are zero for implicit actuator models.
"""
applied_torque: torch.Tensor = None
"""Joint torques applied from the actuator model (after clipping). Shape is (num_instances, num_joints).
These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the
actuator model.
Note: The torques are zero for implicit actuator models.
"""
##
# Fixed tendon properties.
##
fixed_tendon_stiffness: torch.Tensor = None
"""Fixed tendon stiffness provided to simulation. Shape is (num_instances, num_fixed_tendons)."""
fixed_tendon_damping: torch.Tensor = None
"""Fixed tendon damping provided to simulation. Shape is (num_instances, num_fixed_tendons)."""
fixed_tendon_limit_stiffness: torch.Tensor = None
"""Fixed tendon limit stiffness provided to simulation. Shape is (num_instances, num_fixed_tendons)."""
fixed_tendon_rest_length: torch.Tensor = None
"""Fixed tendon rest length provided to simulation. Shape is (num_instances, num_fixed_tendons)."""
fixed_tendon_offset: torch.Tensor = None
"""Fixed tendon offset provided to simulation. Shape is (num_instances, num_fixed_tendons)."""
fixed_tendon_limit: torch.Tensor = None
"""Fixed tendon limits provided to simulation. Shape is (num_instances, num_fixed_tendons, 2)."""
##
# Default fixed tendon properties
##
default_fixed_tendon_stiffness: torch.Tensor = None
"""Default tendon stiffness of all tendons. Shape is (num_instances, num_fixed_tendons)."""
default_fixed_tendon_damping: torch.Tensor = None
"""Default tendon damping of all tendons. Shape is (num_instances, num_fixed_tendons)."""
default_fixed_tendon_limit_stiffness: torch.Tensor = None
"""Default tendon limit stiffness of all tendons. Shape is (num_instances, num_fixed_tendons)."""
default_fixed_tendon_rest_length: torch.Tensor = None
"""Default tendon rest length of all tendons. Shape is (num_instances, num_fixed_tendons)."""
default_fixed_tendon_offset: torch.Tensor = None
"""Default tendon offset of all tendons. Shape is (num_instances, num_fixed_tendons)."""
default_fixed_tendon_limit: torch.Tensor = None
"""Default tendon limits of all tendons. Shape is (num_instances, num_fixed_tendons, 2)."""
##
# Other Data.
##
soft_joint_pos_limits: torch.Tensor = None
"""Joint positions limits for all joints. Shape is (num_instances, num_joints, 2)."""
soft_joint_vel_limits: torch.Tensor = None
"""Joint velocity limits for all joints. Shape is (num_instances, num_joints)."""
gear_ratio: torch.Tensor = None
"""Gear ratio for relating motor torques to applied Joint torques. Shape is (num_instances, num_joints)."""
| 7,088 |
Python
| 36.115183 | 115 | 0.693567 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/config/extension.toml
|
[package]
# Note: Semantic Versioning is used: https://semver.org/
version = "0.7.5"
# Description
title = "Isaac Lab Environments"
description="Extension containing suite of environments for robot learning."
readme = "docs/README.md"
repository = "https://github.com/isaac-sim/IsaacLab"
category = "robotics"
keywords = ["robotics", "rl", "il", "learning"]
[dependencies]
"omni.isaac.lab" = {}
"omni.isaac.lab_assets" = {}
"omni.isaac.core" = {}
"omni.isaac.gym" = {}
"omni.replicator.isaac" = {}
[[python.module]]
name = "omni.isaac.lab_tasks"
| 551 |
TOML
| 22.999999 | 76 | 0.69147 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/ant/ant_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from omni.isaac.lab_assets.ant import ANT_CFG
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.envs import DirectRLEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.direct.locomotion.locomotion_env import LocomotionEnv
@configclass
class AntEnvCfg(DirectRLEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg(dt=1 / 120)
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="average",
restitution_combine_mode="average",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
debug_vis=False,
)
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=4.0, replicate_physics=True)
# robot
robot: ArticulationCfg = ANT_CFG.replace(prim_path="/World/envs/env_.*/Robot")
joint_gears: list = [15, 15, 15, 15, 15, 15, 15, 15]
# env
episode_length_s = 15.0
decimation = 2
action_scale = 0.5
num_actions = 8
num_observations = 36
num_states = 0
heading_weight: float = 0.5
up_weight: float = 0.1
energy_cost_scale: float = 0.05
actions_cost_scale: float = 0.005
alive_reward_scale: float = 0.5
dof_vel_scale: float = 0.2
death_cost: float = -2.0
termination_height: float = 0.31
angular_velocity_scale: float = 1.0
contact_force_scale: float = 0.1
class AntEnv(LocomotionEnv):
cfg: AntEnvCfg
def __init__(self, cfg: AntEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
| 2,110 |
Python
| 27.527027 | 108 | 0.674408 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/ant/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Ant locomotion environment.
"""
import gymnasium as gym
from . import agents
from .ant_env import AntEnv, AntEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Ant-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.ant:AntEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": AntEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.AntPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
| 707 |
Python
| 22.599999 | 79 | 0.666195 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/quadcopter/quadcopter_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation, ArticulationCfg
from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg
from omni.isaac.lab.envs.ui import BaseEnvWindow
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.math import subtract_frame_transforms
##
# Pre-defined configs
##
from omni.isaac.lab_assets import CRAZYFLIE_CFG # isort: skip
from omni.isaac.lab.markers import CUBOID_MARKER_CFG # isort: skip
class QuadcopterEnvWindow(BaseEnvWindow):
"""Window manager for the Quadcopter environment."""
def __init__(self, env: QuadcopterEnv, window_name: str = "IsaacLab"):
"""Initialize the window.
Args:
env: The environment object.
window_name: The name of the window. Defaults to "IsaacLab".
"""
# initialize base window
super().__init__(env, window_name)
# add custom UI elements
with self.ui_window_elements["main_vstack"]:
with self.ui_window_elements["debug_frame"]:
with self.ui_window_elements["debug_vstack"]:
# add command manager visualization
self._create_debug_vis_ui_element("targets", self.env)
@configclass
class QuadcopterEnvCfg(DirectRLEnvCfg):
ui_window_class_type = QuadcopterEnvWindow
# simulation
sim: SimulationCfg = SimulationCfg(
dt=1 / 100,
disable_contact_processing=True,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
)
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
debug_vis=False,
)
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=2.5, replicate_physics=True)
# robot
robot: ArticulationCfg = CRAZYFLIE_CFG.replace(prim_path="/World/envs/env_.*/Robot")
thrust_to_weight = 1.9
moment_scale = 0.01
# env
episode_length_s = 10.0
decimation = 2
num_actions = 4
num_observations = 12
num_states = 0
debug_vis = True
# reward scales
lin_vel_reward_scale = -0.05
ang_vel_reward_scale = -0.01
distance_to_goal_reward_scale = 15.0
class QuadcopterEnv(DirectRLEnv):
cfg: QuadcopterEnvCfg
def __init__(self, cfg: QuadcopterEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
# Total thrust and moment applied to the base of the quadcopter
self._actions = torch.zeros(self.num_envs, self.cfg.num_actions, device=self.device)
self._thrust = torch.zeros(self.num_envs, 1, 3, device=self.device)
self._moment = torch.zeros(self.num_envs, 1, 3, device=self.device)
# Goal position
self._desired_pos_w = torch.zeros(self.num_envs, 3, device=self.device)
# Logging
self._episode_sums = {
key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
for key in [
"lin_vel",
"ang_vel",
"distance_to_goal",
]
}
# Get specific body indices
self._body_id = self._robot.find_bodies("body")[0]
self._robot_mass = self._robot.root_physx_view.get_masses()[0].sum()
self._gravity_magnitude = torch.tensor(self.sim.cfg.gravity, device=self.device).norm()
self._robot_weight = (self._robot_mass * self._gravity_magnitude).item()
# add handle for debug visualization (this is set to a valid handle inside set_debug_vis)
self.set_debug_vis(self.cfg.debug_vis)
def _setup_scene(self):
self._robot = Articulation(self.cfg.robot)
self.scene.articulations["robot"] = self._robot
self.cfg.terrain.num_envs = self.scene.cfg.num_envs
self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing
self._terrain = self.cfg.terrain.class_type(self.cfg.terrain)
# clone, filter, and replicate
self.scene.clone_environments(copy_from_source=False)
self.scene.filter_collisions(global_prim_paths=[self.cfg.terrain.prim_path])
# add lights
light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
light_cfg.func("/World/Light", light_cfg)
def _pre_physics_step(self, actions: torch.Tensor):
self._actions = actions.clone().clamp(-1.0, 1.0)
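        # map the first action from [-1, 1] to a thrust in [0, thrust_to_weight] x weight along the
        # body z-axis; e.g. with thrust_to_weight=1.9, action 0.0 commands 0.95x the robot's weight
        # (just below hover) and action +1.0 commands 1.9x the weight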
self._thrust[:, 0, 2] = self.cfg.thrust_to_weight * self._robot_weight * (self._actions[:, 0] + 1.0) / 2.0
self._moment[:, 0, :] = self.cfg.moment_scale * self._actions[:, 1:]
def _apply_action(self):
self._robot.set_external_force_and_torque(self._thrust, self._moment, body_ids=self._body_id)
def _get_observations(self) -> dict:
desired_pos_b, _ = subtract_frame_transforms(
self._robot.data.root_state_w[:, :3], self._robot.data.root_state_w[:, 3:7], self._desired_pos_w
)
obs = torch.cat(
[
self._robot.data.root_lin_vel_b,
self._robot.data.root_ang_vel_b,
self._robot.data.projected_gravity_b,
desired_pos_b,
],
dim=-1,
)
observations = {"policy": obs}
return observations
def _get_rewards(self) -> torch.Tensor:
lin_vel = torch.sum(torch.square(self._robot.data.root_lin_vel_b), dim=1)
ang_vel = torch.sum(torch.square(self._robot.data.root_ang_vel_b), dim=1)
distance_to_goal = torch.linalg.norm(self._desired_pos_w - self._robot.data.root_pos_w, dim=1)
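        # map distance to a bounded shaping term: 0 m -> 1.0, decaying smoothly with distance
        # (e.g. 0.8 m -> 1 - tanh(1) ~= 0.24)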
distance_to_goal_mapped = 1 - torch.tanh(distance_to_goal / 0.8)
rewards = {
"lin_vel": lin_vel * self.cfg.lin_vel_reward_scale * self.step_dt,
"ang_vel": ang_vel * self.cfg.ang_vel_reward_scale * self.step_dt,
"distance_to_goal": distance_to_goal_mapped * self.cfg.distance_to_goal_reward_scale * self.step_dt,
}
reward = torch.sum(torch.stack(list(rewards.values())), dim=0)
# Logging
for key, value in rewards.items():
self._episode_sums[key] += value
return reward
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
time_out = self.episode_length_buf >= self.max_episode_length - 1
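        # terminate early if the quadcopter crashes into the ground or climbs out of the workspace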
died = torch.logical_or(self._robot.data.root_pos_w[:, 2] < 0.1, self._robot.data.root_pos_w[:, 2] > 2.0)
return died, time_out
def _reset_idx(self, env_ids: torch.Tensor | None):
if env_ids is None or len(env_ids) == self.num_envs:
env_ids = self._robot._ALL_INDICES
# Logging
final_distance_to_goal = torch.linalg.norm(
self._desired_pos_w[env_ids] - self._robot.data.root_pos_w[env_ids], dim=1
).mean()
extras = dict()
for key in self._episode_sums.keys():
episodic_sum_avg = torch.mean(self._episode_sums[key][env_ids])
extras["Episode Reward/" + key] = episodic_sum_avg / self.max_episode_length_s
self._episode_sums[key][env_ids] = 0.0
self.extras["log"] = dict()
self.extras["log"].update(extras)
extras = dict()
extras["Episode Termination/died"] = torch.count_nonzero(self.reset_terminated[env_ids]).item()
extras["Episode Termination/time_out"] = torch.count_nonzero(self.reset_time_outs[env_ids]).item()
extras["Metrics/final_distance_to_goal"] = final_distance_to_goal.item()
self.extras["log"].update(extras)
self._robot.reset(env_ids)
super()._reset_idx(env_ids)
if len(env_ids) == self.num_envs:
# Spread out the resets to avoid spikes in training when many environments reset at a similar time
self.episode_length_buf = torch.randint_like(self.episode_length_buf, high=int(self.max_episode_length))
self._actions[env_ids] = 0.0
# Sample new commands
self._desired_pos_w[env_ids, :2] = torch.zeros_like(self._desired_pos_w[env_ids, :2]).uniform_(-2.0, 2.0)
self._desired_pos_w[env_ids, :2] += self._terrain.env_origins[env_ids, :2]
self._desired_pos_w[env_ids, 2] = torch.zeros_like(self._desired_pos_w[env_ids, 2]).uniform_(0.5, 1.5)
# Reset robot state
joint_pos = self._robot.data.default_joint_pos[env_ids]
joint_vel = self._robot.data.default_joint_vel[env_ids]
default_root_state = self._robot.data.default_root_state[env_ids]
default_root_state[:, :3] += self._terrain.env_origins[env_ids]
self._robot.write_root_pose_to_sim(default_root_state[:, :7], env_ids)
self._robot.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids)
self._robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids)
def _set_debug_vis_impl(self, debug_vis: bool):
        # create markers if necessary for the first time
if debug_vis:
if not hasattr(self, "goal_pos_visualizer"):
marker_cfg = CUBOID_MARKER_CFG.copy()
marker_cfg.markers["cuboid"].size = (0.05, 0.05, 0.05)
# -- goal pose
marker_cfg.prim_path = "/Visuals/Command/goal_position"
self.goal_pos_visualizer = VisualizationMarkers(marker_cfg)
# set their visibility to true
self.goal_pos_visualizer.set_visibility(True)
else:
if hasattr(self, "goal_pos_visualizer"):
self.goal_pos_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# update the markers
self.goal_pos_visualizer.visualize(self._desired_pos_w)
| 10,494 |
Python
| 41.148594 | 116 | 0.621307 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/quadcopter/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Quadcopter environment.
"""
import gymnasium as gym
from . import agents
from .quadcopter_env import QuadcopterEnv, QuadcopterEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Quadcopter-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.quadcopter:QuadcopterEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": QuadcopterEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.QuadcopterPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
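# A minimal usage sketch (comments only): this assumes the Isaac Sim app has already been
# launched (e.g. via the AppLauncher) and that importing `omni.isaac.lab_tasks` has triggered
# the registration above. The `num_envs` override is illustrative; any `QuadcopterEnvCfg`
# field could be adjusted the same way before creating the environment:
#
#   import gymnasium as gym
#   from omni.isaac.lab_tasks.direct.quadcopter import QuadcopterEnvCfg
#
#   env_cfg = QuadcopterEnvCfg()
#   env_cfg.scene.num_envs = 32
#   env = gym.make("Isaac-Quadcopter-Direct-v0", cfg=env_cfg)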
| 758 |
Python
| 24.299999 | 79 | 0.689974 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_camera_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import gymnasium as gym
import math
import numpy as np
import torch
from collections.abc import Sequence
from omni.isaac.lab_assets.cartpole import CARTPOLE_CFG
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation, ArticulationCfg
from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg, ViewerCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sensors import TiledCamera, TiledCameraCfg
from omni.isaac.lab.sim import SimulationCfg
from omni.isaac.lab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.math import sample_uniform
@configclass
class CartpoleRGBCameraEnvCfg(DirectRLEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg(dt=1 / 120)
# robot
robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot")
cart_dof_name = "slider_to_cart"
pole_dof_name = "cart_to_pole"
# camera
tiled_camera: TiledCameraCfg = TiledCameraCfg(
prim_path="/World/envs/env_.*/Camera",
offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"),
data_types=["rgb"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0)
),
width=80,
height=80,
)
# change viewer settings
viewer = ViewerCfg(eye=(20.0, 20.0, 20.0))
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=256, env_spacing=20.0, replicate_physics=True)
# env
decimation = 2
episode_length_s = 5.0
action_scale = 100.0 # [N]
num_actions = 1
num_channels = 3
num_observations = num_channels * tiled_camera.height * tiled_camera.width
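    # for the default RGB configuration this is 3 * 80 * 80 = 19200 values per environment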
num_states = 0
# reset
    max_cart_pos = 3.0  # the cart is reset if it exceeds this position [m]
    initial_pole_angle_range = [-0.125, 0.125]  # the range from which the pole angle is sampled on reset [rad]
# reward scales
rew_scale_alive = 1.0
rew_scale_terminated = -2.0
rew_scale_pole_pos = -1.0
rew_scale_cart_vel = -0.01
rew_scale_pole_vel = -0.005
class CartpoleDepthCameraEnvCfg(CartpoleRGBCameraEnvCfg):
# camera
tiled_camera: TiledCameraCfg = TiledCameraCfg(
prim_path="/World/envs/env_.*/Camera",
offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"),
data_types=["depth"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0)
),
width=80,
height=80,
)
# env
num_channels = 1
num_observations = num_channels * tiled_camera.height * tiled_camera.width
class CartpoleCameraEnv(DirectRLEnv):
cfg: CartpoleRGBCameraEnvCfg | CartpoleDepthCameraEnvCfg
def __init__(
self, cfg: CartpoleRGBCameraEnvCfg | CartpoleDepthCameraEnvCfg, render_mode: str | None = None, **kwargs
):
super().__init__(cfg, render_mode, **kwargs)
self._cart_dof_idx, _ = self._cartpole.find_joints(self.cfg.cart_dof_name)
self._pole_dof_idx, _ = self._cartpole.find_joints(self.cfg.pole_dof_name)
self.action_scale = self.cfg.action_scale
self.joint_pos = self._cartpole.data.joint_pos
self.joint_vel = self._cartpole.data.joint_vel
if len(self.cfg.tiled_camera.data_types) != 1:
raise ValueError(
"The Cartpole camera environment only supports one image type at a time but the following were"
f" provided: {self.cfg.tiled_camera.data_types}"
)
def close(self):
"""Cleanup for the environment."""
super().close()
def _configure_gym_env_spaces(self):
"""Configure the action and observation spaces for the Gym environment."""
# observation space (unbounded since we don't impose any limits)
self.num_actions = self.cfg.num_actions
self.num_observations = self.cfg.num_observations
self.num_states = self.cfg.num_states
# set up spaces
self.single_observation_space = gym.spaces.Dict()
self.single_observation_space["policy"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(self.cfg.tiled_camera.height, self.cfg.tiled_camera.width, self.cfg.num_channels),
)
if self.num_states > 0:
self.single_observation_space["critic"] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=(self.cfg.tiled_camera.height, self.cfg.tiled_camera.width, self.cfg.num_channels),
)
self.single_action_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.num_actions,))
# batch the spaces for vectorized environments
self.observation_space = gym.vector.utils.batch_space(self.single_observation_space, self.num_envs)
self.action_space = gym.vector.utils.batch_space(self.single_action_space, self.num_envs)
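        # with the default RGB configuration (80x80, 3 channels) and 256 envs, this yields an
        # observation space of shape (256, 80, 80, 3) and an action space of shape (256, 1)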
# RL specifics
self.actions = torch.zeros(self.num_envs, self.num_actions, device=self.sim.device)
def _setup_scene(self):
"""Setup the scene with the cartpole and camera."""
self._cartpole = Articulation(self.cfg.robot_cfg)
self._tiled_camera = TiledCamera(self.cfg.tiled_camera)
# add ground plane
spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg(size=(500, 500)))
# clone, filter, and replicate
self.scene.clone_environments(copy_from_source=False)
self.scene.filter_collisions(global_prim_paths=[])
        # add articulation and sensors to scene
self.scene.articulations["cartpole"] = self._cartpole
self.scene.sensors["tiled_camera"] = self._tiled_camera
# add lights
light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
light_cfg.func("/World/Light", light_cfg)
def _pre_physics_step(self, actions: torch.Tensor) -> None:
self.actions = self.action_scale * actions.clone()
def _apply_action(self) -> None:
self._cartpole.set_joint_effort_target(self.actions, joint_ids=self._cart_dof_idx)
def _get_observations(self) -> dict:
data_type = "rgb" if "rgb" in self.cfg.tiled_camera.data_types else "depth"
observations = {"policy": self._tiled_camera.data.output[data_type].clone()}
return observations
def _get_rewards(self) -> torch.Tensor:
total_reward = compute_rewards(
self.cfg.rew_scale_alive,
self.cfg.rew_scale_terminated,
self.cfg.rew_scale_pole_pos,
self.cfg.rew_scale_cart_vel,
self.cfg.rew_scale_pole_vel,
self.joint_pos[:, self._pole_dof_idx[0]],
self.joint_vel[:, self._pole_dof_idx[0]],
self.joint_pos[:, self._cart_dof_idx[0]],
self.joint_vel[:, self._cart_dof_idx[0]],
self.reset_terminated,
)
return total_reward
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
self.joint_pos = self._cartpole.data.joint_pos
self.joint_vel = self._cartpole.data.joint_vel
time_out = self.episode_length_buf >= self.max_episode_length - 1
out_of_bounds = torch.any(torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1)
out_of_bounds = out_of_bounds | torch.any(torch.abs(self.joint_pos[:, self._pole_dof_idx]) > math.pi / 2, dim=1)
return out_of_bounds, time_out
def _reset_idx(self, env_ids: Sequence[int] | None):
if env_ids is None:
env_ids = self._cartpole._ALL_INDICES
super()._reset_idx(env_ids)
joint_pos = self._cartpole.data.default_joint_pos[env_ids]
joint_pos[:, self._pole_dof_idx] += sample_uniform(
self.cfg.initial_pole_angle_range[0] * math.pi,
self.cfg.initial_pole_angle_range[1] * math.pi,
joint_pos[:, self._pole_dof_idx].shape,
joint_pos.device,
)
joint_vel = self._cartpole.data.default_joint_vel[env_ids]
default_root_state = self._cartpole.data.default_root_state[env_ids]
default_root_state[:, :3] += self.scene.env_origins[env_ids]
self.joint_pos[env_ids] = joint_pos
self.joint_vel[env_ids] = joint_vel
self._cartpole.write_root_pose_to_sim(default_root_state[:, :7], env_ids)
self._cartpole.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids)
self._cartpole.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids)
@torch.jit.script
def compute_rewards(
rew_scale_alive: float,
rew_scale_terminated: float,
rew_scale_pole_pos: float,
rew_scale_cart_vel: float,
rew_scale_pole_vel: float,
pole_pos: torch.Tensor,
pole_vel: torch.Tensor,
cart_pos: torch.Tensor,
cart_vel: torch.Tensor,
reset_terminated: torch.Tensor,
):
rew_alive = rew_scale_alive * (1.0 - reset_terminated.float())
rew_termination = rew_scale_terminated * reset_terminated.float()
    # the unsqueeze/sum round-trip is a no-op for these 1D inputs; use the direct element-wise form
    rew_pole_pos = rew_scale_pole_pos * torch.square(pole_pos)
    rew_cart_vel = rew_scale_cart_vel * torch.abs(cart_vel)
    rew_pole_vel = rew_scale_pole_vel * torch.abs(pole_vel)
total_reward = rew_alive + rew_termination + rew_pole_pos + rew_cart_vel + rew_pole_vel
return total_reward
| 9,829 |
Python
| 38.959349 | 120 | 0.64676 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Cartpole balancing environment.
"""
import gymnasium as gym
from . import agents
from .cartpole_camera_env import CartpoleCameraEnv, CartpoleDepthCameraEnvCfg, CartpoleRGBCameraEnvCfg
from .cartpole_env import CartpoleEnv, CartpoleEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Cartpole-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.cartpole:CartpoleEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": CartpoleEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.CartpolePPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
"sb3_cfg_entry_point": f"{agents.__name__}:sb3_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Cartpole-RGB-Camera-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.cartpole:CartpoleCameraEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": CartpoleRGBCameraEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Cartpole-Depth-Camera-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.cartpole:CartpoleCameraEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": CartpoleDepthCameraEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_camera_ppo_cfg.yaml",
},
)
| 1,588 |
Python
| 29.557692 | 102 | 0.688287 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/humanoid/humanoid_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from omni.isaac.lab_assets import HUMANOID_CFG
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg
from omni.isaac.lab.envs import DirectRLEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab_tasks.direct.locomotion.locomotion_env import LocomotionEnv
@configclass
class HumanoidEnvCfg(DirectRLEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg(dt=1 / 120)
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="average",
restitution_combine_mode="average",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
debug_vis=False,
)
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=4.0, replicate_physics=True)
# robot
robot: ArticulationCfg = HUMANOID_CFG.replace(prim_path="/World/envs/env_.*/Robot")
joint_gears: list = [
67.5000, # lower_waist
67.5000, # lower_waist
67.5000, # right_upper_arm
67.5000, # right_upper_arm
67.5000, # left_upper_arm
67.5000, # left_upper_arm
67.5000, # pelvis
45.0000, # right_lower_arm
45.0000, # left_lower_arm
45.0000, # right_thigh: x
135.0000, # right_thigh: y
45.0000, # right_thigh: z
45.0000, # left_thigh: x
135.0000, # left_thigh: y
45.0000, # left_thigh: z
90.0000, # right_knee
90.0000, # left_knee
22.5, # right_foot
22.5, # right_foot
22.5, # left_foot
22.5, # left_foot
]
# env
episode_length_s = 15.0
decimation = 2
action_scale = 1.0
num_actions = 21
num_observations = 75
num_states = 0
heading_weight: float = 0.5
up_weight: float = 0.1
energy_cost_scale: float = 0.05
actions_cost_scale: float = 0.01
alive_reward_scale: float = 2.0
dof_vel_scale: float = 0.1
death_cost: float = -1.0
termination_height: float = 0.8
angular_velocity_scale: float = 0.25
contact_force_scale: float = 0.01
class HumanoidEnv(LocomotionEnv):
cfg: HumanoidEnvCfg
def __init__(self, cfg: HumanoidEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
| 2,796 |
Python
| 28.135416 | 108 | 0.631617 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Shadow Hand environment.
"""
import gymnasium as gym
from . import agents
from .shadow_hand_env import ShadowHandEnv
from .shadow_hand_env_cfg import ShadowHandEnvCfg, ShadowHandOpenAIEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Shadow-Hand-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.shadow_hand:ShadowHandEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": ShadowHandEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.ShadowHandPPORunnerCfg,
},
)
gym.register(
id="Isaac-Shadow-Hand-OpenAI-FF-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.shadow_hand:ShadowHandEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": ShadowHandOpenAIEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_ff_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.ShadowHandAsymFFPPORunnerCfg,
},
)
gym.register(
id="Isaac-Shadow-Hand-OpenAI-LSTM-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.shadow_hand:ShadowHandEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": ShadowHandOpenAIEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_lstm_cfg.yaml",
},
)
| 1,496 |
Python
| 28.352941 | 85 | 0.691176 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import numpy as np
import torch
from collections.abc import Sequence
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation, RigidObject
from omni.isaac.lab.envs import DirectRLEnv
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane
from omni.isaac.lab.utils.math import quat_conjugate, quat_from_angle_axis, quat_mul, sample_uniform, saturate
from .shadow_hand_env_cfg import ShadowHandEnvCfg
class ShadowHandEnv(DirectRLEnv):
cfg: ShadowHandEnvCfg
def __init__(self, cfg: ShadowHandEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
self.num_hand_dofs = self.hand.num_joints
# buffers for position targets
self.hand_dof_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
self.prev_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_hand_dofs), dtype=torch.float, device=self.device)
# list of actuated joints
self.actuated_dof_indices = list()
for joint_name in cfg.actuated_joint_names:
self.actuated_dof_indices.append(self.hand.joint_names.index(joint_name))
self.actuated_dof_indices.sort()
# finger bodies
self.finger_bodies = list()
for body_name in self.cfg.fingertip_body_names:
self.finger_bodies.append(self.hand.body_names.index(body_name))
self.finger_bodies.sort()
self.num_fingertips = len(self.finger_bodies)
# joint limits
joint_pos_limits = self.hand.root_physx_view.get_dof_limits().to(self.device)
self.hand_dof_lower_limits = joint_pos_limits[..., 0]
self.hand_dof_upper_limits = joint_pos_limits[..., 1]
# track goal resets
self.reset_goal_buf = torch.zeros(self.num_envs, dtype=torch.bool, device=self.device)
# used to compare object position
self.in_hand_pos = self.object.data.default_root_state[:, 0:3].clone()
self.in_hand_pos[:, 2] -= 0.04
# default goal positions
self.goal_rot = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device)
self.goal_rot[:, 0] = 1.0
self.goal_pos = torch.zeros((self.num_envs, 3), dtype=torch.float, device=self.device)
self.goal_pos[:, :] = torch.tensor([-0.2, -0.45, 0.68], device=self.device)
# initialize goal marker
self.goal_markers = VisualizationMarkers(self.cfg.goal_object_cfg)
# track successes
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
# unit tensors
self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = torch.tensor([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
def _setup_scene(self):
# add hand, in-hand object, and goal object
self.hand = Articulation(self.cfg.robot_cfg)
self.object = RigidObject(self.cfg.object_cfg)
# add ground plane
spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg())
# clone and replicate (no need to filter for this environment)
self.scene.clone_environments(copy_from_source=False)
        # add articulation to scene - we must register to scene to randomize with EventManager
self.scene.articulations["robot"] = self.hand
self.scene.rigid_objects["object"] = self.object
# add lights
light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
light_cfg.func("/World/Light", light_cfg)
def _pre_physics_step(self, actions: torch.Tensor) -> None:
self.actions = actions.clone()
def _apply_action(self) -> None:
self.cur_targets[:, self.actuated_dof_indices] = scale(
self.actions,
self.hand_dof_lower_limits[:, self.actuated_dof_indices],
self.hand_dof_upper_limits[:, self.actuated_dof_indices],
)
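        # low-pass filter the targets: blend the new command with the previous target using
        # act_moving_average (1.0 disables smoothing, as in the base config; 0.3 in the OpenAI config)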
self.cur_targets[:, self.actuated_dof_indices] = (
self.cfg.act_moving_average * self.cur_targets[:, self.actuated_dof_indices]
+ (1.0 - self.cfg.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
)
self.cur_targets[:, self.actuated_dof_indices] = saturate(
self.cur_targets[:, self.actuated_dof_indices],
self.hand_dof_lower_limits[:, self.actuated_dof_indices],
self.hand_dof_upper_limits[:, self.actuated_dof_indices],
)
self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
self.hand.set_joint_position_target(
self.cur_targets[:, self.actuated_dof_indices], joint_ids=self.actuated_dof_indices
)
def _get_observations(self) -> dict:
if self.cfg.asymmetric_obs:
self.fingertip_force_sensors = self.hand.root_physx_view.get_link_incoming_joint_force()[
:, self.finger_bodies
]
if self.cfg.obs_type == "openai":
obs = self.compute_reduced_observations()
elif self.cfg.obs_type == "full":
obs = self.compute_full_observations()
        else:
            raise ValueError(f"Unknown observation type: {self.cfg.obs_type}")
        observations = {"policy": obs}
        if self.cfg.asymmetric_obs:
            observations["critic"] = self.compute_full_state()
return observations
def _get_rewards(self) -> torch.Tensor:
(
total_reward,
self.reset_goal_buf,
self.successes[:],
self.consecutive_successes[:],
) = compute_rewards(
self.reset_buf,
self.reset_goal_buf,
self.successes,
self.consecutive_successes,
self.max_episode_length,
self.object_pos,
self.object_rot,
self.in_hand_pos,
self.goal_rot,
self.cfg.dist_reward_scale,
self.cfg.rot_reward_scale,
self.cfg.rot_eps,
self.actions,
self.cfg.action_penalty_scale,
self.cfg.success_tolerance,
self.cfg.reach_goal_bonus,
self.cfg.fall_dist,
self.cfg.fall_penalty,
self.cfg.av_factor,
)
if "log" not in self.extras:
self.extras["log"] = dict()
self.extras["log"]["consecutive_successes"] = self.consecutive_successes.mean()
# reset goals if the goal has been reached
goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
if len(goal_env_ids) > 0:
self._reset_target_pose(goal_env_ids)
return total_reward
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
self._compute_intermediate_values()
# reset when cube has fallen
goal_dist = torch.norm(self.object_pos - self.in_hand_pos, p=2, dim=-1)
out_of_reach = goal_dist >= self.cfg.fall_dist
if self.cfg.max_consecutive_success > 0:
# Reset progress (episode length buf) on goal envs if max_consecutive_success > 0
rot_dist = rotation_distance(self.object_rot, self.goal_rot)
self.episode_length_buf = torch.where(
torch.abs(rot_dist) <= self.cfg.success_tolerance,
torch.zeros_like(self.episode_length_buf),
self.episode_length_buf,
)
max_success_reached = self.successes >= self.cfg.max_consecutive_success
time_out = self.episode_length_buf >= self.max_episode_length - 1
if self.cfg.max_consecutive_success > 0:
time_out = time_out | max_success_reached
return out_of_reach, time_out
def _reset_idx(self, env_ids: Sequence[int] | None):
if env_ids is None:
env_ids = self.hand._ALL_INDICES
# resets articulation and rigid body attributes
super()._reset_idx(env_ids)
# reset goals
self._reset_target_pose(env_ids)
# reset object
object_default_state = self.object.data.default_root_state.clone()[env_ids]
pos_noise = sample_uniform(-1.0, 1.0, (len(env_ids), 3), device=self.device)
# global object positions
object_default_state[:, 0:3] = (
object_default_state[:, 0:3] + self.cfg.reset_position_noise * pos_noise + self.scene.env_origins[env_ids]
)
rot_noise = sample_uniform(-1.0, 1.0, (len(env_ids), 2), device=self.device) # noise for X and Y rotation
object_default_state[:, 3:7] = randomize_rotation(
rot_noise[:, 0], rot_noise[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
)
object_default_state[:, 7:] = torch.zeros_like(self.object.data.default_root_state[env_ids, 7:])
self.object.write_root_state_to_sim(object_default_state, env_ids)
# reset hand
delta_max = self.hand_dof_upper_limits[env_ids] - self.hand.data.default_joint_pos[env_ids]
delta_min = self.hand_dof_lower_limits[env_ids] - self.hand.data.default_joint_pos[env_ids]
dof_pos_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device)
rand_delta = delta_min + (delta_max - delta_min) * 0.5 * dof_pos_noise
dof_pos = self.hand.data.default_joint_pos[env_ids] + self.cfg.reset_dof_pos_noise * rand_delta
dof_vel_noise = sample_uniform(-1.0, 1.0, (len(env_ids), self.num_hand_dofs), device=self.device)
dof_vel = self.hand.data.default_joint_vel[env_ids] + self.cfg.reset_dof_vel_noise * dof_vel_noise
self.prev_targets[env_ids] = dof_pos
self.cur_targets[env_ids] = dof_pos
self.hand_dof_targets[env_ids] = dof_pos
self.hand.set_joint_position_target(dof_pos, env_ids=env_ids)
self.hand.write_joint_state_to_sim(dof_pos, dof_vel, env_ids=env_ids)
self.successes[env_ids] = 0
self._compute_intermediate_values()
def _reset_target_pose(self, env_ids):
# reset goal rotation
rand_floats = sample_uniform(-1.0, 1.0, (len(env_ids), 2), device=self.device)
new_rot = randomize_rotation(
rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
)
# update goal pose and markers
self.goal_rot[env_ids] = new_rot
goal_pos = self.goal_pos + self.scene.env_origins
self.goal_markers.visualize(goal_pos, self.goal_rot)
self.reset_goal_buf[env_ids] = 0
def _compute_intermediate_values(self):
# data for hand
self.fingertip_pos = self.hand.data.body_pos_w[:, self.finger_bodies]
self.fingertip_rot = self.hand.data.body_quat_w[:, self.finger_bodies]
self.fingertip_pos -= self.scene.env_origins.repeat((1, self.num_fingertips)).reshape(
self.num_envs, self.num_fingertips, 3
)
self.fingertip_velocities = self.hand.data.body_vel_w[:, self.finger_bodies]
self.hand_dof_pos = self.hand.data.joint_pos
self.hand_dof_vel = self.hand.data.joint_vel
# data for object
self.object_pos = self.object.data.root_pos_w - self.scene.env_origins
self.object_rot = self.object.data.root_quat_w
self.object_velocities = self.object.data.root_vel_w
self.object_linvel = self.object.data.root_lin_vel_w
self.object_angvel = self.object.data.root_ang_vel_w
def compute_reduced_observations(self):
# Per https://arxiv.org/pdf/1808.00177.pdf Table 2
# Fingertip positions
# Object Position, but not orientation
# Relative target orientation
obs = torch.cat(
(
self.fingertip_pos.view(self.num_envs, self.num_fingertips * 3), # 0:15
self.object_pos, # 15:18
quat_mul(self.object_rot, quat_conjugate(self.goal_rot)), # 18:22
self.actions, # 22:42
),
dim=-1,
)
return obs
def compute_full_observations(self):
obs = torch.cat(
(
# hand
unscale(self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits), # 0:24
self.cfg.vel_obs_scale * self.hand_dof_vel, # 24:48
# object
self.object_pos, # 48:51
self.object_rot, # 51:55
self.object_linvel, # 55:58
self.cfg.vel_obs_scale * self.object_angvel, # 58:61
# goal
self.in_hand_pos, # 61:64
self.goal_rot, # 64:68
quat_mul(self.object_rot, quat_conjugate(self.goal_rot)), # 68:72
# fingertips
self.fingertip_pos.view(self.num_envs, self.num_fingertips * 3), # 72:87
self.fingertip_rot.view(self.num_envs, self.num_fingertips * 4), # 87:107
self.fingertip_velocities.view(self.num_envs, self.num_fingertips * 6), # 107:137
# actions
self.actions, # 137:157
),
dim=-1,
)
return obs
def compute_full_state(self):
states = torch.cat(
(
# hand
unscale(self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits), # 0:24
self.cfg.vel_obs_scale * self.hand_dof_vel, # 24:48
# object
self.object_pos, # 48:51
self.object_rot, # 51:55
self.object_linvel, # 55:58
self.cfg.vel_obs_scale * self.object_angvel, # 58:61
# goal
self.in_hand_pos, # 61:64
self.goal_rot, # 64:68
quat_mul(self.object_rot, quat_conjugate(self.goal_rot)), # 68:72
# fingertips
self.fingertip_pos.view(self.num_envs, self.num_fingertips * 3), # 72:87
self.fingertip_rot.view(self.num_envs, self.num_fingertips * 4), # 87:107
self.fingertip_velocities.view(self.num_envs, self.num_fingertips * 6), # 107:137
self.cfg.force_torque_obs_scale
* self.fingertip_force_sensors.view(self.num_envs, self.num_fingertips * 6), # 137:167
# actions
self.actions, # 167:187
),
dim=-1,
)
return states
@torch.jit.script
def scale(x, lower, upper):
return 0.5 * (x + 1.0) * (upper - lower) + lower
@torch.jit.script
def unscale(x, lower, upper):
return (2.0 * x - upper - lower) / (upper - lower)
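# sanity check for the affine pair above: scale maps the normalized range [-1, 1] onto
# [lower, upper] (scale(-1) == lower, scale(1) == upper) and unscale is its exact inverse,
# i.e. unscale(scale(x, lower, upper), lower, upper) == x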
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
return quat_mul(
quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)
)
@torch.jit.script
def rotation_distance(object_rot, target_rot):
# Orientation alignment for the cube in hand and goal cube
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
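    # the vector part of a unit relative quaternion has norm sin(theta / 2), so
    # 2 * asin(||q_diff[1:4]||) recovers the rotation angle theta in [0, pi];
    # the clamp guards against numerical overshoot beyond 1.0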
return 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0)) # changed quat convention
@torch.jit.script
def compute_rewards(
reset_buf: torch.Tensor,
reset_goal_buf: torch.Tensor,
successes: torch.Tensor,
consecutive_successes: torch.Tensor,
max_episode_length: float,
object_pos: torch.Tensor,
object_rot: torch.Tensor,
target_pos: torch.Tensor,
target_rot: torch.Tensor,
dist_reward_scale: float,
rot_reward_scale: float,
rot_eps: float,
actions: torch.Tensor,
action_penalty_scale: float,
success_tolerance: float,
reach_goal_bonus: float,
fall_dist: float,
fall_penalty: float,
av_factor: float,
):
goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
rot_dist = rotation_distance(object_rot, target_rot)
dist_rew = goal_dist * dist_reward_scale
rot_rew = 1.0 / (torch.abs(rot_dist) + rot_eps) * rot_reward_scale
action_penalty = torch.sum(actions**2, dim=-1)
# Total reward is: position distance + orientation alignment + action regularization + success bonus + fall penalty
reward = dist_rew + rot_rew + action_penalty * action_penalty_scale
# Find out which envs hit the goal and update successes count
goal_resets = torch.where(torch.abs(rot_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
successes = successes + goal_resets
# Success bonus: orientation is within `success_tolerance` of goal orientation
reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)
# Fall penalty: distance to the goal is larger than a threshold
reward = torch.where(goal_dist >= fall_dist, reward + fall_penalty, reward)
# Check env termination conditions, including maximum success number
resets = torch.where(goal_dist >= fall_dist, torch.ones_like(reset_buf), reset_buf)
num_resets = torch.sum(resets)
finished_cons_successes = torch.sum(successes * resets.float())
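    # exponential moving average of successes over just-finished episodes:
    # new = av_factor * (finished successes / num resets) + (1 - av_factor) * old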
cons_successes = torch.where(
num_resets > 0,
av_factor * finished_cons_successes / num_resets + (1.0 - av_factor) * consecutive_successes,
consecutive_successes,
)
return reward, goal_resets, successes, cons_successes
| 18,039 |
Python
| 40.953488 | 120 | 0.613615 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_env_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab_assets.shadow_hand import SHADOW_HAND_CFG
import omni.isaac.lab.envs.mdp as mdp
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg, RigidObjectCfg
from omni.isaac.lab.envs import DirectRLEnvCfg
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.markers import VisualizationMarkersCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import PhysxCfg, SimulationCfg
from omni.isaac.lab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.noise import GaussianNoiseCfg, NoiseModelWithAdditiveBiasCfg
@configclass
class EventCfg:
"""Configuration for randomization."""
# -- robot
robot_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"static_friction_range": (0.7, 1.3),
"dynamic_friction_range": (1.0, 1.0),
"restitution_range": (1.0, 1.0),
"num_buckets": 250,
},
)
robot_joint_stiffness_and_damping = EventTerm(
func=mdp.randomize_actuator_gains,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", joint_names=".*"),
"stiffness_distribution_params": (0.75, 1.5),
"damping_distribution_params": (0.3, 3.0),
"operation": "scale",
"distribution": "log_uniform",
},
)
robot_joint_limits = EventTerm(
func=mdp.randomize_joint_parameters,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", joint_names=".*"),
"lower_limit_distribution_params": (0.00, 0.01),
"upper_limit_distribution_params": (0.00, 0.01),
"operation": "add",
"distribution": "gaussian",
},
)
robot_tendon_properties = EventTerm(
func=mdp.randomize_fixed_tendon_parameters,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", fixed_tendon_names=".*"),
"stiffness_distribution_params": (0.75, 1.5),
"damping_distribution_params": (0.3, 3.0),
"operation": "scale",
"distribution": "log_uniform",
},
)
# -- object
object_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("object", body_names=".*"),
"static_friction_range": (0.7, 1.3),
"dynamic_friction_range": (1.0, 1.0),
"restitution_range": (1.0, 1.0),
"num_buckets": 250,
},
)
object_scale_mass = EventTerm(
func=mdp.randomize_rigid_body_mass,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("object"),
"mass_distribution_params": (0.5, 1.5),
"operation": "scale",
"distribution": "uniform",
},
)
# -- scene
reset_gravity = EventTerm(
func=mdp.randomize_physics_scene_gravity,
mode="interval",
is_global_time=True,
interval_range_s=(36.0, 36.0), # time_s = num_steps * (decimation * dt)
params={
"gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.4]),
"operation": "add",
"distribution": "gaussian",
},
)
@configclass
class ShadowHandEnvCfg(DirectRLEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg(
dt=1 / 120,
physics_material=RigidBodyMaterialCfg(
static_friction=1.0,
dynamic_friction=1.0,
),
physx=PhysxCfg(
bounce_threshold_velocity=0.2,
),
)
# robot
robot_cfg: ArticulationCfg = SHADOW_HAND_CFG.replace(prim_path="/World/envs/env_.*/Robot").replace(
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 0.5),
rot=(1.0, 0.0, 0.0, 0.0),
joint_pos={".*": 0.0},
)
)
actuated_joint_names = [
"robot0_WRJ1",
"robot0_WRJ0",
"robot0_FFJ3",
"robot0_FFJ2",
"robot0_FFJ1",
"robot0_MFJ3",
"robot0_MFJ2",
"robot0_MFJ1",
"robot0_RFJ3",
"robot0_RFJ2",
"robot0_RFJ1",
"robot0_LFJ4",
"robot0_LFJ3",
"robot0_LFJ2",
"robot0_LFJ1",
"robot0_THJ4",
"robot0_THJ3",
"robot0_THJ2",
"robot0_THJ1",
"robot0_THJ0",
]
fingertip_body_names = [
"robot0_ffdistal",
"robot0_mfdistal",
"robot0_rfdistal",
"robot0_lfdistal",
"robot0_thdistal",
]
# in-hand object
object_cfg: RigidObjectCfg = RigidObjectCfg(
prim_path="/World/envs/env_.*/object",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
kinematic_enabled=False,
disable_gravity=False,
enable_gyroscopic_forces=True,
solver_position_iteration_count=8,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.0025,
max_depenetration_velocity=1000.0,
),
mass_props=sim_utils.MassPropertiesCfg(density=567.0),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.39, 0.6), rot=(1.0, 0.0, 0.0, 0.0)),
)
# goal object
goal_object_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg(
prim_path="/Visuals/goal_marker",
markers={
"goal": sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
scale=(1.0, 1.0, 1.0),
)
},
)
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=8192, env_spacing=0.75, replicate_physics=True)
# env
decimation = 2
episode_length_s = 10.0
num_actions = 20
num_observations = 157 # (full)
num_states = 0
asymmetric_obs = False
obs_type = "full"
# reset
reset_position_noise = 0.01 # range of position at reset
reset_dof_pos_noise = 0.2 # range of dof pos at reset
reset_dof_vel_noise = 0.0 # range of dof vel at reset
# reward scales
dist_reward_scale = -10.0
rot_reward_scale = 1.0
rot_eps = 0.1
action_penalty_scale = -0.0002
reach_goal_bonus = 250
fall_penalty = 0
fall_dist = 0.24
vel_obs_scale = 0.2
success_tolerance = 0.1
max_consecutive_success = 0
av_factor = 0.1
act_moving_average = 1.0
force_torque_obs_scale = 10.0
@configclass
class ShadowHandOpenAIEnvCfg(ShadowHandEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg(
dt=1 / 60,
physics_material=RigidBodyMaterialCfg(
static_friction=1.0,
dynamic_friction=1.0,
),
physx=PhysxCfg(
bounce_threshold_velocity=0.2,
gpu_max_rigid_contact_count=2**23,
gpu_max_rigid_patch_count=2**23,
),
)
# env
decimation = 3
episode_length_s = 8.0
num_actions = 20
num_observations = 42
num_states = 187
asymmetric_obs = True
obs_type = "openai"
# reset
reset_position_noise = 0.01 # range of position at reset
reset_dof_pos_noise = 0.2 # range of dof pos at reset
reset_dof_vel_noise = 0.0 # range of dof vel at reset
# reward scales
dist_reward_scale = -10.0
rot_reward_scale = 1.0
rot_eps = 0.1
action_penalty_scale = -0.0002
reach_goal_bonus = 250
fall_penalty = -50
fall_dist = 0.24
vel_obs_scale = 0.2
success_tolerance = 0.4
max_consecutive_success = 50
av_factor = 0.1
act_moving_average = 0.3
force_torque_obs_scale = 10.0
# domain randomization config
events: EventCfg = EventCfg()
# at every time-step add gaussian noise + bias. The bias is a gaussian sampled at reset
action_noise_model: NoiseModelWithAdditiveBiasCfg = NoiseModelWithAdditiveBiasCfg(
noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.05, operation="add"),
bias_noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.015, operation="abs"),
)
# at every time-step add gaussian noise + bias. The bias is a gaussian sampled at reset
observation_noise_model: NoiseModelWithAdditiveBiasCfg = NoiseModelWithAdditiveBiasCfg(
noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.002, operation="add"),
bias_noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.0001, operation="abs"),
)
| 9,087 |
Python
| 32.167883 | 109 | 0.592165 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/franka_cabinet/franka_cabinet_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.transformations import tf_combine, tf_inverse, tf_vector
from pxr import UsdGeom
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.actuators.actuator_cfg import ImplicitActuatorCfg
from omni.isaac.lab.assets import Articulation, ArticulationCfg
from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.math import sample_uniform
@configclass
class FrankaCabinetEnvCfg(DirectRLEnvCfg):
# env
episode_length_s = 8.3333 # 500 timesteps
decimation = 2
num_actions = 9
num_observations = 23
num_states = 0
action_scale = 7.5
dof_velocity_scale = 0.1
# simulation
sim: SimulationCfg = SimulationCfg(
dt=1 / 120,
disable_contact_processing=True,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
)
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=3.0, replicate_physics=True)
# robot
robot = ArticulationCfg(
prim_path="/World/envs/env_.*/Robot",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Franka/franka_instanceable.usd",
activate_contact_sensors=False,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
max_depenetration_velocity=5.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False, solver_position_iteration_count=12, solver_velocity_iteration_count=1
),
),
init_state=ArticulationCfg.InitialStateCfg(
joint_pos={
"panda_joint1": 1.157,
"panda_joint2": -1.066,
"panda_joint3": -0.155,
"panda_joint4": -2.239,
"panda_joint5": -1.841,
"panda_joint6": 1.003,
"panda_joint7": 0.469,
"panda_finger_joint.*": 0.035,
},
pos=(1.0, 0.0, 0.0),
rot=(0.0, 0.0, 0.0, 1.0),
),
actuators={
"panda_shoulder": ImplicitActuatorCfg(
joint_names_expr=["panda_joint[1-4]"],
effort_limit=87.0,
velocity_limit=2.175,
stiffness=80.0,
damping=4.0,
),
"panda_forearm": ImplicitActuatorCfg(
joint_names_expr=["panda_joint[5-7]"],
effort_limit=12.0,
velocity_limit=2.61,
stiffness=80.0,
damping=4.0,
),
"panda_hand": ImplicitActuatorCfg(
joint_names_expr=["panda_finger_joint.*"],
effort_limit=200.0,
velocity_limit=0.2,
stiffness=2e3,
damping=1e2,
),
},
)
# cabinet
cabinet = ArticulationCfg(
prim_path="/World/envs/env_.*/Cabinet",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd",
activate_contact_sensors=False,
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0, 0.4),
rot=(0.1, 0.0, 0.0, 0.0),
joint_pos={
"door_left_joint": 0.0,
"door_right_joint": 0.0,
"drawer_bottom_joint": 0.0,
"drawer_top_joint": 0.0,
},
),
actuators={
"drawers": ImplicitActuatorCfg(
joint_names_expr=["drawer_top_joint", "drawer_bottom_joint"],
effort_limit=87.0,
velocity_limit=100.0,
stiffness=10.0,
damping=1.0,
),
"doors": ImplicitActuatorCfg(
joint_names_expr=["door_left_joint", "door_right_joint"],
effort_limit=87.0,
velocity_limit=100.0,
stiffness=10.0,
damping=2.5,
),
},
)
# ground plane
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
)
# reward scales
dist_reward_scale = 2.0
rot_reward_scale = 0.5
around_handle_reward_scale = 0.0
open_reward_scale = 7.5
action_penalty_scale = 0.01
finger_dist_reward_scale = 0.0
finger_close_reward_scale = 10.0
class FrankaCabinetEnv(DirectRLEnv):
# pre-physics step calls
# |-- _pre_physics_step(action)
# |-- _apply_action()
# post-physics step calls
# |-- _get_dones()
# |-- _get_rewards()
# |-- _reset_idx(env_ids)
# |-- _get_observations()
cfg: FrankaCabinetEnvCfg
def __init__(self, cfg: FrankaCabinetEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
def get_env_local_pose(env_pos: torch.Tensor, xformable: UsdGeom.Xformable, device: torch.device):
"""Compute pose in env-local coordinates"""
world_transform = xformable.ComputeLocalToWorldTransform(0)
world_pos = world_transform.ExtractTranslation()
world_quat = world_transform.ExtractRotationQuat()
px = world_pos[0] - env_pos[0]
py = world_pos[1] - env_pos[1]
pz = world_pos[2] - env_pos[2]
qx = world_quat.imaginary[0]
qy = world_quat.imaginary[1]
qz = world_quat.imaginary[2]
qw = world_quat.real
return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device)
self.dt = self.cfg.sim.dt * self.cfg.decimation
# create auxiliary variables for computing applied action, observations and rewards
self.robot_dof_lower_limits = self._robot.data.soft_joint_pos_limits[0, :, 0].to(device=self.device)
self.robot_dof_upper_limits = self._robot.data.soft_joint_pos_limits[0, :, 1].to(device=self.device)
self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits)
self.robot_dof_speed_scales[self._robot.find_joints("panda_finger_joint1")[0]] = 0.1
self.robot_dof_speed_scales[self._robot.find_joints("panda_finger_joint2")[0]] = 0.1
self.robot_dof_targets = torch.zeros((self.num_envs, self._robot.num_joints), device=self.device)
stage = get_current_stage()
hand_pose = get_env_local_pose(
self.scene.env_origins[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/Robot/panda_link7")),
self.device,
)
lfinger_pose = get_env_local_pose(
self.scene.env_origins[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/Robot/panda_leftfinger")),
self.device,
)
rfinger_pose = get_env_local_pose(
self.scene.env_origins[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/Robot/panda_rightfinger")),
self.device,
)
finger_pose = torch.zeros(7, device=self.device)
finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
finger_pose[3:7] = lfinger_pose[3:7]
hand_pose_inv_rot, hand_pose_inv_pos = tf_inverse(hand_pose[3:7], hand_pose[0:3])
robot_local_grasp_pose_rot, robot_local_pose_pos = tf_combine(
hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]
)
robot_local_pose_pos += torch.tensor([0, 0.04, 0], device=self.device)
self.robot_local_grasp_pos = robot_local_pose_pos.repeat((self.num_envs, 1))
self.robot_local_grasp_rot = robot_local_grasp_pose_rot.repeat((self.num_envs, 1))
drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self.device)
self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self.num_envs, 1))
self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self.num_envs, 1))
self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self.device, dtype=torch.float32).repeat(
(self.num_envs, 1)
)
self.drawer_inward_axis = torch.tensor([-1, 0, 0], device=self.device, dtype=torch.float32).repeat(
(self.num_envs, 1)
)
self.gripper_up_axis = torch.tensor([0, 1, 0], device=self.device, dtype=torch.float32).repeat(
(self.num_envs, 1)
)
self.drawer_up_axis = torch.tensor([0, 0, 1], device=self.device, dtype=torch.float32).repeat(
(self.num_envs, 1)
)
self.hand_link_idx = self._robot.find_bodies("panda_link7")[0][0]
self.left_finger_link_idx = self._robot.find_bodies("panda_leftfinger")[0][0]
self.right_finger_link_idx = self._robot.find_bodies("panda_rightfinger")[0][0]
self.drawer_link_idx = self._cabinet.find_bodies("drawer_top")[0][0]
self.robot_grasp_rot = torch.zeros((self.num_envs, 4), device=self.device)
self.robot_grasp_pos = torch.zeros((self.num_envs, 3), device=self.device)
self.drawer_grasp_rot = torch.zeros((self.num_envs, 4), device=self.device)
self.drawer_grasp_pos = torch.zeros((self.num_envs, 3), device=self.device)
def _setup_scene(self):
self._robot = Articulation(self.cfg.robot)
self._cabinet = Articulation(self.cfg.cabinet)
self.scene.articulations["robot"] = self._robot
self.scene.articulations["cabinet"] = self._cabinet
self.cfg.terrain.num_envs = self.scene.cfg.num_envs
self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing
self._terrain = self.cfg.terrain.class_type(self.cfg.terrain)
# clone, filter, and replicate
self.scene.clone_environments(copy_from_source=False)
self.scene.filter_collisions(global_prim_paths=[self.cfg.terrain.prim_path])
# add lights
light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
light_cfg.func("/World/Light", light_cfg)
# pre-physics step calls
def _pre_physics_step(self, actions: torch.Tensor):
self.actions = actions.clone().clamp(-1.0, 1.0)
targets = self.robot_dof_targets + self.robot_dof_speed_scales * self.dt * self.actions * self.cfg.action_scale
self.robot_dof_targets[:] = torch.clamp(targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits)
def _apply_action(self):
self._robot.set_joint_position_target(self.robot_dof_targets)
# post-physics step calls
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
terminated = self._cabinet.data.joint_pos[:, 3] > 0.39
truncated = self.episode_length_buf >= self.max_episode_length - 1
return terminated, truncated
def _get_rewards(self) -> torch.Tensor:
# Refresh the intermediate values after the physics steps
self._compute_intermediate_values()
robot_left_finger_pos = self._robot.data.body_pos_w[:, self.left_finger_link_idx]
robot_right_finger_pos = self._robot.data.body_pos_w[:, self.right_finger_link_idx]
return self._compute_rewards(
self.actions,
self._cabinet.data.joint_pos,
self.robot_grasp_pos,
self.drawer_grasp_pos,
self.robot_grasp_rot,
self.drawer_grasp_rot,
robot_left_finger_pos,
robot_right_finger_pos,
self.gripper_forward_axis,
self.drawer_inward_axis,
self.gripper_up_axis,
self.drawer_up_axis,
self.num_envs,
self.cfg.dist_reward_scale,
self.cfg.rot_reward_scale,
self.cfg.around_handle_reward_scale,
self.cfg.open_reward_scale,
self.cfg.finger_dist_reward_scale,
self.cfg.action_penalty_scale,
self._robot.data.joint_pos,
self.cfg.finger_close_reward_scale,
)
def _reset_idx(self, env_ids: torch.Tensor | None):
super()._reset_idx(env_ids)
# robot state
joint_pos = self._robot.data.default_joint_pos[env_ids] + sample_uniform(
-0.125,
0.125,
(len(env_ids), self._robot.num_joints),
self.device,
)
joint_pos = torch.clamp(joint_pos, self.robot_dof_lower_limits, self.robot_dof_upper_limits)
joint_vel = torch.zeros_like(joint_pos)
self._robot.set_joint_position_target(joint_pos, env_ids=env_ids)
self._robot.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
# cabinet state
zeros = torch.zeros((len(env_ids), self._cabinet.num_joints), device=self.device)
self._cabinet.write_joint_state_to_sim(zeros, zeros, env_ids=env_ids)
# Need to refresh the intermediate values so that _get_observations() can use the latest values
self._compute_intermediate_values(env_ids)
def _get_observations(self) -> dict:
dof_pos_scaled = (
2.0
* (self._robot.data.joint_pos - self.robot_dof_lower_limits)
/ (self.robot_dof_upper_limits - self.robot_dof_lower_limits)
- 1.0
)
to_target = self.drawer_grasp_pos - self.robot_grasp_pos
obs = torch.cat(
(
dof_pos_scaled,
self._robot.data.joint_vel * self.cfg.dof_velocity_scale,
to_target,
self._cabinet.data.joint_pos[:, 3].unsqueeze(-1),
self._cabinet.data.joint_vel[:, 3].unsqueeze(-1),
),
dim=-1,
)
return {"policy": torch.clamp(obs, -5.0, 5.0)}
# auxiliary methods
def _compute_intermediate_values(self, env_ids: torch.Tensor | None = None):
if env_ids is None:
env_ids = self._robot._ALL_INDICES
hand_pos = self._robot.data.body_pos_w[env_ids, self.hand_link_idx]
hand_rot = self._robot.data.body_quat_w[env_ids, self.hand_link_idx]
drawer_pos = self._cabinet.data.body_pos_w[env_ids, self.drawer_link_idx]
drawer_rot = self._cabinet.data.body_quat_w[env_ids, self.drawer_link_idx]
(
self.robot_grasp_rot[env_ids],
self.robot_grasp_pos[env_ids],
self.drawer_grasp_rot[env_ids],
self.drawer_grasp_pos[env_ids],
) = self._compute_grasp_transforms(
hand_rot,
hand_pos,
self.robot_local_grasp_rot[env_ids],
self.robot_local_grasp_pos[env_ids],
drawer_rot,
drawer_pos,
self.drawer_local_grasp_rot[env_ids],
self.drawer_local_grasp_pos[env_ids],
)
def _compute_rewards(
self,
actions,
cabinet_dof_pos,
franka_grasp_pos,
drawer_grasp_pos,
franka_grasp_rot,
drawer_grasp_rot,
franka_lfinger_pos,
franka_rfinger_pos,
gripper_forward_axis,
drawer_inward_axis,
gripper_up_axis,
drawer_up_axis,
num_envs,
dist_reward_scale,
rot_reward_scale,
around_handle_reward_scale,
open_reward_scale,
finger_dist_reward_scale,
action_penalty_scale,
joint_positions,
finger_close_reward_scale,
):
# distance from hand to the drawer
d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
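        # squaring 1/(1 + d^2) sharpens the gradient near the handle: e.g. d = 0.5 m gives ~0.64
        # and d = 0.1 m gives ~0.98; within 2 cm the reward is additionally doubled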
dist_reward = 1.0 / (1.0 + d**2)
dist_reward *= dist_reward
dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)
axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis)
axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis)
axis3 = tf_vector(franka_grasp_rot, gripper_up_axis)
axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis)
dot1 = (
torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of forward axis for gripper
dot2 = (
torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of up axis for gripper
# reward for matching the orientation of the hand to the drawer (fingers wrapped)
rot_reward = 0.5 * (torch.sign(dot1) * dot1**2 + torch.sign(dot2) * dot2**2)
        # bonus if the left finger is above the drawer handle and the right finger is below it
around_handle_reward = torch.zeros_like(rot_reward)
around_handle_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward
),
around_handle_reward,
)
# reward for distance of each finger from the drawer
finger_dist_reward = torch.zeros_like(rot_reward)
lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
finger_dist_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
(0.04 - lfinger_dist) + (0.04 - rfinger_dist),
finger_dist_reward,
),
finger_dist_reward,
)
finger_close_reward = torch.zeros_like(rot_reward)
finger_close_reward = torch.where(
d <= 0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward
)
# regularization on the actions (summed for each environment)
action_penalty = torch.sum(actions**2, dim=-1)
        # how far the cabinet drawer has been opened
open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint
rewards = (
dist_reward_scale * dist_reward
+ rot_reward_scale * rot_reward
+ around_handle_reward_scale * around_handle_reward
+ open_reward_scale * open_reward
+ finger_dist_reward_scale * finger_dist_reward
- action_penalty_scale * action_penalty
+ finger_close_reward * finger_close_reward_scale
)
self.extras["log"] = {
"dist_reward": (dist_reward_scale * dist_reward).mean(),
"rot_reward": (rot_reward_scale * rot_reward).mean(),
"around_handle_reward": (around_handle_reward_scale * around_handle_reward).mean(),
"open_reward": (open_reward_scale * open_reward).mean(),
"finger_dist_reward": (finger_dist_reward_scale * finger_dist_reward).mean(),
"action_penalty": (action_penalty_scale * action_penalty).mean(),
"finger_close_reward": (finger_close_reward * finger_close_reward_scale).mean(),
}
# bonus for opening drawer properly
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards)
return rewards
def _compute_grasp_transforms(
self,
hand_rot,
hand_pos,
franka_local_grasp_rot,
franka_local_grasp_pos,
drawer_rot,
drawer_pos,
drawer_local_grasp_rot,
drawer_local_grasp_pos,
):
global_franka_rot, global_franka_pos = tf_combine(
hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos
)
global_drawer_rot, global_drawer_pos = tf_combine(
drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos
)
return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos
| 20,965 |
Python
| 39.087954 | 119 | 0.587455 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/franka_cabinet/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Franka-Cabinet environment.
"""
import gymnasium as gym
from . import agents
from .franka_cabinet_env import FrankaCabinetEnv, FrankaCabinetEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Franka-Cabinet-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.franka_cabinet:FrankaCabinetEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": FrankaCabinetEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.FrankaCabinetPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
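# Example (sketch, not part of this file): after importing the package so the
# registration above runs, the task id can be used with gymnasium. The exact
# keyword arguments accepted depend on the DirectRLEnv wrapper, so treat the
# cfg argument below as an assumption rather than a verified signature:
#
#   import gymnasium as gym
#   import omni.isaac.lab_tasks  # noqa: F401  (runs the gym.register call above)
#
#   env = gym.make("Isaac-Franka-Cabinet-Direct-v0", cfg=FrankaCabinetEnvCfg())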
| 789 |
Python
| 26.241378 | 82 | 0.698352 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/locomotion/locomotion_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
import omni.isaac.core.utils.torch as torch_utils
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
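# NOTE: atan2(sin(x), cos(x)) wraps any angle into (-pi, pi]: an input of 3*pi/2
# comes back as -pi/2, while pi/4 is returned unchanged.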
class LocomotionEnv(DirectRLEnv):
cfg: DirectRLEnvCfg
def __init__(self, cfg: DirectRLEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
self.action_scale = self.cfg.action_scale
self.joint_gears = torch.tensor(self.cfg.joint_gears, dtype=torch.float32, device=self.sim.device)
self.motor_effort_ratio = torch.ones_like(self.joint_gears, device=self.sim.device)
self._joint_dof_idx, _ = self.robot.find_joints(".*")
self.potentials = torch.zeros(self.num_envs, dtype=torch.float32, device=self.sim.device)
self.prev_potentials = torch.zeros_like(self.potentials)
self.targets = torch.tensor([1000, 0, 0], dtype=torch.float32, device=self.sim.device).repeat(
(self.num_envs, 1)
)
self.targets += self.scene.env_origins
self.start_rotation = torch.tensor([1, 0, 0, 0], device=self.sim.device, dtype=torch.float32)
self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self.sim.device).repeat((self.num_envs, 1))
self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self.sim.device).repeat(
(self.num_envs, 1)
)
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
def _setup_scene(self):
self.robot = Articulation(self.cfg.robot)
# add ground plane
self.cfg.terrain.num_envs = self.scene.cfg.num_envs
self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing
self.terrain = self.cfg.terrain.class_type(self.cfg.terrain)
# clone, filter, and replicate
self.scene.clone_environments(copy_from_source=False)
self.scene.filter_collisions(global_prim_paths=[self.cfg.terrain.prim_path])
        # add articulation to scene
self.scene.articulations["robot"] = self.robot
# add lights
light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
light_cfg.func("/World/Light", light_cfg)
def _pre_physics_step(self, actions: torch.Tensor):
self.actions = actions.clone()
def _apply_action(self):
forces = self.action_scale * self.joint_gears * self.actions
self.robot.set_joint_effort_target(forces, joint_ids=self._joint_dof_idx)
def _compute_intermediate_values(self):
self.torso_position, self.torso_rotation = self.robot.data.root_pos_w, self.robot.data.root_quat_w
self.velocity, self.ang_velocity = self.robot.data.root_lin_vel_w, self.robot.data.root_ang_vel_w
self.dof_pos, self.dof_vel = self.robot.data.joint_pos, self.robot.data.joint_vel
(
self.up_proj,
self.heading_proj,
self.up_vec,
self.heading_vec,
self.vel_loc,
self.angvel_loc,
self.roll,
self.pitch,
self.yaw,
self.angle_to_target,
self.dof_pos_scaled,
self.prev_potentials,
self.potentials,
) = compute_intermediate_values(
self.targets,
self.torso_position,
self.torso_rotation,
self.velocity,
self.ang_velocity,
self.dof_pos,
self.robot.data.soft_joint_pos_limits[0, :, 0],
self.robot.data.soft_joint_pos_limits[0, :, 1],
self.inv_start_rot,
self.basis_vec0,
self.basis_vec1,
self.potentials,
self.prev_potentials,
self.cfg.sim.dt,
)
def _get_observations(self) -> dict:
obs = torch.cat(
(
self.torso_position[:, 2].view(-1, 1),
self.vel_loc,
self.angvel_loc * self.cfg.angular_velocity_scale,
normalize_angle(self.yaw).unsqueeze(-1),
normalize_angle(self.roll).unsqueeze(-1),
normalize_angle(self.angle_to_target).unsqueeze(-1),
self.up_proj.unsqueeze(-1),
self.heading_proj.unsqueeze(-1),
self.dof_pos_scaled,
self.dof_vel * self.cfg.dof_vel_scale,
self.actions,
),
dim=-1,
)
observations = {"policy": obs}
return observations
def _get_rewards(self) -> torch.Tensor:
total_reward = compute_rewards(
self.actions,
self.reset_terminated,
self.cfg.up_weight,
self.cfg.heading_weight,
self.heading_proj,
self.up_proj,
self.dof_vel,
self.dof_pos_scaled,
self.potentials,
self.prev_potentials,
self.cfg.actions_cost_scale,
self.cfg.energy_cost_scale,
self.cfg.dof_vel_scale,
self.cfg.death_cost,
self.cfg.alive_reward_scale,
self.motor_effort_ratio,
)
return total_reward
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
self._compute_intermediate_values()
time_out = self.episode_length_buf >= self.max_episode_length - 1
died = self.torso_position[:, 2] < self.cfg.termination_height
return died, time_out
def _reset_idx(self, env_ids: torch.Tensor | None):
if env_ids is None or len(env_ids) == self.num_envs:
env_ids = self.robot._ALL_INDICES
self.robot.reset(env_ids)
super()._reset_idx(env_ids)
joint_pos = self.robot.data.default_joint_pos[env_ids]
joint_vel = self.robot.data.default_joint_vel[env_ids]
default_root_state = self.robot.data.default_root_state[env_ids]
default_root_state[:, :3] += self.scene.env_origins[env_ids]
self.robot.write_root_pose_to_sim(default_root_state[:, :7], env_ids)
self.robot.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids)
self.robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids)
to_target = self.targets[env_ids] - default_root_state[:, :3]
to_target[:, 2] = 0.0
self.potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.cfg.sim.dt
self._compute_intermediate_values()
@torch.jit.script
def compute_rewards(
actions: torch.Tensor,
reset_terminated: torch.Tensor,
up_weight: float,
heading_weight: float,
heading_proj: torch.Tensor,
up_proj: torch.Tensor,
dof_vel: torch.Tensor,
dof_pos_scaled: torch.Tensor,
potentials: torch.Tensor,
prev_potentials: torch.Tensor,
actions_cost_scale: float,
energy_cost_scale: float,
dof_vel_scale: float,
death_cost: float,
alive_reward_scale: float,
motor_effort_ratio: torch.Tensor,
):
heading_weight_tensor = torch.ones_like(heading_proj) * heading_weight
heading_reward = torch.where(heading_proj > 0.8, heading_weight_tensor, heading_weight * heading_proj / 0.8)
# aligning up axis of robot and environment
up_reward = torch.zeros_like(heading_reward)
up_reward = torch.where(up_proj > 0.93, up_reward + up_weight, up_reward)
# energy penalty for movement
actions_cost = torch.sum(actions**2, dim=-1)
electricity_cost = torch.sum(
torch.abs(actions * dof_vel * dof_vel_scale) * motor_effort_ratio.unsqueeze(0),
dim=-1,
)
# dof at limit cost
dof_at_limit_cost = torch.sum(dof_pos_scaled > 0.98, dim=-1)
# reward for duration of staying alive
alive_reward = torch.ones_like(potentials) * alive_reward_scale
progress_reward = potentials - prev_potentials
total_reward = (
progress_reward
+ alive_reward
+ up_reward
+ heading_reward
- actions_cost_scale * actions_cost
- energy_cost_scale * electricity_cost
- dof_at_limit_cost
)
# adjust reward for fallen agents
total_reward = torch.where(reset_terminated, torch.ones_like(total_reward) * death_cost, total_reward)
return total_reward
@torch.jit.script
def compute_intermediate_values(
targets: torch.Tensor,
torso_position: torch.Tensor,
torso_rotation: torch.Tensor,
velocity: torch.Tensor,
ang_velocity: torch.Tensor,
dof_pos: torch.Tensor,
dof_lower_limits: torch.Tensor,
dof_upper_limits: torch.Tensor,
inv_start_rot: torch.Tensor,
basis_vec0: torch.Tensor,
basis_vec1: torch.Tensor,
potentials: torch.Tensor,
prev_potentials: torch.Tensor,
dt: float,
):
to_target = targets - torso_position
to_target[:, 2] = 0.0
torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2
)
vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
torso_quat, velocity, ang_velocity, targets, torso_position
)
dof_pos_scaled = torch_utils.maths.unscale(dof_pos, dof_lower_limits, dof_upper_limits)
to_target = targets - torso_position
to_target[:, 2] = 0.0
prev_potentials[:] = potentials
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
return (
up_proj,
heading_proj,
up_vec,
heading_vec,
vel_loc,
angvel_loc,
roll,
pitch,
yaw,
angle_to_target,
dof_pos_scaled,
prev_potentials,
potentials,
)
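# NOTE: the potential is the negative distance-to-target divided by dt, so the
# progress term (potentials - prev_potentials) approximates the approach speed:
# closing 0.01 m of distance in one step with dt = 1/120 s, for example,
# contributes about 0.01 * 120 = 1.2 reward.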
| 10,072 |
Python
| 35.103943 | 117 | 0.615469 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/anymal_c/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Anymal-C locomotion environment.
"""
import gymnasium as gym
from . import agents
from .anymal_c_env import AnymalCEnv, AnymalCFlatEnvCfg, AnymalCRoughEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Anymal-C-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.anymal_c:AnymalCEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": AnymalCFlatEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_flat_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.AnymalCFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-C-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct.anymal_c:AnymalCEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": AnymalCRoughEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_rough_ppo_cfg.yaml",
"rsl_rl_cfg_entry_point": agents.rsl_rl_ppo_cfg.AnymalCRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,279 |
Python
| 29.47619 | 85 | 0.679437 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/anymal_c/anymal_c_env.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation, ArticulationCfg
from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sensors import ContactSensor, ContactSensorCfg, RayCaster, RayCasterCfg, patterns
from omni.isaac.lab.sim import SimulationCfg
from omni.isaac.lab.terrains import TerrainImporterCfg
from omni.isaac.lab.utils import configclass
##
# Pre-defined configs
##
from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip
from omni.isaac.lab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip
@configclass
class AnymalCFlatEnvCfg(DirectRLEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg(
dt=1 / 200,
disable_contact_processing=True,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
)
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
debug_vis=False,
)
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=4.0, replicate_physics=True)
# robot
robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="/World/envs/env_.*/Robot")
contact_sensor: ContactSensorCfg = ContactSensorCfg(
prim_path="/World/envs/env_.*/Robot/.*", history_length=3, update_period=0.005, track_air_time=True
)
# env
episode_length_s = 20.0
decimation = 4
action_scale = 0.5
num_actions = 12
num_observations = 48
num_states = 0
# reward scales
lin_vel_reward_scale = 1.0
yaw_rate_reward_scale = 0.5
z_vel_reward_scale = -2.0
ang_vel_reward_scale = -0.05
joint_torque_reward_scale = -2.5e-5
joint_accel_reward_scale = -2.5e-7
action_rate_reward_scale = -0.01
feet_air_time_reward_scale = 0.5
    undesired_contact_reward_scale = -1.0
flat_orientation_reward_scale = -5.0
@configclass
class AnymalCRoughEnvCfg(AnymalCFlatEnvCfg):
# env
num_observations = 235
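    # NOTE (derivation, assuming the grid below includes both borders): 235 is the
    # 48 proprioceptive terms of the flat config plus 187 height-scan rays from
    # the 0.1 m grid over the 1.6 m x 1.0 m patch (17 x 11 samples).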
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="generator",
terrain_generator=ROUGH_TERRAINS_CFG,
max_init_terrain_level=9,
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
),
visual_material=sim_utils.MdlFileCfg(
mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
project_uvw=True,
),
debug_vis=False,
)
# we add a height scanner for perceptive locomotion
height_scanner = RayCasterCfg(
prim_path="/World/envs/env_.*/Robot/base",
offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
attach_yaw_only=True,
pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
debug_vis=False,
mesh_prim_paths=["/World/ground"],
)
# reward scales (override from flat config)
flat_orientation_reward_scale = 0.0
class AnymalCEnv(DirectRLEnv):
cfg: AnymalCFlatEnvCfg | AnymalCRoughEnvCfg
def __init__(self, cfg: AnymalCFlatEnvCfg | AnymalCRoughEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
# Joint position command (deviation from default joint positions)
self._actions = torch.zeros(self.num_envs, self.cfg.num_actions, device=self.device)
self._previous_actions = torch.zeros(self.num_envs, self.cfg.num_actions, device=self.device)
# X/Y linear velocity and yaw angular velocity commands
self._commands = torch.zeros(self.num_envs, 3, device=self.device)
# Logging
self._episode_sums = {
key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
for key in [
"track_lin_vel_xy_exp",
"track_ang_vel_z_exp",
"lin_vel_z_l2",
"ang_vel_xy_l2",
"dof_torques_l2",
"dof_acc_l2",
"action_rate_l2",
"feet_air_time",
"undesired_contacts",
"flat_orientation_l2",
]
}
# Get specific body indices
self._base_id, _ = self._contact_sensor.find_bodies("base")
self._feet_ids, _ = self._contact_sensor.find_bodies(".*FOOT")
        self._undesired_contact_body_ids, _ = self._contact_sensor.find_bodies(".*THIGH")
# Randomize robot friction
env_ids = self._robot._ALL_INDICES
mat_props = self._robot.root_physx_view.get_material_properties()
mat_props[:, :, :2].uniform_(0.6, 0.8)
self._robot.root_physx_view.set_material_properties(mat_props, env_ids.cpu())
# Randomize base mass
base_id, _ = self._robot.find_bodies("base")
masses = self._robot.root_physx_view.get_masses()
masses[:, base_id] += torch.zeros_like(masses[:, base_id]).uniform_(-5.0, 5.0)
self._robot.root_physx_view.set_masses(masses, env_ids.cpu())
def _setup_scene(self):
self._robot = Articulation(self.cfg.robot)
self.scene.articulations["robot"] = self._robot
self._contact_sensor = ContactSensor(self.cfg.contact_sensor)
self.scene.sensors["contact_sensor"] = self._contact_sensor
if isinstance(self.cfg, AnymalCRoughEnvCfg):
# we add a height scanner for perceptive locomotion
self._height_scanner = RayCaster(self.cfg.height_scanner)
self.scene.sensors["height_scanner"] = self._height_scanner
self.cfg.terrain.num_envs = self.scene.cfg.num_envs
self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing
self._terrain = self.cfg.terrain.class_type(self.cfg.terrain)
# clone, filter, and replicate
self.scene.clone_environments(copy_from_source=False)
self.scene.filter_collisions(global_prim_paths=[self.cfg.terrain.prim_path])
# add lights
light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
light_cfg.func("/World/Light", light_cfg)
def _pre_physics_step(self, actions: torch.Tensor):
self._actions = actions.clone()
self._processed_actions = self.cfg.action_scale * self._actions + self._robot.data.default_joint_pos
def _apply_action(self):
self._robot.set_joint_position_target(self._processed_actions)
def _get_observations(self) -> dict:
self._previous_actions = self._actions.clone()
height_data = None
if isinstance(self.cfg, AnymalCRoughEnvCfg):
height_data = (
self._height_scanner.data.pos_w[:, 2].unsqueeze(1) - self._height_scanner.data.ray_hits_w[..., 2] - 0.5
).clip(-1.0, 1.0)
obs = torch.cat(
[
tensor
for tensor in (
self._robot.data.root_lin_vel_b,
self._robot.data.root_ang_vel_b,
self._robot.data.projected_gravity_b,
self._commands,
self._robot.data.joint_pos - self._robot.data.default_joint_pos,
self._robot.data.joint_vel,
height_data,
self._actions,
)
if tensor is not None
],
dim=-1,
)
observations = {"policy": obs}
return observations
def _get_rewards(self) -> torch.Tensor:
# linear velocity tracking
lin_vel_error = torch.sum(torch.square(self._commands[:, :2] - self._robot.data.root_lin_vel_b[:, :2]), dim=1)
lin_vel_error_mapped = torch.exp(-lin_vel_error / 0.25)
# yaw rate tracking
yaw_rate_error = torch.square(self._commands[:, 2] - self._robot.data.root_ang_vel_b[:, 2])
yaw_rate_error_mapped = torch.exp(-yaw_rate_error / 0.25)
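        # NOTE: the kernel exp(-error / 0.25) maps zero squared error to a reward
        # of 1.0 and an error of 0.25 to about 0.37 (1/e), so tracking quality
        # degrades smoothly rather than through a hard cutoff.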
# z velocity tracking
z_vel_error = torch.square(self._robot.data.root_lin_vel_b[:, 2])
# angular velocity x/y
ang_vel_error = torch.sum(torch.square(self._robot.data.root_ang_vel_b[:, :2]), dim=1)
# joint torques
joint_torques = torch.sum(torch.square(self._robot.data.applied_torque), dim=1)
# joint acceleration
joint_accel = torch.sum(torch.square(self._robot.data.joint_acc), dim=1)
# action rate
action_rate = torch.sum(torch.square(self._actions - self._previous_actions), dim=1)
# feet air time
first_contact = self._contact_sensor.compute_first_contact(self.step_dt)[:, self._feet_ids]
last_air_time = self._contact_sensor.data.last_air_time[:, self._feet_ids]
air_time = torch.sum((last_air_time - 0.5) * first_contact, dim=1) * (
torch.norm(self._commands[:, :2], dim=1) > 0.1
)
        # undesired contacts
net_contact_forces = self._contact_sensor.data.net_forces_w_history
is_contact = (
            torch.max(torch.norm(net_contact_forces[:, :, self._undesired_contact_body_ids], dim=-1), dim=1)[0] > 1.0
)
contacts = torch.sum(is_contact, dim=1)
# flat orientation
flat_orientation = torch.sum(torch.square(self._robot.data.projected_gravity_b[:, :2]), dim=1)
rewards = {
"track_lin_vel_xy_exp": lin_vel_error_mapped * self.cfg.lin_vel_reward_scale * self.step_dt,
"track_ang_vel_z_exp": yaw_rate_error_mapped * self.cfg.yaw_rate_reward_scale * self.step_dt,
"lin_vel_z_l2": z_vel_error * self.cfg.z_vel_reward_scale * self.step_dt,
"ang_vel_xy_l2": ang_vel_error * self.cfg.ang_vel_reward_scale * self.step_dt,
"dof_torques_l2": joint_torques * self.cfg.joint_torque_reward_scale * self.step_dt,
"dof_acc_l2": joint_accel * self.cfg.joint_accel_reward_scale * self.step_dt,
"action_rate_l2": action_rate * self.cfg.action_rate_reward_scale * self.step_dt,
"feet_air_time": air_time * self.cfg.feet_air_time_reward_scale * self.step_dt,
"undesired_contacts": contacts * self.cfg.undersired_contact_reward_scale * self.step_dt,
"flat_orientation_l2": flat_orientation * self.cfg.flat_orientation_reward_scale * self.step_dt,
}
reward = torch.sum(torch.stack(list(rewards.values())), dim=0)
# Logging
for key, value in rewards.items():
self._episode_sums[key] += value
return reward
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
time_out = self.episode_length_buf >= self.max_episode_length - 1
net_contact_forces = self._contact_sensor.data.net_forces_w_history
died = torch.any(torch.max(torch.norm(net_contact_forces[:, :, self._base_id], dim=-1), dim=1)[0] > 1.0, dim=1)
return died, time_out
def _reset_idx(self, env_ids: torch.Tensor | None):
if env_ids is None or len(env_ids) == self.num_envs:
env_ids = self._robot._ALL_INDICES
self._robot.reset(env_ids)
super()._reset_idx(env_ids)
if len(env_ids) == self.num_envs:
# Spread out the resets to avoid spikes in training when many environments reset at a similar time
self.episode_length_buf[:] = torch.randint_like(self.episode_length_buf, high=int(self.max_episode_length))
self._actions[env_ids] = 0.0
self._previous_actions[env_ids] = 0.0
# Sample new commands
self._commands[env_ids] = torch.zeros_like(self._commands[env_ids]).uniform_(-1.0, 1.0)
# Reset robot state
joint_pos = self._robot.data.default_joint_pos[env_ids]
joint_vel = self._robot.data.default_joint_vel[env_ids]
default_root_state = self._robot.data.default_root_state[env_ids]
default_root_state[:, :3] += self._terrain.env_origins[env_ids]
self._robot.write_root_pose_to_sim(default_root_state[:, :7], env_ids)
self._robot.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids)
self._robot.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids)
# Logging
extras = dict()
for key in self._episode_sums.keys():
episodic_sum_avg = torch.mean(self._episode_sums[key][env_ids])
extras["Episode Reward/" + key] = episodic_sum_avg / self.max_episode_length_s
self._episode_sums[key][env_ids] = 0.0
self.extras["log"] = dict()
self.extras["log"].update(extras)
extras = dict()
extras["Episode Termination/base_contact"] = torch.count_nonzero(self.reset_terminated[env_ids]).item()
extras["Episode Termination/time_out"] = torch.count_nonzero(self.reset_time_outs[env_ids]).item()
self.extras["log"].update(extras)
| 13,576 |
Python
| 42.938511 | 119 | 0.617045 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/inhand_env_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import MISSING
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from omni.isaac.lab.envs import ManagerBasedRLEnvCfg
from omni.isaac.lab.managers import EventTermCfg as EventTerm
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim.simulation_cfg import PhysxCfg, SimulationCfg
from omni.isaac.lab.sim.spawners.materials.physics_materials_cfg import RigidBodyMaterialCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.lab.utils.noise import AdditiveGaussianNoiseCfg as Gnoise
import omni.isaac.lab_tasks.manager_based.manipulation.inhand.mdp as mdp
##
# Scene definition
##
@configclass
class InHandObjectSceneCfg(InteractiveSceneCfg):
"""Configuration for a scene with an object and a dexterous hand."""
# robots
robot: ArticulationCfg = MISSING
# objects
object: RigidObjectCfg = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/object",
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
kinematic_enabled=False,
disable_gravity=False,
enable_gyroscopic_forces=True,
solver_position_iteration_count=8,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.0025,
max_depenetration_velocity=1000.0,
),
mass_props=sim_utils.MassPropertiesCfg(density=400.0),
),
init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, -0.19, 0.56), rot=(1.0, 0.0, 0.0, 0.0)),
)
# lights
light = AssetBaseCfg(
prim_path="/World/light",
spawn=sim_utils.DistantLightCfg(color=(0.95, 0.95, 0.95), intensity=1000.0),
)
dome_light = AssetBaseCfg(
prim_path="/World/domeLight",
spawn=sim_utils.DomeLightCfg(color=(0.02, 0.02, 0.02), intensity=1000.0),
)
##
# MDP settings
##
@configclass
class CommandsCfg:
"""Command specifications for the MDP."""
object_pose = mdp.InHandReOrientationCommandCfg(
asset_name="object",
init_pos_offset=(0.0, 0.0, -0.04),
update_goal_on_success=True,
orientation_success_threshold=0.1,
make_quat_unique=False,
marker_pos_offset=(-0.2, -0.06, 0.08),
debug_vis=True,
)
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
joint_pos = mdp.EMAJointPositionToLimitsActionCfg(
asset_name="robot",
joint_names=[".*"],
alpha=0.95,
rescale_to_limits=True,
)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class KinematicObsGroupCfg(ObsGroup):
"""Observations with full-kinematic state information.
This does not include acceleration or force information.
"""
# observation terms (order preserved)
# -- robot terms
joint_pos = ObsTerm(func=mdp.joint_pos_limit_normalized, noise=Gnoise(std=0.005))
joint_vel = ObsTerm(func=mdp.joint_vel_rel, scale=0.2, noise=Gnoise(std=0.01))
# -- object terms
object_pos = ObsTerm(
func=mdp.root_pos_w, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")}
)
object_quat = ObsTerm(
func=mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object"), "make_quat_unique": False}
)
object_lin_vel = ObsTerm(
func=mdp.root_lin_vel_w, noise=Gnoise(std=0.002), params={"asset_cfg": SceneEntityCfg("object")}
)
object_ang_vel = ObsTerm(
func=mdp.root_ang_vel_w,
scale=0.2,
noise=Gnoise(std=0.002),
params={"asset_cfg": SceneEntityCfg("object")},
)
# -- command terms
goal_pose = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"})
goal_quat_diff = ObsTerm(
func=mdp.goal_quat_diff,
params={"asset_cfg": SceneEntityCfg("object"), "command_name": "object_pose", "make_quat_unique": False},
)
# -- action terms
last_action = ObsTerm(func=mdp.last_action)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
@configclass
class NoVelocityKinematicObsGroupCfg(KinematicObsGroupCfg):
"""Observations with partial kinematic state information.
In contrast to the full-kinematic state group, this group does not include velocity information
about the robot joints and the object root frame. This is useful for tasks where velocity information
        is unavailable or noisy.
"""
def __post_init__(self):
# call parent post init
super().__post_init__()
# set unused terms to None
self.joint_vel = None
self.object_lin_vel = None
self.object_ang_vel = None
# observation groups
policy: KinematicObsGroupCfg = KinematicObsGroupCfg()
@configclass
class EventCfg:
"""Configuration for randomization."""
# startup
# -- robot
robot_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"static_friction_range": (0.7, 1.3),
"dynamic_friction_range": (0.7, 1.3),
"restitution_range": (0.0, 0.0),
"num_buckets": 250,
},
)
robot_scale_mass = EventTerm(
func=mdp.randomize_rigid_body_mass,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"mass_distribution_params": (0.95, 1.05),
"operation": "scale",
},
)
robot_joint_stiffness_and_damping = EventTerm(
func=mdp.randomize_actuator_gains,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("robot", joint_names=".*"),
"stiffness_distribution_params": (0.3, 3.0), # default: 3.0
"damping_distribution_params": (0.75, 1.5), # default: 0.1
"operation": "scale",
"distribution": "log_uniform",
},
)
# -- object
object_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("object", body_names=".*"),
"static_friction_range": (0.7, 1.3),
"dynamic_friction_range": (0.7, 1.3),
"restitution_range": (0.0, 0.0),
"num_buckets": 250,
},
)
object_scale_mass = EventTerm(
func=mdp.randomize_rigid_body_mass,
mode="startup",
params={
"asset_cfg": SceneEntityCfg("object"),
"mass_distribution_params": (0.4, 1.6),
"operation": "scale",
},
)
# reset
reset_object = EventTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": [-0.01, 0.01], "y": [-0.01, 0.01], "z": [-0.01, 0.01]},
"velocity_range": {},
"asset_cfg": SceneEntityCfg("object", body_names=".*"),
},
)
reset_robot_joints = EventTerm(
func=mdp.reset_joints_within_limits_range,
mode="reset",
params={
"position_range": {".*": [0.2, 0.2]},
"velocity_range": {".*": [0.0, 0.0]},
"use_default_offset": True,
"operation": "scale",
},
)
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
# -- task
# track_pos_l2 = RewTerm(
# func=mdp.track_pos_l2,
# weight=-10.0,
# params={"object_cfg": SceneEntityCfg("object"), "command_name": "object_pose"},
# )
track_orientation_inv_l2 = RewTerm(
func=mdp.track_orientation_inv_l2,
weight=1.0,
params={"object_cfg": SceneEntityCfg("object"), "rot_eps": 0.1, "command_name": "object_pose"},
)
success_bonus = RewTerm(
func=mdp.success_bonus,
weight=250.0,
params={"object_cfg": SceneEntityCfg("object"), "command_name": "object_pose"},
)
# -- penalties
joint_vel_l2 = RewTerm(func=mdp.joint_vel_l2, weight=-2.5e-5)
action_l2 = RewTerm(func=mdp.action_l2, weight=-0.0001)
action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
# -- optional penalties (these are disabled by default)
# object_away_penalty = RewTerm(
# func=mdp.is_terminated_term,
# weight=-0.0,
# params={"term_keys": "object_out_of_reach"},
# )
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
max_consecutive_success = DoneTerm(
func=mdp.max_consecutive_success, params={"num_success": 50, "command_name": "object_pose"}
)
object_out_of_reach = DoneTerm(func=mdp.object_away_from_robot, params={"threshold": 0.3})
# object_out_of_reach = DoneTerm(
# func=mdp.object_away_from_goal, params={"threshold": 0.24, "command_name": "object_pose"}
# )
##
# Environment configuration
##
@configclass
class InHandObjectEnvCfg(ManagerBasedRLEnvCfg):
"""Configuration for the in hand reorientation environment."""
# Scene settings
scene: InHandObjectSceneCfg = InHandObjectSceneCfg(num_envs=8192, env_spacing=0.6)
# Simulation settings
sim: SimulationCfg = SimulationCfg(
physics_material=RigidBodyMaterialCfg(
static_friction=1.0,
dynamic_friction=1.0,
),
physx=PhysxCfg(
bounce_threshold_velocity=0.2,
gpu_max_rigid_contact_count=2**20,
gpu_max_rigid_patch_count=2**23,
),
)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
commands: CommandsCfg = CommandsCfg()
# MDP settings
rewards: RewardsCfg = RewardsCfg()
terminations: TerminationsCfg = TerminationsCfg()
events: EventCfg = EventCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 4
self.episode_length_s = 20.0
# simulation settings
self.sim.dt = 1.0 / 120.0
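        # NOTE: with decimation = 4, the policy therefore acts every
        # 4 * (1 / 120) s = 1 / 30 s (30 Hz), while physics steps at 120 Hz.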
# change viewer settings
self.viewer.eye = (2.0, 2.0, 2.0)
| 11,188 |
Python
| 31.33815 | 117 | 0.610207 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""In-hand object reorientation environment.
These environments are based on the `dexterous cube manipulation`_ environments
provided in IsaacGymEnvs repository from NVIDIA. However, they contain certain
modifications and additional features.
.. _dexterous cube manipulation: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/blob/main/isaacgymenvs/tasks/allegro_hand.py
"""
| 504 |
Python
| 32.666664 | 126 | 0.797619 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/rewards.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Functions specific to the in-hand dexterous manipulation environments."""
import torch
from typing import TYPE_CHECKING
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import RigidObject
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab.managers import SceneEntityCfg
if TYPE_CHECKING:
from .commands import InHandReOrientationCommand
def success_bonus(
env: ManagerBasedRLEnv, command_name: str, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
"""Bonus reward for successfully reaching the goal.
The object is considered to have reached the goal when the object orientation is within the threshold.
The reward is 1.0 if the object has reached the goal, otherwise 0.0.
Args:
env: The environment object.
command_name: The command term to be used for extracting the goal.
object_cfg: The configuration for the scene entity. Default is "object".
"""
# extract useful elements
asset: RigidObject = env.scene[object_cfg.name]
command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
# obtain the goal orientation
goal_quat_w = command_term.command[:, 3:7]
# obtain the threshold for the orientation error
threshold = command_term.cfg.orientation_success_threshold
# calculate the orientation error
dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w)
return dtheta <= threshold
def track_pos_l2(
env: ManagerBasedRLEnv, command_name: str, object_cfg: SceneEntityCfg = SceneEntityCfg("object")
) -> torch.Tensor:
"""Reward for tracking the object position using the L2 norm.
The reward is the distance between the object position and the goal position.
Args:
env: The environment object.
        command_name: The command term to be used for extracting the goal.
object_cfg: The configuration for the scene entity. Default is "object".
"""
# extract useful elements
asset: RigidObject = env.scene[object_cfg.name]
command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
# obtain the goal position
goal_pos_e = command_term.command[:, 0:3]
# obtain the object position in the environment frame
object_pos_e = asset.data.root_pos_w - env.scene.env_origins
return torch.norm(goal_pos_e - object_pos_e, p=2, dim=-1)
def track_orientation_inv_l2(
env: ManagerBasedRLEnv,
command_name: str,
object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
rot_eps: float = 1e-3,
) -> torch.Tensor:
"""Reward for tracking the object orientation using the inverse of the orientation error.
The reward is the inverse of the orientation error between the object orientation and the goal orientation.
Args:
env: The environment object.
command_name: The command term to be used for extracting the goal.
object_cfg: The configuration for the scene entity. Default is "object".
rot_eps: The threshold for the orientation error. Default is 1e-3.
"""
# extract useful elements
asset: RigidObject = env.scene[object_cfg.name]
command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
# obtain the goal orientation
goal_quat_w = command_term.command[:, 3:7]
# calculate the orientation error
dtheta = math_utils.quat_error_magnitude(asset.data.root_quat_w, goal_quat_w)
return 1.0 / (dtheta + rot_eps)
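# NOTE: with the rot_eps of 0.1 configured in inhand_env_cfg.py, this term is
# 10.0 at zero orientation error and 5.0 at an error of 0.1 rad, so the reward
# near the goal stays bounded instead of diverging.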
| 3,660 |
Python
| 36.742268 | 111 | 0.721858 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/events.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Functions specific to the in-hand dexterous manipulation environments."""
from __future__ import annotations
import torch
from typing import TYPE_CHECKING, Literal
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg
from omni.isaac.lab.utils.math import sample_uniform
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedEnv
class reset_joints_within_limits_range(ManagerTermBase):
"""Reset an articulation's joints to a random position in the given limit ranges.
    This function samples random values for the joint positions and velocities from the given limit ranges.
The values are then set into the physics simulation.
The parameters to the function are:
* :attr:`position_range` - a dictionary of position ranges for each joint. The keys of the dictionary are the
joint names (or regular expressions) of the asset.
* :attr:`velocity_range` - a dictionary of velocity ranges for each joint. The keys of the dictionary are the
joint names (or regular expressions) of the asset.
* :attr:`use_default_offset` - a boolean flag to indicate if the ranges are offset by the default joint state.
Defaults to False.
* :attr:`asset_cfg` - the configuration of the asset to reset. Defaults to the entity named "robot" in the scene.
* :attr:`operation` - whether the ranges are scaled values of the joint limits, or absolute limits.
Defaults to "abs".
The dictionary values are a tuple of the form ``(a, b)``. Based on the operation, these values are
interpreted differently:
* If the operation is "abs", the values are the absolute minimum and maximum values for the joint, i.e.
the joint range becomes ``[a, b]``.
* If the operation is "scale", the values are the scaling factors for the joint limits, i.e. the joint range
becomes ``[a * min_joint_limit, b * max_joint_limit]``.
If the ``a`` or the ``b`` value is ``None``, the joint limits are used instead.
Note:
If the dictionary does not contain a key, the joint position or joint velocity is set to the default value for
that joint.
"""
def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv):
# initialize the base class
super().__init__(cfg, env)
# check if the cfg has the required parameters
if "position_range" not in cfg.params or "velocity_range" not in cfg.params:
raise ValueError(
"The term 'reset_joints_within_range' requires parameters: 'position_range' and 'velocity_range'."
f" Received: {list(cfg.params.keys())}."
)
# parse the parameters
asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg", SceneEntityCfg("robot"))
use_default_offset = cfg.params.get("use_default_offset", False)
operation = cfg.params.get("operation", "abs")
# check if the operation is valid
if operation not in ["abs", "scale"]:
raise ValueError(
f"For event 'reset_joints_within_limits_range', unknown operation: '{operation}'."
" Please use 'abs' or 'scale'."
)
# extract the used quantities (to enable type-hinting)
self._asset: Articulation = env.scene[asset_cfg.name]
default_joint_pos = self._asset.data.default_joint_pos[0]
default_joint_vel = self._asset.data.default_joint_vel[0]
# create buffers to store the joint position range
self._pos_ranges = self._asset.data.soft_joint_pos_limits[0].clone()
# parse joint position ranges
pos_joint_ids = []
for joint_name, joint_range in cfg.params["position_range"].items():
# find the joint ids
joint_ids = self._asset.find_joints(joint_name)[0]
pos_joint_ids.extend(joint_ids)
# set the joint position ranges based on the given values
if operation == "abs":
if joint_range[0] is not None:
self._pos_ranges[joint_ids, 0] = joint_range[0]
if joint_range[1] is not None:
self._pos_ranges[joint_ids, 1] = joint_range[1]
elif operation == "scale":
if joint_range[0] is not None:
self._pos_ranges[joint_ids, 0] *= joint_range[0]
if joint_range[1] is not None:
self._pos_ranges[joint_ids, 1] *= joint_range[1]
else:
raise ValueError(
f"Unknown operation: '{operation}' for joint position ranges. Please use 'abs' or 'scale'."
)
# add the default offset
if use_default_offset:
self._pos_ranges[joint_ids] += default_joint_pos[joint_ids].unsqueeze(1)
# store the joint pos ids (used later to sample the joint positions)
self._pos_joint_ids = torch.tensor(pos_joint_ids, device=self._pos_ranges.device)
self._pos_ranges = self._pos_ranges[self._pos_joint_ids]
# create buffers to store the joint velocity range
self._vel_ranges = torch.stack(
[-self._asset.data.soft_joint_vel_limits[0], self._asset.data.soft_joint_vel_limits[0]], dim=1
)
# parse joint velocity ranges
vel_joint_ids = []
for joint_name, joint_range in cfg.params["velocity_range"].items():
# find the joint ids
joint_ids = self._asset.find_joints(joint_name)[0]
vel_joint_ids.extend(joint_ids)
            # set the joint velocity ranges based on the given values
if operation == "abs":
if joint_range[0] is not None:
self._vel_ranges[joint_ids, 0] = joint_range[0]
if joint_range[1] is not None:
self._vel_ranges[joint_ids, 1] = joint_range[1]
elif operation == "scale":
if joint_range[0] is not None:
self._vel_ranges[joint_ids, 0] = joint_range[0] * self._vel_ranges[joint_ids, 0]
if joint_range[1] is not None:
self._vel_ranges[joint_ids, 1] = joint_range[1] * self._vel_ranges[joint_ids, 1]
else:
raise ValueError(
f"Unknown operation: '{operation}' for joint velocity ranges. Please use 'abs' or 'scale'."
)
# add the default offset
if use_default_offset:
self._vel_ranges[joint_ids] += default_joint_vel[joint_ids].unsqueeze(1)
        # store the joint vel ids (used later to sample the joint velocities)
self._vel_joint_ids = torch.tensor(vel_joint_ids, device=self._vel_ranges.device)
self._vel_ranges = self._vel_ranges[self._vel_joint_ids]
def __call__(
self,
env: ManagerBasedEnv,
env_ids: torch.Tensor,
position_range: dict[str, tuple[float | None, float | None]],
velocity_range: dict[str, tuple[float | None, float | None]],
use_default_offset: bool = False,
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
operation: Literal["abs", "scale"] = "abs",
):
# get default joint state
joint_pos = self._asset.data.default_joint_pos[env_ids].clone()
joint_vel = self._asset.data.default_joint_vel[env_ids].clone()
# sample random joint positions for each joint
if len(self._pos_joint_ids) > 0:
joint_pos_shape = (len(env_ids), len(self._pos_joint_ids))
joint_pos[:, self._pos_joint_ids] = sample_uniform(
self._pos_ranges[:, 0], self._pos_ranges[:, 1], joint_pos_shape, device=joint_pos.device
)
# clip the joint positions to the joint limits
joint_pos_limits = self._asset.data.soft_joint_pos_limits[0, self._pos_joint_ids]
joint_pos = joint_pos.clamp(joint_pos_limits[:, 0], joint_pos_limits[:, 1])
# sample random joint velocities for each joint
if len(self._vel_joint_ids) > 0:
joint_vel_shape = (len(env_ids), len(self._vel_joint_ids))
joint_vel[:, self._vel_joint_ids] = sample_uniform(
self._vel_ranges[:, 0], self._vel_ranges[:, 1], joint_vel_shape, device=joint_vel.device
)
# clip the joint velocities to the joint limits
joint_vel_limits = self._asset.data.soft_joint_vel_limits[0, self._vel_joint_ids]
joint_vel = joint_vel.clamp(-joint_vel_limits, joint_vel_limits)
# set into the physics simulation
self._asset.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
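# Example (sketch mirroring the reset_robot_joints term in inhand_env_cfg.py;
# the values are illustrative, not tuned):
#
#   reset_robot_joints = EventTermCfg(
#       func=reset_joints_within_limits_range,
#       mode="reset",
#       params={
#           "position_range": {".*": [0.2, 0.2]},  # with "scale": 20% of each limit range
#           "velocity_range": {".*": [0.0, 0.0]},
#           "use_default_offset": True,
#           "operation": "scale",
#       },
#   )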
| 8,840 |
Python
| 46.789189 | 118 | 0.61448 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/terminations.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Functions specific to the in-hand dexterous manipulation environments."""
import torch
from typing import TYPE_CHECKING
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab.managers import SceneEntityCfg
if TYPE_CHECKING:
from .commands import InHandReOrientationCommand
def max_consecutive_success(env: ManagerBasedRLEnv, num_success: int, command_name: str) -> torch.Tensor:
"""Check if the task has been completed consecutively for a certain number of times.
Args:
env: The environment object.
num_success: Threshold for the number of consecutive successes required.
command_name: The command term to be used for extracting the goal.
"""
command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
return command_term.metrics["consecutive_success"] >= num_success
def object_away_from_goal(
env: ManagerBasedRLEnv,
threshold: float,
command_name: str,
object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
"""Check if object has gone far from the goal.
The object is considered to be out-of-reach if the distance between the goal and the object is greater
than the threshold.
Args:
env: The environment object.
        threshold: The threshold for the distance between the goal and the object.
command_name: The command term to be used for extracting the goal.
object_cfg: The configuration for the scene entity. Default is "object".
"""
# extract useful elements
command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
asset = env.scene[object_cfg.name]
# object pos
asset_pos_e = asset.data.root_pos_w - env.scene.env_origins
goal_pos_e = command_term.command[:, :3]
return torch.norm(asset_pos_e - goal_pos_e, p=2, dim=1) > threshold
def object_away_from_robot(
env: ManagerBasedRLEnv,
threshold: float,
asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"),
object_cfg: SceneEntityCfg = SceneEntityCfg("object"),
) -> torch.Tensor:
"""Check if object has gone far from the robot.
The object is considered to be out-of-reach if the distance between the robot and the object is greater
than the threshold.
Args:
env: The environment object.
threshold: The threshold for the distance between the robot and the object.
asset_cfg: The configuration for the robot entity. Default is "robot".
object_cfg: The configuration for the object entity. Default is "object".
"""
# extract useful elements
robot = env.scene[asset_cfg.name]
object = env.scene[object_cfg.name]
# compute distance
dist = torch.norm(robot.data.root_pos_w - object.data.root_pos_w, dim=1)
return dist > threshold
| 2,952 |
Python
| 34.154761 | 107 | 0.711721 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/observations.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Functions specific to the in-hand dexterous manipulation environments."""
import torch
from typing import TYPE_CHECKING
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import RigidObject
from omni.isaac.lab.envs import ManagerBasedRLEnv
from omni.isaac.lab.managers import SceneEntityCfg
if TYPE_CHECKING:
from .commands import InHandReOrientationCommand
def goal_quat_diff(
env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg, command_name: str, make_quat_unique: bool
) -> torch.Tensor:
"""Goal orientation relative to the asset's root frame.
The quaternion is represented as (w, x, y, z). The real part is always positive.
"""
# extract useful elements
asset: RigidObject = env.scene[asset_cfg.name]
command_term: InHandReOrientationCommand = env.command_manager.get_term(command_name)
# obtain the orientations
goal_quat_w = command_term.command[:, 3:7]
asset_quat_w = asset.data.root_quat_w
# compute quaternion difference
quat = math_utils.quat_mul(asset_quat_w, math_utils.quat_conjugate(goal_quat_w))
# make sure the quaternion real-part is always positive
return math_utils.quat_unique(quat) if make_quat_unique else quat
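# NOTE: quat_mul(q_object, conjugate(q_goal)) is the relative rotation between
# the object and goal frames; when the two coincide it reduces to the identity
# quaternion (1, 0, 0, 0), and quat_unique only flips the sign when needed so
# the real part stays non-negative.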
| 1,353 |
Python
| 33.717948 | 96 | 0.747228 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/commands/commands_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from dataclasses import MISSING
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.managers import CommandTermCfg
from omni.isaac.lab.markers import VisualizationMarkersCfg
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
from .orientation_command import InHandReOrientationCommand
@configclass
class InHandReOrientationCommandCfg(CommandTermCfg):
"""Configuration for the uniform 3D orientation command term.
Please refer to the :class:`InHandReOrientationCommand` class for more details.
"""
class_type: type = InHandReOrientationCommand
resampling_time_range: tuple[float, float] = (1e6, 1e6) # no resampling based on time
asset_name: str = MISSING
"""Name of the asset in the environment for which the commands are generated."""
init_pos_offset: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Position offset of the asset from its default position.
This is used to account for the offset typically present in the object's default position
so that the object is spawned at a height above the robot's palm. When the position command
is generated, the object's default position is used as the reference and the offset specified
is added to it to get the desired position of the object.
"""
make_quat_unique: bool = MISSING
"""Whether to make the quaternion unique or not.
If True, the quaternion is made unique by ensuring the real part is positive.
"""
orientation_success_threshold: float = MISSING
"""Threshold for the orientation error to consider the goal orientation to be reached."""
update_goal_on_success: bool = MISSING
"""Whether to update the goal orientation when the goal orientation is reached."""
marker_pos_offset: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Position offset of the marker from the object's desired position.
This is useful to position the marker at a height above the object's desired position.
Otherwise, the marker may occlude the object in the visualization.
"""
visualizer_cfg: VisualizationMarkersCfg = VisualizationMarkersCfg(
prim_path="/Visuals/Command/goal_marker",
markers={
"goal": sim_utils.UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
scale=(1.0, 1.0, 1.0),
),
},
)
"""Configuration for the visualization markers. Default is a cube marker."""
| 2,649 |
Python
| 37.970588 | 97 | 0.717252 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing command generators for 3D orientation goals for objects."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import RigidObject
from omni.isaac.lab.managers import CommandTerm
from omni.isaac.lab.markers.visualization_markers import VisualizationMarkers
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedRLEnv
from .commands_cfg import InHandReOrientationCommandCfg
class InHandReOrientationCommand(CommandTerm):
"""Command term that generates 3D pose commands for in-hand manipulation task.
This command term generates 3D orientation commands for the object. The orientation commands
are sampled uniformly from the 3D orientation space. The position commands are the default
root state of the object.
The constant position commands is to encourage that the object does not move during the task.
For instance, the object should not fall off the robot's palm.
Unlike typical command terms, where the goals are resampled based on time, this command term
does not resample the goals based on time. Instead, the goals are resampled when the object
reaches the goal orientation. The goal orientation is considered to be reached when the
orientation error is below a certain threshold.
"""
cfg: InHandReOrientationCommandCfg
"""Configuration for the command term."""
def __init__(self, cfg: InHandReOrientationCommandCfg, env: ManagerBasedRLEnv):
"""Initialize the command term class.
Args:
cfg: The configuration parameters for the command term.
env: The environment object.
"""
# initialize the base class
super().__init__(cfg, env)
# object
self.object: RigidObject = env.scene[cfg.asset_name]
# create buffers to store the command
# -- command: (x, y, z)
init_pos_offset = torch.tensor(cfg.init_pos_offset, dtype=torch.float, device=self.device)
self.pos_command_e = self.object.data.default_root_state[:, :3] + init_pos_offset
self.pos_command_w = self.pos_command_e + self._env.scene.env_origins
# -- orientation: (w, x, y, z)
self.quat_command_w = torch.zeros(self.num_envs, 4, device=self.device)
self.quat_command_w[:, 0] = 1.0 # set the scalar component to 1.0
# -- unit vectors
self._X_UNIT_VEC = torch.tensor([1.0, 0, 0], device=self.device).repeat((self.num_envs, 1))
self._Y_UNIT_VEC = torch.tensor([0, 1.0, 0], device=self.device).repeat((self.num_envs, 1))
self._Z_UNIT_VEC = torch.tensor([0, 0, 1.0], device=self.device).repeat((self.num_envs, 1))
# -- metrics
self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device)
self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device)
self.metrics["consecutive_success"] = torch.zeros(self.num_envs, device=self.device)
def __str__(self) -> str:
msg = "InHandManipulationCommandGenerator:\n"
msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
return msg
"""
Properties
"""
@property
def command(self) -> torch.Tensor:
"""The desired goal pose in the environment frame. Shape is (num_envs, 7)."""
return torch.cat((self.pos_command_e, self.quat_command_w), dim=-1)
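    # NOTE: downstream terms slice this tensor as command[:, 0:3] for the position
    # target and command[:, 3:7] for the goal quaternion (w, x, y, z), matching
    # the observation, reward, and termination functions in this package.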
"""
Implementation specific functions.
"""
def _update_metrics(self):
# logs data
# -- compute the orientation error
self.metrics["orientation_error"] = math_utils.quat_error_magnitude(
self.object.data.root_quat_w, self.quat_command_w
)
# -- compute the position error
self.metrics["position_error"] = torch.norm(self.object.data.root_pos_w - self.pos_command_w, dim=1)
# -- compute the number of consecutive successes
successes = self.metrics["orientation_error"] < self.cfg.orientation_success_threshold
self.metrics["consecutive_success"] += successes.float()
def _resample_command(self, env_ids: Sequence[int]):
# sample new orientation targets
rand_floats = 2.0 * torch.rand((len(env_ids), 2), device=self.device) - 1.0
# rotate randomly about x-axis and then y-axis
quat = math_utils.quat_mul(
math_utils.quat_from_angle_axis(rand_floats[:, 0] * torch.pi, self._X_UNIT_VEC[env_ids]),
math_utils.quat_from_angle_axis(rand_floats[:, 1] * torch.pi, self._Y_UNIT_VEC[env_ids]),
)
# make sure the quaternion real-part is always positive
self.quat_command_w[env_ids] = math_utils.quat_unique(quat) if self.cfg.make_quat_unique else quat
def _update_command(self):
# update the command if goal is reached
if self.cfg.update_goal_on_success:
# compute the goal resets
goal_resets = self.metrics["orientation_error"] < self.cfg.orientation_success_threshold
goal_reset_ids = goal_resets.nonzero(as_tuple=False).squeeze(-1)
# resample the goals
self._resample(goal_reset_ids)
    def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
# note: parent only deals with callbacks. not their visibility
if debug_vis:
# create markers if necessary for the first time
if not hasattr(self, "goal_marker_visualizer"):
self.goal_marker_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg)
# set visibility
self.goal_marker_visualizer.set_visibility(True)
else:
if hasattr(self, "goal_marker_visualizer"):
self.goal_marker_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# add an offset to the marker position to visualize the goal
marker_pos = self.pos_command_w + torch.tensor(self.cfg.marker_pos_offset, device=self.device)
marker_quat = self.quat_command_w
# visualize the goal marker
self.goal_marker_visualizer.visualize(translations=marker_pos, orientations=marker_quat)
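# --- Illustrative sketch (not part of the original file) ---
# A torch-only rendition of the goal-orientation sampling in `_resample_command`
# above, for readers without an Isaac Sim install. The `_demo_*` helpers below
# are local stand-ins for `math_utils.quat_from_angle_axis` and `math_utils.quat_mul`.
def _demo_quat_from_angle_axis(angle: torch.Tensor, axis: torch.Tensor) -> torch.Tensor:
    # (w, x, y, z) quaternion for a rotation of `angle` radians about unit `axis`
    half = 0.5 * angle
    return torch.cat([torch.cos(half).unsqueeze(-1), torch.sin(half).unsqueeze(-1) * axis], dim=-1)
def _demo_quat_mul(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
    # Hamilton product of two (w, x, y, z) quaternions
    w1, x1, y1, z1 = q1.unbind(-1)
    w2, x2, y2, z2 = q2.unbind(-1)
    return torch.stack(
        (
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        ),
        dim=-1,
    )
def _demo_sample_goal_quats(num_envs: int) -> torch.Tensor:
    # uniform angles in [-pi, pi] about the x-axis, then the y-axis (as in `_resample_command`)
    rand_floats = 2.0 * torch.rand(num_envs, 2) - 1.0
    x_axis = torch.tensor([1.0, 0.0, 0.0]).repeat(num_envs, 1)
    y_axis = torch.tensor([0.0, 1.0, 0.0]).repeat(num_envs, 1)
    return _demo_quat_mul(
        _demo_quat_from_angle_axis(rand_floats[:, 0] * torch.pi, x_axis),
        _demo_quat_from_angle_axis(rand_floats[:, 1] * torch.pi, y_axis),
    )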
| 6,403 |
Python
| 43.165517 | 108 | 0.668437 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/allegro_env_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.utils import configclass
import omni.isaac.lab_tasks.manager_based.manipulation.inhand.inhand_env_cfg as inhand_env_cfg
##
# Pre-defined configs
##
from omni.isaac.lab_assets import ALLEGRO_HAND_CFG # isort: skip
@configclass
class AllegroCubeEnvCfg(inhand_env_cfg.InHandObjectEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch robot to allegro hand
self.scene.robot = ALLEGRO_HAND_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
@configclass
class AllegroCubeEnvCfg_PLAY(AllegroCubeEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove termination due to timeouts
self.terminations.time_out = None
##
# Environment configuration with no velocity observations.
##
@configclass
class AllegroCubeNoVelObsEnvCfg(AllegroCubeEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# switch observation group to no velocity group
self.observations.policy = inhand_env_cfg.ObservationsCfg.NoVelocityKinematicObsGroupCfg()
@configclass
class AllegroCubeNoVelObsEnvCfg_PLAY(AllegroCubeNoVelObsEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove termination due to timeouts
self.terminations.time_out = None
| 1,882 |
Python
| 27.96923 | 98 | 0.684378 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, allegro_env_cfg
##
# Register Gym environments.
##
##
# Full kinematic state observations.
##
gym.register(
id="Isaac-Repose-Cube-Allegro-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": allegro_env_cfg.AllegroCubeEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubePPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Repose-Cube-Allegro-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": allegro_env_cfg.AllegroCubeEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubePPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
##
# Kinematic state observations without velocity information.
##
gym.register(
id="Isaac-Repose-Cube-Allegro-NoVelObs-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": allegro_env_cfg.AllegroCubeNoVelObsEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubeNoVelObsPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Repose-Cube-Allegro-NoVelObs-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": allegro_env_cfg.AllegroCubeNoVelObsEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AllegroCubeNoVelObsPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
)
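# --- Illustrative usage sketch (not part of the original file) ---
# Once registered, an environment can be instantiated through gymnasium. This
# assumes an Isaac Sim app has already been launched via `AppLauncher` (see the
# standalone workflow scripts for the full pattern):
#
#   import gymnasium as gym
#   from omni.isaac.lab_tasks.utils import parse_env_cfg
#   env_cfg = parse_env_cfg("Isaac-Repose-Cube-Allegro-v0", num_envs=16)
#   env = gym.make("Isaac-Repose-Cube-Allegro-v0", cfg=env_cfg)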
| 2,240 |
Python
| 31.47826 | 84 | 0.671429 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, ik_abs_env_cfg, ik_rel_env_cfg, joint_pos_env_cfg
##
# Register Gym environments.
##
##
# Joint Position Control
##
gym.register(
id="Isaac-Open-Drawer-Franka-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CabinetPPORunnerCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml",
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_ppo_cfg.yaml",
},
disable_env_checker=True,
)
gym.register(
id="Isaac-Open-Drawer-Franka-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
kwargs={
"env_cfg_entry_point": joint_pos_env_cfg.FrankaCabinetEnvCfg_PLAY,
},
disable_env_checker=True,
)
##
# Inverse Kinematics - Absolute Pose Control
##
gym.register(
id="Isaac-Open-Drawer-Franka-IK-Abs-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
kwargs={
"env_cfg_entry_point": ik_abs_env_cfg.FrankaCabinetEnvCfg,
},
disable_env_checker=True,
)
##
# Inverse Kinematics - Relative Pose Control
##
gym.register(
id="Isaac-Open-Drawer-Franka-IK-Rel-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
kwargs={
"env_cfg_entry_point": ik_rel_env_cfg.FrankaCabinetEnvCfg,
},
disable_env_checker=True,
)
| 1,568 |
Python
| 23.138461 | 79 | 0.672194 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/mdp/rewards.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.sensors import ContactSensor
from omni.isaac.lab.utils.math import quat_rotate_inverse, yaw_quat
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedRLEnv
def feet_air_time(
env: ManagerBasedRLEnv, command_name: str, sensor_cfg: SceneEntityCfg, threshold: float
) -> torch.Tensor:
"""Reward long steps taken by the feet using L2-kernel.
This function rewards the agent for taking steps that are longer than a threshold. This helps ensure
that the robot lifts its feet off the ground and takes steps. The reward is computed as the sum of
the time for which the feet are in the air.
If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero.
"""
# extract the used quantities (to enable type-hinting)
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
# compute the reward
first_contact = contact_sensor.compute_first_contact(env.step_dt)[:, sensor_cfg.body_ids]
last_air_time = contact_sensor.data.last_air_time[:, sensor_cfg.body_ids]
reward = torch.sum((last_air_time - threshold) * first_contact, dim=1)
# no reward for zero command
reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1
return reward
def feet_air_time_positive_biped(env: ManagerBasedRLEnv, command_name: str, threshold: float, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
    """Reward long steps taken by the feet for bipeds.
    This function rewards the agent for taking steps up to a specified threshold and for keeping
    one foot in the air at a time.
If the commands are small (i.e. the agent is not supposed to take a step), then the reward is zero.
"""
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
# compute the reward
air_time = contact_sensor.data.current_air_time[:, sensor_cfg.body_ids]
contact_time = contact_sensor.data.current_contact_time[:, sensor_cfg.body_ids]
in_contact = contact_time > 0.0
in_mode_time = torch.where(in_contact, contact_time, air_time)
single_stance = torch.sum(in_contact.int(), dim=1) == 1
reward = torch.min(torch.where(single_stance.unsqueeze(-1), in_mode_time, 0.0), dim=1)[0]
reward = torch.clamp(reward, max=threshold)
# no reward for zero command
reward *= torch.norm(env.command_manager.get_command(command_name)[:, :2], dim=1) > 0.1
return reward
def feet_slide(env: ManagerBasedRLEnv, sensor_cfg: SceneEntityCfg, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
# Penalize feet sliding
contact_sensor: ContactSensor = env.scene.sensors[sensor_cfg.name]
contacts = contact_sensor.data.net_forces_w_history[:, :, sensor_cfg.body_ids, :].norm(dim=-1).max(dim=1)[0] > 1.0
asset = env.scene[asset_cfg.name]
body_vel = asset.data.body_lin_vel_w[:, asset_cfg.body_ids, :2]
reward = torch.sum(body_vel.norm(dim=-1) * contacts, dim=1)
return reward
def track_lin_vel_xy_yaw_frame_exp(
    env: ManagerBasedRLEnv, std: float, command_name: str, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Reward tracking of linear velocity commands (xy axes) in the gravity aligned robot frame using exponential kernel."""
# extract the used quantities (to enable type-hinting)
asset = env.scene[asset_cfg.name]
vel_yaw = quat_rotate_inverse(yaw_quat(asset.data.root_quat_w), asset.data.root_lin_vel_w[:, :3])
lin_vel_error = torch.sum(
torch.square(env.command_manager.get_command(command_name)[:, :2] - vel_yaw[:, :2]), dim=1
)
return torch.exp(-lin_vel_error / std**2)
def track_ang_vel_z_world_exp(
    env: ManagerBasedRLEnv, command_name: str, std: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
) -> torch.Tensor:
"""Reward tracking of angular velocity commands (yaw) in world frame using exponential kernel."""
# extract the used quantities (to enable type-hinting)
asset = env.scene[asset_cfg.name]
ang_vel_error = torch.square(env.command_manager.get_command(command_name)[:, 2] - asset.data.root_ang_vel_w[:, 2])
return torch.exp(-ang_vel_error / std**2)
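# --- Illustrative sketch (not part of the original file) ---
# Numeric intuition for the exponential tracking kernel used above,
# reward = exp(-error / std**2): perfect tracking yields 1.0 and the
# reward decays smoothly as the squared error grows.
def _demo_exp_kernel(std: float = 0.5) -> torch.Tensor:
    error = torch.tensor([0.0, 0.25, 1.0])  # example squared velocity errors
    return torch.exp(-error / std**2)  # -> approx. [1.0000, 0.3679, 0.0183]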
| 4,373 |
Python
| 45.531914 | 124 | 0.70638 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo1FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo1RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo1RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,836 |
Python
| 31.22807 | 80 | 0.679739 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm
from omni.isaac.lab.utils import configclass
import omni.isaac.lab_tasks.manager_based.locomotion.velocity.mdp as mdp
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import (
LocomotionVelocityRoughEnvCfg,
RewardsCfg,
)
##
# Pre-defined configs
##
from omni.isaac.lab_assets import H1_MINIMAL_CFG # isort: skip
@configclass
class H1Rewards(RewardsCfg):
termination_penalty = RewTerm(func=mdp.is_terminated, weight=-200.0)
lin_vel_z_l2 = None
track_lin_vel_xy_exp = RewTerm(
func=mdp.track_lin_vel_xy_yaw_frame_exp,
weight=1.0,
params={"command_name": "base_velocity", "std": 0.5},
)
track_ang_vel_z_exp = RewTerm(
func=mdp.track_ang_vel_z_world_exp, weight=1.0, params={"command_name": "base_velocity", "std": 0.5}
)
feet_air_time = RewTerm(
func=mdp.feet_air_time_positive_biped,
weight=0.25,
params={
"command_name": "base_velocity",
"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"),
"threshold": 0.4,
},
)
feet_slide = RewTerm(
func=mdp.feet_slide,
weight=-0.25,
params={
"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*ankle_link"),
"asset_cfg": SceneEntityCfg("robot", body_names=".*ankle_link"),
},
)
# Penalize ankle joint limits
dof_pos_limits = RewTerm(
func=mdp.joint_pos_limits, weight=-1.0, params={"asset_cfg": SceneEntityCfg("robot", joint_names=".*_ankle")}
)
# Penalize deviation from default of the joints that are not essential for locomotion
joint_deviation_hip = RewTerm(
func=mdp.joint_deviation_l1,
weight=-0.2,
params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_hip_yaw", ".*_hip_roll"])},
)
joint_deviation_arms = RewTerm(
func=mdp.joint_deviation_l1,
weight=-0.2,
params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*_shoulder_.*", ".*_elbow"])},
)
joint_deviation_torso = RewTerm(
func=mdp.joint_deviation_l1, weight=-0.1, params={"asset_cfg": SceneEntityCfg("robot", joint_names="torso")}
)
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
base_contact = DoneTerm(
func=mdp.illegal_contact,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*torso_link"), "threshold": 1.0},
)
@configclass
class H1RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
rewards: H1Rewards = H1Rewards()
terminations: TerminationsCfg = TerminationsCfg()
def __post_init__(self):
# post init of parent
super().__post_init__()
# Scene
self.scene.robot = H1_MINIMAL_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/torso_link"
# Randomization
self.events.push_robot = None
self.events.add_base_mass = None
self.events.reset_robot_joints.params["position_range"] = (1.0, 1.0)
self.events.base_external_force_torque.params["asset_cfg"].body_names = [".*torso_link"]
self.events.reset_base.params = {
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (0.0, 0.0),
"y": (0.0, 0.0),
"z": (0.0, 0.0),
"roll": (0.0, 0.0),
"pitch": (0.0, 0.0),
"yaw": (0.0, 0.0),
},
}
# Terminations
self.terminations.base_contact.params["sensor_cfg"].body_names = [".*torso_link"]
# Rewards
self.rewards.undesired_contacts = None
self.rewards.flat_orientation_l2.weight = -1.0
self.rewards.dof_torques_l2.weight = 0.0
self.rewards.action_rate_l2.weight = -0.005
self.rewards.dof_acc_l2.weight = -1.25e-7
# Commands
self.commands.base_velocity.ranges.lin_vel_x = (0.0, 1.0)
self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0)
@configclass
class H1RoughEnvCfg_PLAY(H1RoughEnvCfg):
def __post_init__(self):
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
self.episode_length_s = 40.0
# spawn the robot randomly in the grid (instead of their terrain levels)
self.scene.terrain.max_init_terrain_level = None
# reduce the number of terrains to save memory
if self.scene.terrain.terrain_generator is not None:
self.scene.terrain.terrain_generator.num_rows = 5
self.scene.terrain.terrain_generator.num_cols = 5
self.scene.terrain.terrain_generator.curriculum = False
self.commands.base_velocity.ranges.lin_vel_x = (1.0, 1.0)
self.commands.base_velocity.ranges.lin_vel_y = (0.0, 0.0)
self.commands.base_velocity.ranges.ang_vel_z = (-1.0, 1.0)
self.commands.base_velocity.ranges.heading = (0.0, 0.0)
# disable randomization for play
self.observations.policy.enable_corruption = False
# remove random pushing
self.events.base_external_force_torque = None
self.events.push_robot = None
| 5,774 |
Python
| 36.258064 | 117 | 0.616903 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Rough-H1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.H1RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1RoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Rough-H1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.H1RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1RoughPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-H1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.H1FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1FlatPPORunnerCfg,
},
)
gym.register(
id="Isaac-Velocity-Flat-H1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.H1FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.H1FlatPPORunnerCfg,
},
)
| 1,430 |
Python
| 24.105263 | 72 | 0.675524 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/cassie/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Cassie-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Cassie-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.CassieFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Cassie-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Cassie-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.CassieRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.CassieRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,784 |
Python
| 30.315789 | 77 | 0.672646 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Anymal-D-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Anymal-D-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.AnymalDFlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDFlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-D-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Anymal-D-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.AnymalDRoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.AnymalDRoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,800 |
Python
| 30.596491 | 77 | 0.673333 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go2-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-Go2-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeGo2FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go2-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-Go2-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeGo2RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeGo2RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,836 |
Python
| 31.22807 | 80 | 0.679739 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, flat_env_cfg, rough_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Velocity-Flat-Unitree-A1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Flat-Unitree-A1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": flat_env_cfg.UnitreeA1FlatEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1FlatPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_flat_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-A1-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
gym.register(
id="Isaac-Velocity-Rough-Unitree-A1-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": rough_env_cfg.UnitreeA1RoughEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.UnitreeA1RoughPPORunnerCfg,
"skrl_cfg_entry_point": f"{agents.__name__}:skrl_rough_ppo_cfg.yaml",
},
)
| 1,824 |
Python
| 31.017543 | 79 | 0.677632 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/mdp/rewards.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedRLEnv
def position_command_error_tanh(env: ManagerBasedRLEnv, std: float, command_name: str) -> torch.Tensor:
"""Reward position tracking with tanh kernel."""
command = env.command_manager.get_command(command_name)
des_pos_b = command[:, :3]
distance = torch.norm(des_pos_b, dim=1)
return 1 - torch.tanh(distance / std)
def heading_command_error_abs(env: ManagerBasedRLEnv, command_name: str) -> torch.Tensor:
"""Penalize tracking orientation error."""
command = env.command_manager.get_command(command_name)
heading_b = command[:, 3]
return heading_b.abs()
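# --- Illustrative sketch (not part of the original file) ---
# Numeric intuition for the tanh position kernel above, reward = 1 - tanh(d / std):
# the reward approaches 1 as the distance d goes to zero and 0 as d grows.
def _demo_tanh_kernel(std: float = 2.0) -> torch.Tensor:
    distance = torch.tensor([0.0, 1.0, 4.0])  # example distances to the goal
    return 1 - torch.tanh(distance / std)  # -> approx. [1.0000, 0.5379, 0.0360]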
| 874 |
Python
| 30.249999 | 103 | 0.718535 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/mdp/pre_trained_policy_action.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import torch
from dataclasses import MISSING
from typing import TYPE_CHECKING
import omni.isaac.lab.utils.math as math_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.managers import ActionTerm, ActionTermCfg, ObservationGroupCfg, ObservationManager
from omni.isaac.lab.markers import VisualizationMarkers
from omni.isaac.lab.markers.config import BLUE_ARROW_X_MARKER_CFG, GREEN_ARROW_X_MARKER_CFG
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import check_file_path, read_file
if TYPE_CHECKING:
from omni.isaac.lab.envs import ManagerBasedRLEnv
class PreTrainedPolicyAction(ActionTerm):
r"""Pre-trained policy action term.
    This action term runs inference on a pre-trained policy and applies the resulting low-level actions to the robot.
The raw actions correspond to the commands for the pre-trained policy.
"""
cfg: PreTrainedPolicyActionCfg
"""The configuration of the action term."""
def __init__(self, cfg: PreTrainedPolicyActionCfg, env: ManagerBasedRLEnv) -> None:
# initialize the action term
super().__init__(cfg, env)
self.robot: Articulation = env.scene[cfg.asset_name]
# load policy
if not check_file_path(cfg.policy_path):
raise FileNotFoundError(f"Policy file '{cfg.policy_path}' does not exist.")
file_bytes = read_file(cfg.policy_path)
self.policy = torch.jit.load(file_bytes).to(env.device).eval()
self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
# prepare low level actions
self._low_level_action_term: ActionTerm = cfg.low_level_actions.class_type(cfg.low_level_actions, env)
self.low_level_actions = torch.zeros(self.num_envs, self._low_level_action_term.action_dim, device=self.device)
# remap some of the low level observations to internal observations
cfg.low_level_observations.actions.func = lambda dummy_env: self.low_level_actions
cfg.low_level_observations.actions.params = dict()
cfg.low_level_observations.velocity_commands.func = lambda dummy_env: self._raw_actions
cfg.low_level_observations.velocity_commands.params = dict()
# add the low level observations to the observation manager
self._low_level_obs_manager = ObservationManager({"ll_policy": cfg.low_level_observations}, env)
self._counter = 0
"""
Properties.
"""
@property
def action_dim(self) -> int:
return 3
@property
def raw_actions(self) -> torch.Tensor:
return self._raw_actions
@property
def processed_actions(self) -> torch.Tensor:
return self.raw_actions
"""
Operations.
"""
def process_actions(self, actions: torch.Tensor):
self._raw_actions[:] = actions
def apply_actions(self):
if self._counter % self.cfg.low_level_decimation == 0:
low_level_obs = self._low_level_obs_manager.compute_group("ll_policy")
self.low_level_actions[:] = self.policy(low_level_obs)
self._low_level_action_term.process_actions(self.low_level_actions)
self._counter = 0
self._low_level_action_term.apply_actions()
self._counter += 1
"""
Debug visualization.
"""
def _set_debug_vis_impl(self, debug_vis: bool):
# set visibility of markers
# note: parent only deals with callbacks. not their visibility
if debug_vis:
            # create markers if necessary for the first time
if not hasattr(self, "base_vel_goal_visualizer"):
# -- goal
marker_cfg = GREEN_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Actions/velocity_goal"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_goal_visualizer = VisualizationMarkers(marker_cfg)
# -- current
marker_cfg = BLUE_ARROW_X_MARKER_CFG.copy()
marker_cfg.prim_path = "/Visuals/Actions/velocity_current"
marker_cfg.markers["arrow"].scale = (0.5, 0.5, 0.5)
self.base_vel_visualizer = VisualizationMarkers(marker_cfg)
# set their visibility to true
self.base_vel_goal_visualizer.set_visibility(True)
self.base_vel_visualizer.set_visibility(True)
else:
if hasattr(self, "base_vel_goal_visualizer"):
self.base_vel_goal_visualizer.set_visibility(False)
self.base_vel_visualizer.set_visibility(False)
def _debug_vis_callback(self, event):
# get marker location
# -- base state
base_pos_w = self.robot.data.root_pos_w.clone()
base_pos_w[:, 2] += 0.5
# -- resolve the scales and quaternions
vel_des_arrow_scale, vel_des_arrow_quat = self._resolve_xy_velocity_to_arrow(self.raw_actions[:, :2])
vel_arrow_scale, vel_arrow_quat = self._resolve_xy_velocity_to_arrow(self.robot.data.root_lin_vel_b[:, :2])
# display markers
self.base_vel_goal_visualizer.visualize(base_pos_w, vel_des_arrow_quat, vel_des_arrow_scale)
self.base_vel_visualizer.visualize(base_pos_w, vel_arrow_quat, vel_arrow_scale)
"""
Internal helpers.
"""
def _resolve_xy_velocity_to_arrow(self, xy_velocity: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""Converts the XY base velocity command to arrow direction rotation."""
# obtain default scale of the marker
default_scale = self.base_vel_goal_visualizer.cfg.markers["arrow"].scale
# arrow-scale
arrow_scale = torch.tensor(default_scale, device=self.device).repeat(xy_velocity.shape[0], 1)
arrow_scale[:, 0] *= torch.linalg.norm(xy_velocity, dim=1) * 3.0
# arrow-direction
heading_angle = torch.atan2(xy_velocity[:, 1], xy_velocity[:, 0])
zeros = torch.zeros_like(heading_angle)
arrow_quat = math_utils.quat_from_euler_xyz(zeros, zeros, heading_angle)
# convert everything back from base to world frame
base_quat_w = self.robot.data.root_quat_w
arrow_quat = math_utils.quat_mul(base_quat_w, arrow_quat)
return arrow_scale, arrow_quat
@configclass
class PreTrainedPolicyActionCfg(ActionTermCfg):
"""Configuration for pre-trained policy action term.
See :class:`PreTrainedPolicyAction` for more details.
"""
class_type: type[ActionTerm] = PreTrainedPolicyAction
""" Class of the action term."""
asset_name: str = MISSING
"""Name of the asset in the environment for which the commands are generated."""
policy_path: str = MISSING
"""Path to the low level policy (.pt files)."""
low_level_decimation: int = 4
"""Decimation factor for the low level action term."""
low_level_actions: ActionTermCfg = MISSING
"""Low level action configuration."""
low_level_observations: ObservationGroupCfg = MISSING
"""Low level observation configuration."""
debug_vis: bool = True
"""Whether to visualize debug information. Defaults to False."""
| 7,301 |
Python
| 39.793296 | 119 | 0.661005 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/anymal_c/navigation_env_cfg.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from omni.isaac.lab.envs import ManagerBasedRLEnvCfg
from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import RandomizationTermCfg as RandTerm
from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.managers import TerminationTermCfg as DoneTerm
from omni.isaac.lab.utils import configclass
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
import omni.isaac.lab_tasks.manager_based.navigation.mdp as mdp
from omni.isaac.lab_tasks.manager_based.locomotion.velocity.config.anymal_c.flat_env_cfg import AnymalCFlatEnvCfg
LOW_LEVEL_ENV_CFG = AnymalCFlatEnvCfg()
@configclass
class EventCfg:
"""Configuration for events."""
reset_base = RandTerm(
func=mdp.reset_root_state_uniform,
mode="reset",
params={
"pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)},
"velocity_range": {
"x": (-0.0, 0.0),
"y": (-0.0, 0.0),
"z": (-0.0, 0.0),
"roll": (-0.0, 0.0),
"pitch": (-0.0, 0.0),
"yaw": (-0.0, 0.0),
},
},
)
@configclass
class ActionsCfg:
"""Action terms for the MDP."""
pre_trained_policy_action: mdp.PreTrainedPolicyActionCfg = mdp.PreTrainedPolicyActionCfg(
asset_name="robot",
policy_path=f"{ISAACLAB_NUCLEUS_DIR}/Policies/ANYmal-C/Blind/policy.pt",
low_level_decimation=4,
low_level_actions=LOW_LEVEL_ENV_CFG.actions.joint_pos,
low_level_observations=LOW_LEVEL_ENV_CFG.observations.policy,
)
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
projected_gravity = ObsTerm(func=mdp.projected_gravity)
pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "pose_command"})
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
termination_penalty = RewTerm(func=mdp.is_terminated, weight=-400.0)
position_tracking = RewTerm(
func=mdp.position_command_error_tanh,
weight=0.5,
params={"std": 2.0, "command_name": "pose_command"},
)
position_tracking_fine_grained = RewTerm(
func=mdp.position_command_error_tanh,
weight=0.5,
params={"std": 0.2, "command_name": "pose_command"},
)
orientation_tracking = RewTerm(
func=mdp.heading_command_error_abs,
weight=-0.2,
params={"command_name": "pose_command"},
)
@configclass
class CommandsCfg:
"""Command terms for the MDP."""
pose_command = mdp.UniformPose2dCommandCfg(
asset_name="robot",
simple_heading=False,
resampling_time_range=(8.0, 8.0),
debug_vis=True,
ranges=mdp.UniformPose2dCommandCfg.Ranges(pos_x=(-3.0, 3.0), pos_y=(-3.0, 3.0), heading=(-math.pi, math.pi)),
)
@configclass
class CurriculumCfg:
"""Curriculum terms for the MDP."""
pass
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
base_contact = DoneTerm(
func=mdp.illegal_contact,
params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
)
@configclass
class NavigationEnvCfg(ManagerBasedRLEnvCfg):
scene: SceneEntityCfg = LOW_LEVEL_ENV_CFG.scene
commands: CommandsCfg = CommandsCfg()
actions: ActionsCfg = ActionsCfg()
observations: ObservationsCfg = ObservationsCfg()
rewards: RewardsCfg = RewardsCfg()
events: EventCfg = EventCfg()
curriculum: CurriculumCfg = CurriculumCfg()
terminations: TerminationsCfg = TerminationsCfg()
def __post_init__(self):
"""Post initialization."""
self.sim.dt = LOW_LEVEL_ENV_CFG.sim.dt
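        # the high-level navigation policy acts at 10x the low-level control period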
self.decimation = LOW_LEVEL_ENV_CFG.decimation * 10
self.episode_length_s = self.commands.pose_command.resampling_time_range[1]
if self.scene.height_scanner is not None:
self.scene.height_scanner.update_period = (
self.actions.pre_trained_policy_action.low_level_decimation * self.sim.dt
)
if self.scene.contact_forces is not None:
self.scene.contact_forces.update_period = self.sim.dt
class NavigationEnvCfg_PLAY(NavigationEnvCfg):
def __post_init__(self) -> None:
# post init of parent
super().__post_init__()
# make a smaller scene for play
self.scene.num_envs = 50
self.scene.env_spacing = 2.5
# disable randomization for play
self.observations.policy.enable_corruption = False
| 5,169 |
Python
| 30.333333 | 117 | 0.651964 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/anymal_c/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import gymnasium as gym
from . import agents, navigation_env_cfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Navigation-Flat-Anymal-C-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": navigation_env_cfg.NavigationEnvCfg,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.NavigationEnvPPORunnerCfg,
},
)
gym.register(
id="Isaac-Navigation-Flat-Anymal-C-Play-v0",
entry_point="omni.isaac.lab.envs:ManagerBasedRLEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": navigation_env_cfg.NavigationEnvCfg_PLAY,
"rsl_rl_cfg_entry_point": agents.rsl_rl_cfg.NavigationEnvPPORunnerCfg,
},
)
| 869 |
Python
| 25.363636 | 78 | 0.700806 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/rsl_rl/__init__.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrappers and utilities to configure an :class:`ManagerBasedRLEnv` for RSL-RL library."""
from .exporter import export_policy_as_jit, export_policy_as_onnx
from .rl_cfg import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg
from .vecenv_wrapper import RslRlVecEnvWrapper
| 422 |
Python
| 37.454542 | 91 | 0.798578 |
isaac-sim/IsaacLab/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/utils/wrappers/rsl_rl/exporter.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import os
import torch
def export_policy_as_jit(actor_critic: object, normalizer: object | None, path: str, filename="policy.pt"):
"""Export policy into a Torch JIT file.
Args:
actor_critic: The actor-critic torch module.
normalizer: The empirical normalizer module. If None, Identity is used.
path: The path to the saving directory.
filename: The name of exported JIT file. Defaults to "policy.pt".
"""
policy_exporter = _TorchPolicyExporter(actor_critic, normalizer)
policy_exporter.export(path, filename)
def export_policy_as_onnx(
actor_critic: object, path: str, normalizer: object | None = None, filename="policy.onnx", verbose=False
):
"""Export policy into a Torch ONNX file.
Args:
actor_critic: The actor-critic torch module.
normalizer: The empirical normalizer module. If None, Identity is used.
path: The path to the saving directory.
filename: The name of exported ONNX file. Defaults to "policy.onnx".
verbose: Whether to print the model summary. Defaults to False.
"""
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
policy_exporter = _OnnxPolicyExporter(actor_critic, normalizer, verbose)
policy_exporter.export(path, filename)
"""
Helper Classes - Private.
"""
class _TorchPolicyExporter(torch.nn.Module):
"""Exporter of actor-critic into JIT file."""
def __init__(self, actor_critic, normalizer=None):
super().__init__()
self.actor = copy.deepcopy(actor_critic.actor)
self.is_recurrent = actor_critic.is_recurrent
if self.is_recurrent:
self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
self.rnn.cpu()
self.register_buffer("hidden_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
self.register_buffer("cell_state", torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size))
self.forward = self.forward_lstm
self.reset = self.reset_memory
# copy normalizer if exists
if normalizer:
self.normalizer = copy.deepcopy(normalizer)
else:
self.normalizer = torch.nn.Identity()
def forward_lstm(self, x):
x = self.normalizer(x)
x, (h, c) = self.rnn(x.unsqueeze(0), (self.hidden_state, self.cell_state))
self.hidden_state[:] = h
self.cell_state[:] = c
x = x.squeeze(0)
return self.actor(x)
def forward(self, x):
return self.actor(self.normalizer(x))
@torch.jit.export
def reset(self):
pass
def reset_memory(self):
self.hidden_state[:] = 0.0
self.cell_state[:] = 0.0
def export(self, path, filename):
os.makedirs(path, exist_ok=True)
path = os.path.join(path, filename)
self.to("cpu")
traced_script_module = torch.jit.script(self)
traced_script_module.save(path)
class _OnnxPolicyExporter(torch.nn.Module):
"""Exporter of actor-critic into ONNX file."""
def __init__(self, actor_critic, normalizer=None, verbose=False):
super().__init__()
self.verbose = verbose
self.actor = copy.deepcopy(actor_critic.actor)
self.is_recurrent = actor_critic.is_recurrent
if self.is_recurrent:
self.rnn = copy.deepcopy(actor_critic.memory_a.rnn)
self.rnn.cpu()
self.forward = self.forward_lstm
# copy normalizer if exists
if normalizer:
self.normalizer = copy.deepcopy(normalizer)
else:
self.normalizer = torch.nn.Identity()
def forward_lstm(self, x_in, h_in, c_in):
x_in = self.normalizer(x_in)
x, (h, c) = self.rnn(x_in.unsqueeze(0), (h_in, c_in))
x = x.squeeze(0)
return self.actor(x), h, c
def forward(self, x):
return self.actor(self.normalizer(x))
def export(self, path, filename):
self.to("cpu")
if self.is_recurrent:
obs = torch.zeros(1, self.rnn.input_size)
h_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
c_in = torch.zeros(self.rnn.num_layers, 1, self.rnn.hidden_size)
actions, h_out, c_out = self(obs, h_in, c_in)
torch.onnx.export(
self,
(obs, h_in, c_in),
os.path.join(path, filename),
export_params=True,
opset_version=11,
verbose=self.verbose,
input_names=["obs", "h_in", "c_in"],
output_names=["actions", "h_out", "c_out"],
dynamic_axes={},
)
else:
obs = torch.zeros(1, self.actor[0].in_features)
torch.onnx.export(
self,
obs,
os.path.join(path, filename),
export_params=True,
opset_version=11,
verbose=self.verbose,
input_names=["obs"],
output_names=["actions"],
dynamic_axes={},
)
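# --- Illustrative usage sketch (not part of the original file) ---
# Exporting a trained rsl-rl actor-critic after a training run. The `runner`
# object below is a hypothetical `rsl_rl.runners.OnPolicyRunner` instance:
#
#   export_policy_as_jit(runner.alg.actor_critic, runner.obs_normalizer, "exported", filename="policy.pt")
#   export_policy_as_onnx(runner.alg.actor_critic, path="exported", filename="policy.onnx")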
| 5,237 |
Python
| 33.460526 | 108 | 0.586786 |
isaac-sim/IsaacLab/source/standalone/demos/quadcopter.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to simulate a quadcopter.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
import torch
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to simulate a quadcopter.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.sim import SimulationContext
##
# Pre-defined configs
##
from omni.isaac.lab_assets import CRAZYFLIE_CFG # isort:skip
def main():
"""Main function."""
# Load kit helper
sim = SimulationContext(
sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False, dt=0.005, physx=sim_utils.PhysxCfg(use_gpu=False))
)
# Set main camera
sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
# Spawn things into stage
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# Robots
robot_cfg = CRAZYFLIE_CFG
robot_cfg.spawn.func("/World/Crazyflie/Robot_1", robot_cfg.spawn, translation=(1.5, 0.5, 0.42))
# create handles for the robots
robot = Articulation(robot_cfg.replace(prim_path="/World/Crazyflie/Robot.*"))
# Play the simulator
sim.reset()
# Fetch relevant parameters to make the quadcopter hover in place
prop_body_ids = robot.find_bodies("m.*_prop")[0]
robot_mass = robot.root_physx_view.get_masses().sum()
gravity = torch.tensor(sim.cfg.gravity, device=sim.device).norm()
# Now we are ready!
print("[INFO]: Setup complete...")
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 2000 == 0:
# reset counters
sim_time = 0.0
count = 0
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
robot.write_root_pose_to_sim(robot.data.default_root_state[:, :7])
robot.write_root_velocity_to_sim(robot.data.default_root_state[:, 7:])
robot.reset()
# reset command
print(">>>>>>>> Reset!")
# apply action to the robot (make the robot float in place)
forces = torch.zeros(1, 4, 3, device=sim.device)
torques = torch.zeros_like(forces)
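        # distribute the robot's weight equally across the four propellers to hover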
forces[..., 2] = robot_mass * gravity / 4.0
robot.set_external_force_and_torque(forces, torques, body_ids=prop_body_ids)
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
robot.update(sim_dt)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 3,456 |
Python
| 28.801724 | 120 | 0.643519 |
isaac-sim/IsaacLab/source/standalone/demos/bipeds.py
|
# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to simulate a bipedal robot.
"""
"""Launch Isaac Sim Simulator first."""
import argparse
import torch
from omni.isaac.lab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to simulate a bipedal robot.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.assets import Articulation
from omni.isaac.lab.sim import SimulationContext
##
# Pre-defined configs
##
from omni.isaac.lab_assets.cassie import CASSIE_CFG # isort:skip
from omni.isaac.lab_assets import H1_CFG # isort:skip
def main():
"""Main function."""
# Load kit helper
sim = SimulationContext(
sim_utils.SimulationCfg(device="cpu", use_gpu_pipeline=False, dt=0.01, physx=sim_utils.PhysxCfg(use_gpu=False))
)
# Set main camera
sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0])
# Spawn things into stage
# Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
origins = torch.tensor([
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
])
# Robots
cassie = Articulation(CASSIE_CFG.replace(prim_path="/World/Cassie"))
h1 = Articulation(H1_CFG.replace(prim_path="/World/H1"))
robots = [cassie, h1]
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Define simulation stepping
sim_dt = sim.get_physics_dt()
sim_time = 0.0
count = 0
# Simulate physics
while simulation_app.is_running():
# reset
if count % 200 == 0:
# reset counters
sim_time = 0.0
count = 0
for index, robot in enumerate(robots):
# reset dof state
joint_pos, joint_vel = robot.data.default_joint_pos, robot.data.default_joint_vel
robot.write_joint_state_to_sim(joint_pos, joint_vel)
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += origins[index]
robot.write_root_state_to_sim(root_state)
robot.reset()
# reset command
print(">>>>>>>> Reset!")
# apply action to the robot
for robot in robots:
robot.set_joint_position_target(robot.data.default_joint_pos.clone())
robot.write_data_to_sim()
# perform step
sim.step()
# update sim-time
sim_time += sim_dt
count += 1
# update buffers
for robot in robots:
robot.update(sim_dt)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 3,225 |
Python
| 27.052174 | 119 | 0.620465 |
isaac-sim/IsaacLab/docker/x11.yaml
|
services:
isaac-lab-base:
environment:
- DISPLAY
- TERM
- QT_X11_NO_MITSHM=1
- XAUTHORITY=${__ISAACLAB_TMP_XAUTH}
volumes:
- type: bind
source: ${__ISAACLAB_TMP_XAUTH}
target: ${__ISAACLAB_TMP_XAUTH}
- type: bind
source: /tmp/.X11-unix
target: /tmp/.X11-unix
- type: bind
source: /etc/localtime
target: /etc/localtime
read_only: true
isaac-lab-ros2:
environment:
- DISPLAY
- TERM
- QT_X11_NO_MITSHM=1
- XAUTHORITY=${__ISAACLAB_TMP_XAUTH}
volumes:
- type: bind
source: ${__ISAACLAB_TMP_XAUTH}
target: ${__ISAACLAB_TMP_XAUTH}
- type: bind
source: /tmp/.X11-unix
target: /tmp/.X11-unix
- type: bind
source: /etc/localtime
target: /etc/localtime
read_only: true
| 835 |
YAML
| 21.594594 | 42 | 0.558084 |
isaac-sim/IsaacLab/docs/source/how-to/add_own_library.rst
|
Adding your own learning library
================================
Isaac Lab comes pre-integrated with a number of libraries (such as RSL-RL, RL-Games, SKRL, Stable Baselines, etc.).
However, you may want to integrate your own library with Isaac Lab or use a different version of the libraries than
the one installed by Isaac Lab. This is possible as long as the library is available as Python package that supports
the Python version used by the underlying simulator. For instance, if you are using Isaac Sim 2023.1.1, you need
to ensure that the library is available for Python 3.10.
Using a different version of a library
--------------------------------------
If you want to use a different version of a library than the one installed by Isaac Lab, you can install the library
by building it from source or using a different version of the library available on PyPI.
For instance, if you want to use your own modified version of the `rsl-rl`_ library, you can follow these steps:
1. Follow the instructions for installing Isaac Lab. This will install the default version of the ``rsl-rl`` library.
2. Clone the ``rsl-rl`` library from the GitHub repository:
.. code-block:: bash
git clone [email protected]:leggedrobotics/rsl_rl.git
3. Install the library in your Python environment:
.. code-block:: bash
# Assuming you are in the root directory of the Isaac Lab repository
cd IsaacLab
# Note: If you are using a virtual environment, make sure to activate it before running the following command
./isaaclab.sh -p -m pip install -e /path/to/rsl_rl
In this case, the ``rsl-rl`` library will be installed in the Python environment used by Isaac Lab. You can now use the
``rsl-rl`` library in your experiments. To check the library version and other details, you can use the following
command:
.. code-block:: bash
./isaaclab.sh -p -m pip show rsl-rl
This should now show the location of the ``rsl-rl`` library as the directory where you cloned the library.
For instance, if you cloned the library to ``/home/user/git/rsl_rl``, the output of the above command should be:
.. code-block:: bash
Name: rsl_rl
Version: 2.0.2
Summary: Fast and simple RL algorithms implemented in pytorch
Home-page: https://github.com/leggedrobotics/rsl_rl
Author: ETH Zurich, NVIDIA CORPORATION
Author-email:
License: BSD-3
Location: /home/user/git/rsl_rl
Requires: torch, torchvision, numpy, GitPython, onnx
Required-by:
Integrating a new library
-------------------------
Adding a new library to Isaac Lab is similar to using a different version of a library. You can install the library
in your Python environment and use it in your experiments. However, if you want to integrate the library with
Isaac Lab, you will first need to make a wrapper for the library, as explained in
:ref:`how-to-env-wrappers`.
The following steps can be followed to integrate a new library with Isaac Lab:
1. Add your library as an extra-dependency in the ``setup.py`` for the extension ``omni.isaac.lab_tasks``.
This will ensure that the library is installed alongside Isaac Lab, or that a clear error is raised if the
library is not available (an illustrative sketch of such an entry is shown after this list).
2. Install your library in the Python environment used by Isaac Lab. You can do this by following the steps mentioned
in the previous section.
3. Create a wrapper for the library. You can check the module :mod:`omni.isaac.lab_tasks.utils.wrappers`
for examples of wrappers for different libraries. You can create a new wrapper for your library and add it to the
module. You can also create a new module for the wrapper if you prefer.
4. Create workflow scripts for your library to train and evaluate agents. You can check the existing workflow scripts
in the ``source/standalone/workflows`` directory for examples. You can create new workflow
scripts for your library and add them to the directory.
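As an illustration of the first step, an extra-dependency can be declared in the
``setup.py`` of the ``omni.isaac.lab_tasks`` extension. The entry below is a sketch;
the package name and version are placeholders for your own library:
.. code-block:: python
   # setup.py (excerpt): installed via `./isaaclab.sh -p -m pip install -e .[my-rl-lib]`
   EXTRAS_REQUIRE = {
       "my-rl-lib": ["my-rl-lib>=0.1.0"],  # hypothetical package name and version
   }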
Optionally, you can also add some tests and documentation for the wrapper. This will help ensure that the wrapper
works as expected and can guide users on how to use the wrapper.
* Add some tests to ensure that the wrapper works as expected and remains compatible with the library.
These tests can be added to the ``source/extensions/omni.isaac.lab_tasks/test/wrappers`` directory.
* Add some documentation for the wrapper. You can add the API documentation to the
``docs/source/api/lab_tasks/omni.isaac.lab_tasks.utils.wrappers.rst`` file.
.. _rsl-rl: https://github.com/leggedrobotics/rsl_rl
Making a physics prim fixed in the simulation
=============================================
.. currentmodule:: omni.isaac.lab
When a USD prim has physics schemas applied on it, it is affected by physics simulation.
This means that the prim can move, rotate, and collide with other prims in the simulation world.
However, there are cases where it is desirable to make certain prims static in the simulation world,
i.e. the prim should still participate in collisions but its position and orientation should not change.
The following sections describe how to spawn a prim with physics schemas and make it static in the simulation world.
Static colliders
----------------
Static colliders are prims that are not affected by physics but can collide with other prims in the simulation world.
These don't have any rigid body properties applied on them. However, this also means that they can't be accessed
using the physics tensor API (i.e., through the :class:`assets.RigidObject` class).
For instance, to spawn a cone static in the simulation world, the following code can be used:
.. code-block:: python
import omni.isaac.lab.sim as sim_utils
cone_spawn_cfg = sim_utils.ConeCfg(
radius=0.15,
height=0.5,
collision_props=sim_utils.CollisionPropertiesCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
)
cone_spawn_cfg.func(
"/World/Cone", cone_spawn_cfg, translation=(0.0, 0.0, 2.0), orientation=(0.5, 0.0, 0.5, 0.0)
)
Rigid object
------------
Rigid objects (i.e. object only has a single body) can be made static by setting the parameter
:attr:`sim.schemas.RigidBodyPropertiesCfg.kinematic_enabled` as True. This will make the object
kinematic and it will not be affected by physics.
For instance, to spawn a cone static in the simulation world but with rigid body schema on it,
the following code can be used:
.. code-block:: python
import omni.isaac.lab.sim as sim_utils
cone_spawn_cfg = sim_utils.ConeCfg(
radius=0.15,
height=0.5,
rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
collision_props=sim_utils.CollisionPropertiesCfg(),
visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)),
)
cone_spawn_cfg.func(
"/World/Cone", cone_spawn_cfg, translation=(0.0, 0.0, 2.0), orientation=(0.5, 0.0, 0.5, 0.0)
)
Articulation
------------
Fixing the root of an articulation requires having a fixed joint to the root rigid body link of the articulation.
This can be achieved by setting the parameter :attr:`sim.schemas.ArticulationRootPropertiesCfg.fix_root_link`
as True. Based on the value of this parameter, the following cases are possible:
* If set to :obj:`None`, the root link is not modified.
* If the articulation already has a fixed root link, this flag will enable or disable the fixed joint.
* If the articulation does not have a fixed root link, this flag will create a fixed joint between the world
frame and the root link. The joint is created with the name "FixedJoint" under the root link.
For instance, to spawn an ANYmal robot and make it static in the simulation world, the following code can be used:
.. code-block:: python
import omni.isaac.lab.sim as sim_utils
from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
anymal_spawn_cfg = sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd",
activate_contact_sensors=True,
rigid_props=sim_utils.RigidBodyPropertiesCfg(
disable_gravity=False,
retain_accelerations=False,
linear_damping=0.0,
angular_damping=0.0,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=1.0,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=True,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
fix_root_link=True,
),
)
anymal_spawn_cfg.func(
"/World/ANYmal", anymal_spawn_cfg, translation=(0.0, 0.0, 0.8), orientation=(1.0, 0.0, 0.0, 0.0)
)
This will create a fixed joint between the world frame and the root link of the ANYmal robot
at the prim path ``"/World/ANYmal/base/FixedJoint"`` since the root link is at the path ``"/World/ANYmal/base"``.
Further notes
-------------
Given the flexibility of USD asset design, the following scenarios are usually encountered:
1. **Articulation root schema on the rigid body prim without a fixed joint**:
This is the most common and recommended scenario for floating-base articulations. The root prim
has both the rigid body and the articulation root properties. In this case, the articulation root
is parsed as a floating-base with the root prim of the articulation ``Link0Xform``.
.. code-block:: text
ArticulationXform
└── Link0Xform (RigidBody and ArticulationRoot schema)
2. **Articulation root schema on the parent prim with a fixed joint**:
This is the expected arrangement for fixed-base articulations. The root prim has only the rigid body
properties and the articulation root properties are applied to its parent prim. In this case, the
articulation root is parsed as a fixed-base with the root prim of the articulation ``Link0Xform``.
.. code-block:: text
ArticulationXform (ArticulationRoot schema)
└── Link0Xform (RigidBody schema)
└── FixedJoint (connecting the world frame and Link0Xform)
3. **Articulation root schema on the parent prim without a fixed joint**:
This is a scenario where the root prim has only the rigid body properties and the articulation root properties
are applied to its parent prim. However, the fixed joint is not created between the world frame and the root link.
In this case, the articulation is parsed as a floating-base system. However, the PhysX parser uses its own
heuristic (such as alphabetical order) to determine the root prim of the articulation. It may select the root prim
at ``Link0Xform`` or choose another prim as the root prim.
.. code-block:: text
ArticulationXform (ArticulationRoot schema)
└── Link0Xform (RigidBody schema)
4. **Articulation root schema on the rigid body prim with a fixed joint**:
While this is a valid scenario, it is not recommended as it may lead to unexpected behavior. In this case,
the articulation is still parsed as a floating-base system. However, the fixed joint, created between the
world frame and the root link, is considered as a part of the maximal coordinate tree. This is different from
PhysX considering the articulation as a fixed-base system. Hence, the simulation may not behave as expected.
.. code-block:: text
ArticulationXform
└── Link0Xform (RigidBody and ArticulationRoot schema)
└── FixedJoint (connecting the world frame and Link0Xform)
For floating base articulations, the root prim usually has both the rigid body and the articulation
root properties. However, directly connecting this prim to the world frame will cause the simulation
to consider the fixed joint as a part of the maximal coordinate tree. This is different from PhysX
considering the articulation as a fixed-base system.
Internally, when the parameter :attr:`sim.schemas.ArticulationRootPropertiesCfg.fix_root_link` is set to True
and the articulation is detected as a floating-base system, the fixed joint is created between the world frame
and the root rigid body link of the articulation. However, to make the PhysX parser consider the articulation as a
fixed-base system, the articulation root properties are removed from the root rigid body prim and applied to
its parent prim instead.
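If the asset is already spawned on the stage, the same flag can also be applied afterwards through the
schemas module. A minimal sketch, assuming the robot from the earlier example at ``/World/ANYmal``:

.. code-block:: python

   from omni.isaac.lab.sim import schemas

   # modify the articulation root schema on the existing prim to fix its root link
   schemas.modify_articulation_root_properties(
       prim_path="/World/ANYmal",
       cfg=schemas.ArticulationRootPropertiesCfg(fix_root_link=True),
   )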
.. note::
In a future release of Isaac Sim, an explicit flag will be added to the articulation root schema from PhysX
to toggle between fixed-base and floating-base systems. This will remove the need for the above workaround.
.. _tutorial-create-oige-rl-env:
Creating a Direct Workflow RL Environment
=========================================
.. currentmodule:: omni.isaac.lab
In addition to the :class:`envs.ManagerBasedRLEnv` class, which encourages the use of configuration classes
for more modular environments, the :class:`~omni.isaac.lab.envs.DirectRLEnv` class allows for more direct control
in the scripting of the environment.
Instead of using Manager classes for defining rewards and observations, the direct workflow tasks
implement the full reward and observation functions directly in the task script.
This allows for more control in the implementation of the methods, such as using PyTorch JIT
features, and provides a less abstracted framework that makes it easier to find the various
pieces of code.
In this tutorial, we will configure the cartpole environment using the direct workflow implementation to create a task
for balancing the pole upright. We will learn how to specify the task by implementing functions
for scene creation, actions, resets, rewards and observations.
The Code
~~~~~~~~
For this tutorial, we use the cartpole environment defined in ``omni.isaac.lab_tasks.direct.cartpole`` module.
.. dropdown:: Code for cartpole_env.py
:icon: code
.. literalinclude:: ../../../../source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py
:language: python
:linenos:
The Code Explained
~~~~~~~~~~~~~~~~~~
Similar to the manager-based environments, a configuration class is defined for the task to hold settings
for the simulation parameters, the scene, the actors, and the task. With the direct workflow implementation,
the :class:`envs.DirectRLEnvCfg` class is used as the base class for configurations.
Since the direct workflow implementation does not use Action and Observation managers, the task
config should define the number of actions and observations for the environment.
.. code-block:: python
@configclass
class CartpoleEnvCfg(DirectRLEnvCfg):
...
num_actions = 1
num_observations = 4
num_states = 0
The config class can also be used to define task-specific attributes, such as scaling for reward terms
and thresholds for reset conditions.
.. code-block:: python
@configclass
class CartpoleEnvCfg(DirectRLEnvCfg):
...
# reset
max_cart_pos = 3.0
initial_pole_angle_range = [-0.25, 0.25]
# reward scales
rew_scale_alive = 1.0
rew_scale_terminated = -2.0
rew_scale_pole_pos = -1.0
rew_scale_cart_vel = -0.01
rew_scale_pole_vel = -0.005
When creating a new environment, the code should define a new class that inherits from :class:`~omni.isaac.lab.envs.DirectRLEnv`.
.. code-block:: python
class CartpoleEnv(DirectRLEnv):
cfg: CartpoleEnvCfg
def __init__(self, cfg: CartpoleEnvCfg, render_mode: str | None = None, **kwargs):
super().__init__(cfg, render_mode, **kwargs)
The class can also hold class variables that are accessible by all functions in the class,
including functions for applying actions, computing resets, rewards, and observations.
Scene Creation
--------------
In contrast to manager-based environments where the scene creation is taken care of by the framework,
the direct workflow implementation provides flexibility for users to implement their own scene creation
function. This includes adding actors into the stage, cloning the environments, filtering collisions
between the environments, adding the actors into the scene, and adding any additional props to the
scene, such as ground plane and lights. These operations should be implemented in the
``_setup_scene(self)`` method.
.. literalinclude:: ../../../../source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py
:language: python
:pyobject: CartpoleEnv._setup_scene
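As a reference, a minimal sketch of the kind of operations performed in this method is shown below
(the attribute and configuration names are illustrative):

.. code-block:: python

   def _setup_scene(self):
       # add the cartpole articulation to the stage
       self.cartpole = Articulation(self.cfg.robot_cfg)
       # add a ground plane
       spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg())
       # clone environments and filter collisions between them
       self.scene.clone_environments(copy_from_source=False)
       self.scene.filter_collisions(global_prim_paths=[])
       # register the articulation with the scene so that its buffers are updated
       self.scene.articulations["cartpole"] = self.cartpole
       # add a dome light
       light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
       light_cfg.func("/World/Light", light_cfg)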
Defining Rewards
----------------
The reward function should be defined in the ``_get_rewards(self)`` API, which returns the reward
buffer. Within this function, the task is free to implement the logic of
the reward function. In this example, we implement a PyTorch JIT-scripted function that computes
the various components of the reward function.
.. code-block:: python
def _get_rewards(self) -> torch.Tensor:
total_reward = compute_rewards(
self.cfg.rew_scale_alive,
self.cfg.rew_scale_terminated,
self.cfg.rew_scale_pole_pos,
self.cfg.rew_scale_cart_vel,
self.cfg.rew_scale_pole_vel,
self.joint_pos[:, self._pole_dof_idx[0]],
self.joint_vel[:, self._pole_dof_idx[0]],
self.joint_pos[:, self._cart_dof_idx[0]],
self.joint_vel[:, self._cart_dof_idx[0]],
self.reset_terminated,
)
return total_reward
@torch.jit.script
def compute_rewards(
rew_scale_alive: float,
rew_scale_terminated: float,
rew_scale_pole_pos: float,
rew_scale_cart_vel: float,
rew_scale_pole_vel: float,
pole_pos: torch.Tensor,
pole_vel: torch.Tensor,
cart_pos: torch.Tensor,
cart_vel: torch.Tensor,
reset_terminated: torch.Tensor,
):
rew_alive = rew_scale_alive * (1.0 - reset_terminated.float())
rew_termination = rew_scale_terminated * reset_terminated.float()
rew_pole_pos = rew_scale_pole_pos * torch.sum(torch.square(pole_pos), dim=-1)
rew_cart_vel = rew_scale_cart_vel * torch.sum(torch.abs(cart_vel), dim=-1)
rew_pole_vel = rew_scale_pole_vel * torch.sum(torch.abs(pole_vel), dim=-1)
total_reward = rew_alive + rew_termination + rew_pole_pos + rew_cart_vel + rew_pole_vel
return total_reward
Defining Observations
---------------------
The observation buffer should be computed in the ``_get_observations(self)`` function,
which constructs the observation buffer for the environment. At the end of this API,
a dictionary should be returned that contains ``policy`` as the key, and the full
observation buffer as the value. For asymmetric policies, the dictionary should also
include the key ``critic`` and the states buffer as the value.
.. literalinclude:: ../../../../source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py
:language: python
:pyobject: CartpoleEnv._get_observations
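For reference, a minimal sketch of such a function is shown below (the buffer and index names are
illustrative; the ``critic`` entry is only needed for asymmetric policies):

.. code-block:: python

   def _get_observations(self) -> dict:
       # concatenate the joint states into the policy observation buffer
       obs = torch.cat(
           (
               self.joint_pos[:, self._pole_dof_idx[0]].unsqueeze(dim=1),
               self.joint_vel[:, self._pole_dof_idx[0]].unsqueeze(dim=1),
               self.joint_pos[:, self._cart_dof_idx[0]].unsqueeze(dim=1),
               self.joint_vel[:, self._cart_dof_idx[0]].unsqueeze(dim=1),
           ),
           dim=-1,
       )
       observations = {"policy": obs}
       # for asymmetric policies, also provide the privileged states buffer:
       # observations["critic"] = states
       return observations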
Computing Dones and Performing Resets
-------------------------------------
Populating the ``dones`` buffer should be done in the ``_get_dones(self)`` method.
This method is free to implement logic that computes which environments would need to be reset
and which environments have reached the episode length limit. Both results should be
returned by the ``_get_dones(self)`` function, in the form of a tuple of boolean tensors.
.. literalinclude:: ../../../../source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py
:language: python
:pyobject: CartpoleEnv._get_dones
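As a minimal sketch, the method could be structured as follows (the threshold and index names are
illustrative):

.. code-block:: python

   def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
       # environments that have reached the maximum episode length
       time_out = self.episode_length_buf >= self.max_episode_length - 1
       # environments that violated a termination condition, e.g. cart out of bounds
       out_of_bounds = torch.any(
           torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1
       )
       return out_of_bounds, time_out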
Once the indices for environments requiring reset have been computed, the ``_reset_idx(self, env_ids)``
function performs the reset operations on those environments. Within this function, new states
for the environments requiring reset should be set directly into simulation.
.. literalinclude:: ../../../../source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py
:language: python
:pyobject: CartpoleEnv._reset_idx
Applying Actions
----------------
There are two APIs that are designed for working with actions. The ``_pre_physics_step(self, actions)`` takes in actions
from the policy as an argument and is called once per RL step, prior to taking any physics steps. This function can
be used to process the actions buffer from the policy and cache the data in a class variable for the environment.
.. literalinclude:: ../../../../source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py
:language: python
:pyobject: CartpoleEnv._pre_physics_step
The ``_apply_action(self)`` API is called ``decimation`` number of times for each RL step, prior to taking
each physics step. This provides more flexibility for environments where actions should be applied
for each physics step.
.. literalinclude:: ../../../../source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py
:language: python
:pyobject: CartpoleEnv._apply_action
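Putting the two together, a minimal sketch for the cartpole task could look as follows (the action
scale and joint indices are illustrative):

.. code-block:: python

   def _pre_physics_step(self, actions: torch.Tensor) -> None:
       # cache the actions so that they can be applied at every physics step
       self.actions = self.cfg.action_scale * actions.clone()

   def _apply_action(self) -> None:
       # apply the cached actions as joint effort targets on the cart joint
       self.cartpole.set_joint_effort_target(self.actions, joint_ids=self._cart_dof_idx)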
The Code Execution
~~~~~~~~~~~~~~~~~~
To run training for the direct workflow Cartpole environment, we can use the following command:
.. code-block:: bash
./isaaclab.sh -p source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-Direct-v0
All direct workflow tasks have the suffix ``-Direct`` added to the task name to differentiate the implementation style.
Domain Randomization
~~~~~~~~~~~~~~~~~~~~
In the direct workflow, domain randomization configuration uses the :class:`~omni.isaac.lab.utils.configclass` module
to specify a configuration class consisting of :class:`~managers.EventTermCfg` variables.
Below is an example of a configuration class for domain randomization:
.. code-block:: python
@configclass
class EventCfg:
robot_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"static_friction_range": (0.7, 1.3),
"dynamic_friction_range": (1.0, 1.0),
"restitution_range": (1.0, 1.0),
"num_buckets": 250,
},
)
robot_joint_stiffness_and_damping = EventTerm(
func=mdp.randomize_actuator_gains,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", joint_names=".*"),
"stiffness_distribution_params": (0.75, 1.5),
"damping_distribution_params": (0.3, 3.0),
"operation": "scale",
"distribution": "log_uniform",
},
)
reset_gravity = EventTerm(
func=mdp.randomize_physics_scene_gravity,
mode="interval",
is_global_time=True,
interval_range_s=(36.0, 36.0), # time_s = num_steps * (decimation * dt)
params={
"gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.4]),
"operation": "add",
"distribution": "gaussian",
},
)
Each ``EventTerm`` object is of the :class:`~managers.EventTermCfg` class and takes in a ``func`` parameter
for specifying the function to call during randomization, a ``mode`` parameter, which can be ``startup``,
``reset`` or ``interval``. The ``params`` dictionary should provide the necessary arguments to the
function that is specified in the ``func`` parameter.
Functions specified as ``func`` for the ``EventTerm`` can be found in the :class:`~envs.mdp.events` module.
Note that as part of the ``"asset_cfg": SceneEntityCfg("robot", body_names=".*")`` parameter, the name of
the actor ``"robot"`` is provided, along with the body or joint names specified as a regular expression,
which will be the actors and bodies/joints that will have randomization applied.
Once the ``configclass`` for the randomization terms has been set up, the class must be added
to the base config class for the task and be assigned to the variable ``events``.
.. code-block:: python
@configclass
class MyTaskConfig:
events: EventCfg = EventCfg()
Action and Observation Noise
----------------------------
Actions and observation noise can also be added using the :class:`~utils.configclass` module.
Action and observation noise configs must be added to the main task config using the
``action_noise_model`` and ``observation_noise_model`` variables:
.. code-block:: python
@configclass
class MyTaskConfig:
# at every time-step add gaussian noise + bias. The bias is a gaussian sampled at reset
action_noise_model: NoiseModelWithAdditiveBiasCfg = NoiseModelWithAdditiveBiasCfg(
noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.05, operation="add"),
bias_noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.015, operation="abs"),
)
# at every time-step add gaussian noise + bias. The bias is a gaussian sampled at reset
observation_noise_model: NoiseModelWithAdditiveBiasCfg = NoiseModelWithAdditiveBiasCfg(
noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.002, operation="add"),
bias_noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.0001, operation="abs"),
)
:class:`~.utils.noise.NoiseModelWithAdditiveBiasCfg` can be used to sample both uncorrelated noise
per step as well as correlated noise that is re-sampled at reset time.
The ``noise_cfg`` term specifies the Gaussian distribution that will be sampled at each
step for all environments. This noise will be added to the corresponding actions and
observations buffers at every step.
The ``bias_noise_cfg`` term specifies the Gaussian distribution for the correlated noise
that will be sampled at reset time for the environments being reset. The same noise
will be applied at each step for the remainder of the episode for those environments and
resampled at the next reset.
If only per-step noise is desired, :class:`~utils.noise.GaussianNoiseCfg` can be used
to specify an additive Gaussian distribution that adds the sampled noise to the input buffer.
.. code-block:: python
@configclass
class MyTaskConfig:
action_noise_model: GaussianNoiseCfg = GaussianNoiseCfg(mean=0.0, std=0.05, operation="add")
In this tutorial, we learnt how to create a direct workflow task environment for reinforcement learning. We do this
by extending the base environment to include the scene setup, actions, dones, reset, reward and observation functions.
While it is possible to manually create an instance of :class:`~omni.isaac.lab.envs.DirectRLEnv` class for a desired task,
this is not scalable as it requires specialized scripts for each task. Thus, we exploit the
:meth:`gymnasium.make` function to create the environment with the gym interface. We will learn how to do this
in the next tutorial.
Designing an Environment
========================
The following tutorials introduce the concept of manager-based environments: :class:`~omni.isaac.lab.envs.ManagerBasedEnv`
and its derivative :class:`~omni.isaac.lab.envs.ManagerBasedRLEnv`, as well as the direct workflow base class
:class:`~omni.isaac.lab.envs.DirectRLEnv`. These environments bring together
different aspects of the framework to create a simulation environment for agent interaction.
.. toctree::
:maxdepth: 1
:titlesonly:
create_base_env
create_rl_env
create_direct_rl_env
register_rl_env_gym
run_rl_training
Integrating Sensors
===================
The following tutorial shows you how to integrate sensors into the simulation environment. The
tutorials introduce the :class:`~omni.isaac.lab.sensors.SensorBase` class and its derivatives
such as :class:`~omni.isaac.lab.sensors.Camera` and :class:`~omni.isaac.lab.sensors.RayCaster`.
.. toctree::
:maxdepth: 1
:titlesonly:
add_sensors_on_robot
Building your Own Project
=========================
Traditionally, building new projects that utilize Isaac Lab's features required creating your own
extensions within the Isaac Lab repository. However, this approach can obscure project visibility and
complicate updates from one version of Isaac Lab to another. To circumvent these challenges, we now
provide a pre-configured and customizable `extension template <https://github.com/isaac-sim/IsaacLab.ext_template>`_
for creating projects in an isolated environment.
This template serves three distinct use cases:
* **Project Template**: Provides essential access to Isaac Sim and Isaac Lab's features, making it ideal for projects
that require a standalone environment.
* **Python Package**: Facilitates integration with Isaac Sim's native or virtual Python environment, allowing for
the creation of Python packages that can be shared and reused across multiple projects.
* **Omniverse Extension**: Supports direct integration into the Omniverse extension workflow.
.. note::
We recommend using the extension template for new projects, as it provides a more streamlined and
efficient workflow. Additionally, it ensures that your project remains up-to-date with the latest
features and improvements in Isaac Lab.
To get started, please follow the instructions in the `extension template repository <https://github.com/isaac-sim/IsaacLab.ext_template>`_.
Installation using Isaac Sim pip
================================
Installing Isaac Sim
--------------------
.. note::
Installing Isaac Sim from pip is currently an experimental feature.
If errors occur, please report them to the
`Isaac Sim Forums <https://docs.omniverse.nvidia.com/isaacsim/latest/common/feedback.html>`_
and install Isaac Sim from pre-built binaries.
- To use the pip installation approach for Isaac Sim, we recommend first creating a virtual environment.
Ensure that the python version of the virtual environment is **Python 3.10**.
.. tabs::
.. tab:: Conda
.. code-block:: bash
conda create -n isaaclab python=3.10
conda activate isaaclab
.. tab:: Virtual environment (venv)
.. code-block:: bash
python3.10 -m venv isaaclab
# on Linux
source isaaclab/bin/activate
# on Windows
isaaclab\Scripts\activate
- Next, install a CUDA-enabled PyTorch 2.2.2 build based on the CUDA version available on your system.
.. tabs::
.. tab:: CUDA 11
.. code-block:: bash
pip install torch==2.2.2 --index-url https://download.pytorch.org/whl/cu118
.. tab:: CUDA 12
.. code-block:: bash
pip install torch==2.2.2 --index-url https://download.pytorch.org/whl/cu121
- Then, install the Isaac Sim packages necessary for running Isaac Lab:
.. code-block:: bash
pip install isaacsim-rl isaacsim-replicator --index-url https://pypi.nvidia.com/
Installing Isaac Lab
--------------------
Cloning Isaac Lab
~~~~~~~~~~~~~~~~~
.. note::
We recommend making a `fork <https://github.com/isaac-sim/IsaacLab/fork>`_ of the Isaac Lab repository to contribute
to the project but this is not mandatory to use the framework. If you
make a fork, please replace ``isaac-sim`` with your username
in the following instructions.
Clone the Isaac Lab repository into your workspace:
.. code:: bash
# Option 1: With SSH
git clone [email protected]:isaac-sim/IsaacLab.git
# Option 2: With HTTPS
git clone https://github.com/isaac-sim/IsaacLab.git
.. note::
We provide a helper executable `isaaclab.sh <https://github.com/isaac-sim/IsaacLab/blob/main/isaaclab.sh>`_ that provides
utilities to manage extensions:
.. tabs::
.. tab:: Linux
.. code:: text
./isaaclab.sh --help
usage: isaaclab.sh [-h] [-i] [-f] [-p] [-s] [-t] [-o] [-v] [-d] [-c] -- Utility to manage Isaac Lab.
optional arguments:
-h, --help Display the help content.
-i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl_games, rsl_rl, sb3, skrl) as extra dependencies. Default is 'all'.
-f, --format Run pre-commit to format the code and check lints.
-p, --python Run the python executable provided by Isaac Sim or virtual environment (if active).
-s, --sim Run the simulator executable (isaac-sim.sh) provided by Isaac Sim.
-t, --test Run all python unittest tests.
-o, --docker Run the docker container helper script (docker/container.sh).
-v, --vscode Generate the VSCode settings file from template.
-d, --docs Build the documentation from source using sphinx.
-c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'isaaclab'.
.. tab:: Windows
.. code:: text
isaaclab.bat --help
usage: isaaclab.bat [-h] [-i] [-f] [-p] [-s] [-v] [-d] [-c] -- Utility to manage Isaac Lab.
optional arguments:
-h, --help Display the help content.
-i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl_games, rsl_rl, sb3, skrl) as extra dependencies. Default is 'all'.
-f, --format Run pre-commit to format the code and check lints.
-p, --python Run the python executable provided by Isaac Sim or virtual environment (if active).
-s, --sim Run the simulator executable (isaac-sim.bat) provided by Isaac Sim.
-t, --test Run all python unittest tests.
-v, --vscode Generate the VSCode settings file from template.
-d, --docs Build the documentation from source using sphinx.
-c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'isaaclab'.
Installation
~~~~~~~~~~~~
- Install dependencies using ``apt`` (on Ubuntu):
.. code:: bash
sudo apt install cmake build-essential
- Run the install command that iterates over all the extensions in ``source/extensions`` directory and installs them
using pip (with ``--editable`` flag):
.. tabs::
.. tab:: Linux
.. code:: bash
./isaaclab.sh --install # or "./isaaclab.sh -i"
.. tab:: Windows
.. code:: bash
isaaclab.bat --install :: or "isaaclab.bat -i"
.. note::
By default, this will install all the learning frameworks. If you want to install only a specific framework, you can
pass the name of the framework as an argument. For example, to install only the ``rl_games`` framework, you can run
.. tabs::
.. tab:: Linux
.. code:: bash
./isaaclab.sh --install rl_games
.. tab:: Windows
.. code:: bash
isaaclab.bat --install rl_games :: or "isaaclab.bat -i"
The valid options are ``rl_games``, ``rsl_rl``, ``sb3``, ``skrl``, ``robomimic``, ``none``.
Running Isaac Lab in the Cloud
==============================
Isaac Lab can be run in various cloud infrastructures with the use of `Isaac Automator <https://github.com/isaac-sim/IsaacAutomator>`__.
Isaac Automator allows for quick deployment of Isaac Sim and Isaac Lab onto the public clouds (AWS, GCP, Azure, and Alibaba Cloud are currently supported).
The result is a fully configured remote desktop cloud workstation, which can be used for development and testing of Isaac Lab within minutes and on a budget. Isaac Automator supports a variety of GPU instances, stop-start functionality to save on cloud costs, and several tools to aid the workflow (such as uploading and downloading data, autorun, and deployment management).
Installing Isaac Automator
--------------------------
To use Isaac Automator, first clone the repo:
.. code-block:: bash
git clone https://github.com/isaac-sim/IsaacAutomator.git
Isaac Automator requires having ``docker`` pre-installed on the system.
* To install Docker, please follow the instructions for your operating system on the `Docker website`_.
* Follow the post-installation steps for Docker on the `post-installation steps`_ page. These steps allow you to run
Docker without using ``sudo``.
Isaac Automator also requires obtaining a NGC API key.
* Get access to the `Isaac Sim container`_ by joining the NVIDIA Developer Program.
* Generate your `NGC API key`_ to access locked container images from NVIDIA GPU Cloud (NGC).
* This step requires you to create an NGC account if you do not already have one.
* Once you have your generated API key, you need to log in to NGC
from the terminal.
.. code:: bash
docker login nvcr.io
* For the username, enter ``$oauthtoken`` exactly as shown. It is a special username that is used to
authenticate with NGC.
.. code:: text
Username: $oauthtoken
Password: <Your NGC API Key>
Running Isaac Automator
-----------------------
To run Isaac Automator, first build the Isaac Automator container:
.. code-block:: bash
./build
Next, run the deployment script for your preferred cloud:
.. code-block:: bash
# AWS
./deploy-aws
# Azure
./deploy-azure
# GCP
./deploy-gcp
# Alibaba Cloud
./deploy-alicloud
Follow the prompts for entering information regarding the environment setup and credentials.
Once successful, instructions for connecting to the cloud instance will be available in the terminal.
Connections can be made using SSH, noVNC, or NoMachine.
For details on the credentials and setup required for each cloud, please visit the
`Isaac Automator <https://github.com/isaac-sim/IsaacAutomator?tab=readme-ov-file#deploying-isaac-sim>`__
page for more instructions.
Running Isaac Lab on the Cloud
------------------------------
Once connected to the cloud instance, the desktop will have an icon showing ``isaaclab.sh``.
Launch the ``isaaclab.sh`` executable, which will open a new Terminal. Within the terminal,
Isaac Lab commands can be executed in the same way as running locally.
For example:
.. code-block:: bash
./isaaclab.sh -p source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0
Destroying a Deployment
-----------------------
To save costs, deployments can be destroyed when not being used.
This can be done from within the Automator container, which can be entered with the command ``./run``.
To destroy a deployment, run:
.. code:: bash
./destroy <deployment-name>
.. _`Docker website`: https://docs.docker.com/desktop/install/linux-install/
.. _`post-installation steps`: https://docs.docker.com/engine/install/linux-postinstall/
.. _`Isaac Sim container`: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim
.. _`NGC API key`: https://docs.nvidia.com/ngc/gpu-cloud/ngc-user-guide/index.html#generating-api-key
Installation using Isaac Sim Binaries
=====================================
Installing Isaac Sim
--------------------
Downloading pre-built binaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Please follow the Isaac Sim
`documentation <https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html>`__
to install the latest Isaac Sim release.
To check the minimum system requirements, refer to the documentation
`here <https://docs.omniverse.nvidia.com/isaacsim/latest/installation/requirements.html>`__.
.. note::
We have tested Isaac Lab with Isaac Sim 4.0 release on Ubuntu
20.04 LTS with NVIDIA driver 525.147.
.. tabs::
.. tab:: Linux
On Linux systems, by default, Isaac Sim is installed in the directory
``${HOME}/.local/share/ov/pkg/isaac_sim-*``, with ``*`` corresponding to the Isaac Sim version.
.. tab:: Windows
On Windows systems, by default, Isaac Sim is installed in the directory
``C:\Users\user\AppData\Local\ov\pkg\isaac_sim-*``, with ``*`` corresponding to the Isaac Sim version.
Installing Isaac Lab
--------------------
Cloning Isaac Lab
~~~~~~~~~~~~~~~~~
.. note::
We recommend making a `fork <https://github.com/isaac-sim/IsaacLab/fork>`_ of the Isaac Lab repository to contribute
to the project but this is not mandatory to use the framework. If you
make a fork, please replace ``isaac-sim`` with your username
in the following instructions.
Clone the Isaac Lab repository into your workspace:
.. code:: bash
# Option 1: With SSH
git clone [email protected]:isaac-sim/IsaacLab.git
# Option 2: With HTTPS
git clone https://github.com/isaac-sim/IsaacLab.git
.. note::
We provide a helper executable `isaaclab.sh <https://github.com/isaac-sim/IsaacLab/blob/main/isaaclab.sh>`_ that provides
utilities to manage extensions:
.. tabs::
.. tab:: Linux
.. code:: text
./isaaclab.sh --help
usage: isaaclab.sh [-h] [-i] [-f] [-p] [-s] [-t] [-o] [-v] [-d] [-c] -- Utility to manage Isaac Lab.
optional arguments:
-h, --help Display the help content.
-i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl-games, rsl-rl, sb3, skrl) as extra dependencies. Default is 'all'.
-f, --format Run pre-commit to format the code and check lints.
-p, --python Run the python executable provided by Isaac Sim or virtual environment (if active).
-s, --sim Run the simulator executable (isaac-sim.sh) provided by Isaac Sim.
-t, --test Run all python unittest tests.
-o, --docker Run the docker container helper script (docker/container.sh).
-v, --vscode Generate the VSCode settings file from template.
-d, --docs Build the documentation from source using sphinx.
-c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'isaaclab'.
.. tab:: Windows
.. code:: text
isaaclab.bat --help
usage: isaaclab.bat [-h] [-i] [-f] [-p] [-s] [-v] [-d] [-c] -- Utility to manage Isaac Lab.
optional arguments:
-h, --help Display the help content.
-i, --install [LIB] Install the extensions inside Isaac Lab and learning frameworks (rl-games, rsl-rl, sb3, skrl) as extra dependencies. Default is 'all'.
-f, --format Run pre-commit to format the code and check lints.
-p, --python Run the python executable provided by Isaac Sim or virtual environment (if active).
-s, --sim Run the simulator executable (isaac-sim.bat) provided by Isaac Sim.
-t, --test Run all python unittest tests.
-v, --vscode Generate the VSCode settings file from template.
-d, --docs Build the documentation from source using sphinx.
-c, --conda [NAME] Create the conda environment for Isaac Lab. Default name is 'isaaclab'.
Creating the Isaac Sim Symbolic Link
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Set up a symbolic link between the installed Isaac Sim root folder
and ``_isaac_sim`` in the Isaac Lab directory. This makes it convenient
to index the python modules and look for extensions shipped with Isaac Sim.
.. tabs::
.. tab:: Linux
.. code:: bash
# enter the cloned repository
cd IsaacLab
# create a symbolic link
ln -s path_to_isaac_sim _isaac_sim
# For example: ln -s /home/nvidia/.local/share/ov/pkg/isaac-sim-4.0.0 _isaac_sim
.. tab:: Windows
.. code:: batch
:: enter the cloned repository
cd IsaacLab
:: create a symbolic link - requires launching Command Prompt with Administrator access
mklink /D _isaac_sim path_to_isaac_sim
:: For example: mklink /D _isaac_sim C:/Users/nvidia/AppData/Local/ov/pkg/isaac-sim-4.0.0
Setting up the conda environment (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. attention::
This step is optional. If you are using the bundled python with Isaac Sim, you can skip this step.
The executable ``isaaclab.sh`` automatically fetches the python bundled with Isaac
Sim, using the ``./isaaclab.sh -p`` command (unless inside a virtual environment). This executable
behaves like a python executable, and can be used to run any python script or
module with the simulator. For more information, please refer to the
`documentation <https://docs.omniverse.nvidia.com/isaacsim/latest/manual_standalone_python.html#isaac-sim-python-environment>`__.
Although using a virtual environment is optional, we recommend using ``conda``. To install
``conda``, please follow the instructions `here <https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html>`__.
In case you want to use ``conda`` to create a virtual environment, you can
use the following command:
.. tabs::
.. tab:: Linux
.. code:: bash
# Option 1: Default name for conda environment is 'isaaclab'
./isaaclab.sh --conda # or "./isaaclab.sh -c"
# Option 2: Custom name for conda environment
./isaaclab.sh --conda my_env # or "./isaaclab.sh -c my_env"
.. tab:: Windows
.. code:: batch
:: Option 1: Default name for conda environment is 'isaaclab'
isaaclab.bat --conda :: or "isaaclab.bat -c"
:: Option 2: Custom name for conda environment
isaaclab.bat --conda my_env :: or "isaaclab.bat -c my_env"
If you are using ``conda`` to create a virtual environment, make sure to
activate the environment before running any scripts. For example:
.. code:: bash
conda activate isaaclab # or "conda activate my_env"
Once you are in the virtual environment, you do not need to use ``./isaaclab.sh -p`` / ``isaaclab.bat -p``
to run python scripts. You can use the default python executable in your environment
by running ``python`` or ``python3``. However, for the rest of the documentation,
we will assume that you are using ``./isaaclab.sh -p`` / ``isaaclab.bat -p`` to run python scripts. This command
is equivalent to running ``python`` or ``python3`` in your virtual environment.
Installation
~~~~~~~~~~~~
- Install dependencies using ``apt`` (on Ubuntu):
.. code:: bash
sudo apt install cmake build-essential
- Run the install command that iterates over all the extensions in ``source/extensions`` directory and installs them
using pip (with ``--editable`` flag):
.. tabs::
.. tab:: Linux
.. code:: bash
./isaaclab.sh --install # or "./isaaclab.sh -i"
.. tab:: Windows
.. code:: bash
isaaclab.bat --install :: or "isaaclab.bat -i"
.. note::
By default, this will install all the learning frameworks. If you want to install only a specific framework, you can
pass the name of the framework as an argument. For example, to install only the ``rl_games`` framework, you can run
.. tabs::
.. tab:: Linux
.. code:: bash
./isaaclab.sh --install rl_games
.. tab:: Windows
.. code:: bash
isaaclab.bat --install rl_games :: or "isaaclab.bat -i"
The valid options are ``rl_games``, ``rsl_rl``, ``sb3``, ``skrl``, ``robomimic``, ``none``.
Verifying the Installation
==========================
Verifying the Isaac Sim installation
------------------------------------
Isaac Sim installed from pip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Make sure that your virtual environment is activated (if applicable)
- Check that the simulator runs as expected:
.. code:: bash
# note: you can pass the argument "--help" to see all arguments possible.
isaacsim
By default, this will launch an empty mini Kit window.
- To run with a specific experience file, run:
.. code:: bash
# experience files can be absolute path, or relative path searched in isaacsim/apps or omni/apps
isaacsim omni.isaac.sim.python.kit
.. attention::
When running Isaac Sim for the first time, all dependent extensions will be pulled from the registry.
This process can take upwards of 10 minutes and is required on the first run of each experience file.
Once the extensions are pulled, consecutive runs using the same experience file will use the cached extensions.
In addition, the first run will prompt users to accept the Nvidia Omniverse License Agreement.
To accept the EULA, reply ``Yes`` when prompted with the below message:
.. code:: bash
By installing or using Isaac Sim, I agree to the terms of NVIDIA OMNIVERSE LICENSE AGREEMENT (EULA)
in https://docs.omniverse.nvidia.com/isaacsim/latest/common/NVIDIA_Omniverse_License_Agreement.html
Do you accept the EULA? (Yes/No): Yes
If the simulator does not run or crashes while following the above
instructions, it means that something is incorrectly configured. To
debug and troubleshoot, please check Isaac Sim
`documentation <https://docs.omniverse.nvidia.com/dev-guide/latest/linux-troubleshooting.html>`__
and the
`forums <https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_sim_forums.html>`__.
Isaac Sim installed from binaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To avoid the overhead of finding and locating the Isaac Sim installation
directory every time, we recommend exporting the following environment
variables to your terminal for the remainder of the installation instructions:
.. tabs::
.. tab:: Linux
.. code:: bash
# Isaac Sim root directory
export ISAACSIM_PATH="${HOME}/.local/share/ov/pkg/isaac-sim-4.0.0"
# Isaac Sim python executable
export ISAACSIM_PYTHON_EXE="${ISAACSIM_PATH}/python.sh"
.. tab:: Windows
.. code:: batch
:: Isaac Sim root directory
set ISAACSIM_PATH="C:\Users\user\AppData\Local\ov\pkg\isaac-sim-4.0.0"
:: Isaac Sim python executable
set ISAACSIM_PYTHON_EXE="%ISAACSIM_PATH%\python.bat"
For more information on common paths, please check the Isaac Sim
`documentation <https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_faq.html#common-path-locations>`__.
- Check that the simulator runs as expected:
.. tabs::
.. tab:: Linux
.. code:: bash
# note: you can pass the argument "--help" to see all arguments possible.
${ISAACSIM_PATH}/isaac-sim.sh
.. tab:: Windows
.. code:: batch
:: note: you can pass the argument "--help" to see all arguments possible.
%ISAACSIM_PATH%\isaac-sim.bat
- Check that the simulator runs from a standalone python script:
.. tabs::
.. tab:: Linux
.. code:: bash
# checks that python path is set correctly
${ISAACSIM_PYTHON_EXE} -c "print('Isaac Sim configuration is now complete.')"
# checks that Isaac Sim can be launched from python
${ISAACSIM_PYTHON_EXE} ${ISAACSIM_PATH}/standalone_examples/api/omni.isaac.core/add_cubes.py
.. tab:: Windows
.. code:: batch
:: checks that python path is set correctly
%ISAACSIM_PYTHON_EXE% -c "print('Isaac Sim configuration is now complete.')"
:: checks that Isaac Sim can be launched from python
%ISAACSIM_PYTHON_EXE% %ISAACSIM_PATH%\standalone_examples\api\omni.isaac.core\add_cubes.py
.. attention::
If you have been using a previous version of Isaac Sim, you
need to run the following command for the *first* time after
installation to remove all the old user data and cached variables:
.. tabs::
.. tab:: Linux
.. code:: bash
${ISAACSIM_PATH}/isaac-sim.sh --reset-user
.. tab:: Windows
.. code:: batch
%ISAACSIM_PATH%\isaac-sim.bat --reset-user
If the simulator does not run or crashes while following the above
instructions, it means that something is incorrectly configured. To
debug and troubleshoot, please check Isaac Sim
`documentation <https://docs.omniverse.nvidia.com/dev-guide/latest/linux-troubleshooting.html>`__
and the
`forums <https://docs.omniverse.nvidia.com/isaacsim/latest/isaac_sim_forums.html>`__.
Verifying the Isaac Lab installation
------------------------------------
To verify that the installation was successful, run the following command from the
top of the repository:
.. tabs::
.. tab:: Linux
.. code:: bash
# Option 1: Using the isaaclab.sh executable
# note: this works for both the bundled python and the virtual environment
./isaaclab.sh -p source/standalone/tutorials/00_sim/create_empty.py
# Option 2: Using python in your virtual environment
python source/standalone/tutorials/00_sim/create_empty.py
.. tab:: Windows
.. code:: batch
:: Option 1: Using the isaaclab.bat executable
:: note: this works for both the bundled python and the virtual environment
isaaclab.bat -p source\standalone\tutorials\00_sim\create_empty.py
:: Option 2: Using python in your virtual environment
python source\standalone\tutorials\00_sim\create_empty.py
The above command should launch the simulator and display a window with a black
ground plane. You can exit the script by pressing ``Ctrl+C`` on your terminal.
On Windows machines, please terminate the process from Command Prompt using
``Ctrl+Break`` or ``Ctrl+fn+B``.
If you see this, then the installation was successful! |:tada:|
Installation Guide
===================
.. image:: https://img.shields.io/badge/IsaacSim-4.0-silver.svg
:target: https://developer.nvidia.com/isaac-sim
:alt: IsaacSim 4.0
.. image:: https://img.shields.io/badge/python-3.10-blue.svg
:target: https://www.python.org/downloads/release/python-31013/
:alt: Python 3.10
.. image:: https://img.shields.io/badge/platform-linux--64-orange.svg
:target: https://releases.ubuntu.com/20.04/
:alt: Ubuntu 20.04
.. image:: https://img.shields.io/badge/platform-windows--64-orange.svg
:target: https://www.microsoft.com/en-ca/windows/windows-11
:alt: Windows 11
.. caution::
We have dropped support for Isaac Sim versions 2023.1.0 and below. We recommend using the latest
Isaac Sim 4.0 release to benefit from the latest features and improvements.
For more information, please refer to the
`Isaac Sim release notes <https://docs.omniverse.nvidia.com/isaacsim/latest/release_notes.html>`__.
.. note::
We recommend system requirements with at least 32GB RAM and 16GB VRAM for Isaac Lab.
For the full list of system requirements for Isaac Sim, please refer to the
`Isaac Sim system requirements <https://docs.omniverse.nvidia.com/isaacsim/latest/installation/requirements.html#system-requirements>`_.
As an experimental feature in the Isaac Sim 4.0 release, Isaac Sim can also be installed through pip.
This simplifies the installation process by avoiding the need to download the Omniverse Launcher
and install Isaac Sim through it. Therefore, there are two ways to install Isaac Lab:
.. toctree::
:maxdepth: 2
Installation using Isaac Sim pip (experimental) <pip_installation>
binaries_installation
verifying_installation
cloud_installation
API Reference
=============
This page gives an overview of all the modules and classes in the Isaac Lab extensions.
omni.isaac.lab extension
------------------------
The following modules are available in the ``omni.isaac.lab`` extension:
.. currentmodule:: omni.isaac.lab
.. autosummary::
:toctree: lab
app
actuators
assets
controllers
devices
envs
managers
markers
scene
sensors
sim
terrains
utils
.. toctree::
:hidden:
lab/omni.isaac.lab.envs.mdp
lab/omni.isaac.lab.envs.ui
lab/omni.isaac.lab.sensors.patterns
lab/omni.isaac.lab.sim.converters
lab/omni.isaac.lab.sim.schemas
lab/omni.isaac.lab.sim.spawners
omni.isaac.lab_tasks extension
--------------------------------
The following modules are available in the ``omni.isaac.lab_tasks`` extension:
.. currentmodule:: omni.isaac.lab_tasks
.. autosummary::
:toctree: lab_tasks
utils
.. toctree::
:hidden:
lab_tasks/omni.isaac.lab_tasks.utils.wrappers
lab_tasks/omni.isaac.lab_tasks.utils.data_collector
omni.isaac.lab.utils
====================
.. automodule:: omni.isaac.lab.utils
.. Rubric:: Submodules
.. autosummary::
io
array
assets
dict
math
noise
string
timer
warp
.. Rubric:: Functions
.. autosummary::
configclass
Configuration class
~~~~~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.configclass
:members:
:show-inheritance:
IO operations
~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.io
:members:
:imported-members:
:show-inheritance:
Array operations
~~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.array
:members:
:show-inheritance:
Asset operations
~~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.assets
:members:
:show-inheritance:
Dictionary operations
~~~~~~~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.dict
:members:
:show-inheritance:
Math operations
~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.math
:members:
:inherited-members:
:show-inheritance:
Noise operations
~~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.noise
:members:
:imported-members:
:inherited-members:
:show-inheritance:
:exclude-members: __init__, func
String operations
~~~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.string
:members:
:show-inheritance:
Timer operations
~~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.timer
:members:
:show-inheritance:
Warp operations
~~~~~~~~~~~~~~~
.. automodule:: omni.isaac.lab.utils.warp
:members:
:imported-members:
:show-inheritance:
omni.isaac.lab.envs
===================
.. automodule:: omni.isaac.lab.envs
.. rubric:: Submodules
.. autosummary::
mdp
ui
.. rubric:: Classes
.. autosummary::
ManagerBasedEnv
ManagerBasedEnvCfg
ViewerCfg
ManagerBasedRLEnv
ManagerBasedRLEnvCfg
DirectRLEnv
DirectRLEnvCfg
Manager Based Environment
-------------------------
.. autoclass:: ManagerBasedEnv
:members:
.. autoclass:: ManagerBasedEnvCfg
:members:
:exclude-members: __init__, class_type
.. autoclass:: ViewerCfg
:members:
:exclude-members: __init__
Manager Based RL Environment
----------------------------
.. autoclass:: ManagerBasedRLEnv
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: ManagerBasedRLEnvCfg
:members:
:inherited-members:
:show-inheritance:
:exclude-members: __init__, class_type
Direct RL Environment
---------------------
.. autoclass:: DirectRLEnv
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: DirectRLEnvCfg
:members:
:inherited-members:
:show-inheritance:
:exclude-members: __init__, class_type
Multi-GPU and Multi-Node Training
=================================
.. currentmodule:: omni.isaac.lab
Isaac Lab supports multi-GPU and multi-node reinforcement learning on Linux.
Multi-GPU Training
------------------
For complex reinforcement learning environments, it may be desirable to scale up training across multiple GPUs.
This is possible in Isaac Lab with the ``rl_games`` RL library through the use of the
`PyTorch distributed <https://pytorch.org/docs/stable/distributed.html>`_ framework.
In this workflow, ``torch.distributed`` is used to launch multiple processes of training, where the number of
processes must be equal to or less than the number of GPUs available. Each process runs on
a dedicated GPU and launches its own instance of Isaac Sim and the Isaac Lab environment.
Each process collects its own rollouts during the training process and has its own copy of the policy
network. During training, gradients are aggregated across the processes and broadcasted back to the process
at the end of the epoch.
.. image:: ../_static/multigpu.png
:align: center
:alt: Multi-GPU training paradigm
To train with multiple GPUs, use the following command, where ``--nproc_per_node`` represents the number of available GPUs:
.. code-block:: shell
python -m torch.distributed.run --nnodes=1 --nproc_per_node=2 source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0 --headless --distributed
Due to limitations of NCCL on Windows, this feature is currently supported on Linux only.
Multi-Node Training
-------------------
To scale up training beyond multiple GPUs on a single machine, it is also possible to train across multiple nodes.
To train across multiple nodes/machines, it is required to launch an individual process on each node.
For the master node, use the following command, where ``--nproc_per_node`` represents the number of available GPUs, and ``--nnodes`` represents the number of nodes:
.. code-block:: shell
python -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=0 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=localhost:5555 source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0 --headless --distributed
Note that the port (``5555``) can be replaced with any other available port.
For non-master nodes, use the following command, replacing ``--node_rank`` with the index of each machine:
.. code-block:: shell
python -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=1 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=ip_of_master_machine:5555 source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0 --headless --distributed
For more details on multi-node training with PyTorch, please visit the `PyTorch documentation <https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html>`_. As mentioned in the PyTorch documentation, "multinode training is bottlenecked by inter-node communication latencies". When this latency is high, it is possible multi-node training will perform worse than running on a single node instance.
Due to limitations of NCCL on Windows, this feature is currently supported on Linux only.
.. _feature-workflows:
Task Design Workflows
=====================
.. currentmodule:: omni.isaac.lab
Reinforcement learning environments can be implemented using two different workflows: Manager-based and Direct.
This page outlines the two workflows, explaining their benefits and use cases.
In addition, multi-GPU and multi-node reinforcement learning support is explained, along with the tiled rendering API,
which can be used for efficient vectorized rendering across environments.
Manager-Based Environments
--------------------------
Manager-based environments promote modular implementations of reinforcement learning tasks
through the use of Managers. Each component of the task, such as rewards, observations, and terminations,
can be specified as individual configuration classes that are then passed to the corresponding
manager classes. Each manager is responsible for parsing the configurations and processing
the contents specified in each config class. The manager implementations are taken care of by
the base class :class:`envs.ManagerBasedRLEnv`.
With this approach, it is simple to switch implementations of some components in the task
while leaving the rest of the code intact. This is desirable when collaborating with others
on implementing a reinforcement learning environment, where contributors may choose to use
different combinations of configurations for the reinforcement learning components of the task.
A class definition of a manager-based environment consists of defining a task configuration class that
inherits from :class:`envs.ManagerBasedRLEnvCfg`. This class should contain variables assigned to various
configuration classes for each of the components of the RL task, such as the ``ObservationCfg``
or ``RewardCfg``. The entry point of the environment becomes the base class :class:`envs.ManagerBasedRLEnv`,
which will process the main task config and iterate through the individual configuration classes that are defined
in the task config class.
An example of implementing the reward function for the Cartpole task using the manager-based implementation is as follows:
.. code-block:: python
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
# (1) Constant running reward
alive = RewTerm(func=mdp.is_alive, weight=1.0)
# (2) Failure penalty
terminating = RewTerm(func=mdp.is_terminated, weight=-2.0)
# (3) Primary task: keep pole upright
pole_pos = RewTerm(
func=mdp.joint_pos_target_l2,
weight=-1.0,
params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]), "target": 0.0},
)
# (4) Shaping tasks: lower cart velocity
cart_vel = RewTerm(
func=mdp.joint_vel_l1,
weight=-0.01,
params={"asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"])},
)
# (5) Shaping tasks: lower pole angular velocity
pole_vel = RewTerm(
func=mdp.joint_vel_l1,
weight=-0.005,
params={"asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"])},
)
.. seealso::
We provide a more detailed tutorial for setting up an RL environment using the manager-based workflow at
`Creating a manager-based RL Environment <../tutorials/03_envs/create_rl_env.html>`_.
Direct Environments
-------------------
The direct-style environment more closely aligns with traditional implementations of reinforcement learning environments,
where a single script implements the reward function, observation function, resets, and all other components
of the environment. This approach does not use the Manager classes. Instead, users are left with the freedom
to implement the APIs from the base class :class:`envs.DirectRLEnv`. For users migrating from the IsaacGymEnvs
or OmniIsaacGymEnvs framework, this workflow will have a closer implementation to the previous frameworks.
When defining an environment following the direct-style implementation, a task configuration class inheriting from
:class:`envs.DirectRLEnvCfg` is used for defining task environment configuration variables, such as the number
of observations and actions. Adding configuration classes for the managers is not required, and they will not be processed
by the base class. In addition to the configuration class, the logic of the task should be defined in a new
task class that inherits from the base class :class:`envs.DirectRLEnv`. This class will then implement the main
task logic, including setting up the scene, processing the actions, computing resets, rewards, and observations.
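A minimal skeleton of such a task class might look as follows. This is an illustrative sketch with method bodies elided; ``CartpoleEnvCfg`` is assumed to be a config class inheriting from :class:`envs.DirectRLEnvCfg` as described above:

.. code-block:: python

    import torch

    from omni.isaac.lab.envs import DirectRLEnv


    class CartpoleEnv(DirectRLEnv):
        cfg: "CartpoleEnvCfg"  # task config class, assumed to be defined elsewhere

        def _setup_scene(self):
            ...  # spawn the robot, ground plane and lights; clone environments

        def _pre_physics_step(self, actions: torch.Tensor):
            ...  # process the actions from the policy

        def _apply_action(self):
            ...  # write the processed actions into the simulation

        def _get_observations(self) -> dict:
            ...  # return {"policy": obs}

        def _get_rewards(self) -> torch.Tensor:
            ...  # compute and return the reward buffer

        def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
            ...  # return the termination and time-out buffers

        def _reset_idx(self, env_ids):
            ...  # reset the environments at the given indices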
This approach may bring more performance benefits for the environment, as it allows implementing large chunks
of logic with optimized frameworks such as `PyTorch Jit <https://pytorch.org/docs/stable/jit.html>`_ or
`Warp <https://github.com/NVIDIA/warp>`_. This may be important when scaling up training for large and complex
environments. Additionally, data may be cached in class variables and reused in multiple APIs for the class.
This method provides more transparency in the implementations of the environments, as logic is defined
within the task class instead of being abstracted through the use of the Managers.
An example of implementing the reward function for the Cartpole task using the direct-style implementation is as follows:
.. code-block:: python
def _get_rewards(self) -> torch.Tensor:
total_reward = compute_rewards(
self.cfg.rew_scale_alive,
self.cfg.rew_scale_terminated,
self.cfg.rew_scale_pole_pos,
self.cfg.rew_scale_cart_vel,
self.cfg.rew_scale_pole_vel,
self.joint_pos[:, self._pole_dof_idx[0]],
self.joint_vel[:, self._pole_dof_idx[0]],
self.joint_pos[:, self._cart_dof_idx[0]],
self.joint_vel[:, self._cart_dof_idx[0]],
self.reset_terminated,
)
return total_reward
@torch.jit.script
def compute_rewards(
rew_scale_alive: float,
rew_scale_terminated: float,
rew_scale_pole_pos: float,
rew_scale_cart_vel: float,
rew_scale_pole_vel: float,
pole_pos: torch.Tensor,
pole_vel: torch.Tensor,
cart_pos: torch.Tensor,
cart_vel: torch.Tensor,
reset_terminated: torch.Tensor,
):
rew_alive = rew_scale_alive * (1.0 - reset_terminated.float())
rew_termination = rew_scale_terminated * reset_terminated.float()
rew_pole_pos = rew_scale_pole_pos * torch.sum(torch.square(pole_pos), dim=-1)
rew_cart_vel = rew_scale_cart_vel * torch.sum(torch.abs(cart_vel), dim=-1)
rew_pole_vel = rew_scale_pole_vel * torch.sum(torch.abs(pole_vel), dim=-1)
total_reward = rew_alive + rew_termination + rew_pole_pos + rew_cart_vel + rew_pole_vel
return total_reward
.. seealso::
We provide a more detailed tutorial for setting up an RL environment using the direct workflow at
`Creating a Direct Workflow RL Environment <../tutorials/03_envs/create_direct_rl_env.html>`_.
Multi-GPU Training
------------------
For complex reinforcement learning environments, it may be desirable to scale up training across multiple GPUs.
This is possible in Isaac Lab with the ``rl_games`` RL library through the use of the
`PyTorch distributed <https://pytorch.org/docs/stable/distributed.html>`_ framework.
In this workflow, ``torch.distributed`` is used to launch multiple processes of training, where the number of
processes must be equal to or less than the number of GPUs available. Each process runs on
a dedicated GPU and launches its own instance of Isaac Sim and the Isaac Lab environment.
Each process collects its own rollouts during the training process and has its own copy of the policy
network. During training, gradients are aggregated across the processes and broadcast back to each process
at the end of the epoch.
.. image:: ../_static/multigpu.png
:align: center
:alt: Multi-GPU training paradigm
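The snippet below is a minimal sketch of this paradigm in plain PyTorch, not Isaac Lab's internal implementation: each process launched by ``torch.distributed.run`` binds to its own GPU via the ``LOCAL_RANK`` environment variable, and gradients are averaged across processes before the optimizer step.

.. code-block:: python

    import os

    import torch
    import torch.distributed as dist
    import torch.nn as nn

    # torch.distributed.run sets LOCAL_RANK (GPU index on this node) per process
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")

    policy = nn.Linear(4, 1).cuda()  # stand-in for the policy network
    loss = policy(torch.randn(8, 4, device="cuda")).sum()
    loss.backward()

    # aggregate gradients across all processes and average them
    world_size = dist.get_world_size()
    for param in policy.parameters():
        dist.all_reduce(param.grad, op=dist.ReduceOp.SUM)
        param.grad /= world_size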
To train with multiple GPUs, use the following command, where ``--nproc_per_node`` represents the number of available GPUs:
.. code-block:: shell
python -m torch.distributed.run --nnodes=1 --nproc_per_node=2 source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0 --headless --distributed
Multi-Node Training
-------------------
To scale up training beyond multiple GPUs on a single machine, it is also possible to train across multiple nodes.
To train across multiple nodes/machines, an individual process must be launched on each node.
For the master node, use the following command, where ``--nproc_per_node`` represents the number of available GPUs, and ``--nnodes`` represents the number of nodes:
.. code-block:: shell
python -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=0 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=localhost:5555 source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0 --headless --distributed
Note that the port (``5555``) can be replaced with any other available port.
For non-master nodes, use the following command, replacing ``--node_rank`` with the index of each machine:
.. code-block:: shell
python -m torch.distributed.run --nproc_per_node=2 --nnodes=2 --node_rank=1 --rdzv_id=123 --rdzv_backend=c10d --rdzv_endpoint=ip_of_master_machine:5555 source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0 --headless --distributed
For more details on multi-node training with PyTorch, please visit the `PyTorch documentation <https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html>`_. As mentioned in the PyTorch documentation, "multinode training is bottlenecked by inter-node communication latencies". When this latency is high, it is possible multi-node training will perform worse than running on a single node instance.
Tiled Rendering
---------------
Tiled rendering APIs provide a vectorized interface for collecting data from camera sensors.
This is useful for reinforcement learning environments requiring vision in the loop.
Tiled rendering works by concatenating camera outputs from multiple cameras and rendering
one single large image instead of multiple smaller images that would have been produced
by each individual camera. This reduces the amount of time required for rendering and
provides a more efficient API for working with vision data.
Isaac Lab provides tiled rendering APIs for RGB and depth data through the :class:`~sensors.TiledCamera`
class. Configurations for the tiled rendering APIs can be defined through the :class:`~sensors.TiledCameraCfg`
class, specifying parameters such as the regex expression for all camera paths, the transform
for the cameras, the desired data type, the type of cameras to add to the scene, and the camera
resolution.
.. code-block:: python
tiled_camera: TiledCameraCfg = TiledCameraCfg(
prim_path="/World/envs/env_.*/Camera",
offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"),
data_types=["rgb"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0)
),
width=80,
height=80,
)
To access the tiled rendering interface, a :class:`~sensors.TiledCamera` object can be created and used
to retrieve data from the cameras.
.. code-block:: python
tiled_camera = TiledCamera(cfg.tiled_camera)
data_type = "rgb"
data = tiled_camera.data.output[data_type]
The returned data will be transformed into the shape (num_cameras, height, width, num_channels), which
can be used directly as observation for reinforcement learning.
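For instance, a sketch of converting this buffer into a normalized float observation, assuming the RGB output is an 8-bit tensor:

.. code-block:: python

    rgb = tiled_camera.data.output["rgb"]  # shape: (num_cameras, height, width, 3)
    obs = rgb.float() / 255.0              # normalize pixel values to [0, 1]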
When working with rendering, make sure to add the ``--enable_cameras`` argument when launching the
environment. For example:
.. code-block:: shell
python source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-RGB-Camera-Direct-v0 --headless --enable_cameras
| 12,063 |
reStructuredText
| 49.476987 | 407 | 0.735555 |
isaac-sim/IsaacLab/docs/source/features/tiled_rendering.rst
|
Tiled Rendering and Recording
=============================
.. currentmodule:: omni.isaac.lab
Tiled Rendering
---------------
.. note::
This feature is only available from Isaac Sim version 4.0.0.
Tiled rendering APIs provide a vectorized interface for collecting data from camera sensors.
This is useful for reinforcement learning environments requiring vision in the loop.
Tiled rendering works by concatenating camera outputs from multiple cameras and rendering
one single large image instead of multiple smaller images that would have been produced
by each individual camera. This reduces the amount of time required for rendering and
provides a more efficient API for working with vision data.
Isaac Lab provides tiled rendering APIs for RGB and depth data through the :class:`~sensors.TiledCamera`
class. Configurations for the tiled rendering APIs can be defined through the :class:`~sensors.TiledCameraCfg`
class, specifying parameters such as the regex expression for all camera paths, the transform
for the cameras, the desired data type, the type of cameras to add to the scene, and the camera
resolution.
.. code-block:: python
tiled_camera: TiledCameraCfg = TiledCameraCfg(
prim_path="/World/envs/env_.*/Camera",
offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"),
data_types=["rgb"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0)
),
width=80,
height=80,
)
To access the tiled rendering interface, a :class:`~sensors.TiledCamera` object can be created and used
to retrieve data from the cameras.
.. code-block:: python
tiled_camera = TiledCamera(cfg.tiled_camera)
data_type = "rgb"
data = tiled_camera.data.output[data_type]
The returned data will be transformed into the shape (num_cameras, height, width, num_channels), which
can be used directly as observation for reinforcement learning.
When working with rendering, make sure to add the ``--enable_cameras`` argument when launching the
environment. For example:
.. code-block:: shell
python source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-RGB-Camera-Direct-v0 --headless --enable_cameras
Recording during training
-------------------------
Isaac Lab supports recording video clips during training using the `gymnasium.wrappers.RecordVideo <https://gymnasium.farama.org/main/_modules/gymnasium/wrappers/record_video/>`_ class.
This feature can be enabled by using the following command line arguments with the training script:
* ``--video`` - enables video recording during training
* ``--video_length`` - length of each recorded video (in steps)
* ``--video_interval`` - interval between each video recording (in steps)
Make sure to also add the ``--enable_cameras`` argument when running headless.
Note that enabling recording is equivalent to enabling rendering during training, which will slow down both startup and runtime performance.
Example usage:
.. code-block:: shell
python source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0 --headless --enable_cameras --video --video_length 100 --video_interval 500
Recorded videos will be saved in the same directory as the training checkpoints, under ``IsaacLab/logs/<rl_workflow>/<task>/<run>/videos``.
| 3,424 |
reStructuredText
| 41.28395 | 185 | 0.740362 |
isaac-sim/IsaacLab/docs/source/features/environments.rst
|
Environments
============
The following list comprises all the RL task implementations that are available in Isaac Lab.
While we try to keep this list up-to-date, you can always get the latest list of environments by
running the following command:
.. code-block:: bash
./isaaclab.sh -p source/standalone/environments/list_envs.py
We are actively working on adding more environments to the list. If you have any environments that
you would like to add to Isaac Lab, please feel free to open a pull request!
Classic
-------
Classic environments that are based on IsaacGymEnvs implementation of MuJoCo-style environments.
.. table::
:widths: 33 37 30
+------------------+-----------------------------+-------------------------------------------------------------------------+
| World | Environment ID | Description |
+==================+=============================+=========================================================================+
| |humanoid| | | |humanoid-link| | Move towards a direction with the MuJoCo humanoid robot |
| | | |humanoid-direct-link| | |
+------------------+-----------------------------+-------------------------------------------------------------------------+
| |ant| | | |ant-link| | Move towards a direction with the MuJoCo ant robot |
| | | |ant-direct-link| | |
+------------------+-----------------------------+-------------------------------------------------------------------------+
| |cartpole| | | |cartpole-link| | Move the cart to keep the pole upwards in the classic cartpole control |
| | | |cartpole-direct-link| | |
| | | |cartpole-camera-rgb-link|| |
| | | |cartpole-camera-dpt-link|| |
+------------------+-----------------------------+-------------------------------------------------------------------------+
.. |humanoid| image:: ../_static/tasks/classic/humanoid.jpg
.. |ant| image:: ../_static/tasks/classic/ant.jpg
.. |cartpole| image:: ../_static/tasks/classic/cartpole.jpg
.. |humanoid-link| replace:: `Isaac-Humanoid-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/humanoid/humanoid_env_cfg.py>`__
.. |ant-link| replace:: `Isaac-Ant-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/ant/ant_env_cfg.py>`__
.. |cartpole-link| replace:: `Isaac-Cartpole-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py>`__
.. |humanoid-direct-link| replace:: `Isaac-Humanoid-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/humanoid/humanoid_env.py>`__
.. |ant-direct-link| replace:: `Isaac-Ant-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/ant/ant_env.py>`__
.. |cartpole-direct-link| replace:: `Isaac-Cartpole-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_env.py>`__
.. |cartpole-camera-rgb-link| replace:: `Isaac-Cartpole-RGB-Camera-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_camera_env.py>`__
.. |cartpole-camera-dpt-link| replace:: `Isaac-Cartpole-Depth-Camera-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/cartpole/cartpole_camera_env.py>`__
Manipulation
------------
Environments based on fixed-arm manipulation tasks.
For many of these tasks, we include configurations with different arm action spaces. For example,
for the lift-cube environment:
* |lift-cube-link|: Franka arm with joint position control
* |lift-cube-ik-abs-link|: Franka arm with absolute IK control
* |lift-cube-ik-rel-link|: Franka arm with relative IK control
.. table::
:widths: 33 37 30
+----------------+---------------------------+-----------------------------------------------------------------------------+
| World | Environment ID | Description |
+================+===========================+=============================================================================+
| |reach-franka| | |reach-franka-link| | Move the end-effector to a sampled target pose with the Franka robot |
+----------------+---------------------------+-----------------------------------------------------------------------------+
| |reach-ur10| | |reach-ur10-link| | Move the end-effector to a sampled target pose with the UR10 robot |
+----------------+---------------------------+-----------------------------------------------------------------------------+
| |lift-cube| | |lift-cube-link| | Pick a cube and bring it to a sampled target position with the Franka robot |
+----------------+---------------------------+-----------------------------------------------------------------------------+
| |cabi-franka| | |cabi-franka-link| | Grasp the handle of a cabinet's drawer and open it with the Franka robot |
+----------------+---------------------------+-----------------------------------------------------------------------------+
| |cube-allegro| | |cube-allegro-link| | In-hand reorientation of a cube using Allegro hand |
+----------------+---------------------------+-----------------------------------------------------------------------------+
| |cube-shadow| | | |cube-shadow-link| | In-hand reorientation of a cube using Shadow hand |
| | | |cube-shadow-ff-link| | |
| | | |cube-shadow-lstm-link| | |
+----------------+---------------------------+-----------------------------------------------------------------------------+
.. |reach-franka| image:: ../_static/tasks/manipulation/franka_reach.jpg
.. |reach-ur10| image:: ../_static/tasks/manipulation/ur10_reach.jpg
.. |lift-cube| image:: ../_static/tasks/manipulation/franka_lift.jpg
.. |cabi-franka| image:: ../_static/tasks/manipulation/franka_open_drawer.jpg
.. |cube-allegro| image:: ../_static/tasks/manipulation/allegro_cube.jpg
.. |cube-shadow| image:: ../_static/tasks/manipulation/shadow_cube.jpg
.. |reach-franka-link| replace:: `Isaac-Reach-Franka-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/franka/joint_pos_env_cfg.py>`__
.. |reach-ur10-link| replace:: `Isaac-Reach-UR10-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/reach/config/ur_10/joint_pos_env_cfg.py>`__
.. |lift-cube-link| replace:: `Isaac-Lift-Cube-Franka-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/joint_pos_env_cfg.py>`__
.. |lift-cube-ik-abs-link| replace:: `Isaac-Lift-Cube-Franka-IK-Abs-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py>`__
.. |lift-cube-ik-rel-link| replace:: `Isaac-Lift-Cube-Franka-IK-Rel-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/lift/config/franka/ik_rel_env_cfg.py>`__
.. |cabi-franka-link| replace:: `Isaac-Open-Drawer-Franka-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/cabinet/config/franka/joint_pos_env_cfg.py>`__
.. |cube-allegro-link| replace:: `Isaac-Repose-Cube-Allegro-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/manipulation/inhand/config/allegro_hand/allegro_env_cfg.py>`__
.. |cube-shadow-link| replace:: `Isaac-Shadow-Hand-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_env.py>`__
.. |cube-shadow-ff-link| replace:: `Isaac-Shadow-Hand-OpenAI-FF-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_env.py>`__
.. |cube-shadow-lstm-link| replace:: `Isaac-Shadow-Hand-OpenAI-LSTM-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_env.py>`__
Locomotion
----------
Environments based on legged locomotion tasks.
.. table::
:widths: 33 37 30
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| World | Environment ID | Description |
+==============================+==============================================+=========================================================================+
| |velocity-flat-anymal-b| | |velocity-flat-anymal-b-link| | Track a velocity command on flat terrain with the Anymal B robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-rough-anymal-b| | |velocity-rough-anymal-b-link| | Track a velocity command on rough terrain with the Anymal B robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-flat-anymal-c| | | |velocity-flat-anymal-c-link| | Track a velocity command on flat terrain with the Anymal C robot |
| | | |velocity-flat-anymal-c-direct-link| | |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-rough-anymal-c| | | |velocity-rough-anymal-c-link| | Track a velocity command on rough terrain with the Anymal C robot |
| | | |velocity-rough-anymal-c-direct-link| | |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-flat-anymal-d| | |velocity-flat-anymal-d-link| | Track a velocity command on flat terrain with the Anymal D robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-rough-anymal-d| | |velocity-rough-anymal-d-link| | Track a velocity command on rough terrain with the Anymal D robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-flat-unitree-a1| | |velocity-flat-unitree-a1-link| | Track a velocity command on flat terrain with the Unitree A1 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-rough-unitree-a1| | |velocity-rough-unitree-a1-link| | Track a velocity command on rough terrain with the Unitree A1 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-flat-unitree-go1| | |velocity-flat-unitree-go1-link| | Track a velocity command on flat terrain with the Unitree Go1 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-rough-unitree-go1| | |velocity-rough-unitree-go1-link| | Track a velocity command on rough terrain with the Unitree Go1 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-flat-unitree-go2| | |velocity-flat-unitree-go2-link| | Track a velocity command on flat terrain with the Unitree Go2 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-rough-unitree-go2| | |velocity-rough-unitree-go2-link| | Track a velocity command on rough terrain with the Unitree Go2 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-flat-h1| | |velocity-flat-h1-link| | Track a velocity command on flat terrain with the Unitree H1 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
| |velocity-rough-h1| | |velocity-rough-h1-link| | Track a velocity command on rough terrain with the Unitree H1 robot |
+------------------------------+----------------------------------------------+-------------------------------------------------------------------------+
.. |velocity-flat-anymal-b-link| replace:: `Isaac-Velocity-Flat-Anymal-B-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/flat_env_cfg.py>`__
.. |velocity-rough-anymal-b-link| replace:: `Isaac-Velocity-Rough-Anymal-B-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_b/rough_env_cfg.py>`__
.. |velocity-flat-anymal-c-link| replace:: `Isaac-Velocity-Flat-Anymal-C-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/flat_env_cfg.py>`__
.. |velocity-rough-anymal-c-link| replace:: `Isaac-Velocity-Rough-Anymal-C-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_c/rough_env_cfg.py>`__
.. |velocity-flat-anymal-c-direct-link| replace:: `Isaac-Velocity-Flat-Anymal-C-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/anymal_c/anymal_c_env.py>`__
.. |velocity-rough-anymal-c-direct-link| replace:: `Isaac-Velocity-Rough-Anymal-C-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/anymal_c/anymal_c_env.py>`__
.. |velocity-flat-anymal-d-link| replace:: `Isaac-Velocity-Flat-Anymal-D-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/flat_env_cfg.py>`__
.. |velocity-rough-anymal-d-link| replace:: `Isaac-Velocity-Rough-Anymal-D-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py>`__
.. |velocity-flat-unitree-a1-link| replace:: `Isaac-Velocity-Flat-Unitree-A1-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/flat_env_cfg.py>`__
.. |velocity-rough-unitree-a1-link| replace:: `Isaac-Velocity-Rough-Unitree-A1-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_a1/rough_env_cfg.py>`__
.. |velocity-flat-unitree-go1-link| replace:: `Isaac-Velocity-Flat-Unitree-Go1-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/flat_env_cfg.py>`__
.. |velocity-rough-unitree-go1-link| replace:: `Isaac-Velocity-Rough-Unitree-Go1-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go1/rough_env_cfg.py>`__
.. |velocity-flat-unitree-go2-link| replace:: `Isaac-Velocity-Flat-Unitree-Go2-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/flat_env_cfg.py>`__
.. |velocity-rough-unitree-go2-link| replace:: `Isaac-Velocity-Rough-Unitree-Go2-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/unitree_go2/rough_env_cfg.py>`__
.. |velocity-flat-h1-link| replace:: `Isaac-Velocity-Flat-H1-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/flat_env_cfg.py>`__
.. |velocity-rough-h1-link| replace:: `Isaac-Velocity-Rough-H1-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/locomotion/velocity/config/h1/rough_env_cfg.py>`__
.. |velocity-flat-anymal-b| image:: ../_static/tasks/locomotion/anymal_b_flat.jpg
.. |velocity-rough-anymal-b| image:: ../_static/tasks/locomotion/anymal_b_rough.jpg
.. |velocity-flat-anymal-c| image:: ../_static/tasks/locomotion/anymal_c_flat.jpg
.. |velocity-rough-anymal-c| image:: ../_static/tasks/locomotion/anymal_c_rough.jpg
.. |velocity-flat-anymal-d| image:: ../_static/tasks/locomotion/anymal_d_flat.jpg
.. |velocity-rough-anymal-d| image:: ../_static/tasks/locomotion/anymal_d_rough.jpg
.. |velocity-flat-unitree-a1| image:: ../_static/tasks/locomotion/a1_flat.jpg
.. |velocity-rough-unitree-a1| image:: ../_static/tasks/locomotion/a1_rough.jpg
.. |velocity-flat-unitree-go1| image:: ../_static/tasks/locomotion/go1_flat.jpg
.. |velocity-rough-unitree-go1| image:: ../_static/tasks/locomotion/go1_rough.jpg
.. |velocity-flat-unitree-go2| image:: ../_static/tasks/locomotion/go2_flat.jpg
.. |velocity-rough-unitree-go2| image:: ../_static/tasks/locomotion/go2_rough.jpg
.. |velocity-flat-h1| image:: ../_static/tasks/locomotion/h1_flat.jpg
.. |velocity-rough-h1| image:: ../_static/tasks/locomotion/h1_rough.jpg
Navigation
----------
.. table::
:widths: 33 37 30
+----------------+---------------------+-----------------------------------------------------------------------------+
| World | Environment ID | Description |
+================+=====================+=============================================================================+
| |anymal_c_nav| | |anymal_c_nav-link| | Navigate towards a target x-y position and heading with the ANYmal C robot. |
+----------------+---------------------+-----------------------------------------------------------------------------+
.. |anymal_c_nav-link| replace:: `Isaac-Navigation-Flat-Anymal-C-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/navigation/config/anymal_c/navigation_env_cfg.py>`__
.. |anymal_c_nav| image:: ../_static/tasks/navigation/anymal_c_nav.jpg
Others
------
.. table::
:widths: 33 37 30
+----------------+---------------------+-----------------------------------------------------------------------------+
| World | Environment ID | Description |
+================+=====================+=============================================================================+
| |quadcopter| | |quadcopter-link| | Fly and hover the Crazyflie copter at a goal point by applying thrust. |
+----------------+---------------------+-----------------------------------------------------------------------------+
.. |quadcopter-link| replace:: `Isaac-Quadcopter-Direct-v0 <https://github.com/isaac-sim/IsaacLab/blob/main/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/quadcopter/quadcopter_env.py>`__
.. |quadcopter| image:: ../_static/tasks/others/quadcopter.jpg
| 22,201 |
reStructuredText
| 99.461538 | 266 | 0.516553 |
isaac-sim/IsaacLab/docs/source/refs/changelog.rst
|
Extensions Changelog
====================
All notable changes to this project are documented in this file. The format is based on
`Keep a Changelog <https://keepachangelog.com/en/1.0.0/>`__ and this project adheres to
`Semantic Versioning <https://semver.org/spec/v2.0.0.html>`__. For a broader information
about the changes in the framework, please refer to the
`release notes <https://github.com/isaac-sim/IsaacLab/releases/>`__.
Each extension has its own changelog, located in the ``docs`` directory of the extension. These
changelogs are also included here to make it easier to find the changelog for a specific extension.
omni.isaac.lab
--------------
Extension containing the core framework of Isaac Lab.
.. include:: ../../../source/extensions/omni.isaac.lab/docs/CHANGELOG.rst
:start-line: 3
omni.isaac.lab_assets
---------------------
Extension for configurations of various assets and sensors for Isaac Lab.
.. include:: ../../../source/extensions/omni.isaac.lab_assets/docs/CHANGELOG.rst
:start-line: 3
omni.isaac.lab_tasks
--------------------
Extension containing the environments built using Isaac Lab.
.. include:: ../../../source/extensions/omni.isaac.lab_tasks/docs/CHANGELOG.rst
:start-line: 3
| 1,299 |
reStructuredText
| 32.333333 | 89 | 0.712086 |
isaac-sim/IsaacLab/docs/source/migration/migrating_from_omniisaacgymenvs.rst
|
.. _migrating-from-omniisaacgymenvs:
Migrating from OmniIsaacGymEnvs
===============================
.. currentmodule:: omni.isaac.lab
OmniIsaacGymEnvs was a reinforcement learning framework using the Isaac Sim platform.
Features from OmniIsaacGymEnvs have been integrated into the Isaac Lab framework.
We have updated OmniIsaacGymEnvs to Isaac Sim version 4.0.0 to support the migration process
to Isaac Lab. Moving forward, OmniIsaacGymEnvs will be deprecated and future development
will continue in Isaac Lab.
Task Config Setup
~~~~~~~~~~~~~~~~~
In OmniIsaacGymEnvs, task config files were defined in ``.yaml`` format. With Isaac Lab, configs are now specified using a specialized
Python class :class:`~omni.isaac.lab.utils.configclass`. The :class:`~omni.isaac.lab.utils.configclass` module provides a wrapper on top of Python's ``dataclasses`` module.
Each environment should specify its own config class annotated by ``@configclass`` that inherits from :class:`~envs.DirectRLEnvCfg`,
which can include simulation parameters, environment scene parameters, robot parameters, and task-specific parameters.
Below is an example skeleton of a task config class:
.. code-block:: python
from omni.isaac.lab.envs import DirectRLEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationCfg
@configclass
class MyEnvCfg(DirectRLEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg()
# robot
robot_cfg: ArticulationCfg = ArticulationCfg()
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg()
# env
decimation = 2
episode_length_s = 5.0
num_actions = 1
num_observations = 4
num_states = 0
# task-specific parameters
...
Simulation Config
-----------------
Simulation related parameters are defined as part of the :class:`~omni.isaac.lab.sim.SimulationCfg` class, which is a :class:`~omni.isaac.lab.utils.configclass` module
that holds simulation parameters such as ``dt``, ``device``, and ``gravity``.
Each task config must have a variable named ``sim`` defined that holds the type :class:`~omni.isaac.lab.sim.SimulationCfg`.
Simulation parameters for articulations and rigid bodies such as ``num_position_iterations``, ``num_velocity_iterations``,
``contact_offset``, ``rest_offset``, ``bounce_threshold_velocity``, ``max_depenetration_velocity`` can all
be specified on a per-actor basis in the config class for each individual articulation and rigid body.
When running simulation on the GPU, buffers in PhysX require pre-allocation for computing and storing
information such as contacts, collisions and aggregate pairs. These buffers may need to be adjusted
depending on the complexity of the environment, the number of expected contacts and collisions,
and the number of actors in the environment. The :class:`~omni.isaac.lab.sim.PhysxCfg` class provides access for setting the GPU buffer dimensions.
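For example, a sketch of enlarging two of these buffers for a contact-rich scene (the values shown are illustrative):

.. code-block:: python

    from omni.isaac.lab.sim import PhysxCfg, SimulationCfg

    # enlarge the GPU contact and pair buffers beyond their defaults
    sim: SimulationCfg = SimulationCfg(
        dt=1 / 120,
        physx=PhysxCfg(
            gpu_max_rigid_contact_count=2**24,
            gpu_found_lost_pairs_capacity=2**22,
        ),
    )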
+--------------------------------------------------------------+-------------------------------------------------------------------+
| | |
|.. code-block:: yaml |.. code-block:: python |
| | |
| # OmniIsaacGymEnvs | # IsaacLab |
| sim: | sim: SimulationCfg = SimulationCfg( |
| dt: 0.0083 # 1/120 s | dt=1 / 120, |
| use_gpu_pipeline: ${eq:${...pipeline},"gpu"} | use_gpu_pipeline=True, |
| use_fabric: True | use_fabric=True, |
| enable_scene_query_support: False | enable_scene_query_support=False, |
| disable_contact_processing: False | disable_contact_processing=False, |
| gravity: [0.0, 0.0, -9.81] | gravity=(0.0, 0.0, -9.81), |
| | |
| default_physics_material: | physics_material=RigidBodyMaterialCfg( |
| static_friction: 1.0 | static_friction=1.0, |
| dynamic_friction: 1.0 | dynamic_friction=1.0, |
| restitution: 0.0 | restitution=0.0 |
| | ) |
| physx: | physx: PhysxCfg = PhysxCfg( |
| worker_thread_count: ${....num_threads} | # worker_thread_count is no longer needed |
| solver_type: ${....solver_type} | solver_type=1, |
| use_gpu: ${contains:"cuda",${....sim_device}} | use_gpu=True, |
| solver_position_iteration_count: 4 | max_position_iteration_count=4, |
| solver_velocity_iteration_count: 0 | max_velocity_iteration_count=0, |
| contact_offset: 0.02 | # moved to actor config |
| rest_offset: 0.001 | # moved to actor config |
| bounce_threshold_velocity: 0.2 | bounce_threshold_velocity=0.2, |
| friction_offset_threshold: 0.04 | friction_offset_threshold=0.04, |
| friction_correlation_distance: 0.025 | friction_correlation_distance=0.025, |
| enable_sleeping: True | # enable_sleeping is no longer needed |
| enable_stabilization: True | enable_stabilization=True, |
| max_depenetration_velocity: 100.0 | # moved to RigidBodyPropertiesCfg |
| | |
| gpu_max_rigid_contact_count: 524288 | gpu_max_rigid_contact_count=2**23, |
| gpu_max_rigid_patch_count: 81920 | gpu_max_rigid_patch_count=5 * 2**15, |
| gpu_found_lost_pairs_capacity: 1024 | gpu_found_lost_pairs_capacity=2**21, |
| gpu_found_lost_aggregate_pairs_capacity: 262144 | gpu_found_lost_aggregate_pairs_capacity=2**25, |
| gpu_total_aggregate_pairs_capacity: 1024 | gpu_total_aggregate_pairs_capacity=2**21, |
| gpu_heap_capacity: 67108864 | gpu_heap_capacity=2**26, |
| gpu_temp_buffer_capacity: 16777216 | gpu_temp_buffer_capacity=2**24, |
| gpu_max_num_partitions: 8 | gpu_max_num_partitions=8, |
| gpu_max_soft_body_contacts: 1048576 | gpu_max_soft_body_contacts=2**20, |
| gpu_max_particle_contacts: 1048576 | gpu_max_particle_contacts=2**20, |
| | ) |
| | ) |
+--------------------------------------------------------------+-------------------------------------------------------------------+
Parameters such as ``add_ground_plane`` and ``add_distant_light`` are now part of the task logic when creating the scene.
``enable_cameras`` is now a command line argument ``--enable_cameras`` that can be passed directly to the training script.
Scene Config
------------
The :class:`~omni.isaac.lab.scene.InteractiveSceneCfg` class can be used to specify parameters related to the scene, such as the number of environments
and the spacing between environments.
Each task config must have a variable named ``scene`` defined that holds the type :class:`~omni.isaac.lab.scene.InteractiveSceneCfg`.
+--------------------------------------------------------------+-------------------------------------------------------------------+
| | |
|.. code-block:: yaml |.. code-block:: python |
| | |
| # OmniIsaacGymEnvs | # IsaacLab |
| env: | scene: InteractiveSceneCfg = InteractiveSceneCfg( |
| numEnvs: ${resolve_default:512,${...num_envs}} | num_envs=512, |
| envSpacing: 4.0 | env_spacing=4.0) |
+--------------------------------------------------------------+-------------------------------------------------------------------+
Task Config
-----------
Each environment should specify its own config class that holds task specific parameters, such as the dimensions of the
observation and action buffers. Reward term scaling parameters can also be specified in the config class.
In Isaac Lab, the ``controlFrequencyInv`` parameter has been renamed to ``decimation``,
which must be specified as a parameter in the config class.
In addition, the maximum episode length parameter (now ``episode_length_s``) is in seconds instead of steps as it was in OmniIsaacGymEnvs.
To convert from step count to seconds, use the equation: ``episode_length_s = dt * decimation * num_steps``.
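For example, with ``dt = 1/120`` and ``decimation = 2``, an episode of 300 steps corresponds to ``episode_length_s = (1 / 120) * 2 * 300 = 5.0`` seconds.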
The following parameters must be set for each environment config:
.. code-block:: python
decimation = 2
episode_length_s = 5.0
num_actions = 1
num_observations = 4
num_states = 0
RL Config Setup
~~~~~~~~~~~~~~~
RL config files for the rl_games library can continue to be defined in ``.yaml`` files in Isaac Lab.
Most of the content of the config file can be copied directly from OmniIsaacGymEnvs.
Note that in Isaac Lab, we do not use hydra to resolve relative paths in config files.
Please replace any relative paths such as ``${....device}`` with the actual values of the parameters.
Additionally, the observation and action clip ranges have been moved to the RL config file.
For any ``clipObservations`` and ``clipActions`` parameters that were defined in the OmniIsaacGymEnvs task config file,
they should be moved to the RL config file in Isaac Lab.
+--------------------------+----------------------------+
| | |
| IsaacGymEnvs Task Config | Isaac Lab RL Config |
+--------------------------+----------------------------+
|.. code-block:: yaml |.. code-block:: yaml |
| | |
| # OmniIsaacGymEnvs | # IsaacLab |
| env: | params: |
| clipObservations: 5.0 | env: |
| clipActions: 1.0 | clip_observations: 5.0 |
| | clip_actions: 1.0 |
+--------------------------+----------------------------+
Environment Creation
~~~~~~~~~~~~~~~~~~~~
In OmniIsaacGymEnvs, environment creation generally happened in the ``set_up_scene()`` API,
which involved creating the initial environment, cloning the environment, filtering collisions,
adding the ground plane and lights, and creating the ``View`` classes for the actors.
Similar functionality is performed in Isaac Lab in the ``_setup_scene()`` API.
The main difference is that the base class ``_setup_scene()`` no longer performs operations for
cloning the environment and adding ground plane and lights. Instead, these operations
should now be implemented in individual tasks' ``_setup_scene`` implementations to provide more
flexibility around the scene setup process.
Also note that by defining an ``Articulation`` or ``RigidObject`` object, the actors will be
added to the scene by parsing the ``spawn`` parameter in the actor config and a ``View`` class
will automatically be created for the actor. This avoids the need to separately define an
``ArticulationView`` or ``RigidPrimView`` object for the actors.
+------------------------------------------------------------------------------+------------------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+------------------------------------------------------------------------------+------------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def set_up_scene(self, scene) -> None: | def _setup_scene(self): |
| self.get_cartpole() | self.cartpole = Articulation(self.cfg.robot_cfg) |
| super().set_up_scene(scene) | # add ground plane |
|                                                                              | spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg())   |
| self._cartpoles = ArticulationView( | # clone, filter, and replicate |
| prim_paths_expr="/World/envs/.*/Cartpole", | self.scene.clone_environments(copy_from_source=False) |
| name="cartpole_view", reset_xform_properties=False | self.scene.filter_collisions(global_prim_paths=[]) |
| )                                                                            | # add articulation to scene                                            |
| scene.add(self._cartpoles) | self.scene.articulations["cartpole"] = self.cartpole |
| | # add lights |
| | light_cfg = sim_utils.DomeLightCfg(intensity=2000.0) |
| | light_cfg.func("/World/Light", light_cfg) |
+------------------------------------------------------------------------------+------------------------------------------------------------------------+
Ground Plane
------------
In addition to the above example, more sophisticated ground planes can be defined using the :class:`~terrains.TerrainImporterCfg` class.
.. code-block:: python
from omni.isaac.lab.terrains import TerrainImporterCfg
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
)
The terrain can then be added to the scene in ``_setup_scene(self)`` by referencing the ``TerrainImporterCfg`` object:
.. code-block:: python
def _setup_scene(self):
...
self.cfg.terrain.num_envs = self.scene.cfg.num_envs
self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing
self._terrain = self.cfg.terrain.class_type(self.cfg.terrain)
Actors
------
In Isaac Lab, each Articulation and Rigid Body actor can have its own config class.
The :class:`~omni.isaac.lab.assets.ArticulationCfg` can be
used to define parameters for articulation actors, including file path, simulation parameters, actuator properties, and initial states.
.. code-block:: python
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
CARTPOLE_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Classic/Cartpole/cartpole.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
rigid_body_enabled=True,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=100.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 2.0), joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0}
),
actuators={
"cart_actuator": ImplicitActuatorCfg(
joint_names_expr=["slider_to_cart"],
effort_limit=400.0,
velocity_limit=100.0,
stiffness=0.0,
damping=10.0,
),
"pole_actuator": ImplicitActuatorCfg(
joint_names_expr=["cart_to_pole"], effort_limit=400.0, velocity_limit=100.0, stiffness=0.0, damping=0.0
),
},
)
Within the :class:`~assets.ArticulationCfg`, the ``spawn`` attribute can be used to add the robot to the scene by specifying the path to the robot file.
In addition, :class:`~omni.isaac.lab.sim.schemas.RigidBodyPropertiesCfg` can be used to specify simulation properties
for the rigid bodies in the articulation.
Similarly, :class:`~omni.isaac.lab.sim.schemas.ArticulationRootPropertiesCfg` can be used to specify simulation properties for the articulation.
Joint and dof properties are now specified as part of the ``actuators`` dictionary using :class:`~actuators.ImplicitActuatorCfg`.
Joints and dofs with the same properties can be grouped into regex expressions or provided as a list of names or expressions.
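For instance, an illustrative fragment of an ``actuators`` dictionary that groups joints with shared gains under a single regex expression (joint names such as ``lf_hip_joint`` are hypothetical):

.. code-block:: python

    from omni.isaac.lab.actuators import ImplicitActuatorCfg

    # fragment of an ArticulationCfg definition
    actuators = {
        "hips": ImplicitActuatorCfg(
            joint_names_expr=[".*_hip_joint"],  # matches lf_hip_joint, rf_hip_joint, ...
            effort_limit=80.0,
            stiffness=40.0,
            damping=2.0,
        ),
    }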
Actors are added to the scene by simply calling ``self.cartpole = Articulation(self.cfg.robot_cfg)``,
where ``self.cfg.robot_cfg`` is an :class:`~assets.ArticulationCfg` object. Once initialized, they should also be added
to the :class:`~scene.InteractiveScene` by calling ``self.scene.articulations["cartpole"] = self.cartpole`` so that
the :class:`~scene.InteractiveScene` can traverse through actors in the scene for writing values to the simulation and resetting.
Accessing States from Simulation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
APIs for accessing physics states in Isaac Lab require the creation of an :class:`~assets.Articulation` or :class:`~assets.RigidObject`
object. Multiple objects can be initialized for different articulations or rigid bodies in the scene by defining
corresponding :class:`~assets.ArticulationCfg` or :class:`~assets.RigidObjectCfg` config, as outlined in the section above.
This replaces the ``ArticulationView`` and ``RigidPrimView`` classes previously used in OmniIsaacGymEnvs.
However, the functionality between the classes is similar:
+------------------------------------------------------------------+-----------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+------------------------------------------------------------------+-----------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| dof_pos = self._cartpoles.get_joint_positions(clone=False) | self.joint_pos = self._robot.data.joint_pos |
| dof_vel = self._cartpoles.get_joint_velocities(clone=False) | self.joint_vel = self._robot.data.joint_vel |
+------------------------------------------------------------------+-----------------------------------------------------------------+
In Isaac Lab, :class:`~assets.Articulation` and :class:`~assets.RigidObject` classes both have a ``data`` class.
The data classes (:class:`~assets.ArticulationData` and :class:`~assets.RigidObjectData`) contain
buffers that hold the states for the articulation and rigid objects and provide
a more performant way of retrieving states from the actors.
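For instance, a few commonly used buffers can be read directly from the data class (a sketch; the attributes shown are not exhaustive):

.. code-block:: python

    root_pos = self.cartpole.data.root_pos_w   # root positions in world frame, (num_envs, 3)
    joint_pos = self.cartpole.data.joint_pos   # joint positions, (num_envs, num_joints)
    joint_vel = self.cartpole.data.joint_vel   # joint velocities, (num_envs, num_joints)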
Apart from some renamings of APIs, setting states for actors can also be performed similarly between OmniIsaacGymEnvs and Isaac Lab.
+---------------------------------------------------------------------------+---------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+---------------------------------------------------------------------------+---------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| indices = env_ids.to(dtype=torch.int32) | self._robot.write_joint_state_to_sim(joint_pos, joint_vel, |
| self._cartpoles.set_joint_positions(dof_pos, indices=indices) | joint_ids, env_ids) |
| self._cartpoles.set_joint_velocities(dof_vel, indices=indices) | |
+---------------------------------------------------------------------------+---------------------------------------------------------------+
In Isaac Lab, ``root_pose`` and ``root_velocity`` have been combined into single buffers and no longer split between
``root_position``, ``root_orientation``, ``root_linear_velocity`` and ``root_angular_velocity``.
.. code-block:: python
self.cartpole.write_root_pose_to_sim(default_root_state[:, :7], env_ids)
self.cartpole.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids)
Creating a New Environment
~~~~~~~~~~~~~~~~~~~~~~~~~~
Each environment in Isaac Lab should be in its own directory following this structure:
.. code-block:: none
my_environment/
- agents/
- __init__.py
- rl_games_ppo_cfg.py
- __init__.py
- my_env.py
* ``my_environment`` is the root directory of the task.
* ``my_environment/agents`` is the directory containing all RL config files for the task. Isaac Lab supports multiple RL libraries, each of which can have its own config file.
* ``my_environment/__init__.py`` is the main file that registers the environment with the Gymnasium interface. This allows the training and inferencing scripts to find the task by its name. The content of this file should be as follows:
.. code-block:: python
import gymnasium as gym
from . import agents
from .cartpole_env import CartpoleEnv, CartpoleEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Cartpole-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct_workflow.cartpole:CartpoleEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": CartpoleEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml"
},
)
* ``my_environment/my_env.py`` is the main python script that implements the task logic and task config class for the environment.
Task Logic
~~~~~~~~~~
The ``post_reset`` API in OmniIsaacGymEnvs is no longer required in Isaac Lab.
Everything that was previously done in ``post_reset`` can be done in the ``__init__`` method after
executing the base class's ``__init__``. At this point, simulation has already started.
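A sketch of this pattern, using the Cartpole names from this guide (``find_joints`` resolves joint indices once simulation has started):

.. code-block:: python

    def __init__(self, cfg: CartpoleEnvCfg, render_mode: str | None = None, **kwargs):
        super().__init__(cfg, render_mode, **kwargs)
        # previously done in post_reset(): resolve indices and allocate buffers
        self._cart_dof_idx, _ = self.cartpole.find_joints(self.cfg.cart_dof_name)
        self._pole_dof_idx, _ = self.cartpole.find_joints(self.cfg.pole_dof_name)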
In OmniIsaacGymEnvs, due to limitations of the GPU APIs, resets could not be performed based on states of the current step.
Instead, resets had to be performed at the beginning of the next time step.
This restriction has been eliminated in Isaac Lab, and thus, tasks follow the correct workflow of applying actions, stepping simulation,
collecting states, computing dones, calculating rewards, performing resets, and finally computing observations.
This workflow is done automatically by the framework such that a ``post_physics_step`` API is not required in the task.
However, individual tasks can override the ``step()`` API to control the workflow.
In Isaac Lab, we also separate the ``pre_physics_step`` API for processing actions from the policy with
the ``apply_action`` API, which sets the actions into the simulation. This provides more flexibility in controlling
when actions should be written to simulation when ``decimation`` is used.
``pre_physics_step`` will be called once per step before stepping simulation.
``apply_action`` will be called ``decimation`` number of times for each RL step, once before each simulation step call.
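A sketch of how the two APIs split the work (``action_scale`` and the joint indices are assumed to be defined on the config and task, as in the Cartpole example):

.. code-block:: python

    def _pre_physics_step(self, actions: torch.Tensor):
        # called once per RL step: process/scale the policy actions
        self.actions = self.cfg.action_scale * actions.clone()

    def _apply_action(self):
        # called ``decimation`` times per RL step, before each simulation step
        self.cartpole.set_joint_effort_target(self.actions, joint_ids=self._cart_dof_idx)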
The ordering of the calls is as follows:
+----------------------------------+----------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+----------------------------------+----------------------------------+
|.. code-block:: none |.. code-block:: none |
| | |
| pre_physics_step | pre_physics_step |
| |-- reset_idx() | |-- _pre_physics_step(action)|
| |-- apply_action | |-- _apply_action() |
| | |
| post_physics_step | post_physics_step |
| |-- get_observations() | |-- _get_dones() |
| |-- calculate_metrics() | |-- _get_rewards() |
| |-- is_done() | |-- _reset_idx() |
| | |-- _get_observations() |
+----------------------------------+----------------------------------+
With this approach, resets are performed based on actions from the current step instead of the previous step.
Observations will also be computed with the correct states after resets.
We have also performed some renamings of APIs:
* ``set_up_scene(self, scene)`` --> ``_setup_scene(self)``
* ``post_reset(self)`` --> ``__init__(...)``
* ``pre_physics_step(self, actions)`` --> ``_pre_physics_step(self, actions)`` and ``_apply_action(self)``
* ``reset_idx(self, env_ids)`` --> ``_reset_idx(self, env_ids)``
* ``get_observations(self)`` --> ``_get_observations(self)`` - ``_get_observations()`` should now return a dictionary ``{"policy": obs}``
* ``calculate_metrics(self)`` --> ``_get_rewards(self)`` - ``_get_rewards()`` should now return the reward buffer
* ``is_done(self)`` --> ``_get_dones(self)`` - ``_get_dones()`` should now return 2 buffers: ``reset`` and ``time_out`` buffers
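For instance, a minimal ``_get_dones()`` following the renamed API might look like this (a sketch using the Cartpole parameters from this guide):

.. code-block:: python

    def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
        self.joint_pos = self.cartpole.data.joint_pos
        # terminate when the cart leaves the allowed range
        out_of_bounds = torch.any(
            torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1
        )
        # truncate when the episode reaches its maximum length
        time_out = self.episode_length_buf >= self.max_episode_length - 1
        return out_of_bounds, time_out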
Putting It All Together
~~~~~~~~~~~~~~~~~~~~~~~
The Cartpole environment is shown here in full to provide a complete comparison between the OmniIsaacGymEnvs implementation and the Isaac Lab implementation.
Task Config
-----------
Task config in Isaac Lab can be split into the main task configuration class and individual config objects for the actors.
+-----------------------------------------------------------------+-----------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+-----------------------------------------------------------------+-----------------------------------------------------------------+
|.. code-block:: yaml |.. code-block:: python |
| | |
| # used to create the object | @configclass |
| | class CartpoleEnvCfg(DirectRLEnvCfg): |
| name: Cartpole | |
| | # simulation |
| physics_engine: ${..physics_engine} | sim: SimulationCfg = SimulationCfg(dt=1 / 120) |
| | # robot |
| # if given, will override the device setting in gym. | robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace( |
| env: | prim_path="/World/envs/env_.*/Robot") |
| | cart_dof_name = "slider_to_cart" |
| numEnvs: ${resolve_default:512,${...num_envs}} | pole_dof_name = "cart_to_pole" |
| envSpacing: 4.0 | # scene |
| resetDist: 3.0 | scene: InteractiveSceneCfg = InteractiveSceneCfg( |
| maxEffort: 400.0 | num_envs=4096, env_spacing=4.0, replicate_physics=True) |
| | # env |
| clipObservations: 5.0 | decimation = 2 |
| clipActions: 1.0 | episode_length_s = 5.0 |
| controlFrequencyInv: 2 # 60 Hz | action_scale = 100.0 # [N] |
| | num_actions = 1 |
| sim: | num_observations = 4 |
| | num_states = 0 |
| dt: 0.0083 # 1/120 s | # reset |
| use_gpu_pipeline: ${eq:${...pipeline},"gpu"} | max_cart_pos = 3.0 |
| gravity: [0.0, 0.0, -9.81] | initial_pole_angle_range = [-0.25, 0.25] |
| add_ground_plane: True | # reward scales |
| add_distant_light: False | rew_scale_alive = 1.0 |
| use_fabric: True | rew_scale_terminated = -2.0 |
| enable_scene_query_support: False | rew_scale_pole_pos = -1.0 |
| disable_contact_processing: False | rew_scale_cart_vel = -0.01 |
| | rew_scale_pole_vel = -0.005 |
| enable_cameras: False | |
| | |
| default_physics_material: | CARTPOLE_CFG = ArticulationCfg( |
| static_friction: 1.0 | spawn=sim_utils.UsdFileCfg( |
| dynamic_friction: 1.0 | usd_path=f"{ISAACLAB_NUCLEUS_DIR}/.../cartpole.usd", |
| restitution: 0.0 | rigid_props=sim_utils.RigidBodyPropertiesCfg( |
| | rigid_body_enabled=True, |
| physx: | max_linear_velocity=1000.0, |
| worker_thread_count: ${....num_threads} | max_angular_velocity=1000.0, |
| solver_type: ${....solver_type} | max_depenetration_velocity=100.0, |
| use_gpu: ${eq:${....sim_device},"gpu"} # set to False to... | enable_gyroscopic_forces=True, |
| solver_position_iteration_count: 4 | ), |
| solver_velocity_iteration_count: 0 | articulation_props=sim_utils.ArticulationRootPropertiesCfg( |
| contact_offset: 0.02 | enabled_self_collisions=False, |
| rest_offset: 0.001 | solver_position_iteration_count=4, |
| bounce_threshold_velocity: 0.2 | solver_velocity_iteration_count=0, |
| friction_offset_threshold: 0.04 | sleep_threshold=0.005, |
| friction_correlation_distance: 0.025 | stabilization_threshold=0.001, |
| enable_sleeping: True | ), |
| enable_stabilization: True | ), |
| max_depenetration_velocity: 100.0 | init_state=ArticulationCfg.InitialStateCfg( |
| | pos=(0.0, 0.0, 2.0), |
| # GPU buffers | joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0} |
| gpu_max_rigid_contact_count: 524288 | ), |
| gpu_max_rigid_patch_count: 81920 | actuators={ |
| gpu_found_lost_pairs_capacity: 1024 | "cart_actuator": ImplicitActuatorCfg( |
| gpu_found_lost_aggregate_pairs_capacity: 262144 | joint_names_expr=["slider_to_cart"], |
| gpu_total_aggregate_pairs_capacity: 1024 | effort_limit=400.0, |
| gpu_max_soft_body_contacts: 1048576 | velocity_limit=100.0, |
| gpu_max_particle_contacts: 1048576 | stiffness=0.0, |
| gpu_heap_capacity: 67108864 | damping=10.0, |
| gpu_temp_buffer_capacity: 16777216 | ), |
| gpu_max_num_partitions: 8 | "pole_actuator": ImplicitActuatorCfg( |
| | joint_names_expr=["cart_to_pole"], effort_limit=400.0, |
| Cartpole: | velocity_limit=100.0, stiffness=0.0, damping=0.0 |
| override_usd_defaults: False | ), |
| enable_self_collisions: False | }, |
| enable_gyroscopic_forces: True | ) |
| solver_position_iteration_count: 4 | |
| solver_velocity_iteration_count: 0 | |
| sleep_threshold: 0.005 | |
| stabilization_threshold: 0.001 | |
| density: -1 | |
| max_depenetration_velocity: 100.0 | |
| contact_offset: 0.02 | |
| rest_offset: 0.001 | |
+-----------------------------------------------------------------+-----------------------------------------------------------------+
Task Setup
----------
The ``post_reset`` API in OmniIsaacGymEnvs is no longer required in Isaac Lab.
Everything that was previously done in ``post_reset`` can be done in the ``__init__`` method after
executing the base class's ``__init__``. At this point, simulation has already started.
+-------------------------------------------------------------------------+-------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+-------------------------------------------------------------------------+-------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| class CartpoleTask(RLTask): | class CartpoleEnv(DirectRLEnv): |
| | cfg: CartpoleEnvCfg |
| def __init__(self, name, sim_config, env, offset=None) -> None: | def __init__(self, cfg: CartpoleEnvCfg, |
| | render_mode: str | None = None, **kwargs): |
| self.update_config(sim_config) | super().__init__(cfg, render_mode, **kwargs) |
| self._max_episode_length = 500 | |
| | |
| self._num_observations = 4 | self._cart_dof_idx, _ = self.cartpole.find_joints( |
| self._num_actions = 1 | self.cfg.cart_dof_name) |
| | self._pole_dof_idx, _ = self.cartpole.find_joints( |
| RLTask.__init__(self, name, env) | self.cfg.pole_dof_name) |
| | self.action_scale=self.cfg.action_scale |
| def update_config(self, sim_config): | |
| self._sim_config = sim_config | self.joint_pos = self.cartpole.data.joint_pos |
| self._cfg = sim_config.config | self.joint_vel = self.cartpole.data.joint_vel |
| self._task_cfg = sim_config. | |
| task_config | |
| | |
| self._num_envs = self._task_cfg["env"]["numEnvs"] | |
| self._env_spacing = self._task_cfg["env"]["envSpacing"] | |
| self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0]) | |
| | |
| self._reset_dist = self._task_cfg["env"]["resetDist"] | |
| self._max_push_effort = self._task_cfg["env"]["maxEffort"] | |
| | |
| | |
| def post_reset(self): | |
| self._cart_dof_idx = self._cartpoles.get_dof_index( | |
| "cartJoint") | |
| self._pole_dof_idx = self._cartpoles.get_dof_index( | |
| "poleJoint") | |
| # randomize all envs | |
| indices = torch.arange( | |
| self._cartpoles.count, dtype=torch.int64, | |
| device=self._device) | |
| self.reset_idx(indices) | |
+-------------------------------------------------------------------------+-------------------------------------------------------------+
Scene Setup
-----------
``set_up_scene`` in OmniIsaacGymEnvs has been replaced by ``_setup_scene``.
In Isaac Lab, cloning and collision filtering have been provided as APIs for the task class to call when necessary.
Similarly, adding the ground plane and lights should also be taken care of in the task class.
Adding actors to the scene has been replaced by ``self.scene.articulations["cartpole"] = self.cartpole``.
+-----------------------------------------------------------+----------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+-----------------------------------------------------------+----------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def set_up_scene(self, scene) -> None: | def _setup_scene(self): |
| | self.cartpole = Articulation(self.cfg.robot_cfg) |
| self.get_cartpole() | # add ground plane |
| super().set_up_scene(scene) | spawn_ground_plane(prim_path="/World/ground", |
| self._cartpoles = ArticulationView( | cfg=GroundPlaneCfg()) |
| prim_paths_expr="/World/envs/.*/Cartpole", | # clone, filter, and replicate |
| name="cartpole_view", | self.scene.clone_environments( |
| reset_xform_properties=False | copy_from_source=False) |
| ) | self.scene.filter_collisions( |
| scene.add(self._cartpoles) | global_prim_paths=[]) |
|        return                                             |        # add articulation to scene                       |
| | self.scene.articulations["cartpole"] = self.cartpole |
| def get_cartpole(self): | |
| cartpole = Cartpole( | # add lights |
| prim_path=self.default_zero_env_path+"/Cartpole", | light_cfg = sim_utils.DomeLightCfg( |
| name="Cartpole", | intensity=2000.0, color=(0.75, 0.75, 0.75)) |
| translation=self._cartpole_positions | light_cfg.func("/World/Light", light_cfg) |
| ) | |
| # applies articulation settings from the | |
| # task configuration yaml file | |
| self._sim_config.apply_articulation_settings( | |
| "Cartpole", get_prim_at_path(cartpole.prim_path), | |
| self._sim_config.parse_actor_config("Cartpole") | |
| ) | |
+-----------------------------------------------------------+----------------------------------------------------------+
Pre-Physics Step
----------------
Note that resets are no longer performed in the ``pre_physics_step`` API.
In addition, the separation of ``_pre_physics_step`` and ``_apply_action`` allows for more flexibility
in processing the action buffer and setting actions into simulation.
+------------------------------------------------------------------+-------------------------------------------------------------+
| OmniIsaacGymEnvs                 | Isaac Lab                        |
+------------------------------------------------------------------+-------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def pre_physics_step(self, actions) -> None: | def _pre_physics_step(self, |
| if not self.world.is_playing(): | actions: torch.Tensor) -> None: |
| return | self.actions = self.action_scale * actions |
| | |
| reset_env_ids = self.reset_buf.nonzero( | def _apply_action(self) -> None: |
| as_tuple=False).squeeze(-1) | self.cartpole.set_joint_effort_target( |
| if len(reset_env_ids) > 0: | self.actions, joint_ids=self._cart_dof_idx) |
| self.reset_idx(reset_env_ids) | |
| | |
| actions = actions.to(self._device) | |
| | |
| forces = torch.zeros((self._cartpoles.count, | |
| self._cartpoles.num_dof), | |
| dtype=torch.float32, device=self._device) | |
| forces[:, self._cart_dof_idx] = | |
| self._max_push_effort * actions[:, 0] | |
| | |
| indices = torch.arange(self._cartpoles.count, | |
| dtype=torch.int32, device=self._device) | |
| self._cartpoles.set_joint_efforts( | |
| forces, indices=indices) | |
+------------------------------------------------------------------+-------------------------------------------------------------+
Dones and Resets
----------------
In Isaac Lab, ``dones`` are computed in the ``_get_dones()`` method and should return two variables: ``resets`` and ``time_out``.
``_reset_idx()`` is also called after stepping simulation rather than before, as was done in OmniIsaacGymEnvs.
``progress_buf`` has been renamed to ``episode_length_buf`` in Isaac Lab and
bookkeeping is now done automatically by the framework. Task implementations should no longer increment and reset the ``episode_length_buf`` buffer.
+------------------------------------------------------------------+--------------------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+------------------------------------------------------------------+--------------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def is_done(self) -> None: | def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: |
| resets = torch.where( | self.joint_pos = self.cartpole.data.joint_pos |
| torch.abs(self.cart_pos) > self._reset_dist, 1, 0) | self.joint_vel = self.cartpole.data.joint_vel |
| resets = torch.where( | |
| torch.abs(self.pole_pos) > math.pi / 2, 1, resets) | time_out = self.episode_length_buf >= self.max_episode_length - 1 |
| resets = torch.where( | out_of_bounds = torch.any(torch.abs( |
|        self.progress_buf >= self._max_episode_length, 1, resets) |        self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos,   |
| self.reset_buf[:] = resets | dim=1) |
| | out_of_bounds = out_of_bounds | torch.any( |
| | torch.abs(self.joint_pos[:, self._pole_dof_idx]) > math.pi / 2, |
| | dim=1) |
| | return out_of_bounds, time_out |
| | |
| def reset_idx(self, env_ids): | def _reset_idx(self, env_ids: Sequence[int] | None): |
| num_resets = len(env_ids) | if env_ids is None: |
| | env_ids = self.cartpole._ALL_INDICES |
| # randomize DOF positions | super()._reset_idx(env_ids) |
| dof_pos = torch.zeros((num_resets, self._cartpoles.num_dof), | |
| device=self._device) | joint_pos = self.cartpole.data.default_joint_pos[env_ids] |
| dof_pos[:, self._cart_dof_idx] = 1.0 * ( | joint_pos[:, self._pole_dof_idx] += sample_uniform( |
| 1.0 - 2.0 * torch.rand(num_resets, device=self._device)) | self.cfg.initial_pole_angle_range[0] * math.pi, |
| dof_pos[:, self._pole_dof_idx] = 0.125 * math.pi * ( | self.cfg.initial_pole_angle_range[1] * math.pi, |
| 1.0 - 2.0 * torch.rand(num_resets, device=self._device)) | joint_pos[:, self._pole_dof_idx].shape, |
| | joint_pos.device, |
| # randomize DOF velocities | ) |
| dof_vel = torch.zeros((num_resets, self._cartpoles.num_dof), | joint_vel = self.cartpole.data.default_joint_vel[env_ids] |
| device=self._device) | |
| dof_vel[:, self._cart_dof_idx] = 0.5 * ( | default_root_state = self.cartpole.data.default_root_state[env_ids] |
| 1.0 - 2.0 * torch.rand(num_resets, device=self._device)) | default_root_state[:, :3] += self.scene.env_origins[env_ids] |
| dof_vel[:, self._pole_dof_idx] = 0.25 * math.pi * ( | |
| 1.0 - 2.0 * torch.rand(num_resets, device=self._device)) | self.joint_pos[env_ids] = joint_pos |
| | self.joint_vel[env_ids] = joint_vel |
| # apply resets | |
| indices = env_ids.to(dtype=torch.int32) | self.cartpole.write_root_pose_to_sim( |
| self._cartpoles.set_joint_positions(dof_pos, indices=indices) | default_root_state[:, :7], env_ids) |
| self._cartpoles.set_joint_velocities(dof_vel, indices=indices) | self.cartpole.write_root_velocity_to_sim( |
| | default_root_state[:, 7:], env_ids) |
| # bookkeeping | self.cartpole.write_joint_state_to_sim( |
| self.reset_buf[env_ids] = 0 | joint_pos, joint_vel, None, env_ids) |
| self.progress_buf[env_ids] = 0 | |
| | |
| | |
+------------------------------------------------------------------+--------------------------------------------------------------------------+
Rewards
-------
In Isaac Lab, rewards are implemented in the ``_get_rewards`` API and should return the reward buffer instead of assigning
it directly to ``self.rew_buf``. Computation in the reward function can also be performed using the PyTorch JIT
by defining functions with the ``@torch.jit.script`` annotation.
+-------------------------------------------------------+-----------------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+-------------------------------------------------------+-----------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def calculate_metrics(self) -> None: | def _get_rewards(self) -> torch.Tensor: |
| reward = (1.0 - self.pole_pos * self.pole_pos | total_reward = compute_rewards( |
| - 0.01 * torch.abs(self.cart_vel) - 0.005 | self.cfg.rew_scale_alive, |
| * torch.abs(self.pole_vel)) | self.cfg.rew_scale_terminated, |
| reward = torch.where( | self.cfg.rew_scale_pole_pos, |
| torch.abs(self.cart_pos) > self._reset_dist, | self.cfg.rew_scale_cart_vel, |
| torch.ones_like(reward) * -2.0, reward) | self.cfg.rew_scale_pole_vel, |
| reward = torch.where( | self.joint_pos[:, self._pole_dof_idx[0]], |
| torch.abs(self.pole_pos) > np.pi / 2, | self.joint_vel[:, self._pole_dof_idx[0]], |
| torch.ones_like(reward) * -2.0, reward) | self.joint_pos[:, self._cart_dof_idx[0]], |
| | self.joint_vel[:, self._cart_dof_idx[0]], |
| self.rew_buf[:] = reward | self.reset_terminated, |
| | ) |
| | return total_reward |
| | |
| | @torch.jit.script |
| | def compute_rewards( |
| | rew_scale_alive: float, |
| | rew_scale_terminated: float, |
| | rew_scale_pole_pos: float, |
| | rew_scale_cart_vel: float, |
| | rew_scale_pole_vel: float, |
| | pole_pos: torch.Tensor, |
| | pole_vel: torch.Tensor, |
| | cart_pos: torch.Tensor, |
| | cart_vel: torch.Tensor, |
| | reset_terminated: torch.Tensor, |
| | ): |
| | rew_alive = rew_scale_alive * (1.0 - reset_terminated.float()) |
| | rew_termination = rew_scale_terminated * reset_terminated.float() |
| | rew_pole_pos = rew_scale_pole_pos * torch.sum( |
| | torch.square(pole_pos), dim=-1) |
| | rew_cart_vel = rew_scale_cart_vel * torch.sum( |
| | torch.abs(cart_vel), dim=-1) |
| | rew_pole_vel = rew_scale_pole_vel * torch.sum( |
| | torch.abs(pole_vel), dim=-1) |
| | total_reward = (rew_alive + rew_termination |
| | + rew_pole_pos + rew_cart_vel + rew_pole_vel) |
| | return total_reward |
+-------------------------------------------------------+-----------------------------------------------------------------------+
Observations
------------
In Isaac Lab, the ``_get_observations()`` API must return a dictionary with the key ``policy`` that has the observation buffer as the value.
When working with asymmetric actor-critic states, the states for the critic should have the key ``critic`` and be returned
with the observation buffer in the same dictionary.
+------------------------------------------------------------------+-------------------------------------------------------------+
| OmniIsaacGymEnvs | Isaac Lab |
+------------------------------------------------------------------+-------------------------------------------------------------+
|.. code-block:: python                                            |.. code-block:: python                                      |
| | |
| def get_observations(self) -> dict: | def _get_observations(self) -> dict: |
| dof_pos = self._cartpoles.get_joint_positions(clone=False) | obs = torch.cat( |
| dof_vel = self._cartpoles.get_joint_velocities(clone=False) | ( |
| | self.joint_pos[:, self._pole_dof_idx[0]], |
| self.cart_pos = dof_pos[:, self._cart_dof_idx] | self.joint_vel[:, self._pole_dof_idx[0]], |
| self.cart_vel = dof_vel[:, self._cart_dof_idx] | self.joint_pos[:, self._cart_dof_idx[0]], |
| self.pole_pos = dof_pos[:, self._pole_dof_idx] | self.joint_vel[:, self._cart_dof_idx[0]], |
| self.pole_vel = dof_vel[:, self._pole_dof_idx] | ), |
| self.obs_buf[:, 0] = self.cart_pos | dim=-1, |
| self.obs_buf[:, 1] = self.cart_vel | ) |
| self.obs_buf[:, 2] = self.pole_pos | observations = {"policy": obs} |
| self.obs_buf[:, 3] = self.pole_vel | return observations |
| | |
| observations = {self._cartpoles.name: | |
| {"obs_buf": self.obs_buf}} | |
| return observations | |
+------------------------------------------------------------------+-------------------------------------------------------------+
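For asymmetric actor-critic setups, the privileged states are returned alongside the policy observations.
Below is a minimal sketch of what this could look like; the ``states`` tensor here is an assumed placeholder
for whatever critic-only observations the task provides:

.. code-block:: python

    def _get_observations(self) -> dict:
        # policy observations, e.g. the concatenated joint states shown above
        obs = torch.cat((self.joint_pos, self.joint_vel), dim=-1)
        # privileged observations for the critic (assumed to be available in the task)
        states = torch.cat((self.joint_pos, self.joint_vel, self.actions), dim=-1)
        return {"policy": obs, "critic": states}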
Domain Randomization
~~~~~~~~~~~~~~~~~~~~
In OmniIsaacGymEnvs, domain randomization was specified through the task ``.yaml`` config file.
In Isaac Lab, the domain randomization configuration uses the :class:`~omni.isaac.lab.utils.configclass` module
to specify a configuration class consisting of :class:`~managers.EventTermCfg` variables.
Below is an example of a configuration class for domain randomization:
.. code-block:: python
@configclass
class EventCfg:
robot_physics_material = EventTerm(
func=mdp.randomize_rigid_body_material,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", body_names=".*"),
"static_friction_range": (0.7, 1.3),
"dynamic_friction_range": (1.0, 1.0),
"restitution_range": (1.0, 1.0),
"num_buckets": 250,
},
)
robot_joint_stiffness_and_damping = EventTerm(
func=mdp.randomize_actuator_gains,
mode="reset",
params={
"asset_cfg": SceneEntityCfg("robot", joint_names=".*"),
"stiffness_distribution_params": (0.75, 1.5),
"damping_distribution_params": (0.3, 3.0),
"operation": "scale",
"distribution": "log_uniform",
},
)
reset_gravity = EventTerm(
func=mdp.randomize_physics_scene_gravity,
mode="interval",
is_global_time=True,
interval_range_s=(36.0, 36.0), # time_s = num_steps * (decimation * dt)
params={
"gravity_distribution_params": ([0.0, 0.0, 0.0], [0.0, 0.0, 0.4]),
"operation": "add",
"distribution": "gaussian",
},
)
Each ``EventTerm`` object is of the :class:`~managers.EventTermCfg` class and takes in a ``func`` parameter
for specifying the function to call during randomization, a ``mode`` parameter, which can be ``startup``,
``reset`` or ``interval``. THe ``params`` dictionary should provide the necessary arguments to the
function that is specified in the ``func`` parameter.
Functions specified as ``func`` for the ``EventTerm`` can be found in the :class:`~envs.mdp.events` module.
Note that as part of the ``"asset_cfg": SceneEntityCfg("robot", body_names=".*")`` parameter, the name of
the actor ``"robot"`` is provided, along with the body or joint names specified as a regex expression.
These designate the actors and bodies/joints to which randomization will be applied.
One difference from OmniIsaacGymEnvs is that ``interval`` randomization is now specified in seconds instead of
steps. When ``mode="interval"``, the ``interval_range_s`` parameter must also be provided, which specifies
the range of seconds over which randomization should be applied. A value is then sampled from this range to
determine the specific time in seconds at which the next randomization will occur for the term.
To convert from steps to seconds, use the equation ``time_s = num_steps * (decimation * dt)``.
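For example, a quick sanity check of the ``interval_range_s`` value used in the config above
(``dt`` and ``decimation`` follow the Cartpole config in this guide):

.. code-block:: python

    # illustrative arithmetic only
    dt = 1 / 120      # simulation time step [s]
    decimation = 2    # simulation steps per RL step
    num_steps = 2160  # RL steps between randomizations

    time_s = num_steps * (decimation * dt)
    print(time_s)  # 36.0 -> matches interval_range_s=(36.0, 36.0)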
Similar to OmniIsaacGymEnvs, randomization APIs are available for randomizing articulation properties,
such as joint stiffness and damping, joint limits, rigid body materials, and fixed tendon properties,
as well as rigid body properties, such as mass and rigid body materials. Randomization of the
physics scene gravity is also supported. Note that randomization of scale is currently not supported
in Isaac Lab. To randomize scale, please set up the scene such that each environment holds the actor
at a different scale.
Once the ``configclass`` for the randomization terms has been set up, the class must be added
to the base config class for the task and assigned to the variable ``events``.
.. code-block:: python
@configclass
class MyTaskConfig:
events: EventCfg = EventCfg()
Action and Observation Noise
----------------------------
Actions and observation noise can also be added using the :class:`~utils.configclass` module.
Action and observation noise configs must be added to the main task config using the
``action_noise_model`` and ``observation_noise_model`` variables:
.. code-block:: python
@configclass
class MyTaskConfig:
# at every time-step add gaussian noise + bias. The bias is a gaussian sampled at reset
action_noise_model: NoiseModelWithAdditiveBiasCfg = NoiseModelWithAdditiveBiasCfg(
noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.05, operation="add"),
bias_noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.015, operation="abs"),
)
# at every time-step add gaussian noise + bias. The bias is a gaussian sampled at reset
observation_noise_model: NoiseModelWithAdditiveBiasCfg = NoiseModelWithAdditiveBiasCfg(
noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.002, operation="add"),
bias_noise_cfg=GaussianNoiseCfg(mean=0.0, std=0.0001, operation="abs"),
)
:class:`~.utils.noise.NoiseModelWithAdditiveBiasCfg` can be used to sample both uncorrelated noise
per step as well as correlated noise that is re-sampled at reset time.
The ``noise_cfg`` term specifies the Gaussian distribution that will be sampled at each
step for all environments. This noise will be added to the corresponding actions and
observations buffers at every step.
The ``bias_noise_cfg`` term specifies the Gaussian distribution for the correlated noise
that will be sampled at reset time for the environments being reset. The same noise
will be applied at each step for the remainder of the episode for those environments and
resampled at the next reset.
This replaces the following setup in OmniIsaacGymEnvs:
.. code-block:: yaml
domain_randomization:
randomize: True
randomization_params:
observations:
on_reset:
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0, .0001]
on_interval:
frequency_interval: 1
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0, .002]
actions:
on_reset:
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0, 0.015]
on_interval:
frequency_interval: 1
operation: "additive"
distribution: "gaussian"
distribution_parameters: [0., 0.05]
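Conceptually, the behavior of the additive-bias noise model described above can be sketched as follows.
This is an illustration of the semantics only, not the actual implementation; the standard deviations
are the observation values from the config shown earlier:

.. code-block:: python

    import torch

    num_envs, obs_dim = 4, 4

    # correlated bias: sampled per environment at reset and held for the episode
    bias = torch.randn(num_envs, obs_dim) * 0.0001

    def apply_noise(obs: torch.Tensor) -> torch.Tensor:
        # uncorrelated noise: re-sampled at every step for all environments
        step_noise = torch.randn_like(obs) * 0.002
        return obs + step_noise + bias

    def on_reset(env_ids: torch.Tensor) -> None:
        # re-sample the correlated bias only for the environments being reset
        bias[env_ids] = torch.randn(len(env_ids), obs_dim) * 0.0001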
Launching Training
~~~~~~~~~~~~~~~~~~
To launch a training in Isaac Lab, use the command:
.. code-block:: bash
python source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-Direct-v0 --headless
Launching Inferencing
~~~~~~~~~~~~~~~~~~~~~
To launch inferencing in Isaac Lab, use the command:
.. code-block:: bash
python source/standalone/workflows/rl_games/play.py --task=Isaac-Cartpole-Direct-v0 --num_envs=25 --checkpoint=<path/to/checkpoint>
.. _migrating-from-isaacgymenvs:
Migrating from IsaacGymEnvs and Isaac Gym Preview Release
=========================================================
.. currentmodule:: omni.isaac.lab
IsaacGymEnvs was a reinforcement learning framework designed for the Isaac Gym Preview Release.
As both IsaacGymEnvs and the Isaac Gym Preview Release are now deprecated, the following guide walks through the key differences
between IsaacGymEnvs and Isaac Lab, as well as differences in APIs between Isaac Gym Preview Release
and Isaac Sim.
Task Config Setup
~~~~~~~~~~~~~~~~~
In IsaacGymEnvs, task config files were defined in ``.yaml`` format. With Isaac Lab, configs are now specified using a specialized
Python class :class:`~omni.isaac.lab.utils.configclass`. The :class:`~omni.isaac.lab.utils.configclass` module provides a wrapper on top of Python's ``dataclasses`` module.
Each environment should specify its own config class annotated by ``@configclass`` that inherits from :class:`~envs.DirectRLEnvCfg`,
which can include simulation parameters, environment scene parameters, robot parameters, and task-specific parameters.
Below is an example skeleton of a task config class:
.. code-block:: python
from omni.isaac.lab.envs import DirectRLEnvCfg
from omni.isaac.lab.scene import InteractiveSceneCfg
from omni.isaac.lab.sim import SimulationCfg
@configclass
class MyEnvCfg(DirectRLEnvCfg):
# simulation
sim: SimulationCfg = SimulationCfg()
# robot
robot_cfg: ArticulationCfg = ArticulationCfg()
# scene
scene: InteractiveSceneCfg = InteractiveSceneCfg()
# env
decimation = 2
episode_length_s = 5.0
num_actions = 1
num_observations = 4
num_states = 0
# task-specific parameters
...
Simulation Config
-----------------
Simulation related parameters are defined as part of the :class:`~omni.isaac.lab.sim.SimulationCfg` class, which is a :class:`~omni.isaac.lab.utils.configclass` module
that holds simulation parameters such as ``dt``, ``device``, and ``gravity``.
Each task config must have a variable named ``sim`` defined that holds the type :class:`~omni.isaac.lab.sim.SimulationCfg`.
In Isaac Lab, the use of ``substeps`` has been replaced
by a combination of the simulation ``dt`` and the ``decimation`` parameters. For example, in IsaacGymEnvs, having ``dt=1/60`` and ``substeps=2``
is equivalent to taking 2 simulation steps with ``dt=1/120``, but running the task step at ``1/60`` seconds.
The ``decimation`` parameter is a task parameter that controls the number of simulation steps to take for each task (or RL) step, replacing the ``controlFrequencyInv`` parameter in IsaacGymEnvs.
Thus, the same setup in Isaac Lab will become ``dt=1/120`` and ``decimation=2``.
In Isaac Sim, physx simulation parameters such as ``num_position_iterations``, ``num_velocity_iterations``,
``contact_offset``, ``rest_offset``, ``bounce_threshold_velocity``, ``max_depenetration_velocity`` can all
be specified on a per-actor basis. These parameters have been moved from the physx simulation config
to each individual articulation and rigid body config.
When running simulation on the GPU, buffers in PhysX require pre-allocation for computing and storing
information such as contacts, collisions and aggregate pairs. These buffers may need to be adjusted
depending on the complexity of the environment, the number of expected contacts and collisions,
and the number of actors in the environment. The :class:`~omni.isaac.lab.sim.PhysxCfg` class provides access for setting the GPU buffer dimensions.
+--------------------------------------------------------------+-------------------------------------------------------------------+
| | |
|.. code-block:: yaml |.. code-block:: python |
| | |
| # IsaacGymEnvs | # IsaacLab |
| sim: | sim: SimulationCfg = SimulationCfg( |
| dt: 0.0166 # 1/60 s | dt=1 / 120, |
| substeps: 2 | # decimation will be set in the task config |
| up_axis: "z" | # up axis will always be Z in isaac sim |
| use_gpu_pipeline: ${eq:${...pipeline},"gpu"} | use_gpu_pipeline=True, |
| gravity: [0.0, 0.0, -9.81] | gravity=(0.0, 0.0, -9.81), |
| physx: | physx: PhysxCfg = PhysxCfg( |
| num_threads: ${....num_threads} | # num_threads is no longer needed |
| solver_type: ${....solver_type} | solver_type=1, |
| use_gpu: ${contains:"cuda",${....sim_device}} | use_gpu=True, |
| num_position_iterations: 4 | max_position_iteration_count=4, |
| num_velocity_iterations: 0 | max_velocity_iteration_count=0, |
| contact_offset: 0.02 | # moved to actor config |
| rest_offset: 0.001 | # moved to actor config |
| bounce_threshold_velocity: 0.2 | bounce_threshold_velocity=0.2, |
| max_depenetration_velocity: 100.0 | # moved to actor config |
| default_buffer_size_multiplier: 2.0 | # default_buffer_size_multiplier is no longer needed |
| max_gpu_contact_pairs: 1048576 # 1024*1024 | gpu_max_rigid_contact_count=2**23 |
| num_subscenes: ${....num_subscenes} | # num_subscenes is no longer needed |
| contact_collection: 0 | # contact_collection is no longer needed |
| | )) |
+--------------------------------------------------------------+-------------------------------------------------------------------+
Scene Config
------------
The :class:`~omni.isaac.lab.scene.InteractiveSceneCfg` class can be used to specify parameters related to the scene, such as the number of environments
and the spacing between environments.
Each task config must have a variable named ``scene`` defined that holds the type :class:`~omni.isaac.lab.scene.InteractiveSceneCfg`.
+--------------------------------------------------------------+-------------------------------------------------------------------+
| | |
|.. code-block:: yaml |.. code-block:: python |
| | |
| # IsaacGymEnvs | # IsaacLab |
| env: | scene: InteractiveSceneCfg = InteractiveSceneCfg( |
| numEnvs: ${resolve_default:512,${...num_envs}} | num_envs=512, |
| envSpacing: 4.0 | env_spacing=4.0) |
+--------------------------------------------------------------+-------------------------------------------------------------------+
Task Config
-----------
Each environment should specify its own config class that holds task specific parameters, such as the dimensions of the
observation and action buffers. Reward term scaling parameters can also be specified in the config class.
The following parameters must be set for each environment config:
.. code-block:: python
decimation = 2
episode_length_s = 5.0
num_actions = 1
num_observations = 4
num_states = 0
Note that the maximum episode length parameter (now ``episode_length_s``) is given in seconds, whereas IsaacGymEnvs used steps. To convert
from step count to seconds, use the equation ``episode_length_s = dt * decimation * num_steps``.
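For example, with ``dt = 1/120`` and ``decimation = 2`` as in the Cartpole config in this guide,
an episode length of 5 seconds corresponds to 300 RL steps:

.. code-block:: python

    # illustrative arithmetic only
    dt = 1 / 120
    decimation = 2
    num_steps = 300

    episode_length_s = dt * decimation * num_steps
    print(episode_length_s)  # 5.0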
RL Config Setup
~~~~~~~~~~~~~~~
RL config files for the rl_games library can continue to be defined in ``.yaml`` files in Isaac Lab.
Most of the content of the config file can be copied directly from IsaacGymEnvs.
Note that in Isaac Lab, we do not use Hydra to resolve relative paths in config files.
Please replace any relative paths such as ``${....device}`` with the actual values of the parameters.
Additionally, the observation and action clip ranges have been moved to the RL config file.
For any ``clipObservations`` and ``clipActions`` parameters that were defined in the IsaacGymEnvs task config file,
they should be moved to the RL config file in Isaac Lab.
+--------------------------+----------------------------+
| | |
| IsaacGymEnvs Task Config | Isaac Lab RL Config |
+--------------------------+----------------------------+
|.. code-block:: yaml |.. code-block:: yaml |
| | |
| # IsaacGymEnvs | # IsaacLab |
| env: | params: |
| clipObservations: 5.0 | env: |
| clipActions: 1.0 | clip_observations: 5.0 |
| | clip_actions: 1.0 |
+--------------------------+----------------------------+
Environment Creation
~~~~~~~~~~~~~~~~~~~~
In IsaacGymEnvs, environment creation generally included four components: creating the sim object with ``create_sim()``,
creating the ground plane, importing the assets from MJCF or URDF files, and finally creating the environments
by looping through each environment and adding actors into the environments.
Isaac Lab no longer requires calling the ``create_sim()`` method to retrieve the sim object. Instead, the simulation
context is retrieved automatically by the framework. It is also no longer required to use the ``sim`` as an
argument for the simulation APIs.
In place of ``create_sim()``, tasks can implement the ``_setup_scene()`` method in Isaac Lab.
This method can be used for adding actors into the scene, adding the ground plane, cloning the actors, and
adding any other optional objects into the scene, such as lights.
+------------------------------------------------------------------------------+------------------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+------------------------------------------------------------------------------+------------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def create_sim(self): | def _setup_scene(self): |
| # set the up axis to be z-up | self.cartpole = Articulation(self.cfg.robot_cfg) |
| self.up_axis = self.cfg["sim"]["up_axis"] | # add ground plane |
|                                                                               |        spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg())|
| self.sim = super().create_sim(self.device_id, self.graphics_device_id, | # clone, filter, and replicate |
| self.physics_engine, self.sim_params) | self.scene.clone_environments(copy_from_source=False) |
| self._create_ground_plane() | self.scene.filter_collisions(global_prim_paths=[]) |
|        self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'],       |        # add articulation to scene                                     |
| int(np.sqrt(self.num_envs))) | self.scene.articulations["cartpole"] = self.cartpole |
| | # add lights |
| | light_cfg = sim_utils.DomeLightCfg(intensity=2000.0) |
| | light_cfg.func("/World/Light", light_cfg) |
+------------------------------------------------------------------------------+------------------------------------------------------------------------+
Ground Plane
------------
In Isaac Lab, most of the environment creation process has been simplified into configs with the :class:`~omni.isaac.lab.utils.configclass` module.
The ground plane can be defined using the :class:`~terrains.TerrainImporterCfg` class.
.. code-block:: python
from omni.isaac.lab.terrains import TerrainImporterCfg
terrain = TerrainImporterCfg(
prim_path="/World/ground",
terrain_type="plane",
collision_group=-1,
physics_material=sim_utils.RigidBodyMaterialCfg(
friction_combine_mode="multiply",
restitution_combine_mode="multiply",
static_friction=1.0,
dynamic_friction=1.0,
restitution=0.0,
),
)
The terrain can then be added to the scene in ``_setup_scene(self)`` by referencing the ``TerrainImporterCfg`` object:
.. code-block:: python
def _setup_scene(self):
...
self.cfg.terrain.num_envs = self.scene.cfg.num_envs
self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing
self._terrain = self.cfg.terrain.class_type(self.cfg.terrain)
Actors
------
Isaac Lab and Isaac Sim both use the `USD (Universal Scene Description) <https://github.com/PixarAnimationStudios/OpenUSD>`_ library for describing the scene. Assets defined in MJCF and URDF formats can be imported to USD using importer tools described in the `Importing a New Asset <../../how-to/import_new_asset.rst>`_ tutorial.
Each Articulation and Rigid Body actor can also have its own config class. The :class:`~omni.isaac.lab.assets.ArticulationCfg` can be
used to define parameters for articulation actors, including file path, simulation parameters, actuator properties, and initial states.
.. code-block:: python
from omni.isaac.lab.actuators import ImplicitActuatorCfg
from omni.isaac.lab.assets import ArticulationCfg
CARTPOLE_CFG = ArticulationCfg(
spawn=sim_utils.UsdFileCfg(
usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/Classic/Cartpole/cartpole.usd",
rigid_props=sim_utils.RigidBodyPropertiesCfg(
rigid_body_enabled=True,
max_linear_velocity=1000.0,
max_angular_velocity=1000.0,
max_depenetration_velocity=100.0,
enable_gyroscopic_forces=True,
),
articulation_props=sim_utils.ArticulationRootPropertiesCfg(
enabled_self_collisions=False,
solver_position_iteration_count=4,
solver_velocity_iteration_count=0,
sleep_threshold=0.005,
stabilization_threshold=0.001,
),
),
init_state=ArticulationCfg.InitialStateCfg(
pos=(0.0, 0.0, 2.0), joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0}
),
actuators={
"cart_actuator": ImplicitActuatorCfg(
joint_names_expr=["slider_to_cart"],
effort_limit=400.0,
velocity_limit=100.0,
stiffness=0.0,
damping=10.0,
),
"pole_actuator": ImplicitActuatorCfg(
joint_names_expr=["cart_to_pole"], effort_limit=400.0, velocity_limit=100.0, stiffness=0.0, damping=0.0
),
},
)
Within the :class:`~assets.ArticulationCfg`, the ``spawn`` attribute can be used to add the robot to the scene by specifying the path to the robot file.
In addition, :class:`~omni.isaac.lab.sim.schemas.RigidBodyPropertiesCfg` can be used to specify simulation properties
for the rigid bodies in the articulation.
Similarly, :class:`~omni.isaac.lab.sim.schemas.ArticulationRootPropertiesCfg` can be used to specify simulation properties for the articulation.
Joint and dof properties are now specified as part of the ``actuators`` dictionary using :class:`~actuators.ImplicitActuatorCfg`.
Joints and dofs with the same properties can be grouped into regex expressions or provided as a list of names or expressions.
Actors are added to the scene by simply calling ``self.cartpole = Articulation(self.cfg.robot_cfg)``,
where ``self.cfg.robot_cfg`` is an :class:`~assets.ArticulationCfg` object. Once initialized, they should also be added
to the :class:`~scene.InteractiveScene` by calling ``self.scene.articulations["cartpole"] = self.cartpole`` so that
the :class:`~scene.InteractiveScene` can traverse through actors in the scene for writing values to the simulation and resetting.
Simulation Parameters for Actors
""""""""""""""""""""""""""""""""
Some simulation parameters related to Rigid Bodies and Articulations may have different
default values between Isaac Gym Preview Release and Isaac Sim.
It may be helpful to double check the USD assets to ensure that the default values are
applicable for the asset.
For instance, the following parameters in the ``RigidBodyAPI`` could be different
between Isaac Gym Preview Release and Isaac Sim:
.. list-table::
:widths: 50 50 50
:header-rows: 1
* - RigidBodyAPI Parameter
- Default Value in Isaac Sim
- Default Value in Isaac Gym Preview Release
* - Linear Damping
- 0.00
- 0.00
* - Angular Damping
- 0.05
- 0.0
* - Max Linear Velocity
- inf
- 1000
* - Max Angular Velocity
- 5729.58008 (degree/s)
- 64.0 (rad/s)
* - Max Contact Impulse
- inf
- 1e32
Articulation parameters for the ``JointAPI`` and ``DriveAPI`` could be altered as well. Note
that the Isaac Sim UI assumes the unit of angle to be degrees. It is particularly
worth noting that the ``Damping`` and ``Stiffness`` parameters in the ``DriveAPI`` have the unit
of ``1/deg`` in the Isaac Sim UI but ``1/rad`` in Isaac Gym Preview Release.
.. list-table::
:widths: 50 50 50
:header-rows: 1
* - Joint Parameter
- Default Value in Isaac Sim
- Default Value in Isaac Gym Preview Releases
* - Maximum Joint Velocity
- 1000000.0 (deg)
- 100.0 (rad)
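For example, a drive stiffness tuned per-radian for Isaac Gym Preview Release can be converted to the
per-degree convention shown in the Isaac Sim UI as follows (the gain value is chosen purely for illustration):

.. code-block:: python

    import math

    stiffness_per_rad = 400.0  # gain as tuned in Isaac Gym Preview Release
    # same torque for the same physical angle when the error is measured in degrees
    stiffness_per_deg = stiffness_per_rad * math.pi / 180.0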
Cloner
------
Isaac Sim introduced the concept of a ``Cloner``, a class designed for replication during the scene creation process.
In IsaacGymEnvs, scenes had to be created by looping through the number of environments.
Within each iteration, actors were added to each environment and their handles had to be cached.
Isaac Lab eliminates the need for looping through the environments by using the ``Cloner`` APIs.
The scene creation process is as follows:
#. Construct a single environment (what the scene would look like if number of environments = 1)
#. Call ``clone_environments()`` to replicate the single environment
#. Call ``filter_collisions()`` to filter out collision between environments (if required)
.. code-block:: python
# construct a single environment with the Cartpole robot
self.cartpole = Articulation(self.cfg.robot_cfg)
# clone the environment
self.scene.clone_environments(copy_from_source=False)
# filter collisions
self.scene.filter_collisions(global_prim_paths=[self.cfg.terrain.prim_path])
Accessing States from Simulation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
APIs for accessing physics states in Isaac Lab require the creation of an :class:`~assets.Articulation` or :class:`~assets.RigidObject`
object. Multiple objects can be initialized for different articulations or rigid bodies in the scene by defining
corresponding :class:`~assets.ArticulationCfg` or :class:`~assets.RigidObjectCfg` config as outlined in the section above.
This approach eliminates the need to retrieve body handles to slice states for specific bodies in the scene.
.. code-block:: python
self._robot = Articulation(self.cfg.robot)
self._cabinet = Articulation(self.cfg.cabinet)
self._object = RigidObject(self.cfg.object_cfg)
We have also removed ``acquire`` and ``refresh`` APIs in Isaac Lab. Physics states can be directly applied or retrieved
using APIs defined for the articulations and rigid objects.
APIs provided in Isaac Lab no longer require explicit wrapping and un-wrapping of underlying buffers.
APIs can now work with tensors directly for reading and writing data.
+------------------------------------------------------------------+-----------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+------------------------------------------------------------------+-----------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) | self.joint_pos = self._robot.data.joint_pos |
| self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) | self.joint_vel = self._robot.data.joint_vel |
| self.gym.refresh_dof_state_tensor(self.sim) | |
+------------------------------------------------------------------+-----------------------------------------------------------------+
Note some naming differences between APIs in Isaac Gym Preview Release and Isaac Lab. Most ``dof``-related APIs have been
renamed to ``joint`` in Isaac Lab.
APIs in Isaac Lab also no longer carry the explicit ``_tensors`` or ``_tensor_indexed`` suffixes;
indexed versions of APIs are now handled implicitly through the optional ``indices`` parameter.
Most APIs in Isaac Lab provide
the option to specify an ``indices`` parameter, which can be used when reading or writing data for a subset
of environments. Note that when setting states with the ``indices`` parameter, the shape of the states buffer
must match the dimension of the ``indices`` list.
+---------------------------------------------------------------------------+---------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+---------------------------------------------------------------------------+---------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| env_ids_int32 = env_ids.to(dtype=torch.int32) | self._robot.write_joint_state_to_sim(joint_pos, joint_vel, |
| self.gym.set_dof_state_tensor_indexed(self.sim, | joint_ids, env_ids) |
| gymtorch.unwrap_tensor(self.dof_state), | |
| gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) | |
+---------------------------------------------------------------------------+---------------------------------------------------------------+
Quaternion Convention
---------------------
Isaac Lab and Isaac Sim both adopt ``wxyz`` as the quaternion convention. However, the quaternion
convention used in Isaac Gym Preview Release was ``xyzw``.
Remember to switch all quaternions to use the ``wxyz`` convention when indexing rotation data.
Similarly, please ensure all quaternions are in ``wxyz`` before passing them to Isaac Lab APIs.
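A minimal sketch of converting between the two conventions, assuming quaternion tensors of shape ``(..., 4)``:

.. code-block:: python

    import torch

    quat_xyzw = torch.tensor([0.0, 0.0, 0.0, 1.0])  # identity rotation in xyzw

    # xyzw (Isaac Gym Preview Release) -> wxyz (Isaac Lab / Isaac Sim)
    quat_wxyz = quat_xyzw[..., [3, 0, 1, 2]]  # tensor([1., 0., 0., 0.])

    # wxyz (Isaac Lab / Isaac Sim) -> xyzw (Isaac Gym Preview Release)
    quat_xyzw_again = quat_wxyz[..., [1, 2, 3, 0]]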
Articulation Joint Order
------------------------
Physics simulation in Isaac Sim and Isaac Lab assumes a breadth-first
ordering for the joints in a given kinematic tree.
However, Isaac Gym Preview Release assumed a depth-first ordering for joints in the kinematic tree.
This means that indexing joints based on their ordering may be different in IsaacGymEnvs and Isaac Lab.
In Isaac Lab, the list of joint names can be retrieved with ``Articulation.data.joint_names``, which will
also correspond to the ordering of the joints in the Articulation.
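When porting code that hard-coded joint indices, one option is to remap by joint name instead of relying on
ordering. Below is a sketch using the Cartpole joint names from this guide; the ``legacy_order`` list is a
hypothetical ordering assumed by the old depth-first code:

.. code-block:: python

    # joint names in Isaac Lab's breadth-first ordering
    joint_names = self.cartpole.data.joint_names

    # hypothetical ordering assumed by the old depth-first IsaacGymEnvs code
    legacy_order = ["slider_to_cart", "cart_to_pole"]

    # indices that reorder Isaac Lab data into the legacy ordering
    remap = [joint_names.index(name) for name in legacy_order]
    joint_pos_legacy = self.cartpole.data.joint_pos[:, remap]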
Creating a New Environment
~~~~~~~~~~~~~~~~~~~~~~~~~~
Each environment in Isaac Lab should be in its own directory following this structure:
.. code-block:: none
my_environment/
- agents/
- __init__.py
- rl_games_ppo_cfg.py
- __init__.py
- my_env.py
* ``my_environment`` is the root directory of the task.
* ``my_environment/agents`` is the directory containing all RL config files for the task. Isaac Lab supports multiple RL libraries, each of which can have its own config file.
* ``my_environment/__init__.py`` is the main file that registers the environment with the Gymnasium interface. This allows the training and inferencing scripts to find the task by its name. The content of this file should be as follows:
.. code-block:: python
import gymnasium as gym
from . import agents
from .cartpole_env import CartpoleEnv, CartpoleEnvCfg
##
# Register Gym environments.
##
gym.register(
id="Isaac-Cartpole-Direct-v0",
entry_point="omni.isaac.lab_tasks.direct_workflow.cartpole:CartpoleEnv",
disable_env_checker=True,
kwargs={
"env_cfg_entry_point": CartpoleEnvCfg,
"rl_games_cfg_entry_point": f"{agents.__name__}:rl_games_ppo_cfg.yaml"
},
)
* ``my_environment/my_env.py`` is the main python script that implements the task logic and task config class for the environment.
Task Logic
~~~~~~~~~~
In Isaac Lab, the ``post_physics_step`` function has been moved to the framework in the base class.
Tasks are not required to implement this method, but can choose to override it if a different workflow is desired.
By default, Isaac Lab follows this flow of logic:
+----------------------------------+----------------------------------+
| IsaacGymEnvs | Isaac Lab |
+----------------------------------+----------------------------------+
|.. code-block:: none |.. code-block:: none |
| | |
| pre_physics_step | pre_physics_step |
| |-- apply_action | |-- _pre_physics_step(action)|
| | |-- _apply_action() |
| | |
| post_physics_step | post_physics_step |
| |-- reset_idx() | |-- _get_dones() |
| |-- compute_observation() | |-- _get_rewards() |
| |-- compute_reward() | |-- _reset_idx() |
| | |-- _get_observations() |
+----------------------------------+----------------------------------+
In Isaac Lab, we also separate the ``pre_physics_step`` API for processing actions from the policy with
the ``apply_action`` API, which sets the actions into the simulation. This provides more flexibility in controlling
when actions should be written to simulation when ``decimation`` is used.
``pre_physics_step`` will be called once per step before stepping simulation.
``apply_action`` will be called ``decimation`` number of times for each RL step, once before each simulation step call.
With this approach, resets are performed based on actions from the current step instead of the previous step.
Observations will also be computed with the correct states after resets.
We have also renamed some of the APIs:
* ``create_sim(self)`` --> ``_setup_scene(self)``
* ``pre_physics_step(self, actions)`` --> ``_pre_physics_step(self, actions)`` and ``_apply_action(self)``
* ``reset_idx(self, env_ids)`` --> ``_reset_idx(self, env_ids)``
* ``compute_observations(self)`` --> ``_get_observations(self)`` - ``_get_observations()`` should now return a dictionary ``{"policy": obs}``
* ``compute_reward(self)`` --> ``_get_rewards(self)`` - ``_get_rewards()`` should now return the reward buffer
* ``post_physics_step(self)`` --> moved to the base class
* In addition, Isaac Lab requires the implementation of ``_get_dones(self)``, which should return two buffers: the ``reset`` buffer and the ``time_out`` buffer (see the sketch below).
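A minimal sketch of this method, using the buffers from the Cartpole example in this guide:

.. code-block:: python

    def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
        time_out = self.episode_length_buf >= self.max_episode_length - 1
        out_of_bounds = (
            torch.abs(self.joint_pos[:, self._cart_dof_idx[0]]) > self.cfg.max_cart_pos
        )
        return out_of_bounds, time_out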
Putting It All Together
~~~~~~~~~~~~~~~~~~~~~~~
The Cartpole environment is shown here in full to illustrate the comparison between the IsaacGymEnvs implementation and the Isaac Lab implementation.
Task Config
-----------
+--------------------------------------------------------+---------------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+--------------------------------------------------------+---------------------------------------------------------------------+
|.. code-block:: yaml |.. code-block:: python |
| | |
| # used to create the object | @configclass |
| name: Cartpole | class CartpoleEnvCfg(DirectRLEnvCfg): |
| | |
| physics_engine: ${..physics_engine} | # simulation |
| | sim: SimulationCfg = SimulationCfg(dt=1 / 120) |
| # if given, will override the device setting in gym. | # robot |
| env: | robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace( |
| numEnvs: ${resolve_default:512,${...num_envs}} | prim_path="/World/envs/env_.*/Robot") |
| envSpacing: 4.0 | cart_dof_name = "slider_to_cart" |
| resetDist: 3.0 | pole_dof_name = "cart_to_pole" |
| maxEffort: 400.0 | # scene |
| | scene: InteractiveSceneCfg = InteractiveSceneCfg( |
| clipObservations: 5.0 | num_envs=4096, env_spacing=4.0, replicate_physics=True) |
| clipActions: 1.0 | # env |
| | decimation = 2 |
| asset: | episode_length_s = 5.0 |
| assetRoot: "../../assets" | action_scale = 100.0 # [N] |
| assetFileName: "urdf/cartpole.urdf" | num_actions = 1 |
| | num_observations = 4 |
| enableCameraSensors: False | num_states = 0 |
| | # reset |
| sim: | max_cart_pos = 3.0 |
| dt: 0.0166 # 1/60 s | initial_pole_angle_range = [-0.25, 0.25] |
| substeps: 2 | # reward scales |
| up_axis: "z" | rew_scale_alive = 1.0 |
| use_gpu_pipeline: ${eq:${...pipeline},"gpu"} | rew_scale_terminated = -2.0 |
| gravity: [0.0, 0.0, -9.81] | rew_scale_pole_pos = -1.0 |
| physx: | rew_scale_cart_vel = -0.01 |
| num_threads: ${....num_threads} | rew_scale_pole_vel = -0.005 |
| solver_type: ${....solver_type} | |
| use_gpu: ${contains:"cuda",${....sim_device}} | |
| num_position_iterations: 4 | |
| num_velocity_iterations: 0 | |
| contact_offset: 0.02 | |
| rest_offset: 0.001 | |
| bounce_threshold_velocity: 0.2 | |
| max_depenetration_velocity: 100.0 | |
| default_buffer_size_multiplier: 2.0 | |
| max_gpu_contact_pairs: 1048576 # 1024*1024 | |
| num_subscenes: ${....num_subscenes} | |
| contact_collection: 0 | |
+--------------------------------------------------------+---------------------------------------------------------------------+
Task Setup
----------
Isaac Lab no longer requires pre-initialization of buffers through the ``acquire_*`` APIs that were used in IsaacGymEnvs.
It is also no longer necessary to ``wrap`` and ``unwrap`` tensors.
+-------------------------------------------------------------------------+-------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+-------------------------------------------------------------------------+-------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| class Cartpole(VecTask): | class CartpoleEnv(DirectRLEnv): |
| | cfg: CartpoleEnvCfg |
| def __init__(self, cfg, rl_device, sim_device, graphics_device_id, | def __init__(self, cfg: CartpoleEnvCfg, |
| headless, virtual_screen_capture, force_render): | render_mode: str | None = None, **kwargs): |
| self.cfg = cfg | |
| | super().__init__(cfg, render_mode, **kwargs) |
| self.reset_dist = self.cfg["env"]["resetDist"] | |
| | self._cart_dof_idx, _ = self.cartpole.find_joints( |
| self.max_push_effort = self.cfg["env"]["maxEffort"] | self.cfg.cart_dof_name) |
| self.max_episode_length = 500 | self._pole_dof_idx, _ = self.cartpole.find_joints( |
| | self.cfg.pole_dof_name) |
| self.cfg["env"]["numObservations"] = 4 | self.action_scale = self.cfg.action_scale |
| self.cfg["env"]["numActions"] = 1 | |
| | self.joint_pos = self.cartpole.data.joint_pos |
| super().__init__(config=self.cfg, | self.joint_vel = self.cartpole.data.joint_vel |
| rl_device=rl_device, sim_device=sim_device, | |
| graphics_device_id=graphics_device_id, headless=headless, | |
| virtual_screen_capture=virtual_screen_capture, | |
| force_render=force_render) | |
| | |
| dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) | |
| self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) | |
| self.dof_pos = self.dof_state.view( | |
| self.num_envs, self.num_dof, 2)[..., 0] | |
| self.dof_vel = self.dof_state.view( | |
| self.num_envs, self.num_dof, 2)[..., 1] | |
+-------------------------------------------------------------------------+-------------------------------------------------------------+
Scene Setup
-----------
Scene setup is now done through the ``Cloner`` API and by specifying actor attributes in config objects.
This eliminates the need to loop through the number of environments to set up the environments and avoids
the need to set simulation parameters for actors in the task implementation.
+------------------------------------------------------------------------+---------------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+------------------------------------------------------------------------+---------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def create_sim(self): | def _setup_scene(self): |
| # set the up axis to be z-up given that assets are y-up by default | self.cartpole = Articulation(self.cfg.robot_cfg) |
| self.up_axis = self.cfg["sim"]["up_axis"] | # add ground plane |
| | spawn_ground_plane(prim_path="/World/ground", |
| self.sim = super().create_sim(self.device_id, | cfg=GroundPlaneCfg()) |
| self.graphics_device_id, self.physics_engine, | # clone, filter, and replicate |
| self.sim_params) | self.scene.clone_environments( |
| self._create_ground_plane() | copy_from_source=False) |
| self._create_envs(self.num_envs, | self.scene.filter_collisions( |
| self.cfg["env"]['envSpacing'], | global_prim_paths=[]) |
|            int(np.sqrt(self.num_envs)))                               |        # add articulation to scene                                 |
| | self.scene.articulations["cartpole"] = self.cartpole |
| def _create_ground_plane(self): | # add lights |
| plane_params = gymapi.PlaneParams() | light_cfg = sim_utils.DomeLightCfg( |
| # set the normal force to be z dimension | intensity=2000.0, color=(0.75, 0.75, 0.75)) |
| plane_params.normal = (gymapi.Vec3(0.0, 0.0, 1.0) | light_cfg.func("/World/Light", light_cfg) |
| if self.up_axis == 'z' | |
| else gymapi.Vec3(0.0, 1.0, 0.0)) | CARTPOLE_CFG = ArticulationCfg( |
| self.gym.add_ground(self.sim, plane_params) | spawn=sim_utils.UsdFileCfg( |
| | usd_path=f"{ISAACLAB_NUCLEUS_DIR}/.../cartpole.usd", |
| def _create_envs(self, num_envs, spacing, num_per_row): | rigid_props=sim_utils.RigidBodyPropertiesCfg( |
| # define plane on which environments are initialized | rigid_body_enabled=True, |
| lower = (gymapi.Vec3(0.5 * -spacing, -spacing, 0.0) | max_linear_velocity=1000.0, |
| if self.up_axis == 'z' | max_angular_velocity=1000.0, |
| else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing)) | max_depenetration_velocity=100.0, |
| upper = gymapi.Vec3(0.5 * spacing, spacing, spacing) | enable_gyroscopic_forces=True, |
| | ), |
| asset_root = os.path.join(os.path.dirname( | articulation_props=sim_utils.ArticulationRootPropertiesCfg( |
| os.path.abspath(__file__)), "../../assets") | enabled_self_collisions=False, |
| asset_file = "urdf/cartpole.urdf" | solver_position_iteration_count=4, |
| | solver_velocity_iteration_count=0, |
| if "asset" in self.cfg["env"]: | sleep_threshold=0.005, |
| asset_root = os.path.join(os.path.dirname( | stabilization_threshold=0.001, |
| os.path.abspath(__file__)), | ), |
| self.cfg["env"]["asset"].get("assetRoot", asset_root)) | ), |
| asset_file = self.cfg["env"]["asset"].get( | init_state=ArticulationCfg.InitialStateCfg( |
| "assetFileName", asset_file) | pos=(0.0, 0.0, 2.0), |
| | joint_pos={"slider_to_cart": 0.0, "cart_to_pole": 0.0} |
| asset_path = os.path.join(asset_root, asset_file) | ), |
| asset_root = os.path.dirname(asset_path) | actuators={ |
| asset_file = os.path.basename(asset_path) | "cart_actuator": ImplicitActuatorCfg( |
| | joint_names_expr=["slider_to_cart"], |
| asset_options = gymapi.AssetOptions() | effort_limit=400.0, |
| asset_options.fix_base_link = True | velocity_limit=100.0, |
| cartpole_asset = self.gym.load_asset(self.sim, | stiffness=0.0, |
| asset_root, asset_file, asset_options) | damping=10.0, |
| self.num_dof = self.gym.get_asset_dof_count( | ), |
| cartpole_asset) | "pole_actuator": ImplicitActuatorCfg( |
| | joint_names_expr=["cart_to_pole"], effort_limit=400.0, |
| pose = gymapi.Transform() | velocity_limit=100.0, stiffness=0.0, damping=0.0 |
| if self.up_axis == 'z': | ), |
| pose.p.z = 2.0 | }, |
| pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0) | ) |
| else: | |
| pose.p.y = 2.0 | |
| pose.r = gymapi.Quat( | |
| -np.sqrt(2)/2, 0.0, 0.0, np.sqrt(2)/2) | |
| | |
| self.cartpole_handles = [] | |
| self.envs = [] | |
| for i in range(self.num_envs): | |
| # create env instance | |
| env_ptr = self.gym.create_env( | |
| self.sim, lower, upper, num_per_row | |
| ) | |
| cartpole_handle = self.gym.create_actor( | |
| env_ptr, cartpole_asset, pose, | |
| "cartpole", i, 1, 0) | |
| | |
| dof_props = self.gym.get_actor_dof_properties( | |
| env_ptr, cartpole_handle) | |
| dof_props['driveMode'][0] = gymapi.DOF_MODE_EFFORT | |
| dof_props['driveMode'][1] = gymapi.DOF_MODE_NONE | |
| dof_props['stiffness'][:] = 0.0 | |
| dof_props['damping'][:] = 0.0 | |
|         self.gym.set_actor_dof_properties(env_ptr,                    |                                                                     |
|             cartpole_handle, dof_props)                               |                                                                     |
| | |
| self.envs.append(env_ptr) | |
| self.cartpole_handles.append(cartpole_handle) | |
+------------------------------------------------------------------------+---------------------------------------------------------------------+
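The number of environments and their spacing, previously passed to ``_create_envs``, now live in the scene
config rather than the task code. A minimal sketch, assuming a direct-workflow environment config (the
values are illustrative):

.. code-block:: python

   from omni.isaac.lab.scene import InteractiveSceneCfg

   # inside the task's environment config class: num_envs and env_spacing
   # replace the numEnvs / envSpacing entries of the IsaacGymEnvs yaml
   scene: InteractiveSceneCfg = InteractiveSceneCfg(
       num_envs=4096, env_spacing=4.0, replicate_physics=True
   )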
Pre and Post Physics Step
-------------------------
In IsaacGymEnvs, due to limitations of the GPU APIs, observations had stale data when environments had to perform resets.
This restriction has been eliminated in Isaac Lab, and thus, tasks follow the correct workflow of applying actions, stepping simulation,
collecting states, computing dones, calculating rewards, performing resets, and finally computing observations.
This workflow is done automatically by the framework such that a ``post_physics_step`` API is not required in the task.
However, individual tasks can override the ``step()`` API to control the workflow.
+------------------------------------------------------------------+-------------------------------------------------------------+
| IsaacGymEnvs                                                     | Isaac Lab                                                   |
+------------------------------------------------------------------+-------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def pre_physics_step(self, actions): | def _pre_physics_step(self, actions: torch.Tensor) -> None: |
| actions_tensor = torch.zeros( | self.actions = self.action_scale * actions |
| self.num_envs * self.num_dof, | |
| device=self.device, dtype=torch.float) | def _apply_action(self) -> None: |
| actions_tensor[::self.num_dof] = actions.to( | self.cartpole.set_joint_effort_target( |
| self.device).squeeze() * self.max_push_effort | self.actions, joint_ids=self._cart_dof_idx) |
| forces = gymtorch.unwrap_tensor(actions_tensor) | |
| self.gym.set_dof_actuation_force_tensor( | |
| self.sim, forces) | |
| | |
| def post_physics_step(self): | |
| self.progress_buf += 1 | |
| | |
| env_ids = self.reset_buf.nonzero( | |
| as_tuple=False).squeeze(-1) | |
| if len(env_ids) > 0: | |
| self.reset_idx(env_ids) | |
| | |
| self.compute_observations() | |
| self.compute_reward() | |
+------------------------------------------------------------------+-------------------------------------------------------------+
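For tasks that do need custom control over this workflow, a hypothetical ``step()`` override could look as
follows (a sketch only; the returned tuple follows the base-class layout):

.. code-block:: python

   def step(self, action: torch.Tensor):
       # custom pre-processing of the raw actions could go here
       obs, rew, terminated, truncated, extras = super().step(action)
       # custom post-processing of the returned buffers could go here
       return obs, rew, terminated, truncated, extras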
Dones and Resets
----------------
In Isaac Lab, ``dones`` are computed in the ``_get_dones()`` method and should return two variables: ``resets`` and ``time_out``.
Tracking of the ``progress_buf`` has been moved to the base class and is now automatically incremented and reset by the framework.
The ``progress_buf`` variable has also been renamed to ``episode_length_buf``.
+-----------------------------------------------------------------------+---------------------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+-----------------------------------------------------------------------+---------------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def reset_idx(self, env_ids): | def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: |
| positions = 0.2 * (torch.rand((len(env_ids), self.num_dof), | self.joint_pos = self.cartpole.data.joint_pos |
| device=self.device) - 0.5) | self.joint_vel = self.cartpole.data.joint_vel |
| velocities = 0.5 * (torch.rand((len(env_ids), self.num_dof), | |
| device=self.device) - 0.5) | time_out = self.episode_length_buf >= self.max_episode_length - 1 |
| | out_of_bounds = torch.any(torch.abs( |
|     self.dof_pos[env_ids, :] = positions[:]                            |         self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos,  |
| self.dof_vel[env_ids, :] = velocities[:] | dim=1) |
| | out_of_bounds = out_of_bounds | torch.any( |
| env_ids_int32 = env_ids.to(dtype=torch.int32) | torch.abs(self.joint_pos[:, self._pole_dof_idx]) > math.pi / 2, |
| self.gym.set_dof_state_tensor_indexed(self.sim, | dim=1) |
| gymtorch.unwrap_tensor(self.dof_state), | return out_of_bounds, time_out |
| gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32)) | |
| self.reset_buf[env_ids] = 0 | def _reset_idx(self, env_ids: Sequence[int] | None): |
| self.progress_buf[env_ids] = 0 | if env_ids is None: |
| | env_ids = self.cartpole._ALL_INDICES |
| | super()._reset_idx(env_ids) |
| | |
| | joint_pos = self.cartpole.data.default_joint_pos[env_ids] |
| | joint_pos[:, self._pole_dof_idx] += sample_uniform( |
| | self.cfg.initial_pole_angle_range[0] * math.pi, |
| | self.cfg.initial_pole_angle_range[1] * math.pi, |
| | joint_pos[:, self._pole_dof_idx].shape, |
| | joint_pos.device, |
| | ) |
| | joint_vel = self.cartpole.data.default_joint_vel[env_ids] |
| | |
| | default_root_state = self.cartpole.data.default_root_state[env_ids] |
| | default_root_state[:, :3] += self.scene.env_origins[env_ids] |
| | |
| | self.joint_pos[env_ids] = joint_pos |
| | |
| | self.cartpole.write_root_pose_to_sim( |
| | default_root_state[:, :7], env_ids) |
| | self.cartpole.write_root_velocity_to_sim( |
| | default_root_state[:, 7:], env_ids) |
| | self.cartpole.write_joint_state_to_sim( |
| | joint_pos, joint_vel, None, env_ids) |
+-----------------------------------------------------------------------+---------------------------------------------------------------------------+
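The ``_cart_dof_idx`` and ``_pole_dof_idx`` helpers used above are typically resolved once in the task
constructor. A minimal sketch, assuming the config exposes the joint names:

.. code-block:: python

   # resolve joint indices by name (regular expressions are supported)
   self._cart_dof_idx, _ = self.cartpole.find_joints(self.cfg.cart_dof_name)
   self._pole_dof_idx, _ = self.cartpole.find_joints(self.cfg.pole_dof_name)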
Observations
------------
In Isaac Lab, the ``_get_observations()`` API should now return a dictionary containing the ``policy`` key with the observation
buffer as the value.
For asymmetric policies, the dictionary should also include a ``critic`` key that holds the state buffer.
+--------------------------------------------------------------------------+---------------------------------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+--------------------------------------------------------------------------+---------------------------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def compute_observations(self, env_ids=None): | def _get_observations(self) -> dict: |
| if env_ids is None: | obs = torch.cat( |
| env_ids = np.arange(self.num_envs) | ( |
|                                                                          |             self.joint_pos[:, self._pole_dof_idx[0]].unsqueeze(dim=1),               |
|     self.gym.refresh_dof_state_tensor(self.sim)                          |             self.joint_vel[:, self._pole_dof_idx[0]].unsqueeze(dim=1),               |
|                                                                          |             self.joint_pos[:, self._cart_dof_idx[0]].unsqueeze(dim=1),               |
|     self.obs_buf[env_ids, 0] = self.dof_pos[env_ids, 0]                  |             self.joint_vel[:, self._cart_dof_idx[0]].unsqueeze(dim=1),               |
| self.obs_buf[env_ids, 1] = self.dof_vel[env_ids, 0] | ), |
| self.obs_buf[env_ids, 2] = self.dof_pos[env_ids, 1] | dim=-1, |
| self.obs_buf[env_ids, 3] = self.dof_vel[env_ids, 1] | ) |
| | observations = {"policy": obs} |
| return self.obs_buf | return observations |
+--------------------------------------------------------------------------+---------------------------------------------------------------------------------------+
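For an asymmetric policy, a sketch of the returned dictionary, assuming a hypothetical privileged state
tensor assembled from quantities available in the task:

.. code-block:: python

   def _get_observations(self) -> dict:
       obs = torch.cat((self.joint_pos, self.joint_vel), dim=-1)
       # hypothetical privileged state: actor observations plus termination flags
       states = torch.cat((obs, self.reset_terminated.float().unsqueeze(-1)), dim=-1)
       return {"policy": obs, "critic": states}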
Rewards
-------
In Isaac Lab, the reward method ``_get_rewards`` should return the reward buffer as a return value.
As in IsaacGymEnvs, computations in the reward function can also be performed with PyTorch JIT
by adding the ``@torch.jit.script`` annotation.
+--------------------------------------------------------------------------+----------------------------------------------------------------------------------------+
| IsaacGymEnvs | Isaac Lab |
+--------------------------------------------------------------------------+----------------------------------------------------------------------------------------+
|.. code-block:: python |.. code-block:: python |
| | |
| def compute_reward(self): | def _get_rewards(self) -> torch.Tensor: |
| # retrieve environment observations from buffer | total_reward = compute_rewards( |
| pole_angle = self.obs_buf[:, 2] | self.cfg.rew_scale_alive, |
| pole_vel = self.obs_buf[:, 3] | self.cfg.rew_scale_terminated, |
| cart_vel = self.obs_buf[:, 1] | self.cfg.rew_scale_pole_pos, |
| cart_pos = self.obs_buf[:, 0] | self.cfg.rew_scale_cart_vel, |
| | self.cfg.rew_scale_pole_vel, |
| self.rew_buf[:], self.reset_buf[:] = compute_cartpole_reward( | self.joint_pos[:, self._pole_dof_idx[0]], |
| pole_angle, pole_vel, cart_vel, cart_pos, | self.joint_vel[:, self._pole_dof_idx[0]], |
| self.reset_dist, self.reset_buf, | self.joint_pos[:, self._cart_dof_idx[0]], |
| self.progress_buf, self.max_episode_length | self.joint_vel[:, self._cart_dof_idx[0]], |
| ) | self.reset_terminated, |
| | ) |
| @torch.jit.script | return total_reward |
| def compute_cartpole_reward(pole_angle, pole_vel, | |
| cart_vel, cart_pos, | @torch.jit.script |
| reset_dist, reset_buf, | def compute_rewards( |
| progress_buf, max_episode_length): | rew_scale_alive: float, |
| | rew_scale_terminated: float, |
| reward = (1.0 - pole_angle * pole_angle - | rew_scale_pole_pos: float, |
| 0.01 * torch.abs(cart_vel) - | rew_scale_cart_vel: float, |
| 0.005 * torch.abs(pole_vel)) | rew_scale_pole_vel: float, |
| | pole_pos: torch.Tensor, |
| # adjust reward for reset agents | pole_vel: torch.Tensor, |
| reward = torch.where(torch.abs(cart_pos) > reset_dist, | cart_pos: torch.Tensor, |
| torch.ones_like(reward) * -2.0, reward) | cart_vel: torch.Tensor, |
| reward = torch.where(torch.abs(pole_angle) > np.pi / 2, | reset_terminated: torch.Tensor, |
| torch.ones_like(reward) * -2.0, reward) | ): |
| | rew_alive = rew_scale_alive * (1.0 - reset_terminated.float()) |
| reset = torch.where(torch.abs(cart_pos) > reset_dist, | rew_termination = rew_scale_terminated * reset_terminated.float() |
| torch.ones_like(reset_buf), reset_buf) | rew_pole_pos = rew_scale_pole_pos * torch.sum( |
| reset = torch.where(torch.abs(pole_angle) > np.pi / 2, | torch.square(pole_pos), dim=-1) |
| torch.ones_like(reset_buf), reset_buf) | rew_cart_vel = rew_scale_cart_vel * torch.sum( |
| reset = torch.where(progress_buf >= max_episode_length - 1, | torch.abs(cart_vel), dim=-1) |
| torch.ones_like(reset_buf), reset) | rew_pole_vel = rew_scale_pole_vel * torch.sum( |
| | torch.abs(pole_vel), dim=-1) |
| | total_reward = (rew_alive + rew_termination |
| | + rew_pole_pos + rew_cart_vel + rew_pole_vel) |
| | return total_reward |
+--------------------------------------------------------------------------+----------------------------------------------------------------------------------------+
Launching Training
~~~~~~~~~~~~~~~~~~
To launch training in Isaac Lab, use the following command:

.. code-block:: bash

   python source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-Direct-v0 --headless
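Other workflow runners follow the same pattern; for example, assuming the task is also registered with the
RSL-RL workflow:

.. code-block:: bash

   python source/standalone/workflows/rsl_rl/train.py --task=Isaac-Cartpole-Direct-v0 --headless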
Launching Inferencing
~~~~~~~~~~~~~~~~~~~~~
To launch inferencing in Isaac Lab, use the following command:

.. code-block:: bash

   python source/standalone/workflows/rl_games/play.py --task=Isaac-Cartpole-Direct-v0 --num_envs=25 --checkpoint=<path/to/checkpoint>
| 77,191 |
reStructuredText
| 83.919692 | 330 | 0.35898 |
isaac-sim/IsaacLab/docs/source/migration/index.rst
|
Migration Guides
================
The following guides show the migration process from previous frameworks that are now deprecated,
including IsaacGymEnvs, OmniIsaacGymEnvs, and Orbit.
.. toctree::
   :maxdepth: 1
   :titlesonly:

   migrating_from_isaacgymenvs
   migrating_from_omniisaacgymenvs
   migrating_from_orbit
| 329 |
reStructuredText
| 20.999999 | 97 | 0.732523 |
isaac-sim/IsaacLab/docs/source/migration/migrating_from_orbit.rst
|
.. _migrating-from-orbit:
Migrating from Orbit
====================
.. currentmodule:: omni.isaac.lab
Since Orbit served as the basis for Isaac Lab, migrating from Orbit to Isaac Lab is straightforward.
The following sections describe the changes that need to be made to your code to migrate from Orbit to Isaac Lab.
Updates to scripts
~~~~~~~~~~~~~~~~~~
The script ``orbit.sh`` has been renamed to ``isaaclab.sh``.
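Invocations only change in the script name; for example (the script arguments are illustrative):

.. code-block:: bash

   # previously: ./orbit.sh -p source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0
   ./isaaclab.sh -p source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-v0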
Updates to extensions
~~~~~~~~~~~~~~~~~~~~~
The extensions ``omni.isaac.orbit``, ``omni.isaac.orbit_tasks``, and ``omni.isaac.orbit_assets`` have been renamed
to ``omni.isaac.lab``, ``omni.isaac.lab_tasks``, and ``omni.isaac.lab_assets``, respectively. Thus, the new folder structure looks like this:
- ``source/extensions/omni.isaac.lab/omni/isaac/lab``
- ``source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks``
- ``source/extensions/omni.isaac.lab_assets/omni/isaac/lab_assets``
The high level imports have to be updated as well:
+-------------------------------------+-----------------------------------+
| Orbit | Isaac Lab |
+=====================================+===================================+
| ``from omni.isaac.orbit...`` | ``from omni.isaac.lab...`` |
+-------------------------------------+-----------------------------------+
| ``from omni.isaac.orbit_tasks...`` | ``from omni.isaac.lab_tasks...`` |
+-------------------------------------+-----------------------------------+
| ``from omni.isaac.orbit_assets...`` | ``from omni.isaac.lab_assets...`` |
+-------------------------------------+-----------------------------------+
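For example, an asset import changes as follows (the imported class is illustrative):

.. code-block:: python

   # previously (Orbit):
   # from omni.isaac.orbit.assets import Articulation
   from omni.isaac.lab.assets import Articulation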
Updates to class names
~~~~~~~~~~~~~~~~~~~~~~
In Isaac Lab, we introduced the concept of task design workflows (see :ref:`feature-workflows`). The Orbit code uses
the manager-based workflow, and the environment-specific class names have been updated to reflect this change:
+------------------------+---------------------------------------------------------+
| Orbit | Isaac Lab |
+========================+=========================================================+
| ``BaseEnv`` | :class:`omni.isaac.lab.envs.ManagerBasedEnv` |
+------------------------+---------------------------------------------------------+
| ``BaseEnvCfg`` | :class:`omni.isaac.lab.envs.ManagerBasedEnvCfg` |
+------------------------+---------------------------------------------------------+
| ``RLTaskEnv`` | :class:`omni.isaac.lab.envs.ManagerBasedRLEnv` |
+------------------------+---------------------------------------------------------+
| ``RLTaskEnvCfg`` | :class:`omni.isaac.lab.envs.ManagerBasedRLEnvCfg` |
+------------------------+---------------------------------------------------------+
| ``RLTaskEnvWindow`` | :class:`omni.isaac.lab.envs.ui.ManagerBasedRLEnvWindow` |
+------------------------+---------------------------------------------------------+
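A task config written against Orbit is updated accordingly; a minimal sketch for a manager-based task:

.. code-block:: python

   # previously (Orbit):
   # from omni.isaac.orbit.envs import RLTaskEnvCfg
   from omni.isaac.lab.envs import ManagerBasedRLEnvCfg

   class CartpoleEnvCfg(ManagerBasedRLEnvCfg):
       ...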
Updates to the tasks folder structure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To support the manager-based and direct workflows, we have added two folders in the tasks extension:
- ``source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based``
- ``source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct``
The tasks from Orbit can now be found under the ``manager_based`` folder.
This change must also be reflected in the imports for your tasks. For example,
.. code-block:: python

   from omni.isaac.orbit_tasks.locomotion.velocity.velocity_env_cfg ...
should now be
.. code-block:: python

   from omni.isaac.lab_tasks.manager_based.locomotion.velocity.velocity_env_cfg ...
Other Breaking changes
~~~~~~~~~~~~~~~~~~~~~~
Offscreen rendering
-------------------
The input argument ``--offscreen_render`` given to :class:`omni.isaac.lab.app.AppLauncher` and the environment variable ``OFFSCREEN_RENDER``
have been renamed to ``--enable_cameras`` and ``ENABLE_CAMERAS`` respectively.
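For example, a script that needs camera rendering is now launched as follows (the script path is
illustrative):

.. code-block:: bash

   # previously: ./orbit.sh -p <path/to/script.py> --offscreen_render
   ./isaaclab.sh -p <path/to/script.py> --enable_cameras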
Event term distribution configuration
-------------------------------------
Some of the event functions in `events.py <https://github.com/isaac-sim/IsaacLab/blob/isaac-lab/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/events.py>`_
accepted a ``distribution`` parameter and a ``range`` to sample from. In an effort to support arbitrary distributions,
we have renamed the input argument ``AAA_range`` to ``AAA_distribution_params`` for these functions.
Therefore, event term configurations whose functions have a ``distribution`` argument should be updated. For example,
.. code-block:: python
   :emphasize-lines: 6

   add_base_mass = EventTerm(
       func=mdp.randomize_rigid_body_mass,
       mode="startup",
       params={
           "asset_cfg": SceneEntityCfg("robot", body_names="base"),
           "mass_range": (-5.0, 5.0),
           "operation": "add",
       },
   )
should now be
.. code-block:: python
   :emphasize-lines: 6

   add_base_mass = EventTerm(
       func=mdp.randomize_rigid_body_mass,
       mode="startup",
       params={
           "asset_cfg": SceneEntityCfg("robot", body_names="base"),
           "mass_distribution_params": (-5.0, 5.0),
           "operation": "add",
       },
   )
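These event functions typically also accept a ``distribution`` argument to select the sampling law; a
sketch, assuming the uniform default is made explicit:

.. code-block:: python

   add_base_mass = EventTerm(
       func=mdp.randomize_rigid_body_mass,
       mode="startup",
       params={
           "asset_cfg": SceneEntityCfg("robot", body_names="base"),
           "mass_distribution_params": (-5.0, 5.0),
           "operation": "add",
           "distribution": "uniform",  # assumed; other laws where supported
       },
   )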
| 5,268 |
reStructuredText
| 40.164062 | 165 | 0.527335 |
michaltakac/nerf-toy-car-aerodynamics/README.md
|
# Toy car simulator
Example project for Ozaj.tech.
This simulator primarily showcases the capabilities of a parametrized, AI-based physics simulator for car aerodynamics, leveraging scientific deep learning methods (physics-informed neural networks and Fourier neural operators).
| 293 |
Markdown
| 47.999992 | 239 | 0.832765 |
michaltakac/nerf-toy-car-aerodynamics/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Toy car aerodynamics example"
description = "Project commissioned for the Faculty of Materials, Metallurgy and Recycling (FMMR) at the Technical University of Košice."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "README.md"
changelog = "docs/CHANGELOG.md"
preview_image = "data/toy-car.png"
icon = "data/icon.png"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Simulation"
# Keywords for the extension
keywords = ["modulus", "toy-car", "STL geometry", "scenario"]
# Use omni.ui to build simple UI
[dependencies]
"modulus_ext.core" = {version="22.9.0"}
"modulus_ext.ui" = {version="2.0.0"}
"hpcvis.vtkm_bridge.core" = {version="1.0.2-alpha-03"}
# Main python module this extension provides.
[[python.module]]
name = "toy-car"
| 988 |
TOML
| 28.088234 | 128 | 0.726721 |
michaltakac/nerf-toy-car-aerodynamics/config/extension.gen.toml
|
[package]
[package.target]
python = ["cp37"]
[package.publish]
date = 1662766157
kitVersion = "103.5+release.6600.0a006a6d.tc"
| 127 |
TOML
| 17.285712 | 45 | 0.732283 |
michaltakac/nerf-toy-car-aerodynamics/docs/CHANGELOG.md
|
# Toy car aerodynamics example
## [1.0.0] - 2022-10-14
Initial version, working with Modulus 22.09
| 100 |
Markdown
| 19.199996 | 43 | 0.72 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/toy_car_runner.py
|
import sys, os
import torch
import modulus
from sympy import Symbol, Eq, Abs, tanh
import numpy as np
import logging
from typing import List, Dict, Union
from pathlib import Path
from modulus.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.models.fully_connected import FullyConnectedArch
from modulus.domain.inferencer import (
OVVoxelInferencer,
)
from modulus_ext.ui.scenario import ModulusOVProgressBar
from modulus.key import Key
from modulus.eq.pdes.navier_stokes import NavierStokes
from modulus.eq.pdes.basic import NormalDotVec
from modulus.geometry.tessellation import Tessellation
from .constants import bounds
from .src.geometry import ToyCarDomain
class ModulusToyCarRunner(object):
"""Toy car simulator inference runner for OV scenario
Args:
cfg (ModulusConfig): Parsed Modulus config
"""
def __init__(
self,
cfg: ModulusConfig,
progress_bar: ModulusOVProgressBar,
mask_value: float = -100,
):
logging.getLogger().addHandler(logging.StreamHandler())
##############################
# Nondimensionalization Params
##############################
# fluid params
        # Kinematic viscosity of water; note the value below corresponds to
        # water at ~0 °C (https://wiki.anton-paar.com/en/water/)
        # https://en.wikipedia.org/wiki/Viscosity#Kinematic_viscosity
        self.nu = 1.787e-06  # m^2 * s^-1
self.inlet_vel = Symbol("inlet_velocity")
self.rho = 1
self.scale = 1.0
self.cfg = cfg
self.progress_bar = progress_bar
self._eco = False
self._inferencer = None
self.bounds = bounds
self.mask_value = mask_value
@property
def eco(self):
return self._eco
@eco.setter
def eco(self, value: bool):
self._eco = value
if self._inferencer:
self._inferencer.eco = value
def load_inferencer(self, checkpoint_dir: Union[str, None] = None):
"""Create Modulus Toy Car simulator inferencer object. This can take time since
it will initialize the model
Parameters
----------
checkpoint_dir : Union[str, None], optional
Directory to modulus checkpoint
"""
# make list of nodes to unroll graph on
ns = NavierStokes(nu=self.nu * self.scale, rho=self.rho, dim=3, time=False)
normal_dot_vel = NormalDotVec(["u", "v", "w"])
self.progress_bar.value = 0.025
equation_nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
)
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
input_keys += [Key("inlet_velocity")]
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
flow_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=output_keys,
)
self.flow_nodes = equation_nodes + [
flow_net.make_node(name="flow_network", jit=self.cfg.jit)
]
invar_keys = [
Key.from_str("x"),
Key.from_str("y"),
Key.from_str("z"),
Key.from_str("inlet_velocity"),
]
outvar_keys = [
Key.from_str("u"),
Key.from_str("v"),
Key.from_str("w"),
Key.from_str("p"),
]
self._inferencer = OVVoxelInferencer(
nodes=self.flow_nodes,
input_keys=invar_keys,
output_keys=outvar_keys,
mask_value=self.mask_value,
requires_grad=False,
eco=False,
progress_bar=self.progress_bar,
)
# Load checkpointed model
if checkpoint_dir is not None:
absolute_checkpoint_dir = Path(__file__).parent / checkpoint_dir
if absolute_checkpoint_dir.resolve().is_dir():
self._inferencer.load_models(absolute_checkpoint_dir.resolve())
else:
print("Could not find checkpointed model")
# Set eco
self._inferencer.eco = self.eco
def load_geometry(self):
# normalize meshes
def normalize_mesh(mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
stl_path = Path(self.data_path) / Path("stl_files")
self.car_mesh = Tessellation.from_stl(
Path(stl_path) / Path("toy_bmw.stl"), airtight=True
)
center = (0, 0, 0)
scale = 1.0
self.car_mesh = normalize_mesh(self.car_mesh, center, scale)
def run_inference(
self,
inlet_velocity: float,
resolution: List[int] = [256, 256, 256],
) -> Dict[str, np.array]:
"""Runs inference for toy car simulator
        Args:
            inlet_velocity (float): Inlet flow velocity supplied to the network.
            resolution (List[int], optional): Voxel resolution. Defaults to [256, 256, 256].
Returns:
Dict[str, np.array]: Predicted output variables
"""
self.progress_bar.value = 0
if self._inferencer is None:
print("Loading toy car inferencer")
self.load_inferencer(checkpoint_dir="./checkpoints")
self.progress_bar.value = 0.05
print("Loading toy car geometry")
self.load_geometry()
self.progress_bar.value = 0.1
# Eco mode settings
if self._inferencer.eco:
batch_size = 512
memory_fraction = 0.1
else:
vram_gb = torch.cuda.get_device_properties(0).total_memory / 10**9
batch_size = int((vram_gb // 6) * 16 * 1024)
memory_fraction = 1.0
mask_fn = (
lambda x, y, z: self.car_mesh.sdf({"x": x, "y": y, "z": z}, {})["sdf"]
< 0
)
sp_array = np.ones((np.prod(resolution), 1))
specific_params = {
"inlet_velocity": inlet_velocity * sp_array,
}
# Set up the voxel sample domain
self._inferencer.setup_voxel_domain(
bounds=self.bounds,
npoints=resolution,
invar=specific_params,
batch_size=batch_size,
mask_fn=mask_fn,
)
self.progress_bar.value = 0.2
# Perform inference
invar, predvar = self._inferencer.query(memory_fraction)
        # TODO: remove this; the progress-bar bookkeeping should live inside the inferencer
self.progress_bar._prev_step = 0.0
self.progress_bar.value = 0.9
return predvar
@property
def data_path(self):
data_dir = Path(os.path.dirname(__file__)) / Path("../data")
return str(data_dir)
| 6,706 |
Python
| 30.050926 | 92 | 0.567253 |