file_path (string, 20-207 chars) | content (string, 5-3.85M chars) | size (int64, 5-3.85M) | lang (9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---|
cKohl10/TactileSim/exts/README.md
|
To add the contact extension to Isaac Sim:
1: Open the extensions window, *Window* > *Extensions*
2: Add the path to the extensions in the *Extension Search Paths* window in the form `/TactileSim/exts`
3: Open the extension from the toolbar at the top of the screen.
To use the extension:
1: Create a CSV list of sensor positions, radii, and parent paths as described in `/TactileSim/blender_scripts/sensor_bay_addon/README.md`.
2: IMPORTANT: For a new robot, a contact sensor first has to be manually added to each link that will be covered.
This allows the link to become the parent of a contact sensor. After manually adding the sensor once, it can be deleted and the robot can be saved.
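A minimal illustrative configuration (hypothetical names and paths) matching the column order the extension expects; the first row is a header, and the importer expects at least two sensor rows:

```csv
Sensor Name,X Offset,Y Offset,Z Offset,Radius,Parent Path
palm_0,0.00,0.01,0.05,0.01,/World/Robot/palm_link
palm_1,0.02,0.01,0.05,0.01,/World/Robot/palm_link
```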
| 714 | Markdown | 43.687497 | 150 | 0.7507 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/global_variables.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
EXTENSION_TITLE = "Contact Extension Test"
EXTENSION_DESCRIPTION = "First test of using Isaac Sim extensions"
| 543 | Python | 40.846151 | 76 | 0.80663 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/ContactSensorClass.py
|
# Tactile Contact Sensor Functions
# Author: Carson Kohlbrenner
# Date: 6/3/2024
from .AbstracSensorClass import AbstractSensorOperator
import numpy as np
import omni.kit.commands
import omni.ui as ui
from omni.isaac.sensor import _sensor
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.prims import is_prim_path_valid, get_prim_children
from omni.isaac.ui.element_wrappers import CollapsableFrame
from omni.isaac.ui.ui_utils import get_style, LABEL_WIDTH
from pxr import Gf
class ContactSensorOperator(AbstractSensorOperator):
def __init__(self):
super().__init__()
self.parent_paths = [] # List of parent paths for each sensor
self.sliders = [] # List of sliders for each sensor on the UI
self.meters_per_unit = 1.00 # Unit conversion factor
self.activated = False # Flag to determine if the sensors are active
self.sensor_description = "Contact Sensors" # Description of the sensor type
# Data structure to store sensor information
class Sensor:
def __init__(self, name, position, radius, parent_path):
self.name = name
self.position = position
self.radius = radius
self.parent_path = parent_path
self.path = parent_path + "/tact_sensor_" + name
def import_sensors_fn(self):
"""
Function that executes when the user clicks the 'Update' button
Imports the sensor data from the CSV file and creates the sensors
Expects the CSV file to have the following format:
Sensor Name, X Offset, Y Offset, Z Offset, Radius, Parent Path
"""
self.activated = True
self._cs = _sensor.acquire_contact_sensor_interface()
# Remove all sensors already on the robot
message = "Removing existing sensors...\n"
self._status_report_field.set_text(message)
self.remove_sensors()
message += "Sensors successfully removed\n\n"
self._status_report_field.set_text(message)
#Change the text of the status report field to show the import status
path = self.config_path
message += "Importing sensor data from '" + path + "'...\n"
self._status_report_field.set_text(message)
#Import the sensor data from the CSV file
try:
names, positions, radii, parent_paths, data = self.import_csv(path)
self.parent_paths = parent_paths
self.remove_sensors() # Second call to ensure all sensors are removed after parent paths are updated
message += "File opened successfully\n"
# Output the data to the status report field
# message += "\n\nSensor Data:\n"
# for i in range(len(names)):
# message += str(data[i]) + "\n"
except:
message += "Invalid file path or file format!"
message += "\nPlease make sure the file has at least 2 sensors and is formatted correctly.\n"
self._status_report_field.set_text(message)
return
self._status_report_field.set_text(message)
# Determine the number of sensors and their positions
num_sensors = len(data)
self.sensors = {}
sensor_count = 0 # Keep track of the number of sensors created successfully
for i in range(num_sensors):
# Create a contact sensor at the specified position
# message += "\nCreating sensor " + str(i) + " at position " + str(positions[i]) + "...\n"
# self._status_report_field.set_text(message)
# Check if the parent path is valid
if not is_prim_path_valid(parent_paths[i]):
message += "Could not find parent path: " + parent_paths[i] + "\n"
self._status_report_field.set_text(message)
continue
# Get the parent prim
parent_prim = get_current_stage().GetPrimAtPath(parent_paths[i])
# Check if the applied link has a rigidbody component (unknown if this is necessary)
# if not parent_prim.HasAPI(UsdPhysics.RigidBodyAPI):
# message += "Parent path does not have a rigidbody component: " + parent_paths[i] + "\n"
# self._status_report_field.set_text(message)
# continue
# Create the sensor
self.create_contact_sensor(parent_paths[i], positions[i], radii[i], names[i])
sensor_count = sensor_count + 1
message += "\nSuccessfully created " + str(sensor_count) + " sensors\n"
self._status_report_field.set_text(message)
# Populate the sensor readings frame with the new sensors
self.update_sensor_readings_frame()
def import_csv(self, path):
"""
Function that imports the sensor data from a CSV file
CSV file should have the following format:
Sensor Name, X Offset, Y Offset, Z Offset, Radius, Parent Path
"""
try:
data = np.genfromtxt(path, delimiter=',', skip_header=1, dtype=str)
# Save the first column as a list of names, columns 2-4 as positions, column 5 as radii, and column 6 as parent paths
names = data[:, 0]
# Convert the positions to a list of Gf.Vec3d objects
positions = []
for i in range(len(data)):
positions.append(Gf.Vec3d(float(data[i, 1]), float(data[i, 2]), float(data[i, 3])))
radii = []
for i in range(len(data)):
radii.append(float(data[i, 4]))
# Save the parent paths as a list of strings
parent_paths = []
for i in range(len(data)):
parent_paths.append(data[i, 5])
return names, positions, radii, parent_paths, data
except:
return None
def create_contact_sensor(self, parent_path, position, radius, name):
# Create the sensor at the specified position
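# Parameter notes (a sketch, not exhaustive): min/max_threshold bound the reported
# force, radius sets the spherical sensing region, translation places the sensor
# relative to the parent link, and sensor_period is the time between readings
# (the bundled contact_sensor_example uses -1 to read every physics step).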
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateContactSensor",
path="/tact_sensor_" + name,
parent=parent_path,
min_threshold=0,
max_threshold=1000000,
color=(1, 0, 0, 1),
radius=radius,
sensor_period=1,
translation=position,
visualize=True,
)
# Add the sensor to the list of sensors
self.sensors[name] = self.Sensor(name, position, radius, parent_path)
def remove_sensors(self):
"""
Function that removes all sensors from the robot
"""
if len(self.parent_paths) == 0:
return
for parent_path in self.parent_paths:
# Find all prims under the parent path that contain "tact_sensor" in their name
try:
parent_prim = get_current_stage().GetPrimAtPath(parent_path)
prims = get_prim_children(parent_prim)
except:
self._status_report_field.set_text("Unexpected path!\n")
return
#self._status_report_field.set_text("Found " + str(len(prims)) + " sensors to remove\n")
# Remove all prims found
for prim in prims:
if "tact_sensor" in prim.GetName():
omni.kit.commands.execute('DeletePrims', paths=[parent_path + "/" + prim.GetName()])
def remove_sensors_fn(self):
"""
Function that executes when the user clicks the 'Remove Sensors' button
Removes all sensors from the robot
"""
self.activated = False
self.remove_sensors()
self._status_report_field.set_text("All sensors removed\n\n\n If sensors remain, choose the correct configuration file and click 'Update'\n")
# This function updates the sensor readings in the UI at every physics step
def sensor_update(self, dt):
#self._status_report_field.set_text("Updating sensor readings...\n")
if len(self.sliders) > 0:
slider_num = 0
for s in self.sensors.values():
#self._status_report_field.set_text("Updating sensor " + s.name + " at path " + s.path + "...\n")
reading = self._cs.get_sensor_reading(s.path)
if reading.is_valid:
self.sliders[slider_num].model.set_value(
float(reading.value) * self.meters_per_unit
) # raw readings are in stage length units; scaling by meters_per_unit yields Newtons (kg⋅m⋅s⁻²)
else:
self.sliders[slider_num].model.set_value(0)
slider_num += 1
# contacts_raw = self._cs.get_body_contact_raw_data(self.leg_paths[0])
# if len(contacts_raw):
# c = contacts_raw[0]
# # print(c)
def create_sensor_readings_frame(self):
self.sensor_readings_frame = CollapsableFrame("Sensor Readings", collapsed=False)
def update_sensor_readings_frame(self):
# Color and style for the UI elements
self.sliders = []
self.colors = [0xFFBBBBFF, 0xFFBBFFBB, 0xBBFFBBBB, 0xBBBBFFFF]
style = {"background_color": 0xFF888888, "color": 0xFF333333, "secondary_color": self.colors[0]}
#message = "There are " + str(len(self.sensors)) + " sensors\n"
with self.sensor_readings_frame:
# Vertical stack to hold the sensor readings in the frame
with ui.VStack(style=get_style(), spacing=5, height=0):
for s in self.sensors.values():
#message += "Creating reading bar for sensor " + s.name + "...\n"
with ui.HStack():
ui.Label(s.name, width=LABEL_WIDTH, tooltip="Force in Newtons")
# ui.Spacer(height=0, width=10)
style["secondary_color"] = self.colors[0]
self.sliders.append(ui.FloatDrag(min=0.0, max=15.0, step=0.001, style=style))
self.sliders[-1].enabled = False
ui.Spacer(width=20)
| 10,168 | Python | 41.195021 | 149 | 0.586644 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/scenario.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
class ScenarioTemplate:
def __init__(self):
pass
def setup_scenario(self):
pass
def teardown_scenario(self):
pass
def update_scenario(self):
pass
import numpy as np
from omni.isaac.core.utils.types import ArticulationAction
"""
This scenario takes in a robot Articulation and makes it move through its joint DOFs.
Additionally, it adds a cuboid prim to the stage that moves in a circle around the robot.
The particular framework under which this scenario operates should not be taken as a direct
recommendation to the user about how to structure their code. In the simple example put together
in this template, this particular structure served to improve code readability and separate
the logic that runs the example from the UI design.
"""
class ExampleScenario(ScenarioTemplate):
def __init__(self):
self._object = None
self._articulation = None
self._running_scenario = False
self._time = 0.0 # s
self._object_radius = 0.5 # m
self._object_height = 0.5 # m
self._object_frequency = 0.25 # Hz
self._joint_index = 0
self._max_joint_speed = 4 # rad/sec
self._lower_joint_limits = None
self._upper_joint_limits = None
self._joint_time = 0
self._path_duration = 0
self._calculate_position = lambda t, x: 0
self._calculate_velocity = lambda t, x: 0
def setup_scenario(self, articulation, object_prim):
self._articulation = articulation
self._object = object_prim
self._initial_object_position = self._object.get_world_pose()[0]
self._initial_object_phase = np.arctan2(self._initial_object_position[1], self._initial_object_position[0])
self._object_radius = np.linalg.norm(self._initial_object_position[:2])
self._running_scenario = True
self._joint_index = 0
self._lower_joint_limits = articulation.dof_properties["lower"]
self._upper_joint_limits = articulation.dof_properties["upper"]
# teleport robot to lower joint range
epsilon = 0.001
articulation.set_joint_positions(self._lower_joint_limits + epsilon)
self._derive_sinusoid_params(0)
def teardown_scenario(self):
self._time = 0.0
self._object = None
self._articulation = None
self._running_scenario = False
self._joint_index = 0
self._lower_joint_limits = None
self._upper_joint_limits = None
self._joint_time = 0
self._path_duration = 0
self._calculate_position = lambda t, x: 0
self._calculate_velocity = lambda t, x: 0
def update_scenario(self, step: float):
if not self._running_scenario:
return
self._time += step
x = self._object_radius * np.cos(self._initial_object_phase + self._time * self._object_frequency * 2 * np.pi)
y = self._object_radius * np.sin(self._initial_object_phase + self._time * self._object_frequency * 2 * np.pi)
z = self._initial_object_position[2]
self._object.set_world_pose(np.array([x, y, z]))
self._update_sinusoidal_joint_path(step)
def _derive_sinusoid_params(self, joint_index: int):
# Derive the parameters of the joint target sinusoids for joint {joint_index}
start_position = self._lower_joint_limits[joint_index]
P_max = self._upper_joint_limits[joint_index] - start_position
V_max = self._max_joint_speed
T = P_max * np.pi / V_max
# T is the expected time of the joint path
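# Derivation sketch: the position target defined below is
# start + (P_max / 2) * (1 - cos(2 * pi * t / T)), so the velocity is
# (P_max * pi / T) * sin(2 * pi * t / T), peaking at P_max * pi / T; setting
# that peak equal to V_max gives T = P_max * pi / V_max, as computed above.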
self._path_duration = T
self._calculate_position = (
lambda time, path_duration: start_position
+ -P_max / 2 * np.cos(time * 2 * np.pi / path_duration)
+ P_max / 2
)
self._calculate_velocity = lambda time, path_duration: V_max * np.sin(2 * np.pi * time / path_duration)
def _update_sinusoidal_joint_path(self, step):
# Update the target for the robot joints
self._joint_time += step
if self._joint_time > self._path_duration:
self._joint_time = 0
self._joint_index = (self._joint_index + 1) % self._articulation.num_dof
self._derive_sinusoid_params(self._joint_index)
joint_position_target = self._calculate_position(self._joint_time, self._path_duration)
joint_velocity_target = self._calculate_velocity(self._joint_time, self._path_duration)
action = ArticulationAction(
np.array([joint_position_target]),
np.array([joint_velocity_target]),
joint_indices=np.array([self._joint_index]),
)
self._articulation.apply_action(action)
| 5,180 | Python | 34.244898 | 118 | 0.643822 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/AbstracSensorClass.py
|
# Abstract Sensor Class
# Author: Carson Kohlbrenner
# Date: 6/3/2024
"""
This class is an abstract class that defines the basic structure of a sensor.
"""
class AbstractSensorOperator:
def __init__(self):
self.sensors = {}
self.config_path = ""
self._status_report_field = None
def import_sensors_fn(self):
"""
Function that executes when the user clicks the 'Update' button
Imports the sensor data from the CSV file and creates the sensors
"""
pass
def remove_sensors_fn(self):
"""
Function that removes all sensors from the robot
"""
pass
def sensor_update(self, dt):
"""
Function that updates the sensor data
"""
pass
def create_sensor_readings_frame(self):
"""
Function that creates the sensor readings frame
"""
pass
def update_sensor_readings_frame(self):
"""
Function that updates the sensor readings
"""
pass
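# Hypothetical subclass sketch: a concrete operator overrides the hooks above, e.g.
#
# class MySensorOperator(AbstractSensorOperator):
#     def import_sensors_fn(self):
#         ...  # parse self.config_path and create one sensor per CSV row
#     def sensor_update(self, dt):
#         ...  # push the latest readings into the UI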
| 1,040 | Python | 22.65909 | 77 | 0.584615 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/extension.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import gc
import omni
import omni.kit.commands
import omni.physx as _physx
import omni.timeline
import omni.ui as ui
import omni.usd
from omni.isaac.ui.element_wrappers import ScrollingWindow
from omni.isaac.ui.menu import MenuItemDescription
from omni.kit.menu.utils import add_menu_items, remove_menu_items
from omni.usd import StageEventType
from .global_variables import EXTENSION_DESCRIPTION, EXTENSION_TITLE
from .ui_builder import UIBuilder
"""
This file serves as a basic template for the standard boilerplate operations
that make a UI-based extension appear on the toolbar.
This implementation is meant to cover most use-cases without modification.
Various callbacks are hooked up to a separate class UIBuilder in .ui_builder.py
Most users will be able to make their desired UI extension by interacting solely with
UIBuilder.
This class sets up standard useful callback functions in UIBuilder:
on_menu_callback: Called when extension is opened
on_timeline_event: Called when timeline is stopped, paused, or played
on_physics_step: Called on every physics step
on_stage_event: Called when stage is opened or closed
cleanup: Called when resources such as physics subscriptions should be cleaned up
build_ui: User function that creates the UI they want.
"""
class ContactExtension(omni.ext.IExt):
def on_startup(self, ext_id: str):
"""Initialize extension and UI elements"""
self.ext_id = ext_id
self._usd_context = omni.usd.get_context()
# Build Window
self._window = ScrollingWindow(
title=EXTENSION_TITLE, width=800, height=500, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
self._window.set_visibility_changed_fn(self._on_window)
action_registry = omni.kit.actions.core.get_action_registry()
action_registry.register_action(
ext_id,
f"CreateUIExtension:{EXTENSION_TITLE}",
self._menu_callback,
description=f"Add {EXTENSION_TITLE} Extension to UI toolbar",
)
self._menu_items = [
MenuItemDescription(name="Demo Test", onclick_action=(ext_id, f"CreateUIExtension:{EXTENSION_TITLE}")),
]
add_menu_items(self._menu_items, EXTENSION_TITLE)
# Filled in with User Functions
self.ui_builder = UIBuilder(self._window)
# Events
self._usd_context = omni.usd.get_context()
self._physxIFace = _physx.acquire_physx_interface()
self._physx_subscription = None
self._stage_event_sub = None
self._timeline = omni.timeline.get_timeline_interface()
def on_shutdown(self):
self._models = {}
remove_menu_items(self._menu_items, EXTENSION_TITLE)
action_registry = omni.kit.actions.core.get_action_registry()
action_registry.deregister_action(self.ext_id, f"CreateUIExtension:{EXTENSION_TITLE}")
if self._window:
self._window = None
self.ui_builder.cleanup()
gc.collect()
def _on_window(self, visible):
if self._window.visible:
# Subscribe to Stage and Timeline Events
self._usd_context = omni.usd.get_context()
events = self._usd_context.get_stage_event_stream()
self._stage_event_sub = events.create_subscription_to_pop(self._on_stage_event)
stream = self._timeline.get_timeline_event_stream()
self._timeline_event_sub = stream.create_subscription_to_pop(self._on_timeline_event)
self._build_ui()
else:
self._usd_context = None
self._stage_event_sub = None
self._timeline_event_sub = None
self.ui_builder.cleanup()
def _build_ui(self):
with self._window.frame:
with ui.VStack(spacing=5, height=0):
self._build_extension_ui()
async def dock_window():
await omni.kit.app.get_app().next_update_async()
def dock(space, name, location, pos=0.5):
window = omni.ui.Workspace.get_window(name)
if window and space:
window.dock_in(space, location, pos)
return window
tgt = ui.Workspace.get_window("Viewport")
dock(tgt, EXTENSION_TITLE, omni.ui.DockPosition.LEFT, 0.33)
await omni.kit.app.get_app().next_update_async()
self._task = asyncio.ensure_future(dock_window())
#################################################################
# Functions below this point call user functions
#################################################################
def _menu_callback(self):
self._window.visible = not self._window.visible
self.ui_builder.on_menu_callback()
def _on_timeline_event(self, event):
if event.type == int(omni.timeline.TimelineEventType.PLAY):
if not self._physx_subscription:
self._physx_subscription = self._physxIFace.subscribe_physics_step_events(self._on_physics_step)
elif event.type == int(omni.timeline.TimelineEventType.STOP):
self._physx_subscription = None
self.ui_builder.on_timeline_event(event)
def _on_physics_step(self, step):
self.ui_builder.on_physics_step(step)
def _on_stage_event(self, event):
if event.type == int(StageEventType.OPENED) or event.type == int(StageEventType.CLOSED):
# stage was opened or closed, cleanup
self._physx_subscription = None
self.ui_builder.cleanup()
self.ui_builder.on_stage_event(event)
def _build_extension_ui(self):
# Call user function for building UI
self.ui_builder.build_ui()
def _on_update(self, dt):
# NOTE: this appears to be leftover from contact_sensor_example.py and is not
# subscribed anywhere; with the current design, per-step updates run through
# _on_physics_step -> ui_builder.on_physics_step, which forwards to each
# sensor operator. Delegating here keeps the method consistent with that flow.
if self._timeline.is_playing():
for operator in self.ui_builder._sensor_operators:
operator.sensor_update(dt)
# contacts_raw = self._cs.get_body_contact_raw_data(self.leg_paths[0])
# if len(contacts_raw):
# c = contacts_raw[0]
# # print(c)
| 6,988 | Python | 37.401099 | 117 | 0.632227 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/__init__.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .extension import *
| 456 | Python | 44.699996 | 76 | 0.809211 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/contact_sensor_example.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import weakref
import carb
import omni
import omni.kit.commands
import omni.physx as _physx
import omni.ui as ui
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.sensor import _sensor
from omni.isaac.ui.menu import make_menu_item_description
from omni.isaac.ui.ui_utils import LABEL_WIDTH, get_style, setup_ui_headers
from omni.kit.menu.utils import MenuItemDescription, add_menu_items, remove_menu_items
from pxr import Gf, UsdGeom
EXTENSION_NAME = "Contact Sensor Example"
class Contact_sensor_demo(omni.ext.IExt):
def on_startup(self, ext_id: str):
ext_manager = omni.kit.app.get_app().get_extension_manager()
self._ext_id = ext_id
self._extension_path = ext_manager.get_extension_path(ext_id)
self._menu_items = [
MenuItemDescription(
name="Sensors",
sub_menu=[make_menu_item_description(ext_id, "Contact", lambda a=weakref.proxy(self): a.build_ui())],
)
]
add_menu_items(self._menu_items, "Isaac Examples")
self.meters_per_unit = 1.00
self._window = None
def _on_stage_event(self, event):
if event.type == int(omni.usd.StageEventType.CLOSED):
self.on_closed()
def build_ui(self):
if self._window is None:
self._cs = _sensor.acquire_contact_sensor_interface()
self._timeline = omni.timeline.get_timeline_interface()
self.sub = _physx.get_physx_interface().subscribe_physics_step_events(self._on_update)
self.leg_paths = ["/Ant/Arm_{:02d}/Lower_Arm".format(i + 1) for i in range(4)]
self.shoulder_joints = ["/Ant/Arm_{:02d}/Upper_Arm/shoulder_joint".format(i + 1) for i in range(4)]
self.lower_joints = ["{}/lower_arm_joint".format(i) for i in self.leg_paths]
self._sensor_handles = [0 for i in range(4)]
self.sliders = None
# self._window = ui.Window(
# title="Contact Sensor Sample", width=300, height=200, dockPreference=ui.DockPreference.LEFT_BOTTOM
# )
self.sliders = []
self.colors = [0xFFBBBBFF, 0xFFBBFFBB, 0xBBFFBBBB, 0xBBBBFFFF]
style = {"background_color": 0xFF888888, "color": 0xFF333333, "secondary_color": self.colors[0]}
self.plots = []
self.plot_vals = []
self._window = ui.Window(
title=EXTENSION_NAME, width=600, height=0, visible=True, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
with ui.VStack(spacing=5, height=0):
title = "Contact Sensor Example"
doc_link = "https://docs.omniverse.nvidia.com/isaacsim/latest/features/sensors_simulation/isaac_sim_sensors_physics_based_contact.html"
overview = "This example shows how surface load sensors are applied to a body. "
overview += "It works by summing all forces applied within a given spherical trigger region intersected with the given body surface."
overview += (
"\nPress PLAY to start the simulation, hold 'shift' and left click the model to drag it around"
)
overview += "\n\nPress the 'Open in IDE' button to view the source code."
setup_ui_headers(self._ext_id, __file__, title, doc_link, overview)
frame = ui.CollapsableFrame(
title="Sensor Readings",
height=0,
collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5):
for i in range(4):
with ui.HStack():
ui.Label("Arm {}".format(i + 1), width=LABEL_WIDTH, tooltip="Force in Newtons")
# ui.Spacer(height=0, width=10)
style["secondary_color"] = self.colors[i]
self.sliders.append(ui.FloatDrag(min=0.0, max=15.0, step=0.001, style=style))
self.sliders[-1].enabled = False
ui.Spacer(width=20)
asyncio.ensure_future(self.create_scenario())
self._window.visible = True
def on_shutdown(self):
self.on_closed()
remove_menu_items(self._menu_items, "Isaac Examples")
def on_closed(self):
if self._window:
self.sub = None
self._timeline = None
self._stage_event_subscription = None
self._window = None
def _on_update(self, dt):
if self._timeline.is_playing() and self.sliders:
for i in range(4):
reading = self._cs.get_sensor_reading(self.leg_paths[i] + "/sensor")
if reading.is_valid:
self.sliders[i].model.set_value(
float(reading.value) * self.meters_per_unit
) # raw readings are in stage length units; scaling by meters_per_unit yields Newtons (kg⋅m⋅s⁻²)
else:
self.sliders[i].model.set_value(0)
# contacts_raw = self._cs.get_body_contact_raw_data(self.leg_paths[0])
# if len(contacts_raw):
# c = contacts_raw[0]
# # print(c)
async def create_scenario(self):
self._assets_root_path = get_assets_root_path()
if self._assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
return
# Add Contact Sensor
await omni.usd.get_context().open_stage_async(self._assets_root_path + "/Isaac/Robots/Simple/ant.usd")
await omni.kit.app.get_app().next_update_async()
self.meters_per_unit = UsdGeom.GetStageMetersPerUnit(omni.usd.get_context().get_stage())
self.sensor_offsets = [Gf.Vec3d(40, 0, 0), Gf.Vec3d(40, 0, 0), Gf.Vec3d(40, 0, 0), Gf.Vec3d(40, 0, 0)]
self.color = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1), (1, 1, 0, 1)]
self.sensorGeoms = []
for i in range(4):
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateContactSensor",
path="/sensor",
parent=self.leg_paths[i],
min_threshold=0,
max_threshold=10000000,
color=self.color[i],
radius=0.12,
sensor_period=-1,
translation=self.sensor_offsets[i],
visualize=True,
)
self._events = omni.usd.get_context().get_stage_event_stream()
self._stage_event_subscription = self._events.create_subscription_to_pop(
self._on_stage_event, name="Contact Sensor Sample stage Watch"
)
| 7,649 | Python | 43.219653 | 155 | 0.569225 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/ui_builder backup.py
|
# This software contains source code provided by NVIDIA Corporation.
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import numpy as np
import omni.timeline
import omni.ui as ui
import omni.kit.commands
import time
import os
import sys
import carb
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.objects.cuboid import FixedCuboid
from omni.isaac.core.prims import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import is_prim_path_valid, get_all_matching_child_prims, delete_prim, get_prim_children
from omni.isaac.core.utils.stage import add_reference_to_stage, create_new_stage, get_current_stage
from omni.isaac.core.world import World
from omni.isaac.ui.element_wrappers import CollapsableFrame, StateButton, Button, TextBlock, StringField, DropDown
from omni.isaac.ui.element_wrappers.core_connectors import LoadButton, ResetButton
from omni.isaac.ui.ui_utils import get_style, LABEL_WIDTH
from omni.usd import StageEventType
from pxr import Sdf, UsdLux, Gf
from omni.isaac.sensor import _sensor
from omni.isaac.proximity_sensor import Sensor, register_sensor, clear_sensors
from .scenario import ExampleScenario
from pxr import UsdPhysics
class UIBuilder:
def __init__(self, window):
# Window to hold the UI elements
self.window = window
# Frames are sub-windows that can contain multiple UI elements
self.frames = []
# UI elements created using a UIElementWrapper instance
self.wrapped_ui_elements = []
# Get access to the timeline to control stop/pause/play programmatically
self._timeline = omni.timeline.get_timeline_interface()
self.parent_paths = []
self.sensors = {}
# Contact Parameters
self.meters_per_unit = 1.00
version = sys.version
executable = sys.executable
print(f"Python version: {version}")
print(f"Python executable location: {executable}")
# Data structure to store sensor information
class Sensor:
def __init__(self, name, position, radius, parent_path):
self.name = name
self.position = position
self.radius = radius
self.parent_path = parent_path
self.path = parent_path + "/tact_sensor_" + name
###################################################################################
# The Functions Below Are Called Automatically By extension.py
###################################################################################
def on_menu_callback(self):
"""Callback for when the UI is opened from the toolbar.
This is called directly after build_ui().
"""
pass
def on_timeline_event(self, event):
"""Callback for Timeline events (Play, Pause, Stop)
Args:
event (omni.timeline.TimelineEventType): Event Type
"""
if event.type == int(omni.timeline.TimelineEventType.STOP):
# When the user hits the stop button through the UI, they will inevitably discover edge cases where things break
# For complete robustness, the user should resolve those edge cases here
# In general, for extensions based off this template, there is no value to having the user click the play/stop
# button instead of using the Load/Reset/Run buttons provided.
#self._scenario_state_btn.reset()
#self._scenario_state_btn.enabled = False
pass
def on_physics_step(self, step: float):
"""Callback for Physics Step.
Physics steps only occur when the timeline is playing
Args:
step (float): Size of physics step
"""
self.contact_sensor_update(step)
def on_stage_event(self, event):
"""Callback for Stage Events
Args:
event (omni.usd.StageEventType): Event Type
"""
if event.type == int(StageEventType.OPENED):
# If the user opens a new stage, the extension should completely reset
self._reset_extension()
def cleanup(self):
"""
Called when the stage is closed or the extension is hot reloaded.
Perform any necessary cleanup such as removing active callback functions
Buttons imported from omni.isaac.ui.element_wrappers implement a cleanup function that should be called
"""
for ui_elem in self.wrapped_ui_elements:
ui_elem.cleanup()
################################# Individual Frames ##################################################
def create_status_report_frame(self):
self._status_report_frame = CollapsableFrame("Status Report", collapsed=False)
with self._status_report_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
self._status_report_field = TextBlock(
"Last UI Event",
num_lines=3,
tooltip="Prints the latest change to this UI",
include_copy_button=True,
)
def update_sensor_readings_frame(self):
# Color and style for the UI elements
self.sliders = []
self.colors = [0xFFBBBBFF, 0xFFBBFFBB, 0xBBFFBBBB, 0xBBBBFFFF]
style = {"background_color": 0xFF888888, "color": 0xFF333333, "secondary_color": self.colors[0]}
#message = "There are " + str(len(self.sensors)) + " sensors\n"
with self.sensor_readings_frame:
# Vertical stack to hold the sensor readings in the frame
with ui.VStack(style=get_style(), spacing=5, height=0):
for s in self.sensors.values():
#message += "Creating reading bar for sensor " + s.name + "...\n"
with ui.HStack():
ui.Label(s.name, width=LABEL_WIDTH, tooltip="Force in Newtons")
# ui.Spacer(height=0, width=10)
style["secondary_color"] = self.colors[0]
self.sliders.append(ui.FloatDrag(min=0.0, max=15.0, step=0.001, style=style))
self.sliders[-1].enabled = False
ui.Spacer(width=20)
#self._status_report_field.set_text(message)
def create_sensor_readings_frame(self):
self.sensor_readings_frame = CollapsableFrame("Sensor Readings", collapsed=False)
def create_import_sensors_frame(self):
buttons_frame = CollapsableFrame("Import Sensors", collapsed=False)
with buttons_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
string_field = StringField(
"Import CSV File",
default_value="TactileSim/sensor_configs",
tooltip="Path to sensor positioning file",
read_only=False,
multiline_okay=False,
on_value_changed_fn=self._on_string_field_value_changed_fn,
use_folder_picker=True,
#item_filter_fn=is_usd_or_python_path,
)
self.wrapped_ui_elements.append(string_field)
dropdown = DropDown(
"Config Options",
tooltip=" Select an option from the DropDown",
populate_fn=self.dropdown_populate_fn,
on_selection_fn=self._on_dropdown_item_selection,
)
self.wrapped_ui_elements.append(dropdown)
dropdown.repopulate() # This does not happen automatically, and it triggers the on_selection_fn
button = Button(
"Refresh Sensor Positions",
"Update",
tooltip="Reread the data from the specified file path to update sensors",
on_click_fn=self.import_sensors_fn,
)
self.wrapped_ui_elements.append(button)
self._status_report_field = TextBlock(
"Import Status",
num_lines=10,
tooltip="Outputs the status of the import process",
include_copy_button=True,
)
def build_ui(self):
"""
Build a custom UI tool to run your extension.
This function will be called any time the UI window is closed and reopened.
"""
self._cs = _sensor.acquire_contact_sensor_interface()
self.create_import_sensors_frame()
self.create_sensor_readings_frame()
#self.create_status_report_frame()
############################## Import Frame Functions ########################################
def dropdown_populate_fn(self):
"""
Function that populates the dropdown with options
Returns all the files in the directory specified by the string field
"""
options = []
# Get the path from the string field
path = self.wrapped_ui_elements[0].get_value()
# Get all the files in the directory
try:
options = os.listdir(path)
# Add an empty string to the beginning of the list
options.insert(0, "")
# Add a 'Go Back' option at the end of the list
options.append("Go Back")
except:
options = []
return options
def import_sensors_fn(self):
"""
Function that executes when the user clicks the 'Refresh Sensors' button
"""
# Remove all sensors already on the robot
message = "Removing existing sensors...\n"
self._status_report_field.set_text(message)
#self.countdown(3, "Remove Sensors")
self.remove_sensors("tactile_sensors")
message += "Sensors successfully removed\n\n"
self._status_report_field.set_text(message)
#Change the text of the status report field to show the import status
path = self.config_path
message += "Importing sensor data from '" + path + "'...\n"
self._status_report_field.set_text(message)
#Import the sensor data from the CSV file
try:
names, positions, radii, parent_paths, data = self.import_csv(path)
self.parent_paths = parent_paths
self.remove_sensors("tactile_sensors") # Second call to ensure all sensors are removed after parent paths are updated
message += "File opened successfully\n"
# Output the data to the status report field
# message += "\n\nSensor Data:\n"
# for i in range(len(names)):
# message += str(data[i]) + "\n"
except:
message += "Invalid file path or file format!"
message += "\nPlease make sure the file has at least 2 sensors and is formatted correctly.\n"
self._status_report_field.set_text(message)
return
self._status_report_field.set_text(message)
# Determine the number of sensors and their positions
num_sensors = len(data)
self.sensors = {}
sensor_count = 0 # Keep track of the number of sensors created successfully
for i in range(num_sensors):
# Create a contact sensor at the specified position
# message += "\nCreating sensor " + str(i) + " at position " + str(positions[i]) + "...\n"
# self._status_report_field.set_text(message)
# Check if the parent path is valid
if not is_prim_path_valid(parent_paths[i]):
message += "Could not find parent path: " + parent_paths[i] + "\n"
self._status_report_field.set_text(message)
continue
# Get the parent prim
parent_prim = get_current_stage().GetPrimAtPath(parent_paths[i])
# Check if the applied link has a rigidbody component
# if not parent_prim.HasAPI(UsdPhysics.RigidBodyAPI):
# message += "Parent path does not have a rigidbody component: " + parent_paths[i] + "\n"
# self._status_report_field.set_text(message)
# continue
# Create the sensor
self.create_contact_sensor(parent_paths[i], positions[i], radii[i], names[i])
sensor_count = sensor_count + 1
message += "\nSuccessfully created " + str(sensor_count) + " sensors\n"
self._status_report_field.set_text(message)
# Populate the sensor readings frame with the new sensors
#self.update_sensor_readings_frame()
# This function breaks down the CSV file into its components. Make sure the CSV file is formatted correctly
#
# Each row of the CSV file describes one sensor, with the following columns:
# - The first column contains the sensor name
# - The second through fourth columns contain the x, y, and z offsets of the sensor
# - The fifth column contains the radius of the sensor
# - The sixth column contains the parent path of the sensor
#
def import_csv(self, path):
"""
Function that imports data from a CSV file
Args:
path (str): The path to the CSV file
"""
try:
data = np.genfromtxt(path, delimiter=',', skip_header=1, dtype=str)
# Save the first column as a list of names, columns 2-4 as positions, column 5 as radii, and column 6 as parent paths
names = data[:, 0]
# Convert the positions to a list of Gf.Vec3d objects
positions = []
for i in range(len(data)):
positions.append(Gf.Vec3d(float(data[i, 1]), float(data[i, 2]), float(data[i, 3])))
radii = []
for i in range(len(data)):
radii.append(float(data[i, 4]))
# Save the parent paths as a list of strings
parent_paths = []
for i in range(len(data)):
parent_paths.append(data[i, 5])
return names, positions, radii, parent_paths, data
except:
return None
def create_contact_sensor(self, parent_path, position, radius, name):
result, sensor = omni.kit.commands.execute(
"IsaacSensorCreateContactSensor",
path="/tact_sensor_" + name,
parent=parent_path,
min_threshold=0,
max_threshold=1000000,
color=(1, 0, 0, 1),
radius=radius,
sensor_period=1,
translation=position,
visualize=True,
)
# Add the sensor to the list of sensors
self.sensors[name] = self.Sensor(name, position, radius, parent_path)
def remove_sensors(self, sensor_label):
"""
Function that removes all sensors from the robot
"""
if len(self.parent_paths) == 0:
return
for parent_path in self.parent_paths:
# Find all prims under the parent path that contain "tact_sensor" in their name
try:
parent_prim = get_current_stage().GetPrimAtPath(parent_path)
prims = get_prim_children(parent_prim)
except:
self._status_report_field.set_text("Unexpected path!\n")
return
#self._status_report_field.set_text("Found " + str(len(prims)) + " sensors to remove\n")
# Remove all prims found
for prim in prims:
if sensor_label in prim.GetName():
omni.kit.commands.execute('DeletePrims', paths=[parent_path + "/" + prim.GetName()])
def _on_string_field_value_changed_fn(self, value):
"""
Function that executes when the user changes the value of the string field
Sets the value of the string field to what the user entered
"""
self.wrapped_ui_elements[0].set_value(value)
# Update the dropdown with the new path
self.wrapped_ui_elements[1].repopulate()
def _on_dropdown_item_selection(self, item):
"""
Function that executes when the user selects an item from the dropdown
Sets the value of the dropdown to the item the user selected
"""
# Go back if the user selects the empty string
if item == "Go Back":
# Get the path from the string field minus the last folder
path = self.wrapped_ui_elements[0].get_value()
parts = path.split("/")
path = "/".join(parts[:-1])
self.wrapped_ui_elements[0].set_value(path)
self.wrapped_ui_elements[1].repopulate()
return
if item == "":
return
self.config_path = self.wrapped_ui_elements[0].get_value() + "/" + item
if self.config_path[-4:] != ".csv":
# If the selected item is not a CSV file, treat it as a folder and update the string field with the new path
self.wrapped_ui_elements[0].set_value(self.config_path)
self.wrapped_ui_elements[1].repopulate()
pass
######################################################################################
# Contact updates
######################################################################################
# This function updates the sensor readings in the UI at every physics step
def contact_sensor_update(self, dt):
#self._status_report_field.set_text("Updating sensor readings...\n")
if len(self.sliders) > 0:
slider_num = 0
for s in self.sensors.values():
reading = self._cs.get_sensor_reading(s.path)
if reading.is_valid:
self.sliders[slider_num].model.set_value(
float(reading.value) * self.meters_per_unit
) # raw readings are in stage length units; scaling by meters_per_unit yields Newtons (kg⋅m⋅s⁻²)
else:
self.sliders[slider_num].model.set_value(0)
slider_num += 1
# contacts_raw = self._cs.get_body_contact_raw_data(self.leg_paths[0])
# if len(contacts_raw):
# c = contacts_raw[0]
# # print(c)
######################################################################################
# Functions Below This Point Support The Provided Example And Can Be Deleted/Replaced
######################################################################################
def countdown(self, seconds, name=""):
for i in range(seconds, 0, -1):
#message += str(i) + "...\n"
# self._status_report_field.set_text(message)
print(name + " Countdown: " + str(i))
time.sleep(1)
print("\n")
return
# def _on_init(self):
# self._articulation = None
# self._cuboid = None
# self._scenario = ExampleScenario()
# def _add_light_to_stage(self):
# """
# A new stage does not have a light by default. This function creates a spherical light
# """
# sphereLight = UsdLux.SphereLight.Define(get_current_stage(), Sdf.Path("/World/SphereLight"))
# sphereLight.CreateRadiusAttr(2)
# sphereLight.CreateIntensityAttr(100000)
# XFormPrim(str(sphereLight.GetPath())).set_world_pose([6.5, 0, 12])
# def _setup_scene(self):
# """
# This function is attached to the Load Button as the setup_scene_fn callback.
# On pressing the Load Button, a new instance of World() is created and then this function is called.
# The user should now load their assets onto the stage and add them to the World Scene.
# In this example, a new stage is loaded explicitly, and all assets are reloaded.
# If the user is relying on hot-reloading and does not want to reload assets every time,
# they may perform a check here to see if their desired assets are already on the stage,
# and avoid loading anything if they are. In this case, the user would still need to add
# their assets to the World (which has low overhead). See commented code section in this function.
# """
# # Load the UR10e
# robot_prim_path = "/ur10e"
# path_to_robot_usd = get_assets_root_path() + "/Isaac/Robots/UniversalRobots/ur10e/ur10e.usd"
# # Do not reload assets when hot reloading. This should only be done while extension is under development.
# # if not is_prim_path_valid(robot_prim_path):
# # create_new_stage()
# # add_reference_to_stage(path_to_robot_usd, robot_prim_path)
# # else:
# # print("Robot already on Stage")
# create_new_stage()
# self._add_light_to_stage()
# add_reference_to_stage(path_to_robot_usd, robot_prim_path)
# # Create a cuboid
# self._cuboid = FixedCuboid(
# "/Scenario/cuboid", position=np.array([0.3, 0.3, 0.5]), size=0.05, color=np.array([255, 0, 0])
# )
# self._articulation = Articulation(robot_prim_path)
# # Add user-loaded objects to the World
# world = World.instance()
# world.scene.add(self._articulation)
# world.scene.add(self._cuboid)
# def _setup_scenario(self):
# """
# This function is attached to the Load Button as the setup_post_load_fn callback.
# The user may assume that their assets have been loaded by their setup_scene_fn callback, that
# their objects are properly initialized, and that the timeline is paused on timestep 0.
# In this example, a scenario is initialized which will move each robot joint one at a time in a loop while moving the
# provided prim in a circle around the robot.
# """
# self._reset_scenario()
# # UI management
# self._scenario_state_btn.reset()
# self._scenario_state_btn.enabled = True
# self._reset_btn.enabled = True
# def _reset_scenario(self):
# self._scenario.teardown_scenario()
# self._scenario.setup_scenario(self._articulation, self._cuboid)
# def _on_post_reset_btn(self):
# """
# This function is attached to the Reset Button as the post_reset_fn callback.
# The user may assume that their objects are properly initialized, and that the timeline is paused on timestep 0.
# They may also assume that objects that were added to the World.Scene have been moved to their default positions.
# I.e. the cube prim will move back to the position it was in when it was created in self._setup_scene().
# """
# self._reset_scenario()
# # UI management
# self._scenario_state_btn.reset()
# self._scenario_state_btn.enabled = True
# def _update_scenario(self, step: float):
# """This function is attached to the Run Scenario StateButton.
# This function was passed in as the physics_callback_fn argument.
# This means that when the a_text "RUN" is pressed, a subscription is made to call this function on every physics step.
# When the b_text "STOP" is pressed, the physics callback is removed.
# Args:
# step (float): The dt of the current physics step
# """
# self._scenario.update_scenario(step)
# def _on_run_scenario_a_text(self):
# """
# This function is attached to the Run Scenario StateButton.
# This function was passed in as the on_a_click_fn argument.
# It is called when the StateButton is clicked while saying a_text "RUN".
# This function simply plays the timeline, which means that physics steps will start happening. After the world is loaded or reset,
# the timeline is paused, which means that no physics steps will occur until the user makes it play either programmatically or
# through the left-hand UI toolbar.
# """
# self._timeline.play()
# def _on_run_scenario_b_text(self):
# """
# This function is attached to the Run Scenario StateButton.
# This function was passed in as the on_b_click_fn argument.
# It is called when the StateButton is clicked while saying a_text "STOP"
# Pausing the timeline on b_text is not strictly necessary for this example to run.
# Clicking "STOP" will cancel the physics subscription that updates the scenario, which means that
# the robot will stop getting new commands and the cube will stop updating without needing to
# pause at all. The reason that the timeline is paused here is to prevent the robot being carried
# forward by momentum for a few frames after the physics subscription is canceled. Pausing here makes
# this example prettier, but if curious, the user should observe what happens when this line is removed.
# """
# self._timeline.pause()
def _reset_extension(self):
"""This is called when the user opens a new stage from self.on_stage_event().
All state should be reset.
"""
self._on_init()
self._reset_ui()
# def _reset_ui(self):
# self._scenario_state_btn.reset()
# self._scenario_state_btn.enabled = False
# self._reset_btn.enabled = False
| 25,818 | Python | 41.187908 | 142 | 0.589395 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/ui_builder.py
|
# This software contains source code provided by NVIDIA Corporation.
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .AbstracSensorClass import AbstractSensorOperator
from .ContactSensorClass import ContactSensorOperator
import numpy as np
import omni.timeline
import omni.ui as ui
import omni.kit.commands
import time
import os
import sys
import carb
from omni.isaac.core.utils.prims import is_prim_path_valid, get_all_matching_child_prims, delete_prim, get_prim_children
from omni.isaac.core.utils.stage import add_reference_to_stage, create_new_stage, get_current_stage
from omni.isaac.core.world import World
from omni.isaac.ui.element_wrappers import CollapsableFrame, StateButton, Button, TextBlock, StringField, DropDown
from omni.isaac.ui.ui_utils import get_style, LABEL_WIDTH
from omni.usd import StageEventType
from pxr import Sdf, UsdLux, Gf
from omni.isaac.sensor import _sensor
#from omni.isaac.proximity_sensor import Sensor, register_sensor, clear_sensors
from .scenario import ExampleScenario
from pxr import UsdPhysics
class UIBuilder:
def __init__(self, window):
# Window to hold the UI elements
self.window = window
# Frames are sub-windows that can contain multiple UI elements
self.frames = []
# UI elements created using a UIElementWrapper instance
self.wrapped_ui_elements = []
# Get access to the timeline to control stop/pause/play programmatically
self._timeline = omni.timeline.get_timeline_interface()
# Create a list to hold all sensor operators
self._sensor_operators = []
############### Add Sensor Operators Here ################
self._sensor_operators.append(ContactSensorOperator()) # Add a contact sensor operator
#########################################################
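# A hypothetical additional operator would be registered the same way, e.g.:
# self._sensor_operators.append(ProximitySensorOperator())  # (illustrative name)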
# Debugging
# version = sys.version
# executable = sys.executable
# print(f"Python version: {version}")
# print(f"Python executable location: {executable}")
###################################################################################
# The Functions Below Are Called Automatically By extension.py
###################################################################################
def on_menu_callback(self):
"""Callback for when the UI is opened from the toolbar.
This is called directly after build_ui().
"""
pass
def on_timeline_event(self, event):
"""Callback for Timeline events (Play, Pause, Stop)
Args:
event (omni.timeline.TimelineEventType): Event Type
"""
if event.type == int(omni.timeline.TimelineEventType.STOP):
# When the user hits the stop button through the UI, they will inevitably discover edge cases where things break
# For complete robustness, the user should resolve those edge cases here
# In general, for extensions based off this template, there is no value to having the user click the play/stop
# button instead of using the Load/Reset/Run buttons provided.
#self._scenario_state_btn.reset()
#self._scenario_state_btn.enabled = False
pass
def on_physics_step(self, step: float):
"""Callback for Physics Step.
Physics steps only occur when the timeline is playing
Args:
step (float): Size of physics step
"""
# Update the sensor readings for all added sensors
for operator in self._sensor_operators:
operator.sensor_update(step)
def on_stage_event(self, event):
"""Callback for Stage Events
Args:
event (omni.usd.StageEventType): Event Type
"""
if event.type == int(StageEventType.OPENED):
# If the user opens a new stage, the extension should completely reset
self._reset_extension()
def cleanup(self):
"""
Called when the stage is closed or the extension is hot reloaded.
Perform any necessary cleanup such as removing active callback functions
Buttons imported from omni.isaac.ui.element_wrappers implement a cleanup function that should be called
"""
for ui_elem in self.wrapped_ui_elements:
ui_elem.cleanup()
################################# Individual Frames ##################################################
def create_status_report_frame(self):
self._status_report_frame = CollapsableFrame("Status Report", collapsed=False)
with self._status_report_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
self._status_report_field = TextBlock(
"Last UI Event",
num_lines=3,
tooltip="Prints the latest change to this UI",
include_copy_button=True,
)
#link the status report frame to the sensor operators to allow them to update the status report
for operator in self._sensor_operators:
operator._status_report_field = self._status_report_field
def create_import_sensors_frame(self):
buttons_frame = CollapsableFrame("Import Sensors", collapsed=False)
with buttons_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
string_field = StringField(
"Import CSV File",
default_value="TactileSim/sensor_configs",
tooltip="Path to sensor positioning file",
read_only=False,
multiline_okay=False,
on_value_changed_fn=self._on_string_field_value_changed_fn,
use_folder_picker=True,
#item_filter_fn=is_usd_or_python_path,
)
self.wrapped_ui_elements.append(string_field)
dropdown = DropDown(
"Config Options",
tooltip=" Select an option from the DropDown",
populate_fn=self.dropdown_populate_fn,
on_selection_fn=self._on_dropdown_item_selection,
)
self.wrapped_ui_elements.append(dropdown)
dropdown.repopulate() # This does not happen automatically, and it triggers the on_selection_fn
# Add a button to all supported sensor operators
for operator in self._sensor_operators:
button = Button(
"Refresh " + operator.sensor_description,
"Update",
tooltip="Reread the data from the specified file path to update sensors",
on_click_fn=operator.import_sensors_fn,
)
self.wrapped_ui_elements.append(button)
# Add a remove sensors button to all supported sensor operators
for operator in self._sensor_operators:
button = Button(
"Remove " + operator.sensor_description,
"Remove All",
tooltip="Remove all sensors " + operator.sensor_description + " from the robot",
on_click_fn=operator.remove_sensors_fn,
)
self.wrapped_ui_elements.append(button)
self._status_report_field = TextBlock(
"Import Status",
num_lines=10,
tooltip="Outputs the status of the import process",
include_copy_button=True,
)
# Add the status report field to the sensor operators
for operator in self._sensor_operators:
operator._status_report_field = self._status_report_field
def create_all_sensor_readings_frames(self):
# Create a sensor readings frame for each sensor operator
for operator in self._sensor_operators:
operator.create_sensor_readings_frame()
def build_ui(self):
"""
Build a custom UI tool to run your extension.
This function will be called any time the UI window is closed and reopened.
"""
self.create_import_sensors_frame()
self.create_all_sensor_readings_frames()
############################## Import Frame Functions ########################################
def dropdown_populate_fn(self):
"""
Function that populates the dropdown with options
Returns all the files in the directory specified by the string field
"""
options = []
# Get the path from the string field
path = self.wrapped_ui_elements[0].get_value()
# Get all the files in the directory
try:
options = os.listdir(path)
# Add an empty string to the beginning of the list
options.insert(0, "")
# Add a 'Go Back' option at the end of the list
options.append("Go Back")
except:
options = []
return options
def _on_string_field_value_changed_fn(self, value):
"""
Function that executes when the user changes the value of the string field
Sets the value of the string field to what the user entered
"""
self.wrapped_ui_elements[0].set_value(value)
# Update the dropdown with the new path
self.wrapped_ui_elements[1].repopulate()
def _on_dropdown_item_selection(self, item):
"""
Function that executes when the user selects an item from the dropdown
        Updates the config path, or navigates the directory tree when a folder or 'Go Back' is selected
        """
        # Go back one directory if the user selects 'Go Back'
if item == "Go Back":
# Get the path from the string field minus the last folder
path = self.wrapped_ui_elements[0].get_value()
parts = path.split("/")
path = "/".join(parts[:-1])
self.wrapped_ui_elements[0].set_value(path)
self.wrapped_ui_elements[1].repopulate()
return
if item == "":
return
self.config_path = self.wrapped_ui_elements[0].get_value() + "/" + item
# Update the config path in the sensor operators
for operator in self._sensor_operators:
operator.config_path = self.config_path
        if self.config_path[-4:] != ".csv":
            # If the selection is not a CSV file, assume it is a folder and navigate into it
            self.wrapped_ui_elements[0].set_value(self.config_path)
            self.wrapped_ui_elements[1].repopulate()
######################################################################################
# Functions Below This Point Support The Provided Example And Can Be Deleted/Replaced
######################################################################################
    def countdown(self, seconds, name=""):
        for i in range(seconds, 0, -1):
            print(f"{name} Countdown: {i}")
            time.sleep(1)
        print("\n")
        return
def _on_init(self):
# Frames are sub-windows that can contain multiple UI elements
self.frames = []
# UI elements created using a UIElementWrapper instance
self.wrapped_ui_elements = []
# Get access to the timeline to control stop/pause/play programmatically
self._timeline = omni.timeline.get_timeline_interface()
# Create a list to hold all sensor operators
self._sensor_operators = []
############### Add Sensor Operators Here ################
self._sensor_operators.append(ContactSensorOperator()) # Add a contact sensor operator
#########################################################
# Debugging
# version = sys.version
# executable = sys.executable
# print(f"Python version: {version}")
# print(f"Python executable location: {executable}")
# def _add_light_to_stage(self):
# """
# A new stage does not have a light by default. This function creates a spherical light
# """
# sphereLight = UsdLux.SphereLight.Define(get_current_stage(), Sdf.Path("/World/SphereLight"))
# sphereLight.CreateRadiusAttr(2)
# sphereLight.CreateIntensityAttr(100000)
# XFormPrim(str(sphereLight.GetPath())).set_world_pose([6.5, 0, 12])
# def _setup_scene(self):
# """
# This function is attached to the Load Button as the setup_scene_fn callback.
# On pressing the Load Button, a new instance of World() is created and then this function is called.
# The user should now load their assets onto the stage and add them to the World Scene.
# In this example, a new stage is loaded explicitly, and all assets are reloaded.
# If the user is relying on hot-reloading and does not want to reload assets every time,
# they may perform a check here to see if their desired assets are already on the stage,
# and avoid loading anything if they are. In this case, the user would still need to add
# their assets to the World (which has low overhead). See commented code section in this function.
# """
# # Load the UR10e
# robot_prim_path = "/ur10e"
# path_to_robot_usd = get_assets_root_path() + "/Isaac/Robots/UniversalRobots/ur10e/ur10e.usd"
# # Do not reload assets when hot reloading. This should only be done while extension is under development.
# # if not is_prim_path_valid(robot_prim_path):
# # create_new_stage()
# # add_reference_to_stage(path_to_robot_usd, robot_prim_path)
# # else:
# # print("Robot already on Stage")
# create_new_stage()
# self._add_light_to_stage()
# add_reference_to_stage(path_to_robot_usd, robot_prim_path)
# # Create a cuboid
# self._cuboid = FixedCuboid(
# "/Scenario/cuboid", position=np.array([0.3, 0.3, 0.5]), size=0.05, color=np.array([255, 0, 0])
# )
# self._articulation = Articulation(robot_prim_path)
# # Add user-loaded objects to the World
# world = World.instance()
# world.scene.add(self._articulation)
# world.scene.add(self._cuboid)
# def _setup_scenario(self):
# """
# This function is attached to the Load Button as the setup_post_load_fn callback.
# The user may assume that their assets have been loaded by their setup_scene_fn callback, that
# their objects are properly initialized, and that the timeline is paused on timestep 0.
# In this example, a scenario is initialized which will move each robot joint one at a time in a loop while moving the
# provided prim in a circle around the robot.
# """
# self._reset_scenario()
# # UI management
# self._scenario_state_btn.reset()
# self._scenario_state_btn.enabled = True
# self._reset_btn.enabled = True
# def _reset_scenario(self):
# self._scenario.teardown_scenario()
# self._scenario.setup_scenario(self._articulation, self._cuboid)
# def _on_post_reset_btn(self):
# """
# This function is attached to the Reset Button as the post_reset_fn callback.
# The user may assume that their objects are properly initialized, and that the timeline is paused on timestep 0.
# They may also assume that objects that were added to the World.Scene have been moved to their default positions.
# I.e. the cube prim will move back to the position it was in when it was created in self._setup_scene().
# """
# self._reset_scenario()
# # UI management
# self._scenario_state_btn.reset()
# self._scenario_state_btn.enabled = True
# def _update_scenario(self, step: float):
# """This function is attached to the Run Scenario StateButton.
# This function was passed in as the physics_callback_fn argument.
# This means that when the a_text "RUN" is pressed, a subscription is made to call this function on every physics step.
# When the b_text "STOP" is pressed, the physics callback is removed.
# Args:
# step (float): The dt of the current physics step
# """
# self._scenario.update_scenario(step)
# def _on_run_scenario_a_text(self):
# """
# This function is attached to the Run Scenario StateButton.
# This function was passed in as the on_a_click_fn argument.
# It is called when the StateButton is clicked while saying a_text "RUN".
# This function simply plays the timeline, which means that physics steps will start happening. After the world is loaded or reset,
# the timeline is paused, which means that no physics steps will occur until the user makes it play either programmatically or
# through the left-hand UI toolbar.
# """
# self._timeline.play()
# def _on_run_scenario_b_text(self):
# """
# This function is attached to the Run Scenario StateButton.
# This function was passed in as the on_b_click_fn argument.
# It is called when the StateButton is clicked while saying a_text "STOP"
# Pausing the timeline on b_text is not strictly necessary for this example to run.
# Clicking "STOP" will cancel the physics subscription that updates the scenario, which means that
# the robot will stop getting new commands and the cube will stop updating without needing to
# pause at all. The reason that the timeline is paused here is to prevent the robot being carried
# forward by momentum for a few frames after the physics subscription is canceled. Pausing here makes
# this example prettier, but if curious, the user should observe what happens when this line is removed.
# """
# self._timeline.pause()
def _reset_extension(self):
"""This is called when the user opens a new stage from self.on_stage_event().
All state should be reset.
"""
self._on_init()
self._reset_ui()
def _reset_ui(self):
"""This function is called by _reset_extension() to reset the UI to its initial state.
This function should not reset the state of the extension, only the UI.
"""
for frame in self.frames:
frame.delete()
self.frames = []
self.wrapped_ui_elements = []
self.build_ui()
| 19,266 |
Python
| 41.720621 | 140 | 0.598827 |
cKohl10/TactileSim/exts/contact_ext_test/Contact_Extension_Test_python/README.md
|
# Loading Extension
To enable this extension, run Isaac Sim with the flags --ext-folder {path_to_ext_folder} --enable {ext_directory_name}
The user will see the extension appear on the toolbar on startup with the title they specified in the Extension Generator
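For example, assuming Isaac Sim is launched from its install directory and this repository is cloned to the home directory (both paths illustrative):
```
./isaac-sim.sh --ext-folder ~/TactileSim/exts --enable contact_ext_test
```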
# Extension Usage
This template extension creates a Load, Reset, and Run button in a simple UI.
The Load and Reset buttons interact with the omni.isaac.core World() in order
to simplify user interaction with the simulator and provide certain guarantees to the user
at the times their callback functions are called.
# Template Code Overview
The template is well documented and is meant to be self-explanatory to the user should they
start reading the provided python files. A short overview is also provided here:
global_variables.py:
A script that stores the global variables that the user specified when creating this extension, such as the Title and Description.
extension.py:
A class containing the standard boilerplate necessary to have the user extension show up on the Toolbar. This
class is meant to fulfill most use cases without modification.
In extension.py, useful standard callback functions are created that the user may complete in ui_builder.py.
ui_builder.py:
This file is the user's main entrypoint into the template. Here, the user can see useful callback functions that have been
set up for them, and they may also create UI buttons that are hooked up to more user-defined callback functions. This file is
the most thoroughly documented, and the user should read through it before making serious modification.
scenario.py:
This file contains an implementation of an example "Scenario" that implements a "teardown", "setup", and "update" function.
This particular structure was chosen to make a clear code separation between UI management and the scenario logic. In this way, the
ExampleScenario() class serves as a simple backend to the UI. The user should feel encouraged to implement the backend to their UI
that best suits their needs.
| 2,078 |
Markdown
| 58.399998 | 137 | 0.783927 |
cKohl10/TactileSim/exts/contact_ext_test/config/extension.toml
|
[core]
reloadable = true
order = 0
[package]
version = "1.0.0"
category = "Simulation"
title = "Contact Extension Test"
description = "First test of using isaac sim extensions"
authors = ["NVIDIA"]
repository = ""
keywords = []
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
[[python.module]]
name = "Contact_Extension_Test_python"
| 485 |
TOML
| 18.439999 | 56 | 0.686598 |
cKohl10/TactileSim/exts/contact_ext_test/docs/CHANGELOG.md
|
# Changelog
## [0.1.0] - 2024-05-17
### Added
- Initial version of Contact Extension Test Extension
| 103 |
Markdown
| 11.999999 | 53 | 0.679612 |
cKohl10/TactileSim/exts/contact_ext_test/docs/README.md
|
# Usage
To enable this extension, run Isaac Sim with the flags --ext-folder {path_to_ext_folder} --enable {ext_directory_name}
| 129 |
Markdown
| 24.999995 | 118 | 0.736434 |
cKohl10/TactileSim/exts/Util_example/Example_of_utilities_python/global_variables.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
EXTENSION_TITLE = "Example of utilities"
EXTENSION_DESCRIPTION = ""
| 501 |
Python
| 37.615382 | 76 | 0.802395 |
cKohl10/TactileSim/exts/Util_example/Example_of_utilities_python/extension.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import gc
import omni
import omni.kit.commands
import omni.physx as _physx
import omni.timeline
import omni.ui as ui
import omni.usd
from omni.isaac.ui.element_wrappers import ScrollingWindow
from omni.isaac.ui.menu import MenuItemDescription
from omni.kit.menu.utils import add_menu_items, remove_menu_items
from omni.usd import StageEventType
from .global_variables import EXTENSION_DESCRIPTION, EXTENSION_TITLE
from .ui_builder import UIBuilder
"""
This file serves as a basic template for the standard boilerplate operations
that make a UI-based extension appear on the toolbar.
This implementation is meant to cover most use-cases without modification.
Various callbacks are hooked up to a separate class UIBuilder in .ui_builder.py
Most users will be able to make their desired UI extension by interacting solely with
UIBuilder.
This class sets up standard useful callback functions in UIBuilder:
on_menu_callback: Called when extension is opened
on_timeline_event: Called when timeline is stopped, paused, or played
on_physics_step: Called on every physics step
on_stage_event: Called when stage is opened or closed
cleanup: Called when resources such as physics subscriptions should be cleaned up
build_ui: User function that creates the UI they want.
"""
class Extension(omni.ext.IExt):
def on_startup(self, ext_id: str):
"""Initialize extension and UI elements"""
self.ext_id = ext_id
self._usd_context = omni.usd.get_context()
# Build Window
self._window = ScrollingWindow(
title=EXTENSION_TITLE, width=600, height=500, visible=False, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
self._window.set_visibility_changed_fn(self._on_window)
action_registry = omni.kit.actions.core.get_action_registry()
action_registry.register_action(
ext_id,
f"CreateUIExtension:{EXTENSION_TITLE}",
self._menu_callback,
description=f"Add {EXTENSION_TITLE} Extension to UI toolbar",
)
self._menu_items = [
MenuItemDescription(name=EXTENSION_TITLE, onclick_action=(ext_id, f"CreateUIExtension:{EXTENSION_TITLE}"))
]
add_menu_items(self._menu_items, EXTENSION_TITLE)
# Filled in with User Functions
self.ui_builder = UIBuilder()
# Events
self._usd_context = omni.usd.get_context()
self._physxIFace = _physx.acquire_physx_interface()
self._physx_subscription = None
self._stage_event_sub = None
self._timeline = omni.timeline.get_timeline_interface()
def on_shutdown(self):
self._models = {}
remove_menu_items(self._menu_items, EXTENSION_TITLE)
action_registry = omni.kit.actions.core.get_action_registry()
action_registry.deregister_action(self.ext_id, f"CreateUIExtension:{EXTENSION_TITLE}")
if self._window:
self._window = None
self.ui_builder.cleanup()
gc.collect()
def _on_window(self, visible):
if self._window.visible:
# Subscribe to Stage and Timeline Events
self._usd_context = omni.usd.get_context()
events = self._usd_context.get_stage_event_stream()
self._stage_event_sub = events.create_subscription_to_pop(self._on_stage_event)
stream = self._timeline.get_timeline_event_stream()
self._timeline_event_sub = stream.create_subscription_to_pop(self._on_timeline_event)
self._build_ui()
else:
self._usd_context = None
self._stage_event_sub = None
self._timeline_event_sub = None
self.ui_builder.cleanup()
def _build_ui(self):
with self._window.frame:
with ui.VStack(spacing=5, height=0):
self._build_extension_ui()
async def dock_window():
await omni.kit.app.get_app().next_update_async()
def dock(space, name, location, pos=0.5):
window = omni.ui.Workspace.get_window(name)
if window and space:
window.dock_in(space, location, pos)
return window
tgt = ui.Workspace.get_window("Viewport")
dock(tgt, EXTENSION_TITLE, omni.ui.DockPosition.LEFT, 0.33)
await omni.kit.app.get_app().next_update_async()
self._task = asyncio.ensure_future(dock_window())
#################################################################
# Functions below this point call user functions
#################################################################
def _menu_callback(self):
self._window.visible = not self._window.visible
self.ui_builder.on_menu_callback()
def _on_timeline_event(self, event):
if event.type == int(omni.timeline.TimelineEventType.PLAY):
if not self._physx_subscription:
self._physx_subscription = self._physxIFace.subscribe_physics_step_events(self._on_physics_step)
elif event.type == int(omni.timeline.TimelineEventType.STOP):
self._physx_subscription = None
self.ui_builder.on_timeline_event(event)
def _on_physics_step(self, step):
self.ui_builder.on_physics_step(step)
def _on_stage_event(self, event):
if event.type == int(StageEventType.OPENED) or event.type == int(StageEventType.CLOSED):
# stage was opened or closed, cleanup
self._physx_subscription = None
self.ui_builder.cleanup()
self.ui_builder.on_stage_event(event)
def _build_extension_ui(self):
# Call user function for building UI
self.ui_builder.build_ui()
| 6,165 |
Python
| 37.298136 | 118 | 0.651095 |
cKohl10/TactileSim/exts/Util_example/Example_of_utilities_python/__init__.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from .extension import *
| 456 |
Python
| 44.699996 | 76 | 0.809211 |
cKohl10/TactileSim/exts/Util_example/Example_of_utilities_python/ui_builder.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from typing import List
import omni.ui as ui
from omni.isaac.ui.element_wrappers import (
Button,
CheckBox,
CollapsableFrame,
ColorPicker,
DropDown,
FloatField,
IntField,
StateButton,
StringField,
TextBlock,
XYPlot,
)
from omni.isaac.ui.ui_utils import get_style
class UIBuilder:
def __init__(self):
# Frames are sub-windows that can contain multiple UI elements
self.frames = []
# UI elements created using a UIElementWrapper from omni.isaac.ui.element_wrappers
self.wrapped_ui_elements = []
###################################################################################
# The Functions Below Are Called Automatically By extension.py
###################################################################################
def on_menu_callback(self):
"""Callback for when the UI is opened from the toolbar.
This is called directly after build_ui().
"""
pass
def on_timeline_event(self, event):
"""Callback for Timeline events (Play, Pause, Stop)
Args:
event (omni.timeline.TimelineEventType): Event Type
"""
pass
def on_physics_step(self, step):
"""Callback for Physics Step.
Physics steps only occur when the timeline is playing
Args:
step (float): Size of physics step
"""
pass
def on_stage_event(self, event):
"""Callback for Stage Events
Args:
event (omni.usd.StageEventType): Event Type
"""
pass
def cleanup(self):
"""
Called when the stage is closed or the extension is hot reloaded.
Perform any necessary cleanup such as removing active callback functions
Buttons imported from omni.isaac.ui.element_wrappers implement a cleanup function that should be called
"""
# None of the UI elements in this template actually have any internal state that needs to be cleaned up.
# But it is best practice to call cleanup() on all wrapped UI elements to simplify development.
for ui_elem in self.wrapped_ui_elements:
ui_elem.cleanup()
def build_ui(self):
"""
Build a custom UI tool to run your extension.
This function will be called any time the UI window is closed and reopened.
"""
# Create a UI frame that prints the latest UI event.
self._create_status_report_frame()
# Create a UI frame demonstrating simple UI elements for user input
self._create_simple_editable_fields_frame()
# Create a UI frame with different button types
self._create_buttons_frame()
# Create a UI frame with different selection widgets
self._create_selection_widgets_frame()
# Create a UI frame with different plotting tools
self._create_plotting_frame()
def _create_status_report_frame(self):
self._status_report_frame = CollapsableFrame("Status Report", collapsed=False)
with self._status_report_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
self._status_report_field = TextBlock(
"Last UI Event",
num_lines=3,
tooltip="Prints the latest change to this UI",
include_copy_button=True,
)
def _create_simple_editable_fields_frame(self):
self._simple_fields_frame = CollapsableFrame("Simple Editable Fields", collapsed=False)
with self._simple_fields_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
int_field = IntField(
"Int Field",
default_value=1,
tooltip="Type an int or click and drag to set a new value.",
lower_limit=-100,
upper_limit=100,
on_value_changed_fn=self._on_int_field_value_changed_fn,
)
self.wrapped_ui_elements.append(int_field)
float_field = FloatField(
"Float Field",
default_value=1.0,
tooltip="Type a float or click and drag to set a new value.",
step=0.5,
format="%.2f",
lower_limit=-100.0,
upper_limit=100.0,
on_value_changed_fn=self._on_float_field_value_changed_fn,
)
self.wrapped_ui_elements.append(float_field)
def is_usd_or_python_path(file_path: str):
# Filter file paths shown in the file picker to only be USD or Python files
_, ext = os.path.splitext(file_path.lower())
return ext == ".usd" or ext == ".py"
string_field = StringField(
"String Field",
default_value="Type Here or Use File Picker on the Right",
tooltip="Type a string or use the file picker to set a value",
read_only=False,
multiline_okay=False,
on_value_changed_fn=self._on_string_field_value_changed_fn,
use_folder_picker=True,
item_filter_fn=is_usd_or_python_path,
)
self.wrapped_ui_elements.append(string_field)
def _create_buttons_frame(self):
buttons_frame = CollapsableFrame("Buttons Frame", collapsed=False)
with buttons_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
button = Button(
"Button",
"CLICK ME",
tooltip="Click This Button to activate a callback function",
on_click_fn=self._on_button_clicked_fn,
)
self.wrapped_ui_elements.append(button)
state_button = StateButton(
"State Button",
"State A",
"State B",
tooltip="Click this button to transition between two states",
on_a_click_fn=self._on_state_btn_a_click_fn,
on_b_click_fn=self._on_state_btn_b_click_fn,
physics_callback_fn=None, # See Loaded Scenario Template for example usage
)
self.wrapped_ui_elements.append(state_button)
check_box = CheckBox(
"Check Box",
default_value=False,
tooltip=" Click this checkbox to activate a callback function",
on_click_fn=self._on_checkbox_click_fn,
)
self.wrapped_ui_elements.append(check_box)
def _create_selection_widgets_frame(self):
self._selection_widgets_frame = CollapsableFrame("Selection Widgets", collapsed=False)
with self._selection_widgets_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
def dropdown_populate_fn():
return ["Option A", "Option B", "Option C"]
dropdown = DropDown(
"Drop Down",
tooltip=" Select an option from the DropDown",
populate_fn=dropdown_populate_fn,
on_selection_fn=self._on_dropdown_item_selection,
)
self.wrapped_ui_elements.append(dropdown)
dropdown.repopulate() # This does not happen automatically, and it triggers the on_selection_fn
color_picker = ColorPicker(
"Color Picker",
default_value=[0.69, 0.61, 0.39, 1.0],
tooltip="Select a Color",
on_color_picked_fn=self._on_color_picked,
)
self.wrapped_ui_elements.append(color_picker)
def _create_plotting_frame(self):
self._plotting_frame = CollapsableFrame("Plotting Tools", collapsed=False)
with self._plotting_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
import numpy as np
x = np.arange(-1, 6.01, 0.01)
y = np.sin((x - 0.5) * np.pi)
plot = XYPlot(
"XY Plot",
tooltip="Press mouse over the plot for data label",
x_data=[x[:300], x[100:400], x[200:]],
y_data=[y[:300], y[100:400], y[200:]],
x_min=None, # Use default behavior to fit plotted data to entire frame
x_max=None,
y_min=-1.5,
y_max=1.5,
x_label="X [rad]",
y_label="Y",
plot_height=10,
legends=["Line 1", "Line 2", "Line 3"],
show_legend=True,
plot_colors=[
[255, 0, 0],
[0, 255, 0],
[0, 100, 200],
], # List of [r,g,b] values; not necessary to specify
)
######################################################################################
# Functions Below This Point Are Callback Functions Attached to UI Element Wrappers
######################################################################################
def _on_int_field_value_changed_fn(self, new_value: int):
status = f"Value was changed in int field to {new_value}"
self._status_report_field.set_text(status)
def _on_float_field_value_changed_fn(self, new_value: float):
status = f"Value was changed in float field to {new_value}"
self._status_report_field.set_text(status)
def _on_string_field_value_changed_fn(self, new_value: str):
status = f"Value was changed in string field to {new_value}"
self._status_report_field.set_text(status)
def _on_button_clicked_fn(self):
status = "The Button was Clicked!"
self._status_report_field.set_text(status)
def _on_state_btn_a_click_fn(self):
status = "State Button was Clicked in State A!"
self._status_report_field.set_text(status)
def _on_state_btn_b_click_fn(self):
status = "State Button was Clicked in State B!"
self._status_report_field.set_text(status)
def _on_checkbox_click_fn(self, value: bool):
status = f"CheckBox was set to {value}!"
self._status_report_field.set_text(status)
def _on_dropdown_item_selection(self, item: str):
status = f"{item} was selected from DropDown"
self._status_report_field.set_text(status)
def _on_color_picked(self, color: List[float]):
formatted_color = [float("%0.2f" % i) for i in color]
status = f"RGBA Color {formatted_color} was picked in the ColorPicker"
self._status_report_field.set_text(status)
| 11,487 |
Python
| 38.888889 | 112 | 0.542178 |
cKohl10/TactileSim/exts/Util_example/Example_of_utilities_python/README.md
|
# Loading Extension
To enable this extension, run Isaac Sim with the flags --ext-folder {path_to_ext_folder} --enable {ext_directory_name}
The user will see the extension appear on the toolbar on startup with the title they specified in the Extension Generator
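For example, assuming the repository is cloned to the home directory (an illustrative path):
```
./isaac-sim.sh --ext-folder ~/TactileSim/exts --enable Util_example
```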
# Extension Usage
This template provides the example usage for a library of UIElementWrapper objects that help to quickly develop
custom UI tools with minimal boilerplate code.
# Template Code Overview
The template is well documented and is meant to be self-explanatory to the user should they
start reading the provided python files. A short overview is also provided here:
global_variables.py:
A script that stores the global variables that the user specified when creating this extension, such as the Title and Description.
extension.py:
A class containing the standard boilerplate necessary to have the user extension show up on the Toolbar. This
class is meant to fulfill most use cases without modification.
In extension.py, useful standard callback functions are created that the user may complete in ui_builder.py.
ui_builder.py:
This file is the user's main entrypoint into the template. Here, the user can see useful callback functions that have been
set up for them, and they may also create UI buttons that are hooked up to more user-defined callback functions. This file is
the most thoroughly documented, and the user should read through it before making serious modification.
| 1,488 |
Markdown
| 58.559998 | 132 | 0.793011 |
cKohl10/TactileSim/exts/Util_example/config/extension.toml
|
[core]
reloadable = true
order = 0
[package]
version = "1.0.0"
category = "Simulation"
title = "Example of utilities"
description = ""
authors = ["NVIDIA"]
repository = ""
keywords = []
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
[[python.module]]
name = "Example_of_utilities_python"
| 441 |
TOML
| 16.679999 | 36 | 0.668934 |
cKohl10/TactileSim/exts/Util_example/docs/CHANGELOG.md
|
# Changelog
## [0.1.0] - 2024-05-17
### Added
- Initial version of Example of utilities Extension
| 101 |
Markdown
| 11.749999 | 51 | 0.673267 |
cKohl10/TactileSim/exts/Util_example/docs/README.md
|
# Usage
To enable this extension, run Isaac Sim with the flags --ext-folder {path_to_ext_folder} --enable {ext_directory_name}
| 129 |
Markdown
| 24.999995 | 118 | 0.736434 |
cKohl10/TactileSim/blender_scripts/save_sensor_pos.py
|
# Save Sensor Position Script
# Author: Carson Kohlbrenner
# Date Created: 5/29/2024
# Last Modified: 5/29/2024
# Description: Exports the vertex positions (sensor locations) of the active object to a comma-separated text file.
#
# Output format (one row per vertex):
# X, Y, Z
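# Example output row (illustrative values):
# 0.012, -0.034, 0.101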
import bpy
import bmesh
# Get the active object
obj = bpy.context.active_object
# Ensure we are in the correct mode
bpy.ops.object.mode_set(mode='OBJECT')
# Get the geometry data
mesh = obj.data
# Create a BMesh to access vertex data
bm = bmesh.new()
bm.from_mesh(mesh)
# Get the custom attribute (assuming it is a point cloud)
positions = [v.co for v in bm.verts]
# Write positions to a file
output_file = "C:/path_to_your_file/instance_positions.txt"
with open(output_file, 'w') as f:
for pos in positions:
f.write(f"{pos.x}, {pos.y}, {pos.z}\n")
bm.free()
print(f"Instance positions written to {output_file}")
| 915 |
Python
| 22.487179 | 102 | 0.713661 |
cKohl10/TactileSim/blender_scripts/verts2sensors.py
|
import csv
import re
from pxr import Usd, UsdGeom
########################################################
# Extract vertices from a USD file and save to a CSV file
# All sensors positions are the vertices of the geometry in the USD file
########################################################
def remove_repeated_prims(path):
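    # Collapses consecutive duplicate path segments, e.g.
    # '/ant/front_left_leg/front_left_leg' -> '/ant/front_left_leg'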
parts = path.split('/')
for i in range(len(parts) - 1, 0, -1):
if parts[i] == parts[i - 1]:
del parts[i]
return '/'.join(parts)
def extract_vertices(usd_file, csv_file):
# Open the USD file
stage = Usd.Stage.Open(usd_file)
# Open the CSV file for writing
with open(csv_file, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
# Write the header row
        csvwriter.writerow(['SensorNum', 'X', 'Y', 'Z', 'Radius', 'parent_path'])
vert_count = 1
# Iterate through all the prims in the stage
for prim in stage.Traverse():
# Check if the prim is a geometry (has points)
if prim.IsA(UsdGeom.PointBased):
geom = UsdGeom.PointBased(prim)
# Get the points (vertices) of the geometry
points = geom.GetPointsAttr().Get()
parent_path = prim.GetPath().pathString
# Omit 'Root' from the parent path
parent_path = parent_path.replace('/Root', '')
# Remove any triple digit numbers from the parent path
parent_path = re.sub(r'_\d{3,}', '', parent_path)
                # Blender splits the vertices into separate objects, so we need to remove the repeated prims
parent_path = remove_repeated_prims(parent_path)
                # Write each point to the CSV with a default sensor radius of 0.1
for point in points:
csvwriter.writerow([vert_count, point[0], point[1], point[2], 0.1, parent_path])
vert_count += 1
print(f'Vertices extracted from {usd_file} and saved to {csv_file}')
print(f'Vertex count: {vert_count - 1}')
# Example usage
usd_file = 'scenes/Ant Scene/robots/Ant_decimated.usd'
csv_file = 'sensor_configs/ant_vertices.csv'
extract_vertices(usd_file, csv_file)
| 2,233 |
Python
| 36.864406 | 108 | 0.552172 |
cKohl10/TactileSim/blender_scripts/sensor_bay_addon/README.md
|
Blender Tactile Sensor Addon Capabilities
1) Use weight painting to apply tactile sensors with configurable density.
2) Save the output as a CSV for reading into Isaac Sim (see the example below).
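The saved CSV uses the header row written by `sensor_bay_addon.py` (`Index, X, Y, Z, Radius, Parent`); the rows below are illustrative placeholder values:
```
Index,X,Y,Z,Radius,Parent
0,0.012,-0.034,0.101,0.1,/Root/arm_link
1,0.020,-0.031,0.099,0.1,/Root/arm_link
```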
Importing the Addon:
Applying the Skin Geometry Node:
Saving your Configurations:
| 267 |
Markdown
| 19.615383 | 78 | 0.782772 |
cKohl10/TactileSim/blender_scripts/sensor_bay_addon/sensor_bay_addon.py
|
bl_info = {
"name": "Tactile Sensor Bay",
"author": "Carson Kohlbrenner",
"version": (1, 0),
"blender": (2, 80, 0),
"location": "View3D > Add > Mesh > New Object",
"description": "Paint on tactile sensors over a surface and save them for Isaac Sim",
"warning": "",
"doc_url": "https://github.com/cKohl10/TactileSim",
"category": "",
}
import bpy
import bmesh
import csv
import os
import re
from bpy.utils import resource_path
from pathlib import Path
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty
from bpy.types import Operator
class SensorData:
def __init__(self, pos, radius, parent):
self.pos = pos
self.radius = radius
self.parent = parent
def __str__(self):
return f"Pos: {self.pos}, Radius: {self.radius}, Parent: {self.parent}"
def __repr__(self):
return str(self)
def check_children_for_sensors(obj, parent_path):
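    # Recursively walks obj's children, collecting painted sensor positions
    # (and radii, when present) from each child's evaluated mesh attributes.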
sensor_data = []
# Get the parent path from the root object
parent_path = parent_path + "/" + obj.name
    is_sensor_attribute_name = "is_sensor"
pos_attribute_name = "sensor_pos"
rad_attribute_name = "radii"
default_radius = False
# Loop through all of the children objects and search for GeometryNodes modifier
for child in obj.children:
        pos_attribute_data = []
        default_radius = False  # Reset per child; set True when this child lacks a 'radii' attribute
        # Recursively check the children for sensors
        sensor_data.extend(check_children_for_sensors(child, parent_path))
#print(f"\nChecking object {child.name} under {parent_path}...")
# Ensure the object has geometry nodes modifier
if child.modifiers.get('Skin') is None:
#print(f"{child.name} does not have a Skin modifier.")
continue
# Get the evaluated geometry
depsgraph = bpy.context.evaluated_depsgraph_get()
eval_obj = child.evaluated_get(depsgraph)
mesh = eval_obj.to_mesh()
# Check if the position data exists
if pos_attribute_name not in mesh.attributes:
#print(f"Attribute {attribute_name} not found in object {child.name}.")
# for other_name in mesh.attributes:
# print(f"Found attribute: {other_name}.")
continue
        if is_sensor_attribute_name not in mesh.attributes:
            #print(f"Attribute {is_sensor_attribute_name} not found in object {child.name}.")
            continue
if rad_attribute_name not in mesh.attributes:
#Set a default radius value if the radii attribute is not found
print(f"Attribute {rad_attribute_name} not found in object {child.name}. Setting default radius of 0.1.")
default_radius = True
# Get the attribute data
pos_attribute_data = mesh.attributes[pos_attribute_name].data
# Get the radii attribute data
rad_attribute_data = []
if not default_radius:
rad_attribute_data = mesh.attributes[rad_attribute_name].data
        is_sensor_data = mesh.attributes[is_sensor_attribute_name].data
        # Build this child's path without mutating parent_path for later loop iterations
        child_path = parent_path + "/" + child.name
        # Remove Blender's numeric suffixes (e.g. '.001') from the path
        child_path = re.sub(r'\.\d{3,}', '', child_path)
        # Add the attribute data to the sensor data list
        for i in range(len(pos_attribute_data)):
            if is_sensor_data[i].value:
                if default_radius:
                    sensor_data.append(SensorData(pos_attribute_data[i].vector, 0.1, child_path))
                else:
                    sensor_data.append(SensorData(pos_attribute_data[i].vector, rad_attribute_data[i].value, child_path))
print(f"Found {len(pos_attribute_data)} sensor positions in object {child.name}.")
# Clean up
eval_obj.to_mesh_clear()
return sensor_data
def save_attribute_to_csv(context, file_path):
# Get the object
obj = context.object
# Expand the ~ symbol into the path of the home directory
#file_path = os.path.expanduser(file_path)
# Make an array of all sensor positions,radii, and parent paths
sensor_data = []
# Check the children for sensors
sensor_data = check_children_for_sensors(obj, "")
# Check if there are any sensor positions
if len(sensor_data) == 0:
print("No sensor positions found.")
return
# Save the attribute data to CSV
with open(file_path, 'w', newline='') as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow(['Index', 'X', 'Y', 'Z', 'Radius', 'Parent'])
for i, element in enumerate(sensor_data):
pos = element.pos
csv_writer.writerow([i, pos.x, pos.y, pos.z, element.radius, element.parent])
# print(f"\nAttribute {attribute_name} saved to {file_path}")
print(f"Sensor count: {len(sensor_data)}")
class SensorSaveOperator(Operator, ExportHelper):
"""Saves the sensors in the scene"""
bl_idname = "object.save_sensors_operator"
bl_label = "Save Sensor Positions"
filename_ext = ".csv"
filter_glob: StringProperty(
default="*.csv",
options={'HIDDEN'},
maxlen=255, # Max internal buffer length, longer would be clamped.
)
def execute(self, context):
print("SensorSaveOperator.execute called\n")
if self.filepath: # Check if filepath has been set
save_attribute_to_csv(context, self.filepath)
else:
self.report({'WARNING'}, "No file selected") # Report a warning if no file was selected
return {'CANCELLED'}
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self) # Open file explorer
return {'RUNNING_MODAL'}
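# Note: once registered, this operator is also callable from Blender's Python
# console, e.g. bpy.ops.object.save_sensors_operator('INVOKE_DEFAULT')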
class SensorPanel(bpy.types.Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "Sensor Bay"
bl_idname = "OBJECT_PT_SENSOR_BAY"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
def draw(self, context):
layout = self.layout
obj = context.object
row = layout.row()
row.label(text="Save sensors as CSV", icon='FILE_TICK')
row = layout.row()
row.label(text="Selected root prim: " + obj.name)
row = layout.row()
row.prop(obj, "name")
row = layout.row()
row.operator("object.save_sensors_operator")
def register():
bpy.utils.register_class(SensorSaveOperator)
bpy.utils.register_class(SensorPanel)
def unregister():
bpy.utils.unregister_class(SensorSaveOperator)
bpy.utils.unregister_class(SensorPanel)
if __name__ == "__main__":
register()
| 6,835 |
Python
| 31.865384 | 122 | 0.625018 |
makolon/hsr_isaac_tamp/README.md
|
# TAMP-HSR
This repository implements PDDLStream for the Toyota Human Support Robot (HSR) and offers a parallel reinforcement learning environment on Isaac Sim.
## Getting Started
### Prerequisites
- NVIDIA Docker
- NVIDIA RTX GPU
- NVIDIA Driver 515.xx
https://github.com/makolon/hsr_isaac_tamp/assets/39409362/e7945ca0-e040-47cc-b73f-0cf99413d30d
https://github.com/makolon/hsr_isaac_tamp/assets/39409362/0322855f-2aa6-46a2-963e-28bc1f77577c
### Installation
1. Clone the repository
```
$ git clone --recursive git@github.com:makolon/tamp-hsr.git
```
2. Build docker image
```
$ cd tamp-hsr/docker/docker_hsr/
$ ./build.sh
```
3. Run docker container
```
$ cd tamp-hsr/docker/docker_hsr/
$ ./run.sh
```
4. Compile FastDownward
```
$ cd tamp-hsr/hsr_tamp/downward/
$ git submodule update --init --recursive
$ python3 build.py
$ cd ./builds/
$ ln -s release release32
```
## Usage
### Simulation
#### PDDLStream only for 2d environment
You can test PDDLStream in a 2D pygame environment.
```
$ cd tamp-hsr/hsr_tamp/experiments/env_2d/
$ python3 tamp_planner.py
```
#### PDDLStream only for 3d environment
You can test PDDLStream in a 3D PyBullet environment, including cooking and block-holding tasks.
```
$ cd tamp-hsr/hsr_tamp/experiments/env_3d/
$ python3 tamp_planner.py --problem <problem_name>
```
### Real
#### Execute plan on 2D environment
1. Enter to hsrb_mode
```
$ hsrb_mode
```
2. Set up ROS configurations
```
$ cd tamp-hsr/hsr_ros/hsr_ws/
$ source devel/setup.bash
```
3. Execute ROS scripts
```
$ roslaunch env_2d exec_tamp.launch --mode <feedforward/feedback>
```
#### Execute plan on 3D environment
1. Enter to hsrb_mode
```
$ hsrb_mode
```
2. Set up ROS configurations
```
$ cd tamp-hsr/hsr_ros/hsr_ws/
$ source devel/setup.bash
```
3. Launch the gearbox assembly scripts
```
$ roslaunch env_3d exec_tamp.launch --mode <feedforward/feedback>
```
## Setup IKfast
### Compile IKfast
Build & run docker for openrave that contain IKfast scripts.
```
$ cd tamp-hsr/docker/docker_openrave/
$ ./build.sh
$ ./run.sh
```
Then, execute the IKfast scripts, which automatically generate a C++ IK solver; copy and paste the generated code into <ik_solver>.cpp.
```
$ ./exec_openrave.sh
```
After that process, you can call the IK solver from a Python script by executing the following commands.
```
$ cd tamp-hsr/hsr_tamp/experiments/env_3d/utils/pybullet_tools/ikfast/hsrb/
$ python3 setup.py
```
### Create HSR Collada Model
If you don't have the HSR collada model, you need to run the following commands in the docker_openrave container. \
Terminal 1.
```
$ cd /ikfast/
$ roscore
```
Terminal 2.
```
$ cd /ikfast
$ export MYROBOT_NAME='hsrb4s'
$ rosrun collada_urdf urdf_to_collada "$MYROBOT_NAME".urdf "$MYROBOT_NAME".dae
```
Then, you can view the generated HSR collada model using the following commands.
```
$ openrave-robot.py "$MYROBOT_NAME".dae --info links
$ openrave "$MYROBOT_NAME".dae
```
For more informations, please refer to the [following document](http://docs.ros.org/en/kinetic/api/moveit_tutorials/html/doc/ikfast/ikfast_tutorial.html).
## Setup Motion Capture
### Prerequisites
- Windows11 PC
- Windows11 with ROS
- OptiTrack
### Usage
Check the IP address of the HSR PC and your own IP address.
```
ipconfig
```
Then, execute the following commands in a ROS-enabled terminal.
```
set ROS_HOSTNAME=<OWN IP ADDRESS>
set ROS_IP=<OWN IP ADDRESS>
set ROS_MASTER_URI=http://<HSR PC IP ADDRESS>:11311
C:\path\to\catkin_ws\devel\setup.bat
cd C:\path\to\catkin_ws\src\mocap_msg\src && python MocapRosPublisher.py
```
| 3,514 |
Markdown
| 22.433333 | 154 | 0.72453 |
makolon/hsr_isaac_tamp/hsr_ros/README.md
|
# HSR-ROS
| 10 |
Markdown
| 4.499998 | 9 | 0.6 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/test/script/analyze_trajectory.py
|
import glob
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
class AnalyzeTrajectory(object):
def __init__(self, idx=0, mode='both', joint_index=None):
        # Load ee trajectory
        self.sim_ee_traj = np.load("log/simulation_ee_traj.npy", allow_pickle=True)
        self.measured_ee_traj = np.load(f"log/measured_ee_traj_{idx}.npy", allow_pickle=True)
        # Load joint trajectory
        self.sim_joint_traj = np.load("log/simulation_joint_traj.npy", allow_pickle=True)
        self.measured_joint_traj = np.load(f"log/measured_joint_traj_{idx}.npy", allow_pickle=True)
# Length of ee/joint trajectory
self.ee_traj_len = min(len(self.sim_ee_traj), len(self.measured_ee_traj))
self.jt_traj_len = min(len(self.sim_joint_traj), len(self.measured_joint_traj))
# Visualization settings
self.fps = 8
self.num_joints = 8
self.mode = mode
self.joint_index = joint_index
self.joint_name = {'2': 'BaseRotation', '3': 'ArmLift', '4': 'ArmFlex',
'5': 'ArmRoll', '6': 'WristFlex', '7': 'WristRoll'}
if self.joint_index is None:
self.vis_all = True
else:
self.vis_all = False
def visualize_ee_3d_traj(self):
# Visualize end effector trajectory
animation_duration = self.ee_traj_len
self.time_steps_ee_3d = np.linspace(0, animation_duration, animation_duration * self.fps)
fig = plt.figure()
self.ax_ee_3d = fig.add_subplot(111, projection='3d')
self.ax_ee_3d.view_init(elev=45, azim=75)
ani = FuncAnimation(fig, self.update_ee_3d_traj_animation, frames=animation_duration, interval=1000/self.fps)
# Save fig
ani.save(f"{self.mode}_end_effector_3d_trajectory.gif", writer='imagemagick', fps=10)
ani.save(f"{self.mode}_end_effector_3d_trajectory.mp4", writer='ffmpeg', fps=10)
def visualize_ee_2d_traj(self, dir='x'):
# Visualize end effector trajectory
animation_duration = self.ee_traj_len
self.time_steps_ee_2d = np.linspace(0, animation_duration, animation_duration * self.fps)
fig = plt.figure()
self.ax_ee_2d = fig.add_subplot(1, 1, 1)
ani = FuncAnimation(fig, self.update_ee_2d_traj_animation, frames=animation_duration, interval=1000/self.fps)
# Save fig
ani.save(f"{self.mode}_end_effector_2d_trajectory.gif", writer='imagemagick', fps=10)
ani.save(f"{self.mode}_end_effector_2d_trajectory.mp4", writer='ffmpeg', fps=10)
def visualize_joint_2d_traj(self):
# Visualize each joint trajectory
animation_duration = self.jt_traj_len
self.time_steps_jt_2d = np.linspace(0, animation_duration, animation_duration * self.fps)
if self.vis_all:
fig, self.ax_jt = plt.subplots(self.num_joints, 1, sharex=True)
ani = FuncAnimation(fig, self.update_all_joint_2d_traj_animation, frames=animation_duration, interval=1000/self.fps)
else:
fig = plt.figure()
self.ax_jt = fig.add_subplot(1, 1, 1)
ani = FuncAnimation(fig, self.update_joint_2d_traj_animation, frames=animation_duration, interval=1000/self.fps)
# Save fig
if self.vis_all:
ani.save(f"{self.mode}_all_joint_2d_trajectory.gif", writer='imagemagick', fps=10)
ani.save(f"{self.mode}_all_joint_2d_trajectory.mp4", writer='ffmpeg', fps=10)
else:
ani.save(f"{self.mode}_joint_{self.joint_index}_2d_trajectory.gif", writer='imagemagick', fps=10)
ani.save(f"{self.mode}_joint_{self.joint_index}_2d_trajectory.mp4", writer='ffmpeg', fps=10)
def update_ee_3d_traj_animation(self, i):
self.ax_ee_3d.cla()
self.ax_ee_3d.set_xlim(0.5, 1.5)
self.ax_ee_3d.set_ylim(-1.0, 1.0)
self.ax_ee_3d.set_zlim(0.0, 1.0)
self.ax_ee_3d.set_xticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5])
self.ax_ee_3d.set_yticks([-1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0])
self.ax_ee_3d.set_xlabel("X")
self.ax_ee_3d.set_ylabel("Y")
self.ax_ee_3d.set_zlabel("Z")
self.ax_ee_3d.set_title(f"Time: {self.time_steps_ee_3d[i]:.2f}s")
if self.mode == 'simulation':
### Simulation
position = self.sim_ee_traj[i][0]
orientation = self.sim_ee_traj[i][1]
rotation_matrix = R.from_quat(orientation).as_matrix()
self.ax_ee_3d.scatter(*position, color='b', marker='*', s=3)
axis_x_length, axis_y_length, axis_z_length = 0.075, 0.1, 0.05
x_axis = position + axis_x_length * rotation_matrix[:, 0]
y_axis = position + axis_y_length * rotation_matrix[:, 1]
z_axis = position + axis_z_length * rotation_matrix[:, 2]
self.ax_ee_3d.plot([position[0], x_axis[0]], [position[1], x_axis[1]], [position[2], x_axis[2]], color='r', linewidth='3')
self.ax_ee_3d.plot([position[0], y_axis[0]], [position[1], y_axis[1]], [position[2], y_axis[2]], color='g', linewidth='3')
self.ax_ee_3d.plot([position[0], z_axis[0]], [position[1], z_axis[1]], [position[2], z_axis[2]], color='b', linewidth='3')
traj = self.sim_ee_traj[:i+1, 0]
x = [point[0] for point in traj]
y = [point[1] for point in traj]
z = [point[2] for point in traj]
self.ax_ee_3d.plot(x, y, z, color='deepskyblue', linewidth=1, marker='*', markersize=2)
elif self.mode == 'measured':
### Measured
position = self.measured_ee_traj[i][0]
orientation = self.measured_ee_traj[i][1]
rotation_matrix = R.from_quat(orientation).as_matrix()
self.ax_ee_3d.scatter(*position, color='g', marker='^', s=3)
axis_x_length, axis_y_length, axis_z_length = 0.075, 0.1, 0.05
x_axis = position + axis_x_length * rotation_matrix[:, 0]
y_axis = position + axis_y_length * rotation_matrix[:, 1]
z_axis = position + axis_z_length * rotation_matrix[:, 2]
self.ax_ee_3d.plot([position[0], x_axis[0]], [position[1], x_axis[1]], [position[2], x_axis[2]], color='r', linewidth='3')
self.ax_ee_3d.plot([position[0], y_axis[0]], [position[1], y_axis[1]], [position[2], y_axis[2]], color='g', linewidth='3')
self.ax_ee_3d.plot([position[0], z_axis[0]], [position[1], z_axis[1]], [position[2], z_axis[2]], color='b', linewidth='3')
traj = self.measured_ee_traj[:i+1, 0]
x = [point[0] for point in traj]
y = [point[1] for point in traj]
z = [point[2] for point in traj]
self.ax_ee_3d.plot(x, y, z, color='limegreen', linewidth=1, marker='^', markersize=2)
elif self.mode == 'both':
### Simulation
position = self.sim_ee_traj[i][0]
orientation = self.sim_ee_traj[i][1]
rotation_matrix = R.from_quat(orientation).as_matrix()
self.ax_ee_3d.scatter(*position, color='b', marker='*', s=3)
axis_x_length, axis_y_length, axis_z_length = 0.075, 0.1, 0.05
x_axis = position + axis_x_length * rotation_matrix[:, 0]
y_axis = position + axis_y_length * rotation_matrix[:, 1]
z_axis = position + axis_z_length * rotation_matrix[:, 2]
self.ax_ee_3d.plot([position[0], x_axis[0]], [position[1], x_axis[1]], [position[2], x_axis[2]], color='r', linewidth='3')
self.ax_ee_3d.plot([position[0], y_axis[0]], [position[1], y_axis[1]], [position[2], y_axis[2]], color='g', linewidth='3')
self.ax_ee_3d.plot([position[0], z_axis[0]], [position[1], z_axis[1]], [position[2], z_axis[2]], color='b', linewidth='3')
traj = self.sim_ee_traj[:i+1, 0]
x = [point[0] for point in traj]
y = [point[1] for point in traj]
z = [point[2] for point in traj]
self.ax_ee_3d.plot(x, y, z, color='deepskyblue', linewidth=1, marker='*', markersize=2)
### Measured
position = self.measured_ee_traj[i][0]
orientation = self.measured_ee_traj[i][1]
rotation_matrix = R.from_quat(orientation).as_matrix()
self.ax_ee_3d.scatter(*position, color='g', marker='^', s=3)
axis_x_length, axis_y_length, axis_z_length = 0.075, 0.1, 0.05
x_axis = position + axis_x_length * rotation_matrix[:, 0]
y_axis = position + axis_y_length * rotation_matrix[:, 1]
z_axis = position + axis_z_length * rotation_matrix[:, 2]
self.ax_ee_3d.plot([position[0], x_axis[0]], [position[1], x_axis[1]], [position[2], x_axis[2]], color='r', linewidth='3')
self.ax_ee_3d.plot([position[0], y_axis[0]], [position[1], y_axis[1]], [position[2], y_axis[2]], color='g', linewidth='3')
self.ax_ee_3d.plot([position[0], z_axis[0]], [position[1], z_axis[1]], [position[2], z_axis[2]], color='b', linewidth='3')
traj = self.measured_ee_traj[:i+1, 0]
x = [point[0] for point in traj]
y = [point[1] for point in traj]
z = [point[2] for point in traj]
self.ax_ee_3d.plot(x, y, z, color='limegreen', linewidth=1, marker='^', markersize=2)
def update_ee_2d_traj_animation(self, i):
self.ax_ee_2d.cla()
self.ax_ee_2d.set_xlim(0, self.ee_traj_len)
self.ax_ee_2d.set_ylim(0.0, 1.5)
self.ax_ee_2d.set_xlabel("Time (s)")
self.ax_ee_2d.set_ylabel("End Effector Value (rad)")
self.ax_ee_2d.set_title(f"Joint Values over Time")
self.sim_ee_traj.T
self.measured_ee_traj.T
### Simulation / Measured
for idx in self.joint_idxs:
for joint_values in self.sim_ee_traj[idx*len(self.joint_idxs)]:
self.ax_ee_2d.plot(self.time_steps_jt_2d[:i+1], joint_values[:i+1])
for joint_values in self.measured_joint_traj[idx*len(self.joint_idxs)]:
self.ax_ee_2d.plot(self.time_steps_jt_2d[:i+1], joint_values[:i+1])
self.ax_ee_2d.legend(loc="upper left")
def update_all_joint_2d_traj_animation(self, i):
plot_length = self.jt_traj_len / self.fps
for joint_index in range(self.num_joints):
if joint_index == 0:
min_range, max_range = 0.0, 1.5
self.ax_jt[joint_index].cla()
self.ax_jt[joint_index].grid()
self.ax_jt[joint_index].set_xlim(0, plot_length)
self.ax_jt[joint_index].set_ylim(min_range, max_range)
self.ax_jt[joint_index].set_ylabel("X Position (m)", fontsize=8)
self.ax_jt[joint_index].set_title("Joint Values over Time", fontsize=14)
elif joint_index == 1:
min_range, max_range = -1.0, 1.0
self.ax_jt[joint_index].cla()
self.ax_jt[joint_index].grid()
self.ax_jt[joint_index].set_xlim(0, plot_length)
self.ax_jt[joint_index].set_ylabel("Y Position (m)", fontsize=8)
self.ax_jt[joint_index].set_ylim(min_range, max_range)
elif joint_index == 2:
min_range, max_range = -np.pi, np.pi
y_label = f"{self.joint_name[str(joint_index)]} Value(rad)"
self.ax_jt[joint_index].cla()
self.ax_jt[joint_index].grid()
self.ax_jt[joint_index].set_xlim(0, plot_length)
self.ax_jt[joint_index].set_ylim(min_range, max_range)
self.ax_jt[joint_index].set_ylabel(y_label, fontsize=8)
elif joint_index == 3:
min_range, max_range = -0.1, 0.5
y_label = f"{self.joint_name[str(joint_index)]} Value(m)"
self.ax_jt[joint_index].cla()
self.ax_jt[joint_index].grid()
self.ax_jt[joint_index].set_xlim(0, plot_length)
self.ax_jt[joint_index].set_ylim(min_range, max_range)
self.ax_jt[joint_index].set_ylabel(y_label, fontsize=8)
elif joint_index in (4, 5, 6):
min_range, max_range = -np.pi, np.pi
y_label = f"{self.joint_name[str(joint_index)]} Value(rad)"
self.ax_jt[joint_index].cla()
self.ax_jt[joint_index].grid()
self.ax_jt[joint_index].set_xlim(0, plot_length)
self.ax_jt[joint_index].set_ylim(min_range, max_range)
self.ax_jt[joint_index].set_ylabel(y_label, fontsize=8)
elif joint_index == 7:
min_range, max_range = -np.pi, np.pi
y_label = f"{self.joint_name[str(joint_index)]} Value(rad)"
self.ax_jt[joint_index].cla()
self.ax_jt[joint_index].grid()
self.ax_jt[joint_index].set_xlim(0, plot_length)
self.ax_jt[joint_index].set_ylim(min_range, max_range)
self.ax_jt[joint_index].set_ylabel(y_label, fontsize=8)
self.ax_jt[joint_index].set_xlabel("Time (s)", fontsize=12)
### Simulation
traj = self.sim_joint_traj[:i, joint_index]
self.ax_jt[joint_index].plot(self.time_steps_jt_2d[:i], traj, linestyle='dashed', label='simulation')
### Measured
traj = self.measured_joint_traj[:i, joint_index]
self.ax_jt[joint_index].plot(self.time_steps_jt_2d[:i], traj, linestyle='dashdot', label='measured')
self.ax_jt[joint_index].legend(loc="upper right", fontsize=12)
def update_joint_2d_traj_animation(self, i):
plot_length = self.jt_traj_len / self.fps
if self.joint_index == 0:
min_range, max_range = 0.0, 1.5
self.ax_jt.cla()
self.ax_jt.grid()
self.ax_jt.set_xlim(0, plot_length)
self.ax_jt.set_ylim(min_range, max_range)
self.ax_jt.set_xlabel("Time (s)", fontsize=12)
self.ax_jt.set_ylabel("X Position (m)", fontsize=12)
self.ax_jt.set_title("Joint Values over Time", fontsize=14)
elif self.joint_index == 1:
min_range, max_range = -1.0, 1.0
self.ax_jt.cla()
self.ax_jt.grid()
self.ax_jt.set_xlim(0, plot_length)
self.ax_jt.set_ylim(min_range, max_range)
self.ax_jt.set_xlabel("Time (s)", fontsize=12)
self.ax_jt.set_ylabel("Y Position Value (m)", fontsize=12)
self.ax_jt.set_title("Joint Values over Time", fontsize=14)
elif self.joint_index == 3:
min_range, max_range = -0.1, 0.5
self.ax_jt.cla()
self.ax_jt.grid()
self.ax_jt.set_xlim(0, plot_length)
self.ax_jt.set_ylim(min_range, max_range)
self.ax_jt.set_xlabel("Time (s)", fontsize=12)
self.ax_jt.set_ylabel("ArmLift Value (m)", fontsize=12)
self.ax_jt.set_title("Joint Values over Time", fontsize=14)
else:
min_range, max_range = -np.pi, np.pi
self.ax_jt.cla()
self.ax_jt.grid()
self.ax_jt.set_xlim(0, plot_length)
self.ax_jt.set_ylim(min_range, max_range)
self.ax_jt.set_xlabel("Time (s)", fontsize=12)
self.ax_jt.set_ylabel(f"{self.joint_name[str(self.joint_index)]} Value(rad)", fontsize=12)
self.ax_jt.set_title("Joint Values over Time", fontsize=14)
### Simulation
traj = self.sim_joint_traj[:i, self.joint_index]
self.ax_jt.plot(self.time_steps_jt_2d[:i], traj, linestyle='dashed', label='simulation')
### Measured
traj = self.measured_joint_traj[:i, self.joint_index]
self.ax_jt.plot(self.time_steps_jt_2d[:i], traj, linestyle='dashdot', label='measured')
self.ax_jt.legend(loc="upper right", fontsize=12)
class AnalyzeMultipleTrajectory(object):
def __init__(self):
self.file_names = glob.glob('log/*.npy', recursive=True)
self.num_files = 1 # int(len(self.file_names) / 4)
self.sim_ee_traj = []
self.measured_ee_traj = []
self.true_ee_traj = []
self.sim_joint_traj = []
self.measured_joint_traj = []
self.ee_traj_len = []
self.jt_traj_len = []
for idx in range(self.num_files):
            # Load ee trajectory
            self.sim_ee_traj.append(np.load("log/simulation_ee_traj.npy", allow_pickle=True))
            self.measured_ee_traj.append(np.load(f"log/measured_ee_traj_{idx}.npy", allow_pickle=True))
            self.true_ee_traj.append(np.load(f"log/true_ee_traj_{idx}.npy", allow_pickle=True))
            # Load joint trajectory
            self.sim_joint_traj.append(np.load("log/simulation_joint_traj.npy", allow_pickle=True))
            self.measured_joint_traj.append(np.load(f"log/measured_joint_traj_{idx}.npy", allow_pickle=True))
# Length of ee/joint trajectory
self.ee_traj_len.append(min(len(self.sim_ee_traj[idx]), len(self.measured_ee_traj[idx])))
self.jt_traj_len.append(min(len(self.sim_joint_traj[idx]), len(self.measured_joint_traj[idx])))
# Visualization settings
self.num_joints = 8
self.joint_name = {'2': 'BaseRotation', '3': 'ArmLift', '4': 'ArmFlex',
'5': 'ArmRoll', '6': 'WristFlex', '7': 'WristRoll'}
def plot_ee_traj(self, axis='xy'):
all_in_one = False
if all_in_one:
if axis == 'xy':
fig_xy, ax_xy = plt.subplots()
ax_xy.set_xlim(0.5, 1.5)
ax_xy.set_ylim(-1.0, 1.0)
ax_xy.set_xlabel('X-axis Position (m)')
ax_xy.set_ylabel('Y-axis Position (m)')
elif axis == 'yz':
fig_yz, ax_yz = plt.subplots()
ax_yz.set_xlim(-1.0, 1.0)
ax_yz.set_ylim(0.0, 1.0)
ax_yz.set_xlabel('Y-axis Position (m)')
ax_yz.set_ylabel('Z-axis Position (m)')
elif axis == 'zx':
fig_zx, ax_zx = plt.subplots()
ax_zx.set_xlim(0.0, 1.0)
ax_zx.set_ylim(0.5, 1.5)
ax_zx.set_xlabel('Z-axis Position (m)')
ax_zx.set_ylabel('X-axis Position (m)')
for idx in range(self.num_files):
for i in range(self.ee_traj_len[idx]):
sim_traj = self.sim_ee_traj[idx][i]
sim_x = sim_traj[0][0]
sim_y = sim_traj[0][1]
sim_z = sim_traj[0][2]
measured_traj = self.measured_ee_traj[idx][i]
measured_x = measured_traj[0][0]
measured_y = measured_traj[0][1]
measured_z = measured_traj[0][2]
true_traj = self.true_ee_traj[idx][i]
true_x = true_traj[0][0]
true_y = true_traj[0][1]
true_z = true_traj[0][2]
if axis == 'xy':
ax_xy.plot(sim_x, sim_y, marker='*', markersize=3, color='tomato', label='Simulation Trajectory')
ax_xy.plot(measured_x, measured_y, marker='^', markersize=3, color='deepskyblue', label='Measured Trajectory')
elif axis == 'yz':
ax_yz.plot(sim_y, sim_z, marker='*', markersize=3, color='coral', label='Simulation Trajectory')
ax_yz.plot(measured_y, measured_z, marker='^', markersize=3, color='goldenrod', label='Measured Trajectory')
elif axis == 'zx':
ax_zx.plot(sim_z, sim_x, marker='*', markersize=3, color='springgreen', label='Simulation Trajectory')
ax_zx.plot(measured_z, measured_x, marker='^', markersize=3, color='lightseagreen', label='Measured Trajectory')
if axis == 'xy':
ax_xy.set_title('End Effector XY-axis Trajectory')
plt.savefig('XY_traj.png')
plt.close()
elif axis == 'yz':
ax_yz.set_title('End Effector YZ-axis Trajectory')
plt.savefig('YZ_traj.png')
plt.close()
elif axis == 'zx':
ax_zx.set_title('End Effector ZX-axis Trajectory')
plt.savefig('ZX_traj.png')
plt.close()
else:
for idx in range(self.num_files):
if axis == 'xy':
fig_xy, ax_xy = plt.subplots()
ax_xy.set_xlim(0.5, 1.5)
ax_xy.set_ylim(-1.0, 1.0)
ax_xy.set_xlabel('X-axis Position (m)')
ax_xy.set_ylabel('Y-axis Position (m)')
elif axis == 'yz':
fig_yz, ax_yz = plt.subplots()
ax_yz.set_xlim(-1.0, 1.0)
ax_yz.set_ylim(0.0, 1.0)
ax_yz.set_xlabel('Y-axis Position (m)')
ax_yz.set_ylabel('Z-axis Position (m)')
elif axis == 'zx':
fig_zx, ax_zx = plt.subplots()
ax_zx.set_xlim(0.0, 1.0)
ax_zx.set_ylim(0.5, 1.5)
ax_zx.set_xlabel('Z-axis Position (m)')
ax_zx.set_ylabel('X-axis Position (m)')
for i in range(self.ee_traj_len[idx]):
sim_traj = self.sim_ee_traj[idx][i]
sim_x = sim_traj[0][0]
sim_y = sim_traj[0][1]
sim_z = sim_traj[0][2]
measured_traj = self.measured_ee_traj[idx][i]
measured_x = measured_traj[0][0]
measured_y = measured_traj[0][1]
measured_z = measured_traj[0][2]
true_traj = self.true_ee_traj[idx][i]
true_x = true_traj[0][0]
true_y = true_traj[0][1]
true_z = true_traj[0][2]
if axis == 'xy':
ax_xy.plot(sim_x, sim_y, marker='*', markersize=3, color='tomato', label='Simulation Trajectory')
ax_xy.plot(measured_x, measured_y, marker='*', markersize=3, color='deepskyblue', label='Measured Trajectory')
ax_xy.plot(true_x, true_y, marker='*', markersize=3, color='green', label='True Trajectory')
elif axis == 'yz':
ax_yz.plot(sim_y, sim_z, marker='*', markersize=3, color='coral', label='Simulation Trajectory')
ax_yz.plot(measured_y, measured_z, marker='*', markersize=3, color='deepskyblue', label='Measured Trajectory')
ax_yz.plot(true_y, true_z, marker='*', markersize=3, color='green', label='True Trajectory')
elif axis == 'zx':
ax_zx.plot(sim_z, sim_x, marker='*', markersize=3, color='springgreen', label='Simulation Trajectory')
ax_zx.plot(measured_z, measured_x, marker='*', markersize=3, color='deepskyblue', label='Measured Trajectory')
ax_zx.plot(true_z, true_x, marker='*', markersize=3, color='green', label='True Trajectory')
if axis == 'xy':
ax_xy.set_title('End Effector XY-axis Trajectory')
plt.savefig('XY_traj.png')
plt.close()
elif axis == 'yz':
ax_yz.set_title('End Effector YZ-axis Trajectory')
plt.savefig('YZ_traj.png')
plt.close()
elif axis == 'zx':
ax_zx.set_title('End Effector ZX-axis Trajectory')
plt.savefig('ZX_traj.png')
plt.close()
def evaluate_ee_error_correlation(self, axis='yz'):
all_in_one = False
if all_in_one:
if axis == 'xy':
fig_xy, ax_xy = plt.subplots()
ax_xy.set_xlim(-0.5, 0.5)
ax_xy.set_ylim(-0.5, 0.5)
ax_xy.set_xlabel('X-axis Error (m)')
ax_xy.set_ylabel('Y-axis Error (m)')
x_range = np.arange(-0.5, 0.5, 0.01)
y_range = np.arange(-0.5, 0.5, 0.01)
ax_xy.plot(x_range, y_range, color='tan', linestyle="--")
elif axis == 'yz':
fig_yz, ax_yz = plt.subplots()
ax_yz.set_xlim(-0.5, 0.5)
ax_yz.set_ylim(-0.5, 0.5)
ax_yz.set_xlabel('Y-axis Error (m)')
ax_yz.set_ylabel('Z-axis Error (m)')
x_range = np.arange(-0.5, 0.5, 0.01)
y_range = np.arange(-0.5, 0.5, 0.01)
ax_yz.plot(x_range, y_range, color='tan', linestyle="--")
elif axis == 'zx':
fig_zx, ax_zx = plt.subplots()
ax_zx.set_xlim(-0.5, 0.5)
ax_zx.set_ylim(-0.5, 0.5)
ax_zx.set_xlabel('Z-axis Error (m)')
ax_zx.set_ylabel('X-axis Error (m)')
x_range = np.arange(-0.5, 0.5, 0.01)
y_range = np.arange(-0.5, 0.5, 0.01)
ax_zx.plot(x_range, y_range, color='tan', linestyle="--")
for idx in range(self.num_files):
diff_l1_all = []
diff_l1_x = []
diff_l1_y = []
diff_l1_z = []
for i in range(self.ee_traj_len[idx]):
sim_traj = self.sim_ee_traj[idx][i]
sim_x = sim_traj[0][0]
sim_y = sim_traj[0][1]
sim_z = sim_traj[0][2]
measured_traj = self.measured_ee_traj[idx][i]
measured_x = measured_traj[0][0]
measured_y = measured_traj[0][1]
measured_z = measured_traj[0][2]
l1_x = np.array([sim_x - measured_x])
l1_y = np.array([sim_y - measured_y])
l1_z = np.array([sim_z - measured_z])
                    l1_norm = l1_x + l1_y + l1_z  # signed sum of per-axis errors (not an absolute L1 norm)
diff_l1_x.append(l1_x)
diff_l1_y.append(l1_y)
diff_l1_z.append(l1_z)
diff_l1_all.append(l1_norm)
if axis == 'xy':
ax_xy.plot(l1_x, l1_y, marker='x', markersize=3, color='teal', label='XY Error')
elif axis == 'yz':
ax_yz.plot(l1_y, l1_z, marker='x', markersize=3, color='coral', label='YZ Error')
elif axis == 'zx':
ax_zx.plot(l1_z, l1_x, marker='x', markersize=3, color='palevioletred', label='ZX Error')
diff_l1_x = np.array(diff_l1_x)
diff_l1_y = np.array(diff_l1_y)
diff_l1_z = np.array(diff_l1_z)
corr_mat = np.concatenate((diff_l1_x, diff_l1_y, diff_l1_z), axis=1)
corr_mat = corr_mat.T
corr_coef = np.corrcoef(corr_mat)
print('corr_coef:', corr_coef)
if axis == 'xy':
coef = corr_coef[0][1]
ax_xy.set_title(f'End Effector XY-axis Error Correlation: r={coef:.3f}')
plt.savefig('XY_correlation.png')
plt.close()
elif axis == 'yz':
coef = corr_coef[1][2]
ax_yz.set_title(f'End Effector YZ-axis Error Correlation: r={coef:.3f}')
plt.savefig('YZ_correlation.png')
plt.close()
elif axis == 'zx':
coef = corr_coef[2][0]
ax_zx.set_title(f'End Effector ZX-axis Error Correlation: r={coef:.3f}')
plt.savefig('ZX_correlation.png')
plt.close()
else:
for idx in range(self.num_files):
if axis == 'xy':
fig_xy, ax_xy = plt.subplots()
ax_xy.set_xlim(-0.5, 0.5)
ax_xy.set_ylim(-0.5, 0.5)
ax_xy.set_xlabel('X-axis Error (m)')
ax_xy.set_ylabel('Y-axis Error (m)')
x_range = np.arange(-0.5, 0.5, 0.01)
y_range = np.arange(-0.5, 0.5, 0.01)
ax_xy.plot(x_range, y_range, color='tan', linestyle="--")
elif axis == 'yz':
fig_yz, ax_yz = plt.subplots()
ax_yz.set_xlim(-0.5, 0.5)
ax_yz.set_ylim(-0.5, 0.5)
ax_yz.set_xlabel('Y-axis Error (m)')
ax_yz.set_ylabel('Z-axis Error (m)')
x_range = np.arange(-0.5, 0.5, 0.01)
y_range = np.arange(-0.5, 0.5, 0.01)
ax_yz.plot(x_range, y_range, color='tan', linestyle="--")
elif axis == 'zx':
fig_zx, ax_zx = plt.subplots()
ax_zx.set_xlim(-0.5, 0.5)
ax_zx.set_ylim(-0.5, 0.5)
ax_zx.set_xlabel('Z-axis Error (m)')
ax_zx.set_ylabel('X-axis Error (m)')
x_range = np.arange(-0.5, 0.5, 0.01)
y_range = np.arange(-0.5, 0.5, 0.01)
ax_zx.plot(x_range, y_range, color='tan', linestyle="--")
diff_l1_all = []
diff_l1_x = []
diff_l1_y = []
diff_l1_z = []
for i in range(self.ee_traj_len[idx]):
sim_traj = self.sim_ee_traj[idx][i]
sim_x = sim_traj[0][0]
sim_y = sim_traj[0][1]
sim_z = sim_traj[0][2]
measured_traj = self.measured_ee_traj[idx][i]
measured_x = measured_traj[0][0]
measured_y = measured_traj[0][1]
measured_z = measured_traj[0][2]
l1_x = np.array([sim_x - measured_x])
l1_y = np.array([sim_y - measured_y])
l1_z = np.array([sim_z - measured_z])
                    l1_norm = l1_x + l1_y + l1_z  # signed sum of per-axis errors (not an absolute L1 norm)
diff_l1_x.append(l1_x)
diff_l1_y.append(l1_y)
diff_l1_z.append(l1_z)
diff_l1_all.append(l1_norm)
if axis == 'xy':
ax_xy.plot(l1_x, l1_y, marker='x', markersize=3, color='teal', label='XY Error')
elif axis == 'yz':
ax_yz.plot(l1_y, l1_z, marker='x', markersize=3, color='coral', label='YZ Error')
elif axis == 'zx':
ax_zx.plot(l1_z, l1_x, marker='x', markersize=3, color='palevioletred', label='ZX Error')
diff_l1_x = np.array(diff_l1_x)
diff_l1_y = np.array(diff_l1_y)
diff_l1_z = np.array(diff_l1_z)
corr_mat = np.concatenate((diff_l1_x, diff_l1_y, diff_l1_z), axis=1)
corr_mat = corr_mat.T
corr_coef = np.corrcoef(corr_mat)
print('corr_coef:', corr_coef)
if axis == 'xy':
coef = corr_coef[0][1]
ax_xy.set_title(f'End Effector XY-axis Error Correlation: r={coef:.3f}')
plt.savefig(f'XY_correlation_{idx}.png')
plt.close()
elif axis == 'yz':
coef = corr_coef[1][2]
ax_yz.set_title(f'End Effector YZ-axis Error Correlation: r={coef:.3f}')
plt.savefig(f'YZ_correlation_{idx}.png')
plt.close()
elif axis == 'zx':
coef = corr_coef[2][0]
ax_zx.set_title(f'End Effector ZX-axis Error Correlation: r={coef:.3f}')
plt.savefig(f'ZX_correlation_{idx}.png')
plt.close()
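    # Note on the correlation readout above (a sketch of the math, not new
    # behavior): corr_mat stacks the per-step signed errors into a 3xT matrix,
    # so np.corrcoef(corr_mat) is the 3x3 Pearson matrix of (x, y, z) errors;
    # e.g. corr_coef[1][2] is the Y-Z error correlation plotted in the 'yz' case.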
def evaluate_ee_error(self):
mean_x_l2_all = []
mean_y_l2_all = []
mean_z_l2_all = []
mean_all_l2_all = []
std_x_l2_all = []
std_y_l2_all = []
std_z_l2_all = []
std_all_l2_all = []
max_x_l2_all = []
max_y_l2_all = []
max_z_l2_all = []
max_all_l2_all = []
for idx in range(self.num_files):
diff_l2_all = []
diff_l2_x = []
diff_l2_y = []
diff_l2_z = []
for i in range(self.ee_traj_len[idx]):
sim_traj = self.sim_ee_traj[idx][i]
sim_x = sim_traj[0][0]
sim_y = sim_traj[0][1]
sim_z = sim_traj[0][2]
measured_traj = self.measured_ee_traj[idx][i]
measured_x = measured_traj[0][0]
measured_y = measured_traj[0][1]
measured_z = measured_traj[0][2]
l2_x = np.sqrt(np.square(sim_x-measured_x))
l2_y = np.sqrt(np.square(sim_y-measured_y))
l2_z = np.sqrt(np.square(sim_z-measured_z))
l2_norm = np.sqrt(np.square(sim_x-measured_x)+np.square(sim_y-measured_y)+np.square(sim_z-measured_z))
diff_l2_x.append(l2_x)
diff_l2_y.append(l2_y)
diff_l2_z.append(l2_z)
diff_l2_all.append(l2_norm)
# Calculate mean
mean_l2_x = np.mean(diff_l2_x)
mean_l2_y = np.mean(diff_l2_y)
mean_l2_z = np.mean(diff_l2_z)
mean_l2_all = np.mean(diff_l2_all)
mean_x_l2_all.append(mean_l2_x)
mean_y_l2_all.append(mean_l2_y)
mean_z_l2_all.append(mean_l2_z)
mean_all_l2_all.append(mean_l2_all)
            # Calculate std (np.std, not np.var, so the error bars below are in meters)
            std_l2_x = np.std(diff_l2_x)
            std_l2_y = np.std(diff_l2_y)
            std_l2_z = np.std(diff_l2_z)
            std_l2_all = np.std(diff_l2_all)
std_x_l2_all.append(std_l2_x)
std_y_l2_all.append(std_l2_y)
std_z_l2_all.append(std_l2_z)
std_all_l2_all.append(std_l2_all)
# Calculate max
max_l2_x = np.max(diff_l2_x)
max_l2_y = np.max(diff_l2_y)
max_l2_z = np.max(diff_l2_z)
max_l2_all = np.max(diff_l2_all)
print('Mean difference in direction X:', mean_l2_x)
print('Mean difference in direction Y:', mean_l2_y)
print('Mean difference in direction Z:', mean_l2_z)
print('Mean difference: ', mean_l2_all)
print('Std difference in direction X:', std_l2_x)
print('Std difference in direction Y:', std_l2_y)
            print('Std difference in direction Z:', std_l2_z)
print('Std difference:', std_l2_all)
print('Max difference in direction X:', max_l2_x)
print('Max difference in direction Y:', max_l2_y)
print('Max difference in direction Z:', max_l2_z)
print('Max difference:', max_l2_all)
### Visualize mean and std
all_in_one = True
# Visualize X direction
if all_in_one:
x_range = np.arange(1, self.num_files+1)
y_range = mean_x_l2_all
fig, ax = plt.subplots()
ax.set_title('End-Effector Position Error')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.2)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Mean Error (m)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=std_x_l2_all, fmt='-^', markersize=6, color='darkorange',
markeredgecolor='darkorange', ecolor='darkorange', capsize=4, label='X position')
# Visualize Y direction
x_range = np.arange(1, self.num_files+1)
y_range = mean_y_l2_all
ax.errorbar(x_range, y_range, yerr=std_y_l2_all, fmt='-d', markersize=6, color='chocolate',
markeredgecolor='chocolate', ecolor='chocolate', capsize=4, label='Y position')
# Visualize Z direction
x_range = np.arange(1, self.num_files+1)
y_range = mean_z_l2_all
ax.errorbar(x_range, y_range, yerr=std_z_l2_all, fmt='-o', markersize=6, color='olivedrab',
markeredgecolor='olivedrab', ecolor='olivedrab', capsize=4, label='Z position')
# Visualize All
x_range = np.arange(1, self.num_files+1)
y_range = mean_all_l2_all
ax.errorbar(x_range, y_range, yerr=std_all_l2_all, fmt='-*', markersize=6, color='midnightblue',
markeredgecolor='midnightblue', ecolor='midnightblue', capsize=4, label='XYZ position')
            ax.legend(loc="upper right")
            # Save before show so the combined figure is actually written out
            plt.savefig('mean_error.png')
            plt.show()
            plt.close()
else:
x_range = np.arange(1, self.num_files+1)
y_range = mean_x_l2_all
fig, ax = plt.subplots()
ax.set_title('End Effector Mean Error in X Direction')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.2)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Mean Error (m)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=std_x_l2_all, fmt='-^', markersize=6, color='darkorange',
markeredgecolor='darkorange', ecolor='darkorange', capsize=4)
plt.show()
plt.close()
# Visualize Y direction
x_range = np.arange(1, self.num_files+1)
y_range = mean_y_l2_all
fig, ax = plt.subplots()
ax.set_title('End Effector Mean Error in Y Direction')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.2)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Mean Error (m)')
ax.grid()
            ax.errorbar(x_range, y_range, yerr=std_y_l2_all, fmt='-^', markersize=6, color='darkorange',
                        markeredgecolor='darkorange', ecolor='darkorange', capsize=4)
            plt.show()
            plt.close()
# Visualize Z direction
x_range = np.arange(1, self.num_files+1)
y_range = mean_z_l2_all
fig, ax = plt.subplots()
ax.set_title('End Effector Mean Error in Z Direction')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.2)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Mean Error (m)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=std_z_l2_all, fmt='-^', markersize=6, color='darkorange',
markeredgecolor='darkorange', ecolor='darkorange', capsize=4)
plt.show()
plt.close()
# Visualize All
x_range = np.arange(1, self.num_files+1)
y_range = mean_all_l2_all
fig, ax = plt.subplots()
ax.set_title('End Effector Mean Error')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.2)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Mean Error (m)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=std_all_l2_all, fmt='-^', markersize=6, color='darkorange',
markeredgecolor='darkorange', ecolor='darkorange', capsize=4)
            # Save before show so the figure is not blank when written to disk
            plt.savefig('mean_error.png')
            plt.show()
            plt.close()
def evaluate_jt_error(self):
mean_joint_all = []
std_joint_all = []
max_joint_all = []
for idx in range(self.num_files):
mean_joint_each = []
std_joint_each = []
max_joint_each = []
for i in range(self.num_joints):
diff_each_joints = []
for j in range(self.jt_traj_len[idx]):
sim_joint = self.sim_joint_traj[idx][j, i]
measured_joint = self.measured_joint_traj[idx][j, i]
diff_joint = np.sqrt(np.square(sim_joint-measured_joint))
diff_each_joints.append(diff_joint)
mean_joint = np.mean(diff_each_joints)
                std_joint = np.std(diff_each_joints)  # std, not variance, for meaningful error bars
max_joint = np.max(diff_each_joints)
print(f"Mean joint_{i} difference:", mean_joint)
print(f"Std joint_{i} difference:", std_joint)
print(f"Max joint_{i} difference:", max_joint)
mean_joint_each.append(mean_joint)
std_joint_each.append(std_joint)
max_joint_each.append(max_joint)
mean_joint_all.append(mean_joint_each)
std_joint_all.append(std_joint_each)
            max_joint_all.append(max_joint_each)
# Visualize results
for i in range(self.num_joints):
if i == 0:
x_range = np.arange(1, self.num_files+1)
y_range = np.array(mean_joint_all).T[i]
fig, ax = plt.subplots()
ax.set_title('X Position Error')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.15)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Position Error (m)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=np.array(std_joint_all).T[i], fmt='-^', markersize=6, color='navy',
markeredgecolor='navy', ecolor='navy', capsize=4, label='X Position Error')
ax.legend(loc="upper right")
plt.savefig('joint_x_error.png')
plt.close()
elif i == 1:
x_range = np.arange(1, self.num_files+1)
y_range = np.array(mean_joint_all).T[i]
fig, ax = plt.subplots()
ax.set_title('Y Position Error')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.15)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Position Error (m)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=np.array(std_joint_all).T[i], fmt='-^', markersize=6, color='navy',
markeredgecolor='navy', ecolor='navy', capsize=4, label='Y Position Error')
ax.legend(loc="upper right")
plt.savefig('joint_y_error.png')
plt.close()
elif i == 3:
x_range = np.arange(1, self.num_files+1)
y_range = np.array(mean_joint_all).T[i]
fig, ax = plt.subplots()
ax.set_title('ArmLift Joint Error')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.15)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Position Error (m)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=np.array(std_joint_all).T[i], fmt='-^', markersize=6, color='navy',
markeredgecolor='navy', ecolor='navy', capsize=4, label='ArmLift Position Error')
ax.legend(loc="upper right")
plt.savefig('joint_armlift_error.png')
plt.close()
else:
x_range = np.arange(1, self.num_files+1)
y_range = np.array(mean_joint_all).T[i]
fig, ax = plt.subplots()
ax.set_title(f'{self.joint_name[str(i)]} Position Error')
ax.set_xlim(0.5, self.num_files+0.5)
ax.set_ylim(-0.05, 0.15)
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Position Error (rad)')
ax.grid()
ax.errorbar(x_range, y_range, yerr=np.array(std_joint_all).T[i], fmt='-^', markersize=6, color='navy',
markeredgecolor='navy', ecolor='navy', capsize=4, label=f'{self.joint_name[str(i)]} Position Error')
ax.legend(loc="upper right")
plt.savefig(f'joint_{self.joint_name[str(i)].lower()}_error.png')
plt.close()
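# Minimal vectorized sketch of the per-axis error statistics computed above,
# assuming the trajectories are reshaped to (T, 3) position arrays; kept
# separate from the classes so the original pipeline is unchanged.
def ee_error_sketch(sim_xyz, measured_xyz):
    diff = np.abs(sim_xyz - measured_xyz)                  # per-axis |error|, shape (T, 3)
    norm = np.linalg.norm(sim_xyz - measured_xyz, axis=1)  # Euclidean error per step
    return diff.mean(axis=0), diff.std(axis=0), norm.mean()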
if __name__ == "__main__":
parser = argparse.ArgumentParser('Visualize trajectory')
parser.add_argument('--index', type=int, default=0, help='set index')
parser.add_argument('--joint_index', type=int, default=None, help='set joint index')
parser.add_argument('--mode', type=str, default='both', help='set visualization mode')
parser.add_argument('--visualize', action='store_true', help='set visualization')
args = parser.parse_args()
if args.visualize:
at = AnalyzeTrajectory(idx=args.index, mode=args.mode, joint_index=args.joint_index)
at.visualize_ee_3d_traj()
at.visualize_joint_2d_traj()
else:
at = AnalyzeMultipleTrajectory()
at.evaluate_ee_error()
at.evaluate_jt_error()
at.evaluate_ee_error_correlation()
at.plot_ee_traj()
| 45,050 |
Python
| 45.684974 | 136 | 0.508746 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/hsr_py_interface.py
|
#!/usr/bin/env/python3
import sys
import rospy
import hsrb_interface
import numpy as np
from controller_manager_msgs.srv import ListControllers
class HSRPyInterface(object):
def __init__(self, standalone=False):
# Initialize ROS node
if standalone:
rospy.init_node('hsr_python_interface')
# Check server status
self.check_status()
self.robot = hsrb_interface.Robot()
self.omni_base = self.robot.get('omni_base')
self.gripper = self.robot.get('gripper')
self.whole_body = self.robot.get('whole_body')
def initialize_arm(self):
# Initialize arm position
self.whole_body.move_to_go()
def initialize_base(self):
# Initialize base position
self.omni_base.go_abs(0.0, 0.0, 0.0, 300.0)
def set_task_pose(self, grasp_type='side'):
# Set base to configuration position
self.omni_base.go_abs(-0.80, 0.75, np.pi/2, 300.0)
# Set arm to configuration pose
self.whole_body.move_to_neutral()
if grasp_type == 'side':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.3, y=0.0, z=0.0, ej=0.0),
], ref_frame_id='hand_palm_link')
elif grasp_type == 'top':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.3, y=0.0, z=0.0, ej=-1.57),
], ref_frame_id='hand_palm_link')
# Open gripper
self.gripper.command(1.2)
def fix_task_pose(self, diff_pose):
diff_x_pose, diff_y_pose, diff_z_pose = diff_pose[0], diff_pose[1], diff_pose[2]
if np.abs(diff_z_pose) < 0.1:
diff_z_pose = -0.12
else:
diff_z_pose = 0.0
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=0.0, y=diff_y_pose, z=diff_z_pose)
], ref_frame_id='hand_palm_link')
def check_status(self):
# Make sure the controller is running
rospy.wait_for_service('/hsrb/controller_manager/list_controllers')
list_controllers = rospy.ServiceProxy('/hsrb/controller_manager/list_controllers', ListControllers)
running = False
while running is False:
rospy.sleep(0.1)
for c in list_controllers().controller:
if c.name == 'arm_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'head_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'gripper_controller' and c.state == 'running':
running |= True
if c.name == 'omni_base_controller' and c.state == 'running':
running |= True
return running
def close_gripper(self, force=0.5):
self.gripper.apply_force(force)
def open_gripper(self, width=1.0):
self.gripper.command(width)
def insert_object(self, ee_mode):
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.03, y=0.0, z=0.0),
], ref_frame_id='hand_palm_link')
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=0.0, y=-0.03, z=0.0),
], ref_frame_id='hand_palm_link')
def grasp_gear(self, diff_ee_pose, pick_offset=0.1):
# Open gripper
self.open_gripper(0.5)
# Move to grasp
self.whole_body.move_end_effector_by_line((0, 0, 1), -pick_offset)
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=diff_ee_pose[0],
y=diff_ee_pose[1],
ek=-np.pi/2
),
], ref_frame_id='hand_palm_link')
self.whole_body.move_end_effector_by_line((0, 0, 1), diff_ee_pose[2]+pick_offset)
# Close gripper
self.close_gripper(0.5)
if __name__ == '__main__':
hsr_py = HSRPyInterface()
hsr_py.initialize_arm()
hsr_py.initialize_base()
| 4,143 |
Python
| 33.823529 | 107 | 0.567946 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/tf_interface.py
|
#!/bin/env/python3
import tf
import rospy
import tf2_ros
import numpy as np
import tf2_geometry_msgs
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped
from scipy.spatial.transform import Rotation as R
class TfManager(object):
def __init__(self, standalone=False):
if standalone:
rospy.init_node('tf_manager')
# TF listener
self.tfBuffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tfBuffer)
# Publisher
self.tf_pub = rospy.Publisher('/tf', TFMessage, queue_size=1)
def get_link_pose(self, link_name='hand_palm_link'):
# Translate from map coordinate to arbitrary coordinate of robot.
ee_pose = tf2_geometry_msgs.PoseStamped()
ee_pose.header.frame_id = link_name
ee_pose.header.stamp = rospy.Time(0)
ee_pose.pose.orientation.w = 1.0
try:
# Get transform at current time
global_pose = self.tfBuffer.transform(ee_pose, 'map')
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
print(e)
return None
return global_pose
def get_init_pose(self, mocap_poses):
# Get red_shaft pose
gearbox_x = mocap_poses['green_gear'].pose.position.x
gearbox_y = mocap_poses['green_gear'].pose.position.y
gearbox_z = mocap_poses['green_gear'].pose.position.z
# Get current end effector pose
current_ee_pose_x = mocap_poses['end_effector'].pose.position.x
current_ee_pose_y = mocap_poses['end_effector'].pose.position.y
current_ee_pose_z = mocap_poses['end_effector'].pose.position.z
        # Calculate difference pose
diff_x_pose = current_ee_pose_x - gearbox_x
diff_y_pose = current_ee_pose_y - gearbox_y
diff_z_pose = current_ee_pose_z - gearbox_z
init_base_pose = np.array([diff_x_pose, diff_y_pose, diff_z_pose])
return init_base_pose
# Transform target_pose on end effector coordinate to map coordinate
def transform_coordinate(self, target_pose):
# Initialize vector and matrix
trans_mat = np.zeros((4, 4))
qc_trans = np.zeros(4)
# Initialize map coordination
qc_trans[0] = target_pose[0]
qc_trans[1] = target_pose[1]
qc_trans[2] = target_pose[2]
qc_trans[3] = 1.0
ee_pose = self.get_link_pose()
if ee_pose is None:
return
# Initialize ee coordination
translation = np.array([
ee_pose.pose.position.x,
ee_pose.pose.position.y,
ee_pose.pose.position.z
])
rot = np.array([
ee_pose.pose.orientation.x,
ee_pose.pose.orientation.y,
ee_pose.pose.orientation.z,
ee_pose.pose.orientation.w
])
rot_mat = R.from_quat(rot).as_matrix()
# Calculate translation
trans_mat[3:, 3:] = 1.0
trans_mat[:3, 3] = translation
trans_mat[:3, :3] = rot_mat
qw_trans = trans_mat @ qc_trans
qw_trans = qw_trans[:3]
# Calculate rotation
qw_rot = np.array([
ee_pose.pose.orientation.x,
ee_pose.pose.orientation.y,
ee_pose.pose.orientation.z,
ee_pose.pose.orientation.w
])
return np.concatenate([qw_trans, qw_rot])
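    # Worked example (sketch): with the end effector at map position t = (1, 0, 0)
    # and identity orientation, transform_coordinate((0, 0, 0.5)) builds
    #   qw = T @ (0, 0, 0.5, 1)  with  T = [[R, t], [0, 1]], R = I,
    # and returns (1, 0, 0.5) followed by the unchanged end-effector quaternion.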
def publish_mocap_to_map(self, mocap_poses):
tf_list = []
for rigid_name, mocap_pose in mocap_poses.items():
t = TransformStamped()
t.header.frame_id = 'map'
t.header.stamp = rospy.Time.now()
t.child_frame_id = rigid_name
t.transform.translation.x = mocap_pose.pose.position.x
t.transform.translation.y = mocap_pose.pose.position.y
t.transform.translation.z = mocap_pose.pose.position.z
            t.transform.rotation.x = mocap_pose.pose.orientation.x
            t.transform.rotation.y = mocap_pose.pose.orientation.y
            t.transform.rotation.z = mocap_pose.pose.orientation.z
            t.transform.rotation.w = mocap_pose.pose.orientation.w
tf_list.append(t)
tfm = TFMessage(tf_list)
# Publish tf message
self.tf_pub.publish(tfm)
if __name__ == '__main__':
tf_manager = TfManager(standalone=True)
while not rospy.is_shutdown():
transform = tf_manager.get_link_pose('hand_palm_link')
test_pose1 = np.array([-0.5, 0.0, 0.0]) # -0.5 to z direction on map
test_pose1 = tf_manager.transform_coordinate(test_pose1)
test_pose2 = np.array([0.0, 1.0, 0.0]) # -1.0 to x direction on map
test_pose2 = tf_manager.transform_coordinate(test_pose2)
test_pose3 = np.array([-0.3, -1.0, 0.0])
test_pose3 = tf_manager.transform_coordinate(test_pose3)
ee_pose = tf_manager.get_link_pose()
if ee_pose is None:
continue
test_pose4 = np.array([
ee_pose.pose.position.x,
ee_pose.pose.position.y,
ee_pose.pose.position.z
])
test_pose4 = tf_manager.transform_coordinate(test_pose4)
test_pose5 = np.zeros(3)
test_pose5 = tf_manager.transform_coordinate(test_pose5)
print('test_1: ', test_pose1)
print('test_2: ', test_pose2)
print('test_3: ', test_pose3)
print('test_4: ', test_pose4)
print('test_5: ', test_pose5)
| 5,495 |
Python
| 32.512195 | 109 | 0.592903 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/mocap_interface.py
|
#!/bin/env/python3
import rospy
import numpy as np
from geometry_msgs.msg import PoseStamped
from scipy.spatial.transform import Rotation as R
class MocapInterface(object):
def __init__(self, standalone=False):
if standalone:
rospy.init_node('mocap_interface')
# Mocap list
self.rigid_body_list = (
"base",
"blue_gear",
"green_gear",
"red_gear",
"yellow_shaft",
"red_shaft",
"end_effector"
)
# Mocap to map
self.translation = np.array([0.0, 0.0, 0.0])
self.rotation = np.array([
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0]
])
# Rigid body pose dictionary
self.rigid_posestamped = {
name: PoseStamped() for name in self.rigid_body_list
}
# Subscriber
self._mocap_sub = [
rospy.Subscriber('/mocap_pose_topic/{0}_pose'.format(self.rigid_body_list[i]), PoseStamped, self._mocap_cb, callback_args=i)
for i in range(len(self.rigid_body_list))
]
def _mocap_cb(self, mocap_data, id):
converted_pose = self.convert_mocap_to_map(mocap_data)
mocap_data.header.frame_id = 'map'
mocap_data.pose.position.x = converted_pose[0]
mocap_data.pose.position.y = converted_pose[1]
mocap_data.pose.position.z = converted_pose[2]
mocap_data.pose.orientation.x = converted_pose[3]
mocap_data.pose.orientation.y = converted_pose[4]
mocap_data.pose.orientation.z = converted_pose[5]
mocap_data.pose.orientation.w = converted_pose[6]
self.rigid_posestamped[self.rigid_body_list[id]] = mocap_data
def get_pose(self, name):
return self.rigid_posestamped[name]
def get_poses(self):
return self.rigid_posestamped
def convert_mocap_to_map(self, mocap_data):
# Initialize vector and matrix
trans_mat = np.zeros((4, 4))
qc_trans = np.zeros(4)
qc_rot = np.zeros(4)
# Calculate translation
qc_trans[0] = mocap_data.pose.position.x
qc_trans[1] = mocap_data.pose.position.y
qc_trans[2] = mocap_data.pose.position.z
qc_trans[3] = 1.0
trans_mat[3:, 3:] = 1.0
trans_mat[:3, 3] = self.translation
trans_mat[:3, :3] = self.rotation
qw_trans = trans_mat @ qc_trans
qw_trans = qw_trans[:3]
# Calculate rotation
qc_rot[0] = mocap_data.pose.orientation.x
qc_rot[1] = mocap_data.pose.orientation.y
qc_rot[2] = mocap_data.pose.orientation.z
qc_rot[3] = mocap_data.pose.orientation.w
qc_rot_mat = R.from_quat(qc_rot).as_matrix()
qw_rot_mat = self.rotation @ qc_rot_mat
qw_rot = R.from_matrix(qw_rot_mat).as_quat()
return np.concatenate([qw_trans, qw_rot])
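    # Axis-remap example (sketch): with self.rotation above, a mocap point
    # (x, y, z) maps to (z, x, y) in the map frame (plus self.translation),
    # e.g. mocap (0.1, 0.2, 0.3) -> map (0.3, 0.1, 0.2).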
if __name__ == '__main__':
mocap_interface = MocapInterface(standalone=True)
while not rospy.is_shutdown():
# Test each rigid body
base_pose = mocap_interface.get_pose('base')
print('base_pose: ', base_pose)
blue_gear_pose = mocap_interface.get_pose('blue_gear')
print('blue_gear_pose: ', blue_gear_pose)
green_gear_pose = mocap_interface.get_pose('green_gear')
print('green_gear_pose: ', green_gear_pose)
red_gear_pose = mocap_interface.get_pose('red_gear')
print('red_gear_pose: ', red_gear_pose)
yellow_shaft_pose = mocap_interface.get_pose('yellow_shaft')
print('yellow_shaft_pose: ', yellow_shaft_pose)
red_shaft_pose = mocap_interface.get_pose('red_shaft')
print('red_shaft_pose: ', red_shaft_pose)
end_effector_pose = mocap_interface.get_pose('end_effector')
print('end_effector_pose: ', end_effector_pose)
# Test all rigid bodies
rigid_poses = mocap_interface.get_poses()
print('rigid_poses: ', rigid_poses['blue_gear'])
| 4,000 |
Python
| 33.791304 | 136 | 0.5855 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/force_sensor_interface.py
|
import sys
import math
import rospy
from geometry_msgs.msg import WrenchStamped
class ForceSensorInterface(object):
def __init__(self, standalone=False):
if standalone:
rospy.init_node('force_torque_sensor_interface')
self._force_data_x = 0.0
self._force_data_y = 0.0
self._force_data_z = 0.0
# Subscribe force torque sensor data from HSRB
self._wrist_wrench_sub = rospy.Subscriber(
'/hsrb/wrist_wrench/raw', WrenchStamped, self._ft_sensor_callback
)
# Wait for connection
try:
rospy.wait_for_message('/hsrb/wrist_wrench/raw', WrenchStamped, timeout=10.0)
except Exception as e:
rospy.logerr(e)
sys.exit(1)
def _ft_sensor_callback(self, data):
self._force_data_x = data.wrench.force.x
self._force_data_y = data.wrench.force.y
self._force_data_z = data.wrench.force.z
def initialize_ft(self):
self._force_data_x = 0.0
self._force_data_y = 0.0
self._force_data_z = 0.0
def get_current_force(self):
return [self._force_data_x, self._force_data_y, self._force_data_z]
def compute_difference(self, pre_data_list, post_data_list, calc_type='l1'):
if (len(pre_data_list) != len(post_data_list)):
raise ValueError('Argument lists differ in length')
        # Calculate the difference (L1: signed sum, L2: Euclidean norm)
if calc_type == 'l1':
l1_sums = sum([b - a for (a, b) in zip(pre_data_list, post_data_list)])
return l1_sums
elif calc_type == 'l2':
l2_sums = sum([math.pow(b - a, 2) for (a, b) in zip(pre_data_list, post_data_list)])
return math.sqrt(l2_sums)
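    # Example (sketch): compute_difference([0.0, 0.0, 9.81], [0.0, 0.0, 14.71])
    # returns 4.9 (signed L1 sum), while calc_type='l2' returns the Euclidean
    # norm of the per-axis differences.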
if __name__ == '__main__':
ft_interface = ForceSensorInterface(standalone=True)
rate = rospy.Rate(50)
while not rospy.is_shutdown():
prev_ft_data = ft_interface.get_current_force()
input('wait_for_user')
curr_ft_data = ft_interface.get_current_force()
force_difference = ft_interface.compute_difference(prev_ft_data, curr_ft_data)
weight = round(force_difference / 9.81 * 1000, 1)
print('weight:', weight)
rate.sleep()
| 2,244 |
Python
| 32.014705 | 96 | 0.595365 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/planner/cross_entropy_method.py
|
import numpy as np
from .planner import Planner
class CrossEntropyMethod(Planner):
def __init__(self):
super().__init__()
def compute_path(self):
pass
| 176 |
Python
| 16.699998 | 34 | 0.630682 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/planner/planner.py
|
import numpy as np
class Planner(object):
def __init__(self):
pass
    def compute_path(self):
        # Abstract method: concrete planners must override this
        raise NotImplementedError("Implement compute_path")
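# Usage sketch (hypothetical subclass, not part of the package): concrete
# planners override compute_path, e.g.
#   class WaypointPlanner(Planner):
#       def compute_path(self, goal, num_steps=10):
#           return np.linspace(np.zeros(3), np.asarray(goal), num_steps)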
| 187 |
Python
| 17.799998 | 59 | 0.652406 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/planner/linear_interpolation.py
|
import numpy as np
from .planner import Planner
class LinearInterpolationPlanner(Planner):
def __init__(self):
super().__init__()
def compute_path(self, op_pose, num_steps=10):
# Compute linear interpolation path
assert num_steps > 1, "Too short to create waypoints"
traj_x = np.linspace(0.0, op_pose[0], num_steps)
traj_y = np.linspace(0.0, op_pose[1], num_steps)
traj_z = np.linspace(0.0, op_pose[2], num_steps)
trajectory = np.dstack((traj_x, traj_y, traj_z))
return trajectory
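# Example (sketch): interpolating from the origin to op_pose = (0.1, 0.2, 0.3)
#   planner = LinearInterpolationPlanner()
#   traj = planner.compute_path((0.1, 0.2, 0.3), num_steps=5)  # np.dstack -> shape (1, 5, 3)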
| 557 |
Python
| 29.999998 | 61 | 0.621185 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/planner/spline_interpolation.py
|
import numpy as np
from .planner import Planner
class SplineInterpolationPlanner(Planner):
def __init__(self, offset):
super().__init__()
# Set parameters
self.offset = offset
def compute_path(self, op_pose, num_steps=10):
        # Placeholder: currently identical to linear interpolation (self.offset unused)
assert num_steps > 1, "Too short to create waypoints"
traj_x = np.linspace(0.0, op_pose[0], num_steps)
traj_y = np.linspace(0.0, op_pose[1], num_steps)
traj_z = np.linspace(0.0, op_pose[2], num_steps)
trajectory = np.dstack((traj_x, traj_y, traj_z))
return trajectory
| 619 |
Python
| 28.523808 | 61 | 0.615509 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/feedback/demo.py
|
#!/usr/bin/env/python3
import tf
import sys
import rospy
import moveit_commander
import numpy as np
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_2d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from hsr_py_interface import HSRPyInterface
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from geometry_msgs.msg import Pose
from controller_manager_msgs.srv import ListControllers
class ExecutePlan(object):
def __init__(self, grasp_type='side'):
# Initialize moveit
moveit_commander.roscpp_initialize(sys.argv)
# Initialize ROS node
rospy.init_node('execute_tamp_plan')
# Feedback rate
self.rate = rospy.Rate(10)
# Threshold
self.move_threshold = 0.05
self.pick_threshold = 0.05
self.place_threshold = 0.04 # 0.038
self.stack_threshold = 0.04 # 0.038
# Core module
self.tamp_planner = TAMPPlanner()
self.hsr_py = HSRPyInterface()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
self.tf_manager = TfManager()
# Initialize Robot
self.initialize_robot(grasp_type)
def initialize_robot(self, grasp_type='side'):
self.check_status()
# Set moveit commander
self.robot = moveit_commander.RobotCommander()
self.arm = moveit_commander.MoveGroupCommander('arm', wait_for_servers=0.0)
self.base = moveit_commander.MoveGroupCommander('base', wait_for_servers=0.0)
self.gripper = moveit_commander.MoveGroupCommander('gripper', wait_for_servers=0.0)
self.whole_body = moveit_commander.MoveGroupCommander('whole_body', wait_for_servers=0.0)
# Set planning parameters
self.set_planning_parameter()
# Set arm to configuration position
self.hsr_py.initialize_arm()
# Set base to configuration position
self.hsr_py.initialize_base()
# Move to neutral
self.hsr_py.set_task_pose(grasp_type)
def set_planning_parameter(self):
# Planning parameters
self.whole_body.allow_replanning(True)
self.whole_body.set_planning_time(3)
self.whole_body.set_pose_reference_frame('map')
# Scene parameters
self.scene = moveit_commander.PlanningSceneInterface()
self.scene.remove_world_object()
def check_status(self):
# Make sure the controller is running
rospy.wait_for_service('/hsrb/controller_manager/list_controllers')
list_controllers = rospy.ServiceProxy('/hsrb/controller_manager/list_controllers', ListControllers)
running = False
while running is False:
rospy.sleep(0.1)
for c in list_controllers().controller:
if c.name == 'arm_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'head_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'gripper_controller' and c.state == 'running':
running |= True
if c.name == 'omni_base_controller' and c.state == 'running':
running |= True
return running
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args):
# Get rigid body poses from mocap
mocap_poses = self.mocap_interface.get_poses()
# Modify plan
action_name, modified_action, ee_mode = self.path_modifier.post_process(action_name, args, mocap_poses)
return modified_action, ee_mode
def check_ee_mode(self):
# Get end effector pose
ee_pose = self.whole_body.get_current_pose()
ee_rot = np.array([
ee_pose.pose.orientation.x,
ee_pose.pose.orientation.y,
ee_pose.pose.orientation.z,
ee_pose.pose.orientation.w
])
ee_euler = R.from_quat(ee_rot).as_euler('xyz')
# Check whether end effector is vertical to ground plane, or horizontal
        if -0.8 < ee_euler[1] and 0.8 > ee_euler[1]:
            ee_mode = 'vertical'
        elif -2.0 < ee_euler[1] and -1.0 > ee_euler[1]:
            ee_mode = 'horizontal'
        else:
            # Fallback so ee_mode is always defined for intermediate pitches
            ee_mode = 'horizontal'
        return ee_mode
def create_trajectory(self, diff_pose, num_steps=2):
        # For now, a simple linear interpolation between waypoints
        assert num_steps > 1, "Too short to create waypoints"
traj_x = np.linspace(0.0, diff_pose[0], num_steps)
traj_y = np.linspace(0.0, diff_pose[1], num_steps)
traj_z = np.linspace(0.0, diff_pose[2], num_steps)
trajectory = np.dstack((traj_x, traj_y, traj_z))
return trajectory
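    # Example (sketch): create_trajectory((0.1, 0.0, -0.05), num_steps=3)
    # yields waypoints (0, 0, 0) -> (0.05, 0, -0.025) -> (0.1, 0, -0.05),
    # stacked by np.dstack into an array of shape (1, 3, 3).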
def set_target_pose(self, trajectory, move_mode='outward', ee_mode='horizontal'):
if move_mode == 'outward':
# Extract full trajectory
diff_ee_traj = trajectory[0]
goal_way_points = []
for i in range(1, len(diff_ee_traj)):
# Transform difference pose on end effector frame to map frame
target_map_pose = self.tf_manager.transform_coordinate(diff_ee_traj[i])
if ee_mode == 'horizontal':
goal_pose = Pose()
goal_pose.position.x = target_map_pose[0]
goal_pose.position.y = target_map_pose[1]
goal_pose.position.z = target_map_pose[2]
goal_pose.orientation.x = 0.5
goal_pose.orientation.y = 0.5
goal_pose.orientation.z = 0.5
goal_pose.orientation.w = -0.5
goal_way_points.append(goal_pose)
elif ee_mode == 'vertical':
goal_pose = Pose()
goal_pose.position.x = target_map_pose[0]
goal_pose.position.y = target_map_pose[1]
goal_pose.position.z = target_map_pose[2]
goal_pose.orientation.x = 0.0
goal_pose.orientation.y = 0.707106781
goal_pose.orientation.z = 0.707106781
goal_pose.orientation.w = 0.0
goal_way_points.append(goal_pose)
return goal_way_points
elif move_mode == 'return':
# Extract full trajectory
diff_ee_traj = trajectory[0]
goal_way_points = []
for i in range(1, len(diff_ee_traj)):
# Transform difference pose on end effector frame to map frame
target_map_pose = self.tf_manager.transform_coordinate(diff_ee_traj[i])
if ee_mode == 'horizontal':
goal_pose = Pose()
goal_pose.position.x = diff_ee_traj[i][0]
goal_pose.position.y = diff_ee_traj[i][1] - 0.12 # TODO: modify
goal_pose.position.z = diff_ee_traj[i][2]
goal_pose.orientation.x = 0.5
goal_pose.orientation.y = 0.5
goal_pose.orientation.z = 0.5
goal_pose.orientation.w = -0.5
goal_way_points.append(goal_pose)
elif ee_mode == 'vertical':
goal_pose = Pose()
goal_pose.position.x = diff_ee_traj[i][0]
goal_pose.position.y = diff_ee_traj[i][1] - 0.12 # TODO: modify
goal_pose.position.z = diff_ee_traj[i][2] - 0.2 # TODO: modify
goal_pose.orientation.x = 0.0
goal_pose.orientation.y = 0.707106781
goal_pose.orientation.z = 0.707106781
goal_pose.orientation.w = 0.0
goal_way_points.append(goal_pose)
return goal_way_points
def modify_pose(self, diff_ee_pose, ee_mode):
if ee_mode == 'horizontal':
return (diff_ee_pose[0], diff_ee_pose[1], diff_ee_pose[2])
elif ee_mode == 'vertical':
return (-diff_ee_pose[1], diff_ee_pose[0], diff_ee_pose[2])
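    # Example (sketch): in 'vertical' mode the hand frame is rotated about the
    # approach axis, so a map-frame offset (dx, dy, dz) is remapped to
    # (-dy, dx, dz); modify_pose((0.1, 0.2, 0.0), 'vertical') -> (-0.2, 0.1, 0.0).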
def execute(self):
plan = self.plan()
if plan is None:
return None
for i, (action_name, args) in enumerate(plan):
print('action_name:', action_name)
if action_name == 'move':
(_, diff_ee_pose), _ = self.process(action_name, args)
# Set end effector mode
ee_mode = self.check_ee_mode()
# Move to next pose
diff_ee_pose = self.modify_pose(diff_ee_pose, ee_mode)
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.set_target_pose(target_traj, ee_mode=ee_mode)
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
elif action_name == 'pick':
finish = False
(init_ee_pose, _), _ = self.process(action_name, args)
while not finish:
(_, diff_ee_pose), ee_mode = self.process(action_name, args)
if ee_mode == 'horizontal':
                        # Create trajectory
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.set_target_pose(target_traj, ee_mode=ee_mode)
# Move and plan to grasp pose
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
if np.sum(np.absolute(diff_ee_pose)) < self.pick_threshold:
finish = True
# Grasp object
self.hsr_py.close_gripper()
# Move to terminal pose
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
elif ee_mode == 'vertical':
finish = True
# Move to target object & grasp object
self.hsr_py.grasp_gear(diff_ee_pose)
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return', ee_mode='vertical')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
# Sleep up to rate
self.rate.sleep()
elif action_name == 'place':
finish = False
(init_ee_pose, _), _ = self.process(action_name, args)
while not finish:
(_, diff_ee_pose), ee_mode = self.process(action_name, args)
                    # Create trajectory
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.set_target_pose(target_traj, ee_mode=ee_mode)
# Move and plan to place pose
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
print('difference (place):', np.sum(np.absolute(diff_ee_pose)))
if np.sum(np.absolute(diff_ee_pose)) < self.place_threshold:
finish = True
# Sleep until stable
rospy.sleep(1.0)
# Put down end effector
self.hsr_py.insert_object(ee_mode)
# Release object
self.hsr_py.open_gripper()
# Move to terminal pose
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
# Sleep up to rate
self.rate.sleep()
elif action_name == 'stack':
finish = False
(init_ee_pose, _), _ = self.process(action_name, args)
while not finish:
(_, diff_ee_pose), ee_mode = self.process(action_name, args)
                    # Create trajectory
diff_ee_pose = self.modify_pose(diff_ee_pose, ee_mode)
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.set_target_pose(target_traj, ee_mode=ee_mode)
# Move and plan to stack pose
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
print('diff_ee_pose (stack):', diff_ee_pose)
if np.sum(np.absolute(diff_ee_pose)) < self.stack_threshold:
finish = True
# Sleep until stable
rospy.sleep(1.0)
# Put down end effector
self.hsr_py.insert_object(ee_mode)
# Release object
self.hsr_py.open_gripper()
# Move to terminal pose
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
# Sleep up to rate
self.rate.sleep()
else:
continue
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 14,549 |
Python
| 39.304709 | 114 | 0.530552 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/feedback/post_process.py
|
#!/usr/bin/env/python3
import numpy as np
class PlanModifier(object):
def __init__(self, x_offset=0.0, y_offset=-0.065, z_offset=-0.04):
self.mocap_x_offset = x_offset
self.mocap_y_offset = y_offset
self.mocap_z_offset = z_offset
self.left_hole_x = -0.10
self.left_hole_y = -0.18
self.left_hole_z = 0.125 # 0.135 # 0.125 # 0.12
self.right_hole_x = 0.095
self.right_hole_y = -0.18
self.right_hole_z = 0.142 # 0.15 # 0.145 # 0.14
self.x_scale = 0.0 # 0.0~0.1
self.y_scale = 0.065 # 0.0~0.1
self.z_scale = 0.0 # 0.0~0.0
self.ee_mode = 'horizontal'
self.block_rigid_map = {
'A' : 'red_shaft',
'B' : 'green_gear',
'C' : 'yellow_shaft',
'D' : 'blue_gear'
}
def post_process(self, action_name, args, mocap_poses, grasp_type='side'):
"""
Modify plan using sensor data.
Args:
plan (list): plan is trajectory of the tamp.
robot_pose (list): robot_pose consists of base_pose, end_effector_pose, gripper.
rigid_poses (dict): rigid_pose consists of captured rigid body poses
Returns:
commands (list): commands is modified plan
"""
        # Default when the action name or grasp type is not recognized
        new_command = None
        ee_pose = mocap_poses['end_effector']
if grasp_type == 'side':
if action_name == 'move':
robot, init_robot_pose, way_point, term_robot_pose = args
# Modify end effector position from mocap marker attached position
if self.ee_mode == 'horizontal':
ee_pose_x = ee_pose.pose.position.x + self.mocap_x_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_z_offset
elif self.ee_mode == 'vertical':
ee_pose_x = ee_pose.pose.position.x - self.mocap_z_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_x_offset
# Get end effector orientation
ee_ori_x = ee_pose.pose.orientation.x
ee_ori_y = ee_pose.pose.orientation.y
ee_ori_z = ee_pose.pose.orientation.z
ee_ori_w = ee_pose.pose.orientation.w
# Initial pose
init_ee_pose = np.array([
ee_pose_x, ee_pose_y, ee_pose_z,
ee_ori_x, ee_ori_y, ee_ori_z, ee_ori_w
], dtype=np.float64)
# Calculate difference from target pose in configuration space
diff_ee_pose = np.array([
self.x_scale * 0.0,
self.y_scale * (term_robot_pose[0] - init_robot_pose[0]),
self.z_scale * (term_robot_pose[1] - init_robot_pose[1])
], dtype=np.float64)
new_command = (action_name, [init_ee_pose, diff_ee_pose], self.ee_mode)
elif action_name == 'pick':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Set pick hyperparameters
if block == 'A':
ee_x_offset = 0.03
ee_y_offset = 0.0
ee_z_offset = 0.04
self.ee_mode = 'horizontal'
elif block == 'B':
ee_x_offset = -0.05
ee_y_offset = 0.0
ee_z_offset = 0.11
self.ee_mode = 'vertical'
elif block == 'C':
ee_x_offset = 0.03
ee_y_offset = 0.0
ee_z_offset = 0.04
self.ee_mode = 'horizontal'
elif block == 'D':
ee_x_offset = -0.05
ee_y_offset = 0.0
ee_z_offset = 0.13
self.ee_mode = 'vertical'
# Modify end effector position from mocap marker attached position
if self.ee_mode == 'horizontal':
ee_pose_x = ee_pose.pose.position.x + self.mocap_x_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_z_offset
elif self.ee_mode == 'vertical':
ee_pose_x = ee_pose.pose.position.x - self.mocap_z_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_x_offset
# Get end effector orientation
ee_ori_x = ee_pose.pose.orientation.x
ee_ori_y = ee_pose.pose.orientation.y
ee_ori_z = ee_pose.pose.orientation.z
ee_ori_w = ee_pose.pose.orientation.w
# Initial pose
init_ee_pose = np.array([
ee_pose_x, ee_pose_y, ee_pose_z,
ee_ori_x, ee_ori_y, ee_ori_z, ee_ori_w
], dtype=np.float64)
# Map symbolic block name to real block name
rigid_name = self.block_rigid_map[block]
rigid_pose = mocap_poses[rigid_name]
# Calculate grasp pose in configuration space
rigid_pose_x = rigid_pose.pose.position.x
rigid_pose_y = rigid_pose.pose.position.y
rigid_pose_z = rigid_pose.pose.position.z
diff_ee_pose = np.array([
rigid_pose_z - ee_pose_z - ee_x_offset,
rigid_pose_x - ee_pose_x - ee_y_offset,
rigid_pose_y - ee_pose_y - ee_z_offset,
], dtype=np.float64)
new_command = (action_name, [init_ee_pose, diff_ee_pose], self.ee_mode)
elif action_name == 'place':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Get hole pose
base_pose = mocap_poses['base']
# Modify hole position from mocap marker
if block == 'A':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
self.ee_mode = 'horizontal'
elif block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
self.ee_mode = 'vertical'
elif block == 'C':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
self.ee_mode = 'horizontal'
elif block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
self.ee_mode = 'vertical'
# Modify end effector position from mocap marker attached position
if self.ee_mode == 'horizontal':
ee_pose_x = ee_pose.pose.position.x + self.mocap_x_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_z_offset
elif self.ee_mode == 'vertical':
ee_pose_x = ee_pose.pose.position.x - self.mocap_z_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_x_offset
# Get end effector orientation
ee_ori_x = ee_pose.pose.orientation.x
ee_ori_y = ee_pose.pose.orientation.y
ee_ori_z = ee_pose.pose.orientation.z
ee_ori_w = ee_pose.pose.orientation.w
# Initial pose
init_ee_pose = np.array([
ee_pose_x, ee_pose_y, ee_pose_z,
ee_ori_x, ee_ori_y, ee_ori_z, ee_ori_w
], dtype=np.float64)
# Map symbolic block name to real block name
rigid_name = self.block_rigid_map[block]
rigid_pose = mocap_poses[rigid_name]
# Calculate place pose in configuration space
rigid_pose_x = rigid_pose.pose.position.x
rigid_pose_y = rigid_pose.pose.position.y
rigid_pose_z = rigid_pose.pose.position.z
diff_ee_pose = np.array([
hole_pose_z - rigid_pose_z,
hole_pose_x - rigid_pose_x,
hole_pose_y - rigid_pose_y,
], dtype=np.float64)
new_command = (action_name, [init_ee_pose, diff_ee_pose], self.ee_mode)
elif action_name == 'stack':
robot, u_block, u_pose, grasp_diff_pose, \
term_robot_pose, l_block, l_pose = args
# Get hole pose
base_pose = mocap_poses['base']
# Modify hole position from mocap marker
if u_block == 'A':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z - 0.01 # TODO: modify
self.ee_mode = 'horizontal'
elif u_block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x + 0.0175 # TODO: modify
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z - 0.015 # TODO: modify
self.ee_mode = 'vertical'
elif u_block == 'C':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z - 0.01 # TODO: modify
self.ee_mode = 'horizontal'
elif u_block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x + 0.0175 # TODO: modify
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z - 0.015 # TODO: modify
self.ee_mode = 'vertical'
# Modify end effector position from mocap marker attached position
if self.ee_mode == 'horizontal':
ee_pose_x = ee_pose.pose.position.x + self.mocap_x_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_z_offset
elif self.ee_mode == 'vertical':
ee_pose_x = ee_pose.pose.position.x - self.mocap_z_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_x_offset
# Get end effector orientation
ee_ori_x = ee_pose.pose.orientation.x
ee_ori_y = ee_pose.pose.orientation.y
ee_ori_z = ee_pose.pose.orientation.z
ee_ori_w = ee_pose.pose.orientation.w
# Initial pose
init_ee_pose = np.array([
ee_pose_x, ee_pose_y, ee_pose_z,
ee_ori_x, ee_ori_y, ee_ori_z, ee_ori_w
], dtype=np.float64)
# Map symbolic block name to real block name
u_rigid_name = self.block_rigid_map[u_block]
l_rigid_name = self.block_rigid_map[l_block]
u_rigid_pose = mocap_poses[u_rigid_name]
l_rigid_pose = mocap_poses[l_rigid_name]
# Calculate stack pose in configuration space
rigid_pose_x = u_rigid_pose.pose.position.x
rigid_pose_y = u_rigid_pose.pose.position.y
rigid_pose_z = u_rigid_pose.pose.position.z
diff_ee_pose = np.array([
hole_pose_z - rigid_pose_z,
hole_pose_x - rigid_pose_x,
hole_pose_y - rigid_pose_y,
], dtype=np.float64)
new_command = (action_name, [init_ee_pose, diff_ee_pose], self.ee_mode)
else:
pass
elif grasp_type == 'top':
ee_pose_x = ee_pose.pose.position.x + self.mocap_x_offset
ee_pose_y = ee_pose.pose.position.y + self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_z_offset
if action_name == 'move':
robot, init_robot_pose, way_point, term_robot_pose = args
# Calculate grasp pose in configuration space
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
diff_ee_pose = np.array([
self.x_scale * 0.0,
self.y_scale * (term_robot_pose[0] - init_robot_pose[0]),
self.z_scale * (term_robot_pose[1] - init_robot_pose[1])
])
term_ee_pose = init_ee_pose + diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
elif action_name == 'pick':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Map symbolic block name to real block name
rigid_name = self.block_rigid_map[block]
rigid_pose = mocap_poses[rigid_name]
                # Get current end effector pose
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
# Calculate grasp pose in configuration space
rigid_pose_x = rigid_pose.pose.position.x
rigid_pose_y = rigid_pose.pose.position.y
rigid_pose_z = rigid_pose.pose.position.z
diff_ee_pose = np.array([
ee_pose_y - rigid_pose_y,
ee_pose_x - rigid_pose_x,
ee_pose_z - rigid_pose_z
])
# Get terminal end effector pose as reverse of diff_ee_pose
term_ee_pose = -diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
elif action_name == 'place':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Get hole pose
base_pose = mocap_poses['base']
if block == 'A' or block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
elif block == 'C' or block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
                # Get current end effector pose
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
# Calculate place pose in configuration space
diff_ee_pose = np.array([
ee_pose_y - hole_pose_y,
ee_pose_x - hole_pose_x,
ee_pose_z - hole_pose_z
])
# Get terminal end effector pose as reverse of diff_ee_pose
term_ee_pose = -diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
elif action_name == 'stack':
robot, u_block, u_pose, grasp_diff_pose, \
term_robot_pose, l_block, l_pose = args
# Map symbolic block name to real block name
u_rigid_name = self.block_rigid_map[u_block]
l_rigid_name = self.block_rigid_map[l_block]
u_rigid_pose = mocap_poses[u_rigid_name]
l_rigid_pose = mocap_poses[l_rigid_name]
# Get hole pose
base_pose = mocap_poses['base']
if u_block == 'A' or u_block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
elif u_block == 'C' or u_block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
                # Get current end effector pose
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
# Calculate stack pose in configuration space
diff_ee_pose = np.array([
ee_pose_y - hole_pose_y,
ee_pose_x - hole_pose_x,
ee_pose_z - hole_pose_z
])
# Get terminal end effector pose as reverse of diff_ee_pose
term_ee_pose = -diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
else:
pass
return new_command
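    # Note: commands in the 'top' branch carry [init_ee_pose, diff_ee_pose,
    # term_ee_pose]; for 'pick'/'place'/'stack' the terminal pose is simply
    # -diff_ee_pose (the retreat reverses the approach), while 'move' uses
    # init_ee_pose + diff_ee_pose.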
| 18,528 |
Python
| 44.192683 | 103 | 0.489259 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/feedback/execute_plan.py
|
#!/usr/bin/env python3
import tf
import sys
import rospy
import moveit_commander
import numpy as np
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_2d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from hsr_py_interface import HSRPyInterface
from force_sensor_interface import ForceSensorInterface
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from controller.ik_controller import IKController
from geometry_msgs.msg import Pose
from controller_manager_msgs.srv import ListControllers
class ExecutePlan(object):
def __init__(self, grasp_type='side'):
# Initialize moveit
moveit_commander.roscpp_initialize(sys.argv)
# Initialize ROS node
# rospy.init_node('execute_tamp_plan')
# Feedback rate
self.rate = rospy.Rate(50)
# Threshold
self.move_threshold = 0.05
self.pick_threshold = 0.05
self.place_threshold = 0.04 # 0.038
self.stack_threshold = 0.032 # 0.038
self.weight_threshold = 500
# Core module
self.tamp_planner = TAMPPlanner()
self.hsr_py = HSRPyInterface()
self.tf_manager = TfManager()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
self.ft_interface = ForceSensorInterface()
self.ik_controller = IKController()
# Initialize Robot
self.initialize_robot(grasp_type)
def initialize_robot(self, grasp_type='side'):
self.check_status()
# Set moveit commander
self.robot = moveit_commander.RobotCommander()
self.arm = moveit_commander.MoveGroupCommander('arm', wait_for_servers=0.0)
self.base = moveit_commander.MoveGroupCommander('base', wait_for_servers=0.0)
self.gripper = moveit_commander.MoveGroupCommander('gripper', wait_for_servers=0.0)
self.whole_body = moveit_commander.MoveGroupCommander('whole_body', wait_for_servers=0.0)
# Set planning parameters
self.set_planning_parameter()
# Set arm to configuration position
self.hsr_py.initialize_arm()
# Set base to configuration position
self.hsr_py.initialize_base()
# Move to neutral
self.hsr_py.set_task_pose(grasp_type)
def set_planning_parameter(self):
# Planning parameters
self.whole_body.allow_replanning(True)
self.whole_body.set_planning_time(3)
self.whole_body.set_pose_reference_frame('map')
# Scene parameters
self.scene = moveit_commander.PlanningSceneInterface()
self.scene.remove_world_object()
def check_status(self):
# Make sure the controller is running
rospy.wait_for_service('/hsrb/controller_manager/list_controllers')
list_controllers = rospy.ServiceProxy('/hsrb/controller_manager/list_controllers', ListControllers)
running = False
while running is False:
rospy.sleep(0.1)
for c in list_controllers().controller:
if c.name == 'arm_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'head_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'gripper_controller' and c.state == 'running':
running |= True
if c.name == 'omni_base_controller' and c.state == 'running':
running |= True
return running
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args):
# Get rigid body poses from mocap
mocap_poses = self.mocap_interface.get_poses()
# Modify plan
action_name, modified_action, ee_mode = self.path_modifier.post_process(action_name, args, mocap_poses)
return modified_action, ee_mode
def check_ee_mode(self):
# Get end effector pose
ee_pose = self.whole_body.get_current_pose()
ee_rot = np.array([
ee_pose.pose.orientation.x,
ee_pose.pose.orientation.y,
ee_pose.pose.orientation.z,
ee_pose.pose.orientation.w
])
ee_euler = R.from_quat(ee_rot).as_euler('xyz')
        # Check whether the end effector is vertical or horizontal relative to the ground plane
        if -0.8 < ee_euler[1] < 0.8:
            ee_mode = 'vertical'
        elif -2.0 < ee_euler[1] < -1.0:
            ee_mode = 'horizontal'
        else:
            # Unexpected hand orientation; fail loudly instead of leaving
            # ee_mode unbound.
            raise ValueError('Unexpected end effector pitch: {:.3f}'.format(ee_euler[1]))
return ee_mode
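    # Note (assumption): ee_euler[1] is the hand pitch in the map frame; values
    # near 0 rad appear to correspond to a downward-facing (vertical) grasp and
    # values near -pi/2 to a forward-facing (horizontal) grasp.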
def create_trajectory(self, diff_pose, num_steps=2):
        # Simple linear interpolation in time
        assert num_steps > 1, "Too short to create waypoints"
# num_steps = int(np.max(np.abs(diff_pose))//0.03)
# if num_steps < 2:
# num_steps = 2
traj_x = np.linspace(0.0, diff_pose[0], num_steps)
traj_y = np.linspace(0.0, diff_pose[1], num_steps)
traj_z = np.linspace(0.0, diff_pose[2], num_steps)
trajectory = np.dstack((traj_x, traj_y, traj_z))
return trajectory
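    # Illustrative example (not executed): diff_pose = [0.2, 0.0, -0.1] with
    # num_steps=3 gives x = [0.0, 0.1, 0.2], y = [0.0, 0.0, 0.0],
    # z = [0.0, -0.05, -0.1]; np.dstack packs these into shape (1, 3, 3), which
    # is why callers index trajectory[0] to recover the list of waypoints.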
def set_target_pose(self, trajectory, move_mode='outward', ee_mode='horizontal'):
if move_mode == 'outward':
# Extract full trajectory
diff_ee_traj = trajectory[0]
goal_way_points = []
for i in range(1, len(diff_ee_traj)):
                # Transform the difference pose from the end effector frame to the map frame
target_map_pose = self.tf_manager.transform_coordinate(diff_ee_traj[i])
if ee_mode == 'horizontal':
goal_pose = Pose()
goal_pose.position.x = target_map_pose[0]
goal_pose.position.y = target_map_pose[1]
goal_pose.position.z = target_map_pose[2]
goal_pose.orientation.x = 0.5
goal_pose.orientation.y = 0.5
goal_pose.orientation.z = 0.5
goal_pose.orientation.w = -0.5
goal_way_points.append(goal_pose)
elif ee_mode == 'vertical':
goal_pose = Pose()
goal_pose.position.x = target_map_pose[0]
goal_pose.position.y = target_map_pose[1]
goal_pose.position.z = target_map_pose[2]
goal_pose.orientation.x = 0.0
goal_pose.orientation.y = 0.707106781
goal_pose.orientation.z = 0.707106781
goal_pose.orientation.w = 0.0
goal_way_points.append(goal_pose)
return goal_way_points
elif move_mode == 'return':
# Extract full trajectory
diff_ee_traj = trajectory[0]
goal_way_points = []
for i in range(1, len(diff_ee_traj)):
                # Transform the difference pose from the end effector frame to the map frame
target_map_pose = self.tf_manager.transform_coordinate(diff_ee_traj[i])
if ee_mode == 'horizontal':
goal_pose = Pose()
goal_pose.position.x = diff_ee_traj[i][0]
goal_pose.position.y = diff_ee_traj[i][1] - 0.12 # TODO: modify
goal_pose.position.z = diff_ee_traj[i][2]
goal_pose.orientation.x = 0.5
goal_pose.orientation.y = 0.5
goal_pose.orientation.z = 0.5
goal_pose.orientation.w = -0.5
goal_way_points.append(goal_pose)
elif ee_mode == 'vertical':
goal_pose = Pose()
goal_pose.position.x = diff_ee_traj[i][0]
goal_pose.position.y = diff_ee_traj[i][1] - 0.12 # TODO: modify
goal_pose.position.z = diff_ee_traj[i][2] - 0.2 # TODO: modify
goal_pose.orientation.x = 0.0
goal_pose.orientation.y = 0.707106781
goal_pose.orientation.z = 0.707106781
goal_pose.orientation.w = 0.0
goal_way_points.append(goal_pose)
return goal_way_points
def create_goal(self, trajectory, ee_mode='horizontal'):
# Extract full trajectory
diff_ee_traj = trajectory[0][1]
        # Transform the difference pose from the end effector frame to the map frame
target_map_pose = self.tf_manager.transform_coordinate(diff_ee_traj)
if ee_mode == 'horizontal':
goal_pose = ((target_map_pose[0], target_map_pose[1], target_map_pose[2]),
(0.5, 0.5, 0.5, -0.5))
elif ee_mode == 'vertical':
goal_pose = ((target_map_pose[0], target_map_pose[1], target_map_pose[2]),
(0.0, 0.707106781, 0.707106781, 0.0))
return goal_pose
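    # Note: (0.5, 0.5, 0.5, -0.5) and (0.0, 0.707106781, 0.707106781, 0.0) are
    # the fixed hand orientations (x, y, z, w) used throughout this file for
    # the horizontal and vertical grasp modes respectively.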
def modify_pose(self, diff_ee_pose, ee_mode):
if ee_mode == 'horizontal':
return (diff_ee_pose[0], diff_ee_pose[1], diff_ee_pose[2])
elif ee_mode == 'vertical':
return (-diff_ee_pose[1], diff_ee_pose[0], diff_ee_pose[2])
def execute(self):
plan = self.plan()
if plan is None:
return None
for i, (action_name, args) in enumerate(plan):
print('action_name:', action_name)
if action_name == 'move':
(_, diff_ee_pose), _ = self.process(action_name, args)
# Set end effector mode
ee_mode = self.check_ee_mode()
# Move to next pose
diff_ee_pose = self.modify_pose(diff_ee_pose, ee_mode)
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.set_target_pose(target_traj, ee_mode=ee_mode)
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
elif action_name == 'pick':
finish = False
(init_ee_pose, diff_ee_pose), _ = self.process(action_name, args)
self.hsr_py.fix_task_pose(diff_ee_pose)
while not finish:
(_, diff_ee_pose), ee_mode = self.process(action_name, args)
# ee_mode = 'horizontal'
if ee_mode == 'horizontal':
                        # Create trajectory
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.create_goal(target_traj, ee_mode=ee_mode)
# Move and plan to grasp pose
self.ik_controller.control(goal_pose)
if np.sum(np.absolute(diff_ee_pose)) < self.pick_threshold:
finish = True
# Grasp object
self.hsr_py.close_gripper()
# Move to terminal pose
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
elif ee_mode == 'vertical':
finish = True
# Move to target object & grasp object
self.hsr_py.grasp_gear(diff_ee_pose)
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return', ee_mode='vertical')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
# Sleep up to rate
self.rate.sleep()
elif action_name == 'place':
finish = False
(init_ee_pose, _), _ = self.process(action_name, args)
while not finish:
(_, diff_ee_pose), ee_mode = self.process(action_name, args)
                    # Create trajectory
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.create_goal(target_traj, ee_mode=ee_mode)
prev_ft_data = self.ft_interface.get_current_force()
# Move and plan to place pose
self.ik_controller.control(goal_pose)
current_ft_data = self.ft_interface.get_current_force()
force_difference = self.ft_interface.compute_difference(prev_ft_data, current_ft_data)
weight = round(force_difference / 9.81 * 1000, 1)
if np.sum(np.absolute(diff_ee_pose)) < self.place_threshold or weight > self.weight_threshold:
finish = True
# Sleep until stable
rospy.sleep(1.0)
# Put down end effector
self.hsr_py.insert_object(ee_mode)
# Release object
self.hsr_py.open_gripper()
# Move to terminal pose
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
# Sleep up to rate
self.rate.sleep()
elif action_name == 'stack':
finish = False
(init_ee_pose, _), _ = self.process(action_name, args)
while not finish:
(_, diff_ee_pose), ee_mode = self.process(action_name, args)
                    # Create trajectory
diff_ee_pose = self.modify_pose(diff_ee_pose, ee_mode)
target_traj = self.create_trajectory(diff_ee_pose)
goal_pose = self.create_goal(target_traj, ee_mode=ee_mode)
# Move and plan to stack pose
self.ik_controller.control(goal_pose)
if np.sum(np.absolute(diff_ee_pose)) < self.stack_threshold:
finish = True
# Sleep until stable
rospy.sleep(1.0)
# Put down end effector
self.hsr_py.insert_object(ee_mode)
# Release object
self.hsr_py.open_gripper()
# Move to terminal pose
target_traj = self.create_trajectory(init_ee_pose)
goal_pose = self.set_target_pose(target_traj, move_mode='return')
(plan, fraction) = self.whole_body.compute_cartesian_path(goal_pose, 0.01, 0.0, False)
self.whole_body.execute(plan, wait=True)
# Sleep up to rate
self.rate.sleep()
else:
continue
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 15,552 |
Python
| 39.085051 | 114 | 0.534915 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/feedforward/post_process.py
|
#!/usr/bin/env python3
import numpy as np
class PlanModifier(object):
def __init__(self, x_offset=0.0, y_offset=-0.065, z_offset=-0.04):
self.mocap_x_offset = x_offset
self.mocap_y_offset = y_offset
self.mocap_z_offset = z_offset
self.left_hole_x = -0.10
self.left_hole_y = -0.17
self.left_hole_z = 0.12
self.right_hole_x = 0.095
self.right_hole_y = -0.17
self.right_hole_z = 0.12
self.x_scale = 0.0 # 0.0~0.1
self.y_scale = 0.075 # 0.0~0.1
self.z_scale = 0.0 # 0.0~0.0
self.ee_mode = 'horizontal'
self.block_rigid_map = {
'A' : 'red_shaft',
'B' : 'green_gear',
'C' : 'yellow_shaft',
'D' : 'blue_gear'
}
def post_process(self, action_name, args, mocap_poses, grasp_type='side'):
"""
Modify plan using sensor data.
Args:
plan (list): plan is trajectory of the tamp.
robot_pose (list): robot_pose consists of base_pose, end_effector_pose, gripper.
rigid_poses (dict): rigid_pose consists of captured rigid body poses
Returns:
commands (list): commands is modified plan
"""
ee_pose = mocap_poses['end_effector']
if grasp_type == 'side':
if action_name == 'move':
# Parse TAMP returns
robot, init_robot_pose, way_point, term_robot_pose = args
# Calculate difference from target pose in configuration space
diff_ee_pose = np.array([
self.x_scale * 0.0,
self.y_scale * (term_robot_pose[0] - init_robot_pose[0]),
self.z_scale * (term_robot_pose[1] - init_robot_pose[1])
])
new_command = (action_name, diff_ee_pose, self.ee_mode)
elif action_name == 'pick':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Map symbolic block name to real block name
rigid_name = self.block_rigid_map[block]
rigid_pose = mocap_poses[rigid_name]
# Set pick hyperparameters
if block == 'A':
ee_x_offset = 0.03
ee_y_offset = 0.0
ee_z_offset = 0.04
self.ee_mode = 'horizontal'
elif block == 'B':
ee_x_offset = -0.05
ee_y_offset = 0.0
ee_z_offset = 0.11
self.ee_mode = 'vertical'
elif block == 'C':
ee_x_offset = 0.03
ee_y_offset = 0.0
ee_z_offset = 0.04
self.ee_mode = 'horizontal'
elif block == 'D':
ee_x_offset = -0.05
ee_y_offset = 0.0
ee_z_offset = 0.13
self.ee_mode = 'vertical'
# Modify end effector position from mocap marker attached position
if self.ee_mode == 'horizontal':
ee_pose_x = ee_pose.pose.position.x + self.mocap_x_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_z_offset
elif self.ee_mode == 'vertical':
ee_pose_x = ee_pose.pose.position.x - self.mocap_z_offset
ee_pose_y = ee_pose.pose.position.y - self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_x_offset
# Calculate grasp pose in configuration space
rigid_pose_x = rigid_pose.pose.position.x
rigid_pose_y = rigid_pose.pose.position.y
rigid_pose_z = rigid_pose.pose.position.z
diff_ee_pose = np.array([
rigid_pose_z - ee_pose_z - ee_x_offset,
rigid_pose_x - ee_pose_x - ee_y_offset,
rigid_pose_y - ee_pose_y - ee_z_offset,
])
new_command = (action_name, diff_ee_pose, self.ee_mode)
elif action_name == 'place':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Get hole pose
base_pose = mocap_poses['base']
if block == 'A' or block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
if block == 'A':
self.ee_mode = 'horizontal'
else:
self.ee_mode = 'vertical'
elif block == 'C' or block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
if block == 'C':
self.ee_mode = 'horizontal'
else:
self.ee_mode = 'vertical'
# Map symbolic block name to real block name
rigid_name = self.block_rigid_map[block]
rigid_pose = mocap_poses[rigid_name]
rigid_pose_x = rigid_pose.pose.position.x
rigid_pose_y = rigid_pose.pose.position.y
rigid_pose_z = rigid_pose.pose.position.z
# Calculate place pose in configuration space
diff_ee_pose = np.array([
hole_pose_z - rigid_pose_z,
hole_pose_x - rigid_pose_x,
hole_pose_y - rigid_pose_y,
])
new_command = (action_name, diff_ee_pose, self.ee_mode)
elif action_name == 'stack':
robot, u_block, u_pose, grasp_diff_pose, \
term_robot_pose, l_block, l_pose = args
# Get hole pose
base_pose = mocap_poses['base']
if u_block == 'A' or u_block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
if u_block == 'A':
self.ee_mode = 'horizontal'
else:
self.ee_mode = 'vertical'
elif u_block == 'C' or u_block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
if u_block == 'C':
self.ee_mode = 'horizontal'
else:
self.ee_mode = 'vertical'
# Map symbolic block name to real block name
u_rigid_name = self.block_rigid_map[u_block]
l_rigid_name = self.block_rigid_map[l_block]
u_rigid_pose = mocap_poses[u_rigid_name]
l_rigid_pose = mocap_poses[l_rigid_name]
rigid_pose_x = u_rigid_pose.pose.position.x
rigid_pose_y = u_rigid_pose.pose.position.y
rigid_pose_z = u_rigid_pose.pose.position.z
# Calculate place pose in configuration space
diff_ee_pose = np.array([
hole_pose_z - rigid_pose_z,
hole_pose_x - rigid_pose_x,
hole_pose_y - rigid_pose_y,
])
new_command = (action_name, diff_ee_pose, self.ee_mode)
else:
pass
elif grasp_type == 'top':
ee_pose_x = ee_pose.pose.position.x + self.mocap_x_offset
ee_pose_y = ee_pose.pose.position.y + self.mocap_y_offset
ee_pose_z = ee_pose.pose.position.z + self.mocap_z_offset
if action_name == 'move':
robot, init_robot_pose, way_point, term_robot_pose = args
# Calculate grasp pose in configuration space
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
diff_ee_pose = np.array([
self.x_scale * 0.0,
self.y_scale * (term_robot_pose[0] - init_robot_pose[0]),
self.z_scale * (term_robot_pose[1] - init_robot_pose[1])
])
term_ee_pose = init_ee_pose + diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
elif action_name == 'pick':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Map symbolic block name to real block name
rigid_name = self.block_rigid_map[block]
rigid_pose = mocap_poses[rigid_name]
                # Get current end effector pose
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
# Calculate grasp pose in configuration space
rigid_pose_x = rigid_pose.pose.position.x
rigid_pose_y = rigid_pose.pose.position.y
rigid_pose_z = rigid_pose.pose.position.z
diff_ee_pose = np.array([
ee_pose_y - rigid_pose_y,
ee_pose_x - rigid_pose_x,
ee_pose_z - rigid_pose_z
])
# Get terminal end effector pose as reverse of diff_ee_pose
term_ee_pose = -diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
elif action_name == 'place':
robot, block, init_block_pose, grasp_diff_pose, term_robot_pose = args
# Get hole pose
base_pose = mocap_poses['base']
if block == 'A' or block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
elif block == 'C' or block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
                # Get current end effector pose
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
# Calculate place pose in configuration space
diff_ee_pose = np.array([
ee_pose_y - hole_pose_y,
ee_pose_x - hole_pose_x,
ee_pose_z - hole_pose_z
])
# Get terminal end effector pose as reverse of diff_ee_pose
term_ee_pose = -diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
elif action_name == 'stack':
robot, u_block, u_pose, grasp_diff_pose, \
term_robot_pose, l_block, l_pose = args
# Map symbolic block name to real block name
u_rigid_name = self.block_rigid_map[u_block]
l_rigid_name = self.block_rigid_map[l_block]
u_rigid_pose = mocap_poses[u_rigid_name]
l_rigid_pose = mocap_poses[l_rigid_name]
# Get hole pose
base_pose = mocap_poses['base']
if u_block == 'A' or u_block == 'B':
hole_pose_x = base_pose.pose.position.x + self.left_hole_x
hole_pose_y = base_pose.pose.position.y + self.left_hole_y
hole_pose_z = base_pose.pose.position.z + self.left_hole_z
elif u_block == 'C' or u_block == 'D':
hole_pose_x = base_pose.pose.position.x + self.right_hole_x
hole_pose_y = base_pose.pose.position.y + self.right_hole_y
hole_pose_z = base_pose.pose.position.z + self.right_hole_z
                # Get current end effector pose
init_ee_pose = np.array([
ee_pose_x,
ee_pose_y,
ee_pose_z
])
# Calculate stack pose in configuration space
diff_ee_pose = np.array([
ee_pose_y - hole_pose_y,
ee_pose_x - hole_pose_x,
ee_pose_z - hole_pose_z - 0.10
])
# Get terminal end effector pose as reverse of diff_ee_pose
term_ee_pose = -diff_ee_pose
new_command = (action_name, [init_ee_pose, diff_ee_pose, term_ee_pose])
else:
pass
return new_command
| 13,500 |
Python
| 40.41411 | 92 | 0.472815 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/feedforward/execute_plan.py
|
#!/usr/bin/env python3
import sys
import rospy
import hsrb_interface
import numpy as np
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_2d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from controller_manager_msgs.srv import ListControllers
class ExecutePlan(object):
def __init__(self, grasp_type='side'):
# Initialize ROS node
rospy.init_node('execute_tamp_plan')
# Core module
self.tamp_planner = TAMPPlanner()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
self.tf_manager = TfManager()
# Initialize Robot
self.initialize_robot(grasp_type)
def initialize_robot(self, grasp_type='side'):
self.check_status()
# Get hsr python interface
self.robot = hsrb_interface.Robot()
self.omni_base = self.robot.get('omni_base')
self.gripper = self.robot.get('gripper')
self.whole_body = self.robot.get('whole_body')
# Initialize arm position
self.whole_body.move_to_go()
# Initialize base position
self.omni_base.go_abs(0.0, 0.0, 0.0, 300.0)
# Set base to configuration position
self.omni_base.go_abs(-0.8, 0.80, np.pi/2, 300.0)
# Set arm to neutral
self.whole_body.move_to_neutral()
# Set hsr to configuration pose
if grasp_type == 'side':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.25, y=0.0, z=0.0, ej=0.0),
], ref_frame_id='hand_palm_link')
# Fix base position using mocap data
gearbox_poses = self.mocap_interface.get_poses()
initial_pose = self.tf_manager.get_init_pose(gearbox_poses)
diff_x_pose, diff_y_pose, diff_z_pose = initial_pose[1], initial_pose[0], initial_pose[2]
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=0.0, y=-diff_y_pose, z=0.0)
], ref_frame_id='hand_palm_link')
elif grasp_type == 'top':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.25, y=0.0, z=0.0, ej=-np.pi/2),
], ref_frame_id='hand_palm_link')
# Fix base position using mocap data
gearbox_poses = self.mocap_interface.get_poses()
initial_pose = self.tf_manager.get_init_pose(gearbox_poses)
diff_x_pose, diff_y_pose, diff_z_pose = initial_pose[0], initial_pose[1], initial_pose[2]
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-diff_x_pose, y=-diff_y_pose, z=0.0)
], ref_frame_id='hand_palm_link')
# Open gripper
self.gripper.command(1.2)
def check_status(self):
# Make sure the controller is running
rospy.wait_for_service('/hsrb/controller_manager/list_controllers')
list_controllers = rospy.ServiceProxy('/hsrb/controller_manager/list_controllers', ListControllers)
running = False
while running is False:
rospy.sleep(0.1)
for c in list_controllers().controller:
if c.name == 'arm_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'head_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'gripper_controller' and c.state == 'running':
running |= True
if c.name == 'omni_base_controller' and c.state == 'running':
running |= True
return running
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args, grasp_type='side'):
# Get rigid body poses from mocap
mocap_poses = self.mocap_interface.get_poses()
# Modify plan
action_name, modified_action, ee_mode = self.path_modifier.post_process(action_name, args, mocap_poses, grasp_type)
return action_name, modified_action, ee_mode
def check_ee_mode(self):
# Get end effector pose
ee_pose = self.whole_body.get_end_effector_pose()
ee_rot = np.array([
ee_pose.ori.x,
ee_pose.ori.y,
ee_pose.ori.z,
ee_pose.ori.w
])
ee_euler = R.from_quat(ee_rot).as_euler('xyz')
        # Check whether the end effector is vertical or horizontal relative to the ground plane
        if -0.8 < ee_euler[1] < 0.8:
            ee_mode = 'vertical'
        elif -2.0 < ee_euler[1] < -1.0:
            ee_mode = 'horizontal'
        else:
            # Unexpected hand orientation; fail loudly instead of leaving
            # ee_mode unbound.
            raise ValueError('Unexpected end effector pitch: {:.3f}'.format(ee_euler[1]))
return ee_mode
def execute(self, grasp_type='side'):
plan = self.plan()
if plan is None:
return None
for i, (action_name, args) in enumerate(plan):
# Post process TAMP commands to hsr executable actions
action_name, diff_ee_pose, ee_mode = self.process(action_name, args, grasp_type)
if action_name == 'move':
# Set end effector mode
ee_mode = self.check_ee_mode()
# Move to next pose
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=diff_ee_pose[0],
y=diff_ee_pose[1],
z=diff_ee_pose[2]
),
], ref_frame_id='hand_palm_link')
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=-diff_ee_pose[1],
y=diff_ee_pose[0],
z=diff_ee_pose[2]
),
], ref_frame_id='hand_palm_link')
elif action_name == 'pick':
# Set impedance
self.whole_body.impedance_config = 'compliance_hard'
# Move to grasp pose
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=diff_ee_pose[0],
y=diff_ee_pose[1],
),
], ref_frame_id='hand_palm_link')
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=diff_ee_pose[0],
y=diff_ee_pose[1],
ek=-np.pi/2
),
], ref_frame_id='hand_palm_link')
self.whole_body.move_end_effector_by_line((0, 0, 1), diff_ee_pose[2])
# Grasp object
self.gripper.apply_force(0.8)
# Move to terminal pose
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_by_line((1, 0, 0), -diff_ee_pose[0])
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_by_line((0, 1, 0), -diff_ee_pose[0])
# Remove impedance
self.whole_body.impedance_config = None
elif action_name == 'place':
# Set impedance
self.whole_body.impedance_config = 'compliance_hard'
# Move to grasp pose
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
y=diff_ee_pose[1],
z=diff_ee_pose[2],
),
], ref_frame_id='hand_palm_link')
self.whole_body.move_end_effector_by_line((1, 0, 0), diff_ee_pose[0])
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=-diff_ee_pose[1],
z=diff_ee_pose[2],
),
], ref_frame_id='hand_palm_link')
self.whole_body.move_end_effector_by_line((0, 1, 0), diff_ee_pose[0])
# Release object
self.gripper.command(0.5)
# Move to terminal pose
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_by_line((1, 0, 0), -diff_ee_pose[0])
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
y=-diff_ee_pose[1],
z=-diff_ee_pose[2]-0.15,
),
], ref_frame_id='hand_palm_link')
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_by_line((0, 1, 0), -diff_ee_pose[0])
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=diff_ee_pose[1],
z=-diff_ee_pose[2]-0.15,
ek=np.pi/2
),
], ref_frame_id='hand_palm_link')
# Remove impedance
self.whole_body.impedance_config = None
elif action_name == 'stack':
# Set impedance
self.whole_body.impedance_config = 'compliance_hard'
# Move to grasp pose
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
y=diff_ee_pose[1],
z=diff_ee_pose[2],
),
], ref_frame_id='hand_palm_link')
self.whole_body.move_end_effector_by_line((0, 1, 0), diff_ee_pose[0])
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=-diff_ee_pose[1],
z=diff_ee_pose[2],
),
], ref_frame_id='hand_palm_link')
self.whole_body.move_end_effector_by_line((0, 1, 0), diff_ee_pose[0])
# Release object
self.gripper.command(1.2)
# Move to terminal pose
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_by_line((1, 0, 0), -diff_ee_pose[0])
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
y=-diff_ee_pose[1],
z=-diff_ee_pose[2]-0.15,
),
], ref_frame_id='hand_palm_link')
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_by_line((0, 1, 0), -diff_ee_pose[0])
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=diff_ee_pose[1],
z=-diff_ee_pose[2]-0.15,
ek=np.pi/2
),
], ref_frame_id='hand_palm_link')
# Remove impedance
self.whole_body.impedance_config = None
else:
continue
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 12,041 |
Python
| 38.611842 | 123 | 0.487584 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/controller/ik_solver.py
|
import os
import sys
import glob
import random
import numpy as np
from scipy.spatial.transform import Rotation as R
from .utils import multiply, invert, all_between, compute_forward_kinematics, \
compute_inverse_kinematics, select_solution, USE_ALL, USE_CURRENT
from .hsrb_utils import get_link_pose, get_joint_positions, get_custom_limits
BASE_FRAME = 'base_footprint'
TORSO_JOINT = 'torso_lift_joint'
ROTATION_JOINT = 'joint_rz'
LIFT_JOINT = 'arm_lift_joint'
HSR_TOOL_FRAMES = {'arm': 'hand_palm_link'}
IK_FRAME = {'arm': 'hand_palm_link'}
def get_ik_lib():
lib_path = os.environ['PYTHONPATH'].split(':')[1] # TODO: modify
ik_lib_path = glob.glob(os.path.join(lib_path, '**/hsrb'), recursive=True)
return ik_lib_path[0]
#####################################
def get_tool_pose(arm):
sys.path.append(get_ik_lib())
from ikArm import armFK
arm_fk = {'arm': armFK}
ik_joints = ['world_joint', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
conf = get_joint_positions(ik_joints)
assert len(conf) == 8
base_from_tool = compute_forward_kinematics(arm_fk[arm], conf)
world_from_base = get_link_pose(BASE_FRAME)
return multiply(world_from_base, base_from_tool)
#####################################
def get_ik_generator(arm, ik_pose, custom_limits={}):
sys.path.append(get_ik_lib())
from ikArm import armIK
arm_ik = {'arm': armIK}
base_joints = ['odom_x', 'odom_y', 'odom_t']
arm_joints = ['arm_lift_joint', 'arm_flex_joint', 'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
min_limits, max_limits = get_custom_limits(base_joints, arm_joints, custom_limits)
arm_rot = R.from_quat(ik_pose[1]).as_euler('xyz')[0]
sampled_limits = [(arm_rot-np.pi, arm_rot-np.pi), (0.0, 0.34)]
while True:
sampled_values = [random.uniform(*limits) for limits in sampled_limits]
confs = compute_inverse_kinematics(arm_ik[arm], ik_pose, sampled_values)
solutions = [q for q in confs if all_between(min_limits, q, max_limits)]
yield solutions
if all(lower == upper for lower, upper in sampled_limits):
break
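# Note (assumption): the two sampled values appear to be the analytic solver's
# free parameters (base yaw and arm lift); the first range is degenerate, so
# the yaw stays fixed at arm_rot - pi while the lift is drawn uniformly from
# [0.0, 0.34], and only solutions inside the custom joint limits are yielded.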
def get_tool_from_ik(arm):
world_from_tool = get_link_pose(HSR_TOOL_FRAMES[arm])
world_from_ik = get_link_pose(IK_FRAME[arm])
return multiply(invert(world_from_tool), world_from_ik)
def sample_tool_ik(arm, tool_pose, nearby_conf=USE_CURRENT, max_attempts=100, **kwargs):
generator = get_ik_generator(arm, tool_pose, **kwargs)
whole_body_joints = ['world_joint', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
for _ in range(max_attempts):
try:
solutions = next(generator)
if solutions:
return select_solution(whole_body_joints, solutions, nearby_conf=nearby_conf)
except StopIteration:
break
return None
def hsr_inverse_kinematics(arm, gripper_pose, custom_limits={}, **kwargs):
base_arm_conf = sample_tool_ik(arm, gripper_pose, custom_limits=custom_limits, **kwargs)
if base_arm_conf is None:
return None
return base_arm_conf
if __name__ == '__main__':
# test forward kinematics
fk_pose = get_tool_pose('arm')
print('fk_pose:', fk_pose)
# test inverse kinematics
import numpy as np
pos_x = 0.0
pos_y = 0.0
pos_z = 0.6
    forward = (0.70710678, 0.0, 0.70710678, 0.0)
    back = (0.0, -0.70710678, 0.0, 0.70710678)
    right = (0.5, -0.5, 0.5, 0.5)
    left = (0.5, 0.5, 0.5, -0.5)
    pose = ((pos_x, pos_y, pos_z), forward)
print('pose:', pose)
ik_pose = hsr_inverse_kinematics('arm', pose)
print('ik_pose:', ik_pose)
# test inverse kinematics generator
import time
for i in range(100):
start = time.time()
pose_x = 2.5
pose_y = 2.0
pose_z = 0.6
tool_pose = ((pose_x, pose_y, pose_z), (0.707107, 0.0, 0.707107, 0.0))
generator = get_ik_generator('arm', tool_pose)
solutions = next(generator)
print(solutions)
print('Loop Hz:', 1/(time.time()-start))
| 4,179 |
Python
| 33.545454 | 111 | 0.615219 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/controller/hsrb_utils.py
|
import rospy
import numpy as np
import hsrb_interface
from scipy.spatial.transform import Rotation as R
robot = hsrb_interface.Robot()
base = robot.get('omni_base')
gripper = robot.get('gripper')
whole_body = robot.get('whole_body')
def get_link_pose(link):
tf_pose = whole_body._tf2_buffer.lookup_transform('map', link, rospy.Time(0))
link_pose = ((tf_pose.transform.translation.x,
tf_pose.transform.translation.y,
tf_pose.transform.translation.z),
(tf_pose.transform.rotation.x,
tf_pose.transform.rotation.y,
tf_pose.transform.rotation.z,
tf_pose.transform.rotation.w))
return link_pose
def get_joint_limits(joint):
if joint == 'odom_x':
limit = (-10.0, 10.0)
elif joint == 'odom_y':
limit = (-10.0, 10.0)
elif joint == 'odom_t':
limit = (-10.0, 10.0)
else:
limit = whole_body.joint_limits[joint]
return limit
def get_custom_limits(base_joints, arm_joints, custom_limits={}):
joint_limits = []
for joint in base_joints:
if joint in custom_limits:
joint_limits.append(custom_limits[joint])
else:
joint_limits.append(get_joint_limits(joint))
for joint in arm_joints:
if joint in custom_limits:
joint_limits.append(custom_limits[joint])
else:
joint_limits.append(get_joint_limits(joint))
return zip(*joint_limits)
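# Note: zip() returns a one-shot iterator in Python 3, so callers should unpack
# the result immediately (e.g. "min_limits, max_limits = get_custom_limits(...)"
# in ik_solver.get_ik_generator).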
def get_distance(p1, p2, **kwargs):
assert len(p1) == len(p2)
diff = np.array(p2) - np.array(p1)
return np.linalg.norm(diff, ord=2)
def get_joint_position(joint):
if joint == 'world_joint':
joint_position = base.pose
else:
joint_position = whole_body.joint_positions[joint]
return joint_position
def get_joint_positions(joints):
    joint_positions = []
    for joint in joints:
if joint == 'world_joint':
base_pose = base._tf2_buffer.lookup_transform('map', 'base_footprint', rospy.Time(0))
joint_positions.append(base_pose.transform.translation.x)
joint_positions.append(base_pose.transform.translation.y)
base_quat = np.array([base_pose.transform.rotation.x,
base_pose.transform.rotation.y,
base_pose.transform.rotation.z,
base_pose.transform.rotation.w])
base_rz = R.from_quat(base_quat).as_euler('xyz')[2]
joint_positions.append(base_rz)
else:
joint_positions.append(whole_body.joint_positions[joint])
return joint_positions
if __name__ == '__main__':
# Test get_link_pose
base_link_pose = get_link_pose('base_footprint')
print('base_link_pose:', base_link_pose)
hand_palm_link_pose = get_link_pose('hand_palm_link')
print('hand_palm_link_pose:', hand_palm_link_pose)
# Test get_custom_limits
base_joints = ['odom_x', 'odom_y', 'odom_t']
arm_joints = ['arm_lift_joint', 'arm_flex_joint', 'arm_roll_joint',
'wrist_flex_joint', 'wrist_roll_joint']
custom_limits = get_custom_limits(base_joints, arm_joints)
print('custom_limits:', custom_limits)
# Test get_joint_limits
for b_joint in base_joints:
joint_limit = get_joint_limits(b_joint)
print('joint_limit:', joint_limit)
for a_joint in arm_joints:
joint_limit = get_joint_limits(a_joint)
print('joint_limit:', joint_limit)
# Test get_joint_position
ik_joints = ['world_joint', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
for joint in ik_joints:
joint_position = get_joint_position(joint)
print('joint_position:', joint_position)
# Test get_joint_positions
joint_positions = get_joint_positions(ik_joints)
print('joint_positions:', joint_positions)
| 3,904 |
Python
| 35.495327 | 97 | 0.615523 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/controller/ik_controller.py
|
import rospy
import numpy as np
from .controller import Controller
from .ik_solver import get_tool_pose, get_ik_generator, hsr_inverse_kinematics
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
class IKController(Controller):
def __init__(self):
super(IKController, self).__init__()
# Publisher
self.arm_pub = rospy.Publisher('/hsrb/arm_trajectory_controller/command', JointTrajectory, queue_size=10)
self.base_pub = rospy.Publisher('/hsrb/omni_base_controller/command', JointTrajectory, queue_size=10)
        # Wait until the publishers are connected
while self.base_pub.get_num_connections() == 0:
rospy.sleep(0.1)
while self.arm_pub.get_num_connections() == 0:
rospy.sleep(0.1)
def set_pose(self, base_pose, joint_pose):
base_traj = JointTrajectory()
arm_traj = JointTrajectory()
base_traj.joint_names = ['odom_x', 'odom_y', 'odom_t']
arm_traj.joint_names = ['arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
# Set base trajectory
assert len(base_pose) == 3, "Does not match the size of base pose"
base_p = JointTrajectoryPoint()
base_p.positions = base_pose
base_p.velocities = np.zeros(len(base_pose))
base_p.time_from_start = rospy.Duration(1)
base_traj.points = [base_p]
# Set arm trajectory
        assert len(joint_pose) == 5, "Does not match the size of arm pose"
arm_p = JointTrajectoryPoint()
arm_p.positions = joint_pose
arm_p.velocities = np.zeros(len(joint_pose))
arm_p.time_from_start = rospy.Duration(1)
arm_traj.points = [arm_p]
return base_traj, arm_traj
def control(self, pose):
# Inverse kinematics
        ik_pose = hsr_inverse_kinematics('arm', pose)  # pose must contain (pos, quat)
if ik_pose is None:
return
else:
base_pose, arm_pose = ik_pose[:3], ik_pose[3:]
# Set target pose
base_traj, arm_traj = self.set_pose(base_pose, arm_pose)
# Publish target pose
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
if __name__ == '__main__':
ik_controller = IKController()
rate = rospy.Rate(50)
pose_x = 0.0
pose_y = 0.0
pose_z = 0.5
    forward = (0.70710678, 0.0, 0.70710678, 0.0)
back = (0.0, -0.70710678, 0.0, 0.70710678)
right = (0.5, -0.5, 0.5, 0.5)
left = (0.5, 0.5, 0.5, -0.5)
while not rospy.is_shutdown():
# Test forward kinematics
fk_pose = get_tool_pose('arm')
# Test inverse kinematics
        tool_pose = ((pose_x, pose_y, pose_z), forward)
ik_controller.control(tool_pose)
# Sleep
rate.sleep()
| 2,848 |
Python
| 32.517647 | 113 | 0.596559 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/controller/controller.py
|
import rospy
import numpy as np
class Controller(object):
def __init__(self):
pass
def set_pose(self):
raise NotImplementedError("Implement set_pose method")
def control(self):
raise NotImplementedError("Implement control method")
| 271 |
Python
| 18.42857 | 62 | 0.671587 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_2d/script/controller/utils.py
|
import random
import numpy as np
import pybullet as p
from collections import namedtuple
from scipy.spatial.transform import Rotation as R
from .hsrb_utils import get_joint_limits, get_joint_position, get_joint_positions, get_distance
IKFastInfo = namedtuple('IKFastInfo', ['module_name', 'base_link', 'ee_link', 'free_joints'])
USE_ALL = False
USE_CURRENT = None
############ Mathematics
def invert(pose):
point, quat = pose
return p.invertTransform(point, quat) # TODO: modify
def multiply(*poses):
pose = poses[0]
for next_pose in poses[1:]:
pose = p.multiplyTransforms(pose[0], pose[1], *next_pose) # TODO: modify
return pose
##############
def all_between(lower_limits, values, upper_limits):
assert len(lower_limits) == len(values)
assert len(values) == len(upper_limits)
return np.less_equal(lower_limits, values).all() and \
np.less_equal(values, upper_limits).all()
def compute_forward_kinematics(fk_fn, conf):
pose = fk_fn(list(conf))
pos, rot = pose
quat = R.from_matrix(rot).as_quat()
return pos, quat
def compute_inverse_kinematics(ik_fn, pose, sampled=[]):
pos, quat = pose[0], pose[1]
rot = R.from_quat(quat).as_matrix().tolist()
if len(sampled) == 0:
solutions = ik_fn(list(rot), list(pos))
else:
solutions = ik_fn(list(rot), list(pos), list(sampled))
if solutions is None:
return []
return solutions
def get_ik_limits(joint, limits=USE_ALL):
if limits is USE_ALL:
return get_joint_limits(joint)
elif limits is USE_CURRENT:
value = get_joint_position(joint)
return value, value
return limits
def select_solution(joints, solutions, nearby_conf=USE_ALL, **kwargs):
if not solutions:
return None
if nearby_conf is USE_ALL:
return random.choice(solutions)
if nearby_conf is USE_CURRENT:
nearby_conf = get_joint_positions(joints)
return min(solutions, key=lambda conf: get_distance(nearby_conf, conf, **kwargs))
| 2,034 |
Python
| 26.133333 | 95 | 0.656834 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/hsr_interface.py
|
#!/usr/bin/env python3
import rospy
import hsrb_interface
import numpy as np
from scipy.spatial.transform import Rotation as R
from controller_manager_msgs.srv import ListControllers
from control_msgs.msg import JointTrajectoryControllerState
class HSRInterface(object):
def __init__(self, standalone=False):
# Initialize ROS node
if standalone:
rospy.init_node('hsr_interface')
# Check server status
self.check_status()
self.robot = hsrb_interface.Robot()
self.omni_base = self.robot.get('omni_base')
self.gripper = self.robot.get('gripper')
self.whole_body = self.robot.get('whole_body')
self.base_sub = rospy.Subscriber('/hsrb/omni_base_controller/state', JointTrajectoryControllerState, self.base_callback)
self.arm_sub = rospy.Subscriber('/hsrb/arm_trajectory_controller/state', JointTrajectoryControllerState, self.arm_callback)
self.base_pos, self.base_vel, self.base_acc = None, None, None
self.arm_pos, self.arm_vel, self.arm_acc = None, None, None
self.base_joints = ['joint_x', 'joint_y', 'joint_rz']
self.arm_joints = ['arm_lift_joint', 'arm_flex_joint', 'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
self.joint_ids_to_name = {'joint_x': 0, 'joint_y': 1, 'joint_rz': 2,
'arm_lift_joint': 0, 'arm_flex_joint': 1, 'arm_roll_joint': 2,
'wrist_flex_joint': 3, 'wrist_roll_joint': 4}
def initialize_arm(self):
# Initialize arm position
self.whole_body.move_to_go()
# Set TAMP pose
self.whole_body.move_to_joint_positions({
'arm_lift_joint': 0.1,
'arm_flex_joint': -np.pi/2,
'arm_roll_joint': 0.0,
'wrist_flex_joint': 0.0,
'wrist_roll_joint': 0.0,
})
def initialize_base(self):
# Initialize base position
self.omni_base.go_abs(0.0, 0.0, 0.0, 300.0)
def set_task_pose(self, grasp_type='side'):
# Set base to configuration position
self.omni_base.go_abs(-0.80, 0.75, np.pi/2, 300.0)
# Set arm to configuration pose
self.whole_body.move_to_neutral()
if grasp_type == 'side':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.3, y=0.0, z=0.0, ej=0.0),
], ref_frame_id='hand_palm_link')
elif grasp_type == 'top':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.3, y=0.0, z=0.0, ej=-1.57),
], ref_frame_id='hand_palm_link')
# Open gripper
self.gripper.command(1.2)
def fix_task_pose(self, diff_pose):
diff_x_pose, diff_y_pose, diff_z_pose = diff_pose[0], diff_pose[1], diff_pose[2]
if np.abs(diff_z_pose) < 0.1:
diff_z_pose = -0.12
else:
diff_z_pose = 0.0
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=0.0, y=diff_y_pose, z=diff_z_pose)
], ref_frame_id='hand_palm_link')
def check_status(self):
# Make sure the controller is running
rospy.wait_for_service('/hsrb/controller_manager/list_controllers')
list_controllers = rospy.ServiceProxy('/hsrb/controller_manager/list_controllers', ListControllers)
running = False
while running is False:
rospy.sleep(0.1)
for c in list_controllers().controller:
if c.name == 'arm_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'head_trajectory_controller' and c.state == 'running':
running |= True
if c.name == 'gripper_controller' and c.state == 'running':
running |= True
if c.name == 'omni_base_controller' and c.state == 'running':
running |= True
return running
def close_gripper(self, force=0.5):
self.gripper.apply_force(force)
def open_gripper(self, width=1.0):
self.gripper.command(width)
def insert_object(self, ee_mode):
if ee_mode == 'horizontal':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=-0.03, y=0.0, z=0.0),
], ref_frame_id='hand_palm_link')
elif ee_mode == 'vertical':
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(x=0.0, y=-0.03, z=0.0),
], ref_frame_id='hand_palm_link')
def grasp_gear(self, diff_ee_pose, pick_offset=0.1):
# Open gripper
self.open_gripper(0.5)
# Move to grasp
self.whole_body.move_end_effector_by_line((0, 0, 1), -pick_offset)
self.whole_body.move_end_effector_pose([
hsrb_interface.geometry.pose(
x=diff_ee_pose[0],
y=diff_ee_pose[1],
ek=-np.pi/2
),
], ref_frame_id='hand_palm_link')
self.whole_body.move_end_effector_by_line((0, 0, 1), diff_ee_pose[2]+pick_offset)
# Close gripper
self.close_gripper(0.5)
def get_link_pose(self, link):
tf_pose = self.whole_body._tf2_buffer.lookup_transform('map', link, rospy.Time(0))
link_pose = [[tf_pose.transform.translation.x,
tf_pose.transform.translation.y,
tf_pose.transform.translation.z],
[tf_pose.transform.rotation.x,
tf_pose.transform.rotation.y,
tf_pose.transform.rotation.z,
tf_pose.transform.rotation.w]]
return link_pose
def get_joint_limits(self, joint):
if joint == 'odom_x':
limit = (-10.0, 10.0)
elif joint == 'odom_y':
limit = (-10.0, 10.0)
elif joint == 'odom_t':
limit = (-10.0, 10.0)
else:
limit = self.whole_body.joint_limits[joint]
return limit
def get_custom_limits(self, base_joints, arm_joints, custom_limits={}):
joint_limits = []
for joint in base_joints:
if joint in custom_limits:
joint_limits.append(custom_limits[joint])
else:
joint_limits.append(self.get_joint_limits(joint))
for joint in arm_joints:
if joint in custom_limits:
joint_limits.append(custom_limits[joint])
else:
joint_limits.append(self.get_joint_limits(joint))
return zip(*joint_limits)
def get_distance(self, p1, p2, **kwargs):
assert len(p1) == len(p2)
diff = np.array(p2) - np.array(p1)
return np.linalg.norm(diff, ord=2)
def get_joint_position(self, joint, group=None):
if group == 'base':
joint_position = self.base_pos[self.joint_ids_to_name[joint]]
elif group == 'arm':
joint_position = self.arm_pos[self.joint_ids_to_name[joint]]
else:
raise ValueError(joint)
return joint_position
def get_joint_positions(self, group=None):
joint_positions = []
if group == 'base':
for base_joint in self.base_joints:
base_pos = self.get_joint_position(base_joint, 'base')
joint_positions.append(base_pos)
elif group == 'arm':
for arm_joint in self.arm_joints:
arm_pos = self.get_joint_position(arm_joint, 'arm')
joint_positions.append(arm_pos)
else:
for base_joint in self.base_joints:
base_pos = self.get_joint_position(base_joint, 'base')
joint_positions.append(base_pos)
for arm_joint in self.arm_joints:
arm_pos = self.get_joint_position(arm_joint, 'arm')
joint_positions.append(arm_pos)
return joint_positions
def get_joint_velocity(self, joint, group=None):
if group == 'base':
joint_velocity = self.base_vel[self.joint_ids_to_name[joint]]
elif group == 'arm':
joint_velocity = self.arm_vel[self.joint_ids_to_name[joint]]
else:
raise ValueError(joint)
return joint_velocity
def get_joint_velocities(self, group=None):
joint_velocities = []
if group == 'base':
for base_joint in self.base_joints:
joint_vel = self.get_joint_velocity(base_joint, 'base')
joint_velocities.append(joint_vel)
elif group == 'arm':
for arm_joint in self.arm_joints:
joint_vel = self.get_joint_velocity(arm_joint, 'arm')
joint_velocities.append(joint_vel)
else:
for base_joint in self.base_joints:
joint_vel = self.get_joint_velocity(base_joint, 'base')
joint_velocities.append(joint_vel)
for arm_joint in self.arm_joints:
joint_vel = self.get_joint_velocity(arm_joint, 'arm')
joint_velocities.append(joint_vel)
return joint_velocities
def get_joint_acceleration(self, joint, group=None):
if group == 'base':
joint_acceleration = self.base_acc[self.joint_ids_to_name[joint]]
elif group == 'arm':
joint_acceleration = self.arm_acc[self.joint_ids_to_name[joint]]
else:
raise ValueError(joint)
return joint_acceleration
def get_joint_accelerations(self, group=None):
        joint_accelerations = []
        if group == 'base':
            for base_joint in self.base_joints:
                joint_acc = self.get_joint_acceleration(base_joint, 'base')
                joint_accelerations.append(joint_acc)
        elif group == 'arm':
            for arm_joint in self.arm_joints:
                joint_acc = self.get_joint_acceleration(arm_joint, 'arm')
                joint_accelerations.append(joint_acc)
        else:
            for base_joint in self.base_joints:
                joint_acc = self.get_joint_acceleration(base_joint, 'base')
                joint_accelerations.append(joint_acc)
            for arm_joint in self.arm_joints:
                joint_acc = self.get_joint_acceleration(arm_joint, 'arm')
                joint_accelerations.append(joint_acc)
        return joint_accelerations
def base_callback(self, data):
self.base_pos = data.actual.positions
self.base_vel = data.actual.velocities
self.base_acc = data.actual.accelerations
def arm_callback(self, data):
self.arm_pos = data.actual.positions
self.arm_vel = data.actual.velocities
self.arm_acc = data.actual.accelerations
if __name__ == '__main__':
hsr_interface = HSRInterface()
hsr_interface.initialize_arm()
hsr_interface.initialize_base()
print('positions:', hsr_interface.get_joint_positions())
print('velocities:', hsr_interface.get_joint_velocities())
| 11,059 |
Python
| 38.784173 | 131 | 0.570757 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/tf_interface.py
|
#!/usr/bin/env python3
import tf
import rospy
import tf2_ros
import numpy as np
import tf2_geometry_msgs
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped
from scipy.spatial.transform import Rotation as R
class TfManager(object):
def __init__(self, standalone=False):
if standalone:
rospy.init_node('tf_manager')
# TF listener
self.tfBuffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tfBuffer)
# Publisher
self.tf_pub = rospy.Publisher('/tf', TFMessage, queue_size=1)
def get_link_pose(self, link_name='hand_palm_link'):
        # Transform a pose expressed in the given robot link frame into the map frame.
ee_pose = tf2_geometry_msgs.PoseStamped()
ee_pose.header.frame_id = link_name
ee_pose.header.stamp = rospy.Time(0)
ee_pose.pose.orientation.w = 1.0
try:
# Get transform at current time
global_pose = self.tfBuffer.transform(ee_pose, 'map')
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
print(e)
return None
return global_pose
def get_init_pose(self, mocap_poses):
        # Get gear1 pose as the reference rigid body
gearbox_x = mocap_poses['gear1'].pose.position.x
gearbox_y = mocap_poses['gear1'].pose.position.y
gearbox_z = mocap_poses['gear1'].pose.position.z
# Get current end effector pose
current_ee_pose_x = mocap_poses['end_effector'].pose.position.x
current_ee_pose_y = mocap_poses['end_effector'].pose.position.y
current_ee_pose_z = mocap_poses['end_effector'].pose.position.z
        # Calculate difference pose
diff_x_pose = current_ee_pose_x - gearbox_x
diff_y_pose = current_ee_pose_y - gearbox_y
diff_z_pose = current_ee_pose_z - gearbox_z
init_base_pose = np.array([diff_x_pose, diff_y_pose, diff_z_pose])
return init_base_pose
    # Transform target_pose from the end effector frame to the map frame
def transform_coordinate(self, target_pose):
# Initialize vector and matrix
trans_mat = np.zeros((4, 4))
qc_trans = np.zeros(4)
# Initialize map coordination
qc_trans[0] = target_pose[0]
qc_trans[1] = target_pose[1]
qc_trans[2] = target_pose[2]
qc_trans[3] = 1.0
ee_pose = self.get_link_pose()
if ee_pose is None:
return
# Initialize ee coordination
translation = np.array([
ee_pose.pose.position.x,
ee_pose.pose.position.y,
ee_pose.pose.position.z
])
rot = np.array([
ee_pose.pose.orientation.x,
ee_pose.pose.orientation.y,
ee_pose.pose.orientation.z,
ee_pose.pose.orientation.w
])
rot_mat = R.from_quat(rot).as_matrix()
# Calculate translation
trans_mat[3:, 3:] = 1.0
trans_mat[:3, 3] = translation
trans_mat[:3, :3] = rot_mat
qw_trans = trans_mat @ qc_trans
qw_trans = qw_trans[:3]
# Calculate rotation
qw_rot = np.array([
ee_pose.pose.orientation.x,
ee_pose.pose.orientation.y,
ee_pose.pose.orientation.z,
ee_pose.pose.orientation.w
])
return np.concatenate([qw_trans, qw_rot])
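    # In homogeneous coordinates the computation above is
    # q_map = T(map<-hand) @ [x, y, z, 1]^T, with the current hand orientation
    # passed through unchanged as the rotation part of the result.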
def publish_mocap_to_map(self, mocap_poses):
tf_list = []
for rigid_name, mocap_pose in mocap_poses.items():
t = TransformStamped()
t.header.frame_id = 'map'
t.header.stamp = rospy.Time.now()
t.child_frame_id = rigid_name
t.transform.translation.x = mocap_pose.pose.position.x
t.transform.translation.y = mocap_pose.pose.position.y
t.transform.translation.z = mocap_pose.pose.position.z
            t.transform.rotation.x = mocap_pose.pose.orientation.x
            t.transform.rotation.y = mocap_pose.pose.orientation.y
            t.transform.rotation.z = mocap_pose.pose.orientation.z
            t.transform.rotation.w = mocap_pose.pose.orientation.w
tf_list.append(t)
tfm = TFMessage(tf_list)
# Publish tf message
self.tf_pub.publish(tfm)
if __name__ == '__main__':
tf_manager = TfManager(standalone=True)
while not rospy.is_shutdown():
transform = tf_manager.get_link_pose('hand_palm_link')
test_pose1 = np.array([-0.5, 0.0, 0.0]) # -0.5 to z direction on map
test_pose1 = tf_manager.transform_coordinate(test_pose1)
test_pose2 = np.array([0.0, 1.0, 0.0]) # -1.0 to x direction on map
test_pose2 = tf_manager.transform_coordinate(test_pose2)
test_pose3 = np.array([-0.3, -1.0, 0.0])
test_pose3 = tf_manager.transform_coordinate(test_pose3)
ee_pose = tf_manager.get_link_pose()
if ee_pose is None:
continue
test_pose4 = np.array([
ee_pose.pose.position.x,
ee_pose.pose.position.y,
ee_pose.pose.position.z
])
test_pose4 = tf_manager.transform_coordinate(test_pose4)
test_pose5 = np.zeros(3)
test_pose5 = tf_manager.transform_coordinate(test_pose5)
print('test_1: ', test_pose1)
print('test_2: ', test_pose2)
print('test_3: ', test_pose3)
print('test_4: ', test_pose4)
print('test_5: ', test_pose5)
| 5,480 |
Python
| 32.420732 | 109 | 0.592336 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/mocap_interface.py
|
#!/usr/bin/env python3
import rospy
import numpy as np
from ros_numpy import numpify
from geometry_msgs.msg import PoseStamped
from scipy.spatial.transform import Rotation as R
class MocapInterface(object):
def __init__(self, standalone=False):
if standalone:
rospy.init_node('mocap_interface')
# Mocap list
self.rigid_body_list = (
"base",
"gear1",
"gear2",
"gear3",
"shaft1",
"shaft2",
"end_effector"
)
# Mocap to map
self.translation = np.array([0.0, 0.0, 0.0])
self.rotation = np.array([
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0]
])
# Rigid body pose dictionary
self.rigid_posestamped = {
name: PoseStamped() for name in self.rigid_body_list
}
# Subscriber
self._mocap_sub = [
rospy.Subscriber('/mocap_pose_topic/{0}_pose'.format(self.rigid_body_list[i]), PoseStamped, self._mocap_cb, callback_args=i)
for i in range(len(self.rigid_body_list))
]
def _mocap_cb(self, mocap_data, id):
converted_pose = self.convert_mocap_to_map(mocap_data)
mocap_data.header.frame_id = 'map'
mocap_data.pose.position.x = converted_pose[0]
mocap_data.pose.position.y = converted_pose[1]
mocap_data.pose.position.z = converted_pose[2]
mocap_data.pose.orientation.x = converted_pose[3]
mocap_data.pose.orientation.y = converted_pose[4]
mocap_data.pose.orientation.z = converted_pose[5]
mocap_data.pose.orientation.w = converted_pose[6]
self.rigid_posestamped[self.rigid_body_list[id]] = mocap_data
def get_pose(self, name):
pos = numpify(self.rigid_posestamped[name].pose.position)
orn = numpify(self.rigid_posestamped[name].pose.orientation)
return [pos, orn]
def get_poses(self):
rigid_poses = {}
for name, pose_stamped in self.rigid_posestamped.items():
pos = numpify(pose_stamped.pose.position)
orn = numpify(pose_stamped.pose.orientation)
rigid_poses[name] = [pos, orn]
return rigid_poses
def convert_mocap_to_map(self, mocap_data):
# Initialize vector and matrix
trans_mat = np.zeros((4, 4))
qc_trans = np.zeros(4)
qc_rot = np.zeros(4)
# Calculate translation
qc_trans[0] = mocap_data.pose.position.x
qc_trans[1] = mocap_data.pose.position.y
qc_trans[2] = mocap_data.pose.position.z
qc_trans[3] = 1.0
trans_mat[3:, 3:] = 1.0
trans_mat[:3, 3] = self.translation
trans_mat[:3, :3] = self.rotation
qw_trans = trans_mat @ qc_trans
qw_trans = qw_trans[:3]
# Calculate rotation
qc_rot[0] = mocap_data.pose.orientation.x
qc_rot[1] = mocap_data.pose.orientation.y
qc_rot[2] = mocap_data.pose.orientation.z
qc_rot[3] = mocap_data.pose.orientation.w
qc_rot_mat = R.from_quat(qc_rot).as_matrix()
qw_rot_mat = self.rotation @ qc_rot_mat
qw_rot = R.from_matrix(qw_rot_mat).as_quat()
return np.concatenate([qw_trans, qw_rot])
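# A minimal standalone check of the homogeneous transform used in
# convert_mocap_to_map, assuming the identity translation and the axis
# permutation defined in __init__; the helper name is illustrative only.
def _transform_example():
    rotation = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
    trans_mat = np.eye(4)
    trans_mat[:3, :3] = rotation
    qc = np.array([1.0, 2.0, 3.0, 1.0])  # homogeneous point in the mocap frame
    qw = trans_mat @ qc                   # same point in the map frame
    assert np.allclose(qw[:3], [3.0, 1.0, 2.0])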
if __name__ == '__main__':
mocap_interface = MocapInterface(standalone=True)
while not rospy.is_shutdown():
# Test each rigid body
base_pose = mocap_interface.get_pose('base')
print('base_pose: ', base_pose)
green_gear_pose = mocap_interface.get_pose('gear1')
print('green_gear_pose: ', green_gear_pose)
blue_gear_pose = mocap_interface.get_pose('gear2')
print('blue_gear_pose: ', blue_gear_pose)
red_gear_pose = mocap_interface.get_pose('gear3')
print('red_gear_pose: ', red_gear_pose)
red_shaft_pose = mocap_interface.get_pose('shaft1')
print('red_shaft_pose: ', red_shaft_pose)
yellow_shaft_pose = mocap_interface.get_pose('shaft2')
print('yellow_shaft_pose: ', yellow_shaft_pose)
end_effector_pose = mocap_interface.get_pose('end_effector')
print('end_effector_pose: ', end_effector_pose)
# Test all rigid bodies
rigid_poses = mocap_interface.get_poses()
print('rigid_poses: ', rigid_poses['gear1'])
| 4,331 |
Python
| 34.219512 | 136 | 0.587162 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/force_sensor_interface.py
|
import sys
import math
import rospy
from geometry_msgs.msg import WrenchStamped
class ForceSensorInterface(object):
def __init__(self, standalone=False):
if standalone:
rospy.init_node('force_torque_sensor_interface')
self._force_data_x = 0.0
self._force_data_y = 0.0
self._force_data_z = 0.0
# Subscribe force torque sensor data from HSRB
self._wrist_wrench_sub = rospy.Subscriber(
'/hsrb/wrist_wrench/raw', WrenchStamped, self._ft_sensor_callback
)
# Wait for connection
try:
rospy.wait_for_message('/hsrb/wrist_wrench/raw', WrenchStamped, timeout=10.0)
except Exception as e:
rospy.logerr(e)
sys.exit(1)
def _ft_sensor_callback(self, data):
self._force_data_x = data.wrench.force.x
self._force_data_y = data.wrench.force.y
self._force_data_z = data.wrench.force.z
def initialize_ft(self):
self._force_data_x = 0.0
self._force_data_y = 0.0
self._force_data_z = 0.0
def get_current_force(self):
return [self._force_data_x, self._force_data_y, self._force_data_z]
def compute_difference(self, pre_data_list, post_data_list, calc_type='l1'):
        if len(pre_data_list) != len(post_data_list):
            raise ValueError('Argument lists differ in length')
        # Compute the difference (L1: signed sum of deltas, L2: Euclidean norm)
if calc_type == 'l1':
l1_sums = sum([b - a for (a, b) in zip(pre_data_list, post_data_list)])
return l1_sums
        elif calc_type == 'l2':
            l2_sums = sum([math.pow(b - a, 2) for (a, b) in zip(pre_data_list, post_data_list)])
            return math.sqrt(l2_sums)
        else:
            raise ValueError("calc_type must be 'l1' or 'l2'")
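# Example (hypothetical values): for pre = [1.0, 2.0, 3.0] and
# post = [2.0, 4.0, 6.0], compute_difference returns 6.0 for 'l1'
# (signed sum of deltas) and sqrt(1 + 4 + 9) ~= 3.742 for 'l2'.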
if __name__ == '__main__':
ft_interface = ForceSensorInterface(standalone=True)
rate = rospy.Rate(50)
while not rospy.is_shutdown():
prev_ft_data = ft_interface.get_current_force()
input('wait_for_user')
curr_ft_data = ft_interface.get_current_force()
force_difference = ft_interface.compute_difference(prev_ft_data, curr_ft_data)
weight = round(force_difference / 9.81 * 1000, 1)
print('weight:', weight)
rate.sleep()
| 2,244 |
Python
| 32.014705 | 96 | 0.595365 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/planner/cross_entropy_method.py
|
import numpy as np
from planner import Planner
class CrossEntropyMethod(Planner):
def __init__(self):
super().__init__()
def compute_path(self):
pass
| 176 |
Python
| 16.699998 | 34 | 0.630682 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/planner/planner.py
|
import numpy as np
class Planner(object):
def __init__(self):
pass
@staticmethod
def compute_path(self):
raise NotImplementedError("Implement compute_path")
| 187 |
Python
| 17.799998 | 59 | 0.652406 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/planner/linear_interpolation.py
|
import numpy as np
from .planner import Planner
class LinearInterpolationPlanner(Planner):
def __init__(self):
super().__init__()
def compute_path(self, op_pose, num_steps=10):
# Compute linear interpolation path
assert num_steps > 1, "Too short to create waypoints"
traj_x = np.linspace(0.0, op_pose[0], num_steps)
traj_y = np.linspace(0.0, op_pose[1], num_steps)
traj_z = np.linspace(0.0, op_pose[2], num_steps)
trajectory = np.dstack((traj_x, traj_y, traj_z))
return trajectory
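# Usage sketch (hypothetical target pose): note that np.dstack returns an
# array of shape (1, num_steps, 3), so callers typically index the first axis.
#   planner = LinearInterpolationPlanner()
#   path = planner.compute_path(np.array([0.1, 0.0, 0.2]), num_steps=5)
#   waypoints = path[0]  # (5, 3) array from the origin to op_pose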
| 557 |
Python
| 29.999998 | 61 | 0.621185 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/planner/spline_interpolation.py
|
import numpy as np
from planner import Planner
class SplineInterpolationPlanner(Planner):
def __init__(self, offset):
super().__init__()
# Set parameters
self.offset = offset
def compute_path(self, op_pose, num_steps=10):
        # NOTE: placeholder implementation; performs linear interpolation rather than spline fitting
assert num_steps > 1, "Too short to create waypoints"
traj_x = np.linspace(0.0, op_pose[0], num_steps)
traj_y = np.linspace(0.0, op_pose[1], num_steps)
traj_z = np.linspace(0.0, op_pose[2], num_steps)
trajectory = np.dstack((traj_x, traj_y, traj_z))
return trajectory
| 619 |
Python
| 28.523808 | 61 | 0.615509 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/feedback/test_plan.py
|
#!/usr/bin/env/python3
import sys
import rospy
import numpy as np
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_3d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from hsr_interface import HSRInterface
from force_sensor_interface import ForceSensorInterface
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
class ExecutePlan(object):
def __init__(self):
# Initialize ROS node
rospy.init_node('execute_tamp_plan')
# Feedback rate
self.control_freq = 3
self.rate = rospy.Rate(self.control_freq)
# Threshold
self.move_threshold = 0.05
self.pick_threshold = 0.05
self.place_threshold = 0.04
self.stack_threshold = 0.032
self.weight_threshold = 500
# Core module
self.tamp_planner = TAMPPlanner()
self.hsr_interface = HSRInterface()
self.tf_manager = TfManager()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
self.ft_interface = ForceSensorInterface()
# Publisher
self.arm_pub = rospy.Publisher('/hsrb/arm_trajectory_controller/command', JointTrajectory, queue_size=10)
self.base_pub = rospy.Publisher('/hsrb/omni_base_controller/command', JointTrajectory, queue_size=10)
# Initialize Robot
self.initialize_robot()
# Initialize TAMP
self.initialize_tamp()
# Reset dataset
self.reset_dataset()
def reset_dataset(self):
self.measured_ee_traj = []
self.measured_joint_traj = []
def initialize_robot(self):
self.check_status()
# Set gripper to configuration position
self.hsr_interface.open_gripper()
# Set arm to configuration position
self.hsr_interface.initialize_arm()
# Set base to configuration position
self.hsr_interface.initialize_base()
def initialize_tamp(self):
# Get object poses
object_poses = self.mocap_interface.get_poses()
# Get robot poses
self.robot_joints = ['joint_x', 'joint_y', 'joint_rz', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
robot_poses = self.hsr_interface.get_joint_positions()
# Initialize tamp simulator
observations = (robot_poses, object_poses)
self.tamp_planner.initialize(observations)
def set_base_pose(self, base_pose):
base_traj = JointTrajectory()
base_traj.joint_names = ['odom_x', 'odom_y', 'odom_t']
# Set base trajectory
assert len(base_pose) == 3, "Does not match the size of base pose"
base_p = JointTrajectoryPoint()
base_p.positions = base_pose
base_p.velocities = np.zeros(len(base_pose))
base_p.time_from_start = rospy.Duration(1)
base_traj.points = [base_p]
return base_traj
def set_arm_pose(self, arm_pose):
arm_traj = JointTrajectory()
arm_traj.joint_names = ['arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
# Set arm trajectory
        assert len(arm_pose) == 5, "Does not match the size of arm pose"
arm_p = JointTrajectoryPoint()
arm_p.positions = arm_pose
arm_p.velocities = np.zeros(len(arm_pose))
arm_p.time_from_start = rospy.Duration(1)
arm_traj.points = [arm_p]
return arm_traj
def check_status(self):
        # Wait until the publishers are connected
while self.base_pub.get_num_connections() == 0:
rospy.sleep(0.1)
while self.arm_pub.get_num_connections() == 0:
rospy.sleep(0.1)
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args):
# Modify plan
action_name, modified_action = self.path_modifier.post_process(action_name, args)
return action_name, modified_action
def calculate_base_command(self, target_pose):
curr_pose = self.hsr_interface.get_joint_positions(group='base')
curr_vel = self.hsr_interface.get_joint_velocities(group='base')
delta_pose = np.array(target_pose) - np.array(curr_pose)
kp = 1.0
D = 1.0
kd = 2 * np.sqrt(kp) * D
command = kp * delta_pose + kd * curr_vel
command += np.array(curr_pose)
return command
def calculate_arm_command(self, target_pose):
curr_pose = self.hsr_interface.get_joint_positions(group='arm')
curr_vel = self.hsr_interface.get_joint_velocities(group='arm')
delta_pose = np.array(target_pose) - np.array(curr_pose)
kp = 1.0
D = 1.0
kd = 2 * np.sqrt(kp) * D
command = kp * delta_pose + kd * curr_vel
command += np.array(curr_pose)
return command
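    # Both calculate_*_command methods apply a PD-style correction around the
    # current configuration:
    #     command = q + kp * (q_target - q) + kd * q_dot,  kd = 2 * sqrt(kp) * D
    # (critically damped for D = 1). A textbook PD law applies the damping with
    # a negative sign (-kd * q_dot) so that it opposes the current motion.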
def execute(self):
plan = self.plan()
if plan is None:
return None
for i, (action_name, args) in enumerate(plan):
# Post process TAMP commands to hsr executable actions
action_name, modified_action = self.process(action_name, args)
if action_name == 'move_base':
for target_pose in modified_action:
target_base_pose = self.calculate_base_command(target_pose[:3])
base_traj = self.set_base_pose(target_base_pose)
self.base_pub.publish(base_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
elif action_name == 'pick':
pick_traj, return_traj = modified_action
for target_pose in pick_traj: # pick
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
# rospy.sleep(5.0)
# self.hsr_interface.close_gripper()
for target_pose in return_traj: # return
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
elif action_name == 'place':
place_traj = modified_action
for target_pose in place_traj: # place
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
elif action_name == 'insert':
insert_traj, depart_traj, return_traj = modified_action
for target_pose in insert_traj: # insert
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
# rospy.sleep(5.0)
# self.hsr_interface.open_gripper()
for target_pose in depart_traj: # depart
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
for target_pose in return_traj: # return
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
else:
continue
self.save_traj()
def save_traj(self):
        np.save('measured_ee_traj', self.measured_ee_traj)
        np.save('measured_joint_traj', self.measured_joint_traj)
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 12,401 |
Python
| 38.496815 | 113 | 0.569148 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/feedback/post_process.py
|
#!/usr/bin/env/python3
import numpy as np
class PlanModifier(object):
def __init__(self):
self.block_rigid_map = {
'A' : 'red_shaft',
'B' : 'green_gear',
'C' : 'yellow_shaft',
'D' : 'blue_gear',
'E' : 'red_gear'
}
def post_process(self, action_name, args):
"""
        Convert a TAMP action into robot-executable trajectories.

        Args:
            action_name (str): name of the TAMP action
                ('move_base', 'pick', 'place', or 'insert').
            args (tuple): action arguments returned by the TAMP planner,
                including the trajectory commands to extract.

        Returns:
            new_command (tuple): (action_name, trajectories) with the
                extracted joint-space paths.
"""
if action_name == 'move_base':
# Parse TAMP returns
start_pose, end_pose, traj = args
base_traj = []
for commands in traj.commands:
for path in commands.path:
base_traj.append(path.values)
new_command = (action_name, base_traj)
elif action_name == 'pick':
arm, block, init_block_pose, grasp_pose, term_robot_pose, traj = args
[traj_pick] = traj.commands
pick_traj = []
for path in traj_pick.path:
pick_traj.append(path.values)
return_traj = []
for path in traj_pick.reverse().path:
return_traj.append(path.values)
new_command = (action_name, (pick_traj, return_traj))
elif action_name == 'place':
arm, block1, block2, init_block_pose, grasp_pose, term_robot_pose, traj = args
[traj_place] = traj.commands
place_traj = []
for path in traj_place.path:
place_traj.append(path.values)
new_command = (action_name, (place_traj))
elif action_name == 'insert':
arm, block1, block2, block_pose1, block_pose2, grasp_pose, _, _, traj = args
[traj_insert, traj_depart, traj_return] = traj.commands
insert_traj = []
for path in traj_insert.path:
insert_traj.append(path.values)
depart_traj = []
for path in traj_depart.path:
depart_traj.append(path.values)
return_traj = []
for path in traj_return.reverse().path:
return_traj.append(path.values)
new_command = (action_name, (insert_traj, depart_traj, return_traj))
else:
            raise NotImplementedError(f'Unsupported action: {action_name}')
return new_command
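# Shape of the returned command, as unpacked by ExecutePlan.execute():
#   ('move_base', base_traj)                          list of base waypoints
#   ('pick',      (pick_traj, return_traj))           forward and reverse paths
#   ('place',     place_traj)
#   ('insert',    (insert_traj, depart_traj, return_traj))
# Each *_traj is a list of joint-value lists (base x, y, rz followed by the
# five arm joints).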
| 2,576 |
Python
| 30.426829 | 92 | 0.522904 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/feedback/execute_plan.py
|
#!/usr/bin/env/python3
import sys
import rospy
import numpy as np
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_3d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from hsr_interface import HSRInterface
from force_sensor_interface import ForceSensorInterface
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
class ExecutePlan(object):
def __init__(self):
# Initialize ROS node
rospy.init_node('execute_tamp_plan')
# Feedback rate
self.control_freq = 3
self.rate = rospy.Rate(self.control_freq)
# Core module
self.tamp_planner = TAMPPlanner()
self.hsr_interface = HSRInterface()
self.tf_manager = TfManager()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
self.ft_interface = ForceSensorInterface()
# Publisher
self.arm_pub = rospy.Publisher('/hsrb/arm_trajectory_controller/command', JointTrajectory, queue_size=10)
self.base_pub = rospy.Publisher('/hsrb/omni_base_controller/command', JointTrajectory, queue_size=10)
# Initialize Robot
self.initialize_robot()
# Initialize TAMP
self.initialize_tamp()
def initialize_robot(self):
self.check_status()
# Set gripper to configuration position
self.hsr_interface.open_gripper()
# Set arm to configuration position
self.hsr_interface.initialize_arm()
# Set base to configuration position
self.hsr_interface.initialize_base()
def initialize_tamp(self):
# Get object poses
object_poses = self.mocap_interface.get_poses()
# Get robot poses
self.robot_joints = ['joint_x', 'joint_y', 'joint_rz', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
robot_poses = self.hsr_interface.get_joint_positions()
# Initialize tamp simulator
observations = (robot_poses, object_poses)
self.tamp_planner.initialize(observations)
def set_base_pose(self, base_pose):
base_traj = JointTrajectory()
base_traj.joint_names = ['odom_x', 'odom_y', 'odom_t']
# Set base trajectory
assert len(base_pose) == 3, "Does not match the size of base pose"
base_p = JointTrajectoryPoint()
base_p.positions = base_pose
base_p.velocities = np.zeros(len(base_pose))
base_p.time_from_start = rospy.Duration(1)
base_traj.points = [base_p]
return base_traj
def set_arm_pose(self, arm_pose):
arm_traj = JointTrajectory()
arm_traj.joint_names = ['arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
# Set arm trajectory
        assert len(arm_pose) == 5, "Does not match the size of arm pose"
arm_p = JointTrajectoryPoint()
arm_p.positions = arm_pose
arm_p.velocities = np.zeros(len(arm_pose))
arm_p.time_from_start = rospy.Duration(1)
arm_traj.points = [arm_p]
return arm_traj
def check_status(self):
        # Wait until the publishers are connected
while self.base_pub.get_num_connections() == 0:
rospy.sleep(0.1)
while self.arm_pub.get_num_connections() == 0:
rospy.sleep(0.1)
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args):
# Modify plan
action_name, modified_action = self.path_modifier.post_process(action_name, args)
return action_name, modified_action
def calculate_base_command(self, target_pose):
curr_pose = self.hsr_interface.get_joint_positions(group='base')
curr_vel = self.hsr_interface.get_joint_velocities(group='base')
diff_pose = np.array(target_pose) - np.array(curr_pose)
diff_vel = np.array(curr_vel)
kp = 1.0
D = 0.03
kd = 2 * np.sqrt(kp) * D
command = kp * diff_pose + kd * diff_vel
command += np.array(curr_pose)
return command
def calculate_arm_command(self, target_pose):
curr_pose = self.hsr_interface.get_joint_positions(group='arm')
curr_vel = self.hsr_interface.get_joint_velocities(group='arm')
diff_pose = np.array(target_pose) - np.array(curr_pose)
diff_vel = np.array(curr_vel)
kp = 1.0
D = 0.03
kd = 2 * np.sqrt(kp) * D
command = kp * diff_pose + kd * diff_vel
command += np.array(curr_pose)
return command
def execute(self):
plan = self.plan()
if plan is None:
return None
for i, (action_name, args) in enumerate(plan):
# Post process TAMP commands to hsr executable actions
action_name, modified_action = self.process(action_name, args)
if action_name == 'move_base':
for target_pose in modified_action:
target_base_pose = self.calculate_base_command(target_pose[:3])
base_traj = self.set_base_pose(target_base_pose)
self.base_pub.publish(base_traj)
self.rate.sleep()
elif action_name == 'pick':
pick_traj, return_traj = modified_action
for target_pose in pick_traj: # pick
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
rospy.sleep(3.0)
self.hsr_interface.close_gripper()
for target_pose in return_traj: # return
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
elif action_name == 'place':
place_traj = modified_action
for target_pose in place_traj: # place
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
elif action_name == 'insert':
insert_traj, depart_traj, return_traj = modified_action
for target_pose in insert_traj: # insert
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
rospy.sleep(3.0)
self.hsr_interface.open_gripper()
for target_pose in depart_traj: # depart
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
for target_pose in return_traj: # return
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
else:
continue
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 9,073 |
Python
| 37.777778 | 113 | 0.575223 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/feedforward/test_plan.py
|
#!/usr/bin/env/python3
import sys
import rospy
import numpy as np
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_3d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from hsr_interface import HSRInterface
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
class ExecutePlan(object):
def __init__(self):
# Initialize ROS node
rospy.init_node('execute_tamp_plan')
self.rate = rospy.Rate(3)
# Core module
self.tamp_planner = TAMPPlanner()
self.hsr_interface = HSRInterface()
self.tf_manager = TfManager()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
# Publisher
self.arm_pub = rospy.Publisher('/hsrb/arm_trajectory_controller/command', JointTrajectory, queue_size=10)
self.base_pub = rospy.Publisher('/hsrb/omni_base_controller/command', JointTrajectory, queue_size=10)
# Initialize Robot
self.initialize_robot()
# Initialize TAMP
self.initialize_tamp()
self.reset_dataset()
def reset_dataset(self):
self.measured_ee_traj = []
self.true_ee_traj = []
self.measured_joint_traj = []
def initialize_robot(self):
self.check_status()
# Set gripper to configuration position
self.hsr_interface.open_gripper()
# Set arm to configuration position
self.hsr_interface.initialize_arm()
# Set base to configuration position
self.hsr_interface.initialize_base()
def initialize_tamp(self):
# Get object poses
object_poses = self.mocap_interface.get_poses()
# Get robot poses
self.robot_joints = ['joint_x', 'joint_y', 'joint_rz', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
robot_poses = self.hsr_interface.get_joint_positions()
# Initialize tamp simulator
observations = (robot_poses, object_poses)
self.tamp_planner.initialize(observations)
def set_base_pose(self, base_pose):
base_traj = JointTrajectory()
base_traj.joint_names = ['odom_x', 'odom_y', 'odom_t']
# Set base trajectory
assert len(base_pose) == 3, "Does not match the size of base pose"
base_p = JointTrajectoryPoint()
base_p.positions = base_pose
base_p.velocities = np.zeros(len(base_pose))
base_p.time_from_start = rospy.Duration(1)
base_traj.points = [base_p]
return base_traj
def set_arm_pose(self, arm_pose):
arm_traj = JointTrajectory()
arm_traj.joint_names = ['arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
# Set arm trajectory
        assert len(arm_pose) == 5, "Does not match the size of arm pose"
arm_p = JointTrajectoryPoint()
arm_p.positions = arm_pose
arm_p.velocities = np.zeros(len(arm_pose))
arm_p.time_from_start = rospy.Duration(1)
arm_traj.points = [arm_p]
return arm_traj
def check_status(self):
        # Wait until the publishers are connected
while self.base_pub.get_num_connections() == 0:
rospy.sleep(0.1)
while self.arm_pub.get_num_connections() == 0:
rospy.sleep(0.1)
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args):
# Modify plan
action_name, modified_action = self.path_modifier.post_process(action_name, args)
return action_name, modified_action
def execute(self):
plan = self.plan()
if plan is None:
return None
def make_tuple(pose_msg):
return ((pose_msg.pose.position.x, pose_msg.pose.position.y, pose_msg.pose.position.z),
(pose_msg.pose.orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.orientation.z, pose_msg.pose.orientation.w))
for num_trial in range(5):
for i, (action_name, args) in enumerate(plan):
# Post process TAMP commands to hsr executable actions
action_name, modified_action = self.process(action_name, args)
if action_name == 'move_base':
for target_pose in modified_action:
base_traj = self.set_base_pose(target_pose)
self.base_pub.publish(base_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
true_ee_pose = self.mocap_interface.get_pose('end_effector')
self.true_ee_traj.append(make_tuple(true_ee_pose))
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
elif action_name == 'pick':
pick_traj, return_traj = modified_action
for target_pose in pick_traj: # forward
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
true_ee_pose = self.mocap_interface.get_pose('end_effector')
self.true_ee_traj.append(make_tuple(true_ee_pose))
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
# rospy.sleep(5.0)
# self.hsr_interface.close_gripper()
for target_pose in return_traj: # reverse
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
true_ee_pose = self.mocap_interface.get_pose('end_effector')
self.true_ee_traj.append(make_tuple(true_ee_pose))
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
elif action_name == 'place':
place_traj = modified_action
for target_pose in place_traj: # forward
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
true_ee_pose = self.mocap_interface.get_pose('end_effector')
self.true_ee_traj.append(make_tuple(true_ee_pose))
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
elif action_name == 'insert':
insert_traj, depart_traj, return_traj = modified_action
for target_pose in insert_traj: # insert
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
true_ee_pose = self.mocap_interface.get_pose('end_effector')
self.true_ee_traj.append(make_tuple(true_ee_pose))
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
# rospy.sleep(5.0)
# self.hsr_interface.open_gripper()
for target_pose in depart_traj: # depart
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
true_ee_pose = self.mocap_interface.get_pose('end_effector')
self.true_ee_traj.append(make_tuple(true_ee_pose))
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
for target_pose in return_traj: # return
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
# Get measured/true EE traj
measured_ee_pose = self.hsr_interface.get_link_pose('hand_palm_link')
self.measured_ee_traj.append(measured_ee_pose)
true_ee_pose = self.mocap_interface.get_pose('end_effector')
self.true_ee_traj.append(make_tuple(true_ee_pose))
# Get measured joint traj
measured_joint_pos = self.hsr_interface.get_joint_positions()
self.measured_joint_traj.append(measured_joint_pos)
self.rate.sleep()
else:
continue
self.save_traj(num_trial)
self.reset_dataset()
self.hsr_interface.initialize_base()
self.hsr_interface.initialize_arm()
def save_traj(self, num_trial):
np.save(f'measured_ee_traj_{num_trial}', self.measured_ee_traj)
np.save(f'measured_joint_traj_{num_trial}', self.measured_joint_traj)
# np.save(f'true_ee_traj_{num_trial}', self.true_ee_traj)
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 12,155 |
Python
| 39.655518 | 137 | 0.541999 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/feedforward/post_process.py
|
#!/usr/bin/env/python3
import numpy as np
class PlanModifier(object):
def __init__(self):
self.block_rigid_map = {
'A' : 'shaft1',
'B' : 'gear1',
'C' : 'shaft2',
'D' : 'gear2',
'E' : 'gear3'
}
def post_process(self, action_name, args):
"""
        Convert a TAMP action into robot-executable trajectories.

        Args:
            action_name (str): name of the TAMP action
                ('move_base', 'pick', 'place', or 'insert').
            args (tuple): action arguments returned by the TAMP planner,
                including the trajectory commands to extract.

        Returns:
            new_command (tuple): (action_name, trajectories) with the
                extracted joint-space paths.
"""
if action_name == 'move_base':
# Parse TAMP returns
start_pose, end_pose, traj = args
base_traj = []
for commands in traj.commands:
for path in commands.path:
base_traj.append(path.values)
new_command = (action_name, base_traj)
elif action_name == 'pick':
arm, block, init_block_pose, grasp_pose, term_robot_pose, traj = args
[traj_pick] = traj.commands
pick_traj = []
for path in traj_pick.path:
pick_traj.append(path.values)
return_traj = []
for path in traj_pick.reverse().path:
return_traj.append(path.values)
new_command = (action_name, (pick_traj, return_traj))
elif action_name == 'place':
arm, block1, block2, init_block_pose, grasp_pose, term_robot_pose, traj = args
[traj_place] = traj.commands
place_traj = []
for path in traj_place.path:
place_traj.append(path.values)
new_command = (action_name, (place_traj))
elif action_name == 'insert':
arm, block1, block2, block_pose1, block_pose2, grasp_pose, _, _, traj = args
[traj_insert, traj_depart, traj_return] = traj.commands
insert_traj = []
for path in traj_insert.path:
insert_traj.append(path.values)
depart_traj = []
for path in traj_depart.path:
depart_traj.append(path.values)
return_traj = []
for path in traj_return.reverse().path:
return_traj.append(path.values)
new_command = (action_name, (insert_traj, depart_traj, return_traj))
else:
            raise NotImplementedError(f'Unsupported action: {action_name}')
return new_command
| 2,555 |
Python
| 30.170731 | 92 | 0.520939 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/feedforward/execute_plan.py
|
#!/usr/bin/env/python3
import sys
import rospy
import numpy as np
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_3d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from hsr_interface import HSRInterface
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
class ExecutePlan(object):
def __init__(self):
# Initialize ROS node
rospy.init_node('execute_tamp_plan')
# Control rate
self.rate = rospy.Rate(3)
# Core module
self.tamp_planner = TAMPPlanner()
self.hsr_interface = HSRInterface()
self.tf_manager = TfManager()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
# Publisher
self.arm_pub = rospy.Publisher('/hsrb/arm_trajectory_controller/command', JointTrajectory, queue_size=10)
self.base_pub = rospy.Publisher('/hsrb/omni_base_controller/command', JointTrajectory, queue_size=10)
# Initialize Robot
self.initialize_robot()
# Initialize TAMP
self.initialize_tamp()
def initialize_robot(self):
self.check_status()
# Set gripper to configuration position
self.hsr_interface.open_gripper()
# Set arm to configuration position
self.hsr_interface.initialize_arm()
# Set base to configuration position
self.hsr_interface.initialize_base()
def initialize_tamp(self):
# Get object poses
object_poses = self.mocap_interface.get_poses()
# Get robot poses
robot_poses = self.hsr_interface.get_joint_positions()
# Initialize tamp simulator
observations = (robot_poses, object_poses)
self.tamp_planner.initialize(observations)
def set_base_pose(self, base_pose):
base_traj = JointTrajectory()
base_traj.joint_names = ['odom_x', 'odom_y', 'odom_t']
# Set base trajectory
assert len(base_pose) == 3, "Does not match the size of base pose"
base_p = JointTrajectoryPoint()
base_p.positions = base_pose
base_p.velocities = np.zeros(len(base_pose))
base_p.time_from_start = rospy.Duration(1)
base_traj.points = [base_p]
return base_traj
def set_arm_pose(self, arm_pose):
arm_traj = JointTrajectory()
arm_traj.joint_names = ['arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
# Set arm trajectory
        assert len(arm_pose) == 5, "Does not match the size of arm pose"
arm_p = JointTrajectoryPoint()
arm_p.positions = arm_pose
arm_p.velocities = np.zeros(len(arm_pose))
arm_p.time_from_start = rospy.Duration(1)
arm_traj.points = [arm_p]
return arm_traj
def check_status(self):
        # Wait until the publishers are connected
while self.base_pub.get_num_connections() == 0:
rospy.sleep(0.1)
while self.arm_pub.get_num_connections() == 0:
rospy.sleep(0.1)
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args):
# Modify plan
action_name, modified_action = self.path_modifier.post_process(action_name, args)
return action_name, modified_action
def execute(self):
plan = self.plan()
if plan is None:
return None
for i, (action_name, args) in enumerate(plan):
# Post process TAMP commands to hsr executable actions
action_name, modified_action = self.process(action_name, args)
if action_name == 'move_base':
for target_pose in modified_action:
base_traj = self.set_base_pose(target_pose)
self.base_pub.publish(base_traj)
self.rate.sleep()
elif action_name == 'pick':
pick_traj, return_traj = modified_action
for target_pose in pick_traj: # pick
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
rospy.sleep(5.0)
self.hsr_interface.close_gripper()
for target_pose in return_traj: # return
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
elif action_name == 'place':
place_traj = modified_action
for target_pose in place_traj: # place
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
elif action_name == 'insert':
insert_traj, depart_traj, return_traj = modified_action
for target_pose in insert_traj: # insert
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
rospy.sleep(5.0)
self.hsr_interface.open_gripper()
for target_pose in depart_traj: # depart
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
for target_pose in return_traj: # return
base_traj = self.set_base_pose(target_pose[:3])
arm_traj = self.set_arm_pose(target_pose[3:])
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
else:
continue
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 6,688 |
Python
| 34.020942 | 113 | 0.566836 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/controller/ik_solver.py
|
import os
import sys
import glob
import random
import numpy as np
from scipy.spatial.transform import Rotation as R
from utils import multiply, invert, all_between, compute_forward_kinematics, \
compute_inverse_kinematics, select_solution, USE_ALL, USE_CURRENT
from hsrb_utils import get_link_pose, get_joint_positions, get_custom_limits
BASE_FRAME = 'base_footprint'
TORSO_JOINT = 'torso_lift_joint'
ROTATION_JOINT = 'joint_rz'
LIFT_JOINT = 'arm_lift_joint'
HSR_TOOL_FRAMES = {'arm': 'hand_palm_link'}
IK_FRAME = {'arm': 'hand_palm_link'}
def get_ik_lib():
lib_path = os.environ['PYTHONPATH'].split(':')[1] # TODO: modify
ik_lib_path = glob.glob(os.path.join(lib_path, '**/hsrb'), recursive=True)
return ik_lib_path[0]
#####################################
def get_tool_pose(arm, curr_conf=None):
sys.path.append(get_ik_lib())
from ikArm import armFK
arm_fk = {'arm': armFK}
ik_joints = ['world_joint', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
    conf = get_joint_positions(ik_joints) if curr_conf is None else curr_conf
    assert len(conf) == 8
base_from_tool = compute_forward_kinematics(arm_fk[arm], conf)
world_from_base = get_link_pose(BASE_FRAME)
return multiply(world_from_base, base_from_tool)
#####################################
def get_ik_generator(arm, ik_pose, custom_limits={}):
sys.path.append(get_ik_lib())
from ikArm import armIK
arm_ik = {'arm': armIK}
base_joints = ['odom_x', 'odom_y', 'odom_t']
arm_joints = ['arm_lift_joint', 'arm_flex_joint', 'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
min_limits, max_limits = get_custom_limits(base_joints, arm_joints, custom_limits)
arm_rot = R.from_quat(ik_pose[1]).as_euler('xyz')[0]
sampled_limits = [(arm_rot-np.pi, arm_rot-np.pi), (0.0, 0.34)]
while True:
sampled_values = [random.uniform(*limits) for limits in sampled_limits]
confs = compute_inverse_kinematics(arm_ik[arm], ik_pose, sampled_values)
solutions = [q for q in confs if all_between(min_limits, q, max_limits)]
yield solutions
if all(lower == upper for lower, upper in sampled_limits):
break
def get_tool_from_ik(arm):
world_from_tool = get_link_pose(HSR_TOOL_FRAMES[arm])
world_from_ik = get_link_pose(IK_FRAME[arm])
return multiply(invert(world_from_tool), world_from_ik)
def sample_tool_ik(arm, tool_pose, nearby_conf=USE_CURRENT, max_attempts=100, **kwargs):
generator = get_ik_generator(arm, tool_pose, **kwargs)
whole_body_joints = ['world_joint', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
for _ in range(max_attempts):
try:
solutions = next(generator)
if solutions:
return select_solution(whole_body_joints, solutions, nearby_conf=nearby_conf)
except StopIteration:
break
return None
def hsr_inverse_kinematics(arm, gripper_pose, custom_limits={}, **kwargs):
base_arm_conf = sample_tool_ik(arm, gripper_pose, custom_limits=custom_limits, **kwargs)
if base_arm_conf is None:
return None
return base_arm_conf
if __name__ == '__main__':
# test forward kinematics
fk_pose = get_tool_pose('arm')
print('fk_pose:', fk_pose)
# test inverse kinematics
import numpy as np
pos_x = 0.0
pos_y = 0.0
pos_z = 0.6
    forward = (0.70710678, 0.0, 0.70710678, 0.0)
back = (0.0, -0.70710678, 0.0, 0.70710678)
right = (0.5, -0.5, 0.5, 0.5)
left = (0.5, 0.5, 0.5, -0.5)
    pose = ((pos_x, pos_y, pos_z), forward)
print('pose:', pose)
ik_pose = hsr_inverse_kinematics('arm', pose)
print('ik_pose:', ik_pose)
# test inverse kinematics generator
import time
for i in range(100):
start = time.time()
pose_x = 2.5
pose_y = 2.0
pose_z = 0.6
tool_pose = ((pose_x, pose_y, pose_z), (0.707107, 0.0, 0.707107, 0.0))
generator = get_ik_generator('arm', tool_pose)
solutions = next(generator)
print(solutions)
print('Loop Hz:', 1/(time.time()-start))
| 4,228 |
Python
| 33.382114 | 111 | 0.614238 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/controller/hsrb_utils.py
|
import rospy
import numpy as np
import hsrb_interface
from scipy.spatial.transform import Rotation as R
robot = hsrb_interface.Robot()
base = robot.get('omni_base')
gripper = robot.get('gripper')
whole_body = robot.get('whole_body')
def get_link_pose(link):
tf_pose = whole_body._tf2_buffer.lookup_transform('map', link, rospy.Time(0))
link_pose = ((tf_pose.transform.translation.x,
tf_pose.transform.translation.y,
tf_pose.transform.translation.z),
(tf_pose.transform.rotation.x,
tf_pose.transform.rotation.y,
tf_pose.transform.rotation.z,
tf_pose.transform.rotation.w))
return link_pose
def get_joint_limits(joint):
if joint == 'odom_x':
limit = (-10.0, 10.0)
elif joint == 'odom_y':
limit = (-10.0, 10.0)
elif joint == 'odom_t':
limit = (-10.0, 10.0)
else:
limit = whole_body.joint_limits[joint]
return limit
def get_custom_limits(base_joints, arm_joints, custom_limits={}):
joint_limits = []
for joint in base_joints:
if joint in custom_limits:
joint_limits.append(custom_limits[joint])
else:
joint_limits.append(get_joint_limits(joint))
for joint in arm_joints:
if joint in custom_limits:
joint_limits.append(custom_limits[joint])
else:
joint_limits.append(get_joint_limits(joint))
return zip(*joint_limits)
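# Typical usage (mirrors ik_solver.get_ik_generator): unpack the zipped limits
# into parallel min/max tuples covering the base joints first, then the arm
# joints.
#   min_limits, max_limits = get_custom_limits(base_joints, arm_joints)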
def get_distance(p1, p2, **kwargs):
assert len(p1) == len(p2)
diff = np.array(p2) - np.array(p1)
return np.linalg.norm(diff, ord=2)
def get_joint_position(joint):
if joint == 'world_joint':
joint_position = base.pose
else:
joint_position = whole_body.joint_positions[joint]
return joint_position
def get_joint_positions(joints):
    joint_positions = []
    for joint in joints:
if joint == 'world_joint':
base_pose = base._tf2_buffer.lookup_transform('map', 'base_footprint', rospy.Time(0))
joint_positions.append(base_pose.transform.translation.x)
joint_positions.append(base_pose.transform.translation.y)
base_quat = np.array([base_pose.transform.rotation.x,
base_pose.transform.rotation.y,
base_pose.transform.rotation.z,
base_pose.transform.rotation.w])
base_rz = R.from_quat(base_quat).as_euler('xyz')[2]
joint_positions.append(base_rz)
else:
joint_positions.append(whole_body.joint_positions[joint])
return joint_positions
if __name__ == '__main__':
# Test get_link_pose
base_link_pose = get_link_pose('base_footprint')
print('base_link_pose:', base_link_pose)
hand_palm_link_pose = get_link_pose('hand_palm_link')
print('hand_palm_link_pose:', hand_palm_link_pose)
# Test get_custom_limits
base_joints = ['odom_x', 'odom_y', 'odom_t']
arm_joints = ['arm_lift_joint', 'arm_flex_joint', 'arm_roll_joint',
'wrist_flex_joint', 'wrist_roll_joint']
custom_limits = get_custom_limits(base_joints, arm_joints)
print('custom_limits:', custom_limits)
# Test get_joint_limits
for b_joint in base_joints:
joint_limit = get_joint_limits(b_joint)
print('joint_limit:', joint_limit)
for a_joint in arm_joints:
joint_limit = get_joint_limits(a_joint)
print('joint_limit:', joint_limit)
# Test get_joint_position
ik_joints = ['world_joint', 'arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
for joint in ik_joints:
joint_position = get_joint_position(joint)
print('joint_position:', joint_position)
# Test get_joint_positions
joint_positions = get_joint_positions(ik_joints)
print('joint_positions:', joint_positions)
| 3,904 |
Python
| 35.495327 | 97 | 0.615523 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/controller/ik_controller.py
|
import rospy
import numpy as np
from controller import Controller
from ik_solver import get_tool_pose, get_ik_generator, hsr_inverse_kinematics
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
class IKController(Controller):
def __init__(self):
super(IKController, self).__init__()
# Publisher
self.arm_pub = rospy.Publisher('/hsrb/arm_trajectory_controller/command', JointTrajectory, queue_size=10)
self.base_pub = rospy.Publisher('/hsrb/omni_base_controller/command', JointTrajectory, queue_size=10)
        # Wait until the publishers are connected
while self.base_pub.get_num_connections() == 0:
rospy.sleep(0.1)
while self.arm_pub.get_num_connections() == 0:
rospy.sleep(0.1)
def set_pose(self, base_pose, joint_pose):
base_traj = JointTrajectory()
arm_traj = JointTrajectory()
base_traj.joint_names = ['odom_x', 'odom_y', 'odom_t']
arm_traj.joint_names = ['arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
# Set base trajectory
assert len(base_pose) == 3, "Does not match the size of base pose"
base_p = JointTrajectoryPoint()
base_p.positions = base_pose
base_p.velocities = np.zeros(len(base_pose))
base_p.time_from_start = rospy.Duration(1)
base_traj.points = [base_p]
# Set arm trajectory
        assert len(joint_pose) == 5, "Does not match the size of arm pose"
arm_p = JointTrajectoryPoint()
arm_p.positions = joint_pose
arm_p.velocities = np.zeros(len(joint_pose))
arm_p.time_from_start = rospy.Duration(1)
arm_traj.points = [arm_p]
return base_traj, arm_traj
def control(self, pose):
# Inverse kinematics
ik_pose = hsr_inverse_kinematics('arm', pose) # pose must be contain (pos, quat)
if ik_pose is None:
return
else:
base_pose, arm_pose = ik_pose[:3], ik_pose[3:]
# Set target pose
base_traj, arm_traj = self.set_pose(base_pose, arm_pose)
# Publish target pose
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
if __name__ == '__main__':
ik_controller = IKController()
rate = rospy.Rate(50)
pose_x = 0.0
pose_y = 0.0
pose_z = 0.5
    forward = (0.70710678, 0.0, 0.70710678, 0.0)
back = (0.0, -0.70710678, 0.0, 0.70710678)
right = (0.5, -0.5, 0.5, 0.5)
left = (0.5, 0.5, 0.5, -0.5)
while not rospy.is_shutdown():
# Test forward kinematics
fk_pose = get_tool_pose('arm')
# Test inverse kinematics
        tool_pose = ((pose_x, pose_y, pose_z), forward)
ik_controller.control(tool_pose)
# Sleep
rate.sleep()
| 2,846 |
Python
| 32.494117 | 113 | 0.596978 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/controller/controller.py
|
import rospy
import numpy as np
class Controller(object):
def __init__(self):
pass
def set_pose(self):
raise NotImplementedError("Implement set_pose method")
def control(self):
raise NotImplementedError("Implement control method")
| 271 |
Python
| 18.42857 | 62 | 0.671587 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/controller/utils.py
|
import random
import numpy as np
import pybullet as p
from collections import namedtuple
from scipy.spatial.transform import Rotation as R
from .hsrb_utils import get_joint_limits, get_joint_position, get_joint_positions, get_distance
IKFastInfo = namedtuple('IKFastInfo', ['module_name', 'base_link', 'ee_link', 'free_joints'])
USE_ALL = False
USE_CURRENT = None
############ Mathematics
def invert(pose):
point, quat = pose
return p.invertTransform(point, quat) # TODO: modify
def multiply(*poses):
pose = poses[0]
for next_pose in poses[1:]:
pose = p.multiplyTransforms(pose[0], pose[1], *next_pose) # TODO: modify
return pose
##############
def all_between(lower_limits, values, upper_limits):
assert len(lower_limits) == len(values)
assert len(values) == len(upper_limits)
return np.less_equal(lower_limits, values).all() and \
np.less_equal(values, upper_limits).all()
def compute_forward_kinematics(fk_fn, conf):
pose = fk_fn(list(conf))
pos, rot = pose
quat = R.from_matrix(rot).as_quat()
return pos, quat
def compute_inverse_kinematics(ik_fn, pose, sampled=[]):
pos, quat = pose[0], pose[1]
rot = R.from_quat(quat).as_matrix().tolist()
if len(sampled) == 0:
solutions = ik_fn(list(rot), list(pos))
else:
solutions = ik_fn(list(rot), list(pos), list(sampled))
if solutions is None:
return []
return solutions
def get_ik_limits(joint, limits=USE_ALL):
if limits is USE_ALL:
return get_joint_limits(joint)
elif limits is USE_CURRENT:
value = get_joint_position(joint)
return value, value
return limits
def select_solution(joints, solutions, nearby_conf=USE_ALL, **kwargs):
if not solutions:
return None
if nearby_conf is USE_ALL:
return random.choice(solutions)
if nearby_conf is USE_CURRENT:
nearby_conf = get_joint_positions(joints)
return min(solutions, key=lambda conf: get_distance(nearby_conf, conf, **kwargs))
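# Example: pick the IK solution closest to the current configuration
# (nearby_conf=USE_CURRENT), as done by sample_tool_ik in ik_solver.py:
#   best = select_solution(joints, solutions, nearby_conf=USE_CURRENT)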
| 2,034 |
Python
| 26.133333 | 95 | 0.656834 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/rl_policy/post_process.py
|
#!/usr/bin/env/python3
import numpy as np
class PlanModifier(object):
def __init__(self):
self.block_rigid_map = {
'A' : 'red_shaft',
'B' : 'green_gear',
'C' : 'yellow_shaft',
'D' : 'blue_gear',
'E' : 'red_gear'
}
def post_process(self, action_name, object_names, args):
"""
        Convert a TAMP action into robot-executable trajectories.

        Args:
            action_name (str): name of the TAMP action
                ('move_base', 'pick', 'place', or 'insert').
            object_names (dict): mapping from TAMP block symbols to rigid-body
                names, used to tag each command with its target object.
            args (tuple): action arguments returned by the TAMP planner,
                including the trajectory commands to extract.

        Returns:
            new_command (tuple): (action_name, object_name, trajectories) with
                the extracted joint-space paths.
"""
if action_name == 'move_base':
# Parse TAMP returns
start_pose, end_pose, traj = args
base_traj = []
for commands in traj.commands:
for path in commands.path:
base_traj.append(path.values)
object_name = None
new_command = (action_name, object_name, base_traj)
elif action_name == 'pick':
arm, block, init_block_pose, grasp_pose, term_robot_pose, traj = args
[traj_pick] = traj.commands
pick_traj = []
for path in traj_pick.path:
pick_traj.append(path.values)
return_traj = []
for path in traj_pick.reverse().path:
return_traj.append(path.values)
object_name = object_names[block]
new_command = (action_name, object_name, (pick_traj, return_traj))
elif action_name == 'place':
arm, block1, block2, init_block_pose, grasp_pose, term_robot_pose, traj = args
[traj_place] = traj.commands
place_traj = []
for path in traj_place.path:
place_traj.append(path.values)
object_name = object_names[block1] # TODO: check whether block1 is correct
new_command = (action_name, object_name, (place_traj))
elif action_name == 'insert':
arm, block1, block2, block_pose1, block_pose2, grasp_pose, _, _, traj = args
[traj_insert, traj_depart, traj_return] = traj.commands
insert_traj = []
for path in traj_insert.path:
insert_traj.append(path.values)
depart_traj = []
for path in traj_depart.path:
depart_traj.append(path.values)
return_traj = []
for path in traj_return.reverse().path:
return_traj.append(path.values)
object_name = object_names[block1] # TODO: check whether block1 is correct
new_command = (action_name, object_name, (insert_traj, depart_traj, return_traj))
else:
            raise NotImplementedError(f'Unsupported action: {action_name}')
return new_command
| 2,898 |
Python
| 30.857143 | 93 | 0.536232 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/rl_policy/rl_agent.py
|
#!/usr/bin/env python3
import gym
import copy
import torch
import numpy as np
from rl_games.algos_torch import model_builder, torch_ext
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
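# Worked example: with low = -0.1 and high = 0.1, a normalized action of 0.5
# rescales to 0.5 * 0.1 + 0.0 = 0.05; -1 and +1 map exactly to the bounds.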
class ResidualRL(object):
def __init__(self, params):
builder = model_builder.ModelBuilder()
self.network = builder.load(params)
self.states = None
self.batch_size = 1
self.config = params['config']
self.clip_actions = self.config['clip_actions']
self.normalize_input = self.config['normalize_input']
self.normalize_value = self.config['normalize_value']
self.device = 'cuda'
self.num_actions = self.config['num_actions']
self.num_observations = self.config['num_observations']
self.action_space = gym.spaces.Box(np.ones(self.num_actions) * -1.0, np.ones(self.num_actions) * 1.0)
self.actions_num = self.action_space.shape[0]
self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
self.mask = [False]
        self.observation_space = gym.spaces.Box(np.ones(self.num_observations) * -np.inf, np.ones(self.num_observations) * np.inf)
obs_shape = self.observation_space.shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.config['num_actors'],
'value_size': self.config.get('value_size', 1),
'normalize_value': self.normalize_value,
'normalize_input': self.normalize_input,
}
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
    def get_action(self, obs, is_deterministic=True):
obs = self.unsqueeze_obs(obs)
obs = self.preprocess_obs(obs)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'rnn_states' : self.states
}
with torch.no_grad():
res_dict = self.model(input_dict)
mu = res_dict['mus']
action = res_dict['actions']
self.states = res_dict['rnn_states']
        if is_deterministic:
current_action = mu
else:
current_action = action
if self.clip_actions:
return rescale_actions(self.actions_low, self.actions_high, torch.clamp(current_action, -1.0, 1.0))
else:
return current_action
def unsqueeze_obs(self, obs):
if type(obs) is dict:
for k, v in obs.items():
obs[k] = self.unsqueeze_obs(v)
else:
if len(obs.size()) > 1 or obs.size()[0] > 1:
obs = obs.unsqueeze(0)
return obs
def preprocess_obs(self, obs):
if type(obs) is dict:
obs = copy.copy(obs)
for k, v in obs.items():
if v.dtype == torch.uint8:
obs[k] = v.float() / 255.0
else:
obs[k] = v.float()
else:
if obs.dtype == torch.uint8:
obs = obs.float() / 255.0
return obs
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.model.load_state_dict(checkpoint['model'])
if self.normalize_input and 'running_mean_std' in checkpoint:
self.model.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
def init_rnn(self):
if self.is_rnn:
rnn_states = self.model.get_default_rnn_state()
self.states = [torch.zeros((s.size()[0], self.batch_size, s.size()[2]),
dtype=torch.float32).to(self.device) for s in rnn_states]
def reset(self):
self.init_rnn()
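# Minimal usage sketch (assuming `params` comes from one of the *_config.yaml
# files under ./config and the checkpoint at params['load_path'] exists):
#   agent = ResidualRL(params)
#   agent.restore(params['load_path'])
#   agent.reset()
#   action = agent.get_action(obs)  # obs: torch.Tensor on the CUDA device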
| 4,007 |
Python
| 34.157894 | 130 | 0.564512 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/rl_policy/execute_plan.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python3
import os
import sys
import yaml
import rospy
import torch
import numpy as np
from typing import Union
from scipy.spatial.transform import Rotation as R
# TODO: fix
sys.path.append('/root/tamp-hsr/hsr_tamp/experiments/env_3d/')
sys.path.append('..')
from tamp_planner import TAMPPlanner
from hsr_interface import HSRInterface
from force_sensor_interface import ForceSensorInterface
from post_process import PlanModifier
from mocap_interface import MocapInterface
from tf_interface import TfManager
from rl_agent import ResidualRL
sys.path.append('/root/tamp-hsr/')
from hsr_rl.utils.hydra_cfg.reformat import omegaconf_to_dict
from hsr_rl.tasks.utils.pinoc_utils import HSRIKSolver
from hsr_rl.tasks.utils.ik_utils import DifferentialInverseKinematicsCfg, DifferentialInverseKinematics
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
def load_config(policy_name: str = 'pick'):
file_name = os.path.join('.', 'config', policy_name + '_config.yaml')
with open(file_name, 'r') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
return config
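# For example, load_config('pick') reads ./config/pick_config.yaml (relative to
# the working directory) with yaml.SafeLoader.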
def norm_diff_pos(p1: torch.Tensor, p2: torch.Tensor) -> torch.Tensor:
# Calculate norm
diff_norm = torch.norm(p1 - p2, p=2, dim=-1)
return diff_norm
def norm_diff_xy(p1: torch.Tensor, p2: torch.Tensor) -> torch.Tensor:
# Calculate norm
diff_norm = torch.norm(p1[:2] - p2[:2], p=2, dim=-1)
return diff_norm
def norm_diff_rot(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
    # Calculate norm; q and -q encode the same rotation (quaternion double
    # cover), so take the smaller of the two possible difference norms
    diff_norm1 = torch.norm(q1 - q2, p=2, dim=-1)
    diff_norm2 = torch.norm(q1 + q2, p=2, dim=-1)
    diff_norm = torch.min(diff_norm1, diff_norm2)
    return diff_norm
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
aw, ax, ay, az = torch.unbind(a, -1)
bw, bx, by, bz = torch.unbind(b, -1)
ow = aw * bw - ax * bx - ay * by - az * bz
ox = aw * bx + ax * bw + ay * bz - az * by
oy = aw * by - ax * bz + ay * bw + az * bx
oz = aw * bz + ax * by - ay * bx + az * bw
return torch.stack((ow, ox, oy, oz), -1)
def calc_diff_pos(p1, p2):
return p1 - p2
def calc_diff_rot(q1, q2):
q1 = normalize(q1)
q2 = normalize(q2)
scaling = torch.tensor([1, -1, -1, -1], device=q1.device)
q1_inv = q1 * scaling
q_diff = quaternion_multiply(q2, q1_inv)
return q_diff
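# Worked example: for identical rotations q1 == q2 == (1, 0, 0, 0), the scaled
# conjugate q1_inv is (1, 0, 0, 0) and quaternion_multiply(q2, q1_inv) returns
# the identity quaternion, i.e. zero relative rotation.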
class ExecutePlan(object):
def __init__(self, standalone=False):
# Initialize ROS node
rospy.init_node('execute_tamp_plan')
# Feedback rate
self.control_freq = 3.0
self.rate = rospy.Rate(self.control_freq)
self._device = 'cuda'
self._command_type = 'position_rel'
self._pd_control = False
self._dt = torch.tensor(1.0, device=self._device)
self._clip_obs = torch.tensor(5.0, device=self._device)
# Core module
self.tamp_planner = TAMPPlanner()
self.hsr_interface = HSRInterface()
self.tf_manager = TfManager()
self.path_modifier = PlanModifier()
self.mocap_interface = MocapInterface()
self.ft_interface = ForceSensorInterface()
self.hsr_ik_utils = HSRIKSolver()
# Publisher
self.arm_pub = rospy.Publisher('/hsrb/arm_trajectory_controller/command', JointTrajectory, queue_size=10)
self.base_pub = rospy.Publisher('/hsrb/omni_base_controller/command', JointTrajectory, queue_size=10)
pick_yaml = load_config('pick')
place_yaml = load_config('place')
insert_yaml = load_config('insert')
pick_policy_cfg = omegaconf_to_dict(pick_yaml)
place_policy_cfg = omegaconf_to_dict(place_yaml)
insert_policy_cfg = omegaconf_to_dict(insert_yaml)
# Skill based residual policy agents
self.pick_agent = ResidualRL(pick_policy_cfg.get("params"))
self.place_agent = ResidualRL(place_policy_cfg.get("params"))
self.insert_agent = ResidualRL(insert_policy_cfg.get("params"))
# Restore learned params
self.pick_agent.restore(pick_policy_cfg["params"]["load_path"])
self.place_agent.restore(place_policy_cfg["params"]["load_path"])
self.insert_agent.restore(insert_policy_cfg["params"]["load_path"])
# Get action scales for each skills
self.pick_action_scale = torch.tensor(pick_policy_cfg["params"]["config"]["action_scale"], device=self._device)
self.place_action_scale = torch.tensor(place_policy_cfg["params"]["config"]["action_scale"], device=self._device)
self.insert_action_scale = torch.tensor(insert_policy_cfg["params"]["config"]["action_scale"], device=self._device)
# Set ik controller
self.ik_controller = self.set_ik_controller()
# Initialize Robot
self.initialize_robot()
# Initialize TAMP
self.initialize_tamp()
def initialize_robot(self):
self.check_status()
# Set gripper to configuration position
self.hsr_interface.open_gripper()
# Set arm to configuration position
self.hsr_interface.initialize_arm()
# Set base to configuration position
self.hsr_interface.initialize_base()
def initialize_tamp(self):
# Get object poses
object_poses = self.mocap_interface.get_poses()
# Get robot poses
robot_poses = self.hsr_interface.get_joint_positions()
# Initialize tamp simulator
observations = (robot_poses, object_poses)
self.tamp_planner.initialize(observations)
#########################
#### ROS utils ####
#########################
def set_base_pose(self, base_pose):
base_traj = JointTrajectory()
base_traj.joint_names = ['odom_x', 'odom_y', 'odom_t']
# Set base trajectory
assert len(base_pose) == 3, "Does not match the size of base pose"
base_p = JointTrajectoryPoint()
base_p.positions = base_pose
base_p.velocities = np.zeros(len(base_pose)) # TODO: modify velocity
base_p.time_from_start = rospy.Duration(1)
base_traj.points = [base_p]
return base_traj
def set_arm_pose(self, arm_pose):
arm_traj = JointTrajectory()
arm_traj.joint_names = ['arm_lift_joint', 'arm_flex_joint',
'arm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint']
# Set arm trajectory
        assert len(arm_pose) == 5, "Does not match the size of arm pose"
arm_p = JointTrajectoryPoint()
arm_p.positions = arm_pose
arm_p.velocities = np.zeros(len(arm_pose)) # TODO: modify velocity
arm_p.time_from_start = rospy.Duration(1)
arm_traj.points = [arm_p]
return arm_traj
def check_status(self):
# Wait for publisher has built
while self.base_pub.get_num_connections() == 0:
rospy.sleep(0.1)
while self.arm_pub.get_num_connections() == 0:
rospy.sleep(0.1)
##########################
#### TAMP utils ####
##########################
def plan(self):
# Run TAMP
plan, _, _ = self.tamp_planner.plan()
return plan
def process(self, action_name, args):
# Get object names
object_names = self.tamp_planner.tamp_problem.body_names
# Modify plan
action_name, object_name, modified_action = self.path_modifier.post_process(action_name, object_names, args)
return action_name, object_name, modified_action
#########################
#### RL utils ####
#########################
def get_pick_observation(self, obj_name, pick_pose, target_obj_pose) -> torch.Tensor:
# Get joint pose
joint_pose = self.hsr_interface.get_joint_positions(group='arm')
# Get end effector and target object pose
ee_pos, ee_rot = self.hsr_interface.get_link_pose('hand_palm_link')
obj_pos, obj_rot = self.mocap_interface.get_pose(obj_name)
# Calculate target end effector and object pose
target_ee_pos, target_ee_rot = pick_pose[0], pick_pose[1]
target_obj_pos, target_obj_rot = target_obj_pose[0], target_obj_pose[1]
# To Tensor
joint_pose = torch.tensor(joint_pose)
ee_pos, ee_rot = torch.tensor(ee_pos), torch.tensor(ee_rot) # TODO: -torch.tensor(ee_rot)
obj_pos, obj_rot = torch.tensor(obj_pos), torch.tensor(obj_rot)
target_ee_pos, target_ee_rot = torch.tensor(target_ee_pos), torch.tensor(target_ee_rot)
target_obj_pos, target_obj_rot = torch.tensor(target_obj_pos), torch.tensor(target_obj_rot)
diff_ee_pos = calc_diff_pos(ee_pos, target_ee_pos) # difference ee_pos and pick_pos
diff_ee_rot = calc_diff_rot(ee_rot, target_ee_rot) # difference ee_rot and pick_rot
diff_obj_pos = calc_diff_pos(obj_pos, target_obj_pos) # difference obj_pos and target_pos
diff_obj_rot = calc_diff_rot(obj_rot, target_obj_rot) # difference obj_rot and target_rot
obs = torch.cat((joint_pose, diff_ee_pos, diff_ee_rot, diff_obj_pos, diff_obj_rot))
obs = self._process_data(obs)
return obs
def get_place_observation(self, obj_name, target_obj_pose) -> torch.Tensor:
# Get joint pose
joint_pose = self.hsr_interface.get_joint_positions(group='arm')
# Get end effector and target object pose
ee_pos, ee_rot = self.hsr_interface.get_link_pose('hand_palm_link')
obj_pos, obj_rot = self.mocap_interface.get_pose(obj_name)
# Calculate target pose from base pose
target_obj_pos, target_obj_rot = target_obj_pose[0], target_obj_pose[1]
# To Tensor
joint_pose = torch.tensor(joint_pose)
ee_pos, ee_rot = torch.tensor(ee_pos), torch.tensor(ee_rot) # TODO: -torch.tensor(ee_rot)
obj_pos, obj_rot = torch.tensor(obj_pos), torch.tensor(obj_rot)
target_obj_pos, target_obj_rot = torch.tensor(target_obj_pos), torch.tensor(target_obj_rot)
diff_ee_pos = calc_diff_pos(ee_pos, obj_pos) # difference ee_pos and obj_pos
diff_ee_rot = calc_diff_rot(ee_rot, obj_rot) # difference ee_rot and obj_rot
diff_obj_pos = calc_diff_pos(obj_pos, target_obj_pos) # difference obj_pos and target_pos
diff_obj_rot = calc_diff_rot(obj_rot, target_obj_rot) # difference obj_rot and target_rot
obs = torch.cat((joint_pose, diff_ee_pos, diff_ee_rot, diff_obj_pos, diff_obj_rot))
obs = self._process_data(obs)
return obs
def get_insert_observation(self, obj_name, target_obj_pose) -> torch.Tensor:
# Get joint pose
joint_pose = self.hsr_interface.get_joint_positions(group='arm')
# Get end effector and target object pose
ee_pos, ee_rot = self.hsr_interface.get_link_pose('hand_palm_link')
obj_pos, obj_rot = self.mocap_interface.get_pose(obj_name)
# Calculate target pose from base pose
target_obj_pos, target_obj_rot = target_obj_pose[0], target_obj_pose[1]
# To Tensor
joint_pose = torch.tensor(joint_pose)
ee_pos, ee_rot = torch.tensor(ee_pos), torch.tensor(ee_rot) # TODO: -torch.tensor(ee_rot)
obj_pos, obj_rot = torch.tensor(obj_pos), torch.tensor(obj_rot)
target_obj_pos, target_obj_rot = torch.tensor(target_obj_pos), torch.tensor(target_obj_rot)
diff_ee_pos = calc_diff_pos(ee_pos, obj_pos) # difference ee_pos and obj_pos
diff_ee_rot = calc_diff_rot(ee_rot, obj_rot) # difference ee_rot and obj_rot
diff_obj_pos = calc_diff_pos(obj_pos, target_obj_pos) # difference obj_pos and target_pos
diff_obj_rot = calc_diff_rot(obj_rot, target_obj_rot) # difference obj_rot and target_rot
obs = torch.cat((joint_pose, diff_ee_pos, diff_ee_rot, diff_obj_pos, diff_obj_rot))
obs = self._process_data(obs)
return obs
def _process_data(self, obs: torch.Tensor) -> torch.Tensor:
# To device
obs = obs.to(self._device)
# To torch.float32
obs = obs.to(torch.float32)
# Clamp observation
obs = torch.clamp(obs, -self._clip_obs, self._clip_obs).to(self._device).clone()
return obs
def check_pick_status(self, obj_name):
obj_pos, obj_rot = self.mocap_interface.get_pose(obj_name)
ee_pos, ee_rot = self.hsr_interface.get_link_pose('hand_palm_link')
obj_pos, obj_rot = torch.tensor(obj_pos), torch.tensor(obj_rot)
ee_pos, ee_rot = torch.tensor(ee_pos), torch.tensor(ee_rot)
# Calculate norm distance
pos_dist = norm_diff_pos(ee_pos, obj_pos)
print('pick distance:', pos_dist)
        pick_success = torch.where(
            pos_dist < torch.tensor([0.1]),
            torch.ones((1,)),
            torch.zeros((1,))
        )
        return bool(pick_success)
def check_place_status(self, obj_name, target_obj_pose):
obj_pos, obj_rot = self.mocap_interface.get_pose(obj_name)
target_pos, target_rot = target_obj_pose[0], target_obj_pose[1]
obj_pos, obj_rot = torch.tensor(obj_pos), torch.tensor(obj_rot)
target_pos, target_rot = torch.tensor(target_pos), torch.tensor(target_rot)
# Calculate norm distance
pos_dist = norm_diff_pos(obj_pos, target_pos)
print('place distance:', pos_dist)
        place_success = torch.where(
            pos_dist < torch.tensor([0.03]),
            torch.ones((1,)),
            torch.zeros((1,))
        )
        return bool(place_success)
def check_insert_status(self, obj_name, target_obj_pose):
obj_pos, obj_rot = self.mocap_interface.get_pose(obj_name)
target_pos, target_rot = target_obj_pose[0], target_obj_pose[1]
obj_pos, obj_rot = torch.tensor(obj_pos), torch.tensor(obj_rot)
target_pos, target_rot = torch.tensor(target_pos), torch.tensor(target_rot)
# Calculate norm distance
pos_dist = norm_diff_pos(obj_pos, target_pos)
print('insert distance:', pos_dist)
insert_success = torch.where(
pos_dist < torch.tensor([0.02]),
torch.ones((1,)),
torch.zeros((1,))
)
return insert_success
##########################
#### Real Robot utils ####
##########################
def set_ik_controller(self):
ik_control_cfg = DifferentialInverseKinematicsCfg(
command_type=self._command_type,
ik_method="dls",
position_offset=(0.0, 0.0, 0.0),
rotation_offset=(1.0, 0.0, 0.0, 0.0),
)
return DifferentialInverseKinematics(ik_control_cfg, 1, self._device)
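    # Contract assumed here: with command_type='position_rel' and
    # ik_method='dls' (damped least squares), set_command() receives a 3-dim
    # position delta (dx, dy, dz) and compute_delta() maps it through the robot
    # Jacobian to an 8-dim joint increment (3 base + 5 arm joints).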
def calculate_base_command(self, target_pose):
curr_pose = self.hsr_interface.get_joint_positions(group='base')
curr_vel = self.hsr_interface.get_joint_velocities(group='base')
diff_pose = np.array(target_pose) - np.array(curr_pose)
        diff_vel = -np.array(curr_vel)  # target velocity is zero, so damp against the current velocity
kp = 1.25
kd = 0.1
command = kp * diff_pose + kd * diff_vel
command += np.array(curr_pose)
return command
def calculate_arm_command(self, target_pose):
curr_pose = self.hsr_interface.get_joint_positions(group='arm')
curr_vel = self.hsr_interface.get_joint_velocities(group='arm')
diff_pose = np.array(target_pose) - np.array(curr_pose)
        diff_vel = -np.array(curr_vel)  # target velocity is zero, so damp against the current velocity
kp = 1.25
kd = 0.1
command = kp * diff_pose + kd * diff_vel
command += np.array(curr_pose)
return command
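    # Example (hypothetical numbers): with curr_pose = 0.0, target_pose = 0.4
    # and curr_vel = 0.2, the command is 0.0 + 1.25 * 0.4 - 0.1 * 0.2 = 0.48,
    # a proportional step toward the target with light velocity damping.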
def execute(self):
plan = self.plan()
if plan is None:
return None
for i, (action_name, args) in enumerate(plan):
# Post process TAMP commands to hsr executable actions
action_name, object_name, modified_action = self.process(action_name, args)
if action_name == 'move_base':
for target_pose in modified_action:
if self._pd_control:
target_base_pose = self.calculate_base_command(target_pose[:3])
else:
target_base_pose = target_pose[:3]
base_traj = self.set_base_pose(target_base_pose)
self.base_pub.publish(base_traj)
self.rate.sleep()
elif action_name == 'pick':
finish = False
pick_traj, return_traj = modified_action
for target_pose in pick_traj:
if self._pd_control:
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
else:
target_base_pose = target_pose[:3]
target_arm_pose = target_pose[3:]
# Get observation
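                    # NOTE: pick_pose and target_obj_pose are expected to come from the TAMP
                    # plan for this action; they are not defined in this scope as written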
obs = self.get_pick_observation(object_name, pick_pose, target_obj_pose)
# Residual action
with torch.no_grad():
actions = self.pick_agent.get_action(obs)
                    # Scale the residual policy action into a Cartesian position delta
ik_action = self._dt * self.pick_action_scale * actions.to(self._device) # (dx, dy, dz)
self.ik_controller.set_command(ik_action)
# Calculate robot jacobian
ee_pos, ee_rot = self.hsr_interface.get_link_pose('hand_palm_link')
joint_positions = np.array(self.hsr_interface.get_joint_positions())
robot_jacobian = self.hsr_ik_utils.get_jacobian(joint_positions)
# To tensor and device
ee_pos = torch.tensor(ee_pos, dtype=torch.float32, device=self._device).view(1, 3)
ee_rot = -torch.tensor(ee_rot, dtype=torch.float32, device=self._device).view(1, 4)
robot_jacobian = torch.tensor(robot_jacobian, dtype=torch.float32, device=self._device).view(1, -1, 8)
                    # Calculate delta pose
delta_pose = self.ik_controller.compute_delta(ee_pos, ee_rot, robot_jacobian)
delta_pose = torch.squeeze(delta_pose, dim=0)
delta_pose = delta_pose.to('cpu').detach().numpy().copy() # 8 dim
# Add delta pose to reference trajectory
target_base_pose += delta_pose[:3]
target_arm_pose += delta_pose[3:]
# Set target pose
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
# Publish command
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
rospy.sleep(2.0)
self.hsr_interface.close_gripper()
for target_pose in return_traj: # return
if self._pd_control:
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
else:
target_base_pose = target_pose[:3]
target_arm_pose = target_pose[3:]
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
elif action_name == 'place':
finish = False
place_traj = modified_action
for target_pose in place_traj:
if self._pd_control:
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
else:
target_base_pose = target_pose[:3]
target_arm_pose = target_pose[3:]
# Get observation
obs = self.get_place_observation(object_name, target_obj_pose)
# Residual action
with torch.no_grad():
actions = self.place_agent.get_action(obs)
                    # Scale the residual policy action into a Cartesian position delta
ik_action = self._dt * self.place_action_scale * actions.to(self._device) # (dx, dy, dz)
self.ik_controller.set_command(ik_action)
# Calculate robot jacobian
ee_pos, ee_rot = self.hsr_interface.get_link_pose('hand_palm_link')
joint_positions = np.array(self.hsr_interface.get_joint_positions())
robot_jacobian = self.hsr_ik_utils.get_jacobian(joint_positions)
# To tensor and device
ee_pos = torch.tensor(ee_pos, dtype=torch.float32, device=self._device).view(1, 3)
ee_rot = -torch.tensor(ee_rot, dtype=torch.float32, device=self._device).view(1, 4)
robot_jacobian = torch.tensor(robot_jacobian, dtype=torch.float32, device=self._device).view(1, -1, 8)
                    # Calculate delta pose
delta_pose = self.ik_controller.compute_delta(ee_pos, ee_rot, robot_jacobian)
delta_pose = torch.squeeze(delta_pose, dim=0)
delta_pose = delta_pose.to('cpu').detach().numpy().copy() # 8 dim
# Add delta pose to reference trajectory
target_base_pose += delta_pose[:3]
target_arm_pose += delta_pose[3:]
# Set target pose
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
# Publish command
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
elif action_name == 'insert':
finish = False
loop_count = 0
insert_traj, depart_traj, return_traj = modified_action
while not finish: # insert
target_pose = insert_traj[loop_count]
if self._pd_control:
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
else:
target_base_pose = target_pose[:3]
target_arm_pose = target_pose[3:]
# Get observation
obs = self.get_insert_observation(object_name, target_obj_pose)
# Residual action
with torch.no_grad():
actions = self.insert_agent.get_action(obs)
                    # Scale the residual policy action into a Cartesian position delta
ik_action = self._dt * self.insert_action_scale * actions.to(self._device) # (dx, dy, dz)
self.ik_controller.set_command(ik_action)
# Calculate robot jacobian
ee_pos, ee_rot = self.hsr_interface.get_link_pose('hand_palm_link')
joint_positions = np.array(self.hsr_interface.get_joint_positions())
robot_jacobian = self.hsr_ik_utils.get_jacobian(joint_positions)
# To tensor and device
ee_pos = torch.tensor(ee_pos, dtype=torch.float32, device=self._device).view(1, 3)
ee_rot = -torch.tensor(ee_rot, dtype=torch.float32, device=self._device).view(1, 4)
robot_jacobian = torch.tensor(robot_jacobian, dtype=torch.float32, device=self._device).view(1, -1, 8)
                    # Calculate delta pose
delta_pose = self.ik_controller.compute_delta(ee_pos, ee_rot, robot_jacobian)
delta_pose = torch.squeeze(delta_pose, dim=0)
delta_pose = delta_pose.to('cpu').detach().numpy().copy() # 8 dim
# Add delta pose to reference trajectory
target_base_pose += delta_pose[:3]
target_arm_pose += delta_pose[3:]
# Set target pose
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
prev_ft_data = self.ft_interface.get_current_force()
# Publish command
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
loop_count += 1
if loop_count >= len(insert_traj):
loop_count = len(insert_traj)-1
current_ft_data = self.ft_interface.get_current_force()
force_difference = self.ft_interface.compute_difference(prev_ft_data, current_ft_data)
                    weight = round(force_difference / 9.81 * 1000, 1)  # convert the force change [N] to an equivalent mass [g]
finish = bool(self.check_insert_status(object_name))
                    finish |= weight > 500
rospy.sleep(2.0)
self.hsr_interface.open_gripper()
for target_pose in depart_traj: # depart
if self._pd_control:
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
else:
target_base_pose = target_pose[:3]
target_arm_pose = target_pose[3:]
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
for target_pose in return_traj: # return
if self._pd_control:
target_base_pose = self.calculate_base_command(target_pose[:3])
target_arm_pose = self.calculate_arm_command(target_pose[3:])
else:
target_base_pose = target_pose[:3]
target_arm_pose = target_pose[3:]
base_traj = self.set_base_pose(target_base_pose)
arm_traj = self.set_arm_pose(target_arm_pose)
self.base_pub.publish(base_traj)
self.arm_pub.publish(arm_traj)
self.rate.sleep()
else:
continue
if __name__ == '__main__':
exec_plan = ExecutePlan()
exec_plan.execute()
| 28,687 |
Python
| 41.188235 | 123 | 0.578799 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/rl_policy/config/place_config.yaml
|
name: HSRGearboxResidualPlacePPO
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: True # flag which sets whether to load the checkpoint
load_path: runs/HSRGearboxResidualPlace/nn/HSRGearboxResidualPlace.pth # path to the checkpoint to load
config:
name: HSRGearboxResidualPlace
full_experiment_name: HSRGearboxResidualPlace
env_name: rlgpu
device: cuda
device_name: cuda
multi_gpu: False
ppo: True
mixed_precision: False
clip_actions: True
normalize_input: True
normalize_value: True
num_actors: 1
num_actions: 3
num_observations: 19
action_scale: 0.03
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 256
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 256
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,615 |
YAML
| 20.263158 | 105 | 0.629102 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/rl_policy/config/insert_config.yaml
|
name: HSRGearboxResidualInsertPPO
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: True # flag which sets whether to load the checkpoint
load_path: runs/HSRGearboxResidualInsert/nn/HSRGearboxResidualInsert.pth # path to the checkpoint to load
config:
name: HSRGearboxResidualInsert
full_experiment_name: HSRGearboxResidualInsert
env_name: rlgpu
device: cuda
device_name: cuda
multi_gpu: False
ppo: True
mixed_precision: False
clip_actions: True
normalize_input: True
normalize_value: True
num_actors: 1
num_actions: 3
num_observations: 19
action_scale: 0.03
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 256
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 256
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,620 |
YAML
| 20.328947 | 107 | 0.630247 |
makolon/hsr_isaac_tamp/hsr_ros/hsr_ws/src/env_3d/script/rl_policy/config/pick_config.yaml
|
name: HSRGearboxResidualPickPPO
params:
seed: ${...seed}
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: None
load_checkpoint: True # flag which sets whether to load the checkpoint
load_path: runs/HSRGearboxResidualPick/nn/HSRGearboxResidualPick.pth # path to the checkpoint to load
config:
name: HSRGearboxResidualPick
full_experiment_name: HSRGearboxResidualPick
env_name: rlgpu
device: cuda
device_name: cuda
multi_gpu: False
ppo: True
mixed_precision: False
clip_actions: True
normalize_input: True
normalize_value: True
num_actors: 1
num_actions: 3
num_observations: 19
action_scale: 0.03
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 256
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 256
minibatch_size: 8192
mini_epochs: 8
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
| 1,610 |
YAML
| 20.197368 | 103 | 0.62795 |
makolon/hsr_isaac_tamp/hsr_rl/README.md
|
# HSR-RL
| 9 |
Markdown
| 3.999998 | 8 | 0.555555 |
makolon/hsr_isaac_tamp/hsr_rl/envs/isaac_env_rlgames.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.gym.vec_env import VecEnvBase
import torch
import numpy as np
from datetime import datetime
# VecEnv Wrapper for RL training
class IsaacEnvRlgames(VecEnvBase):
def _process_data(self):
self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._rew = self._rew.to(self._task.rl_device).clone()
self._states = torch.clamp(self._states, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._resets = self._resets.to(self._task.rl_device).clone()
self._extras = self._extras.copy()
def set_task(
self, task, backend="numpy", sim_params=None, init_sim=True
) -> None:
super().set_task(task, backend, sim_params, init_sim)
self.num_states = self._task.num_states
self.state_space = self._task.state_space
def step(self, actions):
if self._task.randomize_actions:
actions = self._task._dr_randomizer.apply_actions_randomization(actions=actions, reset_buf=self._task.reset_buf)
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
self._task.pre_physics_step(actions)
for _ in range(self._task.control_frequency_inv):
self._world.step(render=self._render)
self.sim_frame_count += 1
self._obs, self._rew, self._resets, self._extras = self._task.post_physics_step()
if self._task.randomize_observations:
self._obs = self._task._dr_randomizer.apply_observations_randomization(
observations=self._obs.to(device=self._task.rl_device), reset_buf=self._task.reset_buf)
self._states = self._task.get_states()
self._process_data()
obs_dict = {"obs": self._obs, "states": self._states}
return obs_dict, self._rew, self._resets, self._extras
def reset(self):
""" Resets the task and applies default zero actions to recompute observations and states. """
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{now}] Running RL reset")
self._task.reset()
actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.rl_device)
obs_dict, _, _, _ = self.step(actions)
return obs_dict
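    # Note: step() and reset() both return observations as a dict,
    # {"obs": clipped policy observations, "states": privileged states for an
    # asymmetric actor-critic setup}.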
| 3,934 |
Python
| 42.722222 | 124 | 0.6909 |
makolon/hsr_isaac_tamp/hsr_rl/envs/isaac_env_rlgames_mt.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.gym.vec_env import VecEnvMT
from omni.isaac.gym.vec_env import TaskStopException
from .isaac_env_rlgames import IsaacEnvRlgames
import torch
import numpy as np
# VecEnv Wrapper for RL training
class IsaacEnvRlgamesMT(IsaacEnvRlgames, VecEnvMT):
def _parse_data(self, data):
self._obs = data["obs"].clone()
self._rew = data["rew"].to(self._task.rl_device).clone()
self._states = torch.clamp(data["states"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._resets = data["reset"].to(self._task.rl_device).clone()
self._extras = data["extras"].copy()
def step(self, actions):
if self._stop:
raise TaskStopException()
if self._task.randomize_actions:
actions = self._task._dr_randomizer.apply_actions_randomization(actions=actions, reset_buf=self._task.reset_buf)
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
self.send_actions(actions)
data = self.get_data()
if self._task.randomize_observations:
self._obs = self._task._dr_randomizer.apply_observations_randomization(observations=self._obs.to(self._task.rl_device), reset_buf=self._task.reset_buf)
self._obs = torch.clamp(self._obs, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device)
obs_dict = {}
obs_dict["obs"] = self._obs
obs_dict["states"] = self._states
return obs_dict, self._rew, self._resets, self._extras
| 3,155 |
Python
| 44.085714 | 163 | 0.716006 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/example/hsr_place.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from hsr_rl.tasks.base.rl_task import RLTask
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
from omni.isaac.core.prims.geometry_prim_view import GeometryPrimView
from omni.isaac.core.articulations.articulation_view import ArticulationView
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.maths import torch_rand_float, torch_random_dir_2, tensor_clamp
from omni.isaac.core.utils.stage import print_stage_prim_paths
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.objects import DynamicSphere, DynamicCuboid, FixedCuboid
from omni.isaac.sensor import _sensor
import re
import torch
from pxr import Usd, UsdGeom
class HSRExamplePlaceTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._dt = torch.tensor(self._task_cfg["sim"]["dt"] * self._task_cfg["env"]["controlFrequencyInv"], device=self._device)
self._num_observations = self._task_cfg["env"]["num_observations"]
self._num_actions = self._task_cfg["env"]["num_actions"]
self._num_props = self._task_cfg["env"]["numProps"]
self._table_height = 0.2
self._table_width = 0.65
self._table_depth = 1.2
self._table_size = 1.0
self._prop_size = 0.04
self._hsr_position = torch.tensor([0.0, 0.0, 0.03], device=self._device)
self._hsr_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._table_position = torch.tensor([1.5, 0.0, self._table_height/2], device=self._device)
self._table_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._prop_position = torch.tensor([1.3, 0.0, self._table_height+self._prop_size/2], device=self._device)
self._prop_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
# Start at 'home' positions
self.torso_start = torch.tensor([0.1], device=self._device)
self.base_start = torch.tensor([0.0, 0.0, 0.0], device=self._device)
self.arm_start = torch.tensor([0.1, -1.570796, 0.0, -0.392699, 0.0], device=self._device)
self.gripper_proximal_start = torch.tensor([0.75, 0.75], device=self._device)
self.initial_dof_positions = torch.tensor([0.0, 0.0, 0.0, 0.1, 0.1, -1.570796, 0.0, 0.0, 0.0, -0.392699, 0.0, 0.75, 0.75, 0.0, 0.0], device=self._device)
# Joint & body names
self._torso_joint_name = ["torso_lift_joint"]
self._base_joint_names = ["joint_x", "joint_y", "joint_rz"]
self._arm_names = ["arm_lift_joint", "arm_flex_joint", "arm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
self._gripper_proximal_names = ["hand_l_proximal_joint", "hand_r_proximal_joint"]
# Values are set in post_reset after model is loaded
self.torso_dof_idx = []
self.base_dof_idxs = []
self.arm_dof_idxs = []
self.gripper_proximal_dof_idxs = []
# Dof joint position limits
self.torso_dof_lower = []
self.torso_dof_upper = []
self.base_dof_lower = []
self.base_dof_upper = []
self.arm_dof_lower = []
self.arm_dof_upper = []
self.gripper_p_dof_lower = []
self.gripper_p_dof_upper = []
# Add contact sensor
self._contact_sensor_interface = _sensor.acquire_contact_sensor_interface()
self.is_collided = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.is_success = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.add_hsr()
self.add_prop()
self.add_table()
# Set up scene
super().set_up_scene(scene)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view")
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
# Add prop to scene
self._props = RigidPrimView(prim_paths_expr="/World/envs/.*/prop", name="prop_view", reset_xform_properties=False)
scene.add(self._props)
def add_hsr(self):
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb",
name="hsrb",
translation=self._hsr_position,
orientation=self._hsr_rotation)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_prop(self):
prop = DynamicCuboid(prim_path=self.default_zero_env_path + "/prop",
name="prop",
translation=self._prop_position,
orientation=self._prop_rotation,
size=self._prop_size,
color=torch.tensor([0.2, 0.4, 0.6]),
mass=0.1,
density=100.0)
self._sim_config.apply_articulation_settings("prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop"))
def add_table(self):
table = FixedCuboid(prim_path=self.default_zero_env_path + "/table",
name="table",
translation=self._table_position,
orientation=self._table_rotation,
size=self._table_size,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([self._table_width, self._table_depth, self._table_height]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table"))
def get_observations(self):
# Get prop positions and orientations
prop_positions, prop_orientations = self._props.get_world_poses(clone=False)
prop_positions = prop_positions[:, 0:3] - self._env_pos
# Get prop velocities
prop_velocities = self._props.get_velocities(clone=False)
prop_linvels = prop_velocities[:, 0:3]
prop_angvels = prop_velocities[:, 3:6]
# Get end effector positions and orientations
end_effector_positions, end_effector_orientations = self._robots._fingertip_centered.get_world_poses(clone=False)
end_effector_positions = end_effector_positions[:, 0:3] - self._env_pos
# Get end effector velocities
end_effector_velocities = self._robots._fingertip_centered.get_velocities(clone=False)
end_effector_linvels = end_effector_velocities[:, 0:3]
end_effector_angvels = end_effector_velocities[:, 3:6]
self.prop_positions = prop_positions
self.prop_linvels = prop_linvels
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
self.obs_buf[..., 0:8] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 8:16] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 16:19] = end_effector_positions
self.obs_buf[..., 19:23] = end_effector_orientations
self.obs_buf[..., 23:26] = end_effector_linvels
self.obs_buf[..., 26:29] = end_effector_angvels
self.obs_buf[..., 29:32] = prop_positions
self.obs_buf[..., 32:36] = prop_orientations
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Update position targets from actions
self.dof_position_targets[..., self.actuated_dof_indices] += self._dt * self._action_speed_scale * actions.to(self.device)
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits
)
# Modify torso joint positions
dof_pos = self._robots.get_joint_positions()
arm_pos = dof_pos[:, self.arm_dof_idxs]
scaled_arm_lift_pos = arm_pos[:, 0] / self.arm_dof_upper[0]
scaled_torso_lift_pos = scaled_arm_lift_pos * self.torso_dof_upper[0]
self.dof_position_targets[:, self.torso_dof_idx] = scaled_torso_lift_pos.unsqueeze(dim=1)
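        # The torso lift target is slaved to the arm lift here: e.g. an arm
        # lift at 50% of its range commands 50% of the torso range.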
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = self.initial_dof_positions
self._robots.set_joint_positions(self.dof_position_targets)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = -0.01 # min horizontal dist from origin
max_d = 0.01 # max horizontal dist from origin
min_height = 0.01
max_height = 0.02
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
# Prop pos / rot, velocities
self.prop_pos = self.initial_prop_pos.clone()
self.prop_rot = self.initial_prop_rot.clone()
# position
self.prop_pos[env_ids_64, 0:2] += hpos[..., 0:2]
self.prop_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
self.prop_rot[env_ids_64, 0] = 1
self.prop_rot[env_ids_64, 1:] = 0
# reset root state for props in selected envs
self._props.set_world_poses(self.prop_pos[env_ids_64], self.prop_rot[env_ids_64], indices=env_ids_32)
# reset root state for robots in selected envs
self._robots.set_world_poses(self.initial_robot_pos[env_ids_64], self.initial_robot_rot[env_ids_64], indices=env_ids_32)
# reset DOF states for robots in selected envs
self._robots.set_joint_positions(self.initial_dof_positions, indices=env_ids_32)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.extras[env_ids] = 0
self.is_collided[env_ids] = 0
def post_reset(self):
self.set_dof_idxs()
self.set_dof_limits()
self.set_default_state()
# reset prop pos / rot, and velocities
self.initial_robot_pos, self.initial_robot_rot = self._robots.get_world_poses()
self.initial_robot_velocities = self._robots.get_velocities()
# reset prop pos / rot, and velocities
self.initial_prop_pos, self.initial_prop_rot = self._props.get_world_poses()
self.initial_prop_velocities = self._props.get_velocities()
def calculate_metrics(self) -> None:
# Distance from hand to the box
dist_hand = torch.norm(self.obs_buf[..., 29:32] - self.obs_buf[..., 16:19], p=2, dim=-1)
# Distance from left gripper to the box
lfinger_pos, _ = self._robots._lfingers.get_world_poses(clone=False)
lfinger_pos -= self._env_pos
dist_lf = torch.norm(self.obs_buf[..., 29:32] - lfinger_pos, p=2, dim=-1)
# Distance from right gripper to the box
rfinger_pos, _ = self._robots._rfingers.get_world_poses(clone=False)
rfinger_pos -= self._env_pos
dist_rf = torch.norm(self.obs_buf[..., 29:32] - rfinger_pos, p=2, dim=-1)
dist_reward = 1.0 - torch.tanh(10.0 * (dist_hand + dist_lf + dist_rf) / 3)
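        # tanh shaping keeps the reward in (0, 1]: e.g. an average of the three
        # distances of 0.05 m yields 1 - tanh(0.5) ~ 0.54, approaching 1 as the
        # hand and fingers converge on the prop.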
self.rew_buf[:] = dist_reward
# In this policy, episode length is constant across all envs
is_last_step = (self.progress_buf[0] == self._max_episode_length - 1)
if is_last_step:
            # Check if the prop is picked up and above the table
            lift_success = self._check_lift_success(height_threshold=0.24)
self.rew_buf[:] += lift_success * self._task_cfg['rl']['success_bonus']
self.extras['successes'] = torch.mean(lift_success.float())
def is_done(self) -> None:
# prop height index is 31, NOTE: modify according to observation
self.reset_buf = torch.where(
self.obs_buf[:, 31] <= self._table_height,
torch.ones_like(self.reset_buf),
self.reset_buf
)
self.reset_buf = torch.where(
self.progress_buf[:] >= self._max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
is_last_step = (self.progress_buf[0] == self._max_episode_length - 1)
if is_last_step:
self._close_gripper(sim_steps=self._task_cfg['env']['num_gripper_close_sim_steps'])
self._lift_gripper(sim_steps=self._task_cfg['env']['num_gripper_lift_sim_steps'])
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def set_dof_idxs(self):
        for name in self._torso_joint_name:
            self.torso_dof_idx.append(self._robots.get_dof_index(name))
        for name in self._base_joint_names:
            self.base_dof_idxs.append(self._robots.get_dof_index(name))
        for name in self._arm_names:
            self.arm_dof_idxs.append(self._robots.get_dof_index(name))
        for name in self._gripper_proximal_names:
            self.gripper_proximal_dof_idxs.append(self._robots.get_dof_index(name))
# Movable joints
self.actuated_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10]).to(self._device)
def set_dof_limits(self): # dof position limits
# (num_envs, num_dofs, 2)
dof_limits = self._robots.get_dof_limits()
dof_limits_lower = dof_limits[0, :, 0].to(self._device)
dof_limits_upper = dof_limits[0, :, 1].to(self._device)
# Set relevant joint position limit values
self.torso_dof_lower = dof_limits_lower[self.torso_dof_idx]
self.torso_dof_upper = dof_limits_upper[self.torso_dof_idx]
self.base_dof_lower = dof_limits_lower[self.base_dof_idxs]
self.base_dof_upper = dof_limits_upper[self.base_dof_idxs]
self.arm_dof_lower = dof_limits_lower[self.arm_dof_idxs]
self.arm_dof_upper = dof_limits_upper[self.arm_dof_idxs]
self.gripper_p_dof_lower = dof_limits_lower[self.gripper_proximal_dof_idxs]
self.gripper_p_dof_upper = dof_limits_upper[self.gripper_proximal_dof_idxs]
self.robot_dof_lower_limits, self.robot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
def set_default_state(self):
# Set default joint state
joint_states = self._robots.get_joints_default_state()
jt_pos = joint_states.positions
jt_pos[:, self.torso_dof_idx] = self.torso_start
jt_pos[:, self.base_dof_idxs] = self.base_start
jt_pos[:, self.arm_dof_idxs] = self.arm_start
jt_pos[:, self.gripper_proximal_dof_idxs] = self.gripper_proximal_start
jt_vel = joint_states.velocities
jt_vel[:, self.torso_dof_idx] = torch.zeros_like(self.torso_start, device=self._device)
jt_vel[:, self.base_dof_idxs] = torch.zeros_like(self.base_start, device=self._device)
jt_vel[:, self.arm_dof_idxs] = torch.zeros_like(self.arm_start, device=self._device)
jt_vel[:, self.gripper_proximal_dof_idxs] = torch.zeros_like(self.gripper_proximal_start, device=self._device)
self._robots.set_joints_default_state(positions=jt_pos, velocities=jt_vel)
# Initialize target positions
self.dof_position_targets = jt_pos
def _get_keypoint_dist(self):
# end effector pose indices are 0:3, and prop pose indices are 13:16
keypoint_dist = torch.sum(torch.norm(self.obs_buf[:, 0:3] - self.obs_buf[:, 13:16], p=2, dim=-1), dim=-1)
return keypoint_dist
def _close_gripper(self, sim_steps=10):
gripper_dof_pos = torch.tensor([-1.75, -1.75], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_velocity_targets(gripper_dof_pos, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
def _lift_gripper(self, sim_steps=10):
lift_dof_pos = torch.tensor([0.0, 0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_velocity_targets(lift_dof_pos)
SimulationContext.step(self._env._world, render=True)
    def _check_lift_success(self, height_threshold):
        lift_success = torch.where(
            self.prop_pos[:, 2] > height_threshold,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return lift_success
    def _check_robot_collisions(self):
        # Check if the robot collided with an object
        # NOTE: assumes a view of the obstacle tables is stored as self._tables;
        # it is not created in set_up_scene as written
        for obst_prim in self._tables._prim_paths:
match = re.search(r'\d+', obst_prim)
env_id = int(match.group())
raw_readings = self._contact_sensor_interface.get_contact_sensor_raw_data(obst_prim + "/Contact_Sensor")
if raw_readings.shape[0]:
for reading in raw_readings:
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body1"])):
self.is_collided[env_id] = True
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body0"])):
self.is_collided[env_id] = True
collide_penalty = torch.where(
            self.is_collided > 0,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return collide_penalty
| 20,725 |
Python
| 45.68018 | 170 | 0.623836 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/example/hsr_insert.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from hsr_rl.tasks.base.hsr_task import HSRBaseTask
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
from omni.isaac.core.prims.geometry_prim_view import GeometryPrimView
from omni.isaac.core.articulations.articulation_view import ArticulationView
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.maths import torch_rand_float, torch_random_dir_2, tensor_clamp
from omni.isaac.core.utils.stage import print_stage_prim_paths
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.objects import DynamicSphere, DynamicCuboid, FixedCuboid
from pxr import Usd, UsdGeom
class HSRExampleInsertTask(HSRBaseTask):
def __init__(
self,
name,
sim_config,
env,
) -> None:
HSRBaseTask.__init__(self, name, sim_config, env)
self._num_props = self._task_cfg["env"]["numProps"]
self._prop_size = self._task_cfg["env"]["propSize"]
self.is_collided = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.is_success = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
return
def set_up_environment(self):
self._prop_position = torch.tensor([1.3, 0.0, self._table_height+self._prop_size/2], device=self._device)
self._prop_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
def set_up_scene(self, scene) -> None:
self.add_hsr()
self.add_prop()
self.add_table()
# Set up scene
super().set_up_scene(scene)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view")
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
# Add prop to scene
self._props = RigidPrimView(prim_paths_expr="/World/envs/.*/prop", name="prop_view", reset_xform_properties=False)
scene.add(self._props)
def add_hsr(self):
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb",
name="hsrb",
translation=self._hsr_position,
orientation=self._hsr_rotation)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_prop(self):
prop = DynamicCuboid(prim_path=self.default_zero_env_path + "/prop",
name="prop",
translation=self._prop_position,
orientation=self._prop_rotation,
size=self._prop_size,
color=torch.tensor([0.2, 0.4, 0.6]),
mass=0.1,
density=100.0)
self._sim_config.apply_articulation_settings("prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop"))
def add_table(self):
table = FixedCuboid(prim_path=self.default_zero_env_path + "/table",
name="table",
translation=self._table_position,
orientation=self._table_rotation,
size=self._table_size,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([self._table_width, self._table_depth, self._table_height]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table"))
def get_observations(self):
# Get prop positions and orientations
prop_positions, prop_orientations = self._props.get_world_poses(clone=False)
prop_positions = prop_positions[:, 0:3] - self._env_pos
# Get prop velocities
prop_velocities = self._props.get_velocities(clone=False)
prop_linvels = prop_velocities[:, 0:3]
prop_angvels = prop_velocities[:, 3:6]
# Get end effector positions and orientations
end_effector_positions, end_effector_orientations = self._robots._fingertip_centered.get_world_poses(clone=False)
end_effector_positions = end_effector_positions[:, 0:3] - self._env_pos
# Get end effector velocities
end_effector_velocities = self._robots._fingertip_centered.get_velocities(clone=False)
end_effector_linvels = end_effector_velocities[:, 0:3]
end_effector_angvels = end_effector_velocities[:, 3:6]
self.prop_positions = prop_positions
self.prop_linvels = prop_linvels
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
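        # Observation layout (36 values per env), packed below:
        #   [0:8]   actuated joint positions   [8:16]  actuated joint velocities
        #   [16:19] end-effector position      [19:23] end-effector quaternion
        #   [23:26] end-effector linear vel    [26:29] end-effector angular vel
        #   [29:32] prop position              [32:36] prop quaternion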
self.obs_buf[..., 0:8] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 8:16] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 16:19] = end_effector_positions
self.obs_buf[..., 19:23] = end_effector_orientations
self.obs_buf[..., 23:26] = end_effector_linvels
self.obs_buf[..., 26:29] = end_effector_angvels
self.obs_buf[..., 29:32] = prop_positions
self.obs_buf[..., 32:36] = prop_orientations
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Update position targets from actions
self.dof_position_targets[..., self.actuated_dof_indices] += self._dt * self._action_speed_scale * actions.to(self.device)
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits
)
# Modify torso joint positions
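        # The torso lift mimics the arm lift: scale the arm_lift position into
        # [0, 1] by its upper limit, then map that fraction onto the torso's
        # range, e.g. arm_lift at 50% of its travel drives the torso to 50%.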
dof_pos = self._robots.get_joint_positions()
arm_pos = dof_pos[:, self.arm_dof_idxs]
scaled_arm_lift_pos = arm_pos[:, 0] / self.arm_dof_upper[0]
scaled_torso_lift_pos = scaled_arm_lift_pos * self.torso_dof_upper[0]
self.dof_position_targets[:, self.torso_dof_idx] = scaled_torso_lift_pos.unsqueeze(dim=1)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = self.initial_dof_positions
self._robots.set_joint_positions(self.dof_position_targets)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = -0.01 # min horizontal dist from origin
max_d = 0.01 # max horizontal dist from origin
min_height = 0.01
max_height = 0.02
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
# Prop pos / rot, velocities
self.prop_pos = self.initial_prop_pos.clone()
self.prop_rot = self.initial_prop_rot.clone()
# position
self.prop_pos[env_ids_64, 0:2] += hpos[..., 0:2]
self.prop_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
self.prop_rot[env_ids_64, 0] = 1
self.prop_rot[env_ids_64, 1:] = 0
# reset root state for props in selected envs
self._props.set_world_poses(self.prop_pos[env_ids_64], self.prop_rot[env_ids_64], indices=env_ids_32)
# reset root state for robots in selected envs
self._robots.set_world_poses(self.initial_robot_pos[env_ids_64], self.initial_robot_rot[env_ids_64], indices=env_ids_32)
# reset DOF states for robots in selected envs
self._robots.set_joint_positions(self.initial_dof_positions, indices=env_ids_32)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.extras[env_ids] = 0
self.is_collided[env_ids] = 0
def post_reset(self):
self.set_dof_idxs()
self.set_dof_limits()
self.set_default_state()
# reset prop pos / rot, and velocities
self.initial_robot_pos, self.initial_robot_rot = self._robots.get_world_poses()
self.initial_robot_velocities = self._robots.get_velocities()
# reset prop pos / rot, and velocities
self.initial_prop_pos, self.initial_prop_rot = self._props.get_world_poses()
self.initial_prop_velocities = self._props.get_velocities()
def calculate_metrics(self) -> None:
# Distance from hand to the box
dist_hand = torch.norm(self.obs_buf[..., 29:32] - self.obs_buf[..., 16:19], p=2, dim=-1)
# Distance from left gripper to the box
lfinger_pos, _ = self._robots._lfingers.get_world_poses(clone=False)
lfinger_pos -= self._env_pos
dist_lf = torch.norm(self.obs_buf[..., 29:32] - lfinger_pos, p=2, dim=-1)
# Distance from right gripper to the box
rfinger_pos, _ = self._robots._rfingers.get_world_poses(clone=False)
rfinger_pos -= self._env_pos
dist_rf = torch.norm(self.obs_buf[..., 29:32] - rfinger_pos, p=2, dim=-1)
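        # Shaping sketch: with d = (dist_hand + dist_lf + dist_rf) / 3 in metres,
        # reward = 1 - tanh(10 d) decays from 1.0 at contact to ~0.54 at d = 0.05
        # and ~0.005 at d = 0.30, so the gradient is only informative near the prop.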
dist_reward = 1.0 - torch.tanh(10.0 * (dist_hand + dist_lf + dist_rf) / 3)
self.rew_buf[:] = dist_reward
# In this policy, episode length is constant across all envs
is_last_step = (self.progress_buf[0] == self._max_episode_length - 1)
if is_last_step:
            # Check if prop is picked up and above table
            lift_success = self._check_lift_success(height_threshold=0.24)
self.rew_buf[:] += lift_success * self._task_cfg['rl']['success_bonus']
self.extras['successes'] = torch.mean(lift_success.float())
def is_done(self) -> None:
# prop height index is 31, NOTE: modify according to observation
self.reset_buf = torch.where(
self.obs_buf[:, 31] <= self._table_height,
torch.ones_like(self.reset_buf),
self.reset_buf
)
self.reset_buf = torch.where(
self.progress_buf[:] >= self._max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
is_last_step = (self.progress_buf[0] == self._max_episode_length - 1)
if is_last_step:
self._close_gripper(sim_steps=self._task_cfg['env']['num_gripper_close_sim_steps'])
self._lift_gripper(sim_steps=self._task_cfg['env']['num_gripper_lift_sim_steps'])
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def set_dof_idxs(self):
[self.torso_dof_idx.append(self._robots.get_dof_index(name)) for name in self._torso_joint_name]
[self.base_dof_idxs.append(self._robots.get_dof_index(name)) for name in self._base_joint_names]
[self.arm_dof_idxs.append(self._robots.get_dof_index(name)) for name in self._arm_names]
[self.gripper_proximal_dof_idxs.append(self._robots.get_dof_index(name)) for name in self._gripper_proximal_names]
# Movable joints
self.actuated_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10]).to(self._device)
def set_dof_limits(self): # dof position limits
# (num_envs, num_dofs, 2)
dof_limits = self._robots.get_dof_limits()
dof_limits_lower = dof_limits[0, :, 0].to(self._device)
dof_limits_upper = dof_limits[0, :, 1].to(self._device)
# Set relevant joint position limit values
self.torso_dof_lower = dof_limits_lower[self.torso_dof_idx]
self.torso_dof_upper = dof_limits_upper[self.torso_dof_idx]
self.base_dof_lower = dof_limits_lower[self.base_dof_idxs]
self.base_dof_upper = dof_limits_upper[self.base_dof_idxs]
self.arm_dof_lower = dof_limits_lower[self.arm_dof_idxs]
self.arm_dof_upper = dof_limits_upper[self.arm_dof_idxs]
self.gripper_p_dof_lower = dof_limits_lower[self.gripper_proximal_dof_idxs]
self.gripper_p_dof_upper = dof_limits_upper[self.gripper_proximal_dof_idxs]
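        # get_dof_limits() returns (num_envs, num_dofs, 2); transposing env 0's
        # (num_dofs, 2) slice yields a (2, num_dofs) tensor whose two rows unpack
        # into the lower- and upper-limit vectors used for clamping targets.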
self.robot_dof_lower_limits, self.robot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
def set_default_state(self):
# Set default joint state
joint_states = self._robots.get_joints_default_state()
jt_pos = joint_states.positions
jt_pos[:, self.torso_dof_idx] = self.torso_start
jt_pos[:, self.base_dof_idxs] = self.base_start
jt_pos[:, self.arm_dof_idxs] = self.arm_start
jt_pos[:, self.gripper_proximal_dof_idxs] = self.gripper_proximal_start
jt_vel = joint_states.velocities
jt_vel[:, self.torso_dof_idx] = torch.zeros_like(self.torso_start, device=self._device)
jt_vel[:, self.base_dof_idxs] = torch.zeros_like(self.base_start, device=self._device)
jt_vel[:, self.arm_dof_idxs] = torch.zeros_like(self.arm_start, device=self._device)
jt_vel[:, self.gripper_proximal_dof_idxs] = torch.zeros_like(self.gripper_proximal_start, device=self._device)
self._robots.set_joints_default_state(positions=jt_pos, velocities=jt_vel)
# Initialize target positions
self.dof_position_targets = jt_pos
def _get_keypoint_dist(self):
# end effector pose indices are 0:3, and prop pose indices are 13:16
keypoint_dist = torch.sum(torch.norm(self.obs_buf[:, 0:3] - self.obs_buf[:, 13:16], p=2, dim=-1), dim=-1)
return keypoint_dist
def _close_gripper(self, sim_steps=10):
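        # Drive both proximal finger joints with a negative velocity target so the
        # gripper closes, stepping the sim a few frames to let contacts settle.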
gripper_dof_pos = torch.tensor([-1.75, -1.75], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_velocity_targets(gripper_dof_pos, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
def _lift_gripper(self, sim_steps=10):
lift_dof_pos = torch.tensor([0.0, 0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_velocity_targets(lift_dof_pos)
SimulationContext.step(self._env._world, render=True)
    def _check_lift_success(self, height_threshold):
        lift_success = torch.where(
            self.prop_pos[:, 2] > height_threshold,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return lift_success
def _check_robot_collisions(self):
# Check if the robot collided with an object
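        # NOTE: this helper assumes a table prim view (self._tables) and the contact
        # sensor interface (self._contact_sensor_interface) were registered elsewhere;
        # it flags an env as collided when either contact body decodes to the robot.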
for obst_prim in self._tables._prim_paths:
match = re.search(r'\d+', obst_prim)
env_id = int(match.group())
raw_readings = self._contact_sensor_interface.get_contact_sensor_raw_data(obst_prim + "/Contact_Sensor")
if raw_readings.shape[0]:
for reading in raw_readings:
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body1"])):
self.is_collided[env_id] = True
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body0"])):
self.is_collided[env_id] = True
collide_penalty = torch.where(
self.is_collided == True,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return collide_penalty
| 18130 |
Python
| 45.850129 | 170 | 0.62995 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/example/hsr_cabinet.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from hsr_rl.tasks.base.rl_task import RLTask
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.cabinet import Cabinet
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from hsr_rl.robots.articulations.views.cabinet_view import CabinetView
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import math
import torch
import numpy as np
from pxr import Usd, UsdGeom
# Whole Body example task with holonomic robot base
class HSRExampleCabinetTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.action_scale = self._task_cfg["env"]["actionScale"]
self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
self.num_props = self._task_cfg["env"]["numProps"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
self.distX_offset = 0.04
self.dt = 1/60.
self._num_observations = 35
self._num_actions = 15
RLTask.__init__(self, name, env)
self.hsr_position = torch.tensor([1.5, 0.0, 0.03])
self.hsr_rotation = torch.tensor([0.0, 0.0, 0.0, 1.0])
return
def set_up_scene(self, scene) -> None:
self.add_hsr()
self.add_cabinet()
if self.num_props > 0:
self.add_props()
# Set up scene
super().set_up_scene(scene)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view")
self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view")
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
scene.add(self._cabinets)
scene.add(self._cabinets._drawers)
if self.num_props > 0:
self._props = RigidPrimView(prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False)
scene.add(self._props)
self.init_data()
return
def add_hsr(self):
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb", name="hsrb", translation=self.hsr_position, orientation=self.hsr_rotation)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_cabinet(self):
cabinet = Cabinet(self.default_zero_env_path + "/cabinet", name="cabinet")
self._sim_config.apply_articulation_settings("cabinet", get_prim_at_path(cabinet.prim_path), self._sim_config.parse_actor_config("cabinet"))
def add_props(self):
prop_cloner = Cloner()
drawer_pos = torch.tensor([0.0515, 0.0, 0.7172])
prop_color = torch.tensor([0.2, 0.4, 0.6])
props_per_row = int(math.ceil(math.sqrt(self.num_props)))
prop_size = 0.04
prop_spacing = 0.09
xmin = -0.5 * prop_spacing * (props_per_row - 1)
zmin = -0.5 * prop_spacing * (props_per_row - 1)
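        # Lay the props out as a centred, roughly square grid: props_per_row is
        # ceil(sqrt(numProps)), and xmin/zmin shift the first prop so the grid is
        # symmetric about the drawer centre with 9 cm spacing.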
prop_count = 0
prop_pos = []
for j in range(props_per_row):
prop_up = zmin + j * prop_spacing
for k in range(props_per_row):
if prop_count >= self.num_props:
break
propx = xmin + k * prop_spacing
prop_pos.append([propx, prop_up, 0.0])
prop_count += 1
prop = DynamicCuboid(
prim_path=self.default_zero_env_path + "/prop/prop_0",
name="prop",
color=prop_color,
size=prop_size,
density=100.0
)
self._sim_config.apply_articulation_settings("prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop"))
prop_paths = [f"{self.default_zero_env_path}/prop/prop_{j}" for j in range(self.num_props)]
prop_cloner.clone(
source_prim_path=self.default_zero_env_path + "/prop/prop_0",
prim_paths=prop_paths,
positions=np.array(prop_pos)+drawer_pos.numpy(),
replicate_physics=False
)
def init_data(self) -> None:
def get_env_local_pose(env_pos, xformable, device):
"""Compute pose in env-local coordinates"""
world_transform = xformable.ComputeLocalToWorldTransform(0)
world_pos = world_transform.ExtractTranslation()
world_quat = world_transform.ExtractRotationQuat()
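            # USD's Gf quaternion exposes real/imaginary parts; reorder them into
            # the (w, x, y, z) convention expected by the Isaac torch utilities.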
px = world_pos[0] - env_pos[0]
py = world_pos[1] - env_pos[1]
pz = world_pos[2] - env_pos[2]
qx = world_quat.imaginary[0]
qy = world_quat.imaginary[1]
qz = world_quat.imaginary[2]
qw = world_quat.real
return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
stage = get_current_stage()
hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/hsrb/hand_palm_link")), self._device)
lfinger_pose = get_env_local_pose(
self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/hsrb/hand_l_proximal_link")), self._device
)
rfinger_pose = get_env_local_pose(
self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/hsrb/hand_r_proximal_link")), self._device
)
finger_pose = torch.zeros(7, device=self._device)
finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
finger_pose[3:7] = lfinger_pose[3:7]
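        # Express the fingertip-midpoint pose in the hand frame: with T_hand and
        # T_finger as (rot, pos) pairs, the local grasp pose is
        # T_hand^-1 * T_finger, computed via tf_inverse followed by tf_combine.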
hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
grasp_pose_axis = 1
hsr_local_grasp_pose_rot, hsr_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
hsr_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
self.hsr_local_grasp_pos = hsr_local_pose_pos.repeat((self._num_envs, 1))
self.hsr_local_grasp_rot = hsr_local_grasp_pose_rot.repeat((self._num_envs, 1))
drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self._device)
self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self._num_envs, 1))
self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self._num_envs, 1))
self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
self.drawer_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
self.drawer_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
self.hsr_default_dof_pos = torch.tensor(
[0.0, 0.0, 0.0, 0.1, 0.1, -1.570796, 0.0, 0.0, 0.0, -0.392699, 0.0, 0.75, 0.75, 0.0, 0.0], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self):
hand_pos, hand_rot = self._robots._hands.get_world_poses(clone=False)
drawer_pos, drawer_rot = self._cabinets._drawers.get_world_poses(clone=False)
hsr_dof_pos = self._robots.get_joint_positions(clone=False)
hsr_dof_vel = self._robots.get_joint_velocities(clone=False)
self.cabinet_dof_pos = self._cabinets.get_joint_positions(clone=False)
self.cabinet_dof_vel = self._cabinets.get_joint_velocities(clone=False)
self.hsr_dof_pos = hsr_dof_pos
self.hsr_grasp_rot, self.hsr_grasp_pos, self.drawer_grasp_rot, self.drawer_grasp_pos = self.compute_grasp_transforms(
hand_rot,
hand_pos,
self.hsr_local_grasp_rot,
self.hsr_local_grasp_pos,
drawer_rot,
drawer_pos,
self.drawer_local_grasp_rot,
self.drawer_local_grasp_pos,
)
self.hsr_lfinger_pos, self.hsr_lfinger_rot = self._robots._lfingers.get_world_poses(clone=False)
        self.hsr_rfinger_pos, self.hsr_rfinger_rot = self._robots._rfingers.get_world_poses(clone=False)
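        # Normalise joint positions into [-1, 1] with the affine map
        # scaled = 2 * (q - lower) / (upper - lower) - 1 before concatenating.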
dof_pos_scaled = (
2.0
* (hsr_dof_pos - self.hsr_dof_lower_limits)
/ (self.hsr_dof_upper_limits - self.hsr_dof_lower_limits)
- 1.0
)
to_target = self.drawer_grasp_pos - self.hsr_grasp_pos
self.obs_buf = torch.cat(
(
dof_pos_scaled,
hsr_dof_vel * self.dof_vel_scale,
to_target,
self.cabinet_dof_pos[:, 3].unsqueeze(-1),
self.cabinet_dof_vel[:, 3].unsqueeze(-1),
),
dim=-1,
)
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
targets = self.hsr_dof_targets + self.hsr_dof_speed_scales * self.dt * self.actions * self.action_scale
self.hsr_dof_targets[:] = tensor_clamp(targets, self.hsr_dof_lower_limits, self.hsr_dof_upper_limits)
env_ids_int32 = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
self._robots.set_joint_positions(self.hsr_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
# reset hsr
pos = tensor_clamp(
self.hsr_default_dof_pos.unsqueeze(0)
+ 0.25 * (torch.rand((len(env_ids), self.num_hsr_dofs), device=self._device) - 0.5),
self.hsr_dof_lower_limits,
self.hsr_dof_upper_limits,
)
dof_pos = torch.zeros((num_indices, self._robots.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._robots.num_dof), device=self._device)
dof_pos[:, :] = pos
self.hsr_dof_targets[env_ids, :] = pos
self.hsr_dof_pos[env_ids, :] = pos
# reset cabinet
self._cabinets.set_joint_positions(torch.zeros_like(self._cabinets.get_joint_positions(clone=False)[env_ids]), indices=indices)
self._cabinets.set_joint_velocities(torch.zeros_like(self._cabinets.get_joint_velocities(clone=False)[env_ids]), indices=indices)
# reset props
if self.num_props > 0:
self._props.set_world_poses(
self.default_prop_pos[self.prop_indices[env_ids].flatten()],
self.default_prop_rot[self.prop_indices[env_ids].flatten()],
self.prop_indices[env_ids].flatten().to(torch.int32)
)
self._robots.set_joint_position_targets(self.hsr_dof_targets[env_ids], indices=indices)
self._robots.set_joint_positions(dof_pos, indices=indices)
self._robots.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.num_hsr_dofs = self._robots.num_dof
self.hsr_dof_pos = torch.zeros((self.num_envs, self.num_hsr_dofs), device=self._device)
dof_limits = self._robots.get_dof_limits()
self.hsr_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.hsr_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.hsr_dof_speed_scales = torch.ones_like(self.hsr_dof_lower_limits)
self.hsr_dof_speed_scales[self._robots.gripper_indices] = 0.1
self.hsr_dof_targets = torch.zeros(
(self._num_envs, self.num_hsr_dofs), dtype=torch.float, device=self._device
)
if self.num_props > 0:
self.default_prop_pos, self.default_prop_rot = self._props.get_world_poses()
self.prop_indices = torch.arange(self._num_envs * self.num_props, device=self._device).view(
self._num_envs, self.num_props
)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = self.compute_hsr_reward(
self.reset_buf, self.progress_buf, self.actions, self.cabinet_dof_pos,
self.hsr_grasp_pos, self.drawer_grasp_pos, self.hsr_grasp_rot, self.drawer_grasp_rot,
self.hsr_lfinger_pos, self.hsr_rfinger_pos,
self.gripper_forward_axis, self.drawer_inward_axis, self.gripper_up_axis, self.drawer_up_axis,
self._num_envs, self.dist_reward_scale, self.rot_reward_scale, self.around_handle_reward_scale, self.open_reward_scale,
self.finger_dist_reward_scale, self.action_penalty_scale, self.distX_offset, self._max_episode_length, self.hsr_dof_pos,
self.finger_close_reward_scale,
)
def is_done(self) -> None:
# reset if drawer is open or max length reached
self.reset_buf = torch.where(self.cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
def compute_grasp_transforms(
self,
hand_rot,
hand_pos,
hsr_local_grasp_rot,
hsr_local_grasp_pos,
drawer_rot,
drawer_pos,
drawer_local_grasp_rot,
drawer_local_grasp_pos,
):
global_hsr_rot, global_hsr_pos = tf_combine(
hand_rot, hand_pos, hsr_local_grasp_rot, hsr_local_grasp_pos
)
global_drawer_rot, global_drawer_pos = tf_combine(
drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos
)
return global_hsr_rot, global_hsr_pos, global_drawer_rot, global_drawer_pos
def compute_hsr_reward(
self, reset_buf, progress_buf, actions, cabinet_dof_pos,
hsr_grasp_pos, drawer_grasp_pos, hsr_grasp_rot, drawer_grasp_rot,
hsr_lfinger_pos, hsr_rfinger_pos,
gripper_forward_axis, drawer_inward_axis, gripper_up_axis, drawer_up_axis,
num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale,
finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length, joint_positions, finger_close_reward_scale
):
        # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float, Tensor, float) -> Tensor
# distance from hand to the drawer
d = torch.norm(hsr_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
dist_reward = 1.0 / (1.0 + d ** 2)
dist_reward *= dist_reward
dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)
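        # The squared inverse-quadratic kernel above is 1 / (1 + d^2)^2, e.g.
        # d = 0.10 m -> ~0.98, and within the 2 cm threshold the value is
        # doubled to ~2.0, sharpening the peak around the handle.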
axis1 = tf_vector(hsr_grasp_rot, gripper_forward_axis)
axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis)
axis3 = tf_vector(hsr_grasp_rot, gripper_up_axis)
axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis)
dot1 = torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of forward axis for gripper
dot2 = torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of up axis for gripper
# reward for matching the orientation of the hand to the drawer (fingers wrapped)
rot_reward = 0.5 * (torch.sign(dot1) * dot1 ** 2 + torch.sign(dot2) * dot2 ** 2)
# bonus if left finger is above the drawer handle and right below
around_handle_reward = torch.zeros_like(rot_reward)
around_handle_reward = torch.where(hsr_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(hsr_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
around_handle_reward + 0.5, around_handle_reward), around_handle_reward)
# reward for distance of each finger from the drawer
finger_dist_reward = torch.zeros_like(rot_reward)
lfinger_dist = torch.abs(hsr_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
rfinger_dist = torch.abs(hsr_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
finger_dist_reward = torch.where(hsr_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(hsr_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
(0.04 - lfinger_dist) + (0.04 - rfinger_dist), finger_dist_reward), finger_dist_reward)
finger_close_reward = torch.zeros_like(rot_reward)
        finger_close_reward = torch.where(d <= 0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward)
# regularization on the actions (summed for each environment)
action_penalty = torch.sum(actions ** 2, dim=-1)
# how far the cabinet has been opened out
open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint
rewards = dist_reward_scale * dist_reward + rot_reward_scale * rot_reward \
+ around_handle_reward_scale * around_handle_reward + open_reward_scale * open_reward \
+ finger_dist_reward_scale * finger_dist_reward - action_penalty_scale * action_penalty + finger_close_reward * finger_close_reward_scale
# bonus for opening drawer properly
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards)
return rewards
| 21223 |
Python
| 47.346241 | 222 | 0.625077 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/example/hsr_reach.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from hsr_rl.tasks.base.rl_task import RLTask
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, torch_random_dir_2
from omni.isaac.core.objects import DynamicSphere
# Whole Body example task with holonomic robot base
class HSRExampleReachTask(RLTask):
def __init__(
self,
name,
sim_config,
env
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._dt = torch.tensor(self._sim_config.task_config["sim"]["dt"] * self._sim_config.task_config["env"]["controlFrequencyInv"], device=self._device)
self._num_observations = self._task_cfg["env"]["num_observations"]
self._num_actions = self._task_cfg["env"]["num_actions"]
self._hsr_position = torch.tensor([0.0, 0.0, 0.01], device=self._device)
# ball properties
self._ball_position = torch.tensor([1.5, 0.0, 0.045], device=self._device)
self._ball_radius = torch.tensor([0.05], device=self._device)
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
# Start at 'home' positions
self.torso_start = torch.tensor([0.1], device=self._device)
self.base_start = torch.tensor([0.0, 0.0, 0.0], device=self._device)
self.arm_start = torch.tensor([0.1, -1.570796, 0.0, -0.392699, 0], device=self._device)
self.gripper_proximal_start = torch.tensor([0.75, 0.75], device=self._device) # Opened gripper by default
# joint & body names
self._torso_joint_name = ["torso_lift_joint"]
self._base_joint_names = ["joint_x", "joint_y", "joint_rz"]
self._arm_names = ["arm_lift_joint", "arm_flex_joint", "arm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
self._gripper_proximal_names = ["hand_l_proximal_joint", "hand_r_proximal_joint"]
# values are set in post_reset after model is loaded
self.torso_dof_idx = []
self.base_dof_idxs = []
self.arm_dof_idxs = []
self.gripper_proximal_dof_idxs = []
# dof joint position limits
self.torso_dof_lower = []
self.torso_dof_upper = []
self.base_dof_lower = []
self.base_dof_upper = []
self.arm_dof_lower = []
self.arm_dof_upper = []
self.gripper_p_dof_lower = []
self.gripper_p_dof_upper = []
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.add_hsr()
self.add_ball()
# Set up scene
super().set_up_scene(scene)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view") # ArticulationView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view", reset_xform_properties=False)
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
# Add ball to scene
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False)
scene.add(self._balls)
return
def add_hsr(self):
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb", name="hsrb", translation=self._hsr_position)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_ball(self):
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/Ball/ball",
translation=self._ball_position,
name="ball_0",
radius=0.05,
color=torch.tensor([0.2, 0.4, 0.6]),
)
self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball"))
def get_observations(self):
# Get ball positions and orientations
ball_positions, ball_orientations = self._balls.get_world_poses(clone=False)
ball_positions = ball_positions[:, 0:3] - self._env_pos
# Get ball velocities
ball_velocities = self._balls.get_velocities(clone=False)
ball_linvels = ball_velocities[:, 0:3]
ball_angvels = ball_velocities[:, 3:6]
# Get end effector positions and orientations
end_effector_positions, end_effector_orientations = self._robots._fingertip_centered.get_world_poses(clone=False)
end_effector_positions = end_effector_positions[:, 0:3] - self._env_pos
# Get end effector velocities
end_effector_velocities = self._robots._fingertip_centered.get_velocities(clone=False)
end_effector_linvels = end_effector_velocities[:, 0:3]
end_effector_angvels = end_effector_velocities[:, 3:6]
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
self.obs_buf[..., 0:8] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 8:16] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 16:19] = end_effector_positions
self.obs_buf[..., 19:22] = end_effector_linvels
self.obs_buf[..., 22:25] = ball_positions
self.obs_buf[..., 25:28] = ball_linvels
self.ball_positions = ball_positions
self.ball_linvels = ball_linvels
observations = {
"hsr_reach": {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# update position targets from actions
self.dof_position_targets[..., self.actuated_dof_indices] += self._dt * self._action_speed_scale * actions.to(self.device)
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits
)
dof_pos = self._robots.get_joint_positions()
arm_pos = dof_pos[:, self.arm_dof_idxs]
scaled_arm_lift_pos = arm_pos[:, 0] / self.arm_dof_upper[0]
scaled_torso_lift_pos = scaled_arm_lift_pos * self.torso_dof_upper[0]
self.dof_position_targets[:, self.torso_dof_idx] = scaled_torso_lift_pos.unsqueeze(dim=1)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = 0
self._robots.set_joint_positions(self.dof_position_targets)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = 0.001 # min horizontal dist from origin
max_d = 0.4 # max horizontal dist from origin
min_height = 0.0
max_height = 0.2
min_horizontal_speed = 0
max_horizontal_speed = 2
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
speedscales = (dists - min_d) / (max_d - min_d)
hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self._device)
hvels = -speedscales * hspeeds * dirs
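        # Balls spawned farther from the origin get a proportionally larger inward
        # velocity: speedscales is the normalised spawn distance, and the minus
        # sign points hvels back toward the robot at the origin.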
vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self._device).squeeze()
ball_pos = self.initial_ball_pos.clone()
ball_rot = self.initial_ball_rot.clone()
# position
ball_pos[env_ids_64, 0:2] += hpos[..., 0:2]
ball_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
ball_rot[env_ids_64, 0] = 1
ball_rot[env_ids_64, 1:] = 0
ball_velocities = self.initial_ball_velocities.clone()
# linear
ball_velocities[env_ids_64, 0:2] = hvels[..., 0:2]
ball_velocities[env_ids_64, 2] = vspeeds
# angular
ball_velocities[env_ids_64, 3:6] = 0
        # reset root state for robots and balls in selected envs
self._balls.set_world_poses(ball_pos[env_ids_64], ball_rot[env_ids_64], indices=env_ids_32)
self._balls.set_velocities(ball_velocities[env_ids_64], indices=env_ids_32)
# reset root pose and velocity
self._robots.set_world_poses(self.initial_robot_pos[env_ids_64].clone(), self.initial_robot_rot[env_ids_64].clone(), indices=env_ids_32)
self._robots.set_velocities(self.initial_robot_velocities[env_ids_64].clone(), indices=env_ids_32)
        # reset DOF states for robots in selected envs
self._robots.set_joint_positions(self.initial_dof_positions[env_ids_64].clone(), indices=env_ids_32)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.extras[env_ids] = 0
def post_reset(self):
self.set_dof_idxs()
self.set_dof_limits()
self.set_default_state()
dof_limits = self._robots.get_dof_limits()
self.robot_dof_lower_limits, self.robot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
# reset ball pos / rot, and velocities
self.initial_dof_positions = self._robots.get_joint_positions()
self.initial_robot_pos, self.initial_robot_rot = self._robots.get_world_poses()
self.initial_robot_velocities = self._robots.get_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses()
self.initial_ball_velocities = self._balls.get_velocities()
self.dof_position_targets = self._robots.get_joints_default_state().positions
self.actuated_dof_indices = torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10]).to(self._device)
def calculate_metrics(self) -> None:
# Distance from hand to the ball
dist = torch.norm(self.obs_buf[..., 22:25] - self.obs_buf[..., 16:19], p=2, dim=-1)
dist_reward = 1.0 / (1.0 + dist ** 2)
dist_reward *= dist_reward
dist_reward = torch.where(dist <= 0.02, dist_reward * 2, dist_reward)
self.rew_buf[:] = dist_reward
def is_done(self) -> None:
reset = torch.where(
self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf
)
reset = torch.where(self.ball_positions[..., 2] < self._ball_radius * 1.5, torch.ones_like(self.reset_buf), reset)
self.reset_buf[:] = reset
def set_dof_idxs(self):
[self.torso_dof_idx.append(self._robots.get_dof_index(name)) for name in self._torso_joint_name]
[self.base_dof_idxs.append(self._robots.get_dof_index(name)) for name in self._base_joint_names]
[self.arm_dof_idxs.append(self._robots.get_dof_index(name)) for name in self._arm_names]
[self.gripper_proximal_dof_idxs.append(self._robots.get_dof_index(name)) for name in self._gripper_proximal_names]
def set_dof_limits(self): # dof position limits
# (num_envs, num_dofs, 2)
dof_limits = self._robots.get_dof_limits()
dof_limits_lower = dof_limits[0, :, 0].to(self._device)
dof_limits_upper = dof_limits[0, :, 1].to(self._device)
# Set relevant joint position limit values
self.torso_dof_lower = dof_limits_lower[self.torso_dof_idx]
self.torso_dof_upper = dof_limits_upper[self.torso_dof_idx]
self.base_dof_lower = dof_limits_lower[self.base_dof_idxs]
self.base_dof_upper = dof_limits_upper[self.base_dof_idxs]
self.arm_dof_lower = dof_limits_lower[self.arm_dof_idxs]
self.arm_dof_upper = dof_limits_upper[self.arm_dof_idxs]
self.gripper_p_dof_lower = dof_limits_lower[self.gripper_proximal_dof_idxs]
self.gripper_p_dof_upper = dof_limits_upper[self.gripper_proximal_dof_idxs]
def set_default_state(self):
# Set default joint state
joint_states = self._robots.get_joints_default_state()
jt_pos = joint_states.positions
jt_pos[:, self.torso_dof_idx] = self.torso_start
jt_pos[:, self.base_dof_idxs] = self.base_start
jt_pos[:, self.arm_dof_idxs] = self.arm_start
jt_pos[:, self.gripper_proximal_dof_idxs] = self.gripper_proximal_start
jt_vel = joint_states.velocities
jt_vel[:, self.torso_dof_idx] = torch.zeros_like(self.torso_start, device=self._device)
jt_vel[:, self.base_dof_idxs] = torch.zeros_like(self.base_start, device=self._device)
jt_vel[:, self.arm_dof_idxs] = torch.zeros_like(self.arm_start, device=self._device)
jt_vel[:, self.gripper_proximal_dof_idxs] = torch.zeros_like(self.gripper_proximal_start, device=self._device)
self._robots.set_joints_default_state(positions=jt_pos, velocities=jt_vel)
| 15063 |
Python
| 45.637771 | 193 | 0.642701 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/example/hsr_pick.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import carb
import hydra
import torch
import numpy as np
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from hsr_rl.utils.dataset_utils import load_dataset
from hsr_rl.tasks.base.rl_task import RLTask
from hsr_rl.tasks.utils.scene_utils import spawn_dynamic_object
from hsr_rl.tasks.utils.ik_utils import DifferentialInverseKinematics, DifferentialInverseKinematicsCfg
from omni.isaac.core.prims import GeometryPrimView, RigidPrimView, XFormPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, print_stage_prim_paths
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp
from omni.isaac.core.utils.torch.rotations import euler_angles_to_quats, quat_diff_rad, torch_random_dir_2
from omni.isaac.core.objects import FixedCuboid, DynamicCuboid
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.sensor import _sensor
from omni.isaac.surface_gripper._surface_gripper import Surface_Gripper_Properties, Surface_Gripper
from omni.physx.scripts import utils, physicsUtils
from pxr import Gf, Sdf, UsdGeom, PhysxSchema, UsdPhysics
class HSRExamplePickTask(RLTask):
def __init__(
self,
name,
sim_config,
env
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
# Get dt for integrating velocity commands and checking limit violations
self._dt = torch.tensor(self._task_cfg["sim"]["dt"] * self._task_cfg["env"]["controlFrequencyInv"], device=self._device)
self._action_type = self._task_cfg["env"]["action_type"]
self._target_space = self._task_cfg["env"]["target_space"]
self._num_props = self._task_cfg["env"]["numProps"]
self._num_actions = self._task_cfg["env"]["num_actions"]
self._num_observations = self._task_cfg["env"]["num_observations"]
# Table and prop settings
self._table_height = 0.2
self._table_width = 0.65
self._table_depth = 1.2
self._table_size = 1.0
self._prop_size = self._sim_config.task_config["sim"]["parts"]["size"]
self._prop_mass = self._sim_config.task_config["sim"]["parts"]["mass"]
self._prop_density = self._sim_config.task_config["sim"]["parts"]["density"]
self._prop_static_friction = self._sim_config.task_config["sim"]["parts"]["static_friction"]
self._prop_dynamic_friction = self._sim_config.task_config["sim"]["parts"]["dynamic_friction"]
self._prop_restitution = self._sim_config.task_config["sim"]["parts"]["restitution"]
self._gripper_mass = self._sim_config.task_config["sim"]["gripper"]["mass"]
self._gripper_density = self._sim_config.task_config["sim"]["gripper"]["density"]
self._gripper_static_friction = self._sim_config.task_config["sim"]["gripper"]["static_friction"]
self._gripper_dynamic_friction = self._sim_config.task_config["sim"]["gripper"]["dynamic_friction"]
self._gripper_restitution = self._sim_config.task_config["sim"]["gripper"]["restitution"]
self._pick_success = self._table_height + 0.05
self._place_success = torch.tensor([0.2, 0.2], device=self._device)
self._hsr_position = torch.tensor([0.0, 0.0, 0.03], device=self._device)
self._hsr_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._table_position = torch.tensor([1.4, 0.0, self._table_height/2], device=self._device)
self._table_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._prop_position = torch.tensor([1.25, 0.0, self._table_height+self._prop_size/2], device=self._device)
self._prop_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
# Start at 'home' positions
self.torso_start = torch.tensor([0.1], device=self._device)
self.base_start = torch.tensor([0.0, 0.0, 0.0], device=self._device)
self.arm_start = torch.tensor([0.1, -1.570796, 0.0, 0.0, 0.0], device=self._device)
self.gripper_proximal_start = torch.tensor([0.75, 0.75], device=self._device)
self.initial_dof_positions = torch.tensor([0.0, 0.0, 0.0, 0.1, 0.1, -1.570796, 0.0, 0.0, 0.0, 0.0, 0.0, 0.75, 0.75, 0.0, 0.0], device=self._device)
# Dof joint gains
self.joint_kps = torch.tensor([1e9, 1e9, 5.7296e10, 1e9, 1e9, 5.7296e10,
5.7296e10, 5.7296e10, 5.7296e10, 5.7296e10, 5.7296e10, 2.8648e4,
2.8648e4, 5.7296e10, 5.7296e10], device=self._device)
self.joint_kds = torch.tensor([1.4, 1.4, 80.2141, 1.4, 0.0, 80.2141, 0.0, 80.2141,
0.0, 80.2141, 80.2141, 17.1887, 17.1887, 17.1887, 17.1887], device=self._device)
# Dof joint friction coefficients
self.joint_friction_coefficients = torch.tensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], device=self._device)
# Joint & body names
self._torso_joint_name = ["torso_lift_joint"]
self._base_joint_names = ["joint_x", "joint_y", "joint_rz"]
self._arm_names = ["arm_lift_joint", "arm_flex_joint", "arm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
self._gripper_proximal_names = ["hand_l_proximal_joint", "hand_r_proximal_joint"]
# Values are set in post_reset after model is loaded
self.torso_dof_idx = []
self.base_dof_idxs = []
self.arm_dof_idxs = []
self.gripper_proximal_dof_idxs = []
# Dof joint position limits
self.torso_dof_lower = []
self.torso_dof_upper = []
self.base_dof_lower = []
self.base_dof_upper = []
self.arm_dof_lower = []
self.arm_dof_upper = []
self.gripper_p_dof_lower = []
self.gripper_p_dof_upper = []
# Add contact sensor
self._contact_sensor_interface = _sensor.acquire_contact_sensor_interface()
self.replay_count = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.is_collided = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.lift_success = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.place_success = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.gripper_close = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.gripper_open = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.gripper_hold = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.exp_actions = self.load_exp_dataset()
self.ik_controller = self.set_ik_controller()
self._dynamic_control = _dynamic_control.acquire_dynamic_control_interface()
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.create_prop_material()
self.create_gripper_material()
self.add_hsr()
self.add_prop()
self.add_table()
# Set up scene
super().set_up_scene(scene)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view")
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
# Add prop to scene
self._props = RigidPrimView(prim_paths_expr="/World/envs/.*/prop", name="prop_view", reset_xform_properties=False)
scene.add(self._props)
def create_prop_material(self):
self._stage = get_current_stage()
self.propPhysicsMaterialPath = "/World/Physics_Materials/PropMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.propPhysicsMaterialPath,
density=self._prop_density,
staticFriction=self._prop_static_friction,
dynamicFriction=self._prop_dynamic_friction,
)
def create_gripper_material(self):
self._stage = get_current_stage()
self.gripperPhysicsMaterialPath = "/World/Physics_Materials/GripperMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.gripperPhysicsMaterialPath,
density=self._gripper_density,
staticFriction=self._gripper_static_friction,
dynamicFriction=self._gripper_dynamic_friction,
restitution=self._gripper_restitution
)
def add_hsr(self):
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb",
name="hsrb",
translation=self._hsr_position,
orientation=self._hsr_rotation)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_prop(self):
prop = DynamicCuboid(prim_path=self.default_zero_env_path + "/prop",
name="prop",
translation=self._prop_position,
orientation=self._prop_rotation,
size=self._prop_size,
color=torch.tensor([0.2, 0.4, 0.6]),
mass=0.1,
density=100.0)
self._sim_config.apply_articulation_settings("prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop"))
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(self.default_zero_env_path + "/prop"),
self.propPhysicsMaterialPath
)
def add_table(self):
table = FixedCuboid(prim_path=self.default_zero_env_path + "/table",
name="table",
translation=self._table_position,
orientation=self._table_rotation,
size=self._table_size,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([self._table_width, self._table_depth, self._table_height]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table"))
def get_observations(self):
# Get prop positions and orientations
prop_positions, prop_orientations = self._props.get_world_poses(clone=False)
prop_positions = prop_positions[:, 0:3] - self._env_pos
# Get prop velocities
prop_velocities = self._props.get_velocities(clone=False)
prop_linvels = prop_velocities[:, 0:3]
prop_angvels = prop_velocities[:, 3:6]
# Get end effector positions and orientations
end_effector_positions, end_effector_orientations = self._robots._fingertip_centered.get_world_poses(clone=False)
end_effector_positions = end_effector_positions[:, 0:3] - self._env_pos
# Get end effector velocities
end_effector_velocities = self._robots._fingertip_centered.get_velocities(clone=False)
end_effector_linvels = end_effector_velocities[:, 0:3]
end_effector_angvels = end_effector_velocities[:, 3:6]
self.prop_positions = prop_positions
self.prop_linvels = prop_linvels
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
self.obs_buf[..., 0:10] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 10:20] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 20:23] = end_effector_positions
self.obs_buf[..., 23:27] = end_effector_orientations
self.obs_buf[..., 27:30] = end_effector_linvels
self.obs_buf[..., 30:33] = end_effector_angvels
self.obs_buf[..., 33:36] = prop_positions
        self.obs_buf[..., 36:40] = prop_orientations
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
if self._target_space == "task":
# Set IK target positions
            replay_indices = self.replay_count.clone()
actions = self.exp_actions[replay_indices, :self._num_actions]
self.ik_controller.set_command(actions)
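            # Differential IK sketch: compute() takes the current end-effector pose,
            # the hand Jacobian and the joint state, and returns joint position
            # targets that move the fingertip frame toward the commanded task-space
            # pose (roughly q_target = q + J^+ * delta_x for small steps).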
# Calculate joint positions
ee_pos, ee_rot = self._robots._fingertip_centered.get_world_poses()
ee_pos -= self._env_pos
robot_jacobian = self._robots.get_jacobians(clone=False)[:, self._robots._body_indices['hand_palm_link'], :, self.movable_dof_indices]
joint_positions = self._robots.get_joint_positions(joint_indices=self.movable_dof_indices)
self.dof_position_targets[..., self.movable_dof_indices] = self.ik_controller.compute(
ee_pos,
ee_rot,
robot_jacobian,
joint_positions
)
else:
# Update position targets from actions
self.dof_position_targets[..., self.movable_dof_indices] = self._robots.get_joint_positions(joint_indices=self.movable_dof_indices)
self.dof_position_targets[..., self.movable_dof_indices] += self.exp_actions[self.replay_count, :self._num_actions]
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits
)
# Modify torso joint positions
dof_pos = self._robots.get_joint_positions()
arm_pos = dof_pos[:, self.arm_dof_idxs]
scaled_arm_lift_pos = arm_pos[:, 0] / self.arm_dof_upper[0]
scaled_torso_lift_pos = scaled_arm_lift_pos * self.torso_dof_upper[0]
self.dof_position_targets[:, self.torso_dof_idx] = scaled_torso_lift_pos.unsqueeze(dim=1)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = self.initial_dof_positions
self._robots.set_joint_position_targets(self.dof_position_targets)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = 0.0 # min horizontal dist from origin
max_d = 0.0 # max horizontal dist from origin
min_height = 0.0 # min vertical dist from origin
        max_height = 0.0 # max vertical dist from origin
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
# Prop pos / rot, velocities
self.prop_pos = self.initial_prop_pos.clone()
self.prop_rot = self.initial_prop_rot.clone()
# position
self.prop_pos[env_ids_64, 0:2] += hpos[..., 0:2]
self.prop_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
self.prop_rot[env_ids_64, 0] = 1
self.prop_rot[env_ids_64, 1:] = 0
# reset root state for props in selected envs
self._props.set_world_poses(self.prop_pos[env_ids_64], self.prop_rot[env_ids_64], indices=env_ids_32)
# reset root state for robots in selected envs
self._robots.set_world_poses(self.initial_robot_pos[env_ids_64], self.initial_robot_rot[env_ids_64], indices=env_ids_32)
# reset DOF states for robots in selected envs
self._robots.set_joint_position_targets(self.initial_dof_positions, indices=env_ids_32)
# bookkeeping
self.gripper_close[env_ids] = False
self.gripper_open[env_ids] = False
self.gripper_hold[env_ids] = False
self.replay_count[env_ids] = 0
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.extras[env_ids] = 0
self.is_collided[env_ids] = 0
self.lift_success[env_ids] = 0
self.place_success[env_ids] = 0
def post_reset(self):
if self._task_cfg["sim"]["disable_gravity"]:
self.disable_gravity()
self.set_dof_idxs()
self.set_dof_limits()
self.set_default_state()
self.set_joint_gains()
self.set_joint_frictions()
# reset robot pos / rot, and velocities
self.initial_robot_pos, self.initial_robot_rot = self._robots.get_world_poses()
self.initial_robot_velocities = self._robots.get_velocities()
# reset prop pos / rot, and velocities
self.initial_prop_pos, self.initial_prop_rot = self._props.get_world_poses()
self.initial_prop_velocities = self._props.get_velocities()
def calculate_metrics(self) -> None:
# Distance from hand to the ball
dist = torch.norm(self.obs_buf[..., 33:36] - self.obs_buf[..., 20:23], p=2, dim=-1)
dist_reward = 1.0 / (1.0 + dist ** 2)
dist_reward *= dist_reward
dist_reward = torch.where(dist <= 0.02, dist_reward * 2, dist_reward)
self.rew_buf[:] = dist_reward * self._task_cfg['rl']['distance_scale']
# In this policy, episode length is constant across all envs
is_last_step = (self.progress_buf[0] == int(self.exp_actions.size()[0] - 1))
if is_last_step:
# Check if block is picked up and above table
lift_success = self._check_lift_success(height_threshold=self._pick_success)
self.rew_buf[:] += lift_success * self._task_cfg['rl']['pick_success_bonus']
self.extras['lift_successes'] = torch.mean(lift_success.float())
self.lift_success = torch.where(
lift_success[:] == 1,
torch.ones_like(lift_success),
-torch.ones_like(lift_success)
)
def is_done(self) -> None:
self.reset_buf = torch.where(
self.progress_buf == self.exp_actions.size()[0] - 1,
torch.ones_like(self.reset_buf),
self.reset_buf
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.replay_count[:] += 1
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
replay_indices = torch.tensor(self.replay_count)
self.gripper_close = self.exp_actions[replay_indices, -1] < -0.01
self.gripper_open = self.exp_actions[replay_indices, -1] > 0.01
if torch.any(self.gripper_close):
close_indices = torch.where(self.gripper_close)[0]
self._close_gripper(close_indices, sim_steps=self._task_cfg['env']['num_gripper_close_sim_steps'])
elif torch.any(self.gripper_open):
open_indices = torch.where(self.gripper_open)[0]
self._open_gripper(open_indices, sim_steps=self._task_cfg['env']['num_gripper_open_sim_steps'])
elif torch.any(self.gripper_hold):
hold_indices = torch.where(self.gripper_hold)[0]
self._hold_gripper(hold_indices)
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def set_dof_idxs(self):
self.torso_dof_idx += [self._robots.get_dof_index(name) for name in self._torso_joint_name]
self.base_dof_idxs += [self._robots.get_dof_index(name) for name in self._base_joint_names]
self.arm_dof_idxs += [self._robots.get_dof_index(name) for name in self._arm_names]
self.gripper_proximal_dof_idxs += [self._robots.get_dof_index(name) for name in self._gripper_proximal_names]
# Movable joints
self.actuated_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs+self.gripper_proximal_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10, 11, 12]).to(self._device)
self.movable_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10]).to(self._device)
def set_dof_limits(self): # dof position limits
# (num_envs, num_dofs, 2)
dof_limits = self._robots.get_dof_limits()
dof_limits_lower = dof_limits[0, :, 0].to(self._device)
dof_limits_upper = dof_limits[0, :, 1].to(self._device)
# Set relevant joint position limit values
self.torso_dof_lower = dof_limits_lower[self.torso_dof_idx]
self.torso_dof_upper = dof_limits_upper[self.torso_dof_idx]
self.base_dof_lower = dof_limits_lower[self.base_dof_idxs]
self.base_dof_upper = dof_limits_upper[self.base_dof_idxs]
self.arm_dof_lower = dof_limits_lower[self.arm_dof_idxs]
self.arm_dof_upper = dof_limits_upper[self.arm_dof_idxs]
self.gripper_p_dof_lower = dof_limits_lower[self.gripper_proximal_dof_idxs]
self.gripper_p_dof_upper = dof_limits_upper[self.gripper_proximal_dof_idxs]
self.robot_dof_lower_limits, self.robot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
def set_default_state(self):
# Set default joint state
joint_states = self._robots.get_joints_default_state()
jt_pos = joint_states.positions
jt_pos[:, self.torso_dof_idx] = self.torso_start
jt_pos[:, self.base_dof_idxs] = self.base_start
jt_pos[:, self.arm_dof_idxs] = self.arm_start
jt_pos[:, self.gripper_proximal_dof_idxs] = self.gripper_proximal_start
jt_vel = joint_states.velocities
jt_vel[:, self.torso_dof_idx] = torch.zeros_like(self.torso_start, device=self._device)
jt_vel[:, self.base_dof_idxs] = torch.zeros_like(self.base_start, device=self._device)
jt_vel[:, self.arm_dof_idxs] = torch.zeros_like(self.arm_start, device=self._device)
jt_vel[:, self.gripper_proximal_dof_idxs] = torch.zeros_like(self.gripper_proximal_start, device=self._device)
self._robots.set_joints_default_state(positions=jt_pos, velocities=jt_vel)
# Initialize target positions
self.dof_position_targets = jt_pos
def set_joint_gains(self):
self._robots.set_gains(kps=self.joint_kps, kds=self.joint_kds)
def set_joint_frictions(self):
self._robots.set_friction_coefficients(self.joint_friction_coefficients)
def _close_gripper(self, env_ids, sim_steps=10):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_pos = torch.tensor([-0.1, -0.1], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_position_targets(gripper_dof_pos, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.gripper_hold[env_ids_64] = True
def _open_gripper(self, env_ids, sim_steps=10):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_pos = torch.tensor([0.5, 0.5], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_position_targets(gripper_dof_pos, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.gripper_hold[env_ids_64] = False
def _hold_gripper(self, env_ids):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_pos = torch.tensor([-0.1, -0.1], device=self._device)
self._robots.set_joint_position_targets(gripper_dof_pos, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
def _check_lift_success(self, height_threshold):
prop_pos, _ = self._props.get_world_poses()
prop_pos -= self._env_pos
# check z direction range
lift_success = torch.where(
prop_pos[:, 2] > height_threshold,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return lift_success
def load_exp_dataset(self):
exp_actions = load_dataset('holding_problem', action_type=self._action_type, target_space=self._target_space)
return torch.tensor(exp_actions, device=self._device) # (dataset_length, num_actions)
def set_ik_controller(self):
command_type = "pose_rel" if self._action_type == 'relative' else "pose_abs"
ik_control_cfg = DifferentialInverseKinematicsCfg(
command_type=command_type,
ik_method="dls",
position_offset=(0.0, 0.0, 0.0),
rotation_offset=(1.0, 0.0, 0.0, 0.0),
)
return DifferentialInverseKinematics(ik_control_cfg, self._num_envs, self._device)
def enable_gravity(self, gravity_mag):
"""Enable gravity with magnitude ``gravity_mag`` (in m/s^2, applied downwards)."""
gravity = [0.0, 0.0, -abs(gravity_mag)]
self._env._world._physics_sim_view.set_gravity(carb.Float3(gravity[0], gravity[1], gravity[2]))
def disable_gravity(self):
"""Disable gravity."""
gravity = [0.0, 0.0, 0.0]
self._env._world._physics_sim_view.set_gravity(carb.Float3(gravity[0], gravity[1], gravity[2]))
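# Example: how the replayed gripper command channel is decoded in
# post_physics_step above. A minimal standalone sketch; the 0.01 deadband
# mirrors the code above, but the action tensors below are illustrative only.
def _example_decode_gripper_commands():
    exp_actions = torch.tensor([[0.0, -0.5],
                                [0.0, 0.02],
                                [0.0, 0.0]])  # (T, num_actions + 1), last column is the gripper command
    step = torch.tensor([0, 1, 2])
    gripper_close = exp_actions[step, -1] < -0.01  # negative command -> close
    gripper_open = exp_actions[step, -1] > 0.01    # positive command -> open
    return gripper_close, gripper_open             # hold when neither is set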
| 28,313 |
Python
| 47.4 | 209 | 0.632112 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/base/rl_task.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
import numpy as np
import torch
from gym import spaces
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.cloner import GridCloner
from hsr_rl.tasks.utils.usd_utils import create_distant_light
from hsr_rl.utils.domain_randomization.randomize import Randomizer
import omni.kit
from omni.kit.viewport.utility.camera_state import ViewportCameraState
from omni.kit.viewport.utility import get_viewport_from_window_name
from pxr import Gf
class RLTask(BaseTask):
""" This class provides a PyTorch RL-specific interface for setting up RL tasks.
It includes utilities for setting up RL task related parameters,
cloning environments, and data collection for RL algorithms.
"""
def __init__(self, name, env, offset=None) -> None:
""" Initializes RL parameters, cloner object, and buffers.
Args:
name (str): name of the task.
env (VecEnvBase): an instance of the environment wrapper class to register task.
offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None.
"""
super().__init__(name=name, offset=offset)
# optimization flags for pytorch JIT
torch._C._jit_set_nvfuser_enabled(False)
self.test = self._cfg["test"]
self._device = self._cfg["sim_device"]
self._dr_randomizer = Randomizer(self._sim_config)
print("Task Device:", self._device)
self.randomize_actions = False
self.randomize_observations = False
self.clip_obs = self._cfg["task"]["env"].get("clipObservations", np.Inf)
self.clip_actions = self._cfg["task"]["env"].get("clipActions", np.Inf)
self.rl_device = self._cfg.get("rl_device", "cuda:0")
self.control_frequency_inv = self._cfg["task"]["env"].get("controlFrequencyInv", 1)
print("RL device: ", self.rl_device)
self._env = env
if not hasattr(self, "_num_agents"):
self._num_agents = 1 # used for multi-agent environments
if not hasattr(self, "_num_states"):
self._num_states = 0
# initialize data spaces (defaults to gym.Box)
if not hasattr(self, "action_space"):
self.action_space = spaces.Box(np.ones(self.num_actions) * -1.0, np.ones(self.num_actions) * 1.0)
if not hasattr(self, "observation_space"):
self.observation_space = spaces.Box(np.ones(self.num_observations) * -np.Inf, np.ones(self.num_observations) * np.Inf)
if not hasattr(self, "state_space"):
self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)
self._cloner = GridCloner(spacing=self._env_spacing)
self._cloner.define_base_env(self.default_base_env_path)
define_prim(self.default_zero_env_path)
self.cleanup()
def cleanup(self) -> None:
""" Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = torch.zeros((self._num_envs, self.num_observations), device=self._device, dtype=torch.float)
self.states_buf = torch.zeros((self._num_envs, self.num_states), device=self._device, dtype=torch.float)
self.rew_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.float)
self.reset_buf = torch.ones(self._num_envs, device=self._device, dtype=torch.long)
self.progress_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.extras = {}
def set_up_scene(self, scene, replicate_physics=True) -> None:
""" Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
replicate_physics (bool): Clone physics using PhysX API for better performance
"""
super().set_up_scene(scene)
collision_filter_global_paths = list()
if self._sim_config.task_config["sim"].get("add_ground_plane", True):
self._ground_plane_path = "/World/defaultGroundPlane"
collision_filter_global_paths.append(self._ground_plane_path)
scene.add_default_ground_plane(prim_path=self._ground_plane_path)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
self._env_pos = self._cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=prim_paths, replicate_physics=replicate_physics)
self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float)
self._cloner.filter_collisions(
self._env._world.get_physics_context().prim_path, "/World/collisions", prim_paths, collision_filter_global_paths)
self.set_initial_camera_params(camera_position=[10, 10, 3], camera_target=[0, 0, 0])
if self._sim_config.task_config["sim"].get("add_distant_light", True):
create_distant_light()
def set_initial_camera_params(self, camera_position=[10, 10, 3], camera_target=[0, 0, 0]):
if self._env._render:
viewport_api_2 = get_viewport_from_window_name("Viewport")
viewport_api_2.set_active_camera("/OmniverseKit_Persp")
camera_state = ViewportCameraState("/OmniverseKit_Persp", viewport_api_2)
camera_state.set_position_world(Gf.Vec3d(camera_position[0], camera_position[1], camera_position[2]), True)
camera_state.set_target_world(Gf.Vec3d(camera_target[0], camera_target[1], camera_target[2]), True)
@property
def default_base_env_path(self):
""" Retrieves default path to the parent of all env prims.
Returns:
default_base_env_path(str): Defaults to "/World/envs".
"""
return "/World/envs"
@property
def default_zero_env_path(self):
""" Retrieves default path to the first env prim (index 0).
Returns:
default_zero_env_path(str): Defaults to "/World/envs/env_0".
"""
return f"{self.default_base_env_path}/env_0"
@property
def num_envs(self):
""" Retrieves number of environments for task.
Returns:
num_envs(int): Number of environments.
"""
return self._num_envs
@property
def num_actions(self):
""" Retrieves dimension of actions.
Returns:
num_actions(int): Dimension of actions.
"""
return self._num_actions
@property
def num_observations(self):
""" Retrieves dimension of observations.
Returns:
num_observations(int): Dimension of observations.
"""
return self._num_observations
@property
def num_states(self):
""" Retrieves dimesion of states.
Returns:
num_states(int): Dimension of states.
"""
return self._num_states
@property
def num_agents(self):
""" Retrieves number of agents for multi-agent environments.
Returns:
num_agents(int): Number of agents.
"""
return self._num_agents
def get_states(self):
""" API for retrieving states buffer, used for asymmetric AC training.
Returns:
states_buf(torch.Tensor): States buffer.
"""
return self.states_buf
def get_extras(self):
""" API for retrieving extras data for RL.
Returns:
extras(dict): Dictionary containing extras data.
"""
return self.extras
def reset(self):
""" Flags all environments for reset.
"""
# self.reset_buf = torch.ones_like(self.reset_buf)
pass
def pre_physics_step(self, actions):
""" Optionally implemented by individual task classes to process actions.
Args:
actions (torch.Tensor): Actions generated by RL policy.
"""
pass
def post_physics_step(self):
""" Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
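# Example: a minimal RLTask subclass sketch showing the hooks that
# post_physics_step drives. It assumes the usual config/env plumbing
# (self._num_envs, the buffers prepared by cleanup()) is supplied by the
# framework; the constant reward and 100-step horizon are illustrative only.
class _ExampleTask(RLTask):
    def pre_physics_step(self, actions):
        pass  # apply `actions` to the simulated articulation here

    def get_observations(self):
        self.obs_buf[:] = 0.0  # fill with real state data in a concrete task
        return {"example": {"obs_buf": self.obs_buf}}

    def calculate_metrics(self) -> None:
        self.rew_buf[:] = 1.0  # constant reward, illustration only

    def is_done(self) -> None:
        self.reset_buf = torch.where(
            self.progress_buf >= 100, torch.ones_like(self.reset_buf), self.reset_buf
        )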
| 10,441 |
Python
| 39.007663 | 140 | 0.652715 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/base/hsr_task.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import carb
import hydra
import torch
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from hsr_rl.tasks.base.rl_task import RLTask
from hsr_rl.tasks.utils.ik_utils import DifferentialInverseKinematics, DifferentialInverseKinematicsCfg
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import quat_diff_rad, quat_mul, normalize
from omni.isaac.core.objects import FixedCuboid
from omni.isaac.core.simulation_context import SimulationContext
from omni.physx.scripts import utils, physicsUtils
class HSRBaseTask(RLTask):
def __init__(
self,
name,
sim_config,
env
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
# Get dt for integrating velocity commands and checking limit violations
self._dt = torch.tensor(self._sim_config.task_config["sim"]["dt"] * self._sim_config.task_config["env"]["controlFrequencyInv"], device=self._device)
# Set environment properties
self._table_height = self._task_cfg["env"]["table_height"]
self._table_width = self._task_cfg["env"]["table_width"]
self._table_depth = self._task_cfg["env"]["table_depth"]
# Set physics parameters for gripper
self._gripper_mass = self._sim_config.task_config["sim"]["gripper"]["mass"]
self._gripper_density = self._sim_config.task_config["sim"]["gripper"]["density"]
self._gripper_static_friction = self._sim_config.task_config["sim"]["gripper"]["static_friction"]
self._gripper_dynamic_friction = self._sim_config.task_config["sim"]["gripper"]["dynamic_friction"]
self._gripper_restitution = self._sim_config.task_config["sim"]["gripper"]["restitution"]
# Choose num_obs and num_actions based on task.
self._num_observations = self._task_cfg["env"]["num_observations"]
self._num_actions = self._task_cfg["env"]["num_actions"]
# Set inverse kinematics configurations
self._action_type = self._task_cfg["env"]["action_type"]
self._target_space = self._task_cfg["env"]["target_space"]
# Set up environment from loaded demonstration
self._hsr_position = torch.tensor([0.0, 0.0, 0.01], device=self._device)
self._hsr_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._table_position = torch.tensor([1.2, -0.2, self._table_height/2], device=self._device)
self._table_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
# Dof joint gains
self.joint_kps = torch.tensor([1e9, 1e9, 5.7296e10, 1e9, 1e9, 5.7296e10, 5.7296e10, 5.7296e10,
5.7296e10, 5.7296e10, 5.7296e10, 2.8648e4, 2.8648e4, 5.7296e10, 5.7296e10], device=self._device)
self.joint_kds = torch.tensor([1.4, 1.4, 80.2141, 1.4, 0.0, 80.2141, 0.0, 80.2141, 0.0, 80.2141,
80.2141, 17.1887, 17.1887, 17.1887, 17.1887], device=self._device)
# Dof joint friction coefficients
self.joint_friction_coefficients = torch.tensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], device=self._device)
# Joint & body names
self._torso_joint_name = ["torso_lift_joint"]
self._base_joint_names = ["joint_x", "joint_y", "joint_rz"]
self._arm_names = ["arm_lift_joint", "arm_flex_joint", "arm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
self._gripper_proximal_names = ["hand_l_proximal_joint", "hand_r_proximal_joint"]
# Values are set in post_reset after model is loaded
self.torso_dof_idx = []
self.base_dof_idxs = []
self.arm_dof_idxs = []
self.gripper_proximal_dof_idxs = []
# Dof joint position limits
self.torso_dof_lower = []
self.torso_dof_upper = []
self.base_dof_lower = []
self.base_dof_upper = []
self.arm_dof_lower = []
self.arm_dof_upper = []
self.gripper_proximal_dof_lower = []
self.gripper_proximal_dof_upper = []
# Gripper settings
self.gripper_close = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.gripper_open = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.gripper_hold = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
# Load ik controller
self.ik_controller = self.set_ik_controller()
RLTask.__init__(self, name, env)
return
def set_up_environment(self) -> None:
raise NotImplementedError
def set_up_scene(self, scene) -> None:
# Create gripper materials
self.create_gripper_material()
# Import before environment parallelization
self.add_hsr()
self.add_table()
# Set up scene
super().set_up_scene(scene, replicate_physics=False)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view")
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
def add_hsr(self):
# Add HSR
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb",
name="hsrb",
translation=self._hsr_position,
orientation=self._hsr_rotation)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_table(self):
# Add table
table = FixedCuboid(prim_path=self.default_zero_env_path + "/table",
name="table",
translation=self._table_position,
orientation=self._table_rotation,
size=1.0,  # unit cube; the actual table dimensions come from the scale below
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([self._table_width, self._table_depth, self._table_height]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table"))
def create_gripper_material(self):
self._stage = get_current_stage()
self.gripperPhysicsMaterialPath = "/World/Physics_Materials/GripperMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.gripperPhysicsMaterialPath,
density=self._gripper_density,
staticFriction=self._gripper_static_friction,
dynamicFriction=self._gripper_dynamic_friction,
restitution=self._gripper_restitution
)
def get_observations(self):
raise NotImplementedError()
def pre_physics_step(self, actions) -> None:
raise NotImplementedError()
def reset_idx(self, env_ids):
raise NotImplementedError()
def post_reset(self):
raise NotImplementedError()
def calculate_metrics(self) -> None:
raise NotImplementedError()
def is_done(self) -> None:
raise NotImplementedError()
def post_physics_step(self):
raise NotImplementedError()
def load_dataset(self):
raise NotImplementedError()
def set_dof_idxs(self):
self.torso_dof_idx += [self._robots.get_dof_index(name) for name in self._torso_joint_name]
self.base_dof_idxs += [self._robots.get_dof_index(name) for name in self._base_joint_names]
self.arm_dof_idxs += [self._robots.get_dof_index(name) for name in self._arm_names]
self.gripper_proximal_dof_idxs += [self._robots.get_dof_index(name) for name in self._gripper_proximal_names]
# Movable joints
self.actuated_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs+self.gripper_proximal_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10, 11, 12]).to(self._device)
self.movable_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10]).to(self._device)
def set_dof_limits(self): # dof position limits
# (num_envs, num_dofs, 2)
dof_limits = self._robots.get_dof_limits()
dof_limits_lower = dof_limits[0, :, 0].to(self._device)
dof_limits_upper = dof_limits[0, :, 1].to(self._device)
# Set relevant joint position limit values
self.torso_dof_lower = dof_limits_lower[self.torso_dof_idx]
self.torso_dof_upper = dof_limits_upper[self.torso_dof_idx]
self.base_dof_lower = dof_limits_lower[self.base_dof_idxs]
self.base_dof_upper = dof_limits_upper[self.base_dof_idxs]
self.arm_dof_lower = dof_limits_lower[self.arm_dof_idxs]
self.arm_dof_upper = dof_limits_upper[self.arm_dof_idxs]
self.gripper_proximal_dof_lower = dof_limits_lower[self.gripper_proximal_dof_idxs]
self.gripper_proximal_dof_upper = dof_limits_upper[self.gripper_proximal_dof_idxs]
self.robot_dof_lower_limits, self.robot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
def set_default_state(self):
# Start at 'home' positions
self.torso_start = self.initial_dof_positions[:, self.torso_dof_idx]
self.base_start = self.initial_dof_positions[:, self.base_dof_idxs]
self.arm_start = self.initial_dof_positions[:, self.arm_dof_idxs]
self.gripper_proximal_start = self.initial_dof_positions[:, self.gripper_proximal_dof_idxs]
# Set default joint state
joint_states = self._robots.get_joints_default_state()
jt_pos = joint_states.positions
jt_pos[:, self.torso_dof_idx] = self.torso_start.float()
jt_pos[:, self.base_dof_idxs] = self.base_start.float()
jt_pos[:, self.arm_dof_idxs] = self.arm_start.float()
jt_pos[:, self.gripper_proximal_dof_idxs] = self.gripper_proximal_start.float()
jt_vel = joint_states.velocities
jt_vel[:, self.torso_dof_idx] = torch.zeros_like(self.torso_start, device=self._device, dtype=torch.float)
jt_vel[:, self.base_dof_idxs] = torch.zeros_like(self.base_start, device=self._device, dtype=torch.float)
jt_vel[:, self.arm_dof_idxs] = torch.zeros_like(self.arm_start, device=self._device, dtype=torch.float)
jt_vel[:, self.gripper_proximal_dof_idxs] = torch.zeros_like(self.gripper_proximal_start, device=self._device, dtype=torch.float)
self._robots.set_joints_default_state(positions=jt_pos, velocities=jt_vel)
# Initialize target positions
self.dof_position_targets = jt_pos
def set_joint_gains(self):
self._robots.set_gains(kps=self.joint_kps, kds=self.joint_kds)
def set_joint_frictions(self):
self._robots.set_friction_coefficients(self.joint_friction_coefficients)
def _close_gripper(self, env_ids, sim_steps=1):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_effort = torch.tensor([-50., -50.], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_efforts(gripper_dof_effort, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.gripper_hold[env_ids_64] = True
def _open_gripper(self, env_ids, sim_steps=1):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_effort = torch.tensor([0., 0.], device=self._device)
self._robots.set_joint_efforts(gripper_dof_effort, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
gripper_dof_pos = torch.tensor([0.5, 0.5], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_position_targets(gripper_dof_pos, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.gripper_hold[env_ids_64] = False
def _hold_gripper(self, env_ids):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_effort = torch.tensor([-30., -30.], device=self._device)
self._robots.set_joint_efforts(gripper_dof_effort, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
def set_ik_controller(self):
command_type = "pose_rel" if self._action_type == 'relative' else "pose_abs"
ik_control_cfg = DifferentialInverseKinematicsCfg(
command_type=command_type,
ik_method="dls",
position_offset=(0.0, 0.0, 0.0),
rotation_offset=(1.0, 0.0, 0.0, 0.0),
)
return DifferentialInverseKinematics(ik_control_cfg, self._num_envs, self._device)
def enable_gravity(self):
"""Enable gravity."""
gravity = [0.0, 0.0, -9.81]
self._env._world._physics_sim_view.set_gravity(carb.Float3(gravity[0], gravity[1], gravity[2]))
def disable_gravity(self):
"""Disable gravity."""
gravity = [0.0, 0.0, 0.0]
self._env._world._physics_sim_view.set_gravity(carb.Float3(gravity[0], gravity[1], gravity[2]))
@torch.jit.script
def norm_diff_pos(p1: torch.Tensor, p2: torch.Tensor) -> torch.Tensor:
# Calculate norm
diff_norm = torch.norm(p1 - p2, p=2, dim=-1)
return diff_norm
@torch.jit.script
def norm_diff_rot(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
# Quaternions q and -q encode the same rotation, so measure the distance
# to both sign conventions and keep the smaller one
diff_norm1 = torch.norm(q1 - q2, p=2, dim=-1)
diff_norm2 = torch.norm(q1 + q2, p=2, dim=-1)
diff_norm = torch.min(diff_norm1, diff_norm2)
return diff_norm
@torch.jit.script
def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
aw, ax, ay, az = torch.unbind(a, -1)
bw, bx, by, bz = torch.unbind(b, -1)
ow = aw * bw - ax * bx - ay * by - az * bz
ox = aw * bx + ax * bw + ay * bz - az * by
oy = aw * by - ax * bz + ay * bw + az * bx
oz = aw * bz + ax * by - ay * bx + az * bw
return torch.stack((ow, ox, oy, oz), -1)
@torch.jit.script
def calc_diff_pos(p1, p2):
return p1 - p2
@torch.jit.script
def calc_diff_rot(q1, q2):
# Normalize the input quaternions
q1 = normalize(q1)
q2 = normalize(q2)
# Calculate the quaternion product between q2 and the inverse of q1
scaling = torch.tensor([1, -1, -1, -1], device=q1.device)
q1_inv = q1 * scaling
q_diff = quaternion_multiply(q2, q1_inv)
return q_diff
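# Example: sanity-checking the quaternion helpers above. The rotation
# difference of a quaternion with itself is the identity [1, 0, 0, 0];
# the input value below is illustrative.
def _example_quat_diff_identity():
    q = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]])  # 90 deg about z, (w, x, y, z)
    return calc_diff_rot(q, q)  # ~[[1.0, 0.0, 0.0, 0.0]]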
| 17,085 |
Python
| 44.684492 | 209 | 0.650454 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/robot_utils.py
|
import random
import numpy as np
import pybullet as p
from collections import namedtuple
from scipy.spatial.transform import Rotation as R
IKFastInfo = namedtuple('IKFastInfo', ['module_name', 'base_link', 'ee_link', 'free_joints'])
USE_ALL = False
USE_CURRENT = True
############ Mathematics
def invert(pose):
point, quat = pose
return p.invertTransform(point, quat) # TODO: modify
def multiply(*poses):
pose = poses[0]
for next_pose in poses[1:]:
pose = p.multiplyTransforms(pose[0], pose[1], *next_pose) # TODO: modify
return pose
##############
def get_distance(p1, p2, **kwargs):
assert len(p1) == len(p2)
diff = np.array(p2) - np.array(p1)
return np.linalg.norm(diff, ord=2)
def all_between(lower_limits, values, upper_limits):
assert len(lower_limits) == len(values)
assert len(values) == len(upper_limits)
return np.less_equal(lower_limits, values).all() and \
np.less_equal(values, upper_limits).all()
def compute_forward_kinematics(fk_fn, conf):
pose = fk_fn(list(conf))
pos, rot = pose
quat = R.from_matrix(rot).as_quat()
return pos, quat
def compute_inverse_kinematics(ik_fn, pose, sampled=[]):
pos, quat = pose[0], pose[1]
rot = R.from_quat(quat).as_matrix().tolist()
if len(sampled) == 0:
solutions = ik_fn(list(rot), list(pos))
else:
solutions = ik_fn(list(rot), list(pos), list(sampled))
if solutions is None:
return []
return solutions
def select_solution(joints, solutions, curr_joints, nearby_conf=USE_ALL, **kwargs):
if not solutions:
return None
if nearby_conf is USE_ALL:
return random.choice(solutions)
if nearby_conf is USE_CURRENT:
nearby_conf = curr_joints
return min(solutions, key=lambda conf: get_distance(nearby_conf, conf, **kwargs))
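# Example usage of the IK helpers above. `ik_fn` is a hypothetical IKFast-style
# solver returning candidate joint configurations; with USE_CURRENT the
# candidate closest to the current joints is selected. Values are illustrative.
def _example_select_ik_solution():
    def ik_fn(rot, pos):
        return [[0.0, 0.1, 0.2], [1.5, -0.4, 0.9]]  # two fake candidates
    pose = ([0.4, 0.0, 0.3], [0.0, 0.0, 0.0, 1.0])  # (position, xyzw quaternion)
    solutions = compute_inverse_kinematics(ik_fn, pose)
    curr_joints = [0.05, 0.05, 0.15]
    return select_solution(None, solutions, curr_joints, nearby_conf=USE_CURRENT)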
| 1,849 |
Python
| 26.205882 | 93 | 0.646836 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/array_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Utilities for working with different array backends."""
import numpy as np
import torch
from typing import Optional, Sequence, Union
import warp as wp
__all__ = ["TENSOR_TYPES", "TENSOR_TYPE_CONVERSIONS", "convert_to_torch"]
TENSOR_TYPES = {
"numpy": np.ndarray,
"torch": torch.Tensor,
"warp": wp.array,
}
"""A dictionary containing the types for each backend.
The keys are the name of the backend ("numpy", "torch", "warp") and the values are the corresponding type
(``np.ndarray``, ``torch.Tensor``, ``wp.array``).
"""
TENSOR_TYPE_CONVERSIONS = {
"numpy": {wp.array: lambda x: x.numpy(), torch.Tensor: lambda x: x.detach().cpu().numpy()},
"torch": {wp.array: lambda x: wp.torch.to_torch(x), np.ndarray: lambda x: torch.from_numpy(x)},
"warp": {np.array: lambda x: wp.array(x), torch.Tensor: lambda x: wp.torch.from_torch(x)},
}
"""A nested dictionary containing the conversion functions for each backend.
The keys of the outer dictionary are the name of target backend ("numpy", "torch", "warp"). The keys of the
inner dictionary are the source backend (``np.ndarray``, ``torch.Tensor``, ``wp.array``).
"""
def convert_to_torch(
array: Sequence[float],
dtype: torch.dtype = None,
device: Optional[Union[torch.device, str]] = None,
) -> torch.Tensor:
"""Converts a given array into a torch tensor.
The function tries to convert the array to a torch tensor. If the array is a numpy/warp arrays, or python
list/tuples, it is converted to a torch tensor. If the array is already a torch tensor, it is returned
directly.
If ``device`` is :obj:`None`, then the function deduces the current device of the data. For numpy arrays,
this defaults to "cpu", for torch tensors it is "cpu" or "cuda", and for warp arrays it is "cuda".
Args:
array (Sequence[float]): The input array. It can be a numpy array, warp array, python list/tuple, or torch tensor.
dtype (torch.dtype, optional): Target data-type for the tensor.
device (Optional[Union[torch.device, str]], optional): The target device for the tensor. Defaults to None.
Returns:
torch.Tensor: The converted array as torch tensor.
"""
# Convert array to tensor
if isinstance(array, torch.Tensor):
tensor = array
elif isinstance(array, np.ndarray):
tensor = torch.from_numpy(array)
elif isinstance(array, wp.array):
tensor = wp.to_torch(array)
else:
tensor = torch.Tensor(array)
# Convert tensor to the right device
if device is not None and str(tensor.device) != str(device):
tensor = tensor.to(device)
# Convert dtype of tensor if requested
if dtype is not None and tensor.dtype != dtype:
tensor = tensor.type(dtype)
return tensor
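# Example: `convert_to_torch` accepts numpy arrays, lists, and tensors alike.
# A minimal sketch assuming a CPU-only setup (warp arrays would additionally
# require a CUDA device).
def _example_convert_to_torch():
    t1 = convert_to_torch(np.array([1.0, 2.0, 3.0]), dtype=torch.float32)
    t2 = convert_to_torch([4.0, 5.0], device="cpu")
    return t1, t2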
| 2,943 |
Python
| 37.233766 | 122 | 0.678899 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/math_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Provides utilities for math operations.
Some of these are imported from the module `omni.isaac.core.utils.torch` for convenience.
"""
import numpy as np
import torch
import torch.nn.functional
from typing import Optional, Sequence, Tuple, Union
from omni.isaac.core.utils.torch.maths import normalize, scale_transform, unscale_transform
from omni.isaac.core.utils.torch.rotations import (
quat_apply,
quat_conjugate,
quat_from_angle_axis,
quat_mul,
quat_rotate,
quat_rotate_inverse,
)
__all__ = [
# General
"wrap_to_pi",
"saturate",
"copysign",
# General-Isaac Sim
"normalize",
"scale_transform",
"unscale_transform",
# Rotation
"matrix_from_quat",
"quat_inv",
"quat_from_euler_xyz",
"quat_apply_yaw",
"quat_box_minus",
"euler_xyz_from_quat",
"axis_angle_from_quat",
# Rotation-Isaac Sim
"quat_apply",
"quat_from_angle_axis",
"quat_mul",
"quat_conjugate",
"quat_rotate",
"quat_rotate_inverse",
# Transformations
"combine_frame_transforms",
"subtract_frame_transforms",
"compute_pose_error",
"apply_delta_pose",
# Sampling
"default_orientation",
"random_orientation",
"random_yaw_orientation",
"sample_triangle",
"sample_uniform",
]
"""
General
"""
@torch.jit.script
def wrap_to_pi(angles: torch.Tensor) -> torch.Tensor:
"""Wraps input angles (in radians) to the range [-pi, pi].
Args:
angles (torch.Tensor): Input angles.
Returns:
torch.Tensor: Angles in the range [-pi, pi].
"""
angles %= 2 * np.pi
angles -= 2 * np.pi * (angles > np.pi)
return angles
@torch.jit.script
def saturate(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""Clamps a given input tensor to (lower, upper).
It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape (dims,)
upper: The maximum value of the tensor. Shape (dims,)
Returns:
Clamped transform of the tensor. Shape (N, dims)
"""
return torch.max(torch.min(x, upper), lower)
@torch.jit.script
def copysign(mag: float, other: torch.Tensor) -> torch.Tensor:
"""Create a new floating-point tensor with the magnitude of input and the sign of other, element-wise.
Note:
The implementation follows from `torch.copysign`. The function allows a scalar magnitude.
Args:
mag (float): The magnitude scalar.
other (torch.Tensor): The tensor containing values whose signbits are applied to magnitude.
Returns:
torch.Tensor: The output tensor.
"""
mag = torch.tensor(mag, device=other.device, dtype=torch.float).repeat(other.shape[0])
return torch.abs(mag) * torch.sign(other)
"""
Rotation
"""
@torch.jit.script
def matrix_from_quat(quaternions: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
Reference:
Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L41-L70)
"""
r, i, j, k = torch.unbind(quaternions, -1)
# pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
def convert_quat(
quat: Union[torch.tensor, Sequence[float]], to: Optional[str] = "xyzw"
) -> Union[torch.tensor, np.ndarray]:
"""Converts quaternion from one convention to another.
The convention to convert TO is specified as an optional argument. If to == 'xyzw',
then the input is in 'wxyz' format, and vice-versa.
Args:
quat (Union[torch.tensor, Sequence[float]]): Input quaternion of shape (..., 4).
to (Optional[str], optional): Convention to convert quaternion to.. Defaults to "xyzw".
Raises:
ValueError: Invalid input argument `to`, i.e. not "xyzw" or "wxyz".
ValueError: Invalid shape of input `quat`, i.e. not (..., 4,).
Returns:
Union[torch.tensor, np.ndarray]: The converted quaternion in specified convention.
"""
# convert to numpy (sanity check)
if not isinstance(quat, torch.Tensor):
quat = np.asarray(quat)
# check input is correct
if quat.shape[-1] != 4:
msg = f"convert_quat(): Expected input quaternion shape mismatch: {quat.shape} != (..., 4)."
raise ValueError(msg)
# convert to specified quaternion type
if to == "xyzw":
return quat[..., [1, 2, 3, 0]]
elif to == "wxyz":
return quat[..., [3, 0, 1, 2]]
else:
raise ValueError("convert_quat(): Choose a valid `to` argument (xyzw or wxyz).")
@torch.jit.script
def quat_inv(q: torch.Tensor) -> torch.Tensor:
"""Compute the inverse of a quaternion.
Args:
q (torch.Tensor): The input quaternion (w, x, y, z).
Returns:
torch.Tensor: The inverse quaternion (w, x, y, z).
"""
return normalize(quat_conjugate(q))
@torch.jit.script
def quat_from_euler_xyz(roll: torch.Tensor, pitch: torch.Tensor, yaw: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as Euler angles in radians to Quaternions.
Note:
The euler angles are assumed in XYZ convention.
Args:
roll: Rotation around x-axis (in radians). Shape: [N,]
pitch: Rotation around y-axis (in radians). Shape: [N,]
yaw: Rotation around z-axis (in radians). Shape: [N,]
Returns:
torch.Tensor: Quaternion with real part in the start. Shape: [N, 4,]
"""
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
# compute quaternion
qw = cy * cr * cp + sy * sr * sp
qx = cy * sr * cp - sy * cr * sp
qy = cy * cr * sp + sy * sr * cp
qz = sy * cr * cp - cy * sr * sp
return torch.stack([qw, qx, qy, qz], dim=-1)
@torch.jit.script
def euler_xyz_from_quat(quat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Convert rotations given as quaternions to Euler angles in radians.
Note:
The euler angles are assumed in XYZ convention.
Reference:
https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
Args:
quat: Quaternion with real part in the start. Shape: [N, 4,]
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: A tuple containing roll-pitch-yaw.
"""
q_w, q_x, q_y, q_z = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
# roll (x-axis rotation)
sin_roll = 2.0 * (q_w * q_x + q_y * q_z)
cos_roll = 1 - 2 * (q_x * q_x + q_y * q_y)
roll = torch.atan2(sin_roll, cos_roll)
# pitch (y-axis rotation)
sin_pitch = 2.0 * (q_w * q_y - q_z * q_x)
pitch = torch.where(torch.abs(sin_pitch) >= 1, copysign(np.pi / 2.0, sin_pitch), torch.asin(sin_pitch))
# yaw (z-axis rotation)
sin_yaw = 2.0 * (q_w * q_z + q_x * q_y)
cos_yaw = 1 - 2 * (q_y * q_y + q_z * q_z)
yaw = torch.atan2(sin_yaw, cos_yaw)
return roll % (2 * np.pi), pitch % (2 * np.pi), yaw % (2 * np.pi)
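# Example: an Euler <-> quaternion round trip with the two helpers above.
# Angles come back wrapped to [0, 2*pi), so small positive inputs are
# recovered unchanged; the values below are illustrative.
def _example_euler_quat_roundtrip():
    roll, pitch, yaw = torch.tensor([0.1]), torch.tensor([0.2]), torch.tensor([0.3])
    quat = quat_from_euler_xyz(roll, pitch, yaw)
    return euler_xyz_from_quat(quat)  # ~(0.1, 0.2, 0.3)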
@torch.jit.script
def quat_apply_yaw(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
"""Rotate a vector only around the yaw-direction.
Args:
quat (torch.Tensor): Input orientation to extract yaw from.
vec (torch.Tensor): Input vector.
Returns:
torch.Tensor: Rotated vector.
"""
quat_yaw = quat.clone().view(-1, 4)
quat_yaw[:, 1:3] = 0.0 # set x, y components as zero
quat_yaw = normalize(quat_yaw)
return quat_apply(quat_yaw, vec)
@torch.jit.script
def quat_box_minus(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
"""Implements box-minus operator (quaternion difference).
Args:
q1 (torch.Tensor): A (N, 4) tensor for quaternion (w, x, y, z)
q2 (torch.Tensor): A (N, 4) tensor for quaternion (w, x, y, z)
Returns:
torch.Tensor: q1 box-minus q2
Reference:
https://docs.leggedrobotics.com/kindr/cheatsheet_latest.pdf
"""
quat_diff = quat_mul(q1, quat_conjugate(q2)) # q1 * q2^-1
re = quat_diff[:, 0] # real part, q = [w, x, y, z] = [re, im]
im = quat_diff[:, 1:] # imaginary part
norm_im = torch.norm(im, dim=1)
scale = 2.0 * torch.where(norm_im > 1.0e-7, torch.atan(norm_im / re) / norm_im, torch.sign(re))
return scale.unsqueeze(-1) * im
@torch.jit.script
def axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tensor:
"""Convert rotations given as quaternions to axis/angle.
Args:
quat (torch.Tensor): quaternions with real part first, as tensor of shape (..., 4).
eps (float): The tolerance for Taylor approximation. Defaults to 1.0e-6.
Returns:
torch.Tensor: Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle turned
anti-clockwise in radians around the vector's direction.
Reference:
Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L526-L554)
"""
# Modified to take in quat as [q_w, q_x, q_y, q_z]
# Quaternion is [q_w, q_x, q_y, q_z] = [cos(theta/2), n_x * sin(theta/2), n_y * sin(theta/2), n_z * sin(theta/2)]
# Axis-angle is [a_x, a_y, a_z] = [theta * n_x, theta * n_y, theta * n_z]
# Thus, axis-angle is [q_x, q_y, q_z] / (sin(theta/2) / theta)
# When theta = 0, (sin(theta/2) / theta) is undefined
# However, as theta --> 0, we can use the Taylor approximation 1/2 - theta^2 / 48
quat = quat * (1.0 - 2.0 * (quat[..., 0:1] < 0.0))
mag = torch.linalg.norm(quat[..., 1:], dim=1)
half_angle = torch.atan2(mag, quat[..., 0])
angle = 2.0 * half_angle
# check whether to apply Taylor approximation
sin_half_angles_over_angles = torch.where(
angle.abs() > eps, torch.sin(half_angle) / angle, 0.5 - angle * angle / 48
)
return quat[..., 1:4] / sin_half_angles_over_angles.unsqueeze(-1)
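# Example: a 90 degree rotation about z maps to the axis-angle vector
# [0, 0, pi/2] under `axis_angle_from_quat`; the input is illustrative.
def _example_axis_angle_from_quat():
    quat = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]])  # (w, x, y, z)
    return axis_angle_from_quat(quat)  # ~[[0.0, 0.0, 1.5708]]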
"""
Transformations
"""
@torch.jit.script
def combine_frame_transforms(
t01: torch.Tensor, q01: torch.Tensor, t12: torch.Tensor = None, q12: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Combine transformations between two reference frames into a stationary frame.
It performs the following transformation operation: :math:`T_{02} = T_{01} \times T_{12}`,
where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.
Args:
t01 (torch.Tensor): Position of frame 1 w.r.t. frame 0.
q01 (torch.Tensor): Quaternion orientation of frame 1 w.r.t. frame 0.
t12 (torch.Tensor): Position of frame 2 w.r.t. frame 1.
q12 (torch.Tensor): Quaternion orientation of frame 2 w.r.t. frame 1.
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple containing the position and orientation of
frame 2 w.r.t. frame 0.
"""
# compute orientation
if q12 is not None:
q02 = quat_mul(q01, q12)
else:
q02 = q01
# compute translation
if t12 is not None:
t02 = t01 + quat_apply(q01, t12)
else:
t02 = t01
return t02, q02
@torch.jit.script
def subtract_frame_transforms(
t01: torch.Tensor, q01: torch.Tensor, t02: torch.Tensor, q02: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Subtract transformations between two reference frames into a stationary frame.
It performs the following transformation operation: :math:`T_{12} = T_{01}^{-1} \times T_{02}`,
where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.
Args:
t01 (torch.Tensor): Position of frame 1 w.r.t. frame 0.
q01 (torch.Tensor): Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z).
t02 (torch.Tensor): Position of frame 2 w.r.t. frame 0.
q02 (torch.Tensor): Quaternion orientation of frame 2 w.r.t. frame 0 in (w, x, y, z).
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple containing the position and orientation of
frame 2 w.r.t. frame 1.
"""
# compute orientation
q10 = quat_inv(q01)
q12 = quat_mul(q10, q02)
# compute translation
t12 = quat_apply(q10, t02 - t01)
return t12, q12
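# Example: `subtract_frame_transforms` undoes `combine_frame_transforms`:
# composing T_01 with T_12 and then subtracting T_01 recovers T_12.
# A minimal sketch with illustrative values.
def _example_transform_roundtrip():
    t01 = torch.tensor([[1.0, 0.0, 0.0]])
    q01 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])              # identity, (w, x, y, z)
    t12 = torch.tensor([[0.0, 2.0, 0.0]])
    q12 = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]])  # 90 deg about z
    t02, q02 = combine_frame_transforms(t01, q01, t12, q12)
    return subtract_frame_transforms(t01, q01, t02, q02)    # ~(t12, q12)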
@torch.jit.script
def compute_pose_error(t01: torch.Tensor, q01: torch.Tensor, t02: torch.Tensor, q02: torch.Tensor, rot_error_type: str):
"""Compute the position and orientation error between source and target frames.
Args:
t01 (torch.Tensor): Position of source frame.
q01 (torch.Tensor): Quaternion orientation of source frame.
t02 (torch.Tensor): Position of target frame.
q02 (torch.Tensor): Quaternion orientation of target frame.
rot_error_type (str): The rotation error type to return: "quat", "axis_angle".
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple containing position and orientation error.
"""
# Compute quaternion error (i.e., difference quaternion)
# Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html
# q_current_norm = q_current * q_current_conj
source_quat_norm = quat_mul(q01, quat_conjugate(q01))[:, 0]
# q_current_inv = q_current_conj / q_current_norm
source_quat_inv = quat_conjugate(q01) / source_quat_norm.unsqueeze(-1)
# q_error = q_target * q_current_inv
quat_error = quat_mul(q02, source_quat_inv)
# Compute position error
pos_error = t02 - t01
# return error based on specified type
if rot_error_type == "quat":
return pos_error, quat_error
elif rot_error_type == "axis_angle":
# Convert to axis-angle error
axis_angle_error = axis_angle_from_quat(quat_error)
return pos_error, axis_angle_error
else:
raise ValueError(f"Unsupported orientation error type: {rot_error_type}. Valid: 'quat', 'axis_angle'.")
@torch.jit.script
def apply_delta_pose(
source_pos: torch.Tensor, source_rot: torch.Tensor, delta_pose: torch.Tensor, eps: float = 1.0e-6
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Applies delta pose transformation on source pose.
The first three elements of `delta_pose` are interpreted as cartesian position displacement.
The remaining three elements of `delta_pose` are interpreted as orientation displacement
in the angle-axis format.
Args:
source_pos (torch.Tensor): Position of source frame. Shape: [N, 3]
source_rot (torch.Tensor): Quaternion orientation of source frame in (w, x, y, z).
delta_pose (torch.Tensor): Position and orientation displacements. Shape [N, 6].
eps (float): The tolerance to consider orientation displacement as zero.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The displaced position and orientation frames. Shape: ([N, 3], [N, 4])
"""
# number of poses given
num_poses = source_pos.shape[0]
device = source_pos.device
# interpret delta_pose[:, 0:3] as target position displacements
target_pos = source_pos + delta_pose[:, 0:3]
# interpret delta_pose[:, 3:6] as target rotation displacements
rot_actions = delta_pose[:, 3:6]
angle = torch.linalg.vector_norm(rot_actions, dim=1)
axis = rot_actions / angle.unsqueeze(-1)
# change from axis-angle to quat convention
identity_quat = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat(num_poses, 1)
rot_delta_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > eps, quat_from_angle_axis(angle, axis), identity_quat
)
# TODO: Check if this is the correct order for this multiplication.
target_rot = quat_mul(rot_delta_quat, source_rot)
return target_pos, target_rot
"""
Sampling
"""
@torch.jit.script
def default_orientation(num: int, device: str) -> torch.Tensor:
"""Returns identity rotation transform.
Args:
num (int): The number of rotations to sample.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Identity quaternion (w, x, y, z).
"""
quat = torch.zeros((num, 4), dtype=torch.float, device=device)
quat[..., 0] = 1.0
return quat
@torch.jit.script
def random_orientation(num: int, device: str) -> torch.Tensor:
"""Returns sampled rotation in 3D as quaternion.
Reference:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html
Args:
num (int): The number of rotations to sample.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled quaternion (w, x, y, z).
"""
# sample random orientation from normal distribution
quat = torch.randn((num, 4), dtype=torch.float, device=device)
# normalize the quaternion
return torch.nn.functional.normalize(quat, p=2.0, dim=-1, eps=1e-12)
@torch.jit.script
def random_yaw_orientation(num: int, device: str) -> torch.Tensor:
"""Returns sampled rotation around z-axis.
Args:
num (int): The number of rotations to sample.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled quaternion (w, x, y, z).
"""
roll = torch.zeros(num, dtype=torch.float, device=device)
pitch = torch.zeros(num, dtype=torch.float, device=device)
yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)
return quat_from_euler_xyz(roll, pitch, yaw)
def sample_triangle(lower: float, upper: float, size: Union[int, Tuple[int, ...]], device: str) -> torch.Tensor:
"""Randomly samples tensor from a triangular distribution.
Args:
lower (float): The lower range of the sampled tensor.
upper (float): The upper range of the sampled tensor.
size (Union[int, Tuple[int, ...]]): The shape of the tensor.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled tensor of shape :obj:`size`.
"""
# convert to tuple
if isinstance(size, int):
size = (size,)
# create random tensor in the range [-1, 1]
r = 2 * torch.rand(*size, device=device) - 1
# convert to triangular distribution
r = torch.where(r < 0.0, -torch.sqrt(-r), torch.sqrt(r))
# rescale back to [0, 1]
r = (r + 1.0) / 2.0
# rescale to range [lower, upper]
return (upper - lower) * r + lower
def sample_uniform(
lower: Union[torch.Tensor, float], upper: Union[torch.Tensor, float], size: Union[int, Tuple[int, ...]], device: str
) -> torch.Tensor:
"""Sample uniformly within a range.
Args:
lower (Union[torch.Tensor, float]): Lower bound of uniform range.
upper (Union[torch.Tensor, float]): Upper bound of uniform range.
size (Union[int, Tuple[int, ...]]): The shape of the tensor.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled tensor of shape :obj:`size`.
"""
# convert to tuple
if isinstance(size, int):
size = (size,)
# return tensor
return torch.rand(*size, device=device) * (upper - lower) + lower
def sample_cylinder(
radius: float, h_range: Tuple[float, float], size: Union[int, Tuple[int, ...]], device: str
) -> torch.Tensor:
"""Sample 3D points uniformly on a cylinder's surface.
The cylinder is centered at the origin and aligned with the z-axis. The height of the cylinder is
sampled uniformly from the range :obj:`h_range`, while the radius is fixed to :obj:`radius`.
The sampled points are returned as a tensor of shape :obj:`(*size, 3)`, i.e. the last dimension
contains the x, y, and z coordinates of the sampled points.
Args:
radius (float): The radius of the cylinder.
h_range (Tuple[float, float]): The minimum and maximum height of the cylinder.
size (Union[int, Tuple[int, ...]]): The shape of the tensor.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled tensor of shape :obj:`(*size, 3)`.
"""
# sample angles
angles = (torch.rand(size, device=device) * 2 - 1) * np.pi
h_min, h_max = h_range
# add shape
if isinstance(size, int):
size = (size, 3)
else:
size += (3,)
# allocate a tensor
xyz = torch.zeros(size, device=device)
xyz[..., 0] = radius * torch.cos(angles)
xyz[..., 1] = radius * torch.sin(angles)
xyz[..., 2].uniform_(h_min, h_max)
# return positions
return xyz
| 21,379 |
Python
| 33.153355 | 139 | 0.627391 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/dict_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Utilities for working with dictionaries."""
import collections.abc
import importlib
import inspect
from typing import Any, Callable, Dict, Iterable, Mapping
from hsr_rl.tasks.utils.array_utils import TENSOR_TYPE_CONVERSIONS, TENSOR_TYPES
__all__ = ["class_to_dict", "update_class_from_dict", "convert_dict_to_backend", "update_dict", "print_dict"]
"""
Dictionary <-> Class operations.
"""
def class_to_dict(obj: object) -> Dict[str, Any]:
"""Convert an object into dictionary recursively.
Note:
Ignores all names starting with "__" (i.e. built-in methods).
Args:
obj (object): An instance of a class to convert.
Raises:
ValueError: When input argument is not an object.
Returns:
Dict[str, Any]: Converted dictionary mapping.
"""
# check that input data is class instance
if not hasattr(obj, "__class__"):
raise ValueError(f"Expected a class instance. Received: {type(obj)}.")
# convert object to dictionary
if isinstance(obj, dict):
obj_dict = obj
else:
obj_dict = obj.__dict__
# convert to dictionary
data = dict()
for key, value in obj_dict.items():
# disregard builtin attributes
if key.startswith("__"):
continue
# check if attribute is callable -- function
if callable(value):
data[key] = f"{value.__module__}:{value.__name__}"
# check if attribute is a dictionary
elif hasattr(value, "__dict__") or isinstance(value, dict):
data[key] = class_to_dict(value)
else:
data[key] = value
return data
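# Hedged usage sketch (added for illustration; `_DemoCfg` is a hypothetical
# class, not part of this project). Nested attributes and plain dictionaries
# are converted recursively by `class_to_dict`.
def _example_class_to_dict():
    class _DemoCfg:
        def __init__(self):
            self.num_envs = 4
            self.physics = {"dt": 0.01}
    data = class_to_dict(_DemoCfg())
    # -> {"num_envs": 4, "physics": {"dt": 0.01}}
    assert data["physics"]["dt"] == 0.01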
def update_class_from_dict(obj, data: Dict[str, Any], _ns: str = "") -> None:
"""Reads a dictionary and sets object variables recursively.
This function performs in-place update of the class member attributes.
Args:
obj (object): An instance of a class to update.
data (Dict[str, Any]): Input dictionary to update from.
_ns (str): Namespace of the current object. This is useful for nested configuration
classes or dictionaries. Defaults to "".
Raises:
TypeError: When input is not a dictionary.
ValueError: When dictionary has a value that does not match default config type.
KeyError: When dictionary has a key that does not exist in the default config type.
"""
for key, value in data.items():
# key_ns is the full namespace of the key
key_ns = _ns + "/" + key
# check if key is present in the object
if hasattr(obj, key):
obj_mem = getattr(obj, key)
if isinstance(obj_mem, Mapping):
# Note: We don't handle two-level nested dictionaries. Just use configclass if this is needed.
# iterate over the dictionary to look for callable values
for k, v in obj_mem.items():
if callable(v):
value[k] = _string_to_callable(value[k])
setattr(obj, key, value)
elif isinstance(value, Mapping):
# recursively call if it is a dictionary
update_class_from_dict(obj_mem, value, _ns=key_ns)
elif isinstance(value, Iterable) and not isinstance(value, str):
# check length of value to be safe
                if obj_mem is not None and len(obj_mem) != len(value):
raise ValueError(
f"[Config]: Incorrect length under namespace: {key_ns}. Expected: {len(obj_mem)}, Received: {len(value)}."
)
# set value
setattr(obj, key, value)
elif callable(obj_mem):
# update function name
value = _string_to_callable(value)
setattr(obj, key, value)
elif isinstance(value, type(obj_mem)):
# check that they are type-safe
setattr(obj, key, value)
else:
raise ValueError(
f"[Config]: Incorrect type under namespace: {key_ns}. Expected: {type(obj_mem)}, Received: {type(value)}."
)
else:
raise KeyError(f"[Config]: Key not found under namespace: {key_ns}.")
"""
Dictionary operations.
"""
def convert_dict_to_backend(
data: dict, backend: str = "numpy", array_types: Iterable[str] = ("numpy", "torch", "warp")
) -> dict:
"""Convert all arrays or tensors in a dictionary to a given backend.
This function iterates over the dictionary, converts all arrays or tensors with the given types to
the desired backend, and stores them in a new dictionary. It also works with nested dictionaries.
Currently supported backends are "numpy", "torch", and "warp".
Note:
This function only converts arrays or tensors. Other types of data are left unchanged. Mutable types
(e.g. lists) are referenced by the new dictionary, so they are not copied.
Args:
data (dict): An input dict containing array or tensor data as values.
backend(str): The backend ("numpy", "torch", "warp") to which arrays in this dict should be converted.
Defaults to "numpy".
array_types(Iterable[str]): A list containing the types of arrays that should be converted to
the desired backend. Defaults to ("numpy", "torch", "warp").
Raises:
ValueError: If the specified ``backend`` or ``array_types`` are unknown, i.e. not in the list of supported
backends ("numpy", "torch", "warp").
Returns:
dict: The updated dict with the data converted to the desired backend.
"""
# THINK: Should we also support converting to a specific device, e.g. "cuda:0"?
# Check the backend is valid.
if backend not in TENSOR_TYPE_CONVERSIONS:
raise ValueError(f"Unknown backend '{backend}'. Supported backends are 'numpy', 'torch', and 'warp'.")
# Define the conversion functions for each backend.
tensor_type_conversions = TENSOR_TYPE_CONVERSIONS[backend]
# Parse the array types and convert them to the corresponding types: "numpy" -> np.ndarray, etc.
parsed_types = list()
for t in array_types:
# Check type is valid.
if t not in TENSOR_TYPES:
raise ValueError(f"Unknown array type: '{t}'. Supported array types are 'numpy', 'torch', and 'warp'.")
# Exclude types that match the backend, since we do not need to convert these.
if t == backend:
continue
# Convert the string types to the corresponding types.
parsed_types.append(TENSOR_TYPES[t])
# Convert the data to the desired backend.
output_dict = dict()
for key, value in data.items():
# Obtain the data type of the current value.
data_type = type(value)
# -- arrays
if data_type in parsed_types:
# check if we have a known conversion.
if data_type not in tensor_type_conversions:
raise ValueError(f"No registered conversion for data type: {data_type} to {backend}!")
# convert the data to the desired backend.
output_dict[key] = tensor_type_conversions[data_type](value)
# -- nested dictionaries
elif isinstance(data[key], dict):
            output_dict[key] = convert_dict_to_backend(value, backend, array_types)
# -- everything else
else:
output_dict[key] = value
return output_dict
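# Hedged usage sketch (added for illustration, assuming the conversion tables
# in `array_utils` cover numpy -> torch). Arrays are converted, nested
# dictionaries are recursed into, and non-array values pass through unchanged.
def _example_convert_dict_to_backend():
    import numpy as np
    import torch
    data = {"pos": np.zeros(3), "info": {"vel": np.ones(3)}, "name": "hsr"}
    out = convert_dict_to_backend(data, backend="torch")
    assert isinstance(out["pos"], torch.Tensor)
    assert isinstance(out["info"]["vel"], torch.Tensor)
    assert out["name"] == "hsr"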
def update_dict(orig_dict: dict, new_dict: collections.abc.Mapping) -> dict:
"""Updates existing dictionary with values from a new dictionary.
This function mimics the dict.update() function. However, it works for
nested dictionaries as well.
Reference:
https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
Args:
orig_dict (dict): The original dictionary to insert items to.
new_dict (collections.abc.Mapping): The new dictionary to insert items from.
Returns:
dict: The updated dictionary.
"""
for keyname, value in new_dict.items():
if isinstance(value, collections.abc.Mapping):
orig_dict[keyname] = update_dict(orig_dict.get(keyname, {}), value)
else:
orig_dict[keyname] = value
return orig_dict
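# Hedged usage sketch (added for illustration). Unlike `dict.update`, only the
# overlapping leaves are overwritten; sibling keys in nested dictionaries
# survive the merge.
def _example_update_dict():
    base = {"sim": {"dt": 0.01, "substeps": 1}, "seed": 0}
    patch = {"sim": {"dt": 0.005}}
    merged = update_dict(base, patch)
    assert merged == {"sim": {"dt": 0.005, "substeps": 1}, "seed": 0}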
def print_dict(val, nesting: int = -4, start: bool = True):
"""Outputs a nested dictionary."""
if type(val) == dict:
if not start:
print("")
nesting += 4
for k in val:
print(nesting * " ", end="")
print(k, end=": ")
print_dict(val[k], nesting, start=False)
else:
# deal with functions in print statements
if callable(val) and val.__name__ == "<lambda>":
print("lambda", inspect.getsourcelines(val)[0][0].strip().split("lambda")[1].strip()[:-1])
elif callable(val):
print(f"{val.__module__}:{val.__name__}")
else:
print(val)
"""
Private helper functions.
"""
def _string_to_callable(name: str) -> Callable:
"""Resolves the module and function names to return the function.
Args:
name (str): The function name. The format should be 'module:attribute_name'.
Raises:
        ValueError: When the resolved attribute is not callable.
        ValueError: When the input string cannot be resolved to a module attribute.
Returns:
Callable: The function loaded from the module.
"""
try:
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
callable_object = getattr(mod, attr_name)
# check if attribute is callable
if callable(callable_object):
return callable_object
else:
raise ValueError(f"The imported object is not callable: '{name}'")
except AttributeError as e:
msg = (
"While updating the config from a dictionary, we could not interpret the entry"
"as a callable object. The format of input should be 'module:attribute_name'\n"
f"While processing input '{name}', received the error:\n {e}."
)
raise ValueError(msg)
| 10,319 |
Python
| 37.364312 | 130 | 0.612462 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/pinoc_utils.py
|
from __future__ import print_function
import os
import numpy as np
import pinocchio as pin
from scipy.spatial.transform import Rotation as R
def get_se3_err(pos_first, quat_first, pos_second, quat_second):
    # Returns the 6-dimensional log-SE3 error between two poses expressed as position and quaternion rotation
rot_first = R.from_quat(np.array([quat_first[1],quat_first[2],quat_first[3],quat_first[0]])).as_matrix() # Quaternion in scalar last format!!!
rot_second = R.from_quat(np.array([quat_second[1],quat_second[2],quat_second[3],quat_second[0]])).as_matrix() # Quaternion in scalar last format!!!
oMfirst = pin.SE3(rot_first, pos_first)
oMsecond = pin.SE3(rot_second, pos_second)
firstMsecond = oMfirst.actInv(oMsecond)
return pin.log(firstMsecond).vector # log gives us a spatial vector (exp co-ords)
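# Hedged usage sketch (added for illustration; requires pinocchio). Quaternions
# follow the (w, x, y, z) convention expected by `get_se3_err`. For a pure
# 0.1 m translation along x with identical orientations, the log-SE3 error is
# that translation in its linear components and zero in the angular ones.
def _example_get_se3_err():
    identity_quat = np.array([1.0, 0.0, 0.0, 0.0])
    err = get_se3_err(np.zeros(3), identity_quat,
                      np.array([0.1, 0.0, 0.0]), identity_quat)
    assert np.allclose(err, [0.1, 0.0, 0.0, 0.0, 0.0, 0.0])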
class HSRIKSolver(object):
def __init__(
self,
        include_torso: bool = False, # Use torso in the IK solution
        include_base: bool = True, # Use base in the IK solution
max_rot_vel: float = 1.0472
) -> None:
# Settings
self.damp = 1e-10
self._include_torso = include_torso
self._include_base = include_base
self.max_rot_vel = max_rot_vel
# Load urdf # TODO: fix
urdf_file = "/root/tamp-hsr/hsr_rl/models/urdf/hsrb_description/hsrb4s.urdf"
self.model = pin.buildModelFromUrdf(urdf_file)
# Choose joints
name_end_effector = "hand_palm_link"
jointsOfInterest = ['arm_lift_joint', 'arm_flex_joint', 'arm_roll_joint',
'wrist_flex_joint', 'wrist_roll_joint']
# Add torso joints
if self._include_torso:
jointsOfInterest = ['torso_lift_joint'] + jointsOfInterest
# Add base joints
if self._include_base:
jointsOfInterest = ['joint_x', 'joint_y', 'joint_rz'] + jointsOfInterest
remove_ids = list()
for jnt in jointsOfInterest:
if self.model.existJointName(jnt):
remove_ids.append(self.model.getJointId(jnt))
else:
print('[IK WARNING]: joint ' + str(jnt) + ' does not belong to the model!')
jointIdsToExclude = np.delete(np.arange(0, self.model.njoints), remove_ids)
# Lock extra joints except joint 0 (root)
reference_configuration=pin.neutral(self.model)
if not self._include_torso:
reference_configuration[4] = 0.1 # lock torso_lift_joint at 0.1
self.model = pin.buildReducedModel(self.model, jointIdsToExclude[1:].tolist(),
reference_configuration=reference_configuration)
assert (len(self.model.joints)==(len(jointsOfInterest)+1)), "[IK Error]: Joints != nDoFs"
self.model_data = self.model.createData()
# Define Joint-Limits
self.joint_pos_min = np.array([-2.617, -1.919, -1.919, -1.919])
self.joint_pos_max = np.array([+0.0, +3.665, +1.221, +3.665])
if self._include_torso:
self.joint_pos_min = np.hstack((np.array([+0.0]), self.joint_pos_min))
self.joint_pos_max = np.hstack((np.array([+0.69]), self.joint_pos_max))
if self._include_base:
self.joint_pos_min = np.hstack((np.array([-10.0, -10.0, -10.0]), self.joint_pos_min))
self.joint_pos_max = np.hstack((np.array([+10.0, +10.0, +10.0]), self.joint_pos_max))
self.joint_pos_mid = (self.joint_pos_max + self.joint_pos_min) / 2.0
# Get End Effector Frame ID
self.id_EE = self.model.getFrameId(name_end_effector)
def get_jacobian(self, curr_conf):
# Compute Jacobian
J = pin.computeFrameJacobian(self.model, self.model_data, curr_conf, self.id_EE)
return J
def solve_fk_hsr(self, curr_joints):
pin.framesForwardKinematics(self.model, self.model_data, curr_joints)
oMf = self.model_data.oMf[self.id_EE]
ee_pos = oMf.translation
ee_quat = pin.Quaternion(oMf.rotation)
return ee_pos, np.array([ee_quat.w, ee_quat.x, ee_quat.y, ee_quat.z])
def solve_ik_pos_hsr(self, des_pos, des_quat, curr_joints=None, n_trials=7, dt=0.1, pos_threshold=0.05, angle_threshold=15.*np.pi/180, verbose=True):
# Get IK positions for hsr robot
damp = 1e-10
success = False
if des_quat is not None:
# quaternion to rot matrix
des_rot = R.from_quat(np.array([des_quat[1], des_quat[2], des_quat[3], des_quat[0]])).as_matrix() # Quaternion in scalar last format!!!
oMdes = pin.SE3(des_rot, des_pos)
else:
# 3D position error only
des_rot = None
        if curr_joints is None:
            q = np.random.uniform(self.joint_pos_min, self.joint_pos_max)
        else:
            q = np.array(curr_joints)
for n in range(n_trials):
for i in range(800):
pin.framesForwardKinematics(self.model, self.model_data, q)
oMf = self.model_data.oMf[self.id_EE]
if des_rot is None:
oMdes = pin.SE3(oMf.rotation, des_pos) # Set rotation equal to current rotation to exclude this error
dMf = oMdes.actInv(oMf)
err = pin.log(dMf).vector
if (np.linalg.norm(err[0:3]) < pos_threshold) and (np.linalg.norm(err[3:6]) < angle_threshold):
                    success = True
                    break
J = pin.computeFrameJacobian(self.model, self.model_data, q, self.id_EE)
if des_rot is None:
J = J[:3, :] # Only pos errors
err = err[:3]
                v = - J.T.dot(np.linalg.solve(J.dot(J.T) + damp * np.eye(J.shape[0]), err))  # identity sized to the task dimension (6, or 3 for position-only)
q = pin.integrate(self.model, q, v*dt)
# Clip q to within joint limits
q = np.clip(q, self.joint_pos_min, self.joint_pos_max)
                if verbose and not i % 100:
                    print('Trial %d: iter %d: error = %s' % (n + 1, i, err.T))
            if success:
                best_q = np.array(q)
                break
            else:
                # Save the current (non-converged) solution
                best_q = np.array(q)
                # Reset q to a random configuration before the next trial
                q = np.random.uniform(self.joint_pos_min, self.joint_pos_max)
if verbose:
if success:
print("[[[[IK: Convergence achieved!]]]")
else:
print("[Warning: the IK iterative algorithm has not reached convergence to the desired precision]")
return success, best_q
if __name__ == '__main__':
hsr_ik_solver = HSRIKSolver()
success, best_q = hsr_ik_solver.solve_ik_pos_hsr(np.array([0.5, 0.1, 0.3]), np.array([0., 0., 0., 1.0]))
print('best_q: ', best_q)
| 6,825 |
Python
| 39.630952 | 153 | 0.57348 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/usd_utils.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from pxr import UsdPhysics, UsdLux
def set_drive_type(prim_path, drive_type):
joint_prim = get_prim_at_path(prim_path)
# set drive type ("angular" or "linear")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, drive_type)
return drive
def set_drive_target_position(drive, target_value):
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
def set_drive_target_velocity(drive, target_value):
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
def set_drive_stiffness(drive, stiffness):
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
def set_drive_damping(drive, damping):
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
def set_drive_max_force(drive, max_force):
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
def set_drive(prim_path, drive_type, target_type, target_value, stiffness, damping, max_force) -> None:
drive = set_drive_type(prim_path, drive_type)
# set target type ("position" or "velocity")
if target_type == "position":
set_drive_target_position(drive, target_value)
elif target_type == "velocity":
set_drive_target_velocity(drive, target_value)
set_drive_stiffness(drive, stiffness)
set_drive_damping(drive, damping)
set_drive_max_force(drive, max_force)
def create_distant_light(prim_path="/World/defaultDistantLight", intensity=5000):
stage = get_current_stage()
light = UsdLux.DistantLight.Define(stage, prim_path)
light.GetPrim().GetAttribute("intensity").Set(intensity)
| 3,627 |
Python
| 40.701149 | 103 | 0.742211 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/annotation_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Wrapper around the Python 3.7 onwards `dataclasses` module."""
from copy import deepcopy
from dataclasses import Field, dataclass, field
from typing import Any, Callable, ClassVar, Dict
from hsr_rl.tasks.utils.dict_utils import class_to_dict, update_class_from_dict
# List of all methods provided by sub-module.
__all__ = ["configclass"]
"""
Wrapper around dataclass.
"""
def __dataclass_transform__():
"""Add annotations decorator for PyLance."""
return lambda a: a
@__dataclass_transform__()
def configclass(cls, **kwargs):
"""Wrapper around `dataclass` functionality to add extra checks and utilities.
As of Python3.8, the standard dataclasses have two main issues which makes them non-generic for configuration use-cases.
These include:
1. Requiring a type annotation for all its members.
2. Requiring explicit usage of :meth:`field(default_factory=...)` to reinitialize mutable variables.
This function wraps around :class:`dataclass` utility to deal with the above two issues.
Usage:
.. code-block:: python
from dataclasses import MISSING
from omni.isaac.orbit.utils.configclass import configclass
@configclass
class ViewerCfg:
eye: list = [7.5, 7.5, 7.5] # field missing on purpose
            lookat: list = field(default_factory=lambda: [0.0, 0.0, 0.0])
@configclass
class EnvCfg:
num_envs: int = MISSING
episode_length: int = 2000
viewer: ViewerCfg = ViewerCfg()
# create configuration instance
env_cfg = EnvCfg(num_envs=24)
# print information
print(env_cfg.to_dict())
Reference:
https://docs.python.org/3/library/dataclasses.html#dataclasses.Field
"""
# add type annotations
_add_annotation_types(cls)
# add field factory
_process_mutable_types(cls)
# copy mutable members
setattr(cls, "__post_init__", _custom_post_init)
# add helper functions for dictionary conversion
setattr(cls, "to_dict", _class_to_dict)
setattr(cls, "from_dict", _update_class_from_dict)
# wrap around dataclass
cls = dataclass(cls, **kwargs)
# return wrapped class
return cls
"""
Dictionary <-> Class operations.
These are redefined here to add new docstrings.
"""
def _class_to_dict(obj: object) -> Dict[str, Any]:
"""Convert an object into dictionary recursively.
Returns:
Dict[str, Any]: Converted dictionary mapping.
"""
return class_to_dict(obj)
def _update_class_from_dict(obj, data: Dict[str, Any]) -> None:
"""Reads a dictionary and sets object variables recursively.
This function performs in-place update of the class member attributes.
Args:
data (Dict[str, Any]): Input (nested) dictionary to update from.
Raises:
TypeError: When input is not a dictionary.
ValueError: When dictionary has a value that does not match default config type.
KeyError: When dictionary has a key that does not exist in the default config type.
"""
return update_class_from_dict(obj, data, _ns="")
"""
Private helper functions.
"""
def _add_annotation_types(cls):
"""Add annotations to all elements in the dataclass.
By definition in Python, a field is defined as a class variable that has a type annotation.
In case type annotations are not provided, dataclass ignores those members when :func:`__dict__()` is called.
This function adds these annotations to the class variable to prevent any issues in case the user forgets to
specify the type annotation.
This makes the following a feasible operation:
@dataclass
class State:
pos = (0.0, 0.0, 0.0)
^^
If the function is NOT used, the following type-error is returned:
TypeError: 'pos' is a field but has no type annotation
"""
# Note: Do not change this line. `cls.__dict__.get("__annotations__", {})` is different from `cls.__annotations__` because of inheritance.
cls.__annotations__ = cls.__dict__.get("__annotations__", {})
# cls.__annotations__ = dict()
for key in dir(cls):
# skip dunder members
if key.startswith("__"):
continue
# skip class functions
if key in ["from_dict", "to_dict"]:
continue
# add type annotations for members that are not functions
var = getattr(cls, key)
if not isinstance(var, type):
if key not in cls.__annotations__:
cls.__annotations__[key] = type(var)
def _process_mutable_types(cls):
"""Initialize all mutable elements through :obj:`dataclasses.Field` to avoid unnecessary complaints.
By default, dataclass requires usage of :obj:`field(default_factory=...)` to reinitialize mutable objects every time a new
class instance is created. If a member has a mutable type and it is created without specifying the `field(default_factory=...)`,
then Python throws an error requiring the usage of `default_factory`.
Additionally, Python only explicitly checks for field specification when the type is a list, set or dict. This misses the
use-case where the type is class itself. Thus, the code silently carries a bug with it which can lead to undesirable effects.
    This function deals with this issue.
This makes the following a feasible operation:
@dataclass
class State:
pos: list = [0.0, 0.0, 0.0]
^^
If the function is NOT used, the following value-error is returned:
ValueError: mutable default <class 'list'> for field pos is not allowed: use default_factory
"""
def _return_f(f: Any) -> Callable[[], Any]:
"""Returns default function for creating mutable/immutable variables."""
def _wrap():
if isinstance(f, Field):
return f.default_factory
else:
return f
return _wrap
for key in dir(cls):
# skip dunder members
if key.startswith("__"):
continue
# skip class functions
if key in ["from_dict", "to_dict"]:
continue
# do not create field for class variables
if key in cls.__annotations__:
origin = getattr(cls.__annotations__[key], "__origin__", None)
if origin is ClassVar:
continue
# define explicit field for data members
f = getattr(cls, key)
if not isinstance(f, type):
f = field(default_factory=_return_f(f))
setattr(cls, key, f)
def _custom_post_init(obj):
"""Deepcopy all elements to avoid shared memory issues for mutable objects in dataclasses initialization.
This function is called explicitly instead of as a part of :func:`_process_mutable_types()` to prevent mapping
proxy type i.e. a read only proxy for mapping objects. The error is thrown when using hierarchical data-classes
for configuration.
"""
for key in dir(obj):
# skip dunder members
if key.startswith("__"):
continue
# duplicate data members
var = getattr(obj, key)
if not callable(var):
setattr(obj, key, deepcopy(var))
| 7,468 |
Python
| 32.644144 | 142 | 0.64301 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/transform_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Provides utilities for math operations.
Some of these are imported from the module `omni.isaac.core.utils.torch` for convenience.
"""
import numpy as np
import torch
import torch.nn.functional
from typing import Optional, Sequence, Tuple, Union
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def scale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""
Normalizes a given input tensor to a range of [-1, 1].
@note It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape (dims,)
upper: The maximum value of the tensor. Shape (dims,)
Returns:
Normalized transform of the tensor. Shape (N, dims)
"""
# default value of center
offset = (lower + upper) * 0.5
# return normalized tensor
return 2 * (x - offset) / (upper - lower)
@torch.jit.script
def unscale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""
Denormalizes a given input tensor from range of [-1, 1] to (lower, upper).
@note It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape (dims,)
upper: The maximum value of the tensor. Shape (dims,)
Returns:
Denormalized transform of the tensor. Shape (N, dims)
"""
# default value of center
offset = (lower + upper) * 0.5
# return normalized tensor
return x * (upper - lower) * 0.5 + offset
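# Hedged usage sketch (added for illustration). `scale_transform` and
# `unscale_transform` are exact inverses, which is the round trip used when
# mapping between raw joint limits and a policy's normalized action space.
def _example_scale_roundtrip():
    lower = torch.tensor([-1.0, 0.0])
    upper = torch.tensor([1.0, 2.0])
    x = torch.tensor([[0.5, 1.5]])
    x_norm = scale_transform(x, lower, upper)  # -> [[0.5, 0.5]]
    assert torch.allclose(unscale_transform(x_norm, lower, upper), x)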
@torch.jit.script
def quat_apply(a, b):
shape = b.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 3)
xyz = a[:, 1:]
t = xyz.cross(b, dim=-1) * 2
return (b + a[:, 0:1] * t + xyz.cross(t, dim=-1)).view(shape)
@torch.jit.script
def quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((a[:, 0:1], -a[:, 1:]), dim=-1).view(shape)
@torch.jit.script
def quat_unit(a):
return normalize(a)
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([w, xyz], dim=-1))
@torch.jit.script
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([w, x, y, z], dim=-1).view(shape)
return quat
@torch.jit.script
def quat_rotate(q, v):
shape = q.shape
q_w = q[:, 0]
q_vec = q[:, 1:]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * torch.bmm(q_vec.view(shape[0], 1, 3), v.view(shape[0], 3, 1)).squeeze(-1) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
shape = q.shape
q_w = q[:, 0]
q_vec = q[:, 1:]
a = v * (2.0 * q_w ** 2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
c = q_vec * torch.bmm(q_vec.view(shape[0], 1, 3), v.view(shape[0], 3, 1)).squeeze(-1) * 2.0
return a - b + c
__all__ = [
# General
"wrap_to_pi",
"saturate",
"copysign",
# General-Isaac Sim
"normalize",
"scale_transform",
"unscale_transform",
# Rotation
"matrix_from_quat",
"quat_inv",
"quat_from_euler_xyz",
"quat_apply_yaw",
"quat_box_minus",
"euler_xyz_from_quat",
"axis_angle_from_quat",
# Rotation-Isaac Sim
"quat_apply",
"quat_from_angle_axis",
"quat_mul",
"quat_conjugate",
"quat_rotate",
"quat_rotate_inverse",
# Transformations
"combine_frame_transforms",
"subtract_frame_transforms",
"compute_pose_error",
"apply_delta_pose",
# Sampling
"default_orientation",
"random_orientation",
"random_yaw_orientation",
"sample_triangle",
"sample_uniform",
]
"""
General
"""
@torch.jit.script
def wrap_to_pi(angles: torch.Tensor) -> torch.Tensor:
"""Wraps input angles (in radians) to the range [-pi, pi].
Args:
angles (torch.Tensor): Input angles.
Returns:
torch.Tensor: Angles in the range [-pi, pi].
"""
angles %= 2 * np.pi
angles -= 2 * np.pi * (angles > np.pi)
return angles
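# Hedged usage sketch (added for illustration). Note that `wrap_to_pi`
# modifies its argument in place, hence the clone below.
def _example_wrap_to_pi():
    angles = torch.tensor([3.5 * np.pi, -0.5 * np.pi, np.pi])
    wrapped = wrap_to_pi(angles.clone())
    expected = torch.tensor([-0.5 * np.pi, -0.5 * np.pi, np.pi])
    assert torch.allclose(wrapped, expected, atol=1e-5)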
@torch.jit.script
def saturate(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""Clamps a given input tensor to (lower, upper).
It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape (dims,)
upper: The maximum value of the tensor. Shape (dims,)
Returns:
Clamped transform of the tensor. Shape (N, dims)
"""
return torch.max(torch.min(x, upper), lower)
@torch.jit.script
def copysign(mag: float, other: torch.Tensor) -> torch.Tensor:
"""Create a new floating-point tensor with the magnitude of input and the sign of other, element-wise.
Note:
The implementation follows from `torch.copysign`. The function allows a scalar magnitude.
Args:
mag (float): The magnitude scalar.
other (torch.Tensor): The tensor containing values whose signbits are applied to magnitude.
Returns:
torch.Tensor: The output tensor.
"""
mag = torch.tensor(mag, device=other.device, dtype=torch.float).repeat(other.shape[0])
return torch.abs(mag) * torch.sign(other)
"""
Rotation
"""
@torch.jit.script
def matrix_from_quat(quaternions: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
Reference:
Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L41-L70)
"""
r, i, j, k = torch.unbind(quaternions, -1)
# pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
def convert_quat(
    quat: Union[torch.Tensor, Sequence[float]], to: Optional[str] = "xyzw"
) -> Union[torch.Tensor, np.ndarray]:
    """Converts a quaternion from one convention to another.
    The convention to convert TO is specified as an optional argument. If to == 'xyzw',
    then the input is in 'wxyz' format, and vice-versa.
    Args:
        quat (Union[torch.Tensor, Sequence[float]]): Input quaternion of shape (..., 4).
        to (Optional[str], optional): Convention to convert the quaternion to. Defaults to "xyzw".
    Raises:
        ValueError: Invalid input argument `to`, i.e. not "xyzw" or "wxyz".
        ValueError: Invalid shape of input `quat`, i.e. not (..., 4,).
    Returns:
        Union[torch.Tensor, np.ndarray]: The converted quaternion in the specified convention.
"""
# convert to numpy (sanity check)
if not isinstance(quat, torch.Tensor):
quat = np.asarray(quat)
# check input is correct
if quat.shape[-1] != 4:
msg = f"convert_quat(): Expected input quaternion shape mismatch: {quat.shape} != (..., 4)."
raise ValueError(msg)
# convert to specified quaternion type
if to == "xyzw":
return quat[..., [1, 2, 3, 0]]
elif to == "wxyz":
return quat[..., [3, 0, 1, 2]]
else:
raise ValueError("convert_quat(): Choose a valid `to` argument (xyzw or wxyz).")
@torch.jit.script
def quat_inv(q: torch.Tensor) -> torch.Tensor:
"""Compute the inverse of a quaternion.
Args:
q (torch.Tensor): The input quaternion (w, x, y, z).
Returns:
torch.Tensor: The inverse quaternion (w, x, y, z).
"""
return normalize(quat_conjugate(q))
@torch.jit.script
def quat_from_euler_xyz(roll: torch.Tensor, pitch: torch.Tensor, yaw: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as Euler angles in radians to Quaternions.
Note:
The euler angles are assumed in XYZ convention.
Args:
roll: Rotation around x-axis (in radians). Shape: [N,]
pitch: Rotation around y-axis (in radians). Shape: [N,]
yaw: Rotation around z-axis (in radians). Shape: [N,]
Returns:
torch.Tensor: Quaternion with real part in the start. Shape: [N, 4,]
"""
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
# compute quaternion
qw = cy * cr * cp + sy * sr * sp
qx = cy * sr * cp - sy * cr * sp
qy = cy * cr * sp + sy * sr * cp
qz = sy * cr * cp - cy * sr * sp
return torch.stack([qw, qx, qy, qz], dim=-1)
@torch.jit.script
def euler_xyz_from_quat(quat: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Convert rotations given as quaternions to Euler angles in radians.
Note:
The euler angles are assumed in XYZ convention.
Reference:
https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
Args:
quat: Quaternion with real part in the start. Shape: [N, 4,]
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: A tuple containing roll-pitch-yaw.
"""
q_w, q_x, q_y, q_z = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
# roll (x-axis rotation)
sin_roll = 2.0 * (q_w * q_x + q_y * q_z)
cos_roll = 1 - 2 * (q_x * q_x + q_y * q_y)
roll = torch.atan2(sin_roll, cos_roll)
# pitch (y-axis rotation)
sin_pitch = 2.0 * (q_w * q_y - q_z * q_x)
pitch = torch.where(torch.abs(sin_pitch) >= 1, copysign(np.pi / 2.0, sin_pitch), torch.asin(sin_pitch))
# yaw (z-axis rotation)
sin_yaw = 2.0 * (q_w * q_z + q_x * q_y)
cos_yaw = 1 - 2 * (q_y * q_y + q_z * q_z)
yaw = torch.atan2(sin_yaw, cos_yaw)
return roll % (2 * np.pi), pitch % (2 * np.pi), yaw % (2 * np.pi)
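# Hedged usage sketch (added for illustration). `quat_from_euler_xyz` and
# `euler_xyz_from_quat` round-trip; the recovered angles come back wrapped
# to [0, 2*pi).
def _example_euler_quat_roundtrip():
    roll = torch.tensor([0.1])
    pitch = torch.tensor([0.2])
    yaw = torch.tensor([0.3])
    quat = quat_from_euler_xyz(roll, pitch, yaw)
    r, p, y = euler_xyz_from_quat(quat)
    assert torch.allclose(torch.stack([r, p, y]), torch.stack([roll, pitch, yaw]), atol=1e-5)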
@torch.jit.script
def quat_apply_yaw(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
"""Rotate a vector only around the yaw-direction.
Args:
quat (torch.Tensor): Input orientation to extract yaw from.
vec (torch.Tensor): Input vector.
Returns:
torch.Tensor: Rotated vector.
"""
quat_yaw = quat.clone().view(-1, 4)
quat_yaw[:, 1:3] = 0.0 # set x, y components as zero
quat_yaw = normalize(quat_yaw)
return quat_apply(quat_yaw, vec)
@torch.jit.script
def quat_box_minus(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
"""Implements box-minus operator (quaternion difference).
Args:
q1 (torch.Tensor): A (N, 4) tensor for quaternion (x, y, z, w)
q2 (torch.Tensor): A (N, 4) tensor for quaternion (x, y, z, w)
Returns:
torch.Tensor: q1 box-minus q2
Reference:
https://docs.leggedrobotics.com/kindr/cheatsheet_latest.pdf
"""
quat_diff = quat_mul(q1, quat_conjugate(q2)) # q1 * q2^-1
re = quat_diff[:, 0] # real part, q = [w, x, y, z] = [re, im]
im = quat_diff[:, 1:] # imaginary part
norm_im = torch.norm(im, dim=1)
scale = 2.0 * torch.where(norm_im > 1.0e-7, torch.atan(norm_im / re) / norm_im, torch.sign(re))
return scale.unsqueeze(-1) * im
@torch.jit.script
def axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tensor:
"""Convert rotations given as quaternions to axis/angle.
Args:
quat (torch.Tensor): quaternions with real part first, as tensor of shape (..., 4).
eps (float): The tolerance for Taylor approximation. Defaults to 1.0e-6.
Returns:
torch.Tensor: Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle turned
anti-clockwise in radians around the vector's direction.
Reference:
Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L526-L554)
"""
# Modified to take in quat as [q_w, q_x, q_y, q_z]
# Quaternion is [q_w, q_x, q_y, q_z] = [cos(theta/2), n_x * sin(theta/2), n_y * sin(theta/2), n_z * sin(theta/2)]
# Axis-angle is [a_x, a_y, a_z] = [theta * n_x, theta * n_y, theta * n_z]
# Thus, axis-angle is [q_x, q_y, q_z] / (sin(theta/2) / theta)
# When theta = 0, (sin(theta/2) / theta) is undefined
# However, as theta --> 0, we can use the Taylor approximation 1/2 - theta^2 / 48
quat = quat * (1.0 - 2.0 * (quat[..., 0:1] < 0.0))
mag = torch.linalg.norm(quat[..., 1:], dim=1)
half_angle = torch.atan2(mag, quat[..., 0])
angle = 2.0 * half_angle
# check whether to apply Taylor approximation
    sin_half_angles_over_angles = torch.where(
        angle.abs() > eps, torch.sin(half_angle) / angle, 0.5 - angle * angle / 48
    )
return quat[..., 1:4] / sin_half_angles_over_angles.unsqueeze(-1)
"""
Transformations
"""
@torch.jit.script
def combine_frame_transforms(
t01: torch.Tensor, q01: torch.Tensor, t12: torch.Tensor = None, q12: torch.Tensor = None
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Combine transformations between two reference frames into a stationary frame.
It performs the following transformation operation: :math:`T_{02} = T_{01} \times T_{12}`,
where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.
Args:
t01 (torch.Tensor): Position of frame 1 w.r.t. frame 0.
q01 (torch.Tensor): Quaternion orientation of frame 1 w.r.t. frame 0.
t12 (torch.Tensor): Position of frame 2 w.r.t. frame 1.
q12 (torch.Tensor): Quaternion orientation of frame 2 w.r.t. frame 1.
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple containing the position and orientation of
frame 2 w.r.t. frame 0.
"""
# compute orientation
if q12 is not None:
q02 = quat_mul(q01, q12)
else:
q02 = q01
# compute translation
if t12 is not None:
t02 = t01 + quat_apply(q01, t12)
else:
t02 = t01
return t02, q02
@torch.jit.script
def subtract_frame_transforms(
t01: torch.Tensor, q01: torch.Tensor, t02: torch.Tensor, q02: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Subtract transformations between two reference frames into a stationary frame.
It performs the following transformation operation: :math:`T_{12} = T_{01}^{-1} \times T_{02}`,
where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.
Args:
t01 (torch.Tensor): Position of frame 1 w.r.t. frame 0.
q01 (torch.Tensor): Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z).
t02 (torch.Tensor): Position of frame 2 w.r.t. frame 0.
q02 (torch.Tensor): Quaternion orientation of frame 2 w.r.t. frame 0 in (w, x, y, z).
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple containing the position and orientation of
frame 2 w.r.t. frame 1.
"""
# compute orientation
q10 = quat_inv(q01)
q12 = quat_mul(q10, q02)
# compute translation
t12 = quat_apply(q10, t02 - t01)
return t12, q12
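# Hedged usage sketch (added for illustration). `combine_frame_transforms`
# and `subtract_frame_transforms` invert each other: composing frame 1 with a
# relative transform and then subtracting frame 1 recovers that transform.
def _example_frame_transform_roundtrip():
    t01 = torch.tensor([[1.0, 0.0, 0.0]])
    q01 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
    t12 = torch.tensor([[0.0, 2.0, 0.0]])
    q12 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
    t02, q02 = combine_frame_transforms(t01, q01, t12, q12)
    t12_rec, q12_rec = subtract_frame_transforms(t01, q01, t02, q02)
    assert torch.allclose(t12, t12_rec) and torch.allclose(q12, q12_rec)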
@torch.jit.script
def compute_pose_error(t01: torch.Tensor, q01: torch.Tensor, t02: torch.Tensor, q02: torch.Tensor, rot_error_type: str):
"""Compute the position and orientation error between source and target frames.
Args:
t01 (torch.Tensor): Position of source frame.
q01 (torch.Tensor): Quaternion orientation of source frame.
t02 (torch.Tensor): Position of target frame.
q02 (torch.Tensor): Quaternion orientation of target frame.
rot_error_type (str): The rotation error type to return: "quat", "axis_angle".
Returns:
Tuple[torch.Tensor, torch.Tensor]: A tuple containing position and orientation error.
"""
# Compute quaternion error (i.e., difference quaternion)
# Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html
# q_current_norm = q_current * q_current_conj
source_quat_norm = quat_mul(q01, quat_conjugate(q01))[:, 0]
# q_current_inv = q_current_conj / q_current_norm
source_quat_inv = quat_conjugate(q01) / source_quat_norm.unsqueeze(-1)
# q_error = q_target * q_current_inv
quat_error = quat_mul(q02, source_quat_inv)
# Compute position error
pos_error = t02 - t01
# return error based on specified type
if rot_error_type == "quat":
return pos_error, quat_error
elif rot_error_type == "axis_angle":
# Convert to axis-angle error
axis_angle_error = axis_angle_from_quat(quat_error)
return pos_error, axis_angle_error
else:
raise ValueError(f"Unsupported orientation error type: {rot_error_type}. Valid: 'quat', 'axis_angle'.")
@torch.jit.script
def apply_delta_pose(
    source_pos: torch.Tensor, source_rot: torch.Tensor, delta_pose: torch.Tensor, eps: float = 1.0e-6
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Applies delta pose transformation on source pose.
The first three elements of `delta_pose` are interpreted as cartesian position displacement.
The remaining three elements of `delta_pose` are interpreted as orientation displacement
in the angle-axis format.
Args:
        source_pos (torch.Tensor): Position of source frame. Shape: [N, 3]
        source_rot (torch.Tensor): Quaternion orientation of source frame in (w, x, y, z).
delta_pose (torch.Tensor): Position and orientation displacements. Shape [N, 6].
eps (float): The tolerance to consider orientation displacement as zero.
    Returns:
        Tuple[torch.Tensor, torch.Tensor]: The displaced position and orientation frames. Shape: ([N, 3], [N, 4])
"""
# number of poses given
num_poses = source_pos.shape[0]
device = source_pos.device
# interpret delta_pose[:, 0:3] as target position displacements
target_pos = source_pos + delta_pose[:, 0:3]
# interpret delta_pose[:, 3:6] as target rotation displacements
rot_actions = delta_pose[:, 3:6]
angle = torch.linalg.vector_norm(rot_actions, dim=1)
axis = rot_actions / angle.unsqueeze(-1)
# change from axis-angle to quat convention
identity_quat = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat(num_poses, 1)
rot_delta_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > eps, quat_from_angle_axis(angle, axis), identity_quat
)
# TODO: Check if this is the correct order for this multiplication.
target_rot = quat_mul(rot_delta_quat, source_rot)
return target_pos, target_rot
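# Hedged usage sketch (added for illustration). A 6D delta of 1 cm along +x
# and 0.1 rad about +z applied to an identity pose.
def _example_apply_delta_pose():
    pos = torch.zeros(1, 3)
    rot = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
    delta = torch.tensor([[0.01, 0.0, 0.0, 0.0, 0.0, 0.1]])
    new_pos, new_rot = apply_delta_pose(pos, rot, delta)
    assert torch.allclose(new_pos, torch.tensor([[0.01, 0.0, 0.0]]))
    # new_rot is the quaternion for a 0.1 rad rotation about z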
"""
Sampling
"""
@torch.jit.script
def default_orientation(num: int, device: str) -> torch.Tensor:
"""Returns identity rotation transform.
Args:
num (int): The number of rotations to sample.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Identity quaternion (w, x, y, z).
"""
quat = torch.zeros((num, 4), dtype=torch.float, device=device)
quat[..., 0] = 1.0
return quat
@torch.jit.script
def random_orientation(num: int, device: str) -> torch.Tensor:
"""Returns sampled rotation in 3D as quaternion.
Reference:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html
Args:
num (int): The number of rotations to sample.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled quaternion (w, x, y, z).
"""
# sample random orientation from normal distribution
quat = torch.randn((num, 4), dtype=torch.float, device=device)
# normalize the quaternion
return torch.nn.functional.normalize(quat, p=2.0, dim=-1, eps=1e-12)
@torch.jit.script
def random_yaw_orientation(num: int, device: str) -> torch.Tensor:
"""Returns sampled rotation around z-axis.
Args:
num (int): The number of rotations to sample.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled quaternion (w, x, y, z).
"""
roll = torch.zeros(num, dtype=torch.float, device=device)
pitch = torch.zeros(num, dtype=torch.float, device=device)
yaw = 2 * np.pi * torch.rand(num, dtype=torch.float, device=device)
return quat_from_euler_xyz(roll, pitch, yaw)
def sample_triangle(lower: float, upper: float, size: Union[int, Tuple[int, ...]], device: str) -> torch.Tensor:
"""Randomly samples tensor from a triangular distribution.
Args:
lower (float): The lower range of the sampled tensor.
upper (float): The upper range of the sampled tensor.
size (Union[int, Tuple[int, ...]]): The shape of the tensor.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled tensor of shape :obj:`size`.
"""
# convert to tuple
if isinstance(size, int):
size = (size,)
# create random tensor in the range [-1, 1]
r = 2 * torch.rand(*size, device=device) - 1
# convert to triangular distribution
r = torch.where(r < 0.0, -torch.sqrt(-r), torch.sqrt(r))
# rescale back to [0, 1]
r = (r + 1.0) / 2.0
# rescale to range [lower, upper]
return (upper - lower) * r + lower
def sample_uniform(
lower: Union[torch.Tensor, float], upper: Union[torch.Tensor, float], size: Union[int, Tuple[int, ...]], device: str
) -> torch.Tensor:
"""Sample uniformly within a range.
Args:
lower (Union[torch.Tensor, float]): Lower bound of uniform range.
upper (Union[torch.Tensor, float]): Upper bound of uniform range.
size (Union[int, Tuple[int, ...]]): The shape of the tensor.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled tensor of shape :obj:`size`.
"""
# convert to tuple
if isinstance(size, int):
size = (size,)
# return tensor
return torch.rand(*size, device=device) * (upper - lower) + lower
def sample_cylinder(
radius: float, h_range: Tuple[float, float], size: Union[int, Tuple[int, ...]], device: str
) -> torch.Tensor:
"""Sample 3D points uniformly on a cylinder's surface.
The cylinder is centered at the origin and aligned with the z-axis. The height of the cylinder is
sampled uniformly from the range :obj:`h_range`, while the radius is fixed to :obj:`radius`.
The sampled points are returned as a tensor of shape :obj:`(*size, 3)`, i.e. the last dimension
contains the x, y, and z coordinates of the sampled points.
Args:
radius (float): The radius of the cylinder.
h_range (Tuple[float, float]): The minimum and maximum height of the cylinder.
size (Union[int, Tuple[int, ...]]): The shape of the tensor.
device (str): Device to create tensor on.
Returns:
torch.Tensor: Sampled tensor of shape :obj:`(*size, 3)`.
"""
# sample angles
angles = (torch.rand(size, device=device) * 2 - 1) * np.pi
h_min, h_max = h_range
# add shape
if isinstance(size, int):
size = (size, 3)
else:
size += (3,)
# allocate a tensor
xyz = torch.zeros(size, device=device)
xyz[..., 0] = radius * torch.cos(angles)
xyz[..., 1] = radius * torch.sin(angles)
xyz[..., 2].uniform_(h_min, h_max)
# return positions
return xyz
| 24,568 |
Python
| 32.610123 | 139 | 0.616167 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/ik_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import torch
from dataclasses import MISSING
from typing import Dict, Optional, Tuple
from hsr_rl.tasks.utils.annotation_utils import configclass
from hsr_rl.tasks.utils.transform_utils import (
apply_delta_pose,
combine_frame_transforms,
compute_pose_error,
quat_apply,
quat_inv
)
@configclass
class DifferentialInverseKinematicsCfg:
"""Configuration for inverse differential kinematics controller."""
command_type: str = MISSING
"""Type of command: "position_abs", "position_rel", "pose_abs", "pose_rel"."""
ik_method: str = MISSING
"""Method for computing inverse of Jacobian: "pinv", "svd", "trans", "dls"."""
ik_params: Optional[Dict[str, float]] = None
"""Parameters for the inverse-kinematics method. (default: obj:`None`).
- Moore-Penrose pseudo-inverse ("pinv"):
- "k_val": Scaling of computed delta-dof positions (default: 1.0).
- Adaptive Singular Value Decomposition ("svd"):
- "k_val": Scaling of computed delta-dof positions (default: 1.0).
- "min_singular_value": Single values less than this are suppressed to zero (default: 1e-5).
- Jacobian transpose ("trans"):
- "k_val": Scaling of computed delta-dof positions (default: 1.0).
- Damped Moore-Penrose pseudo-inverse ("dls"):
- "lambda_val": Damping coefficient (default: 0.1).
"""
position_offset: Tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Position offset from parent body to end-effector frame in parent body frame."""
rotation_offset: Tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""Rotational offset from parent body to end-effector frame in parent body frame."""
position_command_scale: Tuple[float, float, float] = (1.0, 1.0, 1.0)
"""Scaling of the position command received. Used only in relative mode."""
rotation_command_scale: Tuple[float, float, float] = (1.0, 1.0, 1.0)
"""Scaling of the rotation command received. Used only in relative mode."""
class DifferentialInverseKinematics:
"""Inverse differential kinematics controller.
This controller uses the Jacobian mapping from joint-space velocities to end-effector velocities
to compute the delta-change in the joint-space that moves the robot closer to a desired end-effector
position.
    To deal with singularities in the Jacobian, the following methods are supported for computing its inverse:
- "pinv": Moore-Penrose pseudo-inverse
- "svd": Adaptive singular-value decomposition (SVD)
- "trans": Transpose of matrix
- "dls": Damped version of Moore-Penrose pseudo-inverse (also called Levenberg-Marquardt)
Note: We use the quaternions in the convention: [w, x, y, z].
Reference:
[1] https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2017/RD_HS2017script.pdf
[2] https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf
"""
_DEFAULT_IK_PARAMS = {
"pinv": {"k_val": 1.0},
"svd": {"k_val": 1.0, "min_singular_value": 1e-5},
"trans": {"k_val": 1.0},
"dls": {"lambda_val": 0.1},
}
"""Default parameters for different inverse kinematics approaches."""
def __init__(self, cfg: DifferentialInverseKinematicsCfg, num_robots: int, device: str):
"""Initialize the controller.
Args:
cfg (DifferentialInverseKinematicsCfg): The configuration for the controller.
num_robots (int): The number of robots to control.
device (str): The device to use for computations.
Raises:
ValueError: When configured IK-method is not supported.
ValueError: When configured command type is not supported.
"""
# store inputs
self.cfg = cfg
self.num_robots = num_robots
self._device = device
# check valid input
if self.cfg.ik_method not in ["pinv", "svd", "trans", "dls"]:
raise ValueError(f"Unsupported inverse-kinematics method: {self.cfg.ik_method}.")
if self.cfg.command_type not in ["position_abs", "position_rel", "pose_abs", "pose_rel"]:
raise ValueError(f"Unsupported inverse-kinematics command: {self.cfg.command_type}.")
# update parameters for IK-method
self._ik_params = self._DEFAULT_IK_PARAMS[self.cfg.ik_method].copy()
if self.cfg.ik_params is not None:
self._ik_params.update(self.cfg.ik_params)
# end-effector offsets
# -- position
tool_child_link_pos = torch.tensor(self.cfg.position_offset, device=self._device)
self._tool_child_link_pos = tool_child_link_pos.repeat(self.num_robots, 1)
# -- orientation
tool_child_link_rot = torch.tensor(self.cfg.rotation_offset, device=self._device)
self._tool_child_link_rot = tool_child_link_rot.repeat(self.num_robots, 1)
# transform from tool -> parent frame
self._tool_parent_link_rot = quat_inv(self._tool_child_link_rot)
self._tool_parent_link_pos = -quat_apply(self._tool_parent_link_rot, self._tool_child_link_pos)
# scaling of command
self._position_command_scale = torch.diag(torch.tensor(self.cfg.position_command_scale, device=self._device))
self._rotation_command_scale = torch.diag(torch.tensor(self.cfg.rotation_command_scale, device=self._device))
# create buffers
self.desired_ee_pos = torch.zeros(self.num_robots, 3, device=self._device)
self.desired_ee_rot = torch.zeros(self.num_robots, 4, device=self._device)
# -- input command
self._command = torch.zeros(self.num_robots, self.num_actions, device=self._device)
"""
Properties.
"""
@property
def num_actions(self) -> int:
"""Dimension of the action space of controller."""
if "position" in self.cfg.command_type:
return 3
elif self.cfg.command_type == "pose_rel":
return 6
elif self.cfg.command_type == "pose_abs":
return 7
else:
raise ValueError(f"Invalid control command: {self.cfg.command_type}.")
"""
Operations.
"""
def initialize(self):
"""Initialize the internals."""
pass
def reset_idx(self, robot_ids: torch.Tensor = None):
"""Reset the internals."""
pass
def set_command(self, command: torch.Tensor):
"""Set target end-effector pose command."""
# check input size
if command.shape != (self.num_robots, self.num_actions):
raise ValueError(
f"Invalid command shape '{command.shape}'. Expected: '{(self.num_robots, self.num_actions)}'."
)
# store command
self._command[:] = command
def compute(
self,
current_ee_pos: torch.Tensor,
current_ee_rot: torch.Tensor,
jacobian: torch.Tensor,
joint_positions: torch.Tensor,
) -> torch.Tensor:
"""Performs inference with the controller.
Returns:
torch.Tensor: The target joint positions commands.
"""
# compute the desired end-effector pose
if "position_rel" in self.cfg.command_type:
# scale command
self._command @= self._position_command_scale
# compute targets
self.desired_ee_pos = current_ee_pos + self._command
self.desired_ee_rot = current_ee_rot
elif "position_abs" in self.cfg.command_type:
# compute targets
self.desired_ee_pos = self._command
self.desired_ee_rot = current_ee_rot
elif "pose_rel" in self.cfg.command_type:
# scale command
self._command[:, 0:3] @= self._position_command_scale
self._command[:, 3:6] @= self._rotation_command_scale
# compute targets
self.desired_ee_pos, self.desired_ee_rot = apply_delta_pose(current_ee_pos, current_ee_rot, self._command)
elif "pose_abs" in self.cfg.command_type:
# compute targets
self.desired_ee_pos = self._command[:, 0:3]
self.desired_ee_rot = self._command[:, 3:7]
else:
raise ValueError(f"Invalid control command: {self.cfg.command_type}.")
# transform from ee -> parent
# TODO: Make this optional to reduce overhead?
desired_parent_pos, desired_parent_rot = combine_frame_transforms(
self.desired_ee_pos, self.desired_ee_rot, self._tool_parent_link_pos, self._tool_parent_link_rot
)
# transform from ee -> parent
# TODO: Make this optional to reduce overhead?
current_parent_pos, current_parent_rot = combine_frame_transforms(
current_ee_pos, current_ee_rot, self._tool_parent_link_pos, self._tool_parent_link_rot
)
# compute pose error between current and desired
position_error, axis_angle_error = compute_pose_error(
current_parent_pos, current_parent_rot, desired_parent_pos, desired_parent_rot, rot_error_type="axis_angle"
)
# compute the delta in joint-space
if "position" in self.cfg.command_type:
jacobian_pos = jacobian[:, 0:3]
delta_joint_positions = self._compute_delta_dof_pos(delta_pose=position_error, jacobian=jacobian_pos)
else:
pose_error = torch.cat((position_error, axis_angle_error), dim=1)
delta_joint_positions = self._compute_delta_dof_pos(delta_pose=pose_error, jacobian=jacobian)
# return the desired joint positions
return joint_positions + delta_joint_positions
def compute_delta(
self,
current_ee_pos: torch.Tensor,
current_ee_rot: torch.Tensor,
jacobian: torch.Tensor,
) -> torch.Tensor:
"""Performs inference with the controller.
Returns:
torch.Tensor: The target joint positions commands.
"""
# compute the desired end-effector pose
if "position_rel" in self.cfg.command_type:
# scale command
self._command @= self._position_command_scale
# compute targets
self.desired_ee_pos = current_ee_pos + self._command
self.desired_ee_rot = current_ee_rot
elif "position_abs" in self.cfg.command_type:
# compute targets
self.desired_ee_pos = self._command
self.desired_ee_rot = current_ee_rot
elif "pose_rel" in self.cfg.command_type:
# scale command
self._command[:, 0:3] @= self._position_command_scale
self._command[:, 3:6] @= self._rotation_command_scale
# compute targets
self.desired_ee_pos, self.desired_ee_rot = apply_delta_pose(current_ee_pos, current_ee_rot, self._command)
elif "pose_abs" in self.cfg.command_type:
# compute targets
self.desired_ee_pos = self._command[:, 0:3]
self.desired_ee_rot = self._command[:, 3:7]
else:
raise ValueError(f"Invalid control command: {self.cfg.command_type}.")
# transform from ee -> parent
# TODO: Make this optional to reduce overhead?
desired_parent_pos, desired_parent_rot = combine_frame_transforms(
self.desired_ee_pos, self.desired_ee_rot, self._tool_parent_link_pos, self._tool_parent_link_rot
)
# transform from ee -> parent
# TODO: Make this optional to reduce overhead?
current_parent_pos, current_parent_rot = combine_frame_transforms(
current_ee_pos, current_ee_rot, self._tool_parent_link_pos, self._tool_parent_link_rot
)
# compute pose error between current and desired
position_error, axis_angle_error = compute_pose_error(
current_parent_pos, current_parent_rot, desired_parent_pos, desired_parent_rot, rot_error_type="axis_angle"
)
# compute the delta in joint-space
if "position" in self.cfg.command_type:
jacobian_pos = jacobian[:, 0:3]
delta_joint_positions = self._compute_delta_dof_pos(delta_pose=position_error, jacobian=jacobian_pos)
else:
pose_error = torch.cat((position_error, axis_angle_error), dim=1)
delta_joint_positions = self._compute_delta_dof_pos(delta_pose=pose_error, jacobian=jacobian)
# return the desired joint positions
return delta_joint_positions
"""
Helper functions.
"""
def _compute_delta_dof_pos(self, delta_pose: torch.Tensor, jacobian: torch.Tensor) -> torch.Tensor:
"""Computes the change in dos-position that yields the desired change in pose.
The method uses the Jacobian mapping from joint-space velocities to end-effector velocities
to compute the delta-change in the joint-space that moves the robot closer to a desired end-effector
position.
Args:
delta_pose (torch.Tensor): The desired delta pose in shape [N, 3 or 6].
jacobian (torch.Tensor): The geometric jacobian matrix in shape [N, 3 or 6, num-dof]
Returns:
torch.Tensor: The desired delta in joint space.
"""
if self.cfg.ik_method == "pinv": # Jacobian pseudo-inverse
# parameters
k_val = self._ik_params["k_val"]
# computation
jacobian_pinv = torch.linalg.pinv(jacobian)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif self.cfg.ik_method == "svd": # adaptive SVD
# parameters
k_val = self._ik_params["k_val"]
min_singular_value = self._ik_params["min_singular_value"]
# computation
# U: 6xd, S: dxd, V: d x num-dof
U, S, Vh = torch.linalg.svd(jacobian)
S_inv = 1.0 / S
S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv))
jacobian_pinv = (
torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6]
@ torch.diag_embed(S_inv)
@ torch.transpose(U, dim0=1, dim1=2)
)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif self.cfg.ik_method == "trans": # Jacobian transpose
# parameters
k_val = self._ik_params["k_val"]
# computation
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif self.cfg.ik_method == "dls": # damped least squares
# parameters
lambda_val = self._ik_params["lambda_val"]
# computation
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
lambda_matrix = (lambda_val**2) * torch.eye(n=jacobian.shape[1], device=self._device)
delta_dof_pos = jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
else:
raise ValueError(f"Unsupported inverse-kinematics method: {self.cfg.ik_method}")
return delta_dof_pos
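# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original module):
# a minimal, self-contained demonstration of the damped-least-squares ("dls")
# branch above, assuming a batch of 2 environments and a 6-DoF arm. The names
# `jacobian`, `delta_pose` and `lambda_val` mirror the method's locals; no
# controller instance is needed.
if __name__ == "__main__":
    import torch

    num_envs, num_dof = 2, 6
    jacobian = torch.randn(num_envs, 6, num_dof)   # geometric Jacobian batch
    delta_pose = torch.randn(num_envs, 6)          # pose error (pos + axis-angle)
    lambda_val = 0.05                              # damping factor
    jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
    lambda_matrix = (lambda_val**2) * torch.eye(jacobian.shape[1])
    # dq = J^T (J J^T + lambda^2 I)^{-1} dx
    delta_dof_pos = jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1)
    print(delta_dof_pos.squeeze(-1).shape)         # -> torch.Size([2, 6])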
| 15,553 |
Python
| 39.824147 | 153 | 0.618852 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/utils/scene_utils.py
|
import os
import torch
import numpy as np
from typing import Optional
from pxr import Gf
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.prims.geometry_prim import GeometryPrim
from omni.physx.scripts import utils
from pxr import UsdGeom
from hsr_rl.utils.files import get_usd_path
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.torch.rotations import euler_angles_to_quats
def spawn_obstacle(name, prim_path, device):
object_translation = torch.tensor([1.7, 0.0, 0.055], device=device)
object_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device)
# Spawn object model from usd path
object_usd_path = os.path.join(get_usd_path(), 'gearbox', name, name+'.usd')
add_reference_to_stage(object_usd_path, prim_path+"/obstacle/"+name)
obst_prim = XFormPrim(
prim_path=prim_path+"/obstacle/"+name,
translation=object_translation,
orientation=object_orientation
)
return obst_prim
def spawn_dynamic_object(name, prim_path, object_translation, object_orientation):
# Spawn object model from usd path
object_usd_path = os.path.join(get_usd_path(), 'gearbox_dynamic', name, name+'.usd')
add_reference_to_stage(object_usd_path, prim_path+"/"+name)
obj_prim = XFormPrim(
prim_path=prim_path+"/"+name,
translation=object_translation,
orientation=object_orientation
)
return obj_prim
def spawn_static_object(name, prim_path, object_translation, object_orientation):
# Spawn object model from usd path
object_usd_path = os.path.join(get_usd_path(), 'gearbox_static', name, name+'.usd')
add_reference_to_stage(object_usd_path, prim_path+"/"+name)
obj_prim = XFormPrim(
prim_path=prim_path+"/"+name,
translation=object_translation,
orientation=object_orientation
)
return obj_prim
def setup_gearbox_scene(env_ids, indices, obstacles, obstacles_state, grasp_objs, grasp_objs_state, device):
# Place all grasp objects on the tabular obstacle (without overlaps)
for idx, _ in enumerate(obstacles):
obst_position = obstacles_state[idx][env_ids, :3]
obst_orientation = obstacles_state[idx][env_ids, 3:]
obstacles[idx].set_world_poses(positions=obst_position,
orientations=obst_orientation,
indices=indices)
for idx, _ in enumerate(grasp_objs):
grasp_obj_position = grasp_objs_state[idx][env_ids, :3]
grasp_obj_orientation = grasp_objs_state[idx][env_ids, 3:]
grasp_objs[idx].set_world_poses(positions=grasp_obj_position,
orientations=grasp_obj_orientation,
indices=indices)
# Pick one object at random to be the goal grasp object:
goal_obj_index = np.random.randint(len(grasp_objs))
return grasp_objs[goal_obj_index]
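# Editor's note (hedged): a typical call pattern for the spawn helpers above,
# with a hypothetical asset name and pose. Each helper resolves a USD file
# under get_usd_path() and wraps the created prim in an XFormPrim:
#
# shaft = spawn_dynamic_object(
#     name="shaft1",                                    # hypothetical asset
#     prim_path="/World/envs/env_0",
#     object_translation=torch.tensor([1.5, 0.0, 0.3]),
#     object_orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]),
# )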
class DynamicObject(RigidPrim, GeometryPrim):
"""Creates and adds a prim to stage from USD reference path, and wraps the prim with RigidPrim and GeometryPrim to
provide access to APIs for rigid body attributes, physics materials and collisions. Please note that this class
assumes the object has only a single mesh prim defining its geometry.
Args:
usd_path (str): USD reference path the Prim refers to.
prim_path (str): prim path of the Prim to encapsulate or create.
mesh_path (str): prim path of the underlying mesh Prim.
name (str, optional): shortname to be used as a key by Scene class. Note: needs to be unique if the object is
added to the Scene. Defaults to "dynamic_object".
position (Optional[np.ndarray], optional): position in the world frame of the prim. Shape is (3, ). Defaults to
None, which means left unchanged.
translation (Optional[np.ndarray], optional): translation in the local frame of the prim (with respect to its
parent prim). Shape is (3, ). Defaults to None, which means left
unchanged.
orientation (Optional[np.ndarray], optional): quaternion orientation in the world/local frame of the prim
(depends if translation or position is specified). Quaternion is
scalar-first (w, x, y, z). Shape is (4, ). Defaults to None, which
means left unchanged.
scale (Optional[np.ndarray], optional): local scale to be applied to the prim's dimensions. Shape is (3, ).
Defaults to None, which means left unchanged.
visible (bool, optional): set to false for an invisible prim in the stage while rendering. Defaults to True.
mass (Optional[float], optional): mass in kg. Defaults to None.
linear_velocity (Optional[np.ndarray], optional): linear velocity in the world frame. Defaults to None.
angular_velocity (Optional[np.ndarray], optional): angular velocity in the world frame. Defaults to None.
"""
def __init__(
self,
usd_path: str,
prim_path: str,
mesh_path: str,
name: str = "dynamic_object",
position: Optional[np.ndarray] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
scale: Optional[np.ndarray] = None,
visible: bool = True,
mass: Optional[float] = None,
linear_velocity: Optional[np.ndarray] = None,
angular_velocity: Optional[np.ndarray] = None,
) -> None:
if is_prim_path_valid(mesh_path):
prim = get_prim_at_path(mesh_path)
if not prim.IsA(UsdGeom.Mesh):
raise Exception("The prim at path {} cannot be parsed as a Mesh object".format(mesh_path))
self.usd_path = usd_path
add_reference_to_stage(usd_path=usd_path, prim_path=prim_path)
GeometryPrim.__init__(
self,
prim_path=mesh_path,
name=name,
translation=translation,
orientation=orientation,
visible=visible,
collision=True,
)
self.set_collision_approximation("convexHull")
RigidPrim.__init__(
self,
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
mass=mass,
linear_velocity=linear_velocity,
angular_velocity=angular_velocity,
)
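# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): how DynamicObject might be used in
# a live Isaac Sim stage. The USD and prim paths are hypothetical, and the
# class needs an open stage, so this is left commented out:
#
# obj = DynamicObject(
#     usd_path="/path/to/asset.usd",                 # hypothetical asset
#     prim_path="/World/envs/env_0/my_object",
#     mesh_path="/World/envs/env_0/my_object/mesh",  # the single mesh prim
#     name="my_object",
#     position=np.array([1.0, 0.0, 0.1]),
#     mass=0.2,
# )
# # The wrapper then exposes both APIs, e.g. obj.get_linear_velocity() from
# # RigidPrim and obj.set_collision_approximation(...) from GeometryPrim.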
| 7,020 |
Python
| 44.296774 | 121 | 0.618946 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/residual/hsr_residual_stack.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from hsr_rl.tasks.base.rl_task import RLTask
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from hsr_rl.utils.dataset_utils import load_dataset
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
from omni.isaac.core.prims.geometry_prim_view import GeometryPrimView
from omni.isaac.core.articulations.articulation_view import ArticulationView
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.stage import print_stage_prim_paths
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.objects import DynamicCuboid, FixedCuboid
from omni.isaac.core.materials import PhysicsMaterial
from omni.isaac.sensor import _sensor
from omni.isaac.cloner import Cloner
from omni.physx.scripts import utils, physicsUtils
import re
import math
import torch
import numpy as np
from pxr import Usd, UsdGeom, PhysxSchema, UsdPhysics, Gf
class HSRResidualStackTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
# Get dt for integrating velocity commands and checking limit violations
self._dt = torch.tensor(self._task_cfg["sim"]["dt"] * self._task_cfg["env"]["controlFrequencyInv"], device=self._device)
self._num_observations = self._task_cfg["env"]["num_observations"]
self._num_actions = self._task_cfg["env"]["num_actions"]
self._num_props = self._task_cfg["env"]["numProps"]
# Table and prop settings
self._table_height = 0.26
self._table_width = 0.5
self._table_depth = 0.5
self._table_size = 1.0
self._prop_size = self._sim_config.task_config["sim"]["parts"]["size"]
self._prop_density = self._sim_config.task_config["sim"]["parts"]["density"]
self._prop_static_friction = self._sim_config.task_config["sim"]["parts"]["static_friction"]
self._prop_dynamic_friction = self._sim_config.task_config["sim"]["parts"]["dynamic_friction"]
self._prop_restitution = self._sim_config.task_config["sim"]["parts"]["restitution"]
self._gripper_mass = self._sim_config.task_config["sim"]["gripper"]["mass"]
self._gripper_density = self._sim_config.task_config["sim"]["gripper"]["density"]
self._gripper_static_friction = self._sim_config.task_config["sim"]["gripper"]["static_friction"]
self._gripper_dynamic_friction = self._sim_config.task_config["sim"]["gripper"]["dynamic_friction"]
self._gripper_restitution = self._sim_config.task_config["sim"]["gripper"]["restitution"]
self._pick_success = self._table_height + 0.05
self._place_success = torch.tensor([0.2, 0.2], device=self._device)
self._hsr_position = torch.tensor([0.0, 0.0, 0.03], device=self._device)
self._hsr_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._table_position = torch.tensor([1.625, 0.0, self._table_height/2], device=self._device)
self._table_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._prop_position = torch.tensor([1.48, 0.455, self._table_height+self._prop_size/2], device=self._device)
self._prop_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
# Start at 'home' positions
self.torso_start = torch.tensor([0.1], device=self._device)
self.base_start = torch.tensor([0.0, 0.0, 0.0], device=self._device)
self.arm_start = torch.tensor([0.1, -1.570796, 0.0, -0.392699, 0.0], device=self._device)
self.gripper_proximal_start = torch.tensor([0.75, 0.75], device=self._device)
self.initial_dof_positions = torch.tensor([0.0, 0.0, 0.0, 0.1, 0.1, -1.570796, 0.0, 0.0, 0.0, 0.0, 0.0, 0.75, 0.75, 0.0, 0.0], device=self._device)
# Dof joint gains
self.joint_kps = torch.tensor([1e9, 1e9, 5.7296e10, 1e9, 1e9, 5.7296e10,
5.7296e10, 5.7296e10, 5.7296e10, 5.7296e10, 5.7296e10, 2.8648e4,
2.8648e4, 5.7296e10, 5.7296e10], device=self._device)
self.joint_kds = torch.tensor([1.4, 1.4, 80.2141, 1.4, 0.0, 80.2141, 0.0, 80.2141,
0.0, 80.2141, 80.2141, 17.1887, 17.1887, 17.1887, 17.1887], device=self._device)
# Dof joint friction coefficients
self.joint_friction_coefficients = torch.tensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], device=self._device)
# Joint & body names
self._torso_joint_name = ["torso_lift_joint"]
self._base_joint_names = ["joint_x", "joint_y", "joint_rz"]
self._arm_names = ["arm_lift_joint", "arm_flex_joint", "arm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
self._gripper_proximal_names = ["hand_l_proximal_joint", "hand_r_proximal_joint"]
# Values are set in post_reset after model is loaded
self.torso_dof_idx = []
self.base_dof_idxs = []
self.arm_dof_idxs = []
self.gripper_proximal_dof_idxs = []
# Dof joint position limits
self.torso_dof_lower = []
self.torso_dof_upper = []
self.base_dof_lower = []
self.base_dof_upper = []
self.arm_dof_lower = []
self.arm_dof_upper = []
self.gripper_p_dof_lower = []
self.gripper_p_dof_upper = []
# Add contact sensor
self._contact_sensor_interface = _sensor.acquire_contact_sensor_interface()
self.replay_count = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.is_collided = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.lift_success = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.place_success = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.gripper_close = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.gripper_open = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.gripper_hold = torch.zeros(self._num_envs, device=self._device, dtype=torch.bool)
self.exp_actions = self.load_exp_dataset()
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.create_prop_material()
self.create_gripper_material()
self.add_hsr()
self.add_prop()
self.add_table1()
self.add_table2()
# Set up scene
super().set_up_scene(scene)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view")
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
# Add prop to scene
self._props = RigidPrimView(prim_paths_expr="/World/envs/.*/prop", name="prop_view", reset_xform_properties=False)
scene.add(self._props)
def create_prop_material(self):
self._stage = get_current_stage()
self.propPhysicsMaterialPath = "/World/Physics_Materials/PropMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.propPhysicsMaterialPath,
density=self._prop_density,
staticFriction=self._prop_static_friction,
dynamicFriction=self._prop_dynamic_friction,
)
def create_gripper_material(self):
self._stage = get_current_stage()
self.gripperPhysicsMaterialPath = "/World/Physics_Materials/GripperMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.gripperPhysicsMaterialPath,
density=self._gripper_density,
staticFriction=self._gripper_static_friction,
dynamicFriction=self._gripper_dynamic_friction,
restitution=self._gripper_restitution
)
def add_hsr(self):
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb",
name="hsrb",
translation=self._hsr_position,
orientation=self._hsr_rotation)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_prop(self):
prop = DynamicCuboid(prim_path=self.default_zero_env_path + "/prop",
name="prop",
translation=self._prop_position,
orientation=self._prop_rotation,
size=self._prop_size,
density=self._prop_density,
color=torch.tensor([0.2, 0.4, 0.6]))
self._sim_config.apply_articulation_settings("prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop"))
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(self.default_zero_env_path + "/prop"),
self.propPhysicsMaterialPath
)
def add_table1(self):
table_pos = torch.tensor([1.5, -0.5, self._table_height/2], device=self._device)
table_orn = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
table = FixedCuboid(prim_path=self.default_zero_env_path + "/table1",
name="table1",
translation=table_pos,
orientation=table_orn,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([self._table_width, self._table_depth, self._table_height]))
self._sim_config.apply_articulation_settings("table1", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table"))
def add_table2(self):
table_pos = torch.tensor([1.5, 0.5, self._table_height/2], device=self._device)
table_orn = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
table = FixedCuboid(prim_path=self.default_zero_env_path + "/table2",
name="table2",
translation=table_pos,
orientation=table_orn,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([self._table_width, self._table_depth, self._table_height]))
self._sim_config.apply_articulation_settings("table2", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table"))
def get_observations(self):
# Get prop positions and orientations
prop_positions, prop_orientations = self._props.get_world_poses(clone=False)
prop_positions = prop_positions[:, 0:3] - self._env_pos
# Get prop velocities
prop_velocities = self._props.get_velocities(clone=False)
prop_linvels = prop_velocities[:, 0:3]
prop_angvels = prop_velocities[:, 3:6]
# Get end effector positions and orientations
end_effector_positions, end_effector_orientations = self._robots._fingertip_centered.get_world_poses(clone=False)
end_effector_positions = end_effector_positions[:, 0:3] - self._env_pos
# Get end effector velocities
end_effector_velocities = self._robots._fingertip_centered.get_velocities(clone=False)
end_effector_linvels = end_effector_velocities[:, 0:3]
end_effector_angvels = end_effector_velocities[:, 3:6]
self.prop_positions = prop_positions
self.prop_linvels = prop_linvels
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
self.obs_buf[..., 0:10] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 10:20] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 20:23] = end_effector_positions
self.obs_buf[..., 23:27] = end_effector_orientations
self.obs_buf[..., 27:30] = end_effector_linvels
self.obs_buf[..., 30:33] = end_effector_angvels
self.obs_buf[..., 33:36] = prop_positions
self.obs_buf[..., 36:40] = prop_orientations
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Update position targets from actions
replay_indices = self.replay_count.clone()
self.dof_position_targets[..., self.movable_dof_indices] = self._robots.get_joint_positions(joint_indices=self.movable_dof_indices)
self.dof_position_targets[..., self.movable_dof_indices] += self.exp_actions[replay_indices, :8] # replay tamp trajectory
self.dof_position_targets[..., self.movable_dof_indices] += self._dt * self._action_speed_scale * actions.to(self.device) # add residual rl action
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits
)
# Modify torso joint positions
dof_pos = self._robots.get_joint_positions()
arm_pos = dof_pos[:, self.arm_dof_idxs]
scaled_arm_lift_pos = arm_pos[:, 0] / self.arm_dof_upper[0]
scaled_torso_lift_pos = scaled_arm_lift_pos * self.torso_dof_upper[0]
self.dof_position_targets[:, self.torso_dof_idx] = scaled_torso_lift_pos.unsqueeze(dim=1)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = self.initial_dof_positions
self._robots.set_joint_position_targets(self.dof_position_targets)
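# Editor's note (hedged): the target update above composes, per movable
# joint, roughly
#     q_target = q_current + dq_expert + dt * action_speed_scale * a_residual
# where dq_expert replays the precomputed TAMP trajectory and a_residual is
# the learned residual policy action; the clamp then keeps q_target inside
# the joint limits.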
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = 0.0 # min horizontal dist from origin
max_d = 0.0 # max horizontal dist from origin
min_height = 0.0 # min vertical dist from origin
max_height = 0.0 # max vertical dist from origin
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
# Prop pos / rot, velocities
self.prop_pos = self.initial_prop_pos.clone()
self.prop_rot = self.initial_prop_rot.clone()
# position
self.prop_pos[env_ids_64, 0:2] += hpos[..., 0:2]
self.prop_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
self.prop_rot[env_ids_64, 0] = 1
self.prop_rot[env_ids_64, 1:] = 0
# reset root state for props in selected envs
self._props.set_world_poses(self.prop_pos[env_ids_64], self.prop_rot[env_ids_64], indices=env_ids_32)
# reset root state for robots in selected envs
self._robots.set_world_poses(self.initial_robot_pos[env_ids_64], self.initial_robot_rot[env_ids_64], indices=env_ids_32)
# reset DOF states for robots in selected envs
self._robots.set_joint_position_targets(self.initial_dof_positions, indices=env_ids_32)
gripper_dof_effort = torch.tensor([0., 0.], device=self._device)
self._robots.set_joint_efforts(gripper_dof_effort, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
# bookkeeping
self.gripper_close[env_ids] = False
self.gripper_open[env_ids] = False
self.gripper_hold[env_ids] = False
self.replay_count[env_ids] = 0
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.extras[env_ids] = 0
self.is_collided[env_ids] = 0
self.lift_success[env_ids] = 0
self.place_success[env_ids] = 0
def post_reset(self):
self.set_dof_idxs()
self.set_dof_limits()
self.set_default_state()
self.set_joint_gains()
self.set_joint_frictions()
# reset prop pos / rot, and velocities
self.initial_robot_pos, self.initial_robot_rot = self._robots.get_world_poses()
self.initial_robot_velocities = self._robots.get_velocities()
# reset prop pos / rot, and velocities
self.initial_prop_pos, self.initial_prop_rot = self._props.get_world_poses()
self.initial_prop_velocities = self._props.get_velocities()
def calculate_metrics(self) -> None:
# Distance from hand to the prop
dist = torch.norm(self.obs_buf[..., 33:36] - self.obs_buf[..., 20:23], p=2, dim=-1)
dist_reward = 1.0 / (1.0 + dist ** 2)
dist_reward *= dist_reward
dist_reward = torch.where(dist <= 0.02, dist_reward * 2, dist_reward)
self.rew_buf[:] = dist_reward * self._task_cfg['rl']['distance_scale']
# In this policy, episode length is constant across all envs
is_mid_step = (self.progress_buf[0] == int(self.exp_actions.size()[0] / 2))
is_last_step = (self.progress_buf[0] == int(self.exp_actions.size()[0] - 1))
if is_mid_step:
# Check if block is picked up and above table
lift_success = self._check_lift_success(height_threshold=self._pick_success)
self.rew_buf[:] += lift_success * self._task_cfg['rl']['pick_success_bonus']
self.extras['lift_successes'] = torch.mean(lift_success.float())
self.lift_success = torch.where(
lift_success[:] == 1,
torch.ones_like(lift_success),
-torch.ones_like(lift_success)
)
if is_last_step:
# Check if block is in a target region
place_success = self._check_place_success(place_threshold=self._place_success)
self.rew_buf[:] += place_success * self._task_cfg['rl']['place_success_bonus']
self.extras['place_successes'] = torch.mean(place_success.float())
self.place_success = torch.where(
place_success[:] == 1,
torch.ones_like(place_success),
-torch.ones_like(place_success)
)
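# Editor's note (hedged): a quick numeric check of the shaped distance
# reward above. At dist = 0.1 m:
#     dist_reward = (1 / (1 + 0.1**2))**2 ~= 0.980
# and at dist = 0.01 m (inside the 2 cm bonus band):
#     dist_reward = 2 * (1 / (1 + 0.01**2))**2 ~= 1.9996
# so the shaping roughly doubles the reward once the gripper is within
# 2 cm of the prop, before the distance_scale factor is applied.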
def is_done(self) -> None:
self.reset_buf = torch.where(
self.progress_buf == self.exp_actions.size()[0] - 1,
torch.ones_like(self.reset_buf),
self.reset_buf
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.replay_count[:] += 1
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
replay_indices = self.replay_count.clone()
self.gripper_close = self.exp_actions[replay_indices, -1] < -0.01
self.gripper_open = self.exp_actions[replay_indices, -1] > 0.01
if torch.any(self.gripper_close):
close_indices = torch.where(self.gripper_close)[0]
self._close_gripper(close_indices, sim_steps=self._task_cfg['env']['num_gripper_close_sim_steps'])
elif torch.any(self.gripper_open):
open_indices = torch.where(self.gripper_open)[0]
self._open_gripper(open_indices, sim_steps=self._task_cfg['env']['num_gripper_open_sim_steps'])
elif torch.any(self.gripper_hold):
hold_indices = torch.where(self.gripper_hold)[0]
self._hold_gripper(hold_indices)
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def set_dof_idxs(self):
self.torso_dof_idx = [self._robots.get_dof_index(name) for name in self._torso_joint_name]
self.base_dof_idxs = [self._robots.get_dof_index(name) for name in self._base_joint_names]
self.arm_dof_idxs = [self._robots.get_dof_index(name) for name in self._arm_names]
self.gripper_proximal_dof_idxs = [self._robots.get_dof_index(name) for name in self._gripper_proximal_names]
# Movable joints
self.actuated_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs+self.gripper_proximal_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10, 11, 12]).to(self._device)
self.movable_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10]).to(self._device)
def set_dof_limits(self): # dof position limits
# (num_envs, num_dofs, 2)
dof_limits = self._robots.get_dof_limits()
dof_limits_lower = dof_limits[0, :, 0].to(self._device)
dof_limits_upper = dof_limits[0, :, 1].to(self._device)
# Set relevant joint position limit values
self.torso_dof_lower = dof_limits_lower[self.torso_dof_idx]
self.torso_dof_upper = dof_limits_upper[self.torso_dof_idx]
self.base_dof_lower = dof_limits_lower[self.base_dof_idxs]
self.base_dof_upper = dof_limits_upper[self.base_dof_idxs]
self.arm_dof_lower = dof_limits_lower[self.arm_dof_idxs]
self.arm_dof_upper = dof_limits_upper[self.arm_dof_idxs]
self.gripper_p_dof_lower = dof_limits_lower[self.gripper_proximal_dof_idxs]
self.gripper_p_dof_upper = dof_limits_upper[self.gripper_proximal_dof_idxs]
self.robot_dof_lower_limits, self.robot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
def set_default_state(self):
# Set default joint state
joint_states = self._robots.get_joints_default_state()
jt_pos = joint_states.positions
jt_pos[:, self.torso_dof_idx] = self.torso_start
jt_pos[:, self.base_dof_idxs] = self.base_start
jt_pos[:, self.arm_dof_idxs] = self.arm_start
jt_pos[:, self.gripper_proximal_dof_idxs] = self.gripper_proximal_start
jt_vel = joint_states.velocities
jt_vel[:, self.torso_dof_idx] = torch.zeros_like(self.torso_start, device=self._device)
jt_vel[:, self.base_dof_idxs] = torch.zeros_like(self.base_start, device=self._device)
jt_vel[:, self.arm_dof_idxs] = torch.zeros_like(self.arm_start, device=self._device)
jt_vel[:, self.gripper_proximal_dof_idxs] = torch.zeros_like(self.gripper_proximal_start, device=self._device)
self._robots.set_joints_default_state(positions=jt_pos, velocities=jt_vel)
# Initialize target positions
self.dof_position_targets = jt_pos
def set_joint_gains(self):
self._robots.set_gains(kps=self.joint_kps, kds=self.joint_kds)
def set_joint_frictions(self):
self._robots.set_friction_coefficients(self.joint_friction_coefficients)
def _close_gripper(self, env_ids, sim_steps=10):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_effort = torch.tensor([-30., -30.], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_efforts(gripper_dof_effort, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.gripper_hold[env_ids_64] = True
def _open_gripper(self, env_ids, sim_steps=10):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_effort = torch.tensor([0., 0.], device=self._device)
self._robots.set_joint_efforts(gripper_dof_effort, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
gripper_dof_pos = torch.tensor([0.5, 0.5], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_position_targets(gripper_dof_pos, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.gripper_hold[env_ids_64] = False
def _hold_gripper(self, env_ids):
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
gripper_dof_effort = torch.tensor([-30., -30.], device=self._device)
self._robots.set_joint_efforts(gripper_dof_effort, indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
self.dof_position_targets[env_ids_64[:, None], self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(indices=env_ids_32, joint_indices=self.gripper_proximal_dof_idxs)
def _check_lift_success(self, height_threshold):
prop_pos, _ = self._props.get_world_poses()
prop_pos -= self._env_pos
# check z direction range
lift_success = torch.where(
prop_pos[:, 2] > height_threshold,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return lift_success
def _check_place_success(self, place_threshold):
table_pos = torch.tensor([1.5, -0.5, self._table_height/2], device=self._device)
prop_pos, _ = self._props.get_world_poses()
prop_pos -= self._env_pos
# check x direction range
place_success = torch.where(
prop_pos[:, 0] > table_pos[0]-place_threshold[0],
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device)
)
place_success = torch.where(
prop_pos[:, 0] < table_pos[0]+place_threshold[1],
place_success,
torch.zeros((self.num_envs,), device=self._device)
)
# check y direction range
place_success = torch.where(
prop_pos[:, 1] > table_pos[1]-place_threshold[1],
place_success,
torch.zeros((self.num_envs,), device=self._device)
)
place_success = torch.where(
prop_pos[:, 1] < table_pos[1]+place_threshold[1],
place_success,
torch.zeros((self.num_envs,), device=self._device)
)
return place_success
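# Editor's note (hedged): the chained torch.where calls above implement an
# axis-aligned box test around the target table. An equivalent, more compact
# sketch over the same tensors would be:
#     in_x = (prop_pos[:, 0] > table_pos[0]-place_threshold[0]) \
#          & (prop_pos[:, 0] < table_pos[0]+place_threshold[1])
#     in_y = (prop_pos[:, 1] > table_pos[1]-place_threshold[1]) \
#          & (prop_pos[:, 1] < table_pos[1]+place_threshold[1])
#     place_success = (in_x & in_y).float()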
def _check_robot_collisions(self):
# Check if the robot collided with an object
for obst_prim in self._tables._prim_paths:
match = re.search(r'\d+', obst_prim)
env_id = int(match.group())
raw_readings = self._contact_sensor_interface.get_contact_sensor_raw_data(obst_prim + "/Contact_Sensor")
if raw_readings.shape[0]:
for reading in raw_readings:
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body1"])):
self.is_collided[env_id] = True
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body0"])):
self.is_collided[env_id] = True
collide_penalty = torch.where(
self.is_collided == True,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return collide_penalty
def load_exp_dataset(self):
exp_actions = load_dataset('stacking_problem')
return torch.tensor(exp_actions, device=self._device) # (dataset_length, num_actions)
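# Editor's note (hedged): exp_actions is indexed by self.replay_count each
# step; columns [:8] hold the base+arm joint deltas replayed from the TAMP
# plan, and the last column encodes the gripper command (< -0.01 close,
# > 0.01 open), as consumed in pre_physics_step / post_physics_step above.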
| 30,080 |
Python
| 47.674757 | 209 | 0.627427 |
makolon/hsr_isaac_tamp/hsr_rl/tasks/residual/hsr_residual_lift.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from hsr_rl.tasks.base.rl_task import RLTask
from hsr_rl.robots.articulations.hsr import HSR
from hsr_rl.robots.articulations.views.hsr_view import HSRView
from hsr_rl.utils.dataset_utils import load_dataset
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.prims.rigid_prim_view import RigidPrimView
from omni.isaac.core.prims.geometry_prim_view import GeometryPrimView
from omni.isaac.core.articulations.articulation_view import ArticulationView
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.stage import print_stage_prim_paths
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.objects import DynamicCuboid, FixedCuboid
from omni.isaac.sensor import _sensor
import re
import torch
from pxr import Usd, UsdGeom
class HSRResidualLiftTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._device = self._cfg["sim_device"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._dt = torch.tensor(self._task_cfg["sim"]["dt"] * self._task_cfg["env"]["controlFrequencyInv"], device=self._device)
self._num_observations = self._task_cfg["env"]["num_observations"]
self._num_actions = self._task_cfg["env"]["num_actions"]
self._num_props = self._task_cfg["env"]["numProps"]
self._table_height = 0.12
self._table_width = 0.65
self._table_depth = 1.2
self._table_size = 1.0
self._prop_size = 0.04
self._pick_success = 0.15
self._hsr_position = torch.tensor([0.0, 0.0, 0.03], device=self._device)
self._hsr_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._table_position = torch.tensor([1.5, 0.0, self._table_height/2], device=self._device)
self._table_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._prop_position = torch.tensor([1.275, 0.0, self._table_height+self._prop_size/2], device=self._device)
self._prop_rotation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
# Start at 'home' positions
self.torso_start = torch.tensor([0.1], device=self._device)
self.base_start = torch.tensor([0.0, 0.0, 0.0], device=self._device)
self.arm_start = torch.tensor([0.1, -1.570796, 0.0, -0.392699, 0.0], device=self._device)
self.gripper_proximal_start = torch.tensor([0.75, 0.75], device=self._device)
self.initial_dof_positions = torch.tensor([0.0, 0.0, 0.0, 0.1, 0.1, -1.570796, 0.0, 0.0, 0.0, -0.392699, 0.0, 0.75, 0.75, 0.0, 0.0], device=self._device)
# Joint & body names
self._torso_joint_name = ["torso_lift_joint"]
self._base_joint_names = ["joint_x", "joint_y", "joint_rz"]
self._arm_names = ["arm_lift_joint", "arm_flex_joint", "arm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
self._gripper_proximal_names = ["hand_l_proximal_joint", "hand_r_proximal_joint"]
# Values are set in post_reset after model is loaded
self.torso_dof_idx = []
self.base_dof_idxs = []
self.arm_dof_idxs = []
self.gripper_proximal_dof_idxs = []
# Dof joint position limits
self.torso_dof_lower = []
self.torso_dof_upper = []
self.base_dof_lower = []
self.base_dof_upper = []
self.arm_dof_lower = []
self.arm_dof_upper = []
self.gripper_p_dof_lower = []
self.gripper_p_dof_upper = []
# Add contact sensor
self._contact_sensor_interface = _sensor.acquire_contact_sensor_interface()
self.is_collided = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.is_success = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.exp_actions = self.load_exp_dataset()
RLTask.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.add_hsr()
self.add_prop()
self.add_table()
self.add_cover()
# Set up scene
super().set_up_scene(scene)
# Add robot to scene
self._robots = HSRView(prim_paths_expr="/World/envs/.*/hsrb", name="hsrb_view")
scene.add(self._robots)
scene.add(self._robots._hands)
scene.add(self._robots._lfingers)
scene.add(self._robots._rfingers)
scene.add(self._robots._fingertip_centered)
# Add prop to scene
self._props = RigidPrimView(prim_paths_expr="/World/envs/.*/prop", name="prop_view", reset_xform_properties=False)
scene.add(self._props)
def add_hsr(self):
hsrb = HSR(prim_path=self.default_zero_env_path + "/hsrb",
name="hsrb",
translation=self._hsr_position,
orientation=self._hsr_rotation)
self._sim_config.apply_articulation_settings("hsrb", get_prim_at_path(hsrb.prim_path), self._sim_config.parse_actor_config("hsrb"))
def add_prop(self):
prop = DynamicCuboid(prim_path=self.default_zero_env_path + "/prop",
name="prop",
translation=self._prop_position,
orientation=self._prop_rotation,
size=self._prop_size,
color=torch.tensor([0.2, 0.4, 0.6]),
mass=0.1,
density=100.0)
self._sim_config.apply_articulation_settings("prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop"))
def add_table(self):
table = FixedCuboid(prim_path=self.default_zero_env_path + "/table",
name="table",
translation=self._table_position,
orientation=self._table_rotation,
size=self._table_size,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([self._table_width, self._table_depth, self._table_height]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table"))
def add_cover(self):
cover_0 = FixedCuboid(prim_path=self.default_zero_env_path + "/cover_0",
name="cover_0",
translation=torch.tensor([1.825, 0.0, 0.2], device=self._device),
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device),
size=1.0,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([0.05, 1.2, 0.15]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(cover_0.prim_path), self._sim_config.parse_actor_config("table"))
cover_1 = FixedCuboid(prim_path=self.default_zero_env_path + "/cover_1",
name="cover_1",
translation=torch.tensor([1.5, 0.6, 0.2], device=self._device),
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device),
size=1.0,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([0.65, 0.05, 0.15]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(cover_1.prim_path), self._sim_config.parse_actor_config("table"))
cover_2 = FixedCuboid(prim_path=self.default_zero_env_path + "/cover_2",
name="cover_2",
translation=torch.tensor([1.5, -0.6, 0.2], device=self._device),
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device),
size=1.0,
color=torch.tensor([0.75, 0.75, 0.75]),
scale=torch.tensor([0.65, 0.05, 0.15]))
self._sim_config.apply_articulation_settings("table", get_prim_at_path(cover_2.prim_path), self._sim_config.parse_actor_config("table"))
def get_observations(self):
# Get prop positions and orientations
prop_positions, prop_orientations = self._props.get_world_poses(clone=False)
prop_positions = prop_positions[:, 0:3] - self._env_pos
# Get prop velocities
prop_velocities = self._props.get_velocities(clone=False)
prop_linvels = prop_velocities[:, 0:3]
prop_angvels = prop_velocities[:, 3:6]
# Get end effector positions and orientations
end_effector_positions, end_effector_orientations = self._robots._fingertip_centered.get_world_poses(clone=False)
end_effector_positions = end_effector_positions[:, 0:3] - self._env_pos
# Get end effector velocities
end_effector_velocities = self._robots._fingertip_centered.get_velocities(clone=False)
end_effector_linvels = end_effector_velocities[:, 0:3]
end_effector_angvels = end_effector_velocities[:, 3:6]
self.prop_positions = prop_positions
self.prop_linvels = prop_linvels
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
self.obs_buf[..., 0:8] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 8:16] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 16:19] = end_effector_positions
self.obs_buf[..., 19:23] = end_effector_orientations
self.obs_buf[..., 23:26] = end_effector_linvels
self.obs_buf[..., 26:29] = end_effector_angvels
self.obs_buf[..., 29:32] = prop_positions
self.obs_buf[..., 32:36] = prop_orientations
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Update position targets from actions
self.dof_position_targets[..., self.actuated_dof_indices] = self._robots.get_joint_positions(joint_indices=self.actuated_dof_indices)
self.dof_position_targets[..., self.actuated_dof_indices] += self.exp_actions[self.replay_count, :8]
self.dof_position_targets[..., self.actuated_dof_indices] += self._dt * self._action_speed_scale * actions.to(self.device)
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits
)
# Modify torso joint positions
dof_pos = self._robots.get_joint_positions()
arm_pos = dof_pos[:, self.arm_dof_idxs]
scaled_arm_lift_pos = arm_pos[:, 0] / self.arm_dof_upper[0]
scaled_torso_lift_pos = scaled_arm_lift_pos * self.torso_dof_upper[0]
self.dof_position_targets[:, self.torso_dof_idx] = scaled_torso_lift_pos.unsqueeze(dim=1)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = self.initial_dof_positions
self._robots.set_joint_position_targets(self.dof_position_targets)
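# Editor's note (hedged): the coupling above keeps the HSR torso lift
# proportional to the arm lift joint, i.e.
#     torso_target = (arm_lift_pos / arm_lift_upper) * torso_upper
# so e.g. an arm lift at 40% of its range drives the torso target to 40%
# of the torso's own range.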
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = 0.0 # min horizontal dist from origin
max_d = 0.0 # max horizontal dist from origin
min_height = 0.0
max_height = 0.0
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
# Prop pos / rot, velocities
self.prop_pos = self.initial_prop_pos.clone()
self.prop_rot = self.initial_prop_rot.clone()
# position
self.prop_pos[env_ids_64, 0:2] += hpos[..., 0:2]
self.prop_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
self.prop_rot[env_ids_64, 0] = 1
self.prop_rot[env_ids_64, 1:] = 0
# reset root state for props in selected envs
self._props.set_world_poses(self.prop_pos[env_ids_64], self.prop_rot[env_ids_64], indices=env_ids_32)
# reset root state for robots in selected envs
self._robots.set_world_poses(self.initial_robot_pos[env_ids_64], self.initial_robot_rot[env_ids_64], indices=env_ids_32)
# reset DOF states for robots in selected envs
self._robots.set_joint_position_targets(self.initial_dof_positions, indices=env_ids_32)
# bookkeeping
self.gripper_close = False
self.gripper_hold = False
self.replay_count = 0
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.extras[env_ids] = 0
self.is_collided[env_ids] = 0
def post_reset(self):
self.set_dof_idxs()
self.set_dof_limits()
self.set_default_state()
# reset prop pos / rot, and velocities
self.initial_robot_pos, self.initial_robot_rot = self._robots.get_world_poses()
self.initial_robot_velocities = self._robots.get_velocities()
# reset prop pos / rot, and velocities
self.initial_prop_pos, self.initial_prop_rot = self._props.get_world_poses()
self.initial_prop_velocities = self._props.get_velocities()
def calculate_metrics(self) -> None:
# Distance from hand to the prop
dist = torch.norm(self.obs_buf[..., 29:32] - self.obs_buf[..., 16:19], p=2, dim=-1)
dist_reward = 1.0 / (1.0 + dist ** 2)
dist_reward *= dist_reward
dist_reward = torch.where(dist <= 0.02, dist_reward * 2, dist_reward)
self.rew_buf[:] = dist_reward * self._task_cfg['rl']['distance_scale']
# In this policy, episode length is constant across all envs
is_last_step = (self.progress_buf[0] == self.exp_actions.size()[0] - 1)
if is_last_step:
# Check if block is picked up and above table
lift_success = self._check_lift_success(height_threshold=self._pick_success)
self.rew_buf[:] += lift_success * self._task_cfg['rl']['success_bonus']
self.extras['successes'] = torch.mean(lift_success.float())
def is_done(self) -> None:
self.reset_buf = torch.where(
self.progress_buf[:] >= self.exp_actions.size()[0] - 1,
torch.ones_like(self.reset_buf),
self.reset_buf
)
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.replay_count += 1
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
self.gripper_close = bool(self.exp_actions[self.replay_count, -1] < -0.01)
if self.gripper_close:
self._close_gripper(sim_steps=self._task_cfg['env']['num_gripper_close_sim_steps'])
if self.gripper_hold:
self._hold_gripper()
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def set_dof_idxs(self):
self.torso_dof_idx = [self._robots.get_dof_index(name) for name in self._torso_joint_name]
self.base_dof_idxs = [self._robots.get_dof_index(name) for name in self._base_joint_names]
self.arm_dof_idxs = [self._robots.get_dof_index(name) for name in self._arm_names]
self.gripper_proximal_dof_idxs = [self._robots.get_dof_index(name) for name in self._gripper_proximal_names]
# Movable joints
self.actuated_dof_indices = torch.LongTensor(self.base_dof_idxs+self.arm_dof_idxs).to(self._device) # torch.LongTensor([0, 1, 2, 3, 5, 7, 9, 10]).to(self._device)
def set_dof_limits(self): # dof position limits
# (num_envs, num_dofs, 2)
dof_limits = self._robots.get_dof_limits()
dof_limits_lower = dof_limits[0, :, 0].to(self._device)
dof_limits_upper = dof_limits[0, :, 1].to(self._device)
# Set relevant joint position limit values
self.torso_dof_lower = dof_limits_lower[self.torso_dof_idx]
self.torso_dof_upper = dof_limits_upper[self.torso_dof_idx]
self.base_dof_lower = dof_limits_lower[self.base_dof_idxs]
self.base_dof_upper = dof_limits_upper[self.base_dof_idxs]
self.arm_dof_lower = dof_limits_lower[self.arm_dof_idxs]
self.arm_dof_upper = dof_limits_upper[self.arm_dof_idxs]
self.gripper_p_dof_lower = dof_limits_lower[self.gripper_proximal_dof_idxs]
self.gripper_p_dof_upper = dof_limits_upper[self.gripper_proximal_dof_idxs]
self.robot_dof_lower_limits, self.robot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
def set_default_state(self):
# Set default joint state
joint_states = self._robots.get_joints_default_state()
jt_pos = joint_states.positions
jt_pos[:, self.torso_dof_idx] = self.torso_start
jt_pos[:, self.base_dof_idxs] = self.base_start
jt_pos[:, self.arm_dof_idxs] = self.arm_start
jt_pos[:, self.gripper_proximal_dof_idxs] = self.gripper_proximal_start
jt_vel = joint_states.velocities
jt_vel[:, self.torso_dof_idx] = torch.zeros_like(self.torso_start, device=self._device)
jt_vel[:, self.base_dof_idxs] = torch.zeros_like(self.base_start, device=self._device)
jt_vel[:, self.arm_dof_idxs] = torch.zeros_like(self.arm_start, device=self._device)
jt_vel[:, self.gripper_proximal_dof_idxs] = torch.zeros_like(self.gripper_proximal_start, device=self._device)
self._robots.set_joints_default_state(positions=jt_pos, velocities=jt_vel)
# Initialize target positions
self.dof_position_targets = jt_pos
def _close_gripper(self, sim_steps=10):
gripper_dof_pos = torch.tensor([-0.75, -0.75], device=self._device)
# Step sim
for _ in range(sim_steps):
self._robots.set_joint_position_targets(gripper_dof_pos, joint_indices=self.gripper_proximal_dof_idxs)
SimulationContext.step(self._env._world, render=True)
self.dof_position_targets[:, self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(joint_indices=self.gripper_proximal_dof_idxs)
self.gripper_hold = True
def _hold_gripper(self):
gripper_dof_pos = torch.tensor([-0.7980, -0.7980], device=self._device)
self._robots.set_joint_position_targets(gripper_dof_pos, joint_indices=self.gripper_proximal_dof_idxs)
self.dof_position_targets[:, self.gripper_proximal_dof_idxs] = self._robots.get_joint_positions(joint_indices=self.gripper_proximal_dof_idxs)
def _check_lift_success(self, height_threshold):
prop_pos, prop_rot = self._props.get_world_poses()
lift_success = torch.where(
prop_pos[:, 2] > height_threshold,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return lift_success
def _check_robot_collisions(self):
# Check if the robot collided with an object
for obst_prim in self._tables._prim_paths:
match = re.search(r'\d+', obst_prim)
env_id = int(match.group())
raw_readings = self._contact_sensor_interface.get_contact_sensor_raw_data(obst_prim + "/Contact_Sensor")
if raw_readings.shape[0]:
for reading in raw_readings:
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body1"])):
self.is_collided[env_id] = True
if "hsrb" in str(self._contact_sensor_interface.decode_body_name(reading["body0"])):
self.is_collided[env_id] = True
collide_penalty = torch.where(
self.is_collided == True,
torch.ones((self.num_envs,), device=self._device),
torch.zeros((self.num_envs,), device=self._device))
return collide_penalty
def load_exp_dataset(self):
exp_actions = load_dataset('holding_problem')
return torch.tensor(exp_actions, device=self._device) # (dataset_length, num_actions)
| 22,747 |
Python
| 47.606838 | 170 | 0.618016 |
makolon/hsr_isaac_tamp/hsr_rl/cfg/config.yaml
|
# Task name - used to pick the class to load
task_name: ${task.name}
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs: 256
# seed - set to -1 to choose random seed
seed: -1
# set to True for deterministic performance
torch_deterministic: False
# (Optional) Use the isaac sim configuration from file
sim_app_cfg_path: "" # "/isaac_app_configs/omni.isaac.sim.python.kit"
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: 1500
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu' # 'cpu' or 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu' # 'cpu' or 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0' #'cpu' or 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 1 # 0: pgs, 1: tgs
# RL Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# enable/disable headless mode and rendering (in pygame window)
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 30
# Wandb settings (optional)
wandb_activate: True
wandb_group: ''
wandb_name: ${train.params.config.name}
wandb_entity: ''
wandb_project: 'tamp-hsr'
# set default task and default training config based on task
defaults:
- task: HSRExample
- train: ${task}PPO
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
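# Editor's note (hedged): with this Hydra layout, settings are usually
# overridden from the command line; the script name below is a placeholder.
#   python train.py task=HSRResidualLift num_envs=128 headless=True test=True \
#       checkpoint=/path/to/checkpoint.pth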
| 1,842 |
YAML
| 26.924242 | 103 | 0.736699 |
makolon/hsr_isaac_tamp/hsr_rl/cfg/task/HSRResidualLift.yaml
|
# used to create the object
name: HSRResidualLift
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5.0
resetDist: 1.0
maxEffort: 400.0
controlFrequencyInv: 2 # 60 Hz
maxEpisodeLength: 600
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 1
aggregateMode: 3
gamma: 0.999
horizon: 600
move_group: "whole_body"
randomize_robot_on_reset: False
num_actions: 8 # base and arm
num_observations: 36 # observation space
num_gripper_open_sim_steps: 1 # number of timesteps to reserve for opening gripper before first step of episode
num_gripper_close_sim_steps: 1 # number of timesteps to reserve for closing gripper after last step of episode
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
disable_gravity: False
add_ground_plane: True
add_distant_light: True
use_flatcache: True
enable_scene_query_support: True
disable_contact_processing: False
enable_cameras: False # set to True if you use camera sensors in the environment
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 32
solver_velocity_iteration_count: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.01
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 5.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 17060160 # 524288
gpu_found_lost_aggregate_pairs_capacity: 17060160 # 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
# sim asset configs here
hsrb:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 32
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.005
# per-body
density: -1
max_depenetration_velocity: 5.0
# per-shape
contact_offset: 0.005
rest_offset: 0.7e-5
prop:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 32
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.005
# per-body
density: -1
max_depenetration_velocity: 5.0
# per-shape
contact_offset: 0.005
rest_offset: 0.7e-5
table:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 32
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.005
# per-body
density: 1000
max_depenetration_velocity: 5.0
# per-shape
contact_offset: 0.005
rest_offset: 0.7e-5
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
clamp_rot: True
clamp_rot_thresh: 1.0e-6
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 100 # 200
    distance_scale: 0.1 # scale on distance-based reward
    success_bonus: 5.0 # bonus once the object has been lifted
collision_penalty: 0.5 # collision penalty
| 4,315 |
YAML
| 28.360544 | 113 | 0.692236 |
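The timing fields in this task compose as follows; a quick sanity check, assuming `maxEpisodeLength` counts 60 Hz control steps rather than raw physics steps:

```python
dt = 1.0 / 120.0                    # sim.dt (rounded to 0.0083 in the file)
control_freq_inv = 2                # controlFrequencyInv
control_hz = 1.0 / (dt * control_freq_inv)
print(control_hz)                   # -> 60.0, matching the "# 60 Hz" comment
episode_seconds = 600 * dt * control_freq_inv   # maxEpisodeLength: 600
print(episode_seconds)              # -> 10.0 s of simulated time per episode
```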
makolon/hsr_isaac_tamp/hsr_rl/cfg/task/HSRExampleInsert.yaml
|
# used to create the object
name: HSRExampleInsert
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5.0
resetDist: 1.0
maxEffort: 400.0
controlFrequencyInv: 2 # 60 Hz
maxEpisodeLength: 200
actionSpeedScale: 1
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 1
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
gamma: 0.999
horizon: 600
move_group: "whole_body"
randomize_robot_on_reset: False
num_actions: 8 # base and arm
num_observations: 36 # observation space
num_gripper_move_sim_steps: 40 # 20 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 50 # 25 # number of timesteps to reserve for closing gripper after last step of episode
num_gripper_lift_sim_steps: 60 # 25 # number of timesteps to reserve for lift after last step of episode
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
disable_gravity: False
add_ground_plane: True
add_distant_light: True
use_flatcache: True
enable_scene_query_support: True
disable_contact_processing: False
enable_cameras: False # set to True if you use camera sensors in the environment
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.01
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 17060160 # 524288
gpu_found_lost_aggregate_pairs_capacity: 17060160 # 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
# sim asset configs here
hsrb:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
prop:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 100
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
table:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 100
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
clamp_rot: True
clamp_rot_thresh: 1.0e-6
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 100 # 200
    success_bonus: 1.0 # bonus on successful insertion
collision_penalty: 0.5 # collision penalty
| 4,575 |
YAML
| 28.714286 | 118 | 0.699672 |
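The `rl` block in these task files parameterizes a Factory-style keypoint reward: keypoints are laid out along a short line attached to each of the two frames being aligned, and the mean keypoint distance is penalized. A hedged sketch; the repo's exact keypoint placement, frames, and quaternion convention are assumptions:

```python
import torch

def quat_rotate(q, v):
    """Rotate vectors v (N, 3) by unit quaternions q (N, 4); w-first convention assumed."""
    w, u = q[:, :1], q[:, 1:]
    t = 2.0 * torch.cross(u, v, dim=-1)
    return v + w * t + torch.cross(u, t, dim=-1)

def keypoint_reward(pos_a, quat_a, pos_b, quat_b,
                    num_keypoints=4, keypoint_scale=0.5, keypoint_reward_scale=1.0):
    dist = 0.0
    # keypoints spaced along each frame's local z-axis over `keypoint_scale` metres
    for k in torch.linspace(0.0, keypoint_scale, num_keypoints):
        offset = torch.tensor([0.0, 0.0, float(k)]).expand_as(pos_a)
        kp_a = pos_a + quat_rotate(quat_a, offset)
        kp_b = pos_b + quat_rotate(quat_b, offset)
        dist = dist + (kp_a - kp_b).norm(dim=-1)
    return -keypoint_reward_scale * dist / num_keypoints
```

Because the keypoints are rotated into each frame, the penalty captures orientation error as well as position error, which is why `num_keypoints` and `keypoint_scale` appear alongside the reward scales.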
makolon/hsr_isaac_tamp/hsr_rl/cfg/task/HSRExampleReach.yaml
|
# used to create the object
name: HSRExampleReach
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5.0
resetDist: 1.0
maxEffort: 400.0
controlFrequencyInv: 2 # 60 Hz
maxEpisodeLength: 600
actionSpeedScale: 1
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 4
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
gamma: 0.999
horizon: 600
# Move group to be used
  move_group: "whole_body" # String; the HSR tasks here drive the whole body
use_gripper: True
randomize_robot_on_reset: False
# Set custom state and action space limits
  max_rot_vel: 0.5236 # 1.0472 # in radians per second (0.5236 rad/s ≈ 30 deg/s; 1.0472 rad/s ≈ 60 deg/s)
max_base_xy_vel: 0.1 # metres per second
  max_base_rz_vel: 0.5236 # radians per second
# Hybrid action space
num_actions: 8 # base and arm
# Observation space
num_observations: 28
sim:
dt: 0.0083 # 1/120 s
action_base_scale: 0.05
action_arm_scale: 0.05
action_gripper_scale: 0.05
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: True
use_flatcache: True
enable_scene_query_support: True
disable_contact_processing: False
enable_cameras: False # set to True if you use camera sensors in the environment
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"}
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
contact_offset: 0.005
rest_offset: 0.001
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 17060160 # 524288
gpu_found_lost_aggregate_pairs_capacity: 17060160 # 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
# sim asset configs here
hsrb:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1.0
# per-shape
contact_offset: 0.02
rest_offset: 0.001
ball:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 1
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
| 3,593 |
YAML
| 26.646154 | 82 | 0.697189 |
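The reach config pairs per-group action scales with explicit velocity caps. A hedged sketch of how one policy action could be shaped by those numbers; the actual controller mapping in the repo is an assumption:

```python
import numpy as np

def shape_action(raw_action, dt=1.0 / 120.0, control_freq_inv=2):
    a = np.clip(raw_action, -1.0, 1.0)        # clipActions: 1.0
    base, arm = a[:3] * 0.05, a[3:] * 0.05    # action_base_scale / action_arm_scale
    step_dt = dt * control_freq_inv           # one 60 Hz control step
    # cap the implied base displacement per step using the velocity limits above
    base[:2] = np.clip(base[:2], -0.1 * step_dt, 0.1 * step_dt)       # max_base_xy_vel
    base[2] = np.clip(base[2], -0.5236 * step_dt, 0.5236 * step_dt)   # max_base_rz_vel
    return base, arm

base, arm = shape_action(np.ones(8))   # num_actions: 8 (3 base + 5 arm, assumed split)
```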
makolon/hsr_isaac_tamp/hsr_rl/cfg/task/HSRExampleCabinet.yaml
|
# used to create the object
name: HSRExampleCabinet
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5.0
episodeLength: 500
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
controlFrequencyInv: 2 # 60 Hz
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 4
aggregateMode: 3
actionScale: 3.0
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
add_ground_plane: True
add_distant_light: True
use_flatcache: True
enable_scene_query_support: True
disable_contact_processing: False
# set to True if you use camera sensors in the environment
enable_cameras: False
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"}
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.04
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 524288
gpu_found_lost_aggregate_pairs_capacity: 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
# sim asset configs here
hsrb:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
cabinet:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.0
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
prop:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 100
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
| 3,286 |
YAML
| 25.296 | 60 | 0.696287 |
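The physx blocks pull shared settings from the root config with multi-dot relative interpolation: `worker_thread_count: ${....num_threads}` climbs four levels (physx → sim → task → root). A minimal demo of that resolution:

```python
from omegaconf import OmegaConf

# Each extra leading dot moves one level up the config tree.
cfg = OmegaConf.create({
    "num_threads": 4,
    "task": {"sim": {"physx": {"worker_thread_count": "${....num_threads}"}}},
})
print(cfg.task.sim.physx.worker_thread_count)  # -> 4
```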
makolon/hsr_isaac_tamp/hsr_rl/cfg/task/HSRExamplePlace.yaml
|
# used to create the object
name: HSRExamplePlace
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:4096,${...num_envs}}
envSpacing: 5.0
resetDist: 1.0
maxEffort: 400.0
controlFrequencyInv: 2 # 60 Hz
maxEpisodeLength: 200
actionSpeedScale: 1
enableDebugVis: False
clipObservations: 5.0
clipActions: 1.0
startPositionNoise: 0.0
startRotationNoise: 0.0
numProps: 1
aggregateMode: 3
actionScale: 7.5
dofVelocityScale: 0.1
distRewardScale: 2.0
rotRewardScale: 0.5
aroundHandleRewardScale: 10.0
openRewardScale: 7.5
fingerDistRewardScale: 100.0
actionPenaltyScale: 0.01
fingerCloseRewardScale: 10.0
gamma: 0.999
horizon: 600
move_group: "whole_body"
randomize_robot_on_reset: False
num_actions: 8 # base and arm
num_observations: 36 # observation space
num_gripper_move_sim_steps: 40 # 20 # number of timesteps to reserve for moving gripper before first step of episode
num_gripper_close_sim_steps: 50 # 25 # number of timesteps to reserve for closing gripper after last step of episode
num_gripper_lift_sim_steps: 60 # 25 # number of timesteps to reserve for lift after last step of episode
sim:
dt: 0.0083 # 1/120 s
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
disable_gravity: False
add_ground_plane: True
add_distant_light: True
use_flatcache: True
enable_scene_query_support: True
disable_contact_processing: False
enable_cameras: False # set to True if you use camera sensors in the environment
default_physics_material:
static_friction: 1.0
dynamic_friction: 1.0
restitution: 0.0
physx:
worker_thread_count: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
contact_offset: 0.005
rest_offset: 0.0
bounce_threshold_velocity: 0.2
friction_offset_threshold: 0.01
friction_correlation_distance: 0.025
enable_sleeping: True
enable_stabilization: True
max_depenetration_velocity: 1000.0
# GPU buffers
gpu_max_rigid_contact_count: 524288
gpu_max_rigid_patch_count: 33554432
gpu_found_lost_pairs_capacity: 17060160 # 524288
gpu_found_lost_aggregate_pairs_capacity: 17060160 # 262144
gpu_total_aggregate_pairs_capacity: 1048576
gpu_max_soft_body_contacts: 1048576
gpu_max_particle_contacts: 1048576
gpu_heap_capacity: 33554432
gpu_temp_buffer_capacity: 16777216
gpu_max_num_partitions: 8
# sim asset configs here
hsrb:
# -1 to use default values
override_usd_defaults: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: -1
max_depenetration_velocity: 1000.0
prop:
# -1 to use default values
override_usd_defaults: False
make_kinematic: False
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 100
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
table:
# -1 to use default values
override_usd_defaults: False
make_kinematic: True
enable_self_collisions: False
enable_gyroscopic_forces: True
# also in stage params
# per-actor
solver_position_iteration_count: 12
solver_velocity_iteration_count: 1
sleep_threshold: 0.005
stabilization_threshold: 0.001
# per-body
density: 100
max_depenetration_velocity: 1000.0
# per-shape
contact_offset: 0.005
rest_offset: 0.0
rl:
pos_action_scale: [0.1, 0.1, 0.1]
rot_action_scale: [0.1, 0.1, 0.1]
force_action_scale: [1.0, 1.0, 1.0]
torque_action_scale: [1.0, 1.0, 1.0]
clamp_rot: True
clamp_rot_thresh: 1.0e-6
num_keypoints: 4 # number of keypoints used in reward
keypoint_scale: 0.5 # length of line of keypoints
keypoint_reward_scale: 1.0 # scale on keypoint-based reward
action_penalty_scale: 0.0 # scale on action penalty
max_episode_length: 100 # 200
    success_bonus: 1.0 # bonus on successful placement
collision_penalty: 0.5 # collision penalty
| 4,574 |
YAML
| 28.707792 | 118 | 0.699606 |
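The place task's `rl` scales suggest a reward that combines the keypoint term with a success bonus and a collision penalty. A hedged sketch using only the scales defined above; the actual composition in the repo is an assumption:

```python
import torch

def place_reward(keypoint_dist, action_norm_sq, success, collided):
    # scale values copied from the rl block above
    r = -1.0 * keypoint_dist          # keypoint_reward_scale: 1.0
    r = r - 0.0 * action_norm_sq      # action_penalty_scale: 0.0 (disabled)
    r = r + 1.0 * success.float()     # success_bonus: 1.0
    r = r - 0.5 * collided.float()    # collision_penalty: 0.5
    return r

r = place_reward(torch.tensor([0.2]), torch.tensor([0.0]),
                 torch.tensor([True]), torch.tensor([False]))
print(r)  # -> tensor([0.8000])
```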