parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/PACKAGE-LICENSES/libxml2-LICENSE.md
Except where otherwise noted in the source code (e.g. the files hash.c,
list.c and the trio files, which are covered by a similar licence but
with different Copyright notices) all the files are:
Copyright (C) 1998-2012 Daniel Veillard. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is fur-
nished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FIT-
NESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| 1,288 |
Markdown
| 55.043476 | 77 | 0.798137 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/config/extension.toml
|
[package]
title = "Asset Converter"
description = "The asset converter API for converting assets. It supports conversion between assets like OBJ, GLTF, FBX and USD."
version = "1.2.39"
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
category = "Utility"
repository = ""
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["NVIDIA"]
[core]
# Load at the start, load all schemas with order -100 (with order -1000 the USD libs are loaded)
order = -100
toggleable = false
[dependencies]
"omni.usd" = {}
"omni.client" = {}
"omni.usd.libs" = {}
[[python.module]]
name = "omni.kit.asset_converter"
# Add this to create package with platform and config information.
[[native.plugin]]
recursive = false
# Extension test settings
[[test]]
stdoutFailPatterns.include = []
args = [
"--/renderer/enabled=pxr",
"--/renderer/active=pxr",
"--/app/file/ignoreUnsavedOnExit=true",
"--/app/window/dpiScaleOverride=1.0",
"--/app/window/scaleToMonitor=false",
]
dependencies = [
"omni.hydra.pxr",
"omni.kit.window.viewport",
"omni.kit.window.content_browser",
"omni.kit.window.viewport",
"omni.kit.window.stage",
"omni.kit.widget.layers",
"omni.kit.property.bundle",
"omni.kit.window.console",
"omni.kit.window.status_bar",
"omni.kit.quicklayout", # arranges the windows correctly
"omni.kit.ui_test",
"omni.kit.test_suite.helpers",
]
stdoutFailPatterns.exclude = [
"*HydraRenderer failed to render this frame*", # Can drop a frame or two rendering with OpenGL interop
"*Cannot use omni.hydra.pxr without OpenGL interop*", # Linux TC configs with multi-GPU might not have OpenGL available
]
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/config/extension.gen.toml
[package]
[package.target]
platform = ["windows-x86_64"]
config = ["release"]
python = ["cp37"]
[package.publish]
date = 1666794213
kitVersion = "104.0+release.92238.1ca55f8d.tc"
buildNumber = "104.0+master.559.6933238c.tc"
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/omni/kit/asset_converter/tests/drag_drop_usd_stage.py
## Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
##
## NVIDIA CORPORATION and its licensors retain all intellectual property
## and proprietary rights in and to this software, related documentation
## and any modifications thereto. Any use, reproduction, disclosure or
## distribution of this software and related documentation without an express
## license agreement from NVIDIA CORPORATION is strictly prohibited.
##
from pathlib import Path
import inspect
import os
import platform
import unittest
import glob
import carb
import omni.kit.app
import omni.usd
from omni.kit.test.async_unittest import AsyncTestCase
from pxr import Sdf, UsdGeom, Usd
from omni.kit import ui_test
from omni.kit.test_suite.helpers import get_test_data_path, wait_stage_loading, set_content_browser_grid_view, get_content_browser_path_item, get_prims
class DragDropUsdStage(AsyncTestCase):
def verify_dragged_references(self, prim_name, file_path, prims):
if os.path.splitext(file_path)[1] in [".usd", ".usda", ".usdc", ".fbx", ".gltf", ".obj"]:
self.assertTrue(len(prims) >= 2)
self.assertTrue(prims[0].GetPath().pathString == f"/World/{prim_name}")
self.assertTrue(prims[0].GetPrimPath().pathString == f"/World/{prim_name}")
external_refs = omni.usd.get_composed_references_from_prim(prims[1])
self.assertTrue(len(external_refs) == 0)
external_refs = omni.usd.get_composed_references_from_prim(prims[0])
self.assertTrue(len(external_refs) >= 1)
prim_ref = external_refs[0][0]
self.assertTrue(prim_ref.customData == {})
self.assertTrue(prim_ref.layerOffset == Sdf.LayerOffset())
self.assertTrue(prim_ref.primPath == Sdf.Path())
self.assertTrue(prim_ref.assetPath.lower() == file_path.lower())
else:
self.assertTrue(len(prims) == 0)
async def create_stage(self):
await omni.usd.get_context().new_stage_async()
await wait_stage_loading()
# Create defaultPrim
usd_context = omni.usd.get_context()
settings = carb.settings.get_settings()
default_prim_name = settings.get("/persistent/app/stage/defaultPrimName")
rootname = f"/{default_prim_name}"
stage = usd_context.get_stage()
with Usd.EditContext(stage, stage.GetRootLayer()):
default_prim = UsdGeom.Xform.Define(stage, Sdf.Path(rootname)).GetPrim()
stage.SetDefaultPrim(default_prim)
stage_prims = get_prims(stage)
return stage, stage_prims
async def iter_prims_to_drag(self):
for path in glob.glob(get_test_data_path(__name__, "../*")):
prim_path = os.path.abspath(path).replace("\\", "/")
item = await get_content_browser_path_item(prim_path)
yield item, prim_path
async def test_l1_drag_drop_usd_stage(self):
if inspect.iscoroutinefunction(set_content_browser_grid_view):
await set_content_browser_grid_view(False)
else:
set_content_browser_grid_view(False)
await ui_test.find("Stage").focus()
await ui_test.find("Content").focus()
async for item, prim_path in self.iter_prims_to_drag():
if os.path.splitext(item.path)[1] not in [".usd", ".usda", ".usdc", ".fbx", ".gltf", ".obj"]:
continue
# create stage
stage, stage_prims = await self.create_stage()
# drag/drop
stage_tree = ui_test.find("Stage//Frame/**/ScrollingFrame/TreeView[*].visible==True")
await item.drag_and_drop(stage_tree.center)
await wait_stage_loading()
# verify
self.verify_dragged_references(Path(prim_path).stem, prim_path, get_prims(stage, stage_prims))
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/omni/kit/asset_converter/tests/test_asset_converter.py
import os
import carb.tokens
import omni.kit.test
import omni.kit.asset_converter
from pathlib import Path
from pxr import Sdf, Usd
# NOTE: those tests belong to omni.kit.asset_converter extension.
class TestAssetConverter(omni.kit.test.AsyncTestCaseFailOnLogError):
def get_test_dir(self):
token = carb.tokens.get_tokens_interface()
data_dir = token.resolve("${temp}")
return f"{data_dir}/asset_converter_tests"
async def setUp(self):
pass
async def tearDown(self):
await omni.client.delete_async(self.get_test_dir())
async def test_convert_assets_to_usd(self):
current_path = Path(__file__).parent
# invalid_mesh.obj includes an invalid mesh that has zero points
for file in ["cube.fbx", "cube.obj", "cube.gltf"]:
test_data_path = str(current_path.parent.parent.parent.parent.joinpath("data").joinpath(file))
converter_manager = omni.kit.asset_converter.get_instance()
context = omni.kit.asset_converter.AssetConverterContext()
context.keep_all_materials = True
context.merge_all_meshes = True
output_path = self.get_test_dir() + "/test.usd"
task = converter_manager.create_converter_task(test_data_path, output_path, None, context)
success = await task.wait_until_finished()
self.assertTrue(success, f"Failed to convert asset {file}.")
self.assertTrue(Path(output_path).is_file(), f"Failed to convert asset {file}.")
await omni.client.delete_async(self.get_test_dir())
async def test_convert_usd_to_other_formats(self):
current_path = Path(__file__).parent
test_data_path = str(current_path.parent.parent.parent.parent.joinpath("data").joinpath("cube.fbx"))
converter_manager = omni.kit.asset_converter.get_instance()
context = omni.kit.asset_converter.AssetConverterContext()
output_path = self.get_test_dir() + "/test.usd"
# Create the USD first
task = converter_manager.create_converter_task(test_data_path, output_path, None, context)
success = await task.wait_until_finished()
self.assertTrue(success)
self.assertTrue(Path(output_path).is_file())
for asset_name in ["test.fbx", "test.obj", "test.gltf"]:
asset_path = self.get_test_dir() + f"/{asset_name}"
task = converter_manager.create_converter_task(output_path, asset_path, None, context)
success = await task.wait_until_finished()
self.assertTrue(success)
self.assertTrue(Path(asset_path).is_file())
async def test_convert_assets_to_anonymous_layer(self):
layer = Sdf.Layer.CreateAnonymous()
current_path = Path(__file__).parent
for file in ["cube.fbx", "cube.obj", "cube.gltf"]:
test_data_path = str(current_path.parent.parent.parent.parent.joinpath("data").joinpath(file))
converter_manager = omni.kit.asset_converter.get_instance()
context = omni.kit.asset_converter.AssetConverterContext()
context.keep_all_materials = True
context.merge_all_meshes = True
context.baking_scales = True
task = converter_manager.create_converter_task(test_data_path, layer.identifier, None, context)
success = await task.wait_until_finished()
self.assertTrue(success, f"Failed to convert asset {file}.")
await omni.client.delete_async(self.get_test_dir())
async def test_open_non_usd(self):
current_path = Path(__file__).parent
for file in ["cube.fbx", "cube.obj", "cube.gltf"]:
test_data_path = str(current_path.parent.parent.parent.parent.joinpath("data").joinpath(file))
self.assertIsNotNone(Usd.Stage.Open(test_data_path))
async def test_overwrite_opened_stage(self):
current_path = Path(__file__).parent
output_path = self.get_test_dir() + "/test.usd"
opened = False
for i in range(10):
test_data_path = str(current_path.parent.parent.parent.parent.joinpath("data").joinpath("cube.fbx"))
converter_manager = omni.kit.asset_converter.get_instance()
context = omni.kit.asset_converter.AssetConverterContext()
task = converter_manager.create_converter_task(test_data_path, output_path, None, context, None, True)
success = await task.wait_until_finished()
self.assertTrue(success, "Failed to convert asset cube.fbx.")
# On the first round, open this stage so the next iteration overwrites an opened stage
if not opened:
await omni.usd.get_context().open_stage_async(output_path)
opened = True
await omni.client.delete_async(self.get_test_dir())
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/omni/kit/asset_converter/tests/__init__.py
from .test_asset_converter import *
try:
from .drag_drop_usd_layer import *
from .drag_drop_usd_stage import *
from .drag_drop_usd_viewport import *
except:
pass
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/omni/kit/asset_converter/impl/task_manager.py
import asyncio
import carb
import os
import omni.usd
from ..native_bindings import *
from .omni_client_wrapper import OmniClientWrapper
from .context import AssetConverterContext
class AssetConverterFutureWrapper:
def __init__(self, import_path, output_path, import_context, future, close_stage_and_reopen_if_opened):
super().__init__()
self._native_future = future
self._cancelled = False
self._import_path = import_path
self._output_path = output_path
self._import_context = import_context
self._is_finished = False
self._task_done_callbacks = []
self._close_stage_and_reopen_if_opened = close_stage_and_reopen_if_opened
self._error_message = ""
self._status = OmniConverterStatus.OK
def add_task_done_callback(self, callback):
if callback not in self._task_done_callbacks:
self._task_done_callbacks.append(callback)
def remove_task_done_callback(self, callback):
if callback in self._task_done_callbacks:
self._task_done_callbacks.remove(callback)
def cancel(self):
if not self._cancelled:
self._native_future.cancel()
self._cancelled = True
def is_cancelled(self):
return self._cancelled
def is_finished(self):
return self._is_finished
def get_status(self):
return self._status
def get_error_message(self):
return self._error_message
def _notify_finished(self):
for callback in self._task_done_callbacks:
callback()
async def wait_until_finished(self):
if self.is_cancelled() or self.is_finished():
self._notify_finished()
return not self.is_cancelled()
self._status = OmniConverterStatus.OK
self._error_message = ""
usd_context = omni.usd.get_context()
current_stage_url = usd_context.get_stage_url()
opened = os.path.normpath(self._output_path) == os.path.normpath(current_stage_url)
if not self._close_stage_and_reopen_if_opened and opened:
self._error_message = f"Failed to import {self._import_path} as USD since output path {self._output_path} is opened already."
carb.log_error(self._error_message)
self._status = OmniConverterStatus.UNKNOWN
return False
try:
if opened:
await usd_context.close_stage_async()
async with self._native_future as future:
status = future.get_status()
error = future.get_detailed_error()
if status != OmniConverterStatus.OK:
self._error_message = f"Couldn't Import Asset: {self._import_path}, code: {status}, error: {error}"
self._status = status
carb.log_error(self._error_message)
return False
if opened:
await usd_context.open_stage_async(self._output_path)
return True
finally:
self._is_finished = True
AssetConverterTaskManager._task_map.discard(self)
self._notify_finished()
return False
class AssetConverterTaskManager:
_thread_loop = None
_task_map = set()
@staticmethod
def on_startup():
AssetConverterTaskManager._thread_loop = None
token = carb.tokens.get_tokens_interface()
data_dir = token.resolve("${data}")
cache_dir = os.path.join(data_dir, "omniverse_asset_converter_cache")
cache_dir = cache_dir.replace("\\", "/")
carb.log_info(f"Initialize asset converter with cache folder {cache_dir}...")
OmniAssetConverter.set_cache_folder(cache_dir)
OmniAssetConverter.set_log_callback(AssetConverterTaskManager._log_print)
OmniAssetConverter.set_file_callback(
None,
AssetConverterTaskManager._asset_converter_binary_write,
None,
None,
None,
AssetConverterTaskManager._asset_converter_file_copy
)
@staticmethod
def on_shutdown():
for future_wrapper in AssetConverterTaskManager._task_map:
future_wrapper.cancel()
AssetConverterTaskManager._task_map.clear()
OmniAssetConverter.shutdown()
@staticmethod
def remove_task(task):
AssetConverterTaskManager._task_map.discard(task)
@staticmethod
def create_converter_task(
import_path: str,
output_path: str,
progress_callback,
asset_converter_context: AssetConverterContext = None,
material_loader=None,
close_stage_and_reopen_if_opened=False
):
# If no context is provided, create a default one.
if not asset_converter_context:
asset_converter_context = AssetConverterContext()
future = OmniAssetConverter(
import_path,
output_path,
progress_callback,
asset_converter_context.ignore_materials,
asset_converter_context.ignore_animations,
asset_converter_context.single_mesh,
asset_converter_context.smooth_normals,
asset_converter_context.ignore_camera,
asset_converter_context.export_preview_surface,
asset_converter_context.support_point_instancer,
False,
True,
asset_converter_context.use_meter_as_world_unit,
asset_converter_context.create_world_as_default_root_prim,
asset_converter_context.ignore_light,
asset_converter_context.embed_textures,
material_loader=material_loader,
convert_fbx_to_y_up=asset_converter_context.convert_fbx_to_y_up,
convert_fbx_to_z_up=asset_converter_context.convert_fbx_to_z_up,
keep_all_materials=asset_converter_context.keep_all_materials,
merge_all_meshes=asset_converter_context.merge_all_meshes,
use_double_precision_to_usd_transform_op=asset_converter_context.use_double_precision_to_usd_transform_op,
ignore_pivots=asset_converter_context.ignore_pivots,
disable_instancing=asset_converter_context.disabling_instancing,
export_hidden_props=asset_converter_context.export_hidden_props,
baking_scales=asset_converter_context.baking_scales
)
wrapper = AssetConverterFutureWrapper(import_path, output_path, asset_converter_context, future, close_stage_and_reopen_if_opened)
AssetConverterTaskManager._task_map.add(wrapper)
return wrapper
@staticmethod
def _get_thread_loop():
if not AssetConverterTaskManager._thread_loop:
AssetConverterTaskManager._thread_loop = asyncio.new_event_loop()
return AssetConverterTaskManager._thread_loop
@staticmethod
def _log_print(message):
carb.log_info(message)
@staticmethod
def _asset_converter_binary_write(path, data):
return AssetConverterTaskManager._get_thread_loop().run_until_complete(
OmniClientWrapper.write(path, bytearray(data))
)
@staticmethod
def _asset_converter_file_copy(target_path, source_path):
return AssetConverterTaskManager._get_thread_loop().run_until_complete(
OmniClientWrapper.copy(source_path, target_path)
)
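A brief usage sketch for the future wrapper above, assuming a Kit session with `omni.kit.asset_converter` enabled; the paths and callback are illustrative only, not part of the module:

```python
import omni.kit.asset_converter as converter

async def convert_with_notification(in_path, out_path):
    # create_converter_task returns an AssetConverterFutureWrapper.
    task = converter.get_instance().create_converter_task(in_path, out_path, None)
    task.add_task_done_callback(lambda: print("conversion task done"))
    # cancel() is cooperative: it only flags the native future, and
    # wait_until_finished() reports cancelled tasks by returning False.
    success = await task.wait_until_finished()
    if not success:
        print(task.get_status(), task.get_error_message())
    return success
```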
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/omni/kit/asset_converter/impl/extension.py
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
import os
import weakref
import omni.ext
from typing import Callable
from functools import partial
from .omni_client_wrapper import OmniClientWrapper
from .context import AssetConverterContext
from .task_manager import AssetConverterTaskManager, AssetConverterFutureWrapper
def get_instance():
global _global_instance
if _global_instance and _global_instance():
return _global_instance()
return None
class AssetImporterExtension(omni.ext.IExt):
def on_startup(self):
global _global_instance
_global_instance = weakref.ref(self)
AssetConverterTaskManager.on_startup()
def on_shutdown(self):
global _global_instance
_global_instance = None
AssetConverterTaskManager.on_shutdown()
def create_converter_task(
self,
import_path: str,
output_path: str,
progress_callback: Callable[[int], int] = None,
asset_converter_context: AssetConverterContext = AssetConverterContext(),
material_loader=None,
close_stage_and_reopen_if_opened: bool = False
):
"""
Creates task to convert import_path to output_path. Currently, it supports
to convert fbx/obj/glTF to USD, or USD to fbx/obj/glTF.
Snippet to use it:
>>> import asyncio
>>> import omni.kit.asset_converter as converter
>>>
>>> async def convert(...):
>>> task_manager = converter.get_instance()
>>> task = task_manager.create_converter_task(...)
>>> success = await task.wait_until_finished()
>>> if not success:
>>> detailed_status_code = task.get_status()
>>> detailed_status_error_string = task.get_error_message()
NOTE: It uses the FBX SDK for FBX conversion and Assimp as the fallback backend, so it should
support all assets that Assimp supports, but only OBJ/glTF are fully verified.
Args:
import_path (str): The source asset to be converted. It can also be a stage id that is
cached in UsdUtils.StageCache, since exporting a loaded stage is supported.
output_path (str): The target asset. Asset format is decided by its extension.
progress_callback(Callable[[int], int]): Progress callback to monitor the progress of
conversion. The first param is the progress, and
the second one is the total steps.
asset_converter_context (omni.kit.asset_converter.AssetConverterContext): Context.
material_loader (Callable[[omni.kit.asset_converter.native_bindings.MaterialDescription], None]): You
can set this to intercept the material loading.
close_stage_and_reopen_if_opened (bool): If this flag is true and the output path is already
open in the current UsdContext, it closes the current stage, runs the
import, and re-opens the stage after a successful import. Otherwise,
it returns False and reports errors.
"""
return AssetConverterTaskManager.create_converter_task(
import_path, output_path, progress_callback, asset_converter_context, material_loader, close_stage_and_reopen_if_opened
)
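A slightly fuller version of the docstring snippet above, as a hedged sketch: it assumes a Kit session with this extension enabled, and the asset paths are placeholders:

```python
import asyncio
import omni.kit.asset_converter as converter

def on_progress(current, total):
    # Invoked by the converter as the task advances.
    print(f"converted {current}/{total} steps")

async def convert(in_path="chair.fbx", out_path="chair.usd"):
    context = converter.AssetConverterContext()
    context.use_meter_as_world_unit = True  # one of the options documented in context.py
    task = converter.get_instance().create_converter_task(in_path, out_path, on_progress, context)
    if not await task.wait_until_finished():
        print(task.get_status(), task.get_error_message())

asyncio.ensure_future(convert())
```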
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/omni/kit/asset_converter/impl/__init__.py
from .extension import AssetImporterExtension, get_instance
from .omni_client_wrapper import OmniClientWrapper
from .context import *
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/omni/kit/asset_converter/impl/context.py
from typing import Callable
class AssetConverterContext:
ignore_materials = False # Don't import/export materials
ignore_animations = False # Don't import/export animations
ignore_camera = False # Don't import/export cameras
ignore_light = False # Don't import/export lights
single_mesh = False # By default, instanced props are exported as a single USD for referencing. If
# this flag is true, all props are exported into the same USD without instancing.
smooth_normals = True # Smooth normals; only used by the Assimp backend.
export_preview_surface = False # Imports material as UsdPreviewSurface instead of MDL for USD export.
support_point_instancer = False # Deprecated.
embed_mdl_in_usd = True # Deprecated.
use_meter_as_world_unit = False # Sets world units to meters; this also scales the asset if it is a centimeter model.
create_world_as_default_root_prim = True # Creates /World as the root prim for Kit needs.
embed_textures = True # Embeds textures into the output. Only enabled for FBX and glTF export.
convert_fbx_to_y_up = False # Always use Y-up for FBX import.
convert_fbx_to_z_up = False # Always use Z-up for FBX import.
keep_all_materials = False # Whether to keep materials that are not referenced by any mesh.
merge_all_meshes = False # Merges all meshes into a single one where possible.
use_double_precision_to_usd_transform_op = False # Uses double precision for all transform ops.
ignore_pivots = False # Don't export pivots if assets support that.
disabling_instancing = False # Don't export instanced assets with the instanceable flag.
export_hidden_props = False # By default, only visible props are exported by the USD exporter.
baking_scales = False # Only for FBX. Bakes scales into meshes.
def to_dict(self):
return {
"ignore_materials": self.ignore_materials,
"ignore_animations": self.ignore_animations,
"ignore_camera": self.ignore_camera,
"ignore_light": self.ignore_light,
"single_mesh": self.single_mesh,
"smooth_normals": self.smooth_normals,
"export_preview_surface": self.export_preview_surface,
"support_point_instancer": self.support_point_instancer,
"embed_mdl_in_usd": self.embed_mdl_in_usd,
"use_meter_as_world_unit": self.use_meter_as_world_unit,
"create_world_as_default_root_prim": self.create_world_as_default_root_prim,
"embed_textures": self.embed_textures,
"convert_fbx_to_y_up": self.convert_fbx_to_y_up,
"convert_fbx_to_z_up": self.convert_fbx_to_z_up,
"keep_all_materials": self.keep_all_materials,
"merge_all_meshes": self.merge_all_meshes,
"use_double_precision_to_usd_transform_op": self.use_double_precision_to_usd_transform_op,
"ignore_pivots": self.ignore_pivots,
"disabling_instancing": self.disabling_instancing,
"export_hidden_props": self.export_hidden_props,
"baking_scales" : self.baking_scales
}
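A minimal sketch of configuring this context; the flag names are the class attributes above, while the chosen values and the logging loop are illustrative. `to_dict()` is handy for recording the effective options alongside a conversion job:

```python
import omni.kit.asset_converter as converter

context = converter.AssetConverterContext()
context.ignore_animations = True   # skip animation import/export
context.merge_all_meshes = True    # collapse static meshes where possible
context.embed_textures = False     # keep textures as separate files

# Serialize the chosen options, e.g. for logging.
for key, value in context.to_dict().items():
    print(f"{key} = {value}")
```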
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/docs/CHANGELOG.md
# Changelog
## [1.2.39] - 2022-10-26
### Changed
- Update OmniverseAssetConverter library to 7.0.1303 to fix UV export of FBX when there are multiple subsets.
## [1.2.38] - 2022-10-11
### Changed
- Update OmniverseAssetConverter library to 7.0.1301 to avoid exporting a fallback color for OBJ import.
## [1.2.37] - 2022-10-07
### Changed
- Update OmniverseAssetConverter library to 7.0.1300 to fix a path resolution issue under Linux when referencing glTF directly.
## [1.2.36] - 2022-09-08
### Changed
- Add support for BVH file imports.
- Update OmniverseAssetConverter library to 7.0.1293.
## [1.2.35] - 2022-09-03
### Changed
- Update tests to pass for kit 104.
## [1.2.34] - 2022-09-01
### Changed
- Update OmniverseAssetConverter library to 7.0.1289.
- Fix export crash caused by invalid normal data from usd.
- Merge skeletons for glTF to have single skeleton.
## [1.2.33] - 2022-08-05
### Changed
- Update OmniverseAssetConverter library to 7.0.1286.
- Fix issue of material naming conflict during USD export.
- Export kind for root node during USD export.
- Fix crash of exporting USD into OBJ caused by mal-formed USD data.
## [1.2.32] - 2022-07-22
### Changed
- Update OmniverseAssetConverter library to 7.0.1270.
- Fix hang of USD plugin to reference glTF from server.
- Improve glTF light import/export.
## [1.2.31] - 2022-06-07
### Changed
- Update OmniverseAssetConverter library to 7.0.1258.
- OM-52881: Fix crashes in the importer caused by some glb files.
## [1.2.30] - 2022-05-19
### Changed
- Update OmniverseAssetConverter library to 7.0.1253.
- OM-51000: Support passing a file argument to specify meters as the world unit.
- Improve the file format plugin to import assets with original units instead of baking scaling.
## [1.2.29] - 2022-05-19
### Changed
- Update OmniverseAssetConverter library to 7.0.1250 to fix issue of converting assets to local path under linux.
## [1.2.28] - 2022-05-16
### Changed
- Update OmniverseAssetConverter library to 7.0.1245
- OM-50555: Fix FBX animation rotation
- OM-50991: Optimize UV export to fix textures not loading properly
## [1.2.27] - 2022-05-12
### Changed
- Update OmniverseAssetConverter library to 7.0.1237 to fix pdb name issue for assimp library.
## [1.2.26] - 2022-05-07
### Changed
- Fix tests to make sure it will not fail for 103.1 release.
## [1.2.25] - 2022-05-04
### Changed
- Update OmniverseAssetConverter library to 7.0.1236.
- OM-36894: Support import and export of FBX UV indices.
- OM-34328: Support exporting lights for glTF.
## [1.2.24] - 2022-04-19
### Changed
- Update OmniverseAssetConverter library to 7.0.1225.
- Fix naming issue of obj import if mesh names are empty.
- Fix color space setup for material loader.
- Fix geometric transform import for FBX.
## [1.2.23] - 2022-04-12
### Changed
- Update OmniverseAssetConverter library to 7.0.1219 to support the dissolve attribute of MTL for the OBJ importer.
## [1.2.22] - 2022-03-30
### Changed
- Bump version to update licensing build.
## [1.2.21] - 2022-03-29
### Changed
- Add an option to close the current stage during import, to avoid a multi-threading issue if the output path is already open.
## [1.2.20] - 2022-03-28
### Changed
- Update OmniverseAssetConverter library to 7.0.1201 to support option to bake scales for FBX import.
## [1.2.19] - 2022-03-09
### Changed
- Update OmniverseAssetConverter library to 7.0.1197 to improve pivots support for exporter.
- Fix issue of exporting USD with pivots to glTF/OBJ.
## [1.2.18] - 2022-03-08
### Changed
- Update OmniverseAssetConverter library to 7.0.1192 to support to control hidden props export for USD exporter.
## [1.2.17] - 2022-03-04
### Changed
- Update OmniverseAssetConverter library to 7.0.1191 to improve animation import for FBX.
- Fix issue of skeletal mesh import when skeleton is not attached to root node.
- Fix issue of skeletal mesh import if its joints are not provided, in which case global joints should be used instead.
- Fix crash of skeleton import if the skeleton is removed but skinning still exists.
- Fix possible naming conflict of nodes in the FBX importer.
- Support exporting all instances into a single USD for the DriveSim use case.
- Support options to disable scene instancing for the DriveSim use case.
## [1.2.16] - 2022-02-22
### Changed
- Update OmniverseAssetConverter library to 7.0.1171 to improve multiple animation tracks import/export.
## [1.2.15] - 2022-02-16
### Changed
- Update OmniverseAssetConverter library to 7.0.1161 to remove instancing flag if glTF/glb is opened directly with file format plugin so it could be editable.
## [1.2.14] - 2022-02-15
### Changed
- Update OmniverseAssetConverter library to 7.0.1159 to fix a crash of fbx importer that accesses invalid attributes.
## [1.2.13] - 2022-02-15
### Changed
- Update OmniverseAssetConverter library to 7.0.1157 to improve glTF loading performance through file format plugin.
## [1.2.12] - 2022-02-13
### Changed
- Update OmniverseAssetConverter library to 7.0.1150 to fix a FBX exporter crash that's caused by empty uv set.
## [1.2.11] - 2022-02-11
### Changed
- Update OmniverseAssetConverter library to 7.0.1149 to fix a FBX exporter issue that ignores props under root node.
## [1.2.10] - 2022-02-11
### Changed
- Fix API docs.
## [1.2.9] - 2022-02-10
### Changed
- Update OmniverseAssetConverter library to 7.0.1148 to fix a crash caused by exporting obj files without materials.
## [1.2.8] - 2022-02-08
### Changed
- Update OmniverseAssetConverter library to 7.0.1145 to fix a crash that's caused by invalid path that includes "%" symbols.
## [1.2.7] - 2022-02-07
### Changed
- Update OmniverseAssetConverter library to 7.0.1143 to improve USD exporter to exclude proxy and guide prims.
## [1.2.6] - 2022-02-07
### Changed
- Update OmniverseAssetConverter library to 7.0.1142 to fix glTF import.
- Uses default mime type based on extension name if it's not specified for textures.
- Fix transparent material import.
## [1.2.5] - 2022-01-11
### Changed
- Update OmniverseAssetConverter library to 7.0.1138 to fix a regression to import assets to OV.
## [1.2.4] - 2022-01-09
### Changed
- Update OmniverseAssetConverter library to 7.0.1136.
- Re-org skeletal animation.
- Fix transform issue of obj export.
- Improve export for FBX assets exported from Substance Painter to avoid exporting separate parts for the same mesh.
## [1.2.3] - 2021-12-30
### Changed
- Update OmniverseAssetConverter library to 7.0.1127 to export obj model as meshes instead of group of subsets so subsets can be pickable.
## [1.2.2] - 2021-12-29
### Changed
- Update OmniverseAssetConverter library to 7.0.1123 to use tinyobj for obj import.
## [1.2.1] - 2021-12-23
### Changed
- Update OmniverseAssetConverter library to 7.0.1117 to support override file copy to speed up copying file.
- More optimization to avoid redundant IO to speed up glTF import.
## [1.2.0] - 2021-12-16
### Changed
- Update OmniverseAssetConverter library to 7.0.1115 to improve exporter.
- Replace assimp with tinygltf for glTF import/export.
- Refactoring stage structure to improve animation export.
- Refactoring stage structure to support scene instancing.
- Lots of improvement and bugfixes.
## [1.1.43] - 2021-12-01
### Changed
- Update OmniverseAssetConverter library to 7.0.1061 to fix a rotation order issue of FBX import.
- Add option to control pivots generation.
- Use euler angles for rotation by default for FBX import.
## [1.1.42] - 2021-10-09
### Changed
- Update OmniverseAssetConverter library to 7.0.1041 to fix memory leak, and improve uv set import.
## [1.1.41] - 2021-10-08
### Changed
- Update OmniverseAssetConverter library to 7.0.1040 to fix opacity map export issue of FBX importer.
## [1.1.40] - 2021-10-06
### Changed
- Update OmniverseAssetConverter library to 7.0.1039 to improve exporter.
## [1.1.39] - 2021-09-30
### Changed
- Update initialization order to make sure format plugin loaded correctly.
## [1.1.38] - 2021-09-23
### Changed
- Update OmniverseAssetConverter library to 7.0.1024 to fix color space import of textures.
## [1.1.37] - 2021-09-22
### Changed
- Update OmniverseAssetConverter library to 7.0.1020 to improve exporter.
- Support importing/exporting glTF from/to UsdPreviewSurface and glTF MDL.
- Support exporting USDZ to glTF.
## [1.1.36] - 2021-09-12
### Changed
- Update OmniverseAssetConverter library to 7.0.1012 to integrate latest glTF MDL to support transmission/sheen/texture transform extensions.
## [1.1.35] - 2021-09-07
### Changed
- Update OmniverseAssetConverter library to 7.0.1007 to use translate/orient/scale for transform to fix interpolation issues of animation samples of assimp importer.
- Fix camera import of assimp importer.
- Improve and fix rigid/skeletal animation import for glTF.
## [1.1.34] - 2021-09-03
### Changed
- Update OmniverseAssetConverter library to 7.0.1002 to fix crash caused by invalid memory access.
## [1.1.33] - 2021-09-03
### Changed
- Update OmniverseAssetConverter library to 7.0.999 to improve glTF import.
- Fix material naming conflict for glTF import for USD exporter.
- Fix tuple property set for material loader.
## [1.1.32] - 2021-09-01
### Changed
- Update OmniverseAssetConverter library to 7.0.989 to reduce artifacts size for linux.
## [1.1.31] - 2021-09-01
### Changed
- Update OmniverseAssetConverter library to 7.0.988 to fix linux build caused by DRACO symbol conflict between assimp and USD.
## [1.1.30] - 2021-08-30
### Changed
- Update OmniverseAssetConverter library to 7.0.984 to support import material as UsdPreviewSurface.
- Enable support to import draco compressed meshes of glTF.
## [1.1.29] - 2021-08-09
### Changed
- Update OmniverseAssetConverter library to 7.0.962 to support export non-skinned skeleton.
## [1.1.28] - 2021-08-05
### Changed
- Update OmniverseAssetConverter library to 7.0.961 to fix camera animation issue.
## [1.1.27] - 2021-07-27
### Changed
- Update OmniverseAssetConverter library to 7.0.956 to check invalid mesh to avoid crash.
## [1.1.26] - 2021-07-22
### Changed
- Update AssetConverterContext to support a to_dict() function.
## [1.1.25] - 2021-07-10
### Changed
- Update OmniverseAssetConverter library to 7.0.950 for better glTF material import support.
## [1.1.24] - 2021-06-09
### Fixes
- Use copy on overwrite to avoid removing the file first during file uploads.
## [1.1.23] - 2021-06-07
### Changed
- Update OmniverseAssetConverter library to 7.0.943 to fix default prim issue of animation clip import.
## [1.1.22] - 2021-06-02
### Changed
- Add param for converter context to customize precision of USD transform op.
## [1.1.21] - 2021-06-02
### Changed
- Update OmniverseAssetConverter library to 7.0.942 for supporting customizing precision of USD transform op.
## [1.1.20] - 2021-05-25
### Changed
- Update OmniverseAssetConverter library to 7.0.941 for more glTF import improvement.
## [1.1.19] - 2021-05-10
### Changed
- Update OmniverseAssetConverter library to 7.0.933 to support pivot.
## [1.1.18] - 2021-05-07
### Changed
- Update OmniverseAssetConverter library to 7.0.928 to fix and improve glTF export.
- Add glb import/export support.
- Add embedding textures support for glTF/glb export support.
## [1.1.17] - 2021-05-06
### Changed
- Update OmniverseAssetConverter library to 7.0.925 to fix and improve glTF import.
- Shorten library search path to mitigate long path issue.
### Fixes
- Fix camera import and export issue.
- Fix OmniPBR material export issue for FBX.
## [1.1.16] - 2021-05-05
### Changed
- Update assimp to latest.
### Fixes
- Fix crash to import cameras from glTF.
## [1.1.15] - 2021-05-04
### Changed
- Update OmniverseAssetConverter library to 7.0.916 to provide WA for supporting pivots from FBX files.
## [1.1.14] - 2021-04-28
### Changed
- Update OmniverseAssetConverter library to 7.0.914 to include float type support and fix default/min/max issue of material loader.
## [1.1.13] - 2021-04-12
### Fixes
- Fix the crash to import FBX file that's over 2GB.
## [1.1.12] - 2021-03-30
### Changed
- Update extension icon.
- Remove extension warning.
## [1.1.11] - 2021-03-29
### Changed
- Support merging all static meshes if they are under the same transform and there are no skinned meshes.
## [1.1.10] - 2021-03-24
### Fixes
- Fix anonymous USD export.
- Fix crash of empty stage export to OBJ.
## [1.1.9] - 2021-03-24
### Fixes
- Fix texture populates for FBX that's referenced by property of texture object.
## [1.1.8] - 2021-03-23
### Changed
- Improve material export to display all material params for Kit editing.
## [1.1.7] - 2021-03-19
### Changed
- Shorten the length of native library path to avoid long path issue.
## [1.1.6] - 2021-03-06
### Changed
- Improve texture uploading to avoid those that are not referenced.
## [1.1.5] - 2021-03-05
### Changed
- Support removing redundant materials that are not referenced by any meshes.
## [1.1.4] - 2021-02-20
### Fixes
- Fix issue of exporting timesamples when there is no real transform animation.
## [1.1.3] - 2021-02-19
### Fixes
- More fixes to long path issue on windows.
## [1.1.2] - 2021-02-18
### Fixes
- Shorten the path length of native libraries to fix long path issue.
## [1.1.1] - 2021-02-16
### Fixes
- Fix obj export crash when there are no materials.
## [1.1.0] - 2021-02-15
### Changed
- Separate asset converter from Kit as standalone repo.
## [1.0.1] - 2021-01-26
### Changed
- Add animation export for FBX export.
- Add options to support embedding textures for FBX export.
### Fixes
- Fix opacity map import/export for fbx.
- Animation import fixes.
## [1.0.0] - 2021-01-19
### Changed
- Initial extension from original omni.kit.tool.asset_importer.
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/docs/README.md
# Omniverse Asset Converter [omni.kit.asset_converter]
This extension provides interfaces to convert common 3D formats, such as FBX, OBJ, and glTF, to USD, and also supports converting USD back to those formats.
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/docs/index.rst
omni.kit.asset_converter
########################
Omniverse Asset Converter
Introduction
============
This extension provides interfaces to convert common 3D formats, such as FBX, OBJ, and glTF, to USD, and also supports converting USD back to those formats.
parkerjgit/omniverse-sandbox/poc.extensions/azure-service/docker-compose.yml
version: "3.9"
services:
ov-farm-connector:
build:
context: ./
dockerfile: dockerfile
ports:
- "8011:8011"
parkerjgit/omniverse-sandbox/poc.extensions/azure-service/exts/poc.services.azure/config/extension.toml
[package]
version = "1.0.0"
authors = ["Josh Parker"]
title = "poc services azure"
description="A simple azure service"
readme = "docs/README.md"
changelog="docs/CHANGELOG.md"
icon = "data/icon.png"
[dependencies]
"omni.services.core" = {}
# Main python module this extension provides, it will be publicly available as "import poc.services.simple".
[[python.module]]
name = "poc.services.azure"
# [python.pipapi]
# archiveDirs = ["/usr/local/lib/python3.7/dist-packages/"]
# use_online_index = false
# requirements = [
# # "azure-storage-blob",
# # "azure-data-tables",
# ]
parkerjgit/omniverse-sandbox/poc.extensions/azure-service/exts/poc.services.azure/poc/services/azure/extension.py
import omni.ext
from omni.services.core import main
import sys
for p in sys.path:
print(p)
from azure.storage.blob import BlobServiceClient
from azure.data.tables import TableServiceClient
# Paste in azure storage account connection string here
ACCT_CONN_STR = ""
def hello_azure_blob():
try:
blob_service = BlobServiceClient.from_connection_string(ACCT_CONN_STR)
print(f"hello azure blob: {blob_service.account_name}")
return f"hello azure blob: {blob_service.account_name}"
except ValueError as e:
print("Connection string is invalid.")
raise e
except Exception as e:
print("Failed to Connect to Azure Storage Account.")
raise e
def hello_azure_table():
try:
table_service = TableServiceClient.from_connection_string(ACCT_CONN_STR)
print(f"hello azure table: {table_service.account_name}")
return f"hello azure table: {table_service.account_name}"
except ValueError as e:
print("Connection string is invalid.")
raise e
except Exception as e:
print("Failed to Connect to Azure Storage Account.")
raise e
class PocServicesAzureExtension(omni.ext.IExt):
def on_startup(self, ext_id):
main.register_endpoint("get", "/hello_azure_blob", hello_azure_blob)
main.register_endpoint("get", "/hello_azure_table", hello_azure_table)
def on_shutdown(self):
main.deregister_endpoint("get", "/hello_azure_blob")
main.deregister_endpoint("get", "/hello_azure_table")
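Once this extension is loaded in a Kit services app, the registered routes can be exercised over plain HTTP. A hedged sketch with `requests`, assuming the service is reachable on port 8011 (the mapping used in the docker-compose file earlier in this repo):

```python
import requests

BASE_URL = "http://localhost:8011"  # assumption: matches the docker-compose port mapping

for route in ("/hello_azure_blob", "/hello_azure_table"):
    # Each handler returns its greeting string on success.
    resp = requests.get(BASE_URL + route, timeout=10)
    resp.raise_for_status()
    print(route, "->", resp.text)
```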
parkerjgit/omniverse-sandbox/poc.extensions/simple-service/exts/poc.services.simple/config/extension.toml
[package]
version = "1.0.0"
authors = ["Josh Parker"]
title = "poc services simple"
description="A simple service extension"
readme = "docs/README.md"
changelog="docs/CHANGELOG.md"
icon = "data/icon.png"
[dependencies]
"omni.services.core" = {}
# Main python module this extension provides, it will be publicly available as "import poc.services.simple".
[[python.module]]
name = "poc.services.simple"
parkerjgit/omniverse-sandbox/poc.extensions/simple-service/exts/poc.services.simple/poc/services/simple/extension.py
import omni.ext
from omni.services.core import main
def hello_world():
return "hello super simple service!!"
class PocServicesSimpleExtension(omni.ext.IExt):
def on_startup(self, ext_id):
main.register_endpoint("get", "/hello-world", hello_world)
def on_shutdown(self):
main.deregister_endpoint("get", "/hello-world")
parkerjgit/omniverse-sandbox/poc.extensions/headless-service/exts/poc.services.headless/poc/services/headless/extension.py
import omni.ext
from omni.services.core import main
def hello_headless():
return "hello headless service!!"
class PocServicesHeadlessExtension(omni.ext.IExt):
def on_startup(self, ext_id):
main.register_endpoint("get", "/hello_headless", hello_headless)
def on_shutdown(self):
main.deregister_endpoint("get", "/hello_headless")
parkerjgit/omniverse-sandbox/poc.farmOnAks/readme.md
# Omniverse farm on Aks
## Getting Started
## Stack
* Omniverse Services/Applications:
* Farm
* Terraform - for provisioning infrastructure
* Docker - container virtualization
* Kubernetes - container orchestration
* NVIDIA device plugin for Kubernetes - required to run GPU-enabled containers in your Kubernetes cluster.
* Helm - Package Manager for Kubernetes
* Azure Services
* AKS
## Prerequisites
* [Omniverse farm ~~prerequisites~~ recommendations](https://docs.omniverse.nvidia.com/app_farm/app_farm/omniverse_farm_cloud_setup.html#prerequisites)
* Two types of node configurations are needed: Farm services and farm workers.
* Recommended:
* Farm containers are currently very large (improvements are coming), at a couple of gigabytes, and your local system storage needs to support that. We tend to run with 100GB file system sizes.
* Farm services:
* 3 x the equivalent of a t3.large (i.e., 2 vCPU, 8GB of RAM)
* Farm Workers:
* Kubernetes >= 1.24. Must be supported by the GPU operator (min = 1.16)
* support the NVIDIA drivers, device plugin and container toolkit and have the GPU Operator installed on them.
* For OV-based GPU workloads, RTX-enabled GPUs are needed for best results.
* [NVIDIA device plugin prerequisites](https://github.com/NVIDIA/k8s-device-plugin#prerequisites):
* NVIDIA drivers ~= 384.81
* nvidia-docker >= 2.0 || nvidia-container-toolkit >= 1.7.0 (>= 1.11.0 to use integrated GPUs on Tegra-based systems)
* nvidia-container-runtime configured as the default low-level runtime
* Kubernetes version >= 1.10
* [NVIDIA Container Toolkit prerequisites]():
* GNU/Linux x86_64 with kernel version > 3.10
* Docker >= 19.03 (recommended, but some distributions may include older versions of Docker. The minimum supported version is 1.12)
* NVIDIA GPU with Architecture >= Kepler (or compute capability 3.0)
* NVIDIA Linux drivers >= 418.81.07 (Note that older driver releases or branches are unsupported.)
## Setup
### 1. Deploy Kubernetes Cluster
> TODO: Verify that version of k8s is supported by farm (ie., >= 1.16, >= 1.24 recommended)
1. Configure resources to be created.
1. Create `/infra-compute/terraform.tfvars` file:
```
resource_group_name = "saz-resources"
ssh_public_key = "~/.ssh/id_rsa.pub"
```
* Notes:
* if you don't have an ssh key pair, [create one](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ssh-from-windows#create-an-ssh-key-pair)
* add "GPU" node label to nodes in node pool, so that we can easily target them by label (when we install nvidia device plugin)
1. Create cloud resources
1. Initialize terraform and provision cloud resources.
```sh
# from `/infra-compute` directory:
terraform init
terraform plan
terraform apply
```
1. Perform sanity checks
1. connect to cluster
```
az aks get-credentials \
--resource-group "dt-sandbox-resources" \
--name "ovfarm-dev-aks-cluster"
```
1. inspect cluster
```
kubectl get all
kubectl describe svc kubernetes
```
### 2a. Deploy Node Pool from image (Recommended)
1. Update your cluster to use the AKS GPU image
1. Install aks preview extension
```
az extension add --name aks-preview
az extension update --name aks-preview
```
1. Register the GPUDedicatedVHDPreview feature (enables feature flag)
```
az feature register --namespace "Microsoft.ContainerService" --name "GPUDedicatedVHDPreview"
az feature show --namespace "Microsoft.ContainerService" --name "GPUDedicatedVHDPreview"
```
1. Register provider (hack to propagate the change)
```
az provider register --namespace Microsoft.ContainerService
```
1. Deploy node pool
```
az aks nodepool add \
--resource-group dt-sandbox-resources \
--cluster-name ovfarm-dev-aks-cluster \
--name gpunodepool \
--node-count 1 \
--node-vm-size Standard_NV12ads_A10_v5 \
--node-taints sku=gpu:NoSchedule \
--aks-custom-headers UseGPUDedicatedVHD=true \
--enable-cluster-autoscaler \
--min-count 1 \
--max-count 3 \
--os-sku Ubuntu \
--mode User \
--labels vm_type=GPU
```
1. Verify that GPUs are schedulable
```
kubectl get nodes
kubectl describe node <node_name>
```
* Should see "nvidia.com/gpu" listed under "Capacity"
1. Run a sample GPU workload
```sh
kubectl apply -f ./jobs/samples-tf-mnist-demo.yaml
```
1. Get job status
```
kubectl get jobs samples-tf-mnist-demo --watch
```
1. Get logs
```
kubectl get pods --selector app=samples-tf-mnist-demo
kubectl logs <pod_name>
```
1. [optionally] Update nodepool e.g. to add labels (expensive to recreate)
```
az aks nodepool update \
--resource-group dt-sandbox-resources \
--cluster-name ovfarm-dev-aks-cluster \
--name gpunodepool \
--node-taints "" \
--labels vm_type=GPU
```
1. [optionally] Delete and re-deploy eg., to change vm-size
```
az aks nodepool delete \
--resource-group dt-sandbox-resources \
--cluster-name ovfarm-dev-aks-cluster \
--name gpunodepool
az aks nodepool add ... (see Deploy node pool above)
```
1. [optionally] Clean-up resources
```
az aks nodepool delete \
--resource-group dt-sandbox-resources \
--cluster-name ovfarm-dev-aks-cluster \
--name gpunodepool
```
### 2b. Deploy GPU Node Pool from Daemonset
tbd...
### 3. Deploy Farm
1. Prerequisites
1. NGC CLI
1. windows - download from https://ngc.nvidia.com/setup/installers/cli
1. wsl/linux (amd64):
```
wget --content-disposition https://ngc.nvidia.com/downloads/ngccli_linux.zip && unzip ngccli_linux.zip && chmod u+x ngc-cli/ngc
find ngc-cli/ -type f -exec md5sum {} + | LC_ALL=C sort | md5sum -c ngc-cli.md5
echo "export PATH=\"\$PATH:$(pwd)/ngc-cli\"" >> ~/.bash_profile && source ~/.bash_profile
ngc --version
```
1. NGC API Key
1. generate from https://ngc.nvidia.com/setup
1. login to ngc from cli with API Key
```
ngc config set
```
1. K8s Cluster
1. connect to cluster
```
az aks get-credentials \
--resource-group "dt-sandbox-resources" \
--name "ovfarm-dev-aks-cluster"
```
1. Define variables
```
K8S_NAMESPACE="ovfarm"
NGC_API_KEY=<NGC_API_TOKEN>
```
1. Create namespace
```
kubectl create namespace $K8S_NAMESPACE
```
* Question: can this be default namespace?
1. Create [Docker Config Secret](https://kubernetes.io/docs/concepts/configuration/secret/#docker-config-secrets)
```
kubectl create secret docker-registry my-registry-secret \
--namespace $K8S_NAMESPACE \
--docker-server="nvcr.io" \
--docker-username='$oauthtoken' \
--docker-password=$NGC_API_KEY
```
1. fetch helm chart
```
helm fetch https://helm.ngc.nvidia.com/nvidia/omniverse/charts/omniverse-farm-0.3.2.tgz \
--username='$oauthtoken' \
--password=$NGC_API_KEY
```
1. Configure deployment. Be sure to use the correct DNS zone for the host in the `values.yaml` file. You can get the DNS zone name with:
```sh
az aks show \
--resource-group "ov-resources" \
--name "ovfarm-dev-aks-cluster" \
--query addonProfiles.httpApplicationRouting.config.HTTPApplicationRoutingZoneName
```
1. install farm
```
helm upgrade \
--install \
omniverse-farm \
omniverse-farm-0.3.2.tgz \
--create-namespace \
--namespace $K8S_NAMESPACE \
--values ./containers/farm/values.yaml
helm list -n ovfarm
```
1. [optionally] update deployment
```
helm upgrade --values ./containers/farm/values.yaml omniverse-farm omniverse-farm-0.3.2.tgz --namespace ovfarm
```
1. Validate the installation.
1. Check that Pods are running
```sh
kubectl get pods -o wide -n $K8S_NAMESPACE
```
1. Ensure all pods in ready state
```sh
kubectl -n $K8S_NAMESPACE wait --timeout=300s --for condition=Ready pods --all
```
* Note, controller takes a very long time to initialize
1. Check for errors on any pods that aren't ready
```sh
kubectl describe pod <pod_name>
```
1. Check endpoints with curl pod
1. [run curl pod](https://kubernetes.io/docs/tutorials/services/connect-applications-service/#accessing-the-service)
```sh
kubectl run curl --namespace=$K8S_NAMESPACE --image=radial/busyboxplus:curl -i --tty -- sh
# use "exec" if curl pod already exists
kubectl exec curl --namespace=$K8S_NAMESPACE -i --tty -- sh
```
1. check endpoints
```sh
[ root@curl:/ ]$ check_endpoint() {
url=$1
curl -s -o /dev/null "$url" && echo -e "[UP]\t${url}" || echo -e "[DOWN]\t${url}"
}
[ root@curl:/ ]$ check_farm_status() {
echo "======================================================================"
echo "Farm status:"
echo "----------------------------------------------------------------------"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/agents/status"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/dashboard/status"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/jobs/status"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/jobs/load"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/logs/status"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/retries/status"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/tasks/status"
check_endpoint "farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/tasks/list?status=submitted"
echo "======================================================================"
}
[ root@curl:/ ]$ check_farm_status
```
1. log into queue management dashboard
http://farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/dashboard
http://farm.23711a66dc7f46649e88.eastus.aksapp.io/queue/management/ui/
1. Find api docs
http://farm.23711a66dc7f46649e88.eastus.aksapp.io/docs
* Issue: Cannot find docs.
1. Explore ConfigMaps
### 4. Submit job
1. Prerequisites
* Python
* Script dependencies
```
pip install requests
pip install toml
```
1. Download sample job
Download the example sample jobs:
```
ngc registry resource download-version "nvidia/omniverse-farm/cpu_verification:1.0.0"
ngc registry resource download-version "nvidia/omniverse-farm/gpu_verification:1.0.0"
```
1. Get Jobs API Key
```sh
kubectl get cm omniverse-farm-jobs -o yaml -n $K8S_NAMESPACE | grep api_key
FARM_API_KEY=<api_key>
```
1. Upload job definitions to cluster
```sh
FARM_BASE_URL="http://farm.23711a66dc7f46649e88.eastus.aksapp.io"
python3 ./job_definition_upload.py df.kit --farm-url=$FARM_BASE_URL --api-key=$FARM_API_KEY
python3 ./job_definition_upload.py gpu.kit --farm-url=$FARM_BASE_URL --api-key=$FARM_API_KEY
```
1. Get Job definitions
```sh
curl -X 'GET' \
"${FARM_BASE_URL}/agent/operator/job/definitions" \
-H 'accept: application/json'
```
* TODO: wrong endpoint. fix^
1. Submit CPU test job (df)
```sh
curl -X "POST" \
"${FARM_BASE_URL}/queue/management/tasks/submit" \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"user": "testuser",
"task_type": "df",
"task_args": {},
"metadata": {
"_retry": {
"is_retryable": false
}
},
"status": "submitted"
}'
```
1. Submit GPU test job
```sh
curl -X "POST" \
"${FARM_BASE_URL}/queue/management/tasks/submit" \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"user": "testuser",
"task_type": "gpu",
"task_args": {},
"metadata": {
"_retry": {
"is_retryable": false
}
},
"status": "submitted"
}'
```
https://catalog.ngc.nvidia.com/orgs/nvidia/teams/omniverse-farm/resources/gpu_verification/quick-start-guide
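The same submission can be scripted rather than typed as curl; a minimal sketch using the `requests` package installed in the prerequisites above (the payload mirrors the GPU test job):

```python
import requests

FARM_BASE_URL = "http://farm.23711a66dc7f46649e88.eastus.aksapp.io"

payload = {
    "user": "testuser",
    "task_type": "gpu",  # use "df" for the CPU verification job
    "task_args": {},
    "metadata": {"_retry": {"is_retryable": False}},
    "status": "submitted",
}

# POST to the same queue management endpoint used in the curl examples.
resp = requests.post(
    f"{FARM_BASE_URL}/queue/management/tasks/submit",
    json=payload,
    timeout=30,
)
resp.raise_for_status()
print(resp.json())
```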
## Troubleshooting
### How to get the currently installed drivers?
### Are nvidia drivers pre-installed on NV series VMs?
### azurerm terraform provider support for enabling aks preview feature GPUDedicatedVHDPreview using terraform.
* pending [azurerm custom header support](https://github.com/hashicorp/terraform-provider-azurerm/issues/6793)
* [failed PR](https://github.com/hashicorp/terraform-provider-azurerm/pull/14178) tried to fix this.
* pending [AKS custom feature support](https://github.com/Azure/AKS/issues/2757)
* possible work around using [xpd provider](https://registry.terraform.io/providers/0x2b3bfa0/xpd/latest/docs/guides/test)
### ISSUE: Cannot find swagger docs at `/docs` after deploying helm chart to AKS
Helm chart does not seem to deploy a swagger docs.
### ISSUE: kubectl not working on wsl2
The fix is to copy over the kube config:
```
mkdir -p ~/.kube && cp /mnt/c/Users/nycjyp/.kube/config ~/.kube
```
## ref
* https://docs.omniverse.nvidia.com/app_farm/app_farm/omniverse_farm_cloud_setup.html
* https://github.com/NVIDIA/k8s-device-plugin#deployment-via-helm
* https://www.youtube.com/watch?v=KplFFvj3XRk
* https://itnext.io/enabling-nvidia-gpus-on-k3s-for-cuda-workloads-a11b96f967b0
* https://learn.microsoft.com/en-us/azure/aks/node-access - node access
* https://forums.developer.nvidia.com/t/set-up-cloud-rendering-using-aws-farm-queue/221879/3?u=mati-nvidia
* https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes/index.html
* https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#post-installation-actions
* https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/
### configure GPU nodes
* https://learn.microsoft.com/en-us/azure/aks/gpu-cluster#manually-install-the-nvidia-device-plugin
* https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/aks/gpu-cluster.md
### Setting up VMs
* https://azuremarketplace.microsoft.com/en-us/marketplace/apps/nvidia.ngc_azure_17_11?tab=overview
* https://michaelcollier.wordpress.com/2017/08/04/how-to-setup-nvidia-driver-on-nv-series-azure-vm/
parkerjgit/omniverse-sandbox/poc.farmOnAks/containers/nvidia-device-plugin/values.yaml
# overrides https://github.com/NVIDIA/k8s-device-plugin/blob/v0.13.0/deployments/helm/nvidia-device-plugin/values.yaml
nodeSelector:
vm_type: "GPU"
| 151 |
YAML
| 36.999991 | 118 | 0.774834 |
parkerjgit/omniverse-sandbox/poc.farmOnAks/containers/farm/secrets.yaml
|
apiVersion: v1
kind: Secret
metadata:
name: server-vars
namespace: default
stringData:
docker-registry: 'my-registry-secret'
| 130 |
YAML
| 17.714283 | 39 | 0.769231 |
parkerjgit/omniverse-sandbox/poc.farmOnAks/containers/farm/values.yaml
|
global:
imagePullSecrets:
- name: my-registry-secret
ingress:
host: "farm.23711a66dc7f46649e88.eastus.aksapp.io"
annotations:
kubernetes.io/ingress.class: addon-http-application-routing
controller:
serviceConfig:
k8s:
jobTemplateSpecOverrides:
imagePullSecrets:
- name: my-registry-secret
dashboard:
nodeSelector:
vm_type: CPU
| 388 |
YAML
| 19.473683 | 65 | 0.693299 |
parkerjgit/omniverse-sandbox/poc.farmOnAks/jobs/sample-tf-minst-demo.yaml
|
apiVersion: batch/v1
kind: Job
metadata:
labels:
app: samples-tf-mnist-demo
name: samples-tf-mnist-demo
spec:
template:
metadata:
labels:
app: samples-tf-mnist-demo
spec:
containers:
- name: samples-tf-mnist-demo
image: mcr.microsoft.com/azuredocs/samples-tf-mnist-demo:gpu
args: ["--max_steps", "500"]
imagePullPolicy: IfNotPresent
resources:
limits:
nvidia.com/gpu: 1
restartPolicy: OnFailure
tolerations:
- key: "sku"
operator: "Equal"
value: "gpu"
effect: "NoSchedule"
| 611 |
YAML
| 22.538461 | 68 | 0.589198 |
parkerjgit/omniverse-sandbox/poc.farmOnAks/jobs/cpu_verification_v1.0.0/job_definition_upload.py
|
#!/usr/bin/env python3
import argparse
from dataclasses import (
asdict,
dataclass,
field,
)
from typing import (
Dict,
Optional,
)
try:
import requests
except ImportError:
raise Exception("'requests' python package is required, install and try again. 'pip install requests'")
try:
import toml
except ImportError:
raise Exception("'toml' python package is required, install and try again. 'pip install toml'")
@dataclass
class _JobDefinition:
name: str
job_type: str
command: str
job_spec_path: Optional[str] = None
args: list = field(default_factory=list)
task_function: Optional[str] = None
env: Dict = field(default_factory=dict)
log_to_stdout: Optional[bool] = True
extension_paths: list = field(default_factory=list)
allowed_args: Dict = field(default_factory=dict)
headless: Optional[bool] = True
active: Optional[bool] = True
unresolved_command_path: Optional[str] = None
success_return_codes: list = field(default_factory=lambda: [0])
capacity_requirements: Dict = field(default_factory=dict)
working_directory: Optional[str] = ""
container: Optional[str] = None
def _load_job_definitions(job_config_filepath: str, force: bool = False):
job_config = toml.load(job_config_filepath)
jobs = job_config.get("job")
if not jobs:
raise Exception(f"No job definitions found in config file: {job_config_filepath}")
job_definitions = []
for job_name, params in jobs.items():
if not params.get("container"):
print(f"WARNING: There is no container defined for '{job_name}'.")
if "unresolved_command_path" not in params:
params["unresolved_command_path"] = params["command"]
if "job_spec_path" not in params:
params["job_spec_path"] = str(job_config_filepath)
try:
job_definition = _JobDefinition(**params)
except TypeError as exc:
raise Exception(f"Error processing job definition '{job_name}'. {exc}") from exc
job_definitions.append(job_definition)
return job_definitions
def upload_job_definitions(farm_url, job_definitions_file, api_key, timeout):
jobs_save_endpoint = f"{farm_url.rstrip('/')}/queue/management/jobs/save"
job_defs = _load_job_definitions(job_definitions_file)
print(f"Found '{len(job_defs)}' Job definition(s) in '{job_definitions_file}'")
for job in job_defs:
print(f"\nUploading Job definition: '{job.name}'")
response = requests.post(
url=jobs_save_endpoint,
json=asdict(job),
timeout=timeout,
headers={"X-API-KEY": api_key},
)
response.raise_for_status()
print(f'Response: {response.json()}')
def main():
parser = argparse.ArgumentParser(description="Upload and save job definitions found in the job config")
parser.add_argument('job_definitions_file', help="TOML file containing the Job definitions.")
parser.add_argument('--farm-url', help="Farm base URL.", required=True)
parser.add_argument('--api-key', help="Jobs API Key.", required=True)
parser.add_argument('--timeout', type=int, default=60, help="Request timeout.")
args = parser.parse_args()
upload_job_definitions(args.farm_url, args.job_definitions_file, args.api_key, args.timeout)
if __name__ == '__main__':
main()
| 3,404 |
Python
| 31.122641 | 107 | 0.659224 |
parkerjgit/omniverse-sandbox/poc.farmOnLinux/readme.md
|
# Omniverse Farm on Linux (Headless)
## Overview
1. Queue
1. Install Queue (service)
2. Configure Queue URL (for agents to connect; this is also the base URL for the API endpoints)
3. Navigate to dashboard at: `http://<queue_url>/queue/management/ui/`
2. Agent
1. Install Agent
1. Connect to Queue
2. Configure Jobs directory (this is where scripts and kits go)
3. Navigate to dashboard at: `http://<agent_url>/agent/management/ui/`
## Running Farm (after setup)
```
/opt/ove/ov-farm-queue/queue.sh &
/opt/ove/ov-farm-agent/agent.sh &
```
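To confirm both services are up, hit the status endpoints (the same health checks appear in the setup sections below):
```sh
curl -s http://localhost:8222/status   # queue
curl -s http://localhost:8223/status   # agent
```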
## Setup Queue
1. Install Dependencies
```
sudo apt-get update
sudo apt-get install -y --no-install-recommends \
libatomic1 \
libxi6 \
libxrandr2 \
libxt6 \
libegl1 \
libglu1-mesa \
libgomp1 \
libsm6 \
unzip
```
1. Install Farm Queue package
```sh
sudo mkdir -p /opt/ove/ov-farm-queue
sudo curl https://d4i3qtqj3r0z5.cloudfront.net/farm-queue-launcher%40103.1.0%2Bmaster.33.956d9b7d.teamcity.linux-x86_64.release.zip --output /opt/ove/ov-farm-queue/farm-queue-launcher.zip
sudo unzip /opt/ove/ov-farm-queue/farm-queue-launcher.zip -d /opt/ove/ov-farm-queue/
sudo rm /opt/ove/ov-farm-queue/farm-queue-launcher.zip
```
1. Install the Kit SDK package
```sh
sudo mkdir -p /opt/ove/ov-farm-queue/kit
sudo curl https://d4i3qtqj3r0z5.cloudfront.net/[email protected]%2Brelease.6024.1fc2e16c.tc.linux-x86_64.release.zip --output /opt/ove/ov-farm-queue/kit/kit-sdk-launcher.zip
sudo unzip /opt/ove/ov-farm-queue/kit/kit-sdk-launcher.zip -d /opt/ove/ov-farm-queue/kit/
sudo rm /opt/ove/ov-farm-queue/kit/kit-sdk-launcher.zip
```
1. Recursively set owner of `ov-farm-queue` and subdirectories to a non-root user
```
sudo chown -R josh:josh /opt/ove/ov-farm-queue
```
1. Create launch script
```
cat << 'EOF' > /opt/ove/ov-farm-queue/queue.sh
#!/bin/bash
BASEDIR=$(dirname "$0")
exec $BASEDIR/kit/kit $BASEDIR/apps/omni.farm.queue.headless.kit \
--ext-folder $BASEDIR/exts-farm-queue \
--/exts/omni.services.farm.management.tasks/dbs/task-persistence/connection_string=sqlite:///$BASEDIR/task-management.db
EOF
```
* Note: the Queue URL defaults to `http://localhost:8222`
1. Make queue script executable
```
sudo chmod +x /opt/ove/ov-farm-queue/queue.sh
```
1. Start Queue
```
./queue.sh &
```
1. Navigate to Queue Management dashboard(s):
```sh
# http://<queue_url>/queue/management/ui/
http://localhost:8222/queue/management/ui/
```
1. Find Queue Management API docs
```sh
# http://<queue_url>/docs
http://localhost:8222/docs
```
1. Perform health check
```
curl -X GET 'http://localhost:8222/status' \
-H 'accept: application/json'
```
## Setup Agent
1. Install Dependencies
```
sudo apt-get update
sudo apt-get install -y --no-install-recommends \
libatomic1 \
libxi6 \
libxrandr2 \
libxt6 \
libegl1 \
libglu1-mesa \
libgomp1 \
libsm6 \
unzip
```
1. Install Farm Agent Package
```
sudo mkdir -p /opt/ove/ov-farm-agent
sudo curl https://d4i3qtqj3r0z5.cloudfront.net/farm-agent-launcher%40103.1.0%2Bmaster.53.238d4340.teamcity.linux-x86_64.release.zip --output /opt/ove/ov-farm-agent/farm-agent-launcher.zip
sudo unzip /opt/ove/ov-farm-agent/farm-agent-launcher.zip -d /opt/ove/ov-farm-agent/
sudo rm /opt/ove/ov-farm-agent/farm-agent-launcher.zip
```
1. Install the Kit SDK package
```sh
sudo mkdir -p /opt/ove/ov-farm-agent/kit
sudo curl https://d4i3qtqj3r0z5.cloudfront.net/[email protected]%2Brelease.6024.1fc2e16c.tc.linux-x86_64.release.zip --output /opt/ove/ov-farm-agent/kit/kit-sdk-launcher.zip
sudo unzip /opt/ove/ov-farm-agent/kit/kit-sdk-launcher.zip -d /opt/ove/ov-farm-agent/kit/
sudo rm /opt/ove/ov-farm-agent/kit/kit-sdk-launcher.zip
```
1. Recursively set owner of `ov-farm-agent` and subdirectories to a non-root user
```
sudo chown -R josh:josh /opt/ove/ov-farm-agent
```
1. Create launch script (configure jobs directory and queue host)
```
cat << 'EOF' > /opt/ove/ov-farm-agent/agent.sh
#!/bin/bash
BASEDIR=$(dirname "$0")
JOBSDIR="$HOME/code/sandbox/omniverse-sandbox/poc.farmOnLinux/agent/jobs"
exec $BASEDIR/kit/kit $BASEDIR/apps/omni.farm.agent.headless.kit \
--ext-folder $BASEDIR/exts-farm-agent \
--/exts/omni.services.farm.agent.operator/job_store_args/job_directories/0=$JOBSDIR/* \
--/exts/omni.services.farm.agent.controller/manager_host=http://localhost:8222 \
--/exts/omni.services.farm.agent.operator/manager_host=http://localhost:8222
EOF
```
1. Make agent script executable
```
chmod +x /opt/ove/ov-farm-agent/agent.sh
```
1. Start agent (in background)
```
./agent.sh &
```
* [Expected Error](https://docs.omniverse.nvidia.com/app_farm/app_farm/agent.html#output-log) if no supported GPU capacity:
```
2023-01-02 22:49:01 [2,349ms] [Error] [omni.services.farm.facilities.agent.capacity.managers.base] Failed to load capacities for omni.services.farm.facilities.agent.capacity.GPU: NVML Shared Library Not Found
```
1. Navigate to Job Management Dashboard:
```sh
# http://<agent_url>/agent/management/ui/
http://localhost:8223/agent/management/ui/
```
* Note: this form simply edits the file specified by the `job-spec-path` property (i.e., the Job definition path) in the configured jobs directory. The effect is the same as editing the file manually; however, not all [properties](https://docs.omniverse.nvidia.com/app_farm/app_farm/guides/creating_job_definitions.html#schema-reference) are exposed in the form.
1. Find Agent Management API docs:
```sh
# http://<agent_url>/docs
http://localhost:8223/docs
```
1. Perform health check
```
curl -X GET 'http://localhost:8223/status' \
-H 'accept: application/json'
```
## Job: Hello World
1. [If you did NOT configure jobs directory to use repo] Copy `hello-omniverse` folder to configured jobs directory, e.g., `/opt/ove/ov-farm-agent/jobs`
```
cp -R poc.farmOnLinux/agent/jobs/hello-omniverse /opt/ove/ov-farm-agent/jobs/hello-omniverse
```
> **Notes** (see the job definition sketch just below this list):
> * The `job_type` property is set to "base" (i.e., command or executable) to allow execution of arbitrary shell commands or executable files.
> * The `command` property is set to the name of the shell command, "echo". If we were executing a script, this would be the full path to the script or executable.
> * `args` (i.e., process arguments) are passed automatically.
> * `allowed_args` are passed by the client.
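> * A minimal sketch of such a job definition in TOML. This is hypothetical, not the exact file shipped in this repo; field names follow the Farm job definition [schema reference](https://docs.omniverse.nvidia.com/app_farm/app_farm/guides/creating_job_definitions.html#schema-reference):
> ```toml
> [job.hello-omniverse]
> job_type = "base"    # run an arbitrary command/executable
> command = "echo"     # shell command; for a script, this would be a full path
> args = ["hello", "omniverse"]
> ```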
1. [If necessary] Restart the agent to pick up the job.
```
kill -9 $(lsof -ti tcp:8223)
./agent.sh &
```
> TODO: find a better way to restart the agent than killing the process.
1. [Optionally] Verify the job has been added by getting the list of available jobs
```
curl -X GET 'http://localhost:8223/agent/operator/available' \
-H 'accept: application/json'
```
1. Submit task using `queue/management/tasks/submit` endpoint:
```
curl -X POST "http://localhost:8222/queue/management/tasks/submit" \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--data '{"user":"my-user-id","task_type":"hello-omniverse","task_args":{},"task_comment":"My job!"}'
```
1. Get Status of task:
```sh
# of task with task id (returned when you submitted task)
curl -X GET 'http://localhost:8222/queue/management/tasks/info/848973c4-5864-416b-976f-56a94cfc8258' \
-H 'accept: application/json'
# of all tasks matching type:
curl -X GET 'http://localhost:8222/queue/management/tasks/list?task_type=hello-omniverse' \
-H 'accept: application/json'
```
## Job: Run a Simple Python Script
1. [If you did NOT configure jobs directory to use repo] Copy `simple-python-script` folder to configured jobs directory, e.g., `/opt/ove/ov-farm-agent/jobs`
```sh
cp -R poc.farmOnLinux/agent/jobs/simple-python-script /opt/ove/ov-farm-agent/jobs/simple-python-script
```
1. [If necessary] Restart the agent to pick up the job.
```
kill -9 $(lsof -ti tcp:8223)
./agent.sh &
```
1. [Optionally] Verify the job has been added by getting the list of available jobs
```
curl -X GET 'http://localhost:8223/agent/operator/available' \
-H 'accept: application/json'
```
1. Submit task using `queue/management/tasks/submit` endpoint:
```
curl -X POST "http://localhost:8222/queue/management/tasks/submit" \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--data '{"user":"my-user-id","task_type":"simple-python-script","task_args":{"name":"Python Script"},"task_comment":"My job!"}'
```
1. Get Status of task:
```sh
# of task with task id (returned when you submitted task)
curl -X GET 'http://localhost:8222/queue/management/tasks/info/848973c4-5864-416b-976f-56a94cfc8258' \
-H 'accept: application/json'
# of all tasks matching type:
curl -X GET 'http://localhost:8222/queue/management/tasks/list?task_type=simple-python-script' \
-H 'accept: application/json'
```
## Questions
* How to get running agent to pick up changes to jobs without restarting service?
| 9,351 |
Markdown
| 37.171428 | 340 | 0.662603 |
parkerjgit/omniverse-sandbox/poc.farmOnLinux/agent/jobs/simple-python-script/script.py
|
import argparse
def main():
parser = argparse.ArgumentParser(description="A simple command-line script")
parser.add_argument("--name", help="Your name")
args = parser.parse_args()
print(f"Hello, {args.name}!")
if __name__ == "__main__":
main()
| 267 |
Python
| 21.333332 | 80 | 0.632959 |
Coriago/examplo-ros2/README.md
|
# examplo-ros2
ROS2 pkgs and resources for examplo bot
## Setup steps
- Install ROS2
- Build+Install repo
- Install Isaac Sim
- Make ROS2 Default Ext
1. Open `~/.local/share/ov/pkg/isaac_sim-2022.1.1/apps/omni.isaac.sim.base.kit`
2. Search for `omni.isaac.ros_bridge` and change it to `omni.isaac.ros2_bridge`
- Setup Examplobot Extension
1. Open the extension manager by doing `Window -> Extensions`
2. Hit the gear icon and add the path `<path-to-repo>/isaac`
3. Enable the Examplo Bot extension by searching for it and hitting the toggle button.
https://docs.omniverse.nvidia.com/prod_launcher/prod_kit/linux-troubleshooting.html
| 647 |
Markdown
| 31.399998 | 86 | 0.749614 |
Coriago/examplo-ros2/src/mecanum_controller/mecanum_plugin.xml
|
<library path="mecanum_controller">
<class name="mecanum_controller/MecanumController" type="mecanum_controller::MecanumController" base_class_type="controller_interface::ControllerInterface">
<description>
      The mecanum controller transforms linear and angular velocity messages into signals for each wheel of a mecanum drive robot.
</description>
</class>
</library>
| 401 |
XML
| 49.249994 | 158 | 0.793017 |
Coriago/examplo-ros2/src/mecanum_controller/test/test_mecanum_controller.cpp
|
// Copyright 2020 PAL Robotics SL.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gmock/gmock.h>
#include <array>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "mecanum_controller/mecanum_controller.hpp"
#include "hardware_interface/loaned_command_interface.hpp"
#include "hardware_interface/loaned_state_interface.hpp"
#include "hardware_interface/types/hardware_interface_type_values.hpp"
#include "lifecycle_msgs/msg/state.hpp"
#include "rclcpp/rclcpp.hpp"
using CallbackReturn = rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn;
using hardware_interface::HW_IF_POSITION;
using hardware_interface::HW_IF_VELOCITY;
using hardware_interface::LoanedCommandInterface;
using hardware_interface::LoanedStateInterface;
using lifecycle_msgs::msg::State;
using testing::SizeIs;
class TestableMecanumController : public mecanum_controller::MecanumController
{
public:
using MecanumController::MecanumController;
std::shared_ptr<geometry_msgs::msg::TwistStamped> getLastReceivedTwist()
{
std::shared_ptr<geometry_msgs::msg::TwistStamped> ret;
received_velocity_msg_ptr_.get(ret);
return ret;
}
/**
* @brief wait_for_twist block until a new twist is received.
* Requires that the executor is not spinned elsewhere between the
* message publication and the call to this function
*
* @return true if new twist msg was received, false if timeout
*/
bool wait_for_twist(
rclcpp::Executor & executor,
const std::chrono::milliseconds & timeout = std::chrono::milliseconds(500))
{
rclcpp::WaitSet wait_set;
wait_set.add_subscription(velocity_command_subscriber_);
if (wait_set.wait(timeout).kind() == rclcpp::WaitResultKind::Ready)
{
executor.spin_some();
return true;
}
return false;
}
};
class TestMecanumController : public ::testing::Test
{
protected:
static void SetUpTestCase() { rclcpp::init(0, nullptr); }
void SetUp() override
{
controller_ = std::make_unique<TestableMecanumController>();
pub_node = std::make_shared<rclcpp::Node>("velocity_publisher");
velocity_publisher = pub_node->create_publisher<geometry_msgs::msg::TwistStamped>(
controller_name + "/cmd_vel", rclcpp::SystemDefaultsQoS());
}
static void TearDownTestCase() { rclcpp::shutdown(); }
/// Publish velocity msgs
/**
* linear - magnitude of the linear command in the geometry_msgs::twist message
* angular - the magnitude of the angular command in geometry_msgs::twist message
*/
void publish(double linear, double angular)
{
int wait_count = 0;
auto topic = velocity_publisher->get_topic_name();
while (pub_node->count_subscribers(topic) == 0)
{
if (wait_count >= 5)
{
auto error_msg = std::string("publishing to ") + topic + " but no node subscribes to it";
throw std::runtime_error(error_msg);
}
std::this_thread::sleep_for(std::chrono::milliseconds(100));
++wait_count;
}
geometry_msgs::msg::TwistStamped velocity_message;
velocity_message.header.stamp = pub_node->get_clock()->now();
velocity_message.twist.linear.x = linear;
velocity_message.twist.angular.z = angular;
velocity_publisher->publish(velocity_message);
}
/// \brief wait for the subscriber and publisher to completely setup
void waitForSetup()
{
constexpr std::chrono::seconds TIMEOUT{2};
auto clock = pub_node->get_clock();
auto start = clock->now();
while (velocity_publisher->get_subscription_count() <= 0)
{
if ((clock->now() - start) > TIMEOUT)
{
FAIL();
}
rclcpp::spin_some(pub_node);
}
}
void assignResourcesPosFeedback()
{
std::vector<LoanedStateInterface> state_ifs;
state_ifs.emplace_back(left_wheel_pos_state_);
state_ifs.emplace_back(right_wheel_pos_state_);
std::vector<LoanedCommandInterface> command_ifs;
command_ifs.emplace_back(left_wheel_vel_cmd_);
command_ifs.emplace_back(right_wheel_vel_cmd_);
controller_->assign_interfaces(std::move(command_ifs), std::move(state_ifs));
}
void assignResourcesVelFeedback()
{
std::vector<LoanedStateInterface> state_ifs;
state_ifs.emplace_back(left_wheel_vel_state_);
state_ifs.emplace_back(right_wheel_vel_state_);
std::vector<LoanedCommandInterface> command_ifs;
command_ifs.emplace_back(left_wheel_vel_cmd_);
command_ifs.emplace_back(right_wheel_vel_cmd_);
controller_->assign_interfaces(std::move(command_ifs), std::move(state_ifs));
}
const std::string controller_name = "test_mecanum_controller";
std::unique_ptr<TestableMecanumController> controller_;
const std::vector<std::string> left_wheel_names = {"left_wheel_joint"};
const std::vector<std::string> right_wheel_names = {"right_wheel_joint"};
std::vector<double> position_values_ = {0.1, 0.2};
std::vector<double> velocity_values_ = {0.01, 0.02};
hardware_interface::StateInterface left_wheel_pos_state_{
left_wheel_names[0], HW_IF_POSITION, &position_values_[0]};
hardware_interface::StateInterface right_wheel_pos_state_{
right_wheel_names[0], HW_IF_POSITION, &position_values_[1]};
hardware_interface::StateInterface left_wheel_vel_state_{
left_wheel_names[0], HW_IF_VELOCITY, &velocity_values_[0]};
hardware_interface::StateInterface right_wheel_vel_state_{
right_wheel_names[0], HW_IF_VELOCITY, &velocity_values_[1]};
hardware_interface::CommandInterface left_wheel_vel_cmd_{
left_wheel_names[0], HW_IF_VELOCITY, &velocity_values_[0]};
hardware_interface::CommandInterface right_wheel_vel_cmd_{
right_wheel_names[0], HW_IF_VELOCITY, &velocity_values_[1]};
rclcpp::Node::SharedPtr pub_node;
rclcpp::Publisher<geometry_msgs::msg::TwistStamped>::SharedPtr velocity_publisher;
};
TEST_F(TestMecanumController, configure_fails_without_parameters)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::ERROR);
}
TEST_F(TestMecanumController, configure_fails_with_only_left_or_only_right_side_defined)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(std::vector<std::string>())));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::ERROR);
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(std::vector<std::string>())));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::ERROR);
}
TEST_F(TestMecanumController, configure_fails_with_mismatching_wheel_side_size)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
auto extended_right_wheel_names = right_wheel_names;
extended_right_wheel_names.push_back("extra_wheel");
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(extended_right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::ERROR);
}
TEST_F(TestMecanumController, configure_succeeds_when_wheels_are_specified)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
ASSERT_THAT(
controller_->state_interface_configuration().names,
SizeIs(left_wheel_names.size() + right_wheel_names.size()));
ASSERT_THAT(
controller_->command_interface_configuration().names,
SizeIs(left_wheel_names.size() + right_wheel_names.size()));
}
TEST_F(TestMecanumController, activate_fails_without_resources_assigned)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
ASSERT_EQ(controller_->on_activate(rclcpp_lifecycle::State()), CallbackReturn::ERROR);
}
TEST_F(TestMecanumController, activate_succeeds_with_pos_resources_assigned)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
// We implicitly test that by default position feedback is required
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
assignResourcesPosFeedback();
ASSERT_EQ(controller_->on_activate(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
}
TEST_F(TestMecanumController, activate_succeeds_with_vel_resources_assigned)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("position_feedback", rclcpp::ParameterValue(false)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
assignResourcesVelFeedback();
ASSERT_EQ(controller_->on_activate(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
}
TEST_F(TestMecanumController, activate_fails_with_wrong_resources_assigned_1)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("position_feedback", rclcpp::ParameterValue(false)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
assignResourcesPosFeedback();
ASSERT_EQ(controller_->on_activate(rclcpp_lifecycle::State()), CallbackReturn::ERROR);
}
TEST_F(TestMecanumController, activate_fails_with_wrong_resources_assigned_2)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("position_feedback", rclcpp::ParameterValue(true)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
ASSERT_EQ(controller_->on_configure(rclcpp_lifecycle::State()), CallbackReturn::SUCCESS);
assignResourcesVelFeedback();
ASSERT_EQ(controller_->on_activate(rclcpp_lifecycle::State()), CallbackReturn::ERROR);
}
TEST_F(TestMecanumController, cleanup)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
controller_->get_node()->set_parameter(rclcpp::Parameter("wheel_separation", 0.4));
controller_->get_node()->set_parameter(rclcpp::Parameter("wheel_radius", 0.1));
rclcpp::executors::SingleThreadedExecutor executor;
executor.add_node(controller_->get_node()->get_node_base_interface());
auto state = controller_->configure();
ASSERT_EQ(State::PRIMARY_STATE_INACTIVE, state.id());
assignResourcesPosFeedback();
state = controller_->activate();
ASSERT_EQ(State::PRIMARY_STATE_ACTIVE, state.id());
waitForSetup();
// send msg
const double linear = 1.0;
const double angular = 1.0;
publish(linear, angular);
controller_->wait_for_twist(executor);
ASSERT_EQ(
controller_->update(rclcpp::Time(0, 0, RCL_ROS_TIME), rclcpp::Duration::from_seconds(0.01)),
controller_interface::return_type::OK);
state = controller_->deactivate();
ASSERT_EQ(State::PRIMARY_STATE_INACTIVE, state.id());
ASSERT_EQ(
controller_->update(rclcpp::Time(0, 0, RCL_ROS_TIME), rclcpp::Duration::from_seconds(0.01)),
controller_interface::return_type::OK);
state = controller_->cleanup();
ASSERT_EQ(State::PRIMARY_STATE_UNCONFIGURED, state.id());
// should be stopped
EXPECT_EQ(0.0, left_wheel_vel_cmd_.get_value());
EXPECT_EQ(0.0, right_wheel_vel_cmd_.get_value());
executor.cancel();
}
TEST_F(TestMecanumController, correct_initialization_using_parameters)
{
const auto ret = controller_->init(controller_name);
ASSERT_EQ(ret, controller_interface::return_type::OK);
controller_->get_node()->set_parameter(
rclcpp::Parameter("left_wheel_names", rclcpp::ParameterValue(left_wheel_names)));
controller_->get_node()->set_parameter(
rclcpp::Parameter("right_wheel_names", rclcpp::ParameterValue(right_wheel_names)));
controller_->get_node()->set_parameter(rclcpp::Parameter("wheel_separation", 0.4));
controller_->get_node()->set_parameter(rclcpp::Parameter("wheel_radius", 1.0));
rclcpp::executors::SingleThreadedExecutor executor;
executor.add_node(controller_->get_node()->get_node_base_interface());
auto state = controller_->configure();
assignResourcesPosFeedback();
ASSERT_EQ(State::PRIMARY_STATE_INACTIVE, state.id());
EXPECT_EQ(0.01, left_wheel_vel_cmd_.get_value());
EXPECT_EQ(0.02, right_wheel_vel_cmd_.get_value());
state = controller_->activate();
ASSERT_EQ(State::PRIMARY_STATE_ACTIVE, state.id());
// send msg
const double linear = 1.0;
const double angular = 0.0;
publish(linear, angular);
  // wait for msg to be published to the system
ASSERT_TRUE(controller_->wait_for_twist(executor));
ASSERT_EQ(
controller_->update(rclcpp::Time(0, 0, RCL_ROS_TIME), rclcpp::Duration::from_seconds(0.01)),
controller_interface::return_type::OK);
EXPECT_EQ(1.0, left_wheel_vel_cmd_.get_value());
EXPECT_EQ(1.0, right_wheel_vel_cmd_.get_value());
// deactivated
  // wait so the controller processes the second point when deactivated
std::this_thread::sleep_for(std::chrono::milliseconds(500));
state = controller_->deactivate();
ASSERT_EQ(state.id(), State::PRIMARY_STATE_INACTIVE);
ASSERT_EQ(
controller_->update(rclcpp::Time(0, 0, RCL_ROS_TIME), rclcpp::Duration::from_seconds(0.01)),
controller_interface::return_type::OK);
EXPECT_EQ(0.0, left_wheel_vel_cmd_.get_value()) << "Wheels are halted on deactivate()";
EXPECT_EQ(0.0, right_wheel_vel_cmd_.get_value()) << "Wheels are halted on deactivate()";
// cleanup
state = controller_->cleanup();
ASSERT_EQ(State::PRIMARY_STATE_UNCONFIGURED, state.id());
EXPECT_EQ(0.0, left_wheel_vel_cmd_.get_value());
EXPECT_EQ(0.0, right_wheel_vel_cmd_.get_value());
state = controller_->configure();
ASSERT_EQ(State::PRIMARY_STATE_INACTIVE, state.id());
executor.cancel();
}
| 16,941 |
C++
| 37.768879 | 97 | 0.717077 |
Coriago/examplo-ros2/src/mecanum_controller/test/test_accumulator.cpp
|
// Copyright 2020 PAL Robotics SL.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef _MSC_VER
#define _USE_MATH_DEFINES
#endif
#include <mecanum_controller/rolling_mean_accumulator.hpp>
#include <gmock/gmock.h>
#include <cmath>
#include <memory>
TEST(TestAccumulator, test_accumulator)
{
constexpr double THRESHOLD = 1e-12;
mecanum_controller::RollingMeanAccumulator<double> accum(4);
accum.accumulate(1.);
EXPECT_NEAR(1., accum.getRollingMean(), THRESHOLD);
accum.accumulate(1.);
EXPECT_NEAR(1., accum.getRollingMean(), THRESHOLD);
accum.accumulate(5.);
EXPECT_NEAR(7. / 3., accum.getRollingMean(), THRESHOLD);
accum.accumulate(5.);
EXPECT_NEAR(12. / 4., accum.getRollingMean(), THRESHOLD);
// Start removing old values
accum.accumulate(5.);
EXPECT_NEAR(16. / 4., accum.getRollingMean(), THRESHOLD);
accum.accumulate(5.);
EXPECT_NEAR(20. / 4., accum.getRollingMean(), THRESHOLD);
}
TEST(TestAccumulator, spam_accumulator)
{
constexpr double THRESHOLD = 1e-12;
mecanum_controller::RollingMeanAccumulator<double> accum(10);
for (int i = 0; i < 10000; ++i)
{
accum.accumulate(M_PI);
EXPECT_NEAR(M_PI, accum.getRollingMean(), THRESHOLD);
}
}
| 1,712 |
C++
| 28.033898 | 75 | 0.718458 |
Coriago/examplo-ros2/src/mecanum_controller/test/config/test_mecanum_controller.yaml
|
test_mecanum_controller:
ros__parameters:
left_wheel_names: ["left_wheels"]
right_wheel_names: ["right_wheels"]
write_op_modes: ["motor_controller"]
wheel_separation: 0.40
wheels_per_side: 1 # actually 2, but both are controlled by 1 signal
wheel_radius: 0.02
wheel_separation_multiplier: 1.0
left_wheel_radius_multiplier: 1.0
right_wheel_radius_multiplier: 1.0
odom_frame_id: odom
base_frame_id: base_link
pose_covariance_diagonal: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
twist_covariance_diagonal: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
position_feedback: false
open_loop: true
enable_odom_tf: true
cmd_vel_timeout: 500 # milliseconds
publish_limited_velocity: true
velocity_rolling_window_size: 10
linear.x.has_velocity_limits: false
linear.x.has_acceleration_limits: false
linear.x.has_jerk_limits: false
linear.x.max_velocity: 0.0
linear.x.min_velocity: 0.0
linear.x.max_acceleration: 0.0
linear.x.max_jerk: 0.0
linear.x.min_jerk: 0.0
angular.z.has_velocity_limits: false
angular.z.has_acceleration_limits: false
angular.z.has_jerk_limits: false
angular.z.max_velocity: 0.0
angular.z.min_velocity: 0.0
angular.z.max_acceleration: 0.0
angular.z.min_acceleration: 0.0
angular.z.max_jerk: 0.0
angular.z.min_jerk: 0.0
| 1,356 |
YAML
| 28.499999 | 73 | 0.670354 |
Coriago/examplo-ros2/src/mecanum_controller/src/odometry.cpp
|
// Copyright 2020 PAL Robotics S.L.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Author: Enrique Fernández
*/
#include "mecanum_controller/odometry.hpp"
namespace mecanum_controller
{
Odometry::Odometry(size_t velocity_rolling_window_size)
: timestamp_(0.0),
x_(0.0),
y_(0.0),
heading_(0.0),
linear_(0.0),
angular_(0.0),
wheel_separation_(0.0),
left_wheel_radius_(0.0),
right_wheel_radius_(0.0),
left_wheel_old_pos_(0.0),
right_wheel_old_pos_(0.0),
velocity_rolling_window_size_(velocity_rolling_window_size),
linear_accumulator_(velocity_rolling_window_size),
angular_accumulator_(velocity_rolling_window_size)
{
}
void Odometry::init(const rclcpp::Time & time)
{
// Reset accumulators and timestamp:
resetAccumulators();
timestamp_ = time;
}
bool Odometry::update(double left_pos, double right_pos, const rclcpp::Time & time)
{
// We cannot estimate the speed with very small time intervals:
const double dt = time.seconds() - timestamp_.seconds();
if (dt < 0.0001)
{
return false; // Interval too small to integrate with
}
// Get current wheel joint positions:
const double left_wheel_cur_pos = left_pos * left_wheel_radius_;
const double right_wheel_cur_pos = right_pos * right_wheel_radius_;
// Estimate velocity of wheels using old and current position:
const double left_wheel_est_vel = left_wheel_cur_pos - left_wheel_old_pos_;
const double right_wheel_est_vel = right_wheel_cur_pos - right_wheel_old_pos_;
// Update old position with current:
left_wheel_old_pos_ = left_wheel_cur_pos;
right_wheel_old_pos_ = right_wheel_cur_pos;
updateFromVelocity(left_wheel_est_vel, right_wheel_est_vel, time);
return true;
}
bool Odometry::updateFromVelocity(double left_vel, double right_vel, const rclcpp::Time & time)
{
const double dt = time.seconds() - timestamp_.seconds();
// Compute linear and angular diff:
const double linear = (left_vel + right_vel) * 0.5;
  // NOTE: there is a known bug affecting the scout angular velocity here
const double angular = (right_vel - left_vel) / wheel_separation_;
// Integrate odometry:
integrateExact(linear, angular);
timestamp_ = time;
// Estimate speeds using a rolling mean to filter them out:
linear_accumulator_.accumulate(linear / dt);
angular_accumulator_.accumulate(angular / dt);
linear_ = linear_accumulator_.getRollingMean();
angular_ = angular_accumulator_.getRollingMean();
return true;
}
void Odometry::updateOpenLoop(double linear, double angular, const rclcpp::Time & time)
{
/// Save last linear and angular velocity:
linear_ = linear;
angular_ = angular;
/// Integrate odometry:
const double dt = time.seconds() - timestamp_.seconds();
timestamp_ = time;
integrateExact(linear * dt, angular * dt);
}
void Odometry::resetOdometry()
{
x_ = 0.0;
y_ = 0.0;
heading_ = 0.0;
}
void Odometry::setWheelParams(
double wheel_separation, double left_wheel_radius, double right_wheel_radius)
{
wheel_separation_ = wheel_separation;
left_wheel_radius_ = left_wheel_radius;
right_wheel_radius_ = right_wheel_radius;
}
void Odometry::setVelocityRollingWindowSize(size_t velocity_rolling_window_size)
{
velocity_rolling_window_size_ = velocity_rolling_window_size;
resetAccumulators();
}
void Odometry::integrateRungeKutta2(double linear, double angular)
{
const double direction = heading_ + angular * 0.5;
/// Runge-Kutta 2nd order integration:
x_ += linear * cos(direction);
y_ += linear * sin(direction);
heading_ += angular;
}
void Odometry::integrateExact(double linear, double angular)
{
if (fabs(angular) < 1e-6)
{
integrateRungeKutta2(linear, angular);
}
else
{
/// Exact integration (should solve problems when angular is zero):
const double heading_old = heading_;
const double r = linear / angular;
heading_ += angular;
x_ += r * (sin(heading_) - sin(heading_old));
y_ += -r * (cos(heading_) - cos(heading_old));
}
}
void Odometry::resetAccumulators()
{
linear_accumulator_ = RollingMeanAccumulator(velocity_rolling_window_size_);
angular_accumulator_ = RollingMeanAccumulator(velocity_rolling_window_size_);
}
} // namespace mecanum_controller
| 4,710 |
C++
| 27.379518 | 95 | 0.705945 |
Coriago/examplo-ros2/src/mecanum_controller/src/speed_limiter.cpp
|
// Copyright 2020 PAL Robotics S.L.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Author: Enrique Fernández
*/
#include <algorithm>
#include <stdexcept>
#include "mecanum_controller/speed_limiter.hpp"
#include "rcppmath/clamp.hpp"
namespace mecanum_controller
{
SpeedLimiter::SpeedLimiter(
bool has_velocity_limits, bool has_acceleration_limits, bool has_jerk_limits, double min_velocity,
double max_velocity, double min_acceleration, double max_acceleration, double min_jerk,
double max_jerk)
: has_velocity_limits_(has_velocity_limits),
has_acceleration_limits_(has_acceleration_limits),
has_jerk_limits_(has_jerk_limits),
min_velocity_(min_velocity),
max_velocity_(max_velocity),
min_acceleration_(min_acceleration),
max_acceleration_(max_acceleration),
min_jerk_(min_jerk),
max_jerk_(max_jerk)
{
// Check if limits are valid, max must be specified, min defaults to -max if unspecified
if (has_velocity_limits_)
{
if (std::isnan(max_velocity_))
{
throw std::runtime_error("Cannot apply velocity limits if max_velocity is not specified");
}
if (std::isnan(min_velocity_))
{
min_velocity_ = -max_velocity_;
}
}
if (has_acceleration_limits_)
{
if (std::isnan(max_acceleration_))
{
throw std::runtime_error(
"Cannot apply acceleration limits if max_acceleration is not specified");
}
if (std::isnan(min_acceleration_))
{
min_acceleration_ = -max_acceleration_;
}
}
if (has_jerk_limits_)
{
if (std::isnan(max_jerk_))
{
throw std::runtime_error("Cannot apply jerk limits if max_jerk is not specified");
}
if (std::isnan(min_jerk_))
{
min_jerk_ = -max_jerk_;
}
}
}
double SpeedLimiter::limit(double & v, double v0, double v1, double dt)
{
const double tmp = v;
limit_jerk(v, v0, v1, dt);
limit_acceleration(v, v0, dt);
limit_velocity(v);
return tmp != 0.0 ? v / tmp : 1.0;
}
double SpeedLimiter::limit_velocity(double & v)
{
const double tmp = v;
if (has_velocity_limits_)
{
v = rcppmath::clamp(v, min_velocity_, max_velocity_);
}
return tmp != 0.0 ? v / tmp : 1.0;
}
double SpeedLimiter::limit_acceleration(double & v, double v0, double dt)
{
const double tmp = v;
if (has_acceleration_limits_)
{
const double dv_min = min_acceleration_ * dt;
const double dv_max = max_acceleration_ * dt;
const double dv = rcppmath::clamp(v - v0, dv_min, dv_max);
v = v0 + dv;
}
return tmp != 0.0 ? v / tmp : 1.0;
}
double SpeedLimiter::limit_jerk(double & v, double v0, double v1, double dt)
{
const double tmp = v;
if (has_jerk_limits_)
{
const double dv = v - v0;
const double dv0 = v0 - v1;
const double dt2 = 2. * dt * dt;
const double da_min = min_jerk_ * dt2;
const double da_max = max_jerk_ * dt2;
const double da = rcppmath::clamp(dv - dv0, da_min, da_max);
v = v0 + dv0 + da;
}
return tmp != 0.0 ? v / tmp : 1.0;
}
} // namespace mecanum_controller
| 3,529 |
C++
| 24.035461 | 100 | 0.655143 |
Coriago/examplo-ros2/src/mecanum_controller/src/mecanum_controller.cpp
|
// Copyright 2020 PAL Robotics S.L.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Author: Bence Magyar, Enrique Fernández, Manuel Meraz
*/
#include <memory>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "mecanum_controller/mecanum_controller.hpp"
#include "hardware_interface/types/hardware_interface_type_values.hpp"
#include "lifecycle_msgs/msg/state.hpp"
#include "rclcpp/logging.hpp"
namespace
{
constexpr auto DEFAULT_COMMAND_TOPIC = "~/cmd_vel";
constexpr auto DEFAULT_COMMAND_UNSTAMPED_TOPIC = "~/cmd_vel_unstamped";
constexpr auto DEFAULT_COMMAND_OUT_TOPIC = "~/cmd_vel_out";
} // namespace
namespace mecanum_controller
{
using namespace std::chrono_literals;
using controller_interface::interface_configuration_type;
using controller_interface::InterfaceConfiguration;
using hardware_interface::HW_IF_POSITION;
using hardware_interface::HW_IF_VELOCITY;
using lifecycle_msgs::msg::State;
Wheel::Wheel(std::reference_wrapper<hardware_interface::LoanedCommandInterface> velocity,
std::string name) : velocity_(velocity), name(std::move(name)) {}
void Wheel::set_velocity(double velocity)
{
velocity_.get().set_value(velocity);
}
MecanumController::MecanumController() : controller_interface::ControllerInterface() {}
CallbackReturn MecanumController::on_init()
{
try
{
// with the lifecycle node being initialized, we can declare parameters
auto_declare<std::string>("front_left_joint", front_left_joint_name_);
auto_declare<std::string>("front_right_joint", front_right_joint_name_);
auto_declare<std::string>("rear_left_joint", rear_left_joint_name_);
auto_declare<std::string>("rear_right_joint", rear_right_joint_name_);
auto_declare<double>("chassis_center_to_axle", wheel_params_.x_offset);
auto_declare<double>("axle_center_to_wheel", wheel_params_.y_offset);
auto_declare<double>("wheel_radius", wheel_params_.radius);
auto_declare<double>("cmd_vel_timeout", cmd_vel_timeout_.count() / 1000.0);
auto_declare<bool>("use_stamped_vel", use_stamped_vel_);
}
catch (const std::exception & e)
{
fprintf(stderr, "Exception thrown during init stage with message: %s \n", e.what());
return CallbackReturn::ERROR;
}
return CallbackReturn::SUCCESS;
}
InterfaceConfiguration MecanumController::command_interface_configuration() const
{
std::vector<std::string> conf_names;
conf_names.push_back(front_left_joint_name_ + "/" + HW_IF_VELOCITY);
conf_names.push_back(front_right_joint_name_ + "/" + HW_IF_VELOCITY);
conf_names.push_back(rear_left_joint_name_ + "/" + HW_IF_VELOCITY);
conf_names.push_back(rear_right_joint_name_ + "/" + HW_IF_VELOCITY);
return {interface_configuration_type::INDIVIDUAL, conf_names};
}
InterfaceConfiguration MecanumController::state_interface_configuration() const
{
return {interface_configuration_type::NONE};
}
controller_interface::return_type MecanumController::update(
const rclcpp::Time & time, const rclcpp::Duration & /*period*/)
{
auto logger = node_->get_logger();
if (get_state().id() == State::PRIMARY_STATE_INACTIVE)
{
if (!is_halted)
{
halt();
is_halted = true;
}
return controller_interface::return_type::OK;
}
const auto current_time = time;
std::shared_ptr<Twist> last_command_msg;
received_velocity_msg_ptr_.get(last_command_msg);
if (last_command_msg == nullptr)
{
RCLCPP_WARN(logger, "Velocity message received was a nullptr.");
return controller_interface::return_type::ERROR;
}
const auto age_of_last_command = current_time - last_command_msg->header.stamp;
// Brake if cmd_vel has timeout, override the stored command
if (age_of_last_command > cmd_vel_timeout_)
{
last_command_msg->twist.linear.x = 0.0;
last_command_msg->twist.angular.z = 0.0;
}
Twist command = *last_command_msg;
double & linear_x_cmd = command.twist.linear.x;
double & linear_y_cmd = command.twist.linear.y;
double & angular_cmd = command.twist.angular.z;
double x_offset = wheel_params_.x_offset;
double y_offset = wheel_params_.y_offset;
double radius = wheel_params_.radius;
// Compute Wheel Velocities
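  // Standard mecanum inverse kinematics (X-configuration): each wheel's
  // angular velocity is (vx +/- vy +/- (lx + ly) * wz) / r, where the signed
  // offsets below encode the per-wheel lever arm +/-(x_offset + y_offset):
  //   w_fl = (vx - vy - (lx + ly) * wz) / r
  //   w_fr = (vx + vy + (lx + ly) * wz) / r
  //   w_rl = (vx + vy - (lx + ly) * wz) / r
  //   w_rr = (vx - vy + (lx + ly) * wz) / r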
const double front_left_offset = (-1 * x_offset + -1 * y_offset);
const double front_right_offset = (x_offset + y_offset);
const double rear_left_offset = (-1 * x_offset + -1 * y_offset);
const double rear_right_offset = (x_offset + y_offset);
const double front_left_velocity = (front_left_offset * angular_cmd + linear_x_cmd - linear_y_cmd) / radius;
const double front_right_velocity = (front_right_offset * angular_cmd + linear_x_cmd + linear_y_cmd) / radius;
const double rear_left_velocity = (rear_left_offset * angular_cmd + linear_x_cmd + linear_y_cmd) / radius;
const double rear_right_velocity = (rear_right_offset * angular_cmd + linear_x_cmd - linear_y_cmd) / radius;
// Set Wheel Velocities
front_left_handle_->set_velocity(front_left_velocity);
front_right_handle_->set_velocity(front_right_velocity);
rear_left_handle_->set_velocity(rear_left_velocity);
rear_right_handle_->set_velocity(rear_right_velocity);
// Time update
const auto update_dt = current_time - previous_update_timestamp_;
previous_update_timestamp_ = current_time;
return controller_interface::return_type::OK;
}
CallbackReturn MecanumController::on_configure(const rclcpp_lifecycle::State &)
{
auto logger = node_->get_logger();
// Get Parameters
front_left_joint_name_ = node_->get_parameter("front_left_joint").as_string();
front_right_joint_name_ = node_->get_parameter("front_right_joint").as_string();
rear_left_joint_name_ = node_->get_parameter("rear_left_joint").as_string();
rear_right_joint_name_ = node_->get_parameter("rear_right_joint").as_string();
if (front_left_joint_name_.empty()) {
RCLCPP_ERROR(logger, "front_left_joint_name is not set");
return CallbackReturn::ERROR;
}
if (front_right_joint_name_.empty()) {
RCLCPP_ERROR(logger, "front_right_joint_name is not set");
return CallbackReturn::ERROR;
}
if (rear_left_joint_name_.empty()) {
RCLCPP_ERROR(logger, "rear_left_joint_name is not set");
return CallbackReturn::ERROR;
}
if (rear_right_joint_name_.empty()) {
RCLCPP_ERROR(logger, "rear_right_joint_name is not set");
return CallbackReturn::ERROR;
}
wheel_params_.x_offset = node_->get_parameter("chassis_center_to_axle").as_double();
wheel_params_.y_offset = node_->get_parameter("axle_center_to_wheel").as_double();
wheel_params_.radius = node_->get_parameter("wheel_radius").as_double();
cmd_vel_timeout_ = std::chrono::milliseconds{
static_cast<int>(node_->get_parameter("cmd_vel_timeout").as_double() * 1000.0)};
use_stamped_vel_ = node_->get_parameter("use_stamped_vel").as_bool();
// Run reset to make sure everything is initialized correctly
if (!reset())
{
return CallbackReturn::ERROR;
}
const Twist empty_twist;
received_velocity_msg_ptr_.set(std::make_shared<Twist>(empty_twist));
// initialize command subscriber
if (use_stamped_vel_)
{
velocity_command_subscriber_ = node_->create_subscription<Twist>(
DEFAULT_COMMAND_TOPIC, rclcpp::SystemDefaultsQoS(),
[this](const std::shared_ptr<Twist> msg) -> void {
if (!subscriber_is_active_)
{
RCLCPP_WARN(node_->get_logger(), "Can't accept new commands. subscriber is inactive");
return;
}
if ((msg->header.stamp.sec == 0) && (msg->header.stamp.nanosec == 0))
{
RCLCPP_WARN_ONCE(
node_->get_logger(),
"Received TwistStamped with zero timestamp, setting it to current "
"time, this message will only be shown once");
msg->header.stamp = node_->get_clock()->now();
}
received_velocity_msg_ptr_.set(std::move(msg));
});
}
else
{
velocity_command_unstamped_subscriber_ = node_->create_subscription<geometry_msgs::msg::Twist>(
DEFAULT_COMMAND_UNSTAMPED_TOPIC, rclcpp::SystemDefaultsQoS(),
[this](const std::shared_ptr<geometry_msgs::msg::Twist> msg) -> void {
if (!subscriber_is_active_)
{
RCLCPP_WARN(node_->get_logger(), "Can't accept new commands. subscriber is inactive");
return;
}
// Write fake header in the stored stamped command
std::shared_ptr<Twist> twist_stamped;
received_velocity_msg_ptr_.get(twist_stamped);
twist_stamped->twist = *msg;
twist_stamped->header.stamp = node_->get_clock()->now();
});
}
previous_update_timestamp_ = node_->get_clock()->now();
return CallbackReturn::SUCCESS;
}
CallbackReturn MecanumController::on_activate(const rclcpp_lifecycle::State &)
{
front_left_handle_ = get_wheel(front_left_joint_name_);
front_right_handle_ = get_wheel(front_right_joint_name_);
rear_left_handle_ = get_wheel(rear_left_joint_name_);
rear_right_handle_ = get_wheel(rear_right_joint_name_);
if (!front_left_handle_ || !front_right_handle_ || !rear_left_handle_ || !rear_right_handle_)
{
return CallbackReturn::ERROR;
}
is_halted = false;
subscriber_is_active_ = true;
RCLCPP_DEBUG(node_->get_logger(), "Subscriber and publisher are now active.");
return CallbackReturn::SUCCESS;
}
CallbackReturn MecanumController::on_deactivate(const rclcpp_lifecycle::State &)
{
subscriber_is_active_ = false;
return CallbackReturn::SUCCESS;
}
CallbackReturn MecanumController::on_cleanup(const rclcpp_lifecycle::State &)
{
if (!reset())
{
return CallbackReturn::ERROR;
}
received_velocity_msg_ptr_.set(std::make_shared<Twist>());
return CallbackReturn::SUCCESS;
}
CallbackReturn MecanumController::on_error(const rclcpp_lifecycle::State &)
{
if (!reset())
{
return CallbackReturn::ERROR;
}
return CallbackReturn::SUCCESS;
}
bool MecanumController::reset()
{
subscriber_is_active_ = false;
velocity_command_subscriber_.reset();
velocity_command_unstamped_subscriber_.reset();
received_velocity_msg_ptr_.set(nullptr);
is_halted = false;
return true;
}
CallbackReturn MecanumController::on_shutdown(const rclcpp_lifecycle::State &)
{
return CallbackReturn::SUCCESS;
}
void MecanumController::halt()
{
front_left_handle_->set_velocity(0.0);
front_right_handle_->set_velocity(0.0);
rear_left_handle_->set_velocity(0.0);
rear_right_handle_->set_velocity(0.0);
auto logger = node_->get_logger();
RCLCPP_WARN(logger, "-----HALT CALLED : STOPPING ALL MOTORS-----");
}
std::shared_ptr<Wheel> MecanumController::get_wheel( const std::string & wheel_name )
{
auto logger = node_->get_logger();
if (wheel_name.empty())
{
RCLCPP_ERROR(logger, "Wheel joint name not given. Make sure all joints are specified.");
return nullptr;
}
// Get Command Handle for joint
const auto command_handle = std::find_if(
command_interfaces_.begin(), command_interfaces_.end(),
[&wheel_name](const auto & interface) {
return interface.get_name() == wheel_name &&
interface.get_interface_name() == HW_IF_VELOCITY;
});
if (command_handle == command_interfaces_.end())
{
RCLCPP_ERROR(logger, "Unable to obtain joint command handle for %s", wheel_name.c_str());
return nullptr;
}
return std::make_shared<Wheel>(std::ref(*command_handle), wheel_name);
}
} // namespace mecanum_controller
#include "class_loader/register_macro.hpp"
CLASS_LOADER_REGISTER_CLASS(
mecanum_controller::MecanumController, controller_interface::ControllerInterface)
| 12,060 |
C++
| 32.974648 | 112 | 0.689884 |
Coriago/examplo-ros2/src/mecanum_controller/doc/userdoc.rst
|
.. _diff_drive_controller_userdoc:
diff_drive_controller
=====================
Controller for mobile robots with differential drive.
Input for control are robot body velocity commands which are translated to wheel commands for the differential drive base.
Odometry is computed from hardware feedback and published.
Velocity commands
-----------------
The controller works with a velocity twist from which it extracts the x component of the linear velocity and the z component of the angular velocity. Velocities on other components are ignored.
Hardware interface type
-----------------------
The controller works with wheel joints through a velocity interface.
Other features
--------------
* Realtime-safe implementation
* Odometry publishing
* Task-space velocity, acceleration and jerk limits
* Automatic stop after command time-out
| 854 |
reStructuredText
| 30.666666 | 193 | 0.737705 |
Coriago/examplo-ros2/src/mecanum_controller/include/mecanum_controller/odometry.hpp
|
// Copyright 2020 PAL Robotics S.L.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Author: Luca Marchionni
* Author: Bence Magyar
* Author: Enrique Fernández
* Author: Paul Mathieu
*/
#ifndef MECANUM_CONTROLLER__ODOMETRY_HPP_
#define MECANUM_CONTROLLER__ODOMETRY_HPP_
#include <cmath>
#include "mecanum_controller/rolling_mean_accumulator.hpp"
#include "rclcpp/time.hpp"
namespace mecanum_controller
{
class Odometry
{
public:
explicit Odometry(size_t velocity_rolling_window_size = 10);
void init(const rclcpp::Time & time);
bool update(double left_pos, double right_pos, const rclcpp::Time & time);
bool updateFromVelocity(double left_vel, double right_vel, const rclcpp::Time & time);
void updateOpenLoop(double linear, double angular, const rclcpp::Time & time);
void resetOdometry();
double getX() const { return x_; }
double getY() const { return y_; }
double getHeading() const { return heading_; }
double getLinear() const { return linear_; }
double getAngular() const { return angular_; }
void setWheelParams(double wheel_separation, double left_wheel_radius, double right_wheel_radius);
void setVelocityRollingWindowSize(size_t velocity_rolling_window_size);
private:
using RollingMeanAccumulator = mecanum_controller::RollingMeanAccumulator<double>;
void integrateRungeKutta2(double linear, double angular);
void integrateExact(double linear, double angular);
void resetAccumulators();
// Current timestamp:
rclcpp::Time timestamp_;
// Current pose:
double x_; // [m]
double y_; // [m]
double heading_; // [rad]
// Current velocity:
double linear_; // [m/s]
double angular_; // [rad/s]
// Wheel kinematic parameters [m]:
double wheel_separation_;
double left_wheel_radius_;
double right_wheel_radius_;
// Previous wheel position/state [rad]:
double left_wheel_old_pos_;
double right_wheel_old_pos_;
// Rolling mean accumulators for the linear and angular velocities:
size_t velocity_rolling_window_size_;
RollingMeanAccumulator linear_accumulator_;
RollingMeanAccumulator angular_accumulator_;
};
} // namespace mecanum_controller
#endif // MECANUM_CONTROLLER__ODOMETRY_HPP_
| 2,725 |
C++
| 29.629213 | 100 | 0.725872 |
Coriago/examplo-ros2/src/mecanum_controller/include/mecanum_controller/visibility_control.h
|
// Copyright 2017 Open Source Robotics Foundation, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* This header must be included by all rclcpp headers which declare symbols
* which are defined in the rclcpp library. When not building the rclcpp
* library, i.e. when using the headers in other package's code, the contents
* of this header change the visibility of certain symbols which the rclcpp
 * library cannot have, but the consuming code must have in order to link.
*/
#ifndef MECANUM_CONTROLLER__VISIBILITY_CONTROL_H_
#define MECANUM_CONTROLLER__VISIBILITY_CONTROL_H_
// This logic was borrowed (then namespaced) from the examples on the gcc wiki:
// https://gcc.gnu.org/wiki/Visibility
#if defined _WIN32 || defined __CYGWIN__
#ifdef __GNUC__
#define MECANUM_CONTROLLER_EXPORT __attribute__((dllexport))
#define MECANUM_CONTROLLER_IMPORT __attribute__((dllimport))
#else
#define MECANUM_CONTROLLER_EXPORT __declspec(dllexport)
#define MECANUM_CONTROLLER_IMPORT __declspec(dllimport)
#endif
#ifdef MECANUM_CONTROLLER_BUILDING_DLL
#define MECANUM_CONTROLLER_PUBLIC MECANUM_CONTROLLER_EXPORT
#else
#define MECANUM_CONTROLLER_PUBLIC MECANUM_CONTROLLER_IMPORT
#endif
#define MECANUM_CONTROLLER_PUBLIC_TYPE MECANUM_CONTROLLER_PUBLIC
#define MECANUM_CONTROLLER_LOCAL
#else
#define MECANUM_CONTROLLER_EXPORT __attribute__((visibility("default")))
#define MECANUM_CONTROLLER_IMPORT
#if __GNUC__ >= 4
#define MECANUM_CONTROLLER_PUBLIC __attribute__((visibility("default")))
#define MECANUM_CONTROLLER_LOCAL __attribute__((visibility("hidden")))
#else
#define MECANUM_CONTROLLER_PUBLIC
#define MECANUM_CONTROLLER_LOCAL
#endif
#define MECANUM_CONTROLLER_PUBLIC_TYPE
#endif
#endif // MECANUM_CONTROLLER__VISIBILITY_CONTROL_H_
| 2,251 |
C
| 38.508771 | 79 | 0.76988 |
Coriago/examplo-ros2/src/mecanum_controller/include/mecanum_controller/rolling_mean_accumulator.hpp
|
// Copyright 2020 PAL Robotics S.L.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Author: Víctor López
*/
#ifndef MECANUM_CONTROLLER__ROLLING_MEAN_ACCUMULATOR_HPP_
#define MECANUM_CONTROLLER__ROLLING_MEAN_ACCUMULATOR_HPP_
#include <cassert>
#include <cstdio>
#include <vector>
namespace mecanum_controller
{
/**
* \brief Simplification of boost::accumulators::accumulator_set<double,
* bacc::stats<bacc::tag::rolling_mean>> to avoid dragging boost dependencies
*
* Computes the mean of the last accumulated elements
*/
template <typename T>
class RollingMeanAccumulator
{
public:
explicit RollingMeanAccumulator(size_t rolling_window_size)
: buffer_(rolling_window_size, 0.0), next_insert_(0), sum_(0.0), buffer_filled_(false)
{
}
void accumulate(T val)
{
sum_ -= buffer_[next_insert_];
sum_ += val;
buffer_[next_insert_] = val;
next_insert_++;
buffer_filled_ |= next_insert_ >= buffer_.size();
next_insert_ = next_insert_ % buffer_.size();
}
T getRollingMean() const
{
size_t valid_data_count = buffer_filled_ * buffer_.size() + !buffer_filled_ * next_insert_;
assert(valid_data_count > 0);
return sum_ / valid_data_count;
}
private:
std::vector<T> buffer_;
size_t next_insert_;
T sum_;
bool buffer_filled_;
};
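// Usage sketch (hypothetical values): a window of 3 averages the last three
// samples once the buffer has filled.
//
//   RollingMeanAccumulator<double> acc(3);
//   acc.accumulate(1.0);
//   acc.accumulate(2.0);
//   acc.accumulate(3.0);
//   acc.accumulate(4.0);                  // overwrites 1.0
//   double mean = acc.getRollingMean();   // (2 + 3 + 4) / 3 = 3.0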
} // namespace mecanum_controller
#endif // MECANUM_CONTROLLER__ROLLING_MEAN_ACCUMULATOR_HPP_
| 1,904 |
C++
| 27.014705 | 95 | 0.700105 |
Coriago/examplo-ros2/src/mecanum_controller/include/mecanum_controller/speed_limiter.hpp
|
// Copyright 2020 PAL Robotics S.L.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Author: Enrique Fernández
*/
#ifndef MECANUM_CONTROLLER__SPEED_LIMITER_HPP_
#define MECANUM_CONTROLLER__SPEED_LIMITER_HPP_
#include <cmath>
namespace mecanum_controller
{
class SpeedLimiter
{
public:
/**
* \brief Constructor
* \param [in] has_velocity_limits if true, applies velocity limits
* \param [in] has_acceleration_limits if true, applies acceleration limits
* \param [in] has_jerk_limits if true, applies jerk limits
* \param [in] min_velocity Minimum velocity [m/s], usually <= 0
* \param [in] max_velocity Maximum velocity [m/s], usually >= 0
* \param [in] min_acceleration Minimum acceleration [m/s^2], usually <= 0
* \param [in] max_acceleration Maximum acceleration [m/s^2], usually >= 0
* \param [in] min_jerk Minimum jerk [m/s^3], usually <= 0
* \param [in] max_jerk Maximum jerk [m/s^3], usually >= 0
*/
SpeedLimiter(
bool has_velocity_limits = false, bool has_acceleration_limits = false,
bool has_jerk_limits = false, double min_velocity = NAN, double max_velocity = NAN,
double min_acceleration = NAN, double max_acceleration = NAN, double min_jerk = NAN,
double max_jerk = NAN);
/**
* \brief Limit the velocity and acceleration
* \param [in, out] v Velocity [m/s]
* \param [in] v0 Previous velocity to v [m/s]
* \param [in] v1 Previous velocity to v0 [m/s]
* \param [in] dt Time step [s]
* \return Limiting factor (1.0 if none)
*/
double limit(double & v, double v0, double v1, double dt);
/**
* \brief Limit the velocity
* \param [in, out] v Velocity [m/s]
* \return Limiting factor (1.0 if none)
*/
double limit_velocity(double & v);
/**
* \brief Limit the acceleration
* \param [in, out] v Velocity [m/s]
* \param [in] v0 Previous velocity [m/s]
* \param [in] dt Time step [s]
* \return Limiting factor (1.0 if none)
*/
double limit_acceleration(double & v, double v0, double dt);
/**
* \brief Limit the jerk
* \param [in, out] v Velocity [m/s]
* \param [in] v0 Previous velocity to v [m/s]
* \param [in] v1 Previous velocity to v0 [m/s]
* \param [in] dt Time step [s]
* \return Limiting factor (1.0 if none)
* \see http://en.wikipedia.org/wiki/Jerk_%28physics%29#Motion_control
*/
double limit_jerk(double & v, double v0, double v1, double dt);
private:
// Enable/Disable velocity/acceleration/jerk limits:
bool has_velocity_limits_;
bool has_acceleration_limits_;
bool has_jerk_limits_;
// Velocity limits:
double min_velocity_;
double max_velocity_;
// Acceleration limits:
double min_acceleration_;
double max_acceleration_;
// Jerk limits:
double min_jerk_;
double max_jerk_;
};
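// Usage sketch (hypothetical limits), assuming limits are applied in the
// order jerk -> acceleration -> velocity, as in the stock implementation:
//
//   SpeedLimiter limiter(true, true, false,
//                        -1.0, 1.0,    // velocity limits [m/s]
//                        -0.5, 0.5);   // acceleration limits [m/s^2]
//   double v = 2.0;                    // requested velocity
//   limiter.limit(v, /*v0=*/0.0, /*v1=*/0.0, /*dt=*/0.1);
//   // v is now 0.05: clamped to 0.5 m/s^2 * 0.1 s above the previous velocity.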
} // namespace mecanum_controller
#endif // MECANUM_CONTROLLER__SPEED_LIMITER_HPP_
| 3,435 |
C++
| 31.415094 | 88 | 0.661718 |
Coriago/examplo-ros2/src/mecanum_controller/include/mecanum_controller/mecanum_controller.hpp
|
// Copyright 2020 PAL Robotics S.L.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Author: Bence Magyar, Enrique Fernández, Manuel Meraz
*/
#ifndef MECANUM_CONTROLLER__MECANUM_CONTROLLER_HPP_
#define MECANUM_CONTROLLER__MECANUM_CONTROLLER_HPP_
#include <chrono>
#include <cmath>
#include <memory>
#include <queue>
#include <string>
#include <vector>
#include "controller_interface/controller_interface.hpp"
#include "mecanum_controller/visibility_control.h"
#include "geometry_msgs/msg/twist.hpp"
#include "geometry_msgs/msg/twist_stamped.hpp"
#include "hardware_interface/handle.hpp"
#include "rclcpp/rclcpp.hpp"
#include "rclcpp_lifecycle/state.hpp"
#include "realtime_tools/realtime_box.h"
#include "realtime_tools/realtime_buffer.h"
#include "realtime_tools/realtime_publisher.h"
#include <hardware_interface/loaned_command_interface.hpp>
namespace mecanum_controller
{
using CallbackReturn = rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn;
class Wheel {
public:
Wheel(std::reference_wrapper<hardware_interface::LoanedCommandInterface> velocity, std::string name);
void set_velocity(double velocity);
private:
std::reference_wrapper<hardware_interface::LoanedCommandInterface> velocity_;
std::string name;
};
class MecanumController : public controller_interface::ControllerInterface
{
using Twist = geometry_msgs::msg::TwistStamped;
public:
MECANUM_CONTROLLER_PUBLIC
MecanumController();
MECANUM_CONTROLLER_PUBLIC
controller_interface::InterfaceConfiguration command_interface_configuration() const override;
MECANUM_CONTROLLER_PUBLIC
controller_interface::InterfaceConfiguration state_interface_configuration() const override;
MECANUM_CONTROLLER_PUBLIC
controller_interface::return_type update(
const rclcpp::Time & time, const rclcpp::Duration & period) override;
MECANUM_CONTROLLER_PUBLIC
CallbackReturn on_init() override;
MECANUM_CONTROLLER_PUBLIC
CallbackReturn on_configure(const rclcpp_lifecycle::State & previous_state) override;
MECANUM_CONTROLLER_PUBLIC
CallbackReturn on_activate(const rclcpp_lifecycle::State & previous_state) override;
MECANUM_CONTROLLER_PUBLIC
CallbackReturn on_deactivate(const rclcpp_lifecycle::State & previous_state) override;
MECANUM_CONTROLLER_PUBLIC
CallbackReturn on_cleanup(const rclcpp_lifecycle::State & previous_state) override;
MECANUM_CONTROLLER_PUBLIC
CallbackReturn on_error(const rclcpp_lifecycle::State & previous_state) override;
MECANUM_CONTROLLER_PUBLIC
CallbackReturn on_shutdown(const rclcpp_lifecycle::State & previous_state) override;
protected:
std::shared_ptr<Wheel> get_wheel(const std::string & wheel_name);
std::shared_ptr<Wheel> front_left_handle_;
std::shared_ptr<Wheel> front_right_handle_;
std::shared_ptr<Wheel> rear_left_handle_;
std::shared_ptr<Wheel> rear_right_handle_;
std::string front_left_joint_name_;
std::string front_right_joint_name_;
std::string rear_left_joint_name_;
std::string rear_right_joint_name_;
struct WheelParams
{
double x_offset = 0.0; // Chassis Center to Axle Center
double y_offset = 0.0; // Axle Center to Wheel Center
double radius = 0.0; // Assumed to be the same for all wheels
} wheel_params_;
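  // With lx = x_offset, ly = y_offset and r = radius, the usual mecanum
  // inverse kinematics (assuming an X roller configuration) maps a body
  // twist (vx, vy, wz) to wheel angular velocities:
  //   w_fl = (vx - vy - (lx + ly) * wz) / r
  //   w_fr = (vx + vy + (lx + ly) * wz) / r
  //   w_rl = (vx + vy - (lx + ly) * wz) / r
  //   w_rr = (vx - vy + (lx + ly) * wz) / r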
// Timeout to consider cmd_vel commands old
std::chrono::milliseconds cmd_vel_timeout_{500};
rclcpp::Time previous_update_timestamp_{0};
// Topic Subscription
bool subscriber_is_active_ = false;
rclcpp::Subscription<Twist>::SharedPtr velocity_command_subscriber_ = nullptr;
rclcpp::Subscription<geometry_msgs::msg::Twist>::SharedPtr
velocity_command_unstamped_subscriber_ = nullptr;
realtime_tools::RealtimeBox<std::shared_ptr<Twist>> received_velocity_msg_ptr_{nullptr};
bool is_halted = false;
bool use_stamped_vel_ = true;
bool reset();
void halt();
};
} // namespace mecanum_controller
#endif // MECANUM_CONTROLLER__MECANUM_CONTROLLER_HPP_
| 4,454 |
C++
| 32.49624 | 105 | 0.759542 |
Coriago/examplo-ros2/src/robot_hardware/robot_hardware.xml
|
<library path="robot_hardware">
<class name="robot_hardware/IsaacDriveHardware"
type="robot_hardware::IsaacDriveHardware"
base_class_type="hardware_interface::SystemInterface">
<description>
The ROS2 Control hardware interface to talk with Isaac Sim robot drive train.
</description>
</class>
</library>
| 339 |
XML
| 36.777774 | 83 | 0.713864 |
Coriago/examplo-ros2/src/robot_hardware/src/isaac_drive.cpp
|
// Copyright 2021 ros2_control Development Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "robot_hardware/isaac_drive.hpp"
#include <chrono>
#include <cmath>
#include <limits>
#include <memory>
#include <vector>
#include "hardware_interface/types/hardware_interface_type_values.hpp"
#include "rclcpp/rclcpp.hpp"
#include "sensor_msgs/msg/joint_state.hpp"
using std::placeholders::_1;
namespace robot_hardware
{
CallbackReturn IsaacDriveHardware::on_init(const hardware_interface::HardwareInfo & info)
{
// rmw_qos_profile_t custom_qos_profile = rmw_qos_profile_default;
// custom_qos_profile.depth = 7;
node_ = rclcpp::Node::make_shared("isaac_hardware_interface");
// PUBLISHER SETUP
isaac_publisher_ = node_->create_publisher<sensor_msgs::msg::JointState>("isaac_joint_commands", rclcpp::SystemDefaultsQoS());
realtime_isaac_publisher_ = std::make_shared<realtime_tools::RealtimePublisher<sensor_msgs::msg::JointState>>(
isaac_publisher_);
// SUBSCRIBER SETUP
const sensor_msgs::msg::JointState empty_joint_state;
received_joint_msg_ptr_.set(std::make_shared<sensor_msgs::msg::JointState>(empty_joint_state));
isaac_subscriber_ = node_->create_subscription<sensor_msgs::msg::JointState>("isaac_joint_states", rclcpp::SystemDefaultsQoS(),
[this](const std::shared_ptr<sensor_msgs::msg::JointState> msg) -> void
{
if (!subscriber_is_active_) {
RCLCPP_WARN( rclcpp::get_logger("isaac_hardware_interface"), "Can't accept new commands. subscriber is inactive");
return;
}
received_joint_msg_ptr_.set(std::move(msg));
});
// INTERFACE SETUP
if (hardware_interface::SystemInterface::on_init(info) != CallbackReturn::SUCCESS)
{
return CallbackReturn::ERROR;
}
// hw_start_sec_ = stod(info_.hardware_parameters["example_param_hw_start_duration_sec"]);
// hw_stop_sec_ = stod(info_.hardware_parameters["example_param_hw_stop_duration_sec"]);
hw_positions_.resize(info_.joints.size(), std::numeric_limits<double>::quiet_NaN());
hw_velocities_.resize(info_.joints.size(), std::numeric_limits<double>::quiet_NaN());
hw_commands_.resize(info_.joints.size(), std::numeric_limits<double>::quiet_NaN());
for (const hardware_interface::ComponentInfo & joint : info_.joints)
{
joint_names_.push_back(joint.name);
if (joint.command_interfaces.size() != 1)
{
RCLCPP_FATAL(
rclcpp::get_logger("IsaacDriveHardware"),
"Joint '%s' has %zu command interfaces found. 1 expected.", joint.name.c_str(),
joint.command_interfaces.size());
return CallbackReturn::ERROR;
}
if (joint.command_interfaces[0].name != hardware_interface::HW_IF_VELOCITY)
{
RCLCPP_FATAL(
rclcpp::get_logger("IsaacDriveHardware"),
"Joint '%s' have %s command interfaces found. '%s' expected.", joint.name.c_str(),
joint.command_interfaces[0].name.c_str(), hardware_interface::HW_IF_VELOCITY);
return CallbackReturn::ERROR;
}
if (joint.state_interfaces.size() != 2)
{
RCLCPP_FATAL(
rclcpp::get_logger("IsaacDriveHardware"),
"Joint '%s' has %zu state interface. 2 expected.", joint.name.c_str(),
joint.state_interfaces.size());
return CallbackReturn::ERROR;
}
if (joint.state_interfaces[0].name != hardware_interface::HW_IF_POSITION)
{
RCLCPP_FATAL(
rclcpp::get_logger("IsaacDriveHardware"),
"Joint '%s' have '%s' as first state interface. '%s' expected.", joint.name.c_str(),
joint.state_interfaces[0].name.c_str(), hardware_interface::HW_IF_POSITION);
return CallbackReturn::ERROR;
}
if (joint.state_interfaces[1].name != hardware_interface::HW_IF_VELOCITY)
{
RCLCPP_FATAL(
rclcpp::get_logger("IsaacDriveHardware"),
"Joint '%s' have '%s' as second state interface. '%s' expected.", joint.name.c_str(),
joint.state_interfaces[1].name.c_str(), hardware_interface::HW_IF_VELOCITY);
return CallbackReturn::ERROR;
}
}
return CallbackReturn::SUCCESS;
}
std::vector<hardware_interface::StateInterface> IsaacDriveHardware::export_state_interfaces()
{
std::vector<hardware_interface::StateInterface> state_interfaces;
for (auto i = 0u; i < info_.joints.size(); i++)
{
state_interfaces.emplace_back(hardware_interface::StateInterface(
info_.joints[i].name, hardware_interface::HW_IF_POSITION, &hw_positions_[i]));
state_interfaces.emplace_back(hardware_interface::StateInterface(
info_.joints[i].name, hardware_interface::HW_IF_VELOCITY, &hw_velocities_[i]));
}
return state_interfaces;
}
std::vector<hardware_interface::CommandInterface> IsaacDriveHardware::export_command_interfaces()
{
std::vector<hardware_interface::CommandInterface> command_interfaces;
for (auto i = 0u; i < info_.joints.size(); i++)
{
command_interfaces.emplace_back(hardware_interface::CommandInterface(
info_.joints[i].name, hardware_interface::HW_IF_VELOCITY, &hw_commands_[i]));
}
return command_interfaces;
}
CallbackReturn IsaacDriveHardware::on_activate(
const rclcpp_lifecycle::State & /*previous_state*/)
{
RCLCPP_INFO(rclcpp::get_logger("IsaacDriveHardware"), "Activating ...please wait...");
// set some default values
for (auto i = 0u; i < hw_positions_.size(); i++)
{
if (std::isnan(hw_positions_[i]))
{
hw_positions_[i] = 0;
hw_velocities_[i] = 0;
hw_commands_[i] = 0;
}
    joint_names_map_[joint_names_[i]] = i + 1;  // store index + 1 so 0 can flag unknown names
}
subscriber_is_active_ = true;
RCLCPP_INFO(rclcpp::get_logger("IsaacDriveHardware"), "Successfully activated!");
return CallbackReturn::SUCCESS;
}
CallbackReturn IsaacDriveHardware::on_deactivate(
const rclcpp_lifecycle::State & /*previous_state*/)
{
RCLCPP_INFO(rclcpp::get_logger("IsaacDriveHardware"), "Deactivating ...please wait...");
subscriber_is_active_ = false;
RCLCPP_INFO(rclcpp::get_logger("IsaacDriveHardware"), "Successfully deactivated!");
return CallbackReturn::SUCCESS;
}
// || ||
// \/ THE STUFF THAT MATTERS \/
hardware_interface::return_type IsaacDriveHardware::read()
{
rclcpp::spin_some(node_);
std::shared_ptr<sensor_msgs::msg::JointState> last_command_msg;
received_joint_msg_ptr_.get(last_command_msg);
if (last_command_msg == nullptr)
{
RCLCPP_WARN(rclcpp::get_logger("IsaacDriveHardware"), "Velocity message received was a nullptr.");
return hardware_interface::return_type::ERROR;
}
auto names = last_command_msg->name;
auto positions = last_command_msg->position;
auto velocities = last_command_msg->velocity;
for (auto i = 0u; i < names.size(); i++) {
uint p = joint_names_map_[names[i]];
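    // std::map::operator[] default-constructs 0 for unknown joint names;
    // on_activate() stored every index with a +1 offset so that 0 can be
    // read as "not one of our joints".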
if (p > 0) {
hw_positions_[p - 1] = positions[i];
hw_velocities_[p - 1] = velocities[i];
}
}
return hardware_interface::return_type::OK;
}
hardware_interface::return_type robot_hardware::IsaacDriveHardware::write()
{
// RCLCPP_INFO(rclcpp::get_logger("IsaacDriveHardware"), "Velocity: %f", hw_commands_[0]);
if (realtime_isaac_publisher_->trylock()) {
auto & realtime_isaac_command = realtime_isaac_publisher_->msg_;
realtime_isaac_command.header.stamp = node_->get_clock()->now();
realtime_isaac_command.name = joint_names_;
realtime_isaac_command.velocity = hw_commands_;
realtime_isaac_publisher_->unlockAndPublish();
}
rclcpp::spin_some(node_);
return hardware_interface::return_type::OK;
}
} // namespace robot_hardware
#include "pluginlib/class_list_macros.hpp"
PLUGINLIB_EXPORT_CLASS(
robot_hardware::IsaacDriveHardware, hardware_interface::SystemInterface)
| 8,210 |
C++
| 32.514286 | 129 | 0.684166 |
Coriago/examplo-ros2/src/robot_hardware/include/robot_hardware/visibility_control.h
|
// Copyright 2017 Open Source Robotics Foundation, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* This header must be included by all rclcpp headers which declare symbols
* which are defined in the rclcpp library. When not building the rclcpp
* library, i.e. when using the headers in other package's code, the contents
* of this header change the visibility of certain symbols which the rclcpp
 * library cannot have, but the consuming code must have in order to link.
*/
#ifndef ROBOT_HARDWARE__VISIBILITY_CONTROL_H_
#define ROBOT_HARDWARE__VISIBILITY_CONTROL_H_
// This logic was borrowed (then namespaced) from the examples on the gcc wiki:
// https://gcc.gnu.org/wiki/Visibility
#if defined _WIN32 || defined __CYGWIN__
#ifdef __GNUC__
#define ROBOT_HARDWARE_EXPORT __attribute__((dllexport))
#define ROBOT_HARDWARE_IMPORT __attribute__((dllimport))
#else
#define ROBOT_HARDWARE_EXPORT __declspec(dllexport)
#define ROBOT_HARDWARE_IMPORT __declspec(dllimport)
#endif
#ifdef ROBOT_HARDWARE_BUILDING_DLL
#define ROBOT_HARDWARE_PUBLIC ROBOT_HARDWARE_EXPORT
#else
#define ROBOT_HARDWARE_PUBLIC ROBOT_HARDWARE_IMPORT
#endif
#define ROBOT_HARDWARE_PUBLIC_TYPE ROBOT_HARDWARE_PUBLIC
#define ROBOT_HARDWARE_LOCAL
#else
#define ROBOT_HARDWARE_EXPORT __attribute__((visibility("default")))
#define ROBOT_HARDWARE_IMPORT
#if __GNUC__ >= 4
#define ROBOT_HARDWARE_PUBLIC __attribute__((visibility("default")))
#define ROBOT_HARDWARE_LOCAL __attribute__((visibility("hidden")))
#else
#define ROBOT_HARDWARE_PUBLIC
#define ROBOT_HARDWARE_LOCAL
#endif
#define ROBOT_HARDWARE_PUBLIC_TYPE
#endif
#endif // ROBOT_HARDWARE__VISIBILITY_CONTROL_H_
| 2,162 |
C
| 37.624999 | 79 | 0.76087 |
Coriago/examplo-ros2/src/robot_hardware/include/robot_hardware/isaac_drive.hpp
|
// Copyright 2021 ros2_control Development Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ROBOT_HARDWARE__ISAAC_DRIVE_HPP_
#define ROBOT_HARDWARE__ISAAC_DRIVE_HPP_
#include <memory>
#include <string>
#include <vector>
#include <map>
#include "hardware_interface/handle.hpp"
#include "hardware_interface/hardware_info.hpp"
#include "hardware_interface/system_interface.hpp"
#include "hardware_interface/types/hardware_interface_return_values.hpp"
#include "rclcpp/macros.hpp"
#include "rclcpp_lifecycle/node_interfaces/lifecycle_node_interface.hpp"
#include "rclcpp_lifecycle/state.hpp"
#include "robot_hardware/visibility_control.h"
#include "rclcpp/rclcpp.hpp"
#include "sensor_msgs/msg/joint_state.hpp"
#include "realtime_tools/realtime_box.h"
#include "realtime_tools/realtime_buffer.h"
#include "realtime_tools/realtime_publisher.h"
using CallbackReturn = rclcpp_lifecycle::node_interfaces::LifecycleNodeInterface::CallbackReturn;
namespace robot_hardware
{
class IsaacDriveHardware : public hardware_interface::SystemInterface
{
public:
RCLCPP_SHARED_PTR_DEFINITIONS(IsaacDriveHardware)
ROBOT_HARDWARE_PUBLIC
CallbackReturn on_init(const hardware_interface::HardwareInfo & info) override;
ROBOT_HARDWARE_PUBLIC
std::vector<hardware_interface::StateInterface> export_state_interfaces() override;
ROBOT_HARDWARE_PUBLIC
std::vector<hardware_interface::CommandInterface> export_command_interfaces() override;
ROBOT_HARDWARE_PUBLIC
CallbackReturn on_activate(const rclcpp_lifecycle::State & previous_state) override;
ROBOT_HARDWARE_PUBLIC
CallbackReturn on_deactivate(const rclcpp_lifecycle::State & previous_state) override;
ROBOT_HARDWARE_PUBLIC
hardware_interface::return_type read() override;
ROBOT_HARDWARE_PUBLIC
hardware_interface::return_type write() override;
private:
// Parameters for the DiffBot simulation
double hw_start_sec_;
double hw_stop_sec_;
// Store the command for the simulated robot
std::vector<double> hw_commands_;
std::vector<double> hw_positions_;
std::vector<double> hw_velocities_;
std::vector<std::string> joint_names_;
std::map<std::string, uint> joint_names_map_;
// Pub Sub to isaac
rclcpp::Node::SharedPtr node_;
std::shared_ptr<rclcpp::Publisher<sensor_msgs::msg::JointState>> isaac_publisher_ = nullptr;
std::shared_ptr<realtime_tools::RealtimePublisher<sensor_msgs::msg::JointState>>
realtime_isaac_publisher_ = nullptr;
bool subscriber_is_active_ = false;
rclcpp::Subscription<sensor_msgs::msg::JointState>::SharedPtr isaac_subscriber_ = nullptr;
realtime_tools::RealtimeBox<std::shared_ptr<sensor_msgs::msg::JointState>> received_joint_msg_ptr_{nullptr};
};
} // namespace robot_hardware
#endif // ROBOT_HARDWARE__ISAAC_DRIVE_HPP_
| 3,284 |
C++
| 34.32258 | 110 | 0.766748 |
Coriago/examplo-ros2/src/robot_description/launch/rviz.launch.py
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument, ExecuteProcess
from launch_ros.actions import Node
import xacro
def generate_launch_description():
# Check if we're told to use sim time
use_sim_time = LaunchConfiguration('use_sim_time')
# Process the URDF file
pkg_path = os.path.join(get_package_share_directory('robot_description'))
xacro_file = os.path.join(pkg_path,'urdf', 'robots','examplo.urdf.xacro')
robot_description_config = xacro.process_file(xacro_file)
print(robot_description_config.toxml())
# Create a robot_state_publisher node
params = {'robot_description': robot_description_config.toxml(), 'use_sim_time': use_sim_time}
node_robot_state_publisher = Node(
package='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
parameters=[params]
)
# Start Rviz2 with basic view
rviz2_config_path = os.path.join(get_package_share_directory('robot_description'), 'config/isaac.rviz')
run_rviz2 = ExecuteProcess(
cmd=['rviz2', '-d', rviz2_config_path],
output='screen'
)
# Launch!
return LaunchDescription([
DeclareLaunchArgument(
'use_sim_time',
default_value='true',
description='Use sim time if true'),
node_robot_state_publisher,
run_rviz2
])
| 1,543 |
Python
| 29.879999 | 107 | 0.680493 |
Coriago/examplo-ros2/src/robot_description/launch/joy.launch.py
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import ExecuteProcess, IncludeLaunchDescription, RegisterEventHandler
from launch.event_handlers import OnProcessExit
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
def generate_launch_description():
# Joystick Teleop
joy = Node(
package='joy',
executable='joy_node',
name='joy_node',
parameters=[{
'dev': '/dev/input/js0',
'deadzone': 0.3,
'autorepeat_rate': 20.0,
}])
config_filepath = os.path.join(get_package_share_directory('teleop_twist_joy'), 'config/xbox.config.yaml')
teleop = Node(
package='teleop_twist_joy',
executable='teleop_node',
name='teleop_twist_joy_node',
parameters=[config_filepath],
)
# Launch!
return LaunchDescription([
joy,
teleop
])
| 1,068 |
Python
| 26.410256 | 110 | 0.632959 |
Coriago/examplo-ros2/src/robot_description/launch/isaac.launch.py
|
import os
from ament_index_python.packages import get_package_prefix, get_package_share_directory
from launch import LaunchDescription
from launch.actions import ExecuteProcess, IncludeLaunchDescription, RegisterEventHandler
from launch.event_handlers import OnProcessExit
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
import xacro
def generate_launch_description():
# Get Local Files
pkg_path = os.path.join(get_package_share_directory('robot_description'))
xacro_file = os.path.join(pkg_path, 'urdf', 'robots','examplo.urdf.xacro')
controllers_file = os.path.join(pkg_path, 'config', 'controllers.yaml')
joystick_file = os.path.join(pkg_path, 'config', 'xbox-holonomic.config.yaml')
rviz_file = os.path.join(pkg_path, 'config', 'isaac.rviz')
robot_description_config = xacro.process_file(xacro_file)
robot_description_xml = robot_description_config.toxml()
source_code_path = os.path.abspath(os.path.join(pkg_path, "../../../../src/robot_description"))
urdf_save_path = os.path.join(source_code_path, "examplo.urdf")
with open(urdf_save_path, 'w') as f:
f.write(robot_description_xml)
# Create a robot_state_publisher node
description_params = {'robot_description': robot_description_xml, 'use_sim_time': True }
node_robot_state_publisher = Node(
package='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
parameters=[description_params]
)
# Starts ROS2 Control
control_node = Node(
package="controller_manager",
executable="ros2_control_node",
parameters=[{'robot_description': robot_description_xml, 'use_sim_time': True }, controllers_file],
output="screen",
)
# Starts ROS2 Control Joint State Broadcaster
joint_state_broadcaster_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=["joint_state_broadcaster", "--controller-manager", "/controller_manager"],
)
# Starts ROS2 Control Mecanum Drive Controller
mecanum_drive_controller_spawner = Node(
package="controller_manager",
executable="spawner",
arguments=["mecanum_controller", "-c", "/controller_manager"],
)
mecanum_drive_controller_delay = RegisterEventHandler(
event_handler=OnProcessExit(
target_action=joint_state_broadcaster_spawner,
on_exit=[mecanum_drive_controller_spawner],
)
)
# Start Rviz2 with basic view
run_rviz2_node = Node(
package='rviz2',
executable='rviz2',
parameters=[{ 'use_sim_time': True }],
name='isaac_rviz2',
output='screen',
arguments=[["-d"], [rviz_file]],
)
# run_rviz2 = ExecuteProcess(
# cmd=['rviz2', '-d', rviz_file],
# output='screen'
# )
rviz2_delay = RegisterEventHandler(
event_handler=OnProcessExit(
target_action=joint_state_broadcaster_spawner,
on_exit=[run_rviz2_node],
)
)
# Start Joystick Node
joy = Node(
package='joy',
executable='joy_node',
name='joy_node',
parameters=[{
'dev': '/dev/input/js0',
'deadzone': 0.3,
'autorepeat_rate': 20.0,
}])
# Start Teleop Node to translate joystick commands to robot commands
joy_teleop = Node(
package='teleop_twist_joy',
executable='teleop_node',
name='teleop_twist_joy_node',
parameters=[joystick_file],
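        # Remap to the controller's unstamped topic (use_stamped_vel is false
        # in controllers.yaml).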
        remappings=[('/cmd_vel', '/mecanum_controller/cmd_vel_unstamped')]
)
# Launch!
return LaunchDescription([
control_node,
node_robot_state_publisher,
joint_state_broadcaster_spawner,
mecanum_drive_controller_delay,
rviz2_delay,
joy,
joy_teleop
])
| 3,985 |
Python
| 31.672131 | 107 | 0.63187 |
Coriago/examplo-ros2/src/robot_description/config/controllers.yaml
|
controller_manager:
ros__parameters:
update_rate: 10 # Hz
joint_state_broadcaster:
type: joint_state_broadcaster/JointStateBroadcaster
mecanum_controller:
type: mecanum_controller/MecanumController
joint_state_broadcaster:
ros__parameters:
extra_joints: [
"rear_right_roller_0_joint",
"rear_right_roller_1_joint",
"rear_right_roller_2_joint",
"rear_right_roller_3_joint",
"rear_right_roller_4_joint",
"rear_right_roller_5_joint",
"rear_right_roller_6_joint",
"rear_right_roller_7_joint",
"rear_right_roller_8_joint",
"rear_left_roller_0_joint",
"rear_left_roller_1_joint",
"rear_left_roller_2_joint",
"rear_left_roller_3_joint",
"rear_left_roller_4_joint",
"rear_left_roller_5_joint",
"rear_left_roller_6_joint",
"rear_left_roller_7_joint",
"rear_left_roller_8_joint",
"front_left_roller_0_joint",
"front_left_roller_1_joint",
"front_left_roller_2_joint",
"front_left_roller_3_joint",
"front_left_roller_4_joint",
"front_left_roller_5_joint",
"front_left_roller_6_joint",
"front_left_roller_7_joint",
"front_left_roller_8_joint",
"front_right_roller_0_joint",
"front_right_roller_1_joint",
"front_right_roller_2_joint",
"front_right_roller_3_joint",
"front_right_roller_4_joint",
"front_right_roller_5_joint",
"front_right_roller_6_joint",
"front_right_roller_7_joint",
"front_right_roller_8_joint"]
mecanum_controller:
ros__parameters:
front_left_joint: "front_left_mecanum_joint"
front_right_joint: "front_right_mecanum_joint"
rear_left_joint: "rear_left_mecanum_joint"
rear_right_joint: "rear_right_mecanum_joint"
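    # Kinematic geometry in meters; these populate WheelParams
    # (x_offset, y_offset, radius) in mecanum_controller.hpp.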
chassis_center_to_axle: 0.145
axle_center_to_wheel: 0.128
wheel_radius: 0.0485
cmd_vel_timeout: 0.5
use_stamped_vel: false
| 1,933 |
YAML
| 29.21875 | 57 | 0.636317 |
Coriago/examplo-ros2/src/robot_description/config/xbox-holonomic.config.yaml
|
teleop_twist_joy_node:
ros__parameters:
require_enable_button: false
    axis_linear:  # Left thumb stick (x: vertical, axis 1; y: horizontal, axis 0)
x: 1
y: 0
scale_linear:
x: 0.7
y: 0.7
    axis_angular:  # Right thumb stick horizontal (axis 3 on a standard Xbox mapping)
yaw: 3
scale_angular:
yaw: 1.5
| 285 |
YAML
| 16.874999 | 48 | 0.575439 |
Coriago/examplo-ros2/src/robot_control/setup.py
|
from setuptools import setup
package_name = 'robot_control'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='helios',
maintainer_email='[email protected]',
description='Control the robot',
license='MIT',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'publish = robot_control.publish_joint_command:main',
],
},
)
| 664 |
Python
| 23.629629 | 65 | 0.596386 |
Coriago/examplo-ros2/src/robot_control/robot_control/publish_joint_command.py
|
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import JointState
class PublishJointCmd(Node):
def __init__(self):
super().__init__('publish_joint_commands')
self.publisher_ = self.create_publisher(JointState, 'isaac_joint_commands', 10)
timer_period = 0.5 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
def timer_callback(self):
velocity_cmds = JointState()
velocity_cmds.name = [
'front_left_mecanum_joint',
'front_right_mecanum_joint',
'rear_left_mecanum_joint',
'rear_right_mecanum_joint']
velocity_cmds.velocity = [ 15.0, 15.0, 15.0, 15.0 ]
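        # Wheel speeds are in rad/s; with the 0.0485 m wheel radius from
        # controllers.yaml this is roughly 0.73 m/s at the wheel rim.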
self.publisher_.publish(velocity_cmds)
        self.get_logger().info(f'Publishing wheel velocity commands: {velocity_cmds.velocity}')
self.i += 1
def main(args=None):
rclpy.init(args=args)
node = PublishJointCmd()
rclpy.spin(node)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| 1,191 |
Python
| 24.913043 | 87 | 0.609572 |
Coriago/examplo-ros2/isaac/omni.isaac.examplo_bot/omni/isaac/examplo_bot/import_bot/__init__.py
|
from omni.isaac.examplo_bot.import_bot.import_bot import ImportBot
from omni.isaac.examplo_bot.import_bot.import_bot_extension import ImportBotExtension
| 154 |
Python
| 37.749991 | 85 | 0.844156 |
Coriago/examplo-ros2/isaac/omni.isaac.examplo_bot/omni/isaac/examplo_bot/import_bot/import_bot_extension.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.examplo_bot.base_sample import BaseSampleExtension
from omni.isaac.examplo_bot.import_bot.import_bot import ImportBot
class ImportBotExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name="Examplo Bot",
submenu_name="",
name="Import URDF",
title="Load the URDF for Examplo Bot",
doc_link="",
overview="This loads the examplo bot into Isaac Sim.",
file_path=os.path.abspath(__file__),
sample=ImportBot(),
)
return
| 1,079 |
Python
| 37.571427 | 76 | 0.688601 |
Coriago/examplo-ros2/isaac/omni.isaac.examplo_bot/omni/isaac/examplo_bot/import_bot/import_bot.py
|
import omni.graph.core as og
import omni.usd
from omni.isaac.examplo_bot.base_sample import BaseSample
from omni.isaac.urdf import _urdf
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils import prims
from omni.isaac.core_nodes.scripts.utils import set_target_prims
from omni.kit.viewport_legacy import get_default_viewport_window
from pxr import UsdPhysics
import omni.kit.commands
import os
import numpy as np
import math
import carb
def set_drive_params(drive, stiffness, damping, max_force):
drive.GetStiffnessAttr().Set(stiffness)
drive.GetDampingAttr().Set(damping)
drive.GetMaxForceAttr().Set(max_force)
return
class ImportBot(BaseSample):
def __init__(self) -> None:
super().__init__()
return
def setup_scene(self):
world = self.get_world()
world.scene.add_default_ground_plane()
self.setup_perspective_cam()
self.setup_world_action_graph()
return
async def setup_post_load(self):
self._world = self.get_world()
self.robot_name = "examplo"
self.extension_path = os.path.abspath(__file__)
self.project_root_path = os.path.abspath(os.path.join(self.extension_path, "../../../../../../.."))
self.path_to_urdf = os.path.join(self.project_root_path, "src/robot_description/examplo.urdf")
carb.log_info(self.path_to_urdf)
self._robot_prim_path = self.import_robot(self.path_to_urdf)
if self._robot_prim_path is None:
print("Error: failed to import robot")
return
self._robot_prim = self._world.scene.add(
Robot(prim_path=self._robot_prim_path, name=self.robot_name, position=np.array([0.0, 0.0, 0.3]))
)
self.configure_robot(self._robot_prim_path)
return
def import_robot(self, urdf_path):
import_config = _urdf.ImportConfig()
import_config.merge_fixed_joints = False
import_config.fix_base = False
import_config.make_default_prim = True
import_config.self_collision = False
import_config.create_physics_scene = False
import_config.import_inertia_tensor = True
import_config.default_drive_strength = 1047.19751
import_config.default_position_drive_damping = 52.35988
import_config.default_drive_type = _urdf.UrdfJointTargetType.JOINT_DRIVE_VELOCITY
import_config.distance_scale = 1.0
import_config.density = 0.0
result, prim_path = omni.kit.commands.execute( "URDFParseAndImportFile",
urdf_path=urdf_path,
import_config=import_config)
if result:
return prim_path
return None
def configure_robot(self, robot_prim_path):
w_sides = ['left', 'right']
l_sides = ['front', 'rear']
stage = self._world.stage
for w_side in w_sides:
for l_side in l_sides:
for i in range(9):
joint_name = "{}_{}_roller_{}_joint".format(l_side, w_side, i)
joint_path = "{}/{}_{}_mecanum_link/{}".format(robot_prim_path, l_side, w_side, joint_name)
prim = stage.GetPrimAtPath(joint_path)
omni.kit.commands.execute(
"UnapplyAPISchemaCommand",
api=UsdPhysics.DriveAPI,
prim=prim,
api_prefix="drive",
multiple_api_token="angular")
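                    # Unapplying the angular DriveAPI leaves each roller joint
                    # undriven so it spins passively; the free rollers are what
                    # give a mecanum wheel its lateral degree of freedom.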
# drive = UsdPhysics.DriveAPI.Get(prim, "angular")
# set_drive_params(drive, 0.0, 2.0, 0.0)
front_left = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath(f"{robot_prim_path}/chassis_link/front_left_mecanum_joint"), "angular")
front_right = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath(f"{robot_prim_path}/chassis_link/front_right_mecanum_joint"), "angular")
rear_left = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath(f"{robot_prim_path}/chassis_link/rear_left_mecanum_joint"), "angular")
rear_right = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath(f"{robot_prim_path}/chassis_link/rear_right_mecanum_joint"), "angular")
set_drive_params(front_left, 0, math.radians(1e5), 98.0)
set_drive_params(front_right, 0, math.radians(1e5), 98.0)
set_drive_params(rear_left, 0, math.radians(1e5), 98.0)
set_drive_params(rear_right, 0, math.radians(1e5), 98.0)
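        # Stiffness 0 with very high damping configures the four wheel joints
        # as pure velocity drives; the max force of 98.0 is an assumed torque
        # limit.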
self.create_lidar(robot_prim_path)
self.create_depth_camera()
self.setup_robot_action_graph(robot_prim_path)
return
def create_lidar(self, robot_prim_path):
lidar_parent = "{}/lidar_link".format(robot_prim_path)
lidar_path = "/lidar"
self.lidar_prim_path = lidar_parent + lidar_path
result, prim = omni.kit.commands.execute(
"RangeSensorCreateLidar",
path=lidar_path,
parent=lidar_parent,
min_range=0.4,
max_range=25.0,
draw_points=False,
draw_lines=True,
horizontal_fov=360.0,
vertical_fov=30.0,
horizontal_resolution=0.4,
vertical_resolution=4.0,
rotation_rate=0.0,
high_lod=False,
yaw_offset=0.0,
enable_semantics=False
)
return
def create_depth_camera(self):
self.depth_left_camera_path = f"{self._robot_prim_path}/zed_left_camera_frame/left_cam"
self.depth_right_camera_path = f"{self._robot_prim_path}/zed_right_camera_frame/right_cam"
self.left_camera = prims.create_prim(
prim_path=self.depth_left_camera_path,
prim_type="Camera",
attributes={
"focusDistance": 1,
"focalLength": 24,
"horizontalAperture": 20.955,
"verticalAperture": 15.2908,
"clippingRange": (0.1, 1000000),
"clippingPlanes": np.array([1.0, 0.0, 1.0, 1.0]),
},
)
self.right_camera = prims.create_prim(
prim_path=self.depth_right_camera_path,
prim_type="Camera",
attributes={
"focusDistance": 1,
"focalLength": 24,
"horizontalAperture": 20.955,
"verticalAperture": 15.2908,
"clippingRange": (0.1, 1000000),
"clippingPlanes": np.array([1.0, 0.0, 1.0, 1.0]),
},
)
return
def setup_world_action_graph(self):
og.Controller.edit(
{"graph_path": "/globalclock", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("Context", "omni.isaac.ros2_bridge.ROS2Context"),
("PublishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
],
og.Controller.Keys.CONNECT: [
("OnPlaybackTick.outputs:tick", "PublishClock.inputs:execIn"),
("Context.outputs:context", "PublishClock.inputs:context"),
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
],
}
)
return
def setup_perspective_cam(self):
# Get the Viewport and the Default Camera
viewport_window = get_default_viewport_window()
camera = self.get_world().stage.GetPrimAtPath(viewport_window.get_active_camera())
# Get Default Cam Values
camAttributes = {}
camOrientation = None
camTranslation = None
for att in camera.GetAttributes():
name = att.GetName()
if not (name.startswith('omni') or name.startswith('xform')):
camAttributes[att.GetName()] = att.Get()
elif name == 'xformOp:orient':
convertedQuat = [att.Get().GetReal()] + list(att.Get().GetImaginary())
camOrientation = np.array(convertedQuat)
elif name == 'xformOp:translate':
camTranslation = np.array(list(att.Get()))
# Modify what we want
camAttributes["clippingRange"] = (0.1, 1000000)
camAttributes["clippingPlanes"] = np.array([1.0, 0.0, 1.0, 1.0])
# Create a new camera with desired values
cam_path = "/World/PerspectiveCam"
prims.create_prim(
prim_path=cam_path,
prim_type="Camera",
translation=camTranslation,
orientation=camOrientation,
attributes=camAttributes,
)
# Use the camera for our viewport
viewport_window.set_active_camera(cam_path)
return
def setup_robot_action_graph(self, robot_prim_path):
robot_controller_path = f"{robot_prim_path}/ros_interface_controller"
og.Controller.edit(
{"graph_path": robot_controller_path, "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("Context", "omni.isaac.ros2_bridge.ROS2Context"),
("PublishJointState", "omni.isaac.ros2_bridge.ROS2PublishJointState"),
("SubscribeJointState", "omni.isaac.ros2_bridge.ROS2SubscribeJointState"),
("articulation_controller", "omni.isaac.core_nodes.IsaacArticulationController"),
("isaac_read_lidar_beams_node", "omni.isaac.range_sensor.IsaacReadLidarBeams"),
("ros2_publish_laser_scan", "omni.isaac.ros2_bridge.ROS2PublishLaserScan"),
("ros2_camera_helper", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("ros2_camera_helper_02", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("ros2_camera_helper_03", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("isaac_create_viewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
("set_active_camera", "omni.graph.ui.SetActiveViewportCamera"),
("get_prim_path", "omni.graph.nodes.GetPrimPath"),
("constant_token", "omni.graph.nodes.ConstantToken"),
("constant_token_02", "omni.graph.nodes.ConstantToken"),
],
og.Controller.Keys.SET_VALUES: [
("PublishJointState.inputs:topicName", "isaac_joint_states"),
("SubscribeJointState.inputs:topicName", "isaac_joint_commands"),
("articulation_controller.inputs:usePath", False),
("ros2_publish_laser_scan.inputs:topicName", "laser_scan"),
("ros2_publish_laser_scan.inputs:frameId", "base_link"),
("ros2_camera_helper.inputs:frameId", "base_link"),
("ros2_camera_helper_02.inputs:frameId", "base_link"),
("ros2_camera_helper_02.inputs:topicName", "camera_info"),
("ros2_camera_helper_03.inputs:frameId", "base_link"),
("ros2_camera_helper_03.inputs:topicName", "depth"),
("isaac_create_viewport.inputs:viewportId", 1),
("constant_token.inputs:value", "camera_info"),
("constant_token_02.inputs:value", "depth"),
],
og.Controller.Keys.CONNECT: [
("OnPlaybackTick.outputs:tick", "PublishJointState.inputs:execIn"),
("OnPlaybackTick.outputs:tick", "SubscribeJointState.inputs:execIn"),
("OnPlaybackTick.outputs:tick", "isaac_read_lidar_beams_node.inputs:execIn"),
("OnPlaybackTick.outputs:tick", "isaac_create_viewport.inputs:execIn"),
("OnPlaybackTick.outputs:tick", "articulation_controller.inputs:execIn"),
("ReadSimTime.outputs:simulationTime", "PublishJointState.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "ros2_publish_laser_scan.inputs:timeStamp"),
("Context.outputs:context", "PublishJointState.inputs:context"),
("Context.outputs:context", "SubscribeJointState.inputs:context"),
("Context.outputs:context", "ros2_publish_laser_scan.inputs:context"),
("Context.outputs:context", "ros2_camera_helper.inputs:context"),
("Context.outputs:context", "ros2_camera_helper_02.inputs:context"),
("SubscribeJointState.outputs:jointNames", "articulation_controller.inputs:jointNames"),
("SubscribeJointState.outputs:velocityCommand", "articulation_controller.inputs:velocityCommand"),
("isaac_read_lidar_beams_node.outputs:execOut", "ros2_publish_laser_scan.inputs:execIn"),
("isaac_read_lidar_beams_node.outputs:azimuthRange", "ros2_publish_laser_scan.inputs:azimuthRange"),
("isaac_read_lidar_beams_node.outputs:depthRange", "ros2_publish_laser_scan.inputs:depthRange"),
("isaac_read_lidar_beams_node.outputs:horizontalFov", "ros2_publish_laser_scan.inputs:horizontalFov"),
("isaac_read_lidar_beams_node.outputs:horizontalResolution", "ros2_publish_laser_scan.inputs:horizontalResolution"),
("isaac_read_lidar_beams_node.outputs:intensitiesData", "ros2_publish_laser_scan.inputs:intensitiesData"),
("isaac_read_lidar_beams_node.outputs:linearDepthData", "ros2_publish_laser_scan.inputs:linearDepthData"),
("isaac_read_lidar_beams_node.outputs:numCols", "ros2_publish_laser_scan.inputs:numCols"),
("isaac_read_lidar_beams_node.outputs:numRows", "ros2_publish_laser_scan.inputs:numRows"),
("isaac_read_lidar_beams_node.outputs:rotationRate", "ros2_publish_laser_scan.inputs:rotationRate"),
("isaac_create_viewport.outputs:viewport", "ros2_camera_helper.inputs:viewport"),
("isaac_create_viewport.outputs:viewport", "ros2_camera_helper_02.inputs:viewport"),
("isaac_create_viewport.outputs:viewport", "set_active_camera.inputs:viewport"),
("isaac_create_viewport.outputs:execOut", "set_active_camera.inputs:execIn"),
("set_active_camera.outputs:execOut", "ros2_camera_helper.inputs:execIn"),
("set_active_camera.outputs:execOut", "ros2_camera_helper_02.inputs:execIn"),
("get_prim_path.outputs:primPath", "set_active_camera.inputs:primPath"),
("constant_token.inputs:value", "ros2_camera_helper_02.inputs:type"),
],
}
)
set_target_prims(primPath=f"{robot_controller_path}/articulation_controller", targetPrimPaths=[robot_prim_path])
set_target_prims(primPath=f"{robot_controller_path}/PublishJointState", targetPrimPaths=[robot_prim_path])
set_target_prims(primPath=f"{robot_controller_path}/isaac_read_lidar_beams_node", targetPrimPaths=[self.lidar_prim_path], inputName="inputs:lidarPrim")
set_target_prims(primPath=f"{robot_controller_path}/get_prim_path", targetPrimPaths=[self.depth_left_camera_path], inputName="inputs:prim")
return
async def setup_pre_reset(self):
return
async def setup_post_reset(self):
return
async def setup_post_clear(self):
return
def world_cleanup(self):
self._world.scene.remove_object(self.robot_name)
return
| 15,832 |
Python
| 49.746795 | 159 | 0.593734 |
Coriago/examplo-ros2/isaac/omni.isaac.examplo_bot/docs/CHANGELOG.md
|
**********
CHANGELOG
**********
[0.1.0] - 2022-06-26
========================
Added
-------
- Initial version of Examplo Bot Extension
| 137 |
Markdown
| 10.499999 | 42 | 0.430657 |
John-Dillermand/Isaac-Orbit/README.md
|
# Isaac-Orbit Maintenance Branch
| 34 |
Markdown
| 10.666663 | 32 | 0.794118 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/reach/reach_env_cfg.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import MISSING
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg, RigidObject, RigidObjectCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import ActionTermCfg as ActionTerm
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RandomizationTermCfg as RandTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise
from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg
from omni.isaac.orbit.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
import omni.isaac.orbit_tasks.manipulation.reach.mdp as mdp
##
# Scene definition
##
@configclass
class ReachSceneCfg(InteractiveSceneCfg):
"""Configuration for the scene with a robotic arm."""
robot: ArticulationCfg = MISSING
# ground plane
ground = AssetBaseCfg(
prim_path="/World/ground",
spawn=sim_utils.GroundPlaneCfg(size=(100.0, 100.0)),
)
# Cube for robot
cube = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Cuboid",
spawn=sim_utils.CuboidCfg(size=(0.4, 0.2, 1.0)),
)
#robot
#UR10_CFG.init_state.pos = (0.0, 0.0, .5)
#robot: ArticulationCfg = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/robot")
# lights
dome_light = AssetBaseCfg(
prim_path="/World/DomeLight",
spawn=sim_utils.DomeLightCfg(color=(0.9, 0.9, 0.9), intensity=500.0),
)
distant_light = AssetBaseCfg(
prim_path="/World/DistantLight",
spawn=sim_utils.DistantLightCfg(color=(0.9, 0.9, 0.9), intensity=2500.0),
init_state=AssetBaseCfg.InitialStateCfg(rot=(0.738, 0.477, 0.477, 0.0)),
)
    taskBoard = AssetBaseCfg(
        prim_path="{ENV_REGEX_NS}/Cuboid/TaskBoards",
        spawn=sim_utils.UsdFileCfg(
            usd_path="/home/chris/Desktop/ws_sims/Models/taskBoards.usdc",
            scale=(0.001, 0.001, 0.001),
        ),
        init_state=AssetBaseCfg.InitialStateCfg(pos=(-0.15, 0.25, 0.7)),
        # init_state = InitialStateCfg()
    )
# Object to move
object = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/Object",
init_state=RigidObjectCfg.InitialStateCfg(pos=[0, 0.25, 0.6], rot=[1, 0, 0, 0]),
spawn=UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
scale=(0.8, 0.8, 0.8),
rigid_props=RigidBodyPropertiesCfg(
solver_position_iteration_count=16,
solver_velocity_iteration_count=1,
max_angular_velocity=1000.0,
max_linear_velocity=1000.0,
max_depenetration_velocity=5.0,
disable_gravity=False,
),
),
)
table = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Table",
init_state=AssetBaseCfg.InitialStateCfg(pos=[-0.23, 0.1200, 0,], rot=[1, 0, 0, 0]),
spawn=UsdFileCfg(usd_path="/home/chris/Desktop/ws_sims/Models/TableMahog.usdc",scale=[0.0008,0.0008,0.0008]),
)
##
# MDP settings
##
@configclass
class CommandsCfg:
"""Command terms for the MDP."""
# ee_pose = mdp.UniformPoseCommandCfg(
# asset_name="robot",
# body_name=MISSING,
# resampling_time_range=(4.0, 4.0),
# debug_vis=True,
# ranges=mdp.UniformPoseCommandCfg.Ranges(
# #pos_x=(0.2, 0.2),
# #pos_y=(-0.2, 0.2),
# #pos_z=(0.2, 0.25),
# #roll=(0.0, 0.0),
# #pitch=MISSING, # depends on end-effector axis
# #yaw=(-3.14, 3.14),
# pos_x=(0, 0),
# pos_y=(0.482, 0.482),
# pos_z=(0.165, 0.165),
# roll=(0,0),
# pitch=(0,0), # depends on end-effector
# yaw=(0,0),
# ),
# )
@configclass
class ActionsCfg:
"""Action specifications for the MDP."""
arm_action: ActionTerm = MISSING
gripper_action: ActionTerm | None = None
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
# observation terms (order preserved)
#joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
#joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
#pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "ee_pose"})
actions = ObsTerm(func=mdp.last_action)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class RandomizationCfg:
"""Configuration for randomization."""
reset_robot_joints = RandTerm(
func=mdp.reset_joints_by_scale,
mode="reset",
params={
"position_range": (0.5, 1.5),
"velocity_range": (0.0, 0.0),
},
)
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
# task terms
#end_effector_position_tracking = RewTerm(
# func=mdp.position_command_error,
# weight=-0.2,
# params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"},
#)
#end_effector_orientation_tracking = RewTerm(
# func=mdp.orientation_command_error,
# weight=-0.05,
# params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"},
#)
# action penalty
action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.0001)
joint_vel = RewTerm(
func=mdp.joint_vel_l2,
weight=-0.0001,
params={"asset_cfg": SceneEntityCfg("robot")},
)
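    # Both penalties are small L2 regularizers; CurriculumCfg below raises the
    # action_rate weight to -0.005 after 4500 steps.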
@configclass
class TerminationsCfg:
"""Termination terms for the MDP."""
time_out = DoneTerm(func=mdp.time_out, time_out=True)
@configclass
class CurriculumCfg:
"""Curriculum terms for the MDP."""
action_rate = CurrTerm(
func=mdp.modify_reward_weight, params={"term_name": "action_rate", "weight": -0.005, "num_steps": 4500}
)
##
# Environment configuration
##
@configclass
class ReachEnvCfg(RLTaskEnvCfg):
"""Configuration for the reach end-effector pose tracking environment."""
# Scene settings
scene: ReachSceneCfg = ReachSceneCfg(num_envs=4096, env_spacing=2.5)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
commands: CommandsCfg = CommandsCfg()
# MDP settings
rewards: RewardsCfg = RewardsCfg()
terminations: TerminationsCfg = TerminationsCfg()
randomization: RandomizationCfg = RandomizationCfg()
curriculum: CurriculumCfg = CurriculumCfg()
def __post_init__(self):
"""Post initialization."""
# general settings
self.decimation = 2
self.episode_length_s = 12.0
self.viewer.eye = (3.5, 3.5, 3.5)
# simulation settings
self.sim.dt = 1.0 / 60.0
| 7,921 |
Python
| 30.943548 | 117 | 0.633758 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/erc/baseEnvironment.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script demonstrates how to run a random-action agent in the task-board environment. It combines the
concepts of scene, action, observation and randomization managers to create an environment.
"""
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Random-action agent for the task-board environment.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations.")
parser.add_argument("--num_envs", type=int, default=4, help="Number of environments to spawn.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import math
import torch
#import omni.isaac.orbit.envs.mdp as mdp
#from omni.isaac.orbit.envs import BaseEnv, BaseEnvCfg
#from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
#from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
#from omni.isaac.orbit.managers import RandomizationTermCfg as RandTerm
#from omni.isaac.orbit.managers import SceneEntityCfg
#from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.envs import RLTaskEnv
#from omni.isaac.orbit_tasks.classic.cartpole.cartpole_env_cfg import CartpoleSceneCfg
from . import TaskBoardEnvCfg
#from omni.isaac.orbit.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
#from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import UsdFileCfg
from omni.isaac.orbit_assets import UR10_CFG # isort:skip
import omni.isaac.contrib_tasks # noqa: F401
import omni.isaac.orbit_tasks # noqa: F401
from omni.isaac.orbit_tasks.utils import parse_env_cfg
###--------------------------------------------------------------------------------------------
import gymnasium as gym
def main():
"""Random actions agent with Orbit environment."""
# create environment configuration
env_cfg = parse_env_cfg(
args_cli.task, use_gpu=not args_cli.cpu, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
)
# create environment
env = gym.make(args_cli.task, cfg=env_cfg)
# print info (this is a vectorized environment)
print(f"[INFO]: Gym observation space: {env.observation_space}")
print(f"[INFO]: Gym action space: {env.action_space}")
# reset environment
env.reset()
# simulate environment
while simulation_app.is_running():
# run everything in inference mode
with torch.inference_mode():
# sample actions from -1 to 1
actions = 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1
# apply actions
env.step(actions)
# close the simulator
env.close()
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 3,071 |
Python
| 32.391304 | 115 | 0.715728 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/manipulation/erc/base_taskboards_env_cfg.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import math
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg, RigidObject, RigidObjectCfg
from omni.isaac.orbit.envs import RLTaskEnvCfg
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm #| Old implementation
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg, UsdFileCfg
from omni.isaac.orbit.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg
from omni.isaac.orbit.sensors import FrameTransformerCfg
#import omni.isaac.orbit_tasks.classic.cartpole.mdp as mdp # | Old implementation
from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG # isort: skip
import omni.isaac.orbit_tasks.manipulation.lift.mdp as mdp  # | Own Implementation
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
##
# Pre-defined configs
##
#from omni.isaac.orbit_assets.cartpole import CARTPOLE_CFG # isort:skip
from omni.isaac.orbit_assets import UR10_CFG # isort:skip
##
# Scene definition
##
@configclass
class TaskBoardSceneCfg(InteractiveSceneCfg):
"""Configuration for a task-board scene."""
# ground plane
ground = AssetBaseCfg(
prim_path="/World/ground",
spawn=sim_utils.GroundPlaneCfg(size=(100.0, 100.0)),
)
# Cube for robot
cube = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Cuboid",
spawn=sim_utils.CuboidCfg(size=(0.4, 0.2, 1.0)),
)
#robot
UR10_CFG.init_state.pos = (0.0, 0.0, 0.5)  # place the robot base on top of the cube
robot: ArticulationCfg = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/robot")
# lights
dome_light = AssetBaseCfg(
prim_path="/World/DomeLight",
spawn=sim_utils.DomeLightCfg(color=(0.9, 0.9, 0.9), intensity=500.0),
)
distant_light = AssetBaseCfg(
prim_path="/World/DistantLight",
spawn=sim_utils.DistantLightCfg(color=(0.9, 0.9, 0.9), intensity=2500.0),
init_state=AssetBaseCfg.InitialStateCfg(rot=(0.738, 0.477, 0.477, 0.0)),
)
taskBoard = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Cuboid/TaskBoards",
spawn=sim_utils.UsdFileCfg(
usd_path="/home/chris/Desktop/ws_sims/Models/taskBoards.usdc",
scale=[0.001, 0.001, 0.001],
),
init_state=AssetBaseCfg.InitialStateCfg(pos=(-0.15, 0.25, 0.7))
#init_state= InitialStateCfg()
)
# Object to move
object = RigidObjectCfg(
prim_path="{ENV_REGEX_NS}/Object",
init_state=RigidObjectCfg.InitialStateCfg(pos=[0, 0.25, 0.6], rot=[1, 0, 0, 0]),
spawn=UsdFileCfg(
usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
scale=(0.8, 0.8, 0.8),
rigid_props=RigidBodyPropertiesCfg(
solver_position_iteration_count=16,
solver_velocity_iteration_count=1,
max_angular_velocity=1000.0,
max_linear_velocity=1000.0,
max_depenetration_velocity=5.0,
disable_gravity=False,
),
),
)
table = AssetBaseCfg(
prim_path="{ENV_REGEX_NS}/Table",
init_state=AssetBaseCfg.InitialStateCfg(pos=[-0.23, 0.12, 0.0], rot=[1, 0, 0, 0]),
spawn=UsdFileCfg(usd_path="/home/chris/Desktop/ws_sims/Models/TableMahog.usdc", scale=[0.0008, 0.0008, 0.0008]),
)
# Listens to the required transforms
#marker_cfg = FRAME_MARKER_CFG.copy()
#marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
#marker_cfg.prim_path = "/Visuals/FrameTransformer"
#ee_frame = FrameTransformerCfg(
# prim_path="{ENV_REGEX_NS}/robot/shoulder_link",
# debug_vis=False,
# visualizer_cfg=marker_cfg,
# target_frames=[
# FrameTransformerCfg.FrameCfg(
# prim_path="{ENV_REGEX_NS}/robot/wrist_link_3",
# name="end_effector",
# offset=OffsetCfg(
# pos=[0.0, 0.0, 0.1034],
# ),
# ),
# ],
#)
#Env config finished
@configclass
class CommandsCfg:
"""Command terms for the MDP."""
object_pose = mdp.UniformPoseCommandCfg(
asset_name="robot",
body_name="wrist_3_link",
resampling_time_range=(5.0, 5.0),
debug_vis=True,
ranges=mdp.UniformPoseCommandCfg.Ranges(
pos_x=(0.4, 0.6), pos_y=(-0.25, 0.25), pos_z=(0.25, 0.5), roll=(0.0, 0.0), pitch=(0.0, 0.0), yaw=(0.0, 0.0)
),
)
@configclass
class ActionsCfg:
"""Action specifications for the environment."""
body_joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=100.0)
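# note: the raw policy action is multiplied by ``scale`` before being applied as the joint-position target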
##
# MDP settings
##
@configclass
class ObservationsCfg:
"""Observation specifications for the MDP."""
@configclass
class PolicyCfg(ObsGroup):
"""Observations for policy group."""
joint_pos = ObsTerm(func=mdp.joint_pos_rel)
joint_vel = ObsTerm(func=mdp.joint_vel_rel)
#object_position = ObsTerm(func=mdp.object_position_in_robot_root_frame)
#target_object_position = ObsTerm(func=mdp.generated_commands, params={"command_name": "object_pose"})
actions = ObsTerm(func=mdp.last_action)
def __post_init__(self):
self.enable_corruption = True
self.concatenate_terms = True
# observation groups
policy: PolicyCfg = PolicyCfg()
@configclass
class RewardsCfg:
"""Reward terms for the MDP."""
## (1) Constant running reward
alive = RewTerm(func=mdp.is_alive, weight=1.0)
## (2) Failure penalty
terminating = RewTerm(func=mdp.is_terminated, weight=-2.0)
## (3) Primary task: keep pole upright
#reaching_object = RewTerm(func=mdp.object_ee_distance, params={"std": 0.1}, weight=1.0)
#lifting_object = RewTerm(func=mdp.object_is_lifted, params={"minimal_height": 0.06}, weight=15.0)
#Penalty
joint_vel = RewTerm(
func=mdp.joint_vel_l2,
weight=-1e-4,
params={"asset_cfg": SceneEntityCfg("robot")},
)
#pole_pos = RewTerm(
# func=mdp.joint_pos_target_l2,
# weight=-1.0,
# params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*"]), "target": 0.0},
#)
## (4) Shaping tasks: lower cart velocity
#cart_vel = RewTerm(
# func=mdp.joint_vel_l1,
# weight=-0.01,
# params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*"])},
#)
## (5) Shaping tasks: lower pole angular velocity
#pole_vel = RewTerm(
# func=mdp.joint_vel_l1,
# weight=-0.005,
# params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*"])},
#)
#lifting_object = RewTerm(func=mdp.object_is_lifted, params={"minimal_height": 0.06}, weight=15.0)
#@configclass
#class TerminationsCfg:
# """Termination terms for the MDP."""
#
# # (1) Time out
# time_out = DoneTerm(func=mdp.time_out, time_out=True)
# # (2) Cart out of bounds
# cart_out_of_bounds = DoneTerm(
# func=mdp.joint_pos_manual_limit,
# params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*"]), "bounds": (-3.0, 3.0)},
# )
@configclass
class CurriculumCfg:
"""Configuration for the curriculum."""
pass
##
# Environment configuration
##
@configclass
class TaskBoardEnvCfg(RLTaskEnvCfg):
"""Configuration for the task-board environment."""
# Scene settings
scene: TaskBoardSceneCfg = TaskBoardSceneCfg(num_envs=4096, env_spacing=4.0, replicate_physics=True)
# Basic settings
observations: ObservationsCfg = ObservationsCfg()
actions: ActionsCfg = ActionsCfg()
#randomization: RandomizationCfg = RandomizationCfg()
# MDP settings
curriculum: CurriculumCfg = CurriculumCfg()
rewards: RewardsCfg = RewardsCfg()
#terminations: TerminationsCfg = TerminationsCfg()
# No command generator
commands: CommandsCfg = CommandsCfg()
# Post initialization
def __post_init__(self) -> None:
"""Post initialization."""
# general settings
self.decimation = 2
self.episode_length_s = 5.0
# viewer settings
self.viewer.eye = (8.0, 0.0, 5.0)
# simulation settings
self.sim.dt = 1.0 / 120.0
| 8,993 |
Python
| 31.352518 | 119 | 0.637162 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.contrib_tasks/config/extension.toml
|
[package]
# Note: Semantic Versioning is used: https://semver.org/
version = "0.0.1"
# Description
title = "ORBIT Environments contributed by the community."
description = "Extension containing environments for robot learning contributed by the community."
readme = "docs/README.md"
repository = "https://github.com/NVIDIA-Omniverse/Orbit"
category = "robotics"
keywords = ["robotics", "rl", "il", "learning"]
[dependencies]
"omni.isaac.orbit" = {}
"omni.isaac.orbit_tasks" = {}
"omni.isaac.core" = {}
"omni.isaac.gym" = {}
"omni.replicator.isaac" = {}
[[python.module]]
name = "omni.isaac.contrib_tasks"
| 608 |
TOML
| 25.47826 | 96 | 0.710526 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.contrib_tasks/omni/isaac/contrib_tasks/__init__.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Module containing environments contributed by the community.
We use OpenAI Gym registry to register the environment and their default configuration file.
The default configuration file is passed to the argument "kwargs" in the Gym specification registry.
The string is parsed into respective configuration container which needs to be passed to the environment
class. This is done using the function :meth:`load_cfg_from_registry` in the sub-module
:mod:`omni.isaac.orbit.utils.parse_cfg`.
Note:
This is a slight abuse of kwargs since they are meant to be directly passed into the environment class.
Instead, we remove the key :obj:`cfg_file` from the "kwargs" dictionary and the user needs to provide
the kwarg argument :obj:`cfg` while creating the environment.
Usage:
>>> import gymnasium as gym
>>> import omni.isaac.contrib_tasks
>>> from omni.isaac.orbit_tasks.utils.parse_cfg import load_cfg_from_registry
>>>
>>> task_name = "Isaac-Contrib-<my-registered-env-name>-v0"
>>> cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point")
>>> env = gym.make(task_name, cfg=cfg)
"""
from __future__ import annotations
import gymnasium as gym # noqa: F401
import os
import toml
# Conveniences to other module directories via relative paths
ORBIT_CONTRIB_TASKS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../"))
"""Path to the extension source directory."""
ORBIT_CONTRIB_TASKS_METADATA = toml.load(os.path.join(ORBIT_CONTRIB_TASKS_EXT_DIR, "config", "extension.toml"))
"""Extension metadata dictionary parsed from the extension.toml file."""
# Configure the module-level variables
__version__ = ORBIT_CONTRIB_TASKS_METADATA["package"]["version"]
| 1,853 |
Python
| 40.199999 | 111 | 0.740421 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.contrib_tasks/docs/CHANGELOG.rst
|
Changelog
---------
0.0.1 (2023-01-17)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Initial dummy extension for community to contribute environments.
| 141 |
reStructuredText
| 11.90909 | 67 | 0.58156 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.contrib_tasks/docs/README.md
|
# Orbit: Contributed Environment
This extension serves as a platform to host contributed environments from the robotics and machine learning
community. The extension follows the same style as the `omni.isaac.orbit_tasks` extension.
The environments should follow the `gym.Env` API from OpenAI Gym version `0.21.0`. They need to be registered using
the Gym registry.
To follow the same convention, each environment's name is composed of `Isaac-Contrib-<Task>-<Robot>-v<X>`,
where `<Task>` indicates the skill to learn in the environment, `<Robot>` indicates the embodiment of the
acting agent, and `<X>` represents the version of the environment (which can be used to suggest different
observation or action spaces).
The environments can be configured using either Python classes (wrapped using `configclass` decorator) or through
YAML files. The template structure of the environment is always put at the same level as the environment file
itself. However, its various instances should be included in directories within the environment directory itself.
The environments should then be registered in the `omni/isaac/contrib_tasks/__init__.py`:
```python
import gymnasium as gym
gym.register(
id="Isaac-Contrib-<my-awesome-env>-v0",
entry_point="omni.isaac.contrib_tasks.<your-env-package>:<your-env-class>",
disable_env_checker=True,
kwargs={"cfg_entry_point": "omni.isaac.contrib_tasks.<your-env-package-cfg>:<your-env-class-cfg>"},
)
```
| 1,463 |
Markdown
| 47.799998 | 115 | 0.772386 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/test/compat/test_kit_utils.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
from omni.isaac.kit import SimulationApp
# launch the simulator
config = {"headless": False}
simulation_app = SimulationApp(config)
"""Rest everything follows."""
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.viewports import set_camera_view
import omni.isaac.orbit.compat.utils.kit as kit_utils
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
class TestKitUtilities(unittest.TestCase):
"""Test fixture for checking Kit utilities in Orbit."""
@classmethod
def tearDownClass(cls):
"""Closes simulator after running all test fixtures."""
simulation_app.close()
def setUp(self) -> None:
"""Create a blank new stage for each test."""
# Simulation time-step
self.dt = 0.1
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
# Set camera view
set_camera_view(eye=[1.0, 1.0, 1.0], target=[0.0, 0.0, 0.0])
# Spawn things into stage
self._populate_scene()
# Wait for spawning
stage_utils.update_stage()
def tearDown(self) -> None:
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
self.sim.clear()
def test_rigid_body_properties(self):
"""Disable setting of rigid body properties."""
# create marker
prim_utils.create_prim(
"/World/marker", usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd"
)
# set marker properties
kit_utils.set_nested_rigid_body_properties("/World/marker", rigid_body_enabled=False)
kit_utils.set_nested_collision_properties("/World/marker", collision_enabled=False)
# play simulation
self.sim.reset()
for _ in range(5):
self.sim.step()
"""
Helper functions.
"""
@staticmethod
def _populate_scene():
"""Add prims to the scene."""
# Ground-plane
kit_utils.create_ground_plane("/World/defaultGroundPlane")
# Lights-1
prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0))
# Lights-2
prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0))
if __name__ == "__main__":
unittest.main()
| 2,711 |
Python
| 29.818181 | 107 | 0.648469 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/test/compat/sensors/test_camera.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
import logging
from omni.isaac.kit import SimulationApp
# launch the simulator
config = {"headless": True}
simulation_app = SimulationApp(config)
# disable matplotlib debug messages
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
"""Rest everything follows."""
import numpy as np
import os
import random
import scipy.spatial.transform as tf
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.replicator.core as rep
from omni.isaac.core.prims import RigidPrim
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.torch import set_seed
from omni.isaac.core.utils.viewports import set_camera_view
from pxr import Gf, UsdGeom
import omni.isaac.orbit.compat.utils.kit as kit_utils
from omni.isaac.orbit.compat.sensors.camera import Camera, PinholeCameraCfg
from omni.isaac.orbit.utils.math import convert_quat
from omni.isaac.orbit.utils.timer import Timer
class TestCameraSensor(unittest.TestCase):
"""Test fixture for checking camera interface."""
@classmethod
def tearDownClass(cls):
"""Closes simulator after running all test fixtures."""
simulation_app.close()
def setUp(self) -> None:
"""Create a blank new stage for each test."""
# Simulation time-step
self.dt = 0.01
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
# Set camera view
set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
# Fix random seed -- to generate same scene every time
set_seed(0)
# Spawn things into stage
self._populate_scene()
# Wait for spawning
stage_utils.update_stage()
def tearDown(self) -> None:
"""Stops simulator after each test."""
# close all the opened viewport from before.
rep.vp_manager.destroy_hydra_textures()
# stop simulation
self.sim.stop()
self.sim.clear()
def test_camera_resolution(self):
"""Checks that a camera provides image at the resolution specified."""
# Create camera instance
camera_cfg = PinholeCameraCfg(
sensor_tick=0,
height=480,
width=640,
data_types=["rgb", "distance_to_image_plane"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera = Camera(cfg=camera_cfg, device="cpu")
camera.spawn("/World/CameraSensor")
# Play simulator
self.sim.reset()
# Initialize sensor
camera.initialize()
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# expected camera image shape
height_expected, width_expected = camera.image_shape
# check that the camera image shape is correct
for im_data in camera.data.output.values():
if not isinstance(im_data, np.ndarray):
continue
height, width = im_data.shape[:2]
self.assertEqual(height, height_expected)
self.assertEqual(width, width_expected)
def test_default_camera(self):
"""Checks that the pre-existing stage camera is configured correctly."""
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(test_dir, "output", "camera", "kit_persp")
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Create replicator writer
rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)
# Create camera instance
camera_cfg = PinholeCameraCfg(
sensor_tick=0,
height=180,
width=320,
data_types=["rgb", "distance_to_image_plane", "normals", "distance_to_camera"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera = Camera(cfg=camera_cfg, device="cpu")
# Note: the camera is spawned by default in the stage
# camera.spawn("/World/CameraSensor")
# Play simulator
self.sim.reset()
# Initialize sensor
camera.initialize("/OmniverseKit_Persp")
# Set camera pose
set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
# Simulate physics
for i in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# Save images
rep_writer.write(camera.data.output)
# Check image data
# expect same frame number
self.assertEqual(i + 1, camera.frame)
# expected camera image shape
height_expected, width_expected = camera.image_shape
# check that the camera image shape is correct
for im_data in camera.data.output.values():
if not isinstance(im_data, np.ndarray):
continue
height, width = im_data.shape[:2]
self.assertEqual(height, height_expected)
self.assertEqual(width, width_expected)
def test_single_cam(self):
"""Checks that the single camera gets created properly."""
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(test_dir, "output", "camera", "single")
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Create replicator writer
rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)
# Create camera instance
camera_cfg = PinholeCameraCfg(
sensor_tick=0,
height=180,
width=320,
data_types=[
"rgb",
"distance_to_image_plane",
"normals",
"distance_to_camera",
# "instance_segmentation",
# "semantic_segmentation",
"bounding_box_2d_tight",
"bounding_box_2d_loose",
"bounding_box_2d_tight",
"bounding_box_3d",
],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera = Camera(cfg=camera_cfg, device="cpu")
# Note: the camera is spawned by default in the stage
camera.spawn("/World/CameraSensor")
# Play simulator
self.sim.reset()
# Initialize sensor
camera.initialize()
# Set camera position directly
# Note: Not a recommended way but was feeling lazy to do it properly.
camera.set_world_pose_from_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0])
# Simulate physics
for i in range(4):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# Save images
rep_writer.write(camera.data.output)
# Check image data
# expected camera image shape
height_expected, width_expected = camera.image_shape
# check that the camera image shape is correct
for im_data in camera.data.output.values():
if not isinstance(im_data, np.ndarray):
continue
height, width = im_data.shape[:2]
self.assertEqual(height, height_expected)
self.assertEqual(width, width_expected)
def test_multiple_cam(self):
"""Checks that the multiple cameras created properly."""
# Create camera instance
# -- default viewport
camera_def_cfg = PinholeCameraCfg(
sensor_tick=0,
height=180,
width=320,
data_types=["rgb"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera_def = Camera(cfg=camera_def_cfg, device="cpu")
# -- camera 1
camera1_cfg = PinholeCameraCfg(
sensor_tick=0,
height=180,
width=320,
data_types=["rgb", "distance_to_image_plane", "normals"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera_1 = Camera(cfg=camera1_cfg, device="cpu")
# -- camera 2
camera2_cfg = PinholeCameraCfg(
sensor_tick=0,
height=256,
width=256,
data_types=["rgb", "distance_to_image_plane", "normals", "distance_to_camera"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera_2 = Camera(cfg=camera2_cfg, device="cpu")
# Note: the camera is spawned by default in the stage
camera_1.spawn("/World/CameraSensor1")
camera_2.spawn("/World/CameraSensor2")
# Play simulator
self.sim.reset()
# Initialize sensor
camera_def.initialize("/OmniverseKit_Persp")
camera_1.initialize()
camera_2.initialize()
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera_def.update(self.dt)
camera_1.update(self.dt)
camera_2.update(self.dt)
# Check image data
for cam in [camera_def, camera_1, camera_2]:
# expected camera image shape
height_expected, width_expected = cam.image_shape
# check that the camera image shape is correct
for im_data in cam.data.output.values():
if not isinstance(im_data, np.ndarray):
continue
height, width = im_data.shape[:2]
self.assertEqual(height, height_expected)
self.assertEqual(width, width_expected)
def test_intrinsic_matrix(self):
"""Checks that the camera's set and retrieve methods work for intrinsic matrix."""
# Create camera instance
camera_cfg = PinholeCameraCfg(
sensor_tick=0,
height=240,
width=320,
data_types=["rgb", "distance_to_image_plane"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera = Camera(cfg=camera_cfg, device="cpu")
# Note: the camera is spawned by default in the stage
camera.spawn("/World/CameraSensor")
# Desired properties (obtained from realsense camera at 320x240 resolution)
rs_intrinsic_matrix = [229.31640625, 0.0, 164.810546875, 0.0, 229.826171875, 122.1650390625, 0.0, 0.0, 1.0]
rs_intrinsic_matrix = np.array(rs_intrinsic_matrix).reshape(3, 3)
# Set matrix into simulator
camera.set_intrinsic_matrix(rs_intrinsic_matrix)
# Play simulator
self.sim.reset()
# Initialize sensor
camera.initialize()
# Simulate physics
for _ in range(10):
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# Check that matrix is correct
K = camera.data.intrinsic_matrix
# TODO: This is not correctly setting all values in the matrix since the
# vertical aperture and aperture offsets are not being set correctly
# This is a bug in the simulator.
self.assertAlmostEqual(rs_intrinsic_matrix[0, 0], K[0, 0], 4)
# self.assertAlmostEqual(rs_intrinsic_matrix[1, 1], K[1, 1], 4)
# Display results
print(f">>> Desired intrinsic matrix: \n{rs_intrinsic_matrix}")
print(f">>> Current intrinsic matrix: \n{camera.data.intrinsic_matrix}")
def test_set_pose_ros(self):
"""Checks that the camera's set and retrieve methods work for pose in ROS convention."""
# Create camera instance
camera_cfg = PinholeCameraCfg(
sensor_tick=0,
height=240,
width=320,
data_types=["rgb", "distance_to_image_plane"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera = Camera(cfg=camera_cfg, device="cpu")
# Note: the camera is spawned by default in the stage
camera.spawn("/World/CameraSensor")
# Play simulator
self.sim.reset()
# Initialize sensor
camera.initialize()
# Simulate physics
for _ in range(10):
# set camera pose randomly
camera_position = np.random.random(3) * 5.0
camera_orientation = convert_quat(tf.Rotation.random().as_quat(), "wxyz")
camera.set_world_pose_ros(pos=camera_position, quat=camera_orientation)
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# Check that pose is correct
# -- position
np.testing.assert_almost_equal(camera.data.position, camera_position, 4)
# -- orientation
if np.sign(camera.data.orientation[0]) != np.sign(camera_orientation[0]):
camera_orientation *= -1
np.testing.assert_almost_equal(camera.data.orientation, camera_orientation, 4)
def test_set_pose_from_view(self):
"""Checks that the camera's set method works for look-at pose."""
# Create camera instance
camera_cfg = PinholeCameraCfg(
sensor_tick=0,
height=240,
width=320,
data_types=["rgb", "distance_to_image_plane"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera = Camera(cfg=camera_cfg, device="cpu")
# Note: the camera is spawned by default in the stage
camera.spawn("/World/CameraSensor")
# Play simulator
self.sim.reset()
# Initialize sensor
camera.initialize()
# Test look-at pose
# -- inputs
eye = np.array([2.5, 2.5, 2.5])
targets = [np.array([0.0, 0.0, 0.0]), np.array([2.5, 2.5, 0.0])]
# -- expected outputs
camera_position = eye.copy()
camera_orientations = [
np.array([-0.17591989, 0.33985114, 0.82047325, -0.42470819]),
np.array([0.0, 1.0, 0.0, 0.0]),
]
# check that the camera pose is correct
for target, camera_orientation in zip(targets, camera_orientations):
# set camera pose
camera.set_world_pose_from_view(eye=eye, target=target)
# perform rendering
self.sim.step()
# update camera
camera.update(self.dt)
# Check that pose is correct
# -- position
np.testing.assert_almost_equal(camera.data.position, camera_position, 4)
# -- orientation
if np.sign(camera.data.orientation[0]) != np.sign(camera_orientation[0]):
camera_orientation *= -1
np.testing.assert_almost_equal(camera.data.orientation, camera_orientation, 4)
def test_throughput(self):
"""Checks that the single camera gets created properly with a rig."""
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(test_dir, "output", "camera", "throughput")
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Create replicator writer
rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)
# Create camera instance
camera_cfg = PinholeCameraCfg(
sensor_tick=0,
height=480,
width=640,
data_types=["rgb", "distance_to_image_plane"],
usd_params=PinholeCameraCfg.UsdCameraCfg(clipping_range=(0.1, 1.0e5)),
)
camera = Camera(cfg=camera_cfg, device="cpu")
# Note: the camera is spawned by default in the stage
camera.spawn("/World/CameraSensor")
# Play simulator
self.sim.reset()
# Initialize sensor
camera.initialize()
# Set camera pose
camera.set_world_pose_from_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# Simulate physics
for _ in range(5):
# perform rendering
self.sim.step()
# update camera
with Timer(f"Time taken for updating camera with shape {camera.image_shape}"):
camera.update(self.dt)
# Save images
with Timer(f"Time taken for writing data with shape {camera.image_shape} "):
rep_writer.write(camera.data.output)
print("----------------------------------------")
# Check image data
# expected camera image shape
height_expected, width_expected = camera.image_shape
# check that the camera image shape is correct
for im_data in camera.data.output.values():
if not isinstance(im_data, np.ndarray):
continue
height, width = im_data.shape[:2]
self.assertEqual(height, height_expected)
self.assertEqual(width, width_expected)
"""
Helper functions.
"""
@staticmethod
def _populate_scene():
"""Add prims to the scene."""
# Ground-plane
kit_utils.create_ground_plane("/World/defaultGroundPlane")
# Lights-1
prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0))
# Lights-2
prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0))
# Random objects
for i in range(8):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
# create prim
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
_ = prim_utils.create_prim(
f"/World/Objects/Obj_{i:02d}",
prim_type,
translation=position,
scale=(0.25, 0.25, 0.25),
semantic_label=prim_type,
)
# add rigid properties
rigid_obj = RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0)
# cast to geom prim
geom_prim = getattr(UsdGeom, prim_type)(rigid_obj.prim)
# set random color
color = Gf.Vec3f(random.random(), random.random(), random.random())
geom_prim.CreateDisplayColorAttr()
geom_prim.GetDisplayColorAttr().Set([color])
if __name__ == "__main__":
unittest.main(verbosity=2)
| 19,171 |
Python
| 37.888438 | 115 | 0.579886 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/test/compat/sensors/test_height_scanner.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
import logging
from omni.isaac.kit import SimulationApp
# launch the simulator
config = {"headless": True}
simulation_app = SimulationApp(config)
# disable matplotlib debug messages
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
"""Rest everything follows."""
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import scipy.spatial.transform as tf
import unittest
import omni.isaac.core.utils.prims as prim_utils
from omni.isaac.core.objects.cuboid import DynamicCuboid
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.viewports import set_camera_view
import omni.isaac.orbit.compat.utils.kit as kit_utils
from omni.isaac.orbit.compat.sensors.height_scanner import HeightScanner, HeightScannerCfg
from omni.isaac.orbit.compat.sensors.height_scanner.utils import create_points_from_grid, plot_height_grid
from omni.isaac.orbit.utils.math import convert_quat
from omni.isaac.orbit.utils.timer import Timer
class TestHeightScannerSensor(unittest.TestCase):
"""Test fixture for checking height scanner interface."""
@classmethod
def tearDownClass(cls):
"""Closes simulator after running all test fixtures."""
simulation_app.close()
def setUp(self) -> None:
"""Create a blank new stage for each test."""
# Simulation time-step
self.dt = 0.1
# Load kit helper
self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy")
# Set camera view
set_camera_view(eye=[5.0, 5.0, 5.0], target=[0.0, 0.0, 0.0])
# Spawn things into stage
self._populate_scene()
# Add height scanner
# -- create query points from a grid
self.grid_size = (1.0, 0.6)
self.grid_resolution = 0.1
scan_points = create_points_from_grid(self.grid_size, self.grid_resolution)
# -- create sensor instance
scanner_config = HeightScannerCfg(
sensor_tick=0.0,
offset=(0.0, 0.0, 0.0),
points=scan_points,
max_distance=0.45,
)
self.height_scanner = HeightScanner(scanner_config)
# -- spawn sensor
self.height_scanner.spawn("/World/heightScanner")
self.height_scanner.set_visibility(True)
def tearDown(self) -> None:
"""Stops simulator after each test."""
self.sim.stop()
self.sim.clear()
def test_height_scanner_visibility(self):
"""Checks that height map visibility method works."""
# Play simulator
self.sim.reset()
# Setup the stage
self.height_scanner.initialize()
# flag for visualizing sensor
toggle = True
# Simulate physics
for i in range(100):
# set visibility
if i % 100 == 0:
toggle = not toggle
print(f"Setting visibility: {toggle}")
self.height_scanner.set_visibility(toggle)
# perform rendering
self.sim.step()
# compute yaw -> quaternion
yaw = 10 * i
quat = tf.Rotation.from_euler("z", yaw, degrees=True).as_quat()
quat = convert_quat(quat, "wxyz")
# update sensor
self.height_scanner.update(self.dt, [0.0, 0.0, 0.5], quat)
def test_static_height_scanner(self):
"""Checks that static height map scanner is set correctly and provides right measurements."""
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
plot_dir = os.path.join(test_dir, "output", "height_scan", "static")
if not os.path.exists(plot_dir):
os.makedirs(plot_dir, exist_ok=True)
# Play simulator
self.sim.reset()
# Setup the stage
self.height_scanner.initialize()
# Simulate physics
for i in range(5):
# perform rendering
self.sim.step()
# update camera
self.height_scanner.update(self.dt, [0.0, 0.0, 0.5], [1.0, 0.0, 0.0, 0.0])
# print state
print(self.height_scanner)
# create figure
fig = plt.figure()
ax = plt.gca()
# plot the scanned distance
caxes = plot_height_grid(self.height_scanner.data.hit_distance, self.grid_size, self.grid_resolution, ax=ax)
fig.colorbar(caxes, ax=ax)
# add grid
ax.grid(color="w", linestyle="--", linewidth=1)
# save and close the figure
plt.savefig(os.path.join(plot_dir, f"{i:03d}.png"), bbox_inches="tight", pad_inches=0.1)
plt.close()
def test_dynamic_height_scanner(self):
"""Checks that height map scanner works when base frame is rotating."""
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
plot_dir = os.path.join(test_dir, "output", "height_scan", "dynamic")
if not os.path.exists(plot_dir):
os.makedirs(plot_dir, exist_ok=True)
# Play simulator
self.sim.reset()
# Setup the stage
self.height_scanner.initialize()
# Simulate physics
for i in range(5):
# perform rendering
self.sim.step()
# compute yaw -> quaternion
yaw = 10 * i
quat = tf.Rotation.from_euler("z", yaw, degrees=True).as_quat()
quat = convert_quat(quat, "wxyz")
# update sensor
self.height_scanner.update(self.dt, [0.0, 0.0, 0.5], quat)
# create figure
fig = plt.figure()
ax = plt.gca()
# plot the scanned distance
caxes = plot_height_grid(self.height_scanner.data.hit_distance, self.grid_size, self.grid_resolution, ax=ax)
fig.colorbar(caxes, ax=ax)
# add grid
ax.grid(color="w", linestyle="--", linewidth=1)
# save and close the figure
plt.savefig(os.path.join(plot_dir, f"{i:03d}.png"), bbox_inches="tight", pad_inches=0.1)
plt.close()
def test_height_scanner_filtering(self):
"""Checks that static height map scanner filters out the ground prim.
The first time, all the cube prims are ignored. After that, they are ignored one-by-one cyclically.
"""
# Create directory to dump results
test_dir = os.path.dirname(os.path.abspath(__file__))
plot_dir = os.path.join(test_dir, "output", "height_scan", "filter")
if not os.path.exists(plot_dir):
os.makedirs(plot_dir, exist_ok=True)
# Configure filter prims
self.height_scanner.set_filter_prims([f"/World/Cube/cube{i:02d}" for i in range(4)])
# Play simulator
self.sim.reset()
# Setup the stage
self.height_scanner.initialize()
# Simulate physics
for i in range(6):
# set different filter prims
if i > 0:
cube_id = i - 1
self.height_scanner.set_filter_prims([f"/World/Cube/cube{cube_id:02}"])
if i > 4:
self.height_scanner.set_filter_prims(None)
# perform rendering
self.sim.step()
# update sensor
self.height_scanner.update(self.dt, [0.0, 0.0, 0.5], [1.0, 0.0, 0.0, 0.0])
# create figure
fig = plt.figure()
ax = plt.gca()
# plot the scanned distance
caxes = plot_height_grid(self.height_scanner.data.hit_distance, self.grid_size, self.grid_resolution, ax=ax)
fig.colorbar(caxes, ax=ax)
# add grid
ax.grid(color="w", linestyle="--", linewidth=1)
# save and close the figure
plt.savefig(os.path.join(plot_dir, f"{i:03d}.png"), bbox_inches="tight", pad_inches=0.1)
plt.close()
def test_scanner_throughput(self):
"""Measures the scanner throughput while using scan points used for ANYmal robot."""
# Add height scanner
# -- create sensor instance
scanner_config = HeightScannerCfg(
sensor_tick=0.0,
offset=(0.0, 0.0, 0.0),
points=self._compute_anymal_height_scanner_points(),
max_distance=1.0,
)
self.anymal_height_scanner = HeightScanner(scanner_config)
# -- spawn sensor
self.anymal_height_scanner.spawn("/World/heightScannerAnymal")
self.anymal_height_scanner.set_visibility(True)
# Play simulator
self.sim.reset()
# Setup the stage
self.anymal_height_scanner.initialize()
# Turn rendering on
self.anymal_height_scanner.set_visibility(True)
# Simulate physics
for i in range(2):
# perform rendering
self.sim.step()
# update sensor
with Timer(f"[No Vis , Step {i:02d}]: Scanning time for {scanner_config.points.shape[0]} points"):
self.anymal_height_scanner.update(self.dt, [0.0, 0.0, 0.5], [1.0, 0.0, 0.0, 0.0])
# Turn rendering off
self.anymal_height_scanner.set_visibility(False)
# Simulate physics
for i in range(2):
# perform rendering
self.sim.step()
# update sensor
with Timer(f"[With Vis, Step {i:02d}] Scanning time for {scanner_config.points.shape[0]} points"):
self.anymal_height_scanner.update(self.dt, [0.0, 0.0, 0.5], [1.0, 0.0, 0.0, 0.0])
"""
Helper functions.
"""
@staticmethod
def _populate_scene():
"""Add prims to the scene."""
# Ground-plane
kit_utils.create_ground_plane("/World/defaultGroundPlane")
# Lights-1
prim_utils.create_prim("/World/Light/GreySphere", "SphereLight", translation=(4.5, 3.5, 10.0))
# Lights-2
prim_utils.create_prim("/World/Light/WhiteSphere", "SphereLight", translation=(-4.5, 3.5, 10.0))
# Cubes
num_cubes = 4
for i in range(num_cubes):
# resolve position to put them on vertex of a regular polygon
theta = 2 * np.pi / num_cubes
c, s = np.cos(theta * i), np.sin(theta * i)
rotm = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
position = np.matmul(rotm, np.asarray([0.25, 0.25, 0.1 + 0.05 * i]))
color = np.array([random.random(), random.random(), random.random()])
# create prim
_ = DynamicCuboid(
prim_path=f"/World/Cube/cube{i:02d}", position=position, size=position[2] * 2, color=color
)
@staticmethod
def _compute_anymal_height_scanner_points() -> np.ndarray:
"""Computes the query height-scan points relative to base frame of robot.
Returns:
A numpy array of shape (N, 3) comprising the query scan points.
"""
# offset from the base frame - over each leg
offsets = [[0.45, 0.3, 0.0], [-0.46, 0.3, 0.0], [0.45, -0.3, 0.0], [-0.46, -0.3, 0.0]]
offsets = np.asarray(offsets)
# local grid over each offset point
measurement_points = [
[0.1, 0.0, 0.0],
[0.0, 0.1, 0.0],
[-0.1, 0.0, 0.0],
[0.0, -0.1, 0.0],
[0.1, 0.1, 0.0],
[-0.1, 0.1, 0.0],
[0.1, -0.1, 0.0],
[-0.1, -0.1, 0.0],
[0.2, 0.0, 0.0],
[0.0, 0.2, 0.0],
[-0.2, 0.0, 0.0],
[0.0, -0.2, 0.0],
[0.2, 0.2, 0.0],
[-0.2, 0.2, 0.0],
[0.2, -0.2, 0.0],
[-0.2, -0.2, 0.0],
[0.3, 0.0, 0.0],
[0.3, 0.1, 0.0],
[0.3, 0.2, 0.0],
[0.3, -0.1, 0.0],
[0.3, -0.2, 0.0],
[-0.3, 0.0, 0.0],
[-0.3, 0.1, 0.0],
[-0.3, 0.2, 0.0],
[-0.3, -0.1, 0.0],
[-0.3, -0.2, 0.0],
[0.0, 0.0, 0.0],
]
measurement_points = np.asarray(measurement_points)
# create a joined list over offsets and local measurement points
# we use broadcasted addition to make this operation quick
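# shapes: (4, 1, 3) + (27, 3) -> (4, 27, 3), reshaped to (4 * 27, 3) = (108, 3)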
scan_points = (offsets[:, None, :] + measurement_points).reshape(-1, 3)
return scan_points
if __name__ == "__main__":
unittest.main()
| 12,567 |
Python
| 36.183432 | 120 | 0.562505 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/test/assets/test_rigid_object.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# ignore private usage of variables warning
# pyright: reportPrivateUsage=none
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
from omni.isaac.orbit.app import AppLauncher
# launch omniverse app
app_launcher = AppLauncher(headless=True)
simulation_app = app_launcher.app
"""Rest everything follows."""
import ctypes
import torch
import unittest
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import RigidObject, RigidObjectCfg
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
class TestRigidObject(unittest.TestCase):
"""Test for rigid object class."""
def setUp(self):
"""Create a blank new stage for each test."""
# Create a new stage
stage_utils.create_new_stage()
# Simulation time-step
self.dt = 0.01
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(dt=self.dt, device="cuda:0")
self.sim = sim_utils.SimulationContext(sim_cfg)
def tearDown(self):
"""Stops simulator after each test."""
# stop simulation
self.sim.stop()
# clear the stage
self.sim.clear_instance()
"""
Tests
"""
def test_initialization(self):
"""Test initialization for with rigid body API at the provided prim path."""
# Create rigid object
cube_object_cfg = RigidObjectCfg(
prim_path="/World/Object",
spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd"),
init_state=RigidObjectCfg.InitialStateCfg(),
)
cube_object = RigidObject(cfg=cube_object_cfg)
# Check that the boundedness of the rigid object is correct
self.assertEqual(ctypes.c_long.from_address(id(cube_object)).value, 1)
# Play sim
self.sim.reset()
# Check if object is initialized
self.assertTrue(cube_object._is_initialized)
self.assertEqual(len(cube_object.body_names), 1)
# Check buffers that exists and have correct shapes
self.assertTrue(cube_object.data.root_pos_w.shape == (1, 3))
self.assertTrue(cube_object.data.root_quat_w.shape == (1, 4))
# Simulate physics
for _ in range(20):
# perform rendering
self.sim.step()
# update object
cube_object.update(self.dt)
def test_external_force_on_single_body(self):
"""Test application of external force on the base of the object.
In this test, we apply a force equal to the weight of the object on the base of
one of the objects. We check that the object does not move. For the other object,
we do not apply any force and check that it falls down.
"""
# Create parent prims
prim_utils.create_prim("/World/Table_1", "Xform", translation=(0.0, -1.0, 0.0))
prim_utils.create_prim("/World/Table_2", "Xform", translation=(0.0, 1.0, 0.0))
# Create rigid object
cube_object_cfg = RigidObjectCfg(
prim_path="/World/Table_.*/Object",
spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd"),
init_state=RigidObjectCfg.InitialStateCfg(),
)
# create handles for the objects
cube_object = RigidObject(cfg=cube_object_cfg)
# Play the simulator
self.sim.reset()
# Find bodies to apply the force
body_ids, _ = cube_object.find_bodies(".*")
# Compute an upward force equal to the first object's weight (gravity compensation)
external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=self.sim.device)
external_wrench_b[0, 0, 2] = 9.81 * cube_object.root_physx_view.get_masses()[0]
# Now we are ready!
for _ in range(5):
# reset root state
root_state = cube_object.data.default_root_state.clone()
root_state[0, :2] = torch.tensor([0.0, -1.0], device=self.sim.device)
root_state[1, :2] = torch.tensor([0.0, 1.0], device=self.sim.device)
cube_object.write_root_state_to_sim(root_state)
# reset object
cube_object.reset()
# apply force
cube_object.set_external_force_and_torque(
external_wrench_b[..., :3], external_wrench_b[..., 3:], body_ids=body_ids
)
# perform simulation
for _ in range(50):
# apply action to the object
cube_object.write_data_to_sim()
# perform step
self.sim.step()
# update buffers
cube_object.update(self.dt)
# check that the force-compensated object stayed in place and the other fell down
self.assertLess(abs(cube_object.data.root_pos_w[0, 2].item()), 1e-3)
self.assertLess(cube_object.data.root_pos_w[1, 2].item(), -1.0)
if __name__ == "__main__":
# run main
unittest.main(verbosity=2, exit=False)
# close sim app
simulation_app.close()
| 5,245 |
Python
| 34.687075 | 119 | 0.618494 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/ray_caster/patterns/patterns_cfg.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the ray-cast sensor."""
from __future__ import annotations
import torch
from collections.abc import Callable, Sequence
from dataclasses import MISSING
from omni.isaac.orbit.utils import configclass
from . import patterns
@configclass
class PatternBaseCfg:
"""Base configuration for a pattern."""
func: Callable[[PatternBaseCfg, str], tuple[torch.Tensor, torch.Tensor]] = MISSING
"""Function to generate the pattern.
The function should take in the configuration and the device name as arguments. It should return
the pattern's starting positions and directions as a tuple of torch.Tensor.
"""
@configclass
class GridPatternCfg(PatternBaseCfg):
"""Configuration for the grid pattern for ray-casting.
Defines a 2D grid of rays in the coordinates of the sensor.
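Example (a minimal sketch; the resolution and size values below are illustrative):
.. code-block:: python
pattern_cfg = GridPatternCfg(resolution=0.1, size=(1.6, 1.0))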
"""
func: Callable = patterns.grid_pattern
resolution: float = MISSING
"""Grid resolution (in meters)."""
size: tuple[float, float] = MISSING
"""Grid size (length, width) (in meters)."""
direction: tuple[float, float, float] = (0.0, 0.0, -1.0)
"""Ray direction. Defaults to (0.0, 0.0, -1.0)."""
@configclass
class PinholeCameraPatternCfg(PatternBaseCfg):
"""Configuration for a pinhole camera depth image pattern for ray-casting."""
func: Callable = patterns.pinhole_camera_pattern
focal_length: float = 24.0
"""Perspective focal length (in cm). Defaults to 24.0cm.
Longer focal lengths give a narrower field of view; shorter focal lengths give a wider one.
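For a pinhole model, the horizontal field of view follows ``2 * atan(horizontal_aperture / (2 * focal_length))``, with both quantities expressed in the same unit.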
"""
horizontal_aperture: float = 20.955
"""Horizontal aperture (in mm). Defaults to 20.955mm.
Emulates sensor/film width on a camera.
Note:
The default value is the horizontal aperture of a 35 mm spherical projector.
"""
horizontal_aperture_offset: float = 0.0
"""Offsets Resolution/Film gate horizontally. Defaults to 0.0."""
vertical_aperture_offset: float = 0.0
"""Offsets Resolution/Film gate vertically. Defaults to 0.0."""
width: int = MISSING
"""Width of the image (in pixels)."""
height: int = MISSING
"""Height of the image (in pixels)."""
@configclass
class BpearlPatternCfg(PatternBaseCfg):
"""Configuration for the Bpearl pattern for ray-casting."""
func: Callable = patterns.bpearl_pattern
horizontal_fov: float = 360.0
"""Horizontal field of view (in degrees). Defaults to 360.0."""
horizontal_res: float = 10.0
"""Horizontal resolution (in degrees). Defaults to 10.0."""
# fmt: off
vertical_ray_angles: Sequence[float] = [
89.5, 86.6875, 83.875, 81.0625, 78.25, 75.4375, 72.625, 69.8125, 67.0, 64.1875, 61.375,
58.5625, 55.75, 52.9375, 50.125, 47.3125, 44.5, 41.6875, 38.875, 36.0625, 33.25, 30.4375,
27.625, 24.8125, 22, 19.1875, 16.375, 13.5625, 10.75, 7.9375, 5.125, 2.3125
]
# fmt: on
"""Vertical ray angles (in degrees). Defaults to a list of 32 angles.
Note:
We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles
are not evenly spaced.
"""
| 3,192 |
Python
| 30.303921 | 100 | 0.677945 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/camera/camera_cfg.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
from dataclasses import MISSING
from typing import Literal
from omni.isaac.orbit.sim import FisheyeCameraCfg, PinholeCameraCfg
from omni.isaac.orbit.utils import configclass
from ..sensor_base_cfg import SensorBaseCfg
from .camera import Camera
@configclass
class CameraCfg(SensorBaseCfg):
"""Configuration for a camera sensor."""
@configclass
class OffsetCfg:
"""The offset pose of the sensor's frame from the sensor's parent frame."""
pos: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0)."""
rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0)."""
convention: Literal["opengl", "ros", "world"] = "ros"
"""The convention in which the frame offset is applied. Defaults to "ros".
- ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) convention.
- ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention.
- ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention.
"""
class_type: type = Camera
offset: OffsetCfg = OffsetCfg()
"""The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.
Note:
The parent frame is the frame the sensor attaches to. For example, the parent frame of a
camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``.
"""
spawn: PinholeCameraCfg | FisheyeCameraCfg | None = MISSING
"""Spawn configuration for the asset.
If None, then the prim is not spawned by the asset. Instead, it is assumed that the
asset is already present in the scene.
"""
data_types: list[str] = ["rgb"]
"""List of sensor names/types to enable for the camera. Defaults to ["rgb"]."""
width: int = MISSING
"""Width of the image in pixels."""
height: int = MISSING
"""Height of the image in pixels."""
semantic_types: list[str] = ["class"]
"""List of allowed semantic types the types. Defaults to ["class"].
For example, if semantic types is [“class”], only the bounding boxes for prims with semantics of
type “class” will be retrieved.
More information available at:
https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering
"""
colorize: bool = False
"""whether to output colorized semantic information or non-colorized one. Defaults to False.
If True, the semantic images will be a 2D array of RGBA values, where each pixel is colored according to
the semantic type. Accordingly, the information output will contain mapping from color to semantic labels.
If False, the semantic images will be a 2D array of integers, where each pixel is an integer representing
the semantic ID. Accordingly, the information output will contain mapping from semantic ID to semantic labels.
"""
| 3,318 |
Python
| 37.593023 | 123 | 0.671187 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/managers/randomization_manager.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Randomization manager for randomizing different elements in the scene."""
from __future__ import annotations
import torch
from collections.abc import Sequence
from prettytable import PrettyTable
from typing import TYPE_CHECKING
import carb
from .manager_base import ManagerBase, ManagerTermBase
from .manager_term_cfg import RandomizationTermCfg
if TYPE_CHECKING:
from omni.isaac.orbit.envs import RLTaskEnv
class RandomizationManager(ManagerBase):
"""Manager for randomizing different elements in the scene.
The randomization manager applies randomization to any instance in the scene. For example, changing the
masses of objects or their friction coefficients, or applying random pushes to the robot. The user can
specify several modes of randomization to specialize the behavior based on when to apply the randomization.
The randomization terms are parsed from a config class containing the manager's settings and each term's
parameters. Each randomization term should instantiate the :class:`RandomizationTermCfg` class.
Randomization terms can be grouped by their mode. The mode is a user-defined string that specifies when
the randomization term should be applied. This provides the user complete control over when randomization
terms should be applied.
For a typical training process, you may want to randomize in the following modes:
- "startup": Randomization term is applied once at the beginning of the training.
- "reset": Randomization is applied at every reset.
- "interval": Randomization is applied at pre-specified intervals of time.
However, you can also define your own modes and use them in the training process as you see fit.
.. note::
The mode ``"interval"`` is the only mode that is handled by the manager itself which is based on
the environment's time step.
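Example (a minimal sketch, assuming the usual ``configclass`` decorator and an ``mdp`` module providing the
term function; the term shown mirrors the environment configs in this repository):
.. code-block:: python
@configclass
class RandomizationCfg:
# applied at every environment reset
reset_robot_joints = RandomizationTermCfg(
func=mdp.reset_joints_by_scale,
mode="reset",
params={"position_range": (0.5, 1.5), "velocity_range": (0.0, 0.0)},
)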
"""
_env: RLTaskEnv
"""The environment instance."""
def __init__(self, cfg: object, env: RLTaskEnv):
"""Initialize the randomization manager.
Args:
cfg: A configuration object or dictionary (``dict[str, RandomizationTermCfg]``).
env: An environment object.
"""
super().__init__(cfg, env)
def __str__(self) -> str:
"""Returns: A string representation for randomization manager."""
msg = f"<RandomizationManager> contains {len(self._mode_term_names)} active terms.\n"
# add info on each mode
for mode in self._mode_term_names:
# create table for term information
table = PrettyTable()
table.title = f"Active Randomization Terms in Mode: '{mode}'"
# add table headers based on mode
if mode == "interval":
table.field_names = ["Index", "Name", "Interval time range (s)"]
table.align["Name"] = "l"
for index, (name, cfg) in enumerate(zip(self._mode_term_names[mode], self._mode_term_cfgs[mode])):
table.add_row([index, name, cfg.interval_range_s])
else:
table.field_names = ["Index", "Name"]
table.align["Name"] = "l"
for index, name in enumerate(self._mode_term_names[mode]):
table.add_row([index, name])
# convert table to string
msg += table.get_string()
msg += "\n"
return msg
"""
Properties.
"""
@property
def active_terms(self) -> dict[str, list[str]]:
"""Name of active randomization terms.
The keys are the modes of randomization and the values are the names of the randomization terms.
"""
return self._mode_term_names
@property
def available_modes(self) -> list[str]:
"""Modes of randomization."""
return list(self._mode_term_names.keys())
"""
Operations.
"""
def reset(self, env_ids: Sequence[int] | None = None) -> dict[str, float]:
# call all terms that are classes
for mode_cfg in self._mode_class_term_cfgs.values():
for term_cfg in mode_cfg:
term_cfg.func.reset(env_ids=env_ids)
# nothing to log here
return {}
def randomize(self, mode: str, env_ids: Sequence[int] | None = None, dt: float | None = None):
"""Calls each randomization term in the specified mode.
Note:
For interval mode, the time step of the environment is used to determine if the randomization should be
applied. If the time step is not constant, the user should pass the time step to this function.
Args:
mode: The mode of randomization.
env_ids: The indices of the environments to apply randomization to.
Defaults to None, in which case the randomization is applied to all environments.
dt: The time step of the environment. This is only used for the "interval" mode.
                Defaults to None. It must be provided for the "interval" mode.
Raises:
ValueError: If the mode is ``"interval"`` and the time step is not provided.
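        Example:
            A typical call pattern from an environment loop (the attribute names
            ``env.randomization_manager`` and ``env.step_dt`` are illustrative assumptions):
            .. code-block:: python
                # on episode resets
                env.randomization_manager.randomize(mode="reset", env_ids=reset_ids)
                # every simulation step, for time-based terms
                env.randomization_manager.randomize(mode="interval", dt=env.step_dt)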
"""
# check if mode is valid
if mode not in self._mode_term_names:
carb.log_warn(f"Randomization mode '{mode}' is not defined. Skipping randomization.")
return
# iterate over all the randomization terms
for index, term_cfg in enumerate(self._mode_term_cfgs[mode]):
# resample interval if needed
if mode == "interval":
if dt is None:
raise ValueError(
f"Randomization mode '{mode}' requires the time step of the environment"
" to be passed to the randomization manager."
)
# extract time left for this term
time_left = self._interval_mode_time_left[index]
# update the time left for each environment
time_left -= dt
# check if the interval has passed
env_ids = (time_left <= 0.0).nonzero().flatten()
if len(env_ids) > 0:
lower, upper = term_cfg.interval_range_s
time_left[env_ids] = torch.rand(len(env_ids), device=self.device) * (upper - lower) + lower
# call the randomization term
term_cfg.func(self._env, env_ids, **term_cfg.params)
"""
Operations - Term settings.
"""
def set_term_cfg(self, term_name: str, cfg: RandomizationTermCfg):
"""Sets the configuration of the specified term into the manager.
The method finds the term by name by searching through all the modes.
It then updates the configuration of the term with the first matching name.
Args:
term_name: The name of the randomization term.
cfg: The configuration for the randomization term.
Raises:
ValueError: If the term name is not found.
"""
term_found = False
for mode, terms in self._mode_term_names.items():
if term_name in terms:
self._mode_term_cfgs[mode][terms.index(term_name)] = cfg
term_found = True
break
if not term_found:
raise ValueError(f"Randomization term '{term_name}' not found.")
def get_term_cfg(self, term_name: str) -> RandomizationTermCfg:
"""Gets the configuration for the specified term.
The method finds the term by name by searching through all the modes.
It then returns the configuration of the term with the first matching name.
Args:
term_name: The name of the randomization term.
Returns:
The configuration of the randomization term.
Raises:
ValueError: If the term name is not found.
"""
for mode, terms in self._mode_term_names.items():
if term_name in terms:
return self._mode_term_cfgs[mode][terms.index(term_name)]
raise ValueError(f"Randomization term '{term_name}' not found.")
"""
Helper functions.
"""
def _prepare_terms(self):
"""Prepares a list of randomization functions."""
# parse remaining randomization terms and decimate their information
self._mode_term_names: dict[str, list[str]] = dict()
self._mode_term_cfgs: dict[str, list[RandomizationTermCfg]] = dict()
self._mode_class_term_cfgs: dict[str, list[RandomizationTermCfg]] = dict()
# buffer to store the time left for each environment for "interval" mode
self._interval_mode_time_left: list[torch.Tensor] = list()
# check if config is dict already
if isinstance(self.cfg, dict):
cfg_items = self.cfg.items()
else:
cfg_items = self.cfg.__dict__.items()
# iterate over all the terms
for term_name, term_cfg in cfg_items:
# check for non config
if term_cfg is None:
continue
# check for valid config type
if not isinstance(term_cfg, RandomizationTermCfg):
raise TypeError(
f"Configuration for the term '{term_name}' is not of type RandomizationTermCfg."
f" Received: '{type(term_cfg)}'."
)
# resolve common parameters
self._resolve_common_term_cfg(term_name, term_cfg, min_argc=2)
# check if mode is a new mode
if term_cfg.mode not in self._mode_term_names:
# add new mode
self._mode_term_names[term_cfg.mode] = list()
self._mode_term_cfgs[term_cfg.mode] = list()
self._mode_class_term_cfgs[term_cfg.mode] = list()
# add term name and parameters
self._mode_term_names[term_cfg.mode].append(term_name)
self._mode_term_cfgs[term_cfg.mode].append(term_cfg)
# check if the term is a class
if isinstance(term_cfg.func, ManagerTermBase):
self._mode_class_term_cfgs[term_cfg.mode].append(term_cfg)
# resolve the mode of randomization
if term_cfg.mode == "interval":
if term_cfg.interval_range_s is None:
raise ValueError(
f"Randomization term '{term_name}' has mode 'interval' but 'interval_range_s' is not specified."
)
# sample the time left for each environment
lower, upper = term_cfg.interval_range_s
time_left = torch.rand(self.num_envs, device=self.device) * (upper - lower) + lower
self._interval_mode_time_left.append(time_left)
| 10,919 |
Python
| 40.679389 | 120 | 0.608481 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/__init__.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-package containing compatibility code with previous release of Orbit.
.. note::
This package is not part of the public API and may be removed in future releases.
"""
| 302 |
Python
| 24.249998 | 85 | 0.738411 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/sensor_base.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Base class for sensors.
This class defines an interface for sensors similar to how the :class:`omni.isaac.orbit.robot.robot_base.RobotBase` class works.
Each sensor class should inherit from this class and implement the abstract methods.
"""
from __future__ import annotations
from abc import abstractmethod
from typing import Any
from warnings import warn
import carb
class SensorBase:
"""The base class for implementing a sensor.
Note:
These sensors are not vectorized yet.
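    Example:
        A minimal subclass sketch (illustrative only; a real sensor would create prims in
        :meth:`spawn` and fill its data container in :meth:`buffer`):
        .. code-block:: python
            class DummySensor(SensorBase):
                def spawn(self, parent_prim_path: str):
                    pass  # no prims needed for this dummy sensor
                def initialize(self):
                    self._readings = []
                def buffer(self):
                    self._readings.append(self.timestamp)
            sensor = DummySensor(sensor_tick=0.1)
            sensor.initialize()
            for _ in range(100):
                sensor.update(dt=0.01)  # buffer() fires every 0.1 simulated seconds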
"""
def __init__(self, sensor_tick: float = 0.0):
"""Initialize the sensor class.
The sensor tick is the time between two sensor buffers. If the sensor tick is zero, then the sensor
buffers are filled at every simulation step.
Args:
sensor_tick: Simulation seconds between sensor buffers. Defaults to 0.0.
"""
# print warning to notify user that the sensor is not vectorized
carb.log_warn("This implementation of the sensor is not vectorized yet. Please use the vectorized version.")
# Copy arguments to class
self._sensor_tick: float = sensor_tick
# Current timestamp of animation (in seconds)
self._timestamp: float = 0.0
# Timestamp from last update
self._timestamp_last_update: float = 0.0
# Frame number when the measurement is taken
self._frame: int = 0
def __init_subclass__(cls, **kwargs):
"""This throws a deprecation warning on subclassing."""
warn(f"{cls.__name__} will be deprecated from v1.0.", DeprecationWarning, stacklevel=1)
super().__init_subclass__(**kwargs)
"""
Properties
"""
@property
def frame(self) -> int:
"""Frame number when the measurement took place."""
return self._frame
@property
def timestamp(self) -> float:
"""Simulation time of the measurement (in seconds)."""
return self._timestamp
@property
def sensor_tick(self) -> float:
"""Simulation seconds between sensor buffers (ticks)."""
return self._sensor_tick
@property
def data(self) -> Any:
"""The data from the simulated sensor."""
return None # noqa: R501
"""
Helpers
"""
def set_visibility(self, visible: bool):
"""Set visibility of the instance in the scene.
Note:
            Sensors are mostly XForms which do not have any mesh associated with them. Thus,
overriding this method is optional.
Args:
visible: Whether to make instance visible or invisible.
"""
pass
"""
Operations
"""
@abstractmethod
def spawn(self, parent_prim_path: str):
"""Spawns the sensor into the stage.
Args:
parent_prim_path: The path of the parent prim to attach sensor to.
"""
raise NotImplementedError
@abstractmethod
def initialize(self):
"""Initializes the sensor handles and internal buffers."""
raise NotImplementedError
def reset(self):
"""Resets the sensor internals."""
# Set current time
self._timestamp = 0.0
self._timestamp_last_update = 0.0
# Set zero captures
self._frame = 0
def update(self, dt: float, *args, **kwargs):
"""Updates the buffers at sensor frequency.
This function performs time-based checks and fills the data into the data container. It
calls the function :meth:`buffer()` to fill the data. The function :meth:`buffer()` should
not be called directly.
Args:
dt: The simulation time-step.
args: Other positional arguments passed to function :meth:`buffer()`.
kwargs: Other keyword arguments passed to function :meth:`buffer()`.
"""
# Get current time
self._timestamp += dt
# Buffer the sensor data.
if (self._timestamp - self._timestamp_last_update) >= self._sensor_tick:
# Buffer the data
self.buffer(*args, **kwargs)
# Update the frame count
self._frame += 1
# Update capture time
self._timestamp_last_update = self._timestamp
@abstractmethod
def buffer(self, *args, **kwargs):
"""Fills the buffers of the sensor data.
This function does not perform any time-based checks and directly fills the data into the data container.
Warning:
Although this method is public, `update(dt)` should be the preferred way of filling buffers.
"""
raise NotImplementedError
| 4,751 |
Python
| 30.263158 | 128 | 0.62429 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/camera/camera.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Camera class in Omniverse workflows."""
from __future__ import annotations
import builtins
import math
import numpy as np
import scipy.spatial.transform as tf
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
import omni.replicator.core as rep
import omni.usd
from omni.isaac.core.prims import XFormPrim
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.rotations import gf_quat_to_np_array
from pxr import Gf, Sdf, Usd, UsdGeom
from omni.isaac.orbit.utils import class_to_dict, to_camel_case
from omni.isaac.orbit.utils.math import convert_quat
from ..sensor_base import SensorBase
from .camera_cfg import FisheyeCameraCfg, PinholeCameraCfg
@dataclass
class CameraData:
"""Data container for the camera sensor."""
position: np.ndarray = None
"""Position of the sensor origin in world frame, following ROS convention."""
orientation: np.ndarray = None
"""Quaternion orientation `(w, x, y, z)` of the sensor origin in world frame, following ROS convention."""
intrinsic_matrix: np.ndarray = None
"""The intrinsic matrix for the camera."""
image_shape: tuple[int, int] = None
"""A tuple containing (height, width) of the camera sensor."""
output: dict[str, Any] = None
"""The retrieved sensor data with sensor types as key.
The format of the data is available in the `Replicator Documentation`_.
.. _Replicator Documentation: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output
"""
class Camera(SensorBase):
r"""The camera sensor for acquiring visual data.
This class wraps over the `UsdGeom Camera`_ for providing a consistent API for acquiring visual data.
It ensures that the camera follows the ROS convention for the coordinate system.
Summarizing from the `replicator extension`_, the following sensor types are supported:
- ``"rgb"``: A rendered color image.
- ``"distance_to_camera"``: An image containing the distance to camera optical center.
- ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis.
- ``"normals"``: An image containing the local surface normal vectors at each pixel.
- ``"motion_vectors"``: An image containing the motion vector data at each pixel.
- ``"instance_segmentation"``: The instance segmentation data.
- ``"semantic_segmentation"``: The semantic segmentation data.
- ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions).
- ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions).
- ``"bounding_box_3d"``: The 3D view space bounding box data.
- ``"occlusion"``: The occlusion information (such as instance id, semantic id and occluded ratio).
The camera sensor supports the following projection types:
- ``"pinhole"``: Standard pinhole camera model (disables fisheye parameters).
- ``"fisheye_orthographic"``: Fisheye camera model using orthographic correction.
- ``"fisheye_equidistant"``: Fisheye camera model using equidistant correction.
- ``"fisheye_equisolid"``: Fisheye camera model using equisolid correction.
- ``"fisheye_polynomial"``: Fisheye camera model with :math:`360^{\circ}` spherical projection.
- ``"fisheye_spherical"``: Fisheye camera model with :math:`360^{\circ}` full-frame projection.
.. _replicator extension: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output
    .. _UsdGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html
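    Example:
        A minimal usage sketch (assumes a running simulation stage; the spawn path and
        configuration values are illustrative):
        .. code-block:: python
            from omni.isaac.orbit.compat.sensors.camera import Camera, PinholeCameraCfg
            cfg = PinholeCameraCfg(width=640, height=480, data_types=["rgb"])
            camera = Camera(cfg, device="cpu")
            camera.spawn("/World")
            camera.initialize()
            camera.update(dt=0.0)
            rgb_image = camera.data.output["rgb"]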
"""
def __init__(self, cfg: PinholeCameraCfg | FisheyeCameraCfg, device: str = "cpu"):
"""Initializes the camera sensor.
If the ``device`` is ``"cpu"``, the output data is returned as a numpy array. If the ``device`` is
``"cuda"``, then a Warp array is returned. Note that only the valid sensor types will be moved to GPU.
Args:
cfg: The configuration parameters.
device: The device on which to receive data. Defaults to "cpu".
"""
# store inputs
self.cfg = cfg
self.device = device
# initialize base class
super().__init__(self.cfg.sensor_tick)
# change the default rendering settings
# TODO: Should this be done here or maybe inside the app config file?
rep.settings.set_render_rtx_realtime(antialiasing="FXAA")
# Xform prim for handling transforms
self._sensor_xform: XFormPrim = None
# UsdGeom Camera prim for the sensor
self._sensor_prim: UsdGeom.Camera = None
# Create empty variables for storing output data
self._data = CameraData()
self._data.output = dict.fromkeys(self.cfg.data_types, None)
# Flag to check that sensor is spawned.
self._is_spawned = False
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
# message for class
return (
f"Camera @ '{self.prim_path}': \n"
f"\tdata types : {list(self._data.output.keys())} \n"
f"\ttick rate (s): {self.sensor_tick}\n"
f"\ttimestamp (s): {self.timestamp}\n"
f"\tframe : {self.frame}\n"
f"\tshape : {self.image_shape}\n"
f"\tposition : {self._data.position} \n"
f"\torientation : {self._data.orientation} \n"
)
"""
Properties
"""
@property
def prim_path(self) -> str:
"""The path to the camera prim."""
return prim_utils.get_prim_path(self._sensor_prim)
@property
def render_product_path(self) -> str:
"""The path of the render product for the camera.
This can be used via replicator interfaces to attach to writes or external annotator registry.
"""
return self._render_product_path
@property
def data(self) -> CameraData:
"""Data related to Camera sensor."""
return self._data
@property
def image_shape(self) -> tuple[int, int]:
"""A tuple containing (height, width) of the camera sensor."""
return (self.cfg.height, self.cfg.width)
"""
Configuration
"""
def set_visibility(self, visible: bool):
"""Set visibility of the instance in the scene.
Args:
visible: Whether to make instance visible or invisible.
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` first.
"""
# check camera prim
if self._sensor_prim is None:
raise RuntimeError("Camera prim is None. Please call 'initialize(...)' first.")
# get imageable object
imageable = UsdGeom.Imageable(self._sensor_prim)
# set visibility
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def set_intrinsic_matrix(self, matrix: np.ndarray, focal_length: float = 1.0):
"""Set parameters of the USD camera from its intrinsic matrix.
The intrinsic matrix and focal length are used to set the following parameters to the USD camera:
- ``focal_length``: The focal length of the camera.
- ``horizontal_aperture``: The horizontal aperture of the camera.
- ``vertical_aperture``: The vertical aperture of the camera.
- ``horizontal_aperture_offset``: The horizontal offset of the camera.
- ``vertical_aperture_offset``: The vertical offset of the camera.
.. warning::
            Due to limitations of the Omniverse camera, we need to assume that the camera has a spherical
            lens, i.e. has square pixels, and that the optical center is at the camera eye. If this
            assumption does not hold for the input intrinsic matrix, the camera will not be set up correctly.
Args:
            matrix: The intrinsic matrix for the camera.
focal_length: Focal length to use when computing aperture values. Defaults to 1.0.
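        Example:
            A worked sketch for a 640 x 480 image (the intrinsic values are illustrative
            and ``camera`` is assumed to be an initialized :class:`Camera`):
            .. code-block:: python
                import numpy as np
                K = np.array([[386.0, 0.0, 320.0], [0.0, 386.0, 240.0], [0.0, 0.0, 1.0]])
                camera.set_intrinsic_matrix(K, focal_length=10.0)
                # horizontal_aperture = width * focal_length / f_x = 640 * 10.0 / 386.0 ~= 16.58 mm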
"""
# convert to numpy for sanity
intrinsic_matrix = np.asarray(matrix, dtype=float)
# extract parameters from matrix
f_x = intrinsic_matrix[0, 0]
c_x = intrinsic_matrix[0, 2]
f_y = intrinsic_matrix[1, 1]
c_y = intrinsic_matrix[1, 2]
# get viewport parameters
height, width = self.image_shape
height, width = float(height), float(width)
# resolve parameters for usd camera
params = {
"focal_length": focal_length,
"horizontal_aperture": width * focal_length / f_x,
"vertical_aperture": height * focal_length / f_y,
"horizontal_aperture_offset": (c_x - width / 2) / f_x,
"vertical_aperture_offset": (c_y - height / 2) / f_y,
}
# set parameters for camera
for param_name, param_value in params.items():
# convert to camel case (CC)
param_name = to_camel_case(param_name, to="CC")
# get attribute from the class
param_attr = getattr(self._sensor_prim, f"Get{param_name}Attr")
# set value
# note: We have to do it this way because the camera might be on a different layer (default cameras are on session layer),
# and this is the simplest way to set the property on the right layer.
omni.usd.utils.set_prop_val(param_attr(), param_value)
"""
Operations - Set pose.
"""
def set_world_pose_ros(self, pos: Sequence[float] = None, quat: Sequence[float] = None):
r"""Set the pose of the camera w.r.t. world frame using ROS convention.
In USD, the camera is always in **Y up** convention. This means that the camera is looking down the -Z axis
        with the +Y axis pointing up, and +X axis pointing right. However, in ROS, the camera is looking down
the +Z axis with the +Y axis pointing down, and +X axis pointing right. Thus, the camera needs to be rotated
by :math:`180^{\circ}` around the X axis to follow the ROS convention.
.. math::
T_{ROS} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & -1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD}
Args:
pos: The cartesian coordinates (in meters). Defaults to None.
quat: The quaternion orientation in (w, x, y, z). Defaults to None.
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
"""
# check camera prim exists
if self._sensor_prim is None:
raise RuntimeError("Camera prim is None. Please call 'initialize(...)' first.")
# convert from meters to stage units
if pos is not None:
pos = np.asarray(pos)
# convert rotation matrix from ROS convention to OpenGL
if quat is not None:
rotm = tf.Rotation.from_quat(convert_quat(quat, "xyzw")).as_matrix()
rotm[:, 2] = -rotm[:, 2]
rotm[:, 1] = -rotm[:, 1]
# convert to isaac-sim convention
quat_gl = tf.Rotation.from_matrix(rotm).as_quat()
quat_gl = convert_quat(quat_gl, "wxyz")
else:
quat_gl = None
# set the pose
self._sensor_xform.set_world_pose(pos, quat_gl)
    def set_world_pose_from_view(self, eye: Sequence[float], target: Sequence[float] = (0.0, 0.0, 0.0)):
"""Set the pose of the camera from the eye position and look-at target position.
Args:
eye: The position of the camera's eye.
            target: The target location to look at. Defaults to (0.0, 0.0, 0.0).
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
NotImplementedError: If the stage up-axis is not "Y" or "Z".
"""
# check camera prim exists
if self._sensor_prim is None:
raise RuntimeError("Camera prim is None. Please call 'initialize(...)' first.")
# compute camera's eye pose
eye_position = Gf.Vec3d(np.asarray(eye).tolist())
target_position = Gf.Vec3d(np.asarray(target).tolist())
# compute forward direction
forward_dir = (eye_position - target_position).GetNormalized()
# get up axis
up_axis_token = stage_utils.get_stage_up_axis()
if up_axis_token == UsdGeom.Tokens.y:
# deal with degenerate case
if forward_dir == Gf.Vec3d(0, 1, 0):
up_axis = Gf.Vec3d(0, 0, 1)
elif forward_dir == Gf.Vec3d(0, -1, 0):
up_axis = Gf.Vec3d(0, 0, -1)
else:
up_axis = Gf.Vec3d(0, 1, 0)
elif up_axis_token == UsdGeom.Tokens.z:
# deal with degenerate case
if forward_dir == Gf.Vec3d(0, 0, 1):
up_axis = Gf.Vec3d(0, 1, 0)
elif forward_dir == Gf.Vec3d(0, 0, -1):
up_axis = Gf.Vec3d(0, -1, 0)
else:
up_axis = Gf.Vec3d(0, 0, 1)
else:
raise NotImplementedError(f"This method is not supported for up-axis '{up_axis_token}'.")
# compute matrix transformation
# view matrix: camera_T_world
matrix_gf = Gf.Matrix4d(1).SetLookAt(eye_position, target_position, up_axis)
# camera position and rotation in world frame
matrix_gf = matrix_gf.GetInverse()
cam_pos = np.array(matrix_gf.ExtractTranslation())
cam_quat = gf_quat_to_np_array(matrix_gf.ExtractRotationQuat())
# set camera pose
self._sensor_xform.set_world_pose(cam_pos, cam_quat)
"""
Operations
"""
def spawn(self, parent_prim_path: str, translation: Sequence[float] = None, orientation: Sequence[float] = None):
"""Spawns the sensor into the stage.
The USD Camera prim is spawned under the parent prim at the path ``parent_prim_path`` with the provided input
translation and orientation.
Args:
parent_prim_path: The path of the parent prim to attach sensor to.
translation: The local position offset w.r.t. parent prim. Defaults to None.
orientation: The local rotation offset in (w, x, y, z) w.r.t.
parent prim. Defaults to None.
"""
# Check if sensor is already spawned
if self._is_spawned:
raise RuntimeError(f"The camera sensor instance has already been spawned at: {self.prim_path}.")
# Create sensor prim path
prim_path = stage_utils.get_next_free_path(f"{parent_prim_path}/Camera")
# Create sensor prim
self._sensor_prim = UsdGeom.Camera(prim_utils.define_prim(prim_path, "Camera"))
# Add replicator camera attributes
self._define_usd_camera_attributes()
# Set the transformation of the camera
self._sensor_xform = XFormPrim(self.prim_path)
self._sensor_xform.set_local_pose(translation, orientation)
# Set spawning to true
self._is_spawned = True
def initialize(self, cam_prim_path: str = None):
"""Initializes the sensor handles and internal buffers.
This function creates handles and registers the provided data types with the replicator registry to
be able to access the data from the sensor. It also initializes the internal buffers to store the data.
The function also allows initializing to a camera not spawned by using the :meth:`spawn` method.
For instance, cameras that already exist in the USD stage. In such cases, the USD settings present on
        the camera prim are used instead of the settings passed in the configuration object.
Args:
            cam_prim_path: The prim path to an existing camera. Defaults to None, in which case
                the camera prim created by the last :meth:`spawn` call is used.
        Raises:
            RuntimeError: If ``cam_prim_path`` is None and the camera was not spawned, or if the
                prim at ``cam_prim_path`` is invalid.
"""
# Check that sensor has been spawned
if cam_prim_path is None:
if not self._is_spawned:
raise RuntimeError("Initialize the camera failed! Please provide a valid argument for `prim_path`.")
else:
# Get prim at path
cam_prim = prim_utils.get_prim_at_path(cam_prim_path)
# Check if prim is valid
            if not cam_prim.IsValid():
                raise RuntimeError(f"Failed to initialize the camera! Invalid prim path: {cam_prim_path}.")
# Force to set active camera to input prim path
self._sensor_prim = UsdGeom.Camera(cam_prim)
self._sensor_xform = XFormPrim(cam_prim_path)
# Enable synthetic data sensors
self._render_product_path = rep.create.render_product(
self.prim_path, resolution=(self.cfg.width, self.cfg.height)
)
# Attach the sensor data types to render node
self._rep_registry: dict[str, rep.annotators.Annotator] = dict.fromkeys(self.cfg.data_types, None)
# -- iterate over each data type
for name in self.cfg.data_types:
# init params -- Checked from rep.scripts.writes_default.basic_writer.py
# note: we are verbose here to make it easier to understand the code.
# if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned.
# if colorize is false, the data is returned as a uint32 image with ids as values.
if name in ["bounding_box_2d_tight", "bounding_box_2d_loose", "bounding_box_3d"]:
init_params = {"semanticTypes": self.cfg.semantic_types}
elif name in ["semantic_segmentation", "instance_segmentation"]:
init_params = {"semanticTypes": self.cfg.semantic_types, "colorize": False}
elif name in ["instance_id_segmentation"]:
init_params = {"colorize": False}
else:
init_params = None
# create annotator node
rep_annotator = rep.AnnotatorRegistry.get_annotator(name, init_params, device=self.device)
rep_annotator.attach([self._render_product_path])
# add to registry
self._rep_registry[name] = rep_annotator
# Reset internal buffers
self.reset()
# When running in standalone mode, need to render a few times to fill all the buffers
# FIXME: Check with simulation team to get rid of this. What if someone has render or other callbacks?
        if not builtins.ISAAC_LAUNCHED_FROM_TERMINAL:
# get simulation context
sim_context = SimulationContext.instance()
# render a few times
for _ in range(4):
sim_context.render()
def reset(self):
# reset the timestamp
super().reset()
# reset the buffer
self._data.position = None
self._data.orientation = None
self._data.intrinsic_matrix = self._compute_intrinsic_matrix()
self._data.image_shape = self.image_shape
self._data.output = dict.fromkeys(self._data.output, None)
def buffer(self):
"""Updates the internal buffer with the latest data from the sensor.
This function reads the intrinsic matrix and pose of the camera. It also reads the data from
the annotator registry and updates the internal buffer.
Note:
When running in standalone mode, the function renders the scene a few times to fill all the buffers.
During this time, the physics simulation is paused. This is a known issue with Isaac Sim.
"""
# -- intrinsic matrix
self._data.intrinsic_matrix = self._compute_intrinsic_matrix()
# -- pose
self._data.position, self._data.orientation = self._compute_ros_pose()
# -- read the data from annotator registry
for name in self._rep_registry:
self._data.output[name] = self._rep_registry[name].get_data()
# -- update the trigger call data (needed by replicator BasicWriter method)
self._data.output["trigger_outputs"] = {"on_time": self.frame}
"""
Private Helpers
"""
def _define_usd_camera_attributes(self):
"""Creates and sets USD camera attributes.
This function creates additional attributes on the camera prim used by Replicator.
It also sets the default values for these attributes based on the camera configuration.
"""
# camera attributes
# reference: omni.replicator.core.scripts.create.py: camera()
attribute_types = {
"cameraProjectionType": "token",
"fthetaWidth": "float",
"fthetaHeight": "float",
"fthetaCx": "float",
"fthetaCy": "float",
"fthetaMaxFov": "float",
"fthetaPolyA": "float",
"fthetaPolyB": "float",
"fthetaPolyC": "float",
"fthetaPolyD": "float",
"fthetaPolyE": "float",
}
# get camera prim
prim = prim_utils.get_prim_at_path(self.prim_path)
# create attributes
for attr_name, attr_type in attribute_types.items():
# check if attribute does not exist
if prim.GetAttribute(attr_name).Get() is None:
# create attribute based on type
if attr_type == "token":
prim.CreateAttribute(attr_name, Sdf.ValueTypeNames.Token)
elif attr_type == "float":
prim.CreateAttribute(attr_name, Sdf.ValueTypeNames.Float)
# set attribute values
# -- projection type
projection_type = to_camel_case(self.cfg.projection_type, to="cC")
prim.GetAttribute("cameraProjectionType").Set(projection_type)
# -- other attributes
for param_name, param_value in class_to_dict(self.cfg.usd_params).items():
# check if value is valid
if param_value is None:
continue
# convert to camel case (CC)
param = to_camel_case(param_name, to="cC")
# get attribute from the class
prim.GetAttribute(param).Set(param_value)
def _compute_intrinsic_matrix(self) -> np.ndarray:
"""Compute camera's matrix of intrinsic parameters.
Also called calibration matrix. This matrix works for linear depth images. We assume square pixels.
Note:
The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera.
The coordinates of points on the image plane are in the homogeneous representation.
Returns:
A 3 x 3 numpy array containing the intrinsic parameters for the camera.
Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` first.
"""
# check camera prim exists
if self._sensor_prim is None:
raise RuntimeError("Camera prim is None. Please call 'initialize(...)' first.")
# get camera parameters
focal_length = self._sensor_prim.GetFocalLengthAttr().Get()
horiz_aperture = self._sensor_prim.GetHorizontalApertureAttr().Get()
# get viewport parameters
height, width = self.image_shape
# calculate the field of view
fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
# calculate the focal length in pixels
focal_px = width * 0.5 / math.tan(fov / 2)
# create intrinsic matrix for depth linear
a = focal_px
b = width * 0.5
c = focal_px
d = height * 0.5
# return the matrix
return np.array([[a, 0, b], [0, c, d], [0, 0, 1]], dtype=float)
def _compute_ros_pose(self) -> tuple[np.ndarray, np.ndarray]:
"""Computes the pose of the camera in the world frame with ROS convention.
This methods uses the ROS convention to resolve the input pose. In this convention,
we assume that the camera front-axis is +Z-axis and up-axis is -Y-axis.
Returns:
A tuple of the position (in meters) and quaternion (w, x, y, z).
"""
# get camera's location in world space
prim_tf = self._sensor_prim.ComputeLocalToWorldTransform(Usd.TimeCode.Default())
# GfVec datatypes are row vectors that post-multiply matrices to effect transformations,
# which implies, for example, that it is the fourth row of a GfMatrix4d that specifies
# the translation of the transformation. Thus, we take transpose here to make it post multiply.
prim_tf = np.transpose(prim_tf)
# extract the position (convert it to SI units-- assumed that stage units is 1.0)
pos = prim_tf[0:3, 3]
# extract rotation
# Note: OpenGL camera transform is such that camera faces along -z axis and +y is up.
# In robotics, we need to rotate it such that the camera is along +z axis and -y is up.
cam_rotm = prim_tf[0:3, 0:3]
# make +z forward
cam_rotm[:, 2] = -cam_rotm[:, 2]
# make +y down
cam_rotm[:, 1] = -cam_rotm[:, 1]
# convert rotation to quaternion
quat = tf.Rotation.from_matrix(cam_rotm).as_quat()
return pos, convert_quat(quat, "wxyz")
| 25,979 |
Python
| 44.739437 | 155 | 0.626083 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/camera/__init__.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Camera wrapper around USD camera prim to provide an interface that follows the robotics convention.
"""
from .camera import Camera, CameraData
from .camera_cfg import FisheyeCameraCfg, PinholeCameraCfg
| 330 |
Python
| 26.583331 | 99 | 0.781818 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/camera/camera_cfg.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the camera sensor."""
from __future__ import annotations
from dataclasses import MISSING
from omni.isaac.orbit.utils import configclass
@configclass
class PinholeCameraCfg:
"""Configuration for a pinhole camera sensor."""
@configclass
class UsdCameraCfg:
"""USD related configuration of the sensor.
The parameter is kept default from USD if it is set to None. This includes the default
parameters (in case the sensor is created) or the ones set by the user (in case the sensor is
loaded from existing USD stage).
Reference:
* https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html
* https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html
"""
clipping_range: tuple[float, float] = None
"""Near and far clipping distances (in stage units)."""
focal_length: float = None
"""Perspective focal length (in mm). Longer lens lengths narrower FOV, shorter lens lengths wider FOV."""
focus_distance: float = None
"""Distance from the camera to the focus plane (in stage units).
The distance at which perfect sharpness is achieved.
"""
f_stop: float = None
"""Lens aperture. Defaults to 0.0, which turns off focusing.
        Controls distance blurring. Lower numbers decrease the focus range; larger numbers increase it.
"""
horizontal_aperture: float = None
"""Horizontal aperture (in mm). Emulates sensor/film width on a camera."""
horizontal_aperture_offset: float = None
"""Offsets Resolution/Film gate horizontally."""
vertical_aperture_offset: float = None
"""Offsets Resolution/Film gate vertically."""
sensor_tick: float = 0.0
"""Simulation seconds between sensor buffers. Defaults to 0.0."""
data_types: list[str] = ["rgb"]
"""List of sensor names/types to enable for the camera. Defaults to ["rgb"]."""
width: int = MISSING
"""Width of the image in pixels."""
height: int = MISSING
"""Height of the image in pixels."""
semantic_types: list[str] = ["class"]
"""List of allowed semantic types the types. Defaults to ["class"].
For example, if semantic types is [“class”], only the bounding boxes for prims with semantics of
type “class” will be retrieved.
More information available at:
https://docs.omniverse.nvidia.com/app_code/prod_extensions/ext_replicator/semantic_schema_editor.html
"""
projection_type: str = "pinhole"
"""Type of projection to use for the camera. Defaults to "pinhole"."""
usd_params: UsdCameraCfg = UsdCameraCfg()
"""Parameters for setting USD camera settings."""
@configclass
class FisheyeCameraCfg(PinholeCameraCfg):
"""Configuration for a fisheye camera sensor."""
@configclass
class UsdCameraCfg(PinholeCameraCfg.UsdCameraCfg):
"""USD related configuration of the sensor for the fisheye model."""
fisheye_nominal_width: float = None
"""Nominal width of fisheye lens model."""
fisheye_nominal_height: float = None
"""Nominal height of fisheye lens model."""
fisheye_optical_centre_x: float = None
"""Horizontal optical centre position of fisheye lens model."""
fisheye_optical_centre_y: float = None
"""Vertical optical centre position of fisheye lens model."""
fisheye_max_fov: float = None
"""Maximum field of view of fisheye lens model."""
fisheye_polynomial_a: float = None
"""First component of fisheye polynomial."""
fisheye_polynomial_b: float = None
"""Second component of fisheye polynomial."""
fisheye_polynomial_c: float = None
"""Third component of fisheye polynomial."""
fisheye_polynomial_d: float = None
"""Fourth component of fisheye polynomial."""
fisheye_polynomial_e: float = None
"""Fifth component of fisheye polynomial."""
projection_type: str = "fisheye_polynomial"
"""Type of projection to use for the camera. Defaults to "fisheye_polynomial"."""
usd_params: UsdCameraCfg = UsdCameraCfg()
"""Parameters for setting USD camera settings."""
| 4,406 |
Python
| 39.431192 | 118 | 0.667045 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/height_scanner/height_scanner_cfg.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Configuration for the height scanner sensor."""
from __future__ import annotations
from dataclasses import MISSING
from omni.isaac.orbit.utils import configclass
@configclass
class HeightScannerCfg:
"""Configuration for the height-scanner sensor."""
sensor_tick: float = 0.0
"""Simulation seconds between sensor buffers. Defaults to 0.0."""
points: list = MISSING
"""The 2D scan points to query ray-casting from. Results are reported in this order."""
offset: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""The offset from the frame the sensor is attached to. Defaults to (0.0, 0.0, 0.0)."""
direction: tuple[float, float, float] = (0.0, 0.0, -1.0)
"""Unit direction for the scanner ray-casting. Defaults to (0.0, 0.0, -1.0)."""
max_distance: float = 100.0
"""Maximum distance from the sensor to ray cast to. Defaults to 100.0."""
filter_prims: list[str] = list()
"""A list of prim names to ignore ray-cast collisions with. Defaults to empty list."""
| 1,136 |
Python
| 35.677418 | 91 | 0.684859 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/height_scanner/height_scanner.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import numpy as np
import scipy.spatial.transform as tf
from collections.abc import Sequence
from dataclasses import dataclass
import omni
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.prims import XFormPrim
from omni.isaac.orbit.utils.math import convert_quat
from ..sensor_base import SensorBase
from .height_scanner_cfg import HeightScannerCfg
from .height_scanner_marker import HeightScannerMarker
@dataclass
class HeightScannerData:
"""Data container for the height-scanner sensor."""
position: np.ndarray = None
"""Position of the sensor origin in world frame."""
orientation: np.ndarray = None
"""Orientation of the sensor origin in quaternion (w, x, y, z) in world frame."""
hit_points: np.ndarray = None
"""The end point locations of ray-casted rays. Shape is (N, 3), where N is
the number of scan points."""
hit_distance: np.ndarray = None
"""The ray-cast travel distance from query point. Shape is (N,), where N is
the number of scan points."""
hit_status: np.ndarray = None
"""Whether the ray hit an object or not. Shape is (N,), where N is
the number of scan points.
It is set to ``1`` if the ray hit an object, and ``0`` otherwise.
"""
class HeightScanner(SensorBase):
"""A two-dimensional height-map scan sensor.
A local map is often required to plan a robot's motion over a limited time horizon. For mobile systems,
often we care about the terrain for locomotion. The height-map, also called elevation map, simplifies the
terrain as a two-dimensional surface. Each grid-cell represents the height of the terrain.
Unlike algorithms which fuse depth measurements to create an elevation map :cite:p:`frankhauser2018probabilistic`,
in this method we directly use the PhysX API for ray-casting and query the height of the terrain from a set
of query scan points. These points represent the location of the grid cells.
The height-scanner uses PhysX for ray-casting to collision bodies. To prevent the casting to certain prims
in the scene (such as the robot on which height-scanner is present), one needs to provide the names of the
prims to not check collision with as a part of the dictionary config.
The scanner offset :math:`(x_o, y_o, z_o)` is the offset of the sensor from the frame it is attached to.
During the :meth:`update` or :meth:`buffer`, the pose of the mounted frame needs to be provided.
If visualization is enabled, rays that have a hit are displayed in red, while a miss is displayed in blue.
During a miss, the point's distance is set to the maximum ray-casting distance.
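    Example:
        A minimal usage sketch (the parent prim path and pose values are illustrative):
        .. code-block:: python
            from omni.isaac.orbit.compat.sensors.height_scanner import HeightScanner, HeightScannerCfg
            from omni.isaac.orbit.compat.sensors.height_scanner.utils import create_points_from_grid
            cfg = HeightScannerCfg(points=create_points_from_grid((1.0, 1.0), 0.1), max_distance=5.0)
            scanner = HeightScanner(cfg)
            scanner.spawn("/World/Robot")
            scanner.initialize()
            scanner.update(dt=0.01, pos=(0.0, 0.0, 0.5), quat=(1.0, 0.0, 0.0, 0.0))
            heights = scanner.data.hit_distance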
"""
def __init__(self, cfg: HeightScannerCfg):
"""Initializes the scanner object.
Args:
cfg: The configuration parameters.
"""
# TODO: Use generic range sensor from Isaac Sim?
# Reference: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_range_sensor.html#isaac-sim-generic-range-sensor-example
# store inputs
self.cfg = cfg
# initialize base class
super().__init__(self.cfg.sensor_tick)
# Points to query ray-casting from
self._scan_points = np.asarray(self.cfg.points)
# If points are 2D, add dimension along z (z=0 relative to the sensor frame)
if self._scan_points.shape[1] == 2:
self._scan_points = np.pad(self._scan_points, [(0, 0), (0, 1)], constant_values=0)
# Flag to check that sensor is spawned.
self._is_spawned = False
# Whether to visualize the scanner points. Defaults to False.
self._visualize = False
# Xform prim for the sensor rig
self._sensor_xform: XFormPrim = None
# Create empty variables for storing output data
self._data = HeightScannerData()
def __str__(self) -> str:
"""Returns: A string containing information about the instance."""
# message for class
return (
f"Height Scanner @ '{self.prim_path}': \n"
f"\ttick rate (s) : {self.sensor_tick}\n"
f"\ttimestamp (s) : {self.timestamp}\n"
f"\tframe : {self.frame}\n"
f"\tposition : {self.data.position}\n"
f"\torientation : {self.data.orientation}\n"
f"\t# of hits : {np.sum(self.data.hit_status)} / {self._scan_points[0]}\n"
)
"""
Properties
"""
@property
def prim_path(self) -> str:
"""The path to the height-map sensor."""
return self._sensor_xform.prim_path
@property
def data(self) -> HeightScannerData:
"""Data related to height scanner."""
return self._data
"""
Helpers
"""
def set_visibility(self, visible: bool):
"""Enables drawing of the scan points in the viewport.
Args:
visible: Whether to draw scan points or not.
"""
# copy argument
self._visualize = visible
# set visibility
self._height_scanner_vis.set_visibility(visible)
def set_filter_prims(self, names: list[str]):
"""Set the names of prims to ignore ray-casting collisions with.
        If None is passed as the argument, then no filtering is performed.
Args:
names: A list of prim names to ignore ray-cast collisions with.
"""
# default
if names is None:
self.cfg.filter_prims = list()
else:
# set into the class
self.cfg.filter_prims = names
"""
Operations
"""
def spawn(self, parent_prim_path: str): # noqa: D102
# Check if sensor is already spawned
if self._is_spawned:
raise RuntimeError(f"The height scanner sensor instance has already been spawned at: {self.prim_path}.")
# Create sensor prim path
prim_path = stage_utils.get_next_free_path(f"{parent_prim_path}/HeightScan_Xform")
# Create the sensor prim
prim_utils.create_prim(prim_path, "XForm")
self._sensor_xform = XFormPrim(prim_path, translation=self.cfg.offset)
# Create visualization marker
# TODO: Move this inside the height-scan prim to make it cleaner?
vis_prim_path = stage_utils.get_next_free_path("/World/Visuals/HeightScan")
self._height_scanner_vis = HeightScannerMarker(vis_prim_path, count=self._scan_points.shape[0], radius=0.02)
# Set spawning to true
self._is_spawned = True
def initialize(self): # noqa: D102
# Check that sensor has been spawned
if not self._is_spawned:
raise RuntimeError("Height scanner sensor must be spawned first. Please call `spawn(...)`.")
# Initialize Xform class
self._sensor_xform.initialize()
# Acquire physx ray-casting interface
self._physx_query_interface = omni.physx.get_physx_scene_query_interface()
        # Since the height scanner is a fictitious sensor, there is no schema config to set in this case.
# Initialize buffers
self.reset()
def reset(self): # noqa: D102
# reset the timestamp
super().reset()
# reset the buffer
self._data.hit_points = np.empty(shape=(self._scan_points.shape))
self._data.hit_distance = np.empty(shape=(self._scan_points.shape[0]))
self._data.hit_status = np.zeros(shape=(self._scan_points.shape[0]))
self._data.position = None
self._data.orientation = None
def update(self, dt: float, pos: Sequence[float], quat: Sequence[float]):
"""Updates the buffers at sensor frequency.
Args:
dt: The simulation time-step.
pos: Position of the frame to which the sensor is attached.
quat: Quaternion (w, x, y, z) of the frame to which the sensor is attached.
"""
super().update(dt, pos, quat)
def buffer(self, pos: Sequence[float], quat: Sequence[float]):
"""Fills the buffers of the sensor data.
This function uses the input position and orientation to compute the ray-casting queries
and fill the buffers. If a collision is detected, then the hit distance is stored in the buffer.
Otherwise, the hit distance is set to the maximum value specified in the configuration.
Args:
pos: Position of the frame to which the sensor is attached.
quat: Quaternion (w, x, y, z) of the frame to which the sensor is attached.
"""
# convert to numpy for sanity
pos = np.asarray(pos)
quat = np.asarray(quat)
# account for the offset of the sensor
self._data.position, self._data.orientation = (pos + self.cfg.offset, quat)
# construct 3D rotation matrix for grid points
# TODO: Check if this is the most generic case. It ignores the base pitch and roll.
tf_rot = tf.Rotation.from_quat(convert_quat(self.data.orientation, "xyzw"))
rpy = tf_rot.as_euler("xyz", degrees=False)
rpy[:2] = 0
rotation = tf.Rotation.from_euler("xyz", rpy).as_matrix()
# transform the scan points to world frame
world_scan_points = np.matmul(rotation, self._scan_points.T).T + self.data.position
# iterate over all the points and query ray-caster
for i in range(world_scan_points.shape[0]):
# set current query info to empty
self._query_info = None
# perform ray-cast to get distance of a point along (0, 0, -1)
self._physx_query_interface.raycast_all(
tuple(world_scan_points[i]),
self.cfg.direction,
self.cfg.max_distance,
self._hit_report_callback,
)
# check if hit happened based on query info and add to data
if self._query_info is not None:
self._data.hit_status[i] = 1
self._data.hit_distance[i] = self._query_info.distance
else:
self._data.hit_status[i] = 0
self._data.hit_distance[i] = self.cfg.max_distance
# add ray-end point (in world frame) to data
self._data.hit_points[i] = world_scan_points[i] + np.array(self.cfg.direction) * self._data.hit_distance[i]
# visualize the prim
if self._visualize:
self._height_scanner_vis.set_status(status=self._data.hit_status)
self._height_scanner_vis.set_world_poses(positions=self._data.hit_points)
"""
Private helpers
"""
def _hit_report_callback(self, hit) -> bool:
"""A PhysX callback to filter out hit-reports that are on the collision bodies.
Returns:
If True, continue casting the ray. Otherwise, stop and report the result.
"""
# unset the query info
self._query_info = None
# get ray's current contact rigid body
current_hit_body = hit.rigid_body
# check if the collision body is in the filtering list
if current_hit_body in self.cfg.filter_prims:
# continue casting the ray
return True
else:
# set the hit information
self._query_info = hit
return False
| 11,466 |
Python
| 39.807829 | 152 | 0.634048 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/height_scanner/__init__.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Height-scanner based on ray-casting operations using PhysX ray-caster.
"""
from .height_scanner import HeightScanner, HeightScannerData
from .height_scanner_cfg import HeightScannerCfg
| 313 |
Python
| 25.166665 | 70 | 0.782748 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/height_scanner/utils.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Utilities to create and visualize 2D height-maps."""
from __future__ import annotations
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from matplotlib.image import AxesImage
def create_points_from_grid(size: tuple[float, float], resolution: float) -> np.ndarray:
"""Creates a list of points from 2D mesh-grid.
The terrain scan is approximated with a grid map of the input resolution.
By default, we consider the origin as the center of the local map and the scan size ``(X, Y)`` is the
map size. Given these settings, the elevation map spans from: ``(- X / 2, - Y / 2)`` to
``(+ X / 2, + Y / 2)``.
Example:
        For a grid of size (0.2, 0.2) with a resolution of 0.1, the created points keep the x-coordinate
        fixed while the y-coordinate changes first, i.e.:
.. code-block:: none
[
[-0.1, -0.1], [-0.1, 0.0], [-0.1, 0.1],
                [0.0, -0.1], [0.0, 0.0], [0.0, 0.1],
[0.1, -0.1], [0.1, 0.0], [0.1, 0.1],
]
Args:
size: The 2D scan region along x and y directions (in meters).
resolution: The resolution of the scanner (in meters/cell).
Returns:
        A set of points of shape (N, 2), where the x-coordinate is fixed while the y-coordinate changes first.
"""
# Compute the scan grid
# Note: np.arange does not include end-point when dealing with floats. That is why we add resolution.
x = np.arange(-size[0] / 2, size[0] / 2 + resolution, resolution)
y = np.arange(-size[1] / 2, size[1] / 2 + resolution, resolution)
grid = np.meshgrid(x, y, sparse=False, indexing="ij")
# Concatenate the scan grid into points array (N, 2): first x is fixed while y changes
return np.vstack(list(map(np.ravel, grid))).T
def plot_height_grid(
hit_distance: np.ndarray, size: tuple[float, float], resolution: float, ax: Axes = None
) -> AxesImage:
"""Plots the sensor height-map distances using matplotlib.
If the axes is not provided, a new figure is created.
Note:
        This method currently only supports evenly spaced grids, i.e. scan points created using
        the :meth:`create_points_from_grid` method.
Args:
hit_distance: The ray hit distance measured from the sensor.
size: The 2D scan region along x and y directions (in meters).
resolution: The resolution of the scanner (in meters/cell).
        ax: The current matplotlib axes to plot in. Defaults to None.
Returns:
Image axes of the created plot.
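    Example:
        A minimal usage sketch (assumes ``scanner`` is an initialized and updated
        :class:`HeightScanner` whose points were created over a (1.0, 1.0) grid at 0.1 resolution):
        .. code-block:: python
            import matplotlib.pyplot as plt
            caxes = plot_height_grid(scanner.data.hit_distance, size=(1.0, 1.0), resolution=0.1)
            plt.colorbar(caxes)
            plt.show()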
"""
    # Create a new figure and axes if none are provided
    if ax is None:
_, ax = plt.subplots()
    # clear the axes
ax.clear()
# resolve shape of the heightmap
x = np.arange(-size[0] / 2, size[0] / 2 + resolution, resolution)
y = np.arange(-size[1] / 2, size[1] / 2 + resolution, resolution)
shape = (len(x), len(y))
# convert the map shape
heightmap = hit_distance.reshape(shape)
# plot the scanned distance
caxes = ax.imshow(heightmap, cmap="turbo", interpolation="none", vmin=0)
# set the label
ax.set_xlabel("y (m)")
ax.set_ylabel("x (m)")
# set the ticks
ax.set_xticks(np.arange(shape[1]), minor=False)
ax.set_yticks(np.arange(shape[0]), minor=False)
ax.set_xticklabels([round(value, 2) for value in y])
ax.set_yticklabels([round(value, 2) for value in x])
# add grid
ax.grid(color="w", linestyle="--", linewidth=1)
# return the color axes
return caxes
| 3,707 |
Python
| 36.454545 | 115 | 0.62962 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/sensors/height_scanner/height_scanner_marker.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Helper class to handle visual sphere markers to show ray-casting of height scanner."""
from __future__ import annotations
import numpy as np
import torch
from collections.abc import Sequence
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from pxr import Gf, UsdGeom
class HeightScannerMarker:
"""Helper class to handle visual sphere markers to show ray-casting of height scanner.
The class uses :class:`UsdGeom.PointInstancer` for efficient handling of multiple markers in the stage.
    It creates three spherical marker prototypes (a red "hit" sphere, a blue "miss" sphere, and an
    invisible sphere). Based on the indices provided, the referenced marker is activated.
The status marker (proto-indices) of the point instancer is used to store the following information:
- :obj:`0` -> ray miss (blue sphere).
- :obj:`1` -> successful ray hit (red sphere).
- :obj:`2` -> invisible ray (disabled visualization)
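    Example:
        A minimal usage sketch (assumes an open USD stage; the prim path and status values
        are illustrative):
        .. code-block:: python
            import numpy as np
            marker = HeightScannerMarker("/World/Visuals/HeightScan", count=3, radius=0.02)
            marker.set_world_poses(positions=np.zeros((3, 3)))
            marker.set_status(status=np.array([1, 0, 2]))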
"""
def __init__(self, prim_path: str, count: int, radius: float = 1.0) -> None:
"""Initialize the class.
Args:
prim_path: The prim path of the point instancer.
count: The number of markers to create.
radius: The radius of the spherical markers. Defaults to 1.0.
Raises:
ValueError: When a prim at the given path exists but is not a valid point instancer.
"""
# check inputs
stage = stage_utils.get_current_stage()
# -- prim path
if prim_utils.is_prim_path_valid(prim_path):
prim = prim_utils.get_prim_at_path(prim_path)
if not prim.IsA(UsdGeom.PointInstancer):
raise ValueError(f"The prim at path {prim_path} cannot be parsed as a `PointInstancer` object")
self._instancer_manager = UsdGeom.PointInstancer(prim)
else:
self._instancer_manager = UsdGeom.PointInstancer.Define(stage, prim_path)
# store inputs
self.prim_path = prim_path
self.count = count
self._radius = radius
# TODO: Make this generic marker for all and put inside the `omni.isaac.orbit.marker` directory.
# create a child prim for the marker
# -- target missed
prim = prim_utils.create_prim(f"{prim_path}/point_miss", "Sphere", attributes={"radius": self._radius})
geom = UsdGeom.Sphere(prim)
geom.GetDisplayColorAttr().Set([(0.0, 0.0, 1.0)])
# -- target achieved
prim = prim_utils.create_prim(f"{prim_path}/point_hit", "Sphere", attributes={"radius": self._radius})
geom = UsdGeom.Sphere(prim)
geom.GetDisplayColorAttr().Set([(1.0, 0.0, 0.0)])
# -- target invisible
prim = prim_utils.create_prim(f"{prim_path}/point_invisible", "Sphere", attributes={"radius": self._radius})
geom = UsdGeom.Sphere(prim)
geom.GetDisplayColorAttr().Set([(0.0, 0.0, 1.0)])
prim_utils.set_prim_visibility(prim, visible=False)
# add child reference to point instancer
relation_manager = self._instancer_manager.GetPrototypesRel()
relation_manager.AddTarget(f"{prim_path}/point_miss") # target index: 0
relation_manager.AddTarget(f"{prim_path}/point_hit") # target index: 1
relation_manager.AddTarget(f"{prim_path}/point_invisible") # target index: 2
# buffers for storing data in pixar Gf format
# TODO: Make them very far away from the scene?
self._proto_indices = [2] * self.count
self._gf_positions = [Gf.Vec3f(0.0, 0.0, -10.0) for _ in range(self.count)]
self._gf_orientations = [Gf.Quath() for _ in range(self.count)]
# specify that all initial prims are related to same geometry
self._instancer_manager.GetProtoIndicesAttr().Set(self._proto_indices)
# set initial positions of the targets
self._instancer_manager.GetPositionsAttr().Set(self._gf_positions)
self._instancer_manager.GetOrientationsAttr().Set(self._gf_orientations)
def set_visibility(self, visible: bool):
"""Sets the visibility of the markers.
The method does this through the USD API.
Args:
visible: flag to set the visibility.
"""
imageable = UsdGeom.Imageable(self._instancer_manager)
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def set_world_poses(
self,
positions: np.ndarray | torch.Tensor | None = None,
orientations: np.ndarray | torch.Tensor | None = None,
indices: Sequence[int] | None = None,
):
"""Update marker poses in the simulation world frame.
Args:
positions:
Positions in the world frame. Shape is (M, 3). Defaults to None, which means left unchanged.
orientations:
Quaternion orientations (w, x, y, z) in the world frame of the prims. Shape is (M, 4).
Defaults to None, which means left unchanged.
            indices: Indices specifying which markers to alter.
Shape is (M,), where M <= total number of markers. Defaults to None (i.e: all markers).
"""
# resolve inputs
if positions is not None:
positions = positions.tolist()
if orientations is not None:
orientations = orientations.tolist()
if indices is None:
indices = range(self.count)
# change marker locations
for i, marker_index in enumerate(indices):
if positions is not None:
self._gf_positions[marker_index][:] = positions[i]
if orientations is not None:
self._gf_orientations[marker_index].SetReal(orientations[i][0])
self._gf_orientations[marker_index].SetImaginary(orientations[i][1:])
# apply to instance manager
self._instancer_manager.GetPositionsAttr().Set(self._gf_positions)
self._instancer_manager.GetOrientationsAttr().Set(self._gf_orientations)
def set_status(self, status: list[int] | np.ndarray | torch.Tensor, indices: Sequence[int] | None = None):
"""Updates the marker activated by the instance manager.
Args:
status: Decides which prototype marker to visualize. Shape is (M,).
indices: Indices to specify which markers to alter. Shape is (M,), where M <= total number of markers.
Defaults to None (i.e: all markers).
"""
# default values
if indices is None:
indices = range(self.count)
# resolve input
if not isinstance(status, list):
proto_indices = status.tolist()
else:
proto_indices = status
# change marker locations
for i, marker_index in enumerate(indices):
self._proto_indices[marker_index] = int(proto_indices[i])
# apply to instance manager
self._instancer_manager.GetProtoIndicesAttr().Set(self._proto_indices)
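# Example (illustrative sketch, not part of the original file): toggling prototypes for a group
# of markers, assuming this class is the point-marker class defined in this file. The prim path
# and counts below are hypothetical.
#
# marker = PointMarker("/World/Visuals/points", 4, radius=0.1)
# marker.set_world_poses(np.zeros((4, 3)))
# marker.set_status([0, 1, 1, 2])  # miss, hit, hit, invisible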
| 7,247 |
Python
| 43.195122 | 116 | 0.633642 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/utils/kit.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import contextlib
import math
from collections.abc import Sequence
import carb
import omni.isaac.core.utils.nucleus as nucleus_utils
import omni.isaac.core.utils.prims as prim_utils
import omni.kit
from omni.isaac.core.materials import PhysicsMaterial
from omni.isaac.core.prims import GeometryPrim
from pxr import Gf, PhysxSchema, UsdPhysics, UsdShade
def create_ground_plane(
prim_path: str,
z_position: float = 0.0,
static_friction: float = 1.0,
dynamic_friction: float = 1.0,
restitution: float = 0.0,
color: Sequence[float] | None = (0.065, 0.0725, 0.080),
**kwargs,
):
"""Spawns a ground plane into the scene.
This method spawns the default ground plane (grid plane) from Isaac Sim into the scene.
It applies a physics material to the ground plane and sets the color of the ground plane.
Args:
prim_path: The prim path to spawn the ground plane at.
z_position: The z-location of the plane. Defaults to 0.
static_friction: The static friction coefficient. Defaults to 1.0.
dynamic_friction: The dynamic friction coefficient. Defaults to 1.0.
restitution: The coefficient of restitution. Defaults to 0.0.
color: The color of the ground plane.
Defaults to (0.065, 0.0725, 0.080).
Keyword Args:
usd_path: The USD path to the ground plane. Defaults to the asset path
`Isaac/Environments/Grid/default_environment.usd` on the Isaac Sim Nucleus server.
improve_patch_friction: Whether to enable patch friction. Defaults to False.
friction_combine_mode: Determines the way physics materials will be combined during collisions.
Available options are `average`, `min`, `multiply`, and `max`. Defaults to `multiply`.
ambient_light: Whether to add an ambient (distant) light source above the plane. Defaults to True.
"""
# Retrieve path to the plane
if "usd_path" in kwargs:
usd_path = kwargs["usd_path"]
else:
# get path to the nucleus server
assets_root_path = nucleus_utils.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Unable to access the Isaac Sim assets folder on Nucleus server.")
return
# prepend path to the grid plane
usd_path = f"{assets_root_path}/Isaac/Environments/Grid/default_environment.usd"
# Spawn Ground-plane
prim_utils.create_prim(prim_path, usd_path=usd_path, translation=(0.0, 0.0, z_position))
# Create physics material
material = PhysicsMaterial(
f"{prim_path}/groundMaterial",
static_friction=static_friction,
dynamic_friction=dynamic_friction,
restitution=restitution,
)
# Apply PhysX Rigid Material schema
physx_material_api = PhysxSchema.PhysxMaterialAPI.Apply(material.prim)
# Set patch friction property
improve_patch_friction = kwargs.get("improve_patch_friction", False)
physx_material_api.CreateImprovePatchFrictionAttr().Set(improve_patch_friction)
# Set combination mode for coefficients
combine_mode = kwargs.get("friction_combine_mode", "multiply")
physx_material_api.CreateFrictionCombineModeAttr().Set(combine_mode)
physx_material_api.CreateRestitutionCombineModeAttr().Set(combine_mode)
# Apply physics material to ground plane
collision_prim_path = prim_utils.get_prim_path(
prim_utils.get_first_matching_child_prim(
prim_path, predicate=lambda x: prim_utils.get_prim_type_name(x) == "Plane"
)
)
geom_prim = GeometryPrim(collision_prim_path, disable_stablization=False, collision=True)
geom_prim.apply_physics_material(material)
# Change the color of the plane
# Warning: This is specific to the default grid plane asset.
if color is not None:
omni.kit.commands.execute(
"ChangeProperty",
prop_path=f"{prim_path}/Looks/theGrid.inputs:diffuse_tint",
value=Gf.Vec3f(*color),
prev=None,
)
# Add light source
# By default, the one from Isaac Sim is too dim for large number of environments.
# Warning: This is specific to the default grid plane asset.
ambient_light = kwargs.get("ambient_light", True)
if ambient_light:
attributes = {"intensity": 600.0}
attributes = {f"inputs:{k}": v for k, v in attributes.items()}
# create light prim
prim_utils.create_prim(f"{prim_path}/AmbientLight", "DistantLight", attributes=attributes)
def move_nested_prims(source_ns: str, target_ns: str):
"""Moves all prims from source namespace to target namespace.
This function also moves all the references inside the source prim path
to the target prim path.
Args:
source_ns: The source prim path.
target_ns: The target prim path.
"""
# check if target namespace exists
prim_utils.define_prim(target_ns)
# move all children prim from source namespace
prim = prim_utils.get_prim_at_path(source_ns)
for children in prim.GetChildren():
orig_prim_path = prim_utils.get_prim_path(children)
new_prim_path = orig_prim_path.replace(source_ns, target_ns)
prim_utils.move_prim(orig_prim_path, new_prim_path)
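# Example (illustrative sketch): moving prims staged under one namespace into an environment
# namespace. Both paths below are hypothetical.
#
# move_nested_prims("/World/Staging", "/World/envs/env_0")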
def set_drive_dof_properties(
prim_path: str,
dof_name: str,
stiffness: float | None = None,
damping: float | None = None,
max_velocity: float | None = None,
max_force: float | None = None,
) -> None:
"""Set the DOF properties of a drive on an articulation.
Args:
prim_path: The prim path to the articulation root.
dof_name: The name of the DOF/joint.
stiffness: The stiffness of the drive.
damping: The damping of the drive.
max_velocity: The max velocity of the drive.
max_force: The max effort of the drive.
Raises:
ValueError: When no joint of given name found under the provided prim path.
"""
# find matching prim path for the dof name
dof_prim = prim_utils.get_first_matching_child_prim(prim_path, lambda x: dof_name in x)
if not dof_prim.IsValid():
raise ValueError(f"No joint named '{dof_name}' found in articulation '{prim_path}'.")
# obtain the dof drive type
if dof_prim.IsA(UsdPhysics.RevoluteJoint):
drive_type = "angular"
elif dof_prim.IsA(UsdPhysics.PrismaticJoint):
drive_type = "linear"
else:
# get joint USD prim
dof_prim_path = prim_utils.get_prim_path(dof_prim)
raise ValueError(f"The joint at path '{dof_prim_path}' is not linear or angular.")
# convert to USD Physics drive
if dof_prim.HasAPI(UsdPhysics.DriveAPI):
drive_api = UsdPhysics.DriveAPI(dof_prim, drive_type)
else:
drive_api = UsdPhysics.DriveAPI.Apply(dof_prim, drive_type)
# convert DOF type to be force
if not drive_api.GetTypeAttr():
drive_api.CreateTypeAttr().Set("force")
else:
drive_api.GetTypeAttr().Set("force")
# set stiffness of the drive
if stiffness is not None:
# convert gain from per-radian to per-degree units
# note: the drive schema expects gains with units "per degree"
if drive_type == "angular":
stiffness = stiffness * math.pi / 180
# set property
if not drive_api.GetStiffnessAttr():
drive_api.CreateStiffnessAttr(stiffness)
else:
drive_api.GetStiffnessAttr().Set(stiffness)
# set damping of the drive
if damping is not None:
# convert gain from per-radian to per-degree units
# note: the drive schema expects gains with units "per degree"
if drive_type == "angular":
damping = damping * math.pi / 180
# set property
if not drive_api.GetDampingAttr():
drive_api.CreateDampingAttr(damping)
else:
drive_api.GetDampingAttr().Set(damping)
# set maximum force
if max_force is not None:
if not drive_api.GetMaxForceAttr():
drive_api.CreateMaxForceAttr(max_force)
else:
drive_api.GetMaxForceAttr().Set(max_force)
# convert to physx schema
drive_schema = PhysxSchema.PhysxJointAPI(dof_prim)
# set maximum velocity
if max_velocity is not None:
# convert from radians to degrees
if drive_type == "angular":
max_velocity = math.degrees(max_velocity)
# set property
if not drive_schema.GetMaxJointVelocityAttr():
drive_schema.CreateMaxJointVelocityAttr(max_velocity)
else:
drive_schema.GetMaxJointVelocityAttr().Set(max_velocity)
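# Example (illustrative sketch): configuring the drive of a single joint. The articulation path,
# joint name, and gains below are hypothetical; angular gains are passed per-radian and converted internally.
#
# set_drive_dof_properties("/World/Robot", "shoulder_pan_joint", stiffness=800.0, damping=40.0, max_force=87.0)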
def set_articulation_properties(
prim_path: str,
articulation_enabled: bool | None = None,
solver_position_iteration_count: int | None = None,
solver_velocity_iteration_count: int | None = None,
sleep_threshold: float | None = None,
stabilization_threshold: float | None = None,
enable_self_collisions: bool | None = None,
) -> None:
"""Set PhysX parameters for an articulation prim.
Args:
prim_path: The prim path to the articulation root.
articulation_enabled: Whether the articulation should be enabled/disabled.
solver_position_iteration_count: Solver position iteration counts for the body.
solver_velocity_iteration_count: Solver velocity iteration counts for the body.
sleep_threshold: Mass-normalized kinetic energy threshold below which an
actor may go to sleep.
stabilization_threshold: The mass-normalized kinetic energy threshold below
which an articulation may participate in stabilization.
enable_self_collisions: Boolean defining whether self collisions should be
enabled or disabled.
Raises:
ValueError: When no articulation schema found at specified articulation path.
"""
# get articulation USD prim
articulation_prim = prim_utils.get_prim_at_path(prim_path)
# check if prim has articulation applied on it
if not UsdPhysics.ArticulationRootAPI(articulation_prim):
raise ValueError(f"No articulation schema present for prim '{prim_path}'.")
# retrieve the articulation api
physx_articulation_api = PhysxSchema.PhysxArticulationAPI(articulation_prim)
if not physx_articulation_api:
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Apply(articulation_prim)
# set enable/disable rigid body API
if articulation_enabled is not None:
physx_articulation_api.GetArticulationEnabledAttr().Set(articulation_enabled)
# set solver position iteration
if solver_position_iteration_count is not None:
physx_articulation_api.GetSolverPositionIterationCountAttr().Set(solver_position_iteration_count)
# set solver velocity iteration
if solver_velocity_iteration_count is not None:
physx_articulation_api.GetSolverVelocityIterationCountAttr().Set(solver_velocity_iteration_count)
# set sleep threshold
if sleep_threshold is not None:
physx_articulation_api.GetSleepThresholdAttr().Set(sleep_threshold)
# set stabilization threshold
if stabilization_threshold is not None:
physx_articulation_api.GetStabilizationThresholdAttr().Set(stabilization_threshold)
# set self collisions
if enable_self_collisions is not None:
physx_articulation_api.GetEnabledSelfCollisionsAttr().Set(enable_self_collisions)
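# Example (illustrative sketch): tuning the solver on an articulation root. The prim path and
# iteration counts below are hypothetical.
#
# set_articulation_properties("/World/Robot", solver_position_iteration_count=8, solver_velocity_iteration_count=1, enable_self_collisions=False)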
def set_rigid_body_properties(
prim_path: str,
rigid_body_enabled: bool | None = None,
solver_position_iteration_count: int | None = None,
solver_velocity_iteration_count: int | None = None,
linear_damping: float | None = None,
angular_damping: float | None = None,
max_linear_velocity: float | None = None,
max_angular_velocity: float | None = None,
sleep_threshold: float | None = None,
stabilization_threshold: float | None = None,
max_depenetration_velocity: float | None = None,
max_contact_impulse: float | None = None,
enable_gyroscopic_forces: bool | None = None,
disable_gravity: bool | None = None,
retain_accelerations: bool | None = None,
):
"""Set PhysX parameters for a rigid body prim.
Args:
prim_path: The prim path to the rigid body.
rigid_body_enabled: Whether to enable or disable rigid body API.
solver_position_iteration_count: Solver position iteration counts for the body.
solver_velocity_iteration_count: Solver velocity iteration counts for the body.
linear_damping: Linear damping coefficient.
angular_damping: Angular damping coefficient.
max_linear_velocity: Max allowable linear velocity for rigid body (in m/s).
max_angular_velocity: Max allowable angular velocity for rigid body (in rad/s).
sleep_threshold: Mass-normalized kinetic energy threshold below which an actor
may go to sleep.
stabilization_threshold: Mass-normalized kinetic energy threshold below which
an actor may participate in stabilization.
max_depenetration_velocity: The maximum depenetration velocity permitted to
be introduced by the solver (in m/s).
max_contact_impulse: The limit on the impulse that may be applied at a contact.
enable_gyroscopic_forces: Enables computation of gyroscopic forces on the
rigid body.
disable_gravity: Disable gravity for the actor.
retain_accelerations: Carries over forces/accelerations over sub-steps.
Raises:
ValueError: When no rigid-body schema found at specified prim path.
"""
# get rigid-body USD prim
rigid_body_prim = prim_utils.get_prim_at_path(prim_path)
# check if prim has rigid-body applied on it
if not UsdPhysics.RigidBodyAPI(rigid_body_prim):
raise ValueError(f"No rigid body schema present for prim '{prim_path}'.")
# retrieve the USD rigid-body api
usd_rigid_body_api = UsdPhysics.RigidBodyAPI(rigid_body_prim)
# retrieve the physx rigid-body api
physx_rigid_body_api = PhysxSchema.PhysxRigidBodyAPI(rigid_body_prim)
if not physx_rigid_body_api:
physx_rigid_body_api = PhysxSchema.PhysxRigidBodyAPI.Apply(rigid_body_prim)
# set enable/disable rigid body API
if rigid_body_enabled is not None:
usd_rigid_body_api.GetRigidBodyEnabledAttr().Set(rigid_body_enabled)
# set solver position iteration
if solver_position_iteration_count is not None:
physx_rigid_body_api.GetSolverPositionIterationCountAttr().Set(solver_position_iteration_count)
# set solver velocity iteration
if solver_velocity_iteration_count is not None:
physx_rigid_body_api.GetSolverVelocityIterationCountAttr().Set(solver_velocity_iteration_count)
# set linear damping
if linear_damping is not None:
physx_rigid_body_api.GetLinearDampingAttr().Set(linear_damping)
# set angular damping
if angular_damping is not None:
physx_rigid_body_api.GetAngularDampingAttr().Set(angular_damping)
# set max linear velocity
if max_linear_velocity is not None:
physx_rigid_body_api.GetMaxLinearVelocityAttr().Set(max_linear_velocity)
# set max angular velocity
if max_angular_velocity is not None:
max_angular_velocity = math.degrees(max_angular_velocity)
physx_rigid_body_api.GetMaxAngularVelocityAttr().Set(max_angular_velocity)
# set sleep threshold
if sleep_threshold is not None:
physx_rigid_body_api.GetSleepThresholdAttr().Set(sleep_threshold)
# set stabilization threshold
if stabilization_threshold is not None:
physx_rigid_body_api.GetStabilizationThresholdAttr().Set(stabilization_threshold)
# set max depenetration velocity
if max_depenetration_velocity is not None:
physx_rigid_body_api.GetMaxDepenetrationVelocityAttr().Set(max_depenetration_velocity)
# set max contact impulse
if max_contact_impulse is not None:
physx_rigid_body_api.GetMaxContactImpulseAttr().Set(max_contact_impulse)
# set enable gyroscopic forces
if enable_gyroscopic_forces is not None:
physx_rigid_body_api.GetEnableGyroscopicForcesAttr().Set(enable_gyroscopic_forces)
# set disable gravity
if disable_gravity is not None:
physx_rigid_body_api.GetDisableGravityAttr().Set(disable_gravity)
# set retain accelerations
if retain_accelerations is not None:
physx_rigid_body_api.GetRetainAccelerationsAttr().Set(retain_accelerations)
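# Example (illustrative sketch): damping a dynamic rigid body. The prim path and values below are hypothetical.
#
# set_rigid_body_properties("/World/Objects/Cube", linear_damping=0.5, angular_damping=0.5, max_depenetration_velocity=1.0)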
def set_collision_properties(
prim_path: str,
collision_enabled: bool | None = None,
contact_offset: float | None = None,
rest_offset: float | None = None,
torsional_patch_radius: float | None = None,
min_torsional_patch_radius: float | None = None,
):
"""Set PhysX properties of collider prim.
Args:
prim_path: The prim path of parent.
collision_enabled: Whether to enable/disable collider.
contact_offset: Contact offset of a collision shape (in m).
rest_offset: Rest offset of a collision shape (in m).
torsional_patch_radius: Defines the radius of the contact patch
used to apply torsional friction (in m).
min_torsional_patch_radius: Defines the minimum radius of the
contact patch used to apply torsional friction (in m).
Raises:
ValueError: When no collision schema found at specified prim path.
"""
# get USD prim
collider_prim = prim_utils.get_prim_at_path(prim_path)
# check if prim has collision applied on it
if not UsdPhysics.CollisionAPI(collider_prim):
raise ValueError(f"No collider schema present for prim '{prim_path}'.")
# retrieve the collision api
physx_collision_api = PhysxSchema.PhysxCollisionAPI(collider_prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(collider_prim)
# set enable/disable collision API
if collision_enabled is not None:
physx_collision_api.GetCollisionEnabledAttr().Set(collision_enabled)
# set contact offset
if contact_offset is not None:
physx_collision_api.GetContactOffsetAttr().Set(contact_offset)
# set rest offset
if rest_offset is not None:
physx_collision_api.GetRestOffsetAttr().Set(rest_offset)
# set torsional patch radius
if torsional_patch_radius is not None:
physx_collision_api.GetTorsionalPatchRadiusAttr().Set(torsional_patch_radius)
# set min torsional patch radius
if min_torsional_patch_radius is not None:
physx_collision_api.GetMinTorsionalPatchRadiusAttr().Set(min_torsional_patch_radius)
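# Example (illustrative sketch): adjusting contact generation on a collider. The prim path and
# offsets below are hypothetical; the rest offset must be smaller than the contact offset.
#
# set_collision_properties("/World/Objects/Cube", contact_offset=0.02, rest_offset=0.001)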
def apply_physics_material(prim_path: str, material_path: str, weaker_than_descendants: bool = False):
"""Apply a physics material to a prim.
Physics material can only be applied to prims with physics enabled on them. This includes prims
with collision APIs or deformable-body APIs applied, and particle systems.
Args:
prim_path: The prim path of parent.
material_path: The prim path of the material to apply.
weaker_than_descendants: Whether the material binding should be weaker than
descendants' material bindings. Defaults to False.
Raises:
ValueError: If the material path does not exist on stage.
ValueError: When prim at specified path is not physics-enabled.
"""
# check if material exists
if not prim_utils.is_prim_path_valid(material_path):
raise ValueError(f"Physics material '{material_path}' does not exist.")
# get USD prim
prim = prim_utils.get_prim_at_path(prim_path)
# check if prim has collision applied on it
has_collider = prim.HasAPI(UsdPhysics.CollisionAPI)
has_deformable_body = prim.HasAPI(PhysxSchema.PhysxDeformableBodyAPI)
has_particle_system = prim.IsA(PhysxSchema.PhysxParticleSystem)
if not (has_collider or has_deformable_body or has_particle_system):
raise ValueError(
f"Cannot apply physics material on prim '{prim_path}'. It is neither a collider,"
" nor a deformable body, nor a particle system."
)
# obtain material binding API
if prim.HasAPI(UsdShade.MaterialBindingAPI):
material_binding_api = UsdShade.MaterialBindingAPI(prim)
else:
material_binding_api = UsdShade.MaterialBindingAPI.Apply(prim)
# obtain the material prim
material = UsdShade.Material(prim_utils.get_prim_at_path(material_path))
# resolve token for weaker than descendants
if weaker_than_descendants:
binding_strength = UsdShade.Tokens.weakerThanDescendants
else:
binding_strength = UsdShade.Tokens.strongerThanDescendants
# apply the material
material_binding_api.Bind(material, bindingStrength=binding_strength, materialPurpose="physics")
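# Example (illustrative sketch): binding an existing physics material to a collider.
# Both prim paths below are hypothetical; the material prim must already exist on the stage.
#
# apply_physics_material("/World/Objects/Cube", "/World/Materials/Rubber")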
def set_nested_articulation_properties(prim_path: str, **kwargs) -> None:
"""Set PhysX parameters on all articulations under specified prim-path.
Note:
Check the method :meth:`set_articulation_properties` for keyword arguments.
Args:
prim_path: The prim path under which to search and apply articulation properties.
Keyword Args:
articulation_enabled: Whether the articulation should be enabled/disabled.
solver_position_iteration_count: Solver position iteration counts for the body.
solver_velocity_iteration_count: Solver velocity iteration counts for the body.
sleep_threshold: Mass-normalized kinetic energy threshold below which an
actor may go to sleep.
stabilization_threshold: The mass-normalized kinetic energy threshold below
which an articulation may participate in stabilization.
enable_self_collisions: Boolean defining whether self collisions should be
enabled or disabled.
"""
# get USD prim
prim = prim_utils.get_prim_at_path(prim_path)
# iterate over all prims under prim-path
all_prims = [prim]
while len(all_prims) > 0:
# get current prim
child_prim = all_prims.pop(0)
# set articulation properties
with contextlib.suppress(ValueError):
set_articulation_properties(prim_utils.get_prim_path(child_prim), **kwargs)
# add all children to tree
all_prims += child_prim.GetChildren()
def set_nested_rigid_body_properties(prim_path: str, **kwargs):
"""Set PhysX parameters on all rigid bodies under specified prim-path.
Note:
Check the method :meth:`set_rigid_body_properties` for keyword arguments.
Args:
prim_path: The prim path under which to search and apply rigid-body properties.
Keyword Args:
rigid_body_enabled: Whether to enable or disable rigid body API.
solver_position_iteration_count: Solver position iteration counts for the body.
solver_velocity_iteration_count: Solver velocity iteration counts for the body.
linear_damping: Linear damping coefficient.
angular_damping: Angular damping coefficient.
max_linear_velocity: Max allowable linear velocity for rigid body (in m/s).
max_angular_velocity: Max allowable angular velocity for rigid body (in rad/s).
sleep_threshold: Mass-normalized kinetic energy threshold below which an actor
may go to sleep.
stabilization_threshold: Mass-normalized kinetic energy threshold below which
an actor may participate in stabilization.
max_depenetration_velocity: The maximum depenetration velocity permitted to
be introduced by the solver (in m/s).
max_contact_impulse: The limit on the impulse that may be applied at a contact.
enable_gyroscopic_forces: Enables computation of gyroscopic forces on the
rigid body.
disable_gravity: Disable gravity for the actor.
retain_accelerations: Carries over forces/accelerations over sub-steps.
"""
# get USD prim
prim = prim_utils.get_prim_at_path(prim_path)
# iterate over all prims under prim-path
all_prims = [prim]
while len(all_prims) > 0:
# get current prim
child_prim = all_prims.pop(0)
# set rigid-body properties
with contextlib.suppress(ValueError):
set_rigid_body_properties(prim_utils.get_prim_path(child_prim), **kwargs)
# add all children to tree
all_prims += child_prim.GetChildren()
def set_nested_collision_properties(prim_path: str, **kwargs):
"""Set the collider properties of all meshes under a specified prim path.
Note:
Check the method :meth:`set_collision_properties` for keyword arguments.
Args:
prim_path: The prim path under which to search and apply collider properties.
Keyword Args:
collision_enabled: Whether to enable/disable collider.
contact_offset: Contact offset of a collision shape (in m).
rest_offset: Rest offset of a collision shape (in m).
torsional_patch_radius: Defines the radius of the contact patch
used to apply torsional friction (in m).
min_torsional_patch_radius: Defines the minimum radius of the
contact patch used to apply torsional friction (in m).
"""
# get USD prim
prim = prim_utils.get_prim_at_path(prim_path)
# iterate over all prims under prim-path
all_prims = [prim]
while len(all_prims) > 0:
# get current prim
child_prim = all_prims.pop(0)
# set collider properties
with contextlib.suppress(ValueError):
set_collision_properties(prim_utils.get_prim_path(child_prim), **kwargs)
# add all children to tree
all_prims += child_prim.GetChildren()
def apply_nested_physics_material(prim_path: str, material_path: str, weaker_than_descendants: bool = False):
"""Apply the physics material on all meshes under a specified prim path.
Physics material can only be applied to prims with physics enabled on them. This includes prims
with collision APIs or deformable-body APIs applied, and particle systems.
Args:
prim_path: The prim path under which to search and apply physics material.
material_path: The path to the physics material to apply.
weaker_than_descendants: Whether the material should override the
descendants materials. Defaults to False.
Raises:
ValueError: If the material path does not exist on stage.
"""
# check if material exists
if not prim_utils.is_prim_path_valid(material_path):
raise ValueError(f"Physics material '{material_path}' does not exist.")
# get USD prim
prim = prim_utils.get_prim_at_path(prim_path)
# iterate over all prims under prim-path
all_prims = [prim]
while len(all_prims) > 0:
# get current prim
child_prim = all_prims.pop(0)
# set physics material
with contextlib.suppress(ValueError):
apply_physics_material(prim_utils.get_prim_path(child_prim), material_path, weaker_than_descendants)
# add all children to tree
all_prims += child_prim.GetChildren()
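# Example (illustrative sketch): the nested variants traverse the subtree under the given path
# and silently skip prims without the relevant schema. The paths and values below are hypothetical.
#
# set_nested_collision_properties("/World/Robot", collision_enabled=True, contact_offset=0.02)
# apply_nested_physics_material("/World/Robot", "/World/Materials/RobotMaterial")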
| 27,088 |
Python
| 43.775207 | 112 | 0.692668 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/markers/point_marker.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""A class to coordinate groups of visual markers (loaded from USD)."""
from __future__ import annotations
import numpy as np
import torch
from collections.abc import Sequence
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from pxr import Gf, UsdGeom
class PointMarker:
"""A class to coordinate groups of visual sphere markers for goal-conditioned tasks.
This class allows visualization of multiple spheres. These can be used to represent a
goal-conditioned task. For instance, if a robot is performing a task of reaching a target, the
class can be used to display a red sphere when the target is far away and a green sphere when
the target is achieved. Otherwise, the class can be used to display spheres, for example, to
mark contact points.
The class uses `UsdGeom.PointInstancer`_ for efficient handling of multiple markers in the stage.
It creates three spherical marker prototypes. Based on the indices provided, the referenced
marker is activated:
- :obj:`0` corresponds to an unachieved target (red sphere).
- :obj:`1` corresponds to an achieved target (green sphere).
- :obj:`2` corresponds to an invisible marker.
Usage:
To create 24 point target markers of radius 0.2 and show them as achieved targets:
.. code-block:: python
from omni.isaac.orbit.compat.markers import PointMarker
# create a point marker
marker = PointMarker("/World/Visuals/goal", 24, radius=0.2)
# set position of the marker
marker_positions = np.random.uniform(-1.0, 1.0, (24, 3))
marker.set_world_poses(marker_positions)
# set status of the marker to show achieved targets
marker.set_status([1] * 24)
.. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html
"""
def __init__(self, prim_path: str, count: int, radius: float = 1.0):
"""Initialize the class.
Args:
prim_path: The prim path where the PointInstancer will be created.
count: The number of marker duplicates to create.
radius: The radius of the sphere. Defaults to 1.0.
Raises:
ValueError: When a prim already exists at the :obj:`prim_path` and it is not a :class:`UsdGeom.PointInstancer`.
"""
# check inputs
stage = stage_utils.get_current_stage()
# -- prim path
if prim_utils.is_prim_path_valid(prim_path):
prim = prim_utils.get_prim_at_path(prim_path)
if not prim.IsA(UsdGeom.PointInstancer):
raise ValueError(f"The prim at path {prim_path} cannot be parsed as a `PointInstancer` object")
self._instancer_manager = UsdGeom.PointInstancer(prim)
else:
self._instancer_manager = UsdGeom.PointInstancer.Define(stage, prim_path)
# store inputs
self.prim_path = prim_path
self.count = count
self._radius = radius
# note: the point instancer was already created or retrieved above, so it is not re-defined here
# create a child prim for the marker
# -- target not achieved
prim = prim_utils.create_prim(f"{prim_path}/target_far", "Sphere", attributes={"radius": self._radius})
geom = UsdGeom.Sphere(prim)
geom.GetDisplayColorAttr().Set([(1.0, 0.0, 0.0)])
# -- target achieved
prim = prim_utils.create_prim(f"{prim_path}/target_close", "Sphere", attributes={"radius": self._radius})
geom = UsdGeom.Sphere(prim)
geom.GetDisplayColorAttr().Set([(0.0, 1.0, 0.0)])
# -- target invisible
prim = prim_utils.create_prim(f"{prim_path}/target_invisible", "Sphere", attributes={"radius": self._radius})
geom = UsdGeom.Sphere(prim)
geom.GetDisplayColorAttr().Set([(0.0, 0.0, 1.0)])
prim_utils.set_prim_visibility(prim, visible=False)
# add child reference to point instancer
relation_manager = self._instancer_manager.GetPrototypesRel()
relation_manager.AddTarget(f"{prim_path}/target_far") # target index: 0
relation_manager.AddTarget(f"{prim_path}/target_close") # target index: 1
relation_manager.AddTarget(f"{prim_path}/target_invisible") # target index: 2
# buffers for storing data in pixar Gf format
# FUTURE: Make them very far away from the scene?
self._proto_indices = [0] * self.count
self._gf_positions = [Gf.Vec3f() for _ in range(self.count)]
self._gf_orientations = [Gf.Quath() for _ in range(self.count)]
# FUTURE: add option to set scales
# specify that all initial prims are related to same geometry
self._instancer_manager.GetProtoIndicesAttr().Set(self._proto_indices)
# set initial positions of the targets
self._instancer_manager.GetPositionsAttr().Set(self._gf_positions)
self._instancer_manager.GetOrientationsAttr().Set(self._gf_orientations)
def set_visibility(self, visible: bool):
"""Sets the visibility of the markers.
The method does this through the USD API.
Args:
visible: flag to set the visibility.
"""
imageable = UsdGeom.Imageable(self._instancer_manager)
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def set_world_poses(
self,
positions: np.ndarray | torch.Tensor | None = None,
orientations: np.ndarray | torch.Tensor | None = None,
indices: Sequence[int] | None = None,
):
"""Update marker poses in the simulation world frame.
Args:
positions:
Positions in the world frame. Shape is (M, 3). Defaults to None, which means left unchanged.
orientations:
Quaternion orientations (w, x, y, z) in the world frame of the prims. Shape is (M, 4).
Defaults to None, which means left unchanged.
indices: Indices to specify which marker poses to alter.
Shape is (M,), where M <= total number of markers. Defaults to None (i.e: all markers).
"""
# resolve inputs
if positions is not None:
positions = positions.tolist()
if orientations is not None:
orientations = orientations.tolist()
if indices is None:
indices = range(self.count)
# change marker locations
for i, marker_index in enumerate(indices):
if positions is not None:
self._gf_positions[marker_index][:] = positions[i]
if orientations is not None:
self._gf_orientations[marker_index].SetReal(orientations[i][0])
self._gf_orientations[marker_index].SetImaginary(orientations[i][1:])
# apply to instance manager
self._instancer_manager.GetPositionsAttr().Set(self._gf_positions)
self._instancer_manager.GetOrientationsAttr().Set(self._gf_orientations)
def set_status(self, status: list[int] | np.ndarray | torch.Tensor, indices: Sequence[int] | None = None):
"""Updates the marker activated by the instance manager.
Args:
status: Decides which prototype marker to visualize. Shape is (M,).
indices: Indices to specify which markers to alter.
Shape is (M,), where M <= total number of markers. Defaults to None (i.e: all markers).
"""
# default values
if indices is None:
indices = range(self.count)
# resolve input
if not isinstance(status, list):
proto_indices = status.tolist()
else:
proto_indices = status
# change marker locations
for i, marker_index in enumerate(indices):
self._proto_indices[marker_index] = int(proto_indices[i])
# apply to instance manager
self._instancer_manager.GetProtoIndicesAttr().Set(self._proto_indices)
| 8,156 |
Python
| 42.854838 | 123 | 0.637813 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/markers/__init__.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This submodule provides marker utilities for simplifying creation of UI elements in the GUI.
Currently, the module provides two classes:
* :class:`StaticMarker` for creating a group of markers from a single USD file.
* :class:`PointMarker` for creating a group of spheres.
.. note::
For some simple use cases, it may be sufficient to use the debug drawing utilities from Isaac Sim.
The debug drawing API is available in the `omni.isaac.debug_drawing`_ module. It allows drawing of
points and splines efficiently on the UI.
.. _omni.isaac.debug_drawing: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/ext_omni_isaac_debug_drawing.html
"""
from .point_marker import PointMarker
from .static_marker import StaticMarker
| 879 |
Python
| 31.592591 | 127 | 0.758817 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/compat/markers/static_marker.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""A class to coordinate groups of visual markers (loaded from USD)."""
from __future__ import annotations
import numpy as np
import torch
from collections.abc import Sequence
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.core.utils.stage as stage_utils
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.core.prims import GeometryPrim
from pxr import Gf, UsdGeom
import omni.isaac.orbit.compat.utils.kit as kit_utils
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR, check_file_path
class StaticMarker:
"""A class to coordinate groups of visual markers (loaded from USD).
This class allows visualization of different UI elements in the scene, such as points and frames.
The class uses `UsdGeom.PointInstancer`_ for efficient handling of the element in the stage
via instancing of the marker.
Usage:
To create 24 default frame markers with a scale of 0.5:
.. code-block:: python
from omni.isaac.orbit.compat.markers import StaticMarker
# create a static marker
marker = StaticMarker("/World/Visuals/frames", 24, scale=(0.5, 0.5, 0.5))
# set position of the marker
marker_positions = np.random.uniform(-1.0, 1.0, (24, 3))
marker.set_world_poses(marker_positions)
.. _UsdGeom.PointInstancer: https://graphics.pixar.com/usd/dev/api/class_usd_geom_point_instancer.html
"""
def __init__(
self,
prim_path: str,
count: int,
usd_path: str | None = None,
scale: tuple[float, float, float] = (1.0, 1.0, 1.0),
color: tuple[float, float, float] | None = None,
):
"""Initialize the class.
When the class is initialized, the :class:`UsdGeom.PointInstancer` is created on the stage
and the marker prim is registered into it.
Args:
prim_path: The prim path where the PointInstancer will be created.
count: The number of marker duplicates to create.
usd_path: The USD file path to the marker. Defaults to the USD path for the RGB frame axis marker.
scale: The scale of the marker. Defaults to (1.0, 1.0, 1.0).
color: The color of the marker. If provided, it overrides the existing color on all the
prims of the marker. Defaults to None.
Raises:
ValueError: When a prim already exists at the :obj:`prim_path` and it is not a
:class:`UsdGeom.PointInstancer`.
FileNotFoundError: When the USD file at :obj:`usd_path` does not exist.
"""
# resolve default markers in the UI elements
# -- USD path
if usd_path is None:
usd_path = f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd"
else:
if not check_file_path(usd_path):
raise FileNotFoundError(f"USD file for the marker not found at: {usd_path}")
# -- prim path
stage = stage_utils.get_current_stage()
if prim_utils.is_prim_path_valid(prim_path):
# retrieve prim if it exists
prim = prim_utils.get_prim_at_path(prim_path)
if not prim.IsA(UsdGeom.PointInstancer):
raise ValueError(f"The prim at path {prim_path} cannot be parsed as a `PointInstancer` object")
self._instancer_manager = UsdGeom.PointInstancer(prim)
else:
# create a new prim
self._instancer_manager = UsdGeom.PointInstancer.Define(stage, prim_path)
# store inputs
self.prim_path = prim_path
self.count = count
self._usd_path = usd_path
# note: the point instancer was already created or retrieved above, so it is not re-defined here
# create a child prim for the marker
prim_utils.create_prim(f"{prim_path}/marker", usd_path=self._usd_path)
# disable any physics on the marker
# FIXME: Also support disabling rigid body properties on the marker.
# Currently, it is not possible on GPU pipeline.
# kit_utils.set_nested_rigid_body_properties(f"{prim_path}/marker", rigid_body_enabled=False)
kit_utils.set_nested_collision_properties(f"{prim_path}/marker", collision_enabled=False)
# apply material to marker
if color is not None:
prim = GeometryPrim(f"{prim_path}/marker")
material = PreviewSurface(f"{prim_path}/markerColor", color=np.asarray(color))
prim.apply_visual_material(material, weaker_than_descendants=False)
# add child reference to point instancer
# FUTURE: Add support for multiple markers in the same instance manager?
relation_manager = self._instancer_manager.GetPrototypesRel()
relation_manager.AddTarget(f"{prim_path}/marker") # target index: 0
# buffers for storing data in pixar Gf format
# FUTURE: Make them very far away from the scene?
self._gf_positions = [Gf.Vec3f() for _ in range(self.count)]
self._gf_orientations = [Gf.Quath() for _ in range(self.count)]
self._gf_scales = [Gf.Vec3f(*tuple(scale)) for _ in range(self.count)]
# specify that all vis prims are related to same geometry
self._instancer_manager.GetProtoIndicesAttr().Set([0] * self.count)
# set initial positions of the targets
self._instancer_manager.GetScalesAttr().Set(self._gf_scales)
self._instancer_manager.GetPositionsAttr().Set(self._gf_positions)
self._instancer_manager.GetOrientationsAttr().Set(self._gf_orientations)
def set_visibility(self, visible: bool):
"""Sets the visibility of the markers.
The method does this through the USD API.
Args:
visible: flag to set the visibility.
"""
imageable = UsdGeom.Imageable(self._instancer_manager)
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def set_world_poses(
self,
positions: np.ndarray | torch.Tensor | None = None,
orientations: np.ndarray | torch.Tensor | None = None,
indices: Sequence[int] | None = None,
):
"""Update marker poses in the simulation world frame.
Args:
positions: Positions in the world frame. Shape is (M, 3). Defaults to None, which means left unchanged.
orientations: Quaternion orientations (w, x, y, z) in the world frame of the prims. Shape is (M, 4).
Defaults to None, which means left unchanged.
indices: Indices to specify which marker poses to alter. Shape is (M,), where M <= total number of markers.
Defaults to None (i.e: all markers).
"""
# resolve inputs
if positions is not None:
positions = positions.tolist()
if orientations is not None:
orientations = orientations.tolist()
if indices is None:
indices = range(self.count)
# change marker locations
for i, marker_index in enumerate(indices):
if positions is not None:
self._gf_positions[marker_index][:] = positions[i]
if orientations is not None:
self._gf_orientations[marker_index].SetReal(orientations[i][0])
self._gf_orientations[marker_index].SetImaginary(orientations[i][1:])
# apply to instance manager
self._instancer_manager.GetPositionsAttr().Set(self._gf_positions)
self._instancer_manager.GetOrientationsAttr().Set(self._gf_orientations)
def set_scales(self, scales: np.ndarray | torch.Tensor, indices: Sequence[int] | None = None):
"""Update marker poses in the simulation world frame.
Args:
scales: Scale applied before any rotation is applied. Shape is (M, 3).
indices: Indices to specify which marker scales to alter.
Shape is (M,), where M <= total number of markers. Defaults to None (i.e: all markers).
"""
# default arguments
if indices is None:
indices = range(self.count)
# resolve inputs
scales = scales.tolist()
# change marker locations
for i, marker_index in enumerate(indices):
self._gf_scales[marker_index][:] = scales[i]
# apply to instance manager
self._instancer_manager.GetScalesAttr().Set(self._gf_scales)
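# Example (illustrative sketch): shrinking a subset of markers. The prim path and values below are hypothetical.
#
# marker = StaticMarker("/World/Visuals/frames", 8)
# marker.set_scales(np.full((2, 3), 0.25), indices=[0, 1])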
| 8,586 |
Python
| 42.811224 | 115 | 0.637317 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/string.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""Sub-module containing utilities for transforming strings and regular expressions."""
from __future__ import annotations
import ast
import importlib
import inspect
import re
from collections.abc import Callable, Sequence
from typing import Any
"""
String formatting.
"""
def to_camel_case(snake_str: str, to: str = "cC") -> str:
"""Converts a string from snake case to camel case.
Args:
snake_str: A string in snake case (i.e. with '_')
to: Convention to convert string to. Defaults to "cC".
Raises:
ValueError: Invalid input argument `to`, i.e. not "cC" or "CC".
Returns:
A string in camel-case format.
"""
# check input is correct
if to not in ["cC", "CC"]:
msg = "to_camel_case(): Choose a valid `to` argument (CC or cC)"
raise ValueError(msg)
# convert string to lower case and split
components = snake_str.lower().split("_")
if to == "cC":
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + "".join(x.title() for x in components[1:])
else:
# Capitalize first letter in all the components
return "".join(x.title() for x in components)
def to_snake_case(camel_str: str) -> str:
"""Converts a string from camel case to snake case.
Args:
camel_str: A string in camel case.
Returns:
A string in snake case (i.e. with '_')
"""
camel_str = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_str)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", camel_str).lower()
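# Example (illustrative, derived from the conversions above):
#
# to_camel_case("base_env", to="cC")  # -> "baseEnv"
# to_camel_case("base_env", to="CC")  # -> "BaseEnv"
# to_snake_case("BaseEnvCfg")         # -> "base_env_cfg"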
"""
String <-> Callable operations.
"""
def is_lambda_expression(name: str) -> bool:
"""Checks if the input string is a lambda expression.
Args:
name: The input string.
Returns:
Whether the input string is a lambda expression.
"""
try:
tree = ast.parse(name)
return isinstance(tree.body[0], ast.Expr) and isinstance(tree.body[0].value, ast.Lambda)
except SyntaxError:
return False
def callable_to_string(value: Callable) -> str:
"""Converts a callable object to a string.
Args:
value: A callable object.
Raises:
ValueError: When the input argument is not a callable object.
Returns:
A string representation of the callable object.
"""
# check if callable
if not callable(value):
raise ValueError(f"The input argument is not callable: {value}.")
# check if lambda function
if value.__name__ == "<lambda>":
return f"lambda {inspect.getsourcelines(value)[0][0].strip().split('lambda')[1].strip().split(',')[0]}"
else:
# get the module and function name
module_name = value.__module__
function_name = value.__name__
# return the string
return f"{module_name}:{function_name}"
def string_to_callable(name: str) -> Callable:
"""Resolves the module and function names to return the function.
Args:
name: The function name. The format should be 'module:attribute_name' or a
lambda expression of format: 'lambda x: x'.
Raises:
ValueError: When the resolved attribute is not a function.
ValueError: When the module cannot be found.
Returns:
Callable: The function loaded from the module.
"""
try:
if is_lambda_expression(name):
callable_object = eval(name)
else:
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
callable_object = getattr(mod, attr_name)
# check if attribute is callable
if callable(callable_object):
return callable_object
else:
raise AttributeError(f"The imported object is not callable: '{name}'")
except (ValueError, ModuleNotFoundError) as e:
msg = (
f"Could not resolve the input string '{name}' into callable object."
" The format of input should be 'module:attribute_name'.\n"
f"Received the error:\n {e}."
)
raise ValueError(msg)
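# Example (illustrative): a round-trip between callables and strings, assuming the standard
# library module `math` is importable.
#
# callable_to_string(math.sqrt)             # -> "math:sqrt"
# string_to_callable("math:sqrt")(4.0)      # -> 2.0
# string_to_callable("lambda x: x * 2")(3)  # -> 6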
"""
Regex operations.
"""
def resolve_matching_names(keys: str | Sequence[str], list_of_strings: Sequence[str]) -> tuple[list[int], list[str]]:
"""Match a list of query regular expressions against a list of strings and return the matched indices and names.
When a list of query regular expressions is provided, the function checks each target string against each
query regular expression and returns the indices of the matched strings and the matched strings.
This means that the ordering is dictated by the order of the target strings and not the order of the query
regular expressions.
For example, if the list of strings is ['a', 'b', 'c', 'd', 'e'] and the regular expressions are ['a|c', 'b'],
then the function will return the indices of the matched strings and the matched strings, i.e.
([0, 1, 2], ['a', 'b', 'c']).
Note:
The function does not sort the indices. It returns the indices in the order they are found.
Args:
keys: A regular expression or a list of regular expressions to match the strings in the list.
list_of_strings: A list of strings to match.
Returns:
A tuple of lists containing the matched indices and names.
Raises:
ValueError: When multiple matches are found for a string in the list.
ValueError: When not all regular expressions are matched.
"""
# resolve name keys
if isinstance(keys, str):
keys = [keys]
# find matching patterns
index_list = []
names_list = []
# book-keeping to check that we always have a one-to-one mapping
# i.e. each target string should match only one regular expression
target_strings_match_found = [None for _ in range(len(list_of_strings))]
keys_match_found = [[] for _ in range(len(keys))]
# loop over all target strings
for target_index, potential_match_string in enumerate(list_of_strings):
for key_index, re_key in enumerate(keys):
if re.fullmatch(re_key, potential_match_string):
# check if match already found
if target_strings_match_found[target_index]:
raise ValueError(
f"Multiple matches for '{potential_match_string}':"
f" '{target_strings_match_found[target_index]}' and '{re_key}'!"
)
# add to list
target_strings_match_found[target_index] = re_key
index_list.append(target_index)
names_list.append(potential_match_string)
# add for regex key
keys_match_found[key_index].append(potential_match_string)
# check that all regular expressions are matched
if not all(keys_match_found):
# make this print nicely aligned for debugging
msg = "\n"
for key, value in zip(keys, keys_match_found):
msg += f"\t{key}: {value}\n"
msg += f"Available strings: {list_of_strings}\n"
# raise error
raise ValueError(
f"Not all regular expressions are matched! Please check that the regular expressions are correct: {msg}"
)
# return
return index_list, names_list
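# Example (illustrative): matching a joint-name pattern against an articulation's joint list.
# The names below are hypothetical.
#
# resolve_matching_names("joint_.*", ["joint_1", "joint_2", "gripper"])
# # -> ([0, 1], ["joint_1", "joint_2"])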
def resolve_matching_names_values(
data: dict[str, Any], list_of_strings: Sequence[str]
) -> tuple[list[int], list[str], list[Any]]:
"""Match a list of regular expressions in a dictionary against a list of strings and return
the matched indices, names, and values.
For example, if the dictionary is {'a|b|c': 1, 'd|e': 2} and the list of strings is ['a', 'b', 'c', 'd', 'e'],
then the function will return the indices of the matched strings, the matched strings, and the values, i.e.
([0, 1, 2, 3, 4], ['a', 'b', 'c', 'd', 'e'], [1, 1, 1, 2, 2]).
Note:
The function does not sort the indices. It returns the indices in the order they are found.
Args:
data: A dictionary of regular expressions and values to match the strings in the list.
list_of_strings: A list of strings to match.
Returns:
A tuple of lists containing the matched indices, names, and values.
Raises:
TypeError: When the input argument `data` is not a dictionary.
ValueError: When multiple matches are found for a string in the dictionary.
ValueError: When not all regular expressions in the data keys are matched.
"""
# check valid input
if not isinstance(data, dict):
raise TypeError(f"Input argument `data` should be a dictionary. Received: {data}")
# find matching patterns
index_list = []
names_list = []
values_list = []
# book-keeping to check that we always have a one-to-one mapping
# i.e. each target string should match only one regular expression
target_strings_match_found = [None for _ in range(len(list_of_strings))]
keys_match_found = [[] for _ in range(len(data))]
# loop over all target strings
for target_index, potential_match_string in enumerate(list_of_strings):
for key_index, (re_key, value) in enumerate(data.items()):
if re.fullmatch(re_key, potential_match_string):
# check if match already found
if target_strings_match_found[target_index]:
raise ValueError(
f"Multiple matches for '{potential_match_string}':"
f" '{target_strings_match_found[target_index]}' and '{re_key}'!"
)
# add to list
target_strings_match_found[target_index] = re_key
index_list.append(target_index)
names_list.append(potential_match_string)
values_list.append(value)
# add for regex key
keys_match_found[key_index].append(potential_match_string)
# check that all regular expressions are matched
if not all(keys_match_found):
# make this print nicely aligned for debugging
msg = "\n"
for key, value in zip(data.keys(), keys_match_found):
msg += f"\t{key}: {value}\n"
msg += f"Available strings: {list_of_strings}\n"
# raise error
raise ValueError(
f"Not all regular expressions are matched! Please check that the regular expressions are correct: {msg}"
)
# return
return index_list, names_list, values_list
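# Example (illustrative): expanding a regex-keyed gain dictionary over joint names.
# The names and values below are hypothetical.
#
# resolve_matching_names_values({"joint_[1-2]": 100.0, "gripper": 20.0}, ["joint_1", "joint_2", "gripper"])
# # -> ([0, 1, 2], ["joint_1", "joint_2", "gripper"], [100.0, 100.0, 20.0])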
| 10,678 |
Python
| 36.734982 | 118 | 0.623057 |
John-Dillermand/Isaac-Orbit/orbit/source/extensions/omni.isaac.orbit_assets/docs/CHANGELOG.rst
|
Changelog
---------
0.1.0 (2023-12-20)
~~~~~~~~~~~~~~~~~~
Added
^^^^^
* Moved all assets' configuration from ``omni.isaac.orbit`` to ``omni.isaac.orbit_assets`` extension.
| 175 |
reStructuredText
| 14.999999 | 101 | 0.577143 |
John-Dillermand/Isaac-Orbit/orbit/source/standalone/tutorials/04_sensors/run_usd_camera.py
|
# Copyright (c) 2022-2024, The ORBIT Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
This script shows how to use the camera sensor from the Orbit framework.
The camera sensor is created and interfaced through the Omniverse Replicator API. However, instead of using
the simulator or OpenGL convention for the camera, we use the robotics or ROS convention.
.. code-block:: bash
# Usage
./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py
"""
from __future__ import annotations
"""Launch Isaac Sim Simulator first."""
import argparse
from omni.isaac.orbit.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="This script demonstrates how to use the camera sensor.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU device for camera output.")
parser.add_argument("--draw", action="store_true", default=False, help="Draw the obtained pointcloud on viewport.")
parser.add_argument("--save", action="store_true", default=False, help="Save the obtained data to disk.")
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
"""Rest everything follows."""
import numpy as np
import os
import random
import torch
import omni.isaac.core.utils.prims as prim_utils
import omni.isaac.debug_draw._debug_draw as omni_debug_draw
import omni.replicator.core as rep
from omni.isaac.core.prims import RigidPrim
from pxr import Gf, UsdGeom
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.sensors.camera import Camera, CameraCfg
from omni.isaac.orbit.utils import convert_dict_to_backend
from omni.isaac.orbit.utils.math import project_points, transform_points, unproject_depth
def define_sensor() -> Camera:
"""Defines the camera sensor to add to the scene."""
# Setup camera sensor
# In contrast to the ray-cast camera, we spawn the prim at these locations.
# This means the camera sensor will be attached to these prims.
prim_utils.create_prim("/World/Origin_00", "Xform")
prim_utils.create_prim("/World/Origin_01", "Xform")
camera_cfg = CameraCfg(
prim_path="/World/Origin_.*/CameraSensor",
update_period=0,
height=480,
width=640,
data_types=["rgb", "distance_to_image_plane", "normals"],
spawn=sim_utils.PinholeCameraCfg(
focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
),
)
# Create camera
camera = Camera(cfg=camera_cfg)
return camera
def design_scene():
"""Design the scene."""
# Populate scene
# -- Ground-plane
cfg = sim_utils.GroundPlaneCfg()
cfg.func("/World/defaultGroundPlane", cfg)
# -- Lights
cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
cfg.func("/World/Light", cfg)
# Xform to hold objects
prim_utils.create_prim("/World/Objects", "Xform")
# Random objects
for i in range(8):
# sample random position
position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0])
position *= np.asarray([1.5, 1.5, 0.5])
# create prim
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
_ = prim_utils.create_prim(
f"/World/Objects/Obj_{i:02d}",
prim_type,
translation=position,
scale=(0.25, 0.25, 0.25),
semantic_label=prim_type,
)
# add rigid properties
rigid_obj = RigidPrim(f"/World/Objects/Obj_{i:02d}", mass=5.0)
# cast to geom prim
geom_prim = getattr(UsdGeom, prim_type)(rigid_obj.prim)
# set random color
color = Gf.Vec3f(random.random(), random.random(), random.random())
geom_prim.CreateDisplayColorAttr()
geom_prim.GetDisplayColorAttr().Set([color])
# Sensors
camera = define_sensor()
# return the scene information
scene_entities = {"camera": camera}
return scene_entities
def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
"""Run the simulator."""
# extract entities for simplified notation
camera: Camera = scene_entities["camera"]
# Create replicator writer
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera")
rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)
# Acquire draw interface
draw_interface = omni_debug_draw.acquire_debug_draw_interface()
# Set pose: There are two ways to set the pose of the camera.
# -- Option-1: Set pose using view
eyes = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
camera.set_world_poses_from_view(eyes, targets)
# -- Option-2: Set pose using ROS
# position = torch.tensor([[2.5, 2.5, 2.5]], device=sim.device)
# orientation = torch.tensor([[-0.17591989, 0.33985114, 0.82047325, -0.42470819]], device=sim.device)
# camera.set_world_poses(position, orientation, env_ids=[0], convention="ros")
# Simulate for a few steps
# note: This is a workaround to ensure that the textures are loaded.
# Check "Known Issues" section in the documentation for more details.
for _ in range(3):
sim.step()
# Simulate physics
while simulation_app.is_running():
# Step simulation
sim.step()
# Update camera data
camera.update(dt=sim.get_physics_dt())
# Print camera info
print(camera)
print("Received shape of rgb image: ", camera.data.output["rgb"].shape)
print("Received shape of depth image: ", camera.data.output["distance_to_image_plane"].shape)
print("-------------------------------")
# Extract camera data
if args_cli.save:
# Save images from camera 1
camera_index = 1
# note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy.
if sim.backend == "torch":
# tensordict allows easy indexing of tensors in the dictionary
single_cam_data = convert_dict_to_backend(camera.data.output[camera_index], backend="numpy")
else:
# for numpy, we need to manually index the data
single_cam_data = dict()
for key, value in camera.data.output.items():
single_cam_data[key] = value[camera_index]
# Extract the other information
single_cam_info = camera.data.info[camera_index]
# Pack data back into replicator format to save them using its writer
rep_output = dict()
for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
if info is not None:
rep_output[key] = {"data": data, "info": info}
else:
rep_output[key] = data
# Save images
# Note: We need to provide the `on_time` trigger data for Replicator to save the images.
rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
rep_writer.write(rep_output)
# Draw pointcloud
if not args_cli.headless and args_cli.draw:
# Pointcloud in world frame
points_3d_cam = unproject_depth(
camera.data.output["distance_to_image_plane"], camera.data.intrinsic_matrices
)
points_3d_world = transform_points(points_3d_cam, camera.data.pos_w, camera.data.quat_w_ros)
# Check methods are valid
im_height, im_width = camera.image_shape
# -- project points to (u, v, d)
reproj_points = project_points(points_3d_cam, camera.data.intrinsic_matrices)
reproj_depths = reproj_points[..., -1].view(-1, im_width, im_height).transpose_(1, 2)
sim_depths = camera.data.output["distance_to_image_plane"].squeeze(-1)
            torch.testing.assert_close(reproj_depths, sim_depths)
# Convert to numpy for visualization
if not isinstance(points_3d_world, np.ndarray):
points_3d_world = points_3d_world.cpu().numpy()
# Clear any existing points
draw_interface.clear_points()
# Obtain drawing settings
num_batch = points_3d_world.shape[0]
num_points = points_3d_world.shape[1]
points_size = [1.25] * num_points
# Fix random seed
random.seed(0)
# Visualize the points
for index in range(num_batch):
# generate random color
color = [random.random() for _ in range(3)]
color += [1.0]
# plain color for points
points_color = [color] * num_points
draw_interface.draw_points(points_3d_world[index].tolist(), points_color, points_size)
def main():
"""Main function."""
# Load simulation context
sim_cfg = sim_utils.SimulationCfg(device="cpu" if args_cli.cpu else "cuda")
sim = sim_utils.SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
# design the scene
scene_entities = design_scene()
# Play simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run simulator
run_simulator(sim, scene_entities)
if __name__ == "__main__":
# run the main function
main()
# close sim app
simulation_app.close()
| 9,780 |
Python
| 37.507874 | 115 | 0.62863 |
John-Dillermand/Isaac-Orbit/orbit/docker/docker-compose.yaml
|
services:
# This service is used to build the Docker image
# The docker image is built from the root directory
orbit:
build:
context: ../
dockerfile: docker/Dockerfile
args:
- ISAACSIM_VERSION=${ISAACSIM_VERSION}
- ISAACSIM_PATH=${DOCKER_ISAACSIM_PATH}
- DOCKER_USER_HOME=${DOCKER_USER_HOME}
image: orbit
container_name: orbit
env_file:
- .env
# We set DOCKER_ISAACSIM_PATH and then forward it to ISAACSIM_PATH within
# the container to avoid collision with pre-existing ISAACSIM_PATH env vars
# that could come from installing Orbit on the local machine, causing build errors
environment:
- ISAACSIM_PATH=${DOCKER_ISAACSIM_PATH}
# This should also be enabled for X11 forwarding
# - DISPLAY=${DISPLAY}
volumes:
# These volumes follow from this page
# https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_faq.html#save-isaac-sim-configs-on-local-disk
- type: volume
source: isaac-cache-kit
target: ${DOCKER_ISAACSIM_PATH}/kit/cache
- type: volume
source: isaac-cache-ov
target: ${DOCKER_USER_HOME}/.cache/ov
- type: volume
source: isaac-cache-pip
target: ${DOCKER_USER_HOME}/.cache/pip
- type: volume
source: isaac-cache-gl
target: ${DOCKER_USER_HOME}/.cache/nvidia/GLCache
- type: volume
source: isaac-cache-compute
target: ${DOCKER_USER_HOME}/.nv/ComputeCache
- type: volume
source: isaac-logs
target: ${DOCKER_USER_HOME}/.nvidia-omniverse/logs
- type: volume
source: isaac-carb-logs
target: ${DOCKER_ISAACSIM_PATH}/kit/logs/Kit/Isaac-Sim
- type: volume
source: isaac-data
target: ${DOCKER_USER_HOME}/.local/share/ov/data
- type: volume
source: isaac-docs
target: ${DOCKER_USER_HOME}/Documents
# These volumes allow X11 Forwarding
# We currently comment these out because they can
# cause bugs and warnings for people uninterested in
# X11 Forwarding from within the docker. We keep them
# as comments as a convenience for those seeking X11
# forwarding until a scripted solution is developed
# - type: bind
# source: /tmp/.X11-unix
# target: /tmp/.X11-unix
# - type: bind
# source: ${HOME}/.Xauthority
# target: ${DOCKER_USER_HOME}/.Xauthority
# This overlay allows changes on the local files to
# be reflected within the container immediately
- type: bind
source: ../source
target: /workspace/orbit/source
- type: bind
source: ../docs
target: /workspace/orbit/docs
# The effect of these volumes is twofold:
# 1. Prevent root-owned files from flooding the _build and logs dir
# on the host machine
# 2. Preserve the artifacts in persistent volumes for later copying
# to the host machine
- type: volume
source: orbit-docs
target: /workspace/orbit/docs/_build
- type: volume
source: orbit-logs
target: /workspace/orbit/logs
- type: volume
source: orbit-data
target: /workspace/orbit/data_storage
network_mode: host
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [ gpu ]
# This is the entrypoint for the container
entrypoint: bash
stdin_open: true
tty: true
volumes:
# isaac-sim
isaac-cache-kit:
isaac-cache-ov:
isaac-cache-pip:
isaac-cache-gl:
isaac-cache-compute:
isaac-logs:
isaac-carb-logs:
isaac-data:
isaac-docs:
# orbit
orbit-docs:
orbit-logs:
orbit-data:
| 3,820 |
YAML
| 32.226087 | 121 | 0.620681 |
John-Dillermand/Isaac-Orbit/orbit/docs/index.rst
|
Overview
========
**Orbit** is a unified and modular framework for robot learning that aims to simplify common workflows
in robotics research (such as RL, learning from demonstrations, and motion planning). It is built upon
`NVIDIA Isaac Sim`_ to leverage the latest simulation capabilities for photo-realistic scenes, and fast
and efficient simulation. The core objectives of the framework are:
- **Modularity**: Easily customize and add new environments, robots, and sensors.
- **Agility**: Adapt to the changing needs of the community.
- **Openness**: Remain open-sourced to allow the community to contribute and extend the framework.
- **Batteries-included**: Include a number of environments, sensors, and tasks that are ready to use.
For more information about the framework, please refer to the `paper <https://arxiv.org/abs/2301.04195>`_
:cite:`mittal2023orbit`. For clarifications on NVIDIA Isaac ecosystem, please check out the
:doc:`/source/refs/faq` section.
.. figure:: source/_static/tasks.jpg
:width: 100%
:alt: Example tasks created using orbit
.. toctree::
:maxdepth: 2
:caption: Getting Started
source/setup/installation
source/setup/developer
source/setup/sample
.. toctree::
:maxdepth: 2
:caption: Features
source/features/environments
source/features/actuators
.. source/features/motion_generators
.. toctree::
:maxdepth: 1
:caption: Resources
:titlesonly:
source/tutorials/index
source/how-to/index
source/deployment/index
.. toctree::
:maxdepth: 1
:caption: Source API
source/api/index
.. toctree::
:maxdepth: 1
:caption: References
source/refs/faq
source/refs/migration
source/refs/contributing
source/refs/troubleshooting
source/refs/issues
source/refs/changelog
source/refs/license
source/refs/bibliography
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. _NVIDIA Isaac Sim: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html
| 2,029 |
reStructuredText
| 25.710526 | 105 | 0.727945 |
John-Dillermand/Isaac-Orbit/orbit/docs/source/how-to/save_camera_output.rst
|
.. _how-to-save-images-and-3d-reprojection:
Saving rendered images and 3D re-projection
===========================================
.. currentmodule:: omni.isaac.orbit
This guide is accompanied by the ``run_usd_camera.py`` script in the ``orbit/source/standalone/tutorials/04_sensors``
directory.
.. dropdown:: Code for run_usd_camera.py
:icon: code
.. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py
:language: python
:emphasize-lines: 137-139, 172-196, 200-204, 214-232
:linenos:
Saving using Replicator Basic Writer
------------------------------------
To save camera outputs, we use the basic writer class from Omniverse Replicator. This class allows us to save the
images in numpy format. For more information on the basic writer, please check the
`documentation <https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/writer_examples.html>`_.
.. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py
:language: python
:lines: 137-139
:dedent:
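For reference, the writer setup amounts to the following sketch (the output directory shown here is an
assumption for illustration; the script derives it from its own file location):

.. code-block:: python

   import os

   import omni.replicator.core as rep

   # write annotator outputs as numbered files, e.g. rgb_000.png, rgb_001.png, ...
   output_dir = os.path.join(os.getcwd(), "output", "camera")
   rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)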
While stepping the simulator, the images can be saved to the defined folder. Since the BasicWriter only supports
saving data using NumPy format, we first need to convert the PyTorch sensors to NumPy arrays before packing
them in a dictionary.
.. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py
:language: python
:lines: 172-192
:dedent:
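Conceptually, the conversion is a per-key move to the CPU followed by a cast to numpy. A minimal sketch,
where ``camera_output`` is a stand-in for the output dictionary of a single camera index (the script itself
uses the ``convert_dict_to_backend`` utility when the backend is torch):

.. code-block:: python

   # move each torch tensor to the CPU and convert it to a numpy array
   single_cam_data = {key: value.cpu().numpy() for key, value in camera_output.items()}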
After this step, we can save the images using the BasicWriter.
.. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py
:language: python
:lines: 193-196
:dedent:
Projection into 3D Space
------------------------
We include utilities to project the depth image into 3D space. The re-projection operations are done using
PyTorch operations, which allow for faster computation.
.. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py
:language: python
:lines: 200-204
:dedent:
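At its core, this is the standard pinhole-camera unprojection. The following is a minimal, single-image
sketch (assuming an ``(H, W)`` z-depth tensor and a ``3x3`` intrinsic matrix ``K``; the bundled utility
is batched and device-aware):

.. code-block:: python

   import torch

   def unproject_depth_sketch(depth: torch.Tensor, K: torch.Tensor) -> torch.Tensor:
       """Lift an (H, W) z-depth image to (H*W, 3) points in the camera frame."""
       H, W = depth.shape
       v, u = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
       pixels = torch.stack([u, v, torch.ones_like(u)], dim=-1).to(depth.dtype)
       # map homogeneous pixel coordinates through K^-1, then scale each ray by its depth
       rays = pixels.reshape(-1, 3) @ torch.linalg.inv(K).T
       return rays * depth.reshape(-1, 1)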
The resulting point cloud can be visualized using the :mod:`omni.isaac.debug_draw` extension from Isaac Sim.
This makes it easy to inspect the point cloud in the 3D space.
.. literalinclude:: ../../../source/standalone/tutorials/04_sensors/run_usd_camera.py
:language: python
:lines: 214-232
:dedent:
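In short, the drawing interface is acquired once and then fed plain Python lists. A sketch, assuming the
``omni.isaac.debug_draw`` extension is loaded (the points and colors below are placeholder values):

.. code-block:: python

   from omni.isaac.debug_draw import _debug_draw

   draw = _debug_draw.acquire_debug_draw_interface()
   draw.clear_points()
   # one [x, y, z] per point, one [r, g, b, a] color and one float size per point
   points = [[0.0, 0.0, 1.0], [0.5, 0.0, 1.0]]
   colors = [[1.0, 0.0, 0.0, 1.0]] * len(points)
   sizes = [1.25] * len(points)
   draw.draw_points(points, colors, sizes)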
Executing the script
--------------------
To run the accompanying script, execute the following command:
.. code-block:: bash
./orbit.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --draw
The simulation should start, and you can observe different objects falling down. An output folder will be created
in the ``orbit/source/standalone/tutorials/04_sensors`` directory, where the images will be saved. Additionally,
you should see the point cloud in the 3D space drawn on the viewport.
To stop the simulation, close the window, press the ``STOP`` button in the UI, or use ``Ctrl+C`` in the terminal.
| 2,951 |
reStructuredText
| 33.729411 | 116 | 0.713318 |
John-Dillermand/Isaac-Orbit/orbit/docs/source/api/orbit/omni.isaac.orbit.managers.rst
|
orbit.managers
==============
.. automodule:: omni.isaac.orbit.managers
.. rubric:: Classes
.. autosummary::
SceneEntityCfg
ManagerBase
ManagerTermBase
ManagerTermBaseCfg
ObservationManager
ObservationGroupCfg
ObservationTermCfg
ActionManager
ActionTerm
ActionTermCfg
RandomizationManager
RandomizationTermCfg
CommandManager
CommandTerm
CommandTermCfg
RewardManager
RewardTermCfg
TerminationManager
TerminationTermCfg
CurriculumManager
CurriculumTermCfg
Scene Entity
------------
.. autoclass:: SceneEntityCfg
:members:
:exclude-members: __init__
Manager Base
------------
.. autoclass:: ManagerBase
:members:
.. autoclass:: ManagerTermBase
:members:
.. autoclass:: ManagerTermBaseCfg
:members:
:exclude-members: __init__
Observation Manager
-------------------
.. autoclass:: ObservationManager
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: ObservationGroupCfg
:members:
:exclude-members: __init__
.. autoclass:: ObservationTermCfg
:members:
:exclude-members: __init__
Action Manager
--------------
.. autoclass:: ActionManager
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: ActionTerm
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: ActionTermCfg
:members:
:exclude-members: __init__
Randomization Manager
---------------------
.. autoclass:: RandomizationManager
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: RandomizationTermCfg
:members:
:exclude-members: __init__
Command Manager
---------------
.. autoclass:: CommandManager
:members:
.. autoclass:: CommandTerm
:members:
:exclude-members: __init__, class_type
.. autoclass:: CommandTermCfg
:members:
:exclude-members: __init__, class_type
Reward Manager
--------------
.. autoclass:: RewardManager
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: RewardTermCfg
:exclude-members: __init__
Termination Manager
-------------------
.. autoclass:: TerminationManager
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: TerminationTermCfg
:members:
:exclude-members: __init__
Curriculum Manager
------------------
.. autoclass:: CurriculumManager
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: CurriculumTermCfg
:members:
:exclude-members: __init__
| 2,518 |
reStructuredText
| 16.253425 | 42 | 0.640985 |
John-Dillermand/Isaac-Orbit/orbit/docs/source/deployment/docker.rst
|
.. _deployment-docker:
Docker Guide
============
.. caution::
Due to the dependency on Isaac Sim docker image, by running this container you are implicitly
agreeing to the `NVIDIA Omniverse EULA`_. If you do not agree to the EULA, do not run this container.
Setup Instructions
------------------
.. note::
The following steps are taken from the NVIDIA Omniverse Isaac Sim documentation on `container installation`_.
They have been added here for the sake of completeness.
Docker and Docker Compose
~~~~~~~~~~~~~~~~~~~~~~~~~
We have tested the container using Docker Engine version 24.0.2 and Docker Compose version 2.18.1.
We recommend using these versions or newer.
* To install Docker, please follow the instructions for your operating system on the `Docker website`_.
* To install Docker Compose, please follow the instructions for your operating system on the `docker-compose`_ page.
* Follow the post-installation steps for Docker on the `post-installation steps`_ page. These steps allow you to run
Docker without using ``sudo``.
* To build and run GPU-accelerated containers, you also need to install the `NVIDIA Container Toolkit`_.
Please follow the instructions on the `Container Toolkit website`_ for installation steps.
Obtaining the Isaac Sim Container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Get access to the `Isaac Sim container`_ by joining the NVIDIA Developer Program.
* Generate your `NGC API key`_ to access locked container images from NVIDIA GPU Cloud (NGC).
* This step requires you to create an NGC account if you do not already have one.
* You would also need to install the NGC CLI to perform operations from the command line.
* Once you have your generated API key and have installed the NGC CLI, you need to log in to NGC
from the terminal.
.. code:: bash
ngc config set
* Use the command line to pull the Isaac Sim container image from NGC.
.. code:: bash
docker login nvcr.io
* For the username, enter ``$oauthtoken`` exactly as shown. It is a special username that is used to
authenticate with NGC.
.. code:: text
Username: $oauthtoken
Password: <Your NGC API Key>
Directory Organization
----------------------
The root of the Orbit repository contains the ``docker`` directory that has various files and scripts
needed to run Orbit inside a Docker container. These are summarized below:
* ``Dockerfile``: Defines the orbit image by overlaying Orbit dependencies onto the Isaac Sim Docker image.
* ``docker-compose.yaml``: Creates mounts to allow direct editing of Orbit code from the host machine that runs
the container, along with X11 forwarding. It also creates several named volumes, such as ``isaac-cache-kit``, to store frequently
re-used resources compiled by Isaac Sim, such as shaders, and to retain logs, data, and documents.
* ``.env``: Stores environment variables required for the build process and the container itself.
* ``container.sh``: A script that wraps the ``docker-compose`` command to build the image and run the container.
Running the Container
---------------------
.. note::
The docker container copies all the files from the repository into the container at the
location ``/workspace/orbit`` at build time. This means that any changes made to the files in the container would not
normally be reflected in the repository after the image has been built, i.e. after ``./container.sh start`` is run.
For a faster development cycle, we mount the following directories in the Orbit repository into the container
so that you can edit their files from the host machine:
* ``source``: This is the directory that contains the Orbit source code.
* ``docs``: This is the directory that contains the source code for Orbit documentation. This is overlaid except
for the ``_build`` subdirectory where build artifacts are stored.
The script ``container.sh`` wraps around four basic ``docker-compose`` commands:
1. ``start``: This builds the image and brings up the container in detached mode (i.e. in the background).
2. ``enter``: This begins a new bash process in an existing orbit container, which can be exited
   without bringing down the container.
3. ``copy``: This copies the ``logs``, ``data_storage`` and ``docs/_build`` artifacts from the ``orbit-logs``, ``orbit-data`` and ``orbit-docs``
volumes respectively, to the ``docker/artifacts`` directory. These artifacts persist between docker
container instances.
4. ``stop``: This brings down the container and removes it.
The following shows how to launch the container in a detached state and enter it:
.. code:: bash
# Launch the container in detached mode
./docker/container.sh start
# Enter the container
./docker/container.sh enter
To copy files from the container to the host machine, you can use the following command:
.. code:: bash
# Copy the file /workspace/orbit/logs to the current directory
docker cp orbit:/workspace/orbit/logs .
The script ``container.sh`` provides a wrapper around this command to copy the ``logs``, ``data_storage`` and ``docs/_build``
directories to the ``docker/artifacts`` directory. This is useful for copying the logs, data and documentation:
.. code:: bash
# Copy the logs, data_storage and docs/_build directories to the docker/artifacts directory
./docker/container.sh copy
To stop the container, you can use the following command:
.. code:: bash
# stop the container
./docker/container.sh stop
Python Interpreter
~~~~~~~~~~~~~~~~~~
The container uses the Python interpreter provided by Isaac Sim. This interpreter is located at
``/isaac-sim/python.sh``. We set aliases inside the container to make it easier to run the Python
interpreter. You can use the following commands to run the Python interpreter:
.. code:: bash
# Run the Python interpreter -> points to /isaac-sim/python.sh
python
Understanding the mounted volumes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``docker-compose.yaml`` file creates several named volumes that are mounted to the container.
These are summarized below:
* ``isaac-cache-kit``: This volume is used to store cached Kit resources (``/isaac-sim/kit/cache`` in container)
* ``isaac-cache-ov``: This volume is used to store cached OV resources (``/root/.cache/ov`` in container)
* ``isaac-cache-pip``: This volume is used to store cached pip resources (``/root/.cache/pip`` in container)
* ``isaac-cache-gl``: This volume is used to store cached GLCache resources (``/root/.cache/nvidia/GLCache`` in container)
* ``isaac-cache-compute``: This volume is used to store cached compute resources (``/root/.nv/ComputeCache`` in container)
* ``isaac-logs``: This volume is used to store logs generated by Omniverse. (``/root/.nvidia-omniverse/logs`` in container)
* ``isaac-carb-logs``: This volume is used to store logs generated by carb. (``/isaac-sim/kit/logs/Kit/Isaac-Sim`` in container)
* ``isaac-data``: This volume is used to store data generated by Omniverse. (``/root/.local/share/ov/data`` in container)
* ``isaac-docs``: This volume is used to store documents generated by Omniverse. (``/root/Documents`` in container)
* ``orbit-docs``: This volume is used to store documentation of Orbit when built inside the container. (``/workspace/orbit/docs/_build`` in container)
* ``orbit-logs``: This volume is used to store logs generated by Orbit workflows when run inside the container. (``/workspace/orbit/logs`` in container)
* ``orbit-data``: This volume is used to store whatever data users may want to preserve between container runs. (``/workspace/orbit/data_storage`` in container)
To view the contents of these volumes, you can use the following command:
.. code:: bash
# list all volumes
docker volume ls
# inspect a specific volume, e.g. isaac-cache-kit
docker volume inspect isaac-cache-kit
Known Issues
------------
Invalid mount config for type "bind"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you see the following error when building the container:
.. code:: text
⠋ Container orbit Creating 0.0s
Error response from daemon: invalid mount config for type "bind": bind source path does not exist: ${HOME}/.Xauthority
This means that the ``.Xauthority`` file is not present in the home directory of the host machine.
The portion of the docker-compose.yaml that enables this is commented out by default, so this shouldn't
happen unless it has been altered. This file is required for X11 forwarding to work. To fix this, you can
create an empty ``.Xauthority`` file in your home directory.
.. code:: bash
touch ${HOME}/.Xauthority
A similar error can appear but requires a different fix:
.. code:: text
⠋ Container orbit Creating 0.0s
Error response from daemon: invalid mount config for type "bind": bind source path does not exist: /tmp/.X11-unix
This means that the folder/files are either not present or not accessible on the host machine.
The portion of the docker-compose.yaml that enables this is commented out by default, so this
shouldn't happen unless it has been altered. This usually happens when you have multiple docker
versions installed on your machine. To fix this, you can try the following:
* Remove all docker versions from your machine.
.. code:: bash
sudo apt remove docker*
sudo apt remove docker docker-engine docker.io containerd runc docker-desktop docker-compose-plugin
sudo snap remove docker
sudo apt clean autoclean && sudo apt autoremove --yes
* Install the latest version of docker based on the instructions in the setup section.
WebRTC and WebSocket Streaming
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When streaming the GUI from Isaac Sim, there are `several streaming clients`_ available. There is a `known issue`_ when
attempting to use WebRTC streaming client on Google Chrome and Safari while running Isaac Sim inside a container.
To avoid this problem, we suggest using either the Native Streaming Client or WebSocket options, or using the
Mozilla Firefox browser on which WebRTC works.
Streaming is the only supported method for visualizing the Isaac GUI from within the container. The Omniverse Streaming Client
is freely available from the Omniverse app, and is easy to use. The other streaming methods similarly require only a web browser.
If users want to use X11 forwarding in order to have the apps behave as local GUI windows, they can uncomment the relevant portions
in docker-compose.yaml.
.. _`NVIDIA Omniverse EULA`: https://docs.omniverse.nvidia.com/platform/latest/common/NVIDIA_Omniverse_License_Agreement.html
.. _`container installation`: https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_container.html
.. _`Docker website`: https://docs.docker.com/desktop/install/linux-install/
.. _`docker-compose`: https://docs.docker.com/compose/install/linux/#install-using-the-repository
.. _`NVIDIA Container Toolkit`: https://github.com/NVIDIA/nvidia-container-toolkit
.. _`Container Toolkit website`: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
.. _`post-installation steps`: https://docs.docker.com/engine/install/linux-postinstall/
.. _`Isaac Sim container`: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim
.. _`NGC API key`: https://docs.nvidia.com/ngc/gpu-cloud/ngc-user-guide/index.html#generating-api-key
.. _`several streaming clients`: https://docs.omniverse.nvidia.com/isaacsim/latest/installation/manual_livestream_clients.html
.. _`known issue`: https://forums.developer.nvidia.com/t/unable-to-use-webrtc-when-i-run-runheadless-webrtc-sh-in-remote-headless-container/222916
| 12,008 |
reStructuredText
| 47.228915 | 204 | 0.71569 |
leith-bartrich/kit-exts-fie-pypathextract/README.md
|
# FIE Extensions Path Extractor
This is an extension for NVidia Omniverse.
It's a kludge, useful for some development tasks.
It's simple. It spawns a window with some buttons. Those buttons allow a developer running a "Kit"-based
application to save a text file containing paths to the extensions currently available to the Kit Extension Manager.
This is useful for getting a Python IDE to "see" extensions that are not in the Python path by default,
but are added to the Python path at the last moment by Kit, when it fully resolves the configured
extensions.
I originally designed it to allow my instance of PyCharm to index python code inside extensions (to allow code-completion
to function.)
There are buttons to dump "all" extensions and alternatively "enabled" extensions.
If you relink the app, or if the kit app downloads newer versions of extensions from a repository, the file you dump will become outdated and need to be regenerated. This is unavoidable. Kit can and does dynamically manage its extensions based on the given config (.toml or .kit) file at every run. And further, since it can use network-bound repositories, it's possible that a change in network services data will change your extension paths. So, when your code-completion breaks, it's probably time to run the extension and generate a new .pth file.
It's worth noting here that the same is true of the extension paths that NVidia puts inside the .vscode project they generate when creating an extension template (./.vscode/settings.json). That listing of folders can easily fall out of date. But NVidia provides no solution for updating that list after the initial template generation.
I recommend dumping "all" extensions rather than "enabled." For code-completion purposes it's better to index all that's available to the runtime under all configurations, rather than all that's currently configured by the running .toml or .kit file.
My hope is that NVidia will make this extension obsolete by providing better support for code-completion and indexing within Kit, in the long run.
# Usage within an IDE environment
Making a particular Python IDE "see" the extensions with this file is beyond the scope of this document. But some hints on Windows:
- Kit has a python.bat in the root. My app's python.bat calls it:
```
@echo off
SET PYTHONPATH=%PYTHONPATH%;.\python_app_site\
.\app\python.bat %*
```
- I created a directory "python_app_site" in my app.
- if a ".pth" text file, containing paths is found in a python's "site" package directory, it will automatically add those
paths to the python path. So I dump my .pth file from the extension into that dir.
That alone is not enough. Because just adding to the PYTHONPATH doesn't qualify the directory as a "site."
- documentation for python's "site" module reveals information about "sitecustomize" and "usercustomize" module
imports, which can customize a Python instance. I have a sitecustomize.py file in that dir. It runs automatically, as per the documentation, and it explicitly re-adds the directory as a "site." This action is what causes Python
to read the .pth text file and use it.
```
#get our path
import os
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
app_dir_path = os.path.dirname(dir_path)
#get site module and add our app site
import site
print("adding app site dir: " + dir_path)
site.addsitedir(dir_path)
#try and get kit's site dir run sitecustomize
# kit_site_dir_path = os.path.join(app_dir_path,"app","site")
# if not os.path.exists(kit_site_dir_path):
# kit_site_dir_path = os.path.join(app_dir_path,"app","kit","site")
# kit_site_dir_customize_path = os.path.join(kit_site_dir_path,"sitecustomize.py")
# if os.path.exists(kit_site_dir_customize_path):
# print("running kit sitecustomize: " + kit_site_dir_customize_path)
# exec(open(kit_site_dir_customize_path).read())
```
- Point the IDE to the app's python.bat as its "main interpreter," or as the interpreter to use to index or code-complete, and it should be able to index. It'll see the whole of the omni.* packages found, and the like. In theory, one could also generate stubs by using the python interpreter directly (hint: WingIDE users).
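For concreteness, the dumped ".pth" file is plain text with one extension path per line, e.g. (hypothetical paths for illustration):

```
C:/users/dev/ov/app/kit/exts/omni.kit.commands
C:/users/dev/ov/app/kit/exts/omni.ui
C:/users/dev/ov/app/kit/extscore/omni.client
```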
Note: the above all assumes the "linked app" is actually Kit, rather than Code or Create. You might need to adjust pathing if you are developing through Code or Create. As in: not "./app/python.bat" but rather "./app/kit/python.bat"
# A word on sitecustomize
I really hate using sitecustomize here. But I also hate even more that Kit uses it.
sitecustomize can only be used once, in any python runtime. And it is meant for the usage of the "site." Meaning: it's for configuration management. Meaning: it should be reserved for sysadmins and dev_ops. It should NOT be consumed by the application. The application isn't your sysadmin. It is meant to be configured by the sysadmin. NVidia is using a sitecustomize in their Kit python.bat system. And they really shouldn't.
I, in turn, am suggesting that you, a developer (who is usually a sysadmin in your own world), should use it; or a usercustomize. I think that suggestion is reasonable, because you are configuring a development environment.
You'll see I commented some code that tries to run NVidia's sitecustomize anyway. But if you take a look at theirs, you'll see it's redundant. It's just adding static paths to statically configured extensions. Mine will find those extensions anyway. So they're not needed when using this solution.
# License
This software is provided under the New BSD license. See LICENSE.txt.
Portions of the code-base come from NVidia and are licensed under the Apache 2.0 license or other open source licenses. e.g. the contents of the ./tools directories.
# Included NVidia documentation
Documentation below is from NVidia's original template project for general usage of extensions.
# Extension Project Template
This project was automatically generated.
- `app` - It is a folder link to the location of your *Omniverse Kit* based app.
- `exts` - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest that you install a few extensions to make the Python experience better.
Look for "us.fie.omni.ext.pypathextract" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
Alternatively, you can launch your app from console with this folder added to search path and your extension enabled, e.g.:
```
> app\omni.code.bat --ext-folder exts --enable company.hello.world
```
# App Link Setup
If the `app` folder link doesn't exist or is broken, it can be created again. For a better developer experience, it is recommended to create a folder link named `app` to the *Omniverse Kit* app installed from the *Omniverse Launcher*. A convenience script is included.
Run:
```
> link_app.bat
```
If successful you should see `app` folder link in the root of this repo.
If multiple Omniverse apps are installed, the script will select the recommended one. Or you can explicitly pass an app:
```
> link_app.bat --app create
```
You can also just pass a path to create the link to:
```
> link_app.bat --path "C:/Users/bob/AppData/Local/ov/pkg/create-2021.3.4"
```
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
Link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
Notice that `exts` is the repo subfolder containing extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developers manual.
To add a link to your *Omniverse Kit* based app go into: Extension Manager -> Gear Icon -> Extension Search Path
| 7,986 |
Markdown
| 54.465277 | 549 | 0.765339 |
leith-bartrich/kit-exts-fie-pypathextract/exts/us.fie.omni.ext.pypathextract/us/fie/omni/ext/pypathextract/extension.py
|
import omni.ext
import omni.ui as ui
import omni.kit
import omni.kit.app
import omni.kit.extensions
import omni.kit.mainwindow
import omni.kit.window.filepicker
import os
import os.path
import carb
import carb.tokens
import carb.settings
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
_save_win:omni.kit.window.filepicker.FilePickerDialog = None
_window:ui.Window = None
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[us.fie.omni.ext.pypathextract] FIE Python_Path Extractor startup")
menu_path = f"Window/FIE Ext Path Extractor"
self._menu = omni.kit.ui.get_editor_menu().add_item(menu_path,self.on_menu_click, True)
def make_window(self):
win_flags = ui.WINDOW_FLAGS_NO_CLOSE
self._window = ui.Window("FIE Python_Path Extractor", width=300, height=300,flags=win_flags)
with self._window.frame:
with ui.VStack(height=0):
with ui.CollapsableFrame("Save Enabled"):
ui.Button("to .pth", clicked_fn=self.extract_enabled_exts_to_bat)
with ui.CollapsableFrame("Save All"):
ui.Button("to .pth", clicked_fn=self.extract_all_exts_to_bat)
def on_menu_click(self, menu, toggled):
if self._window is None:
self.make_window()
self._window.visible = toggled
def on_shutdown(self):
print("[us.fie.omni.ext.pypathextract] FIE Python_Path Extractor shutdown")
omni.kit.ui.get_editor_menu().remove_item(self._menu)
if self._window is not None:
self._window.destroy()
def save_all_clicked(self, f_name:str, dir_name:str):
self._save_win.hide()
if (f_name == ""):
return
if (dir_name == ""):
return
write_path = os.path.join(dir_name,f_name)
app = omni.kit.app.get_app_interface()
exman = app.get_extension_manager()
extensions = exman.get_extensions()
lines = []
for ext in extensions:
line = self.ext_to_line(ext) + "\n"
lines.append(line)
print("Writing all to: " + write_path)
with open(write_path,'wt') as f:
f.writelines(lines)
def save_enabled_clicked(self, f_name:str,dir_name:str):
self._save_win.hide()
if (f_name == ""):
return
if (dir_name == ""):
return
write_path = os.path.join(dir_name,f_name)
app = omni.kit.app.get_app_interface()
exman = app.get_extension_manager()
extensions = exman.get_extensions()
lines = []
for ext in extensions:
            if ext['enabled']:
line = self.ext_to_line(ext) + "\n"
lines.append(line)
print("Writing enabled to: " + write_path)
with open(write_path,'wt') as f:
f.writelines(lines)
def ext_to_line(self, ext) -> str:
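        # include the extension's pip_prebundle directory first (if present) so its bundled pip packages resolve too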
ret = ""
pre_bundle_dir = '/'.join([ext['path'],"pip_prebundle"])
if os.path.exists(pre_bundle_dir):
ret = pre_bundle_dir + "\n"
ret += ext['path']
return ret
def make_save_win(self):
app_path = carb.tokens.get_tokens_interface().resolve("${app}")
self._save_win = omni.kit.window.filepicker.FilePickerDialog(title="Save Ext Paths to File", current_directory=app_path,item_filter_options=['*.pth'])
self._save_win.navigate_to(app_path)
def extract_all_exts_to_bat(self):
if self._save_win is None:
self.make_save_win()
self._save_win.set_click_apply_handler(self.save_all_clicked)
self._save_win.show()
def extract_enabled_exts_to_bat(self):
if self._save_win is None:
self.make_save_win()
self._save_win.set_click_apply_handler(self.save_enabled_clicked)
self._save_win.show()
| 4,341 |
Python
| 30.23741 | 158 | 0.601705 |
leith-bartrich/kit-exts-fie-pypathextract/exts/us.fie.omni.ext.pypathextract/config/extension.toml
|
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "FIE PythonPath Extractor"
description="Used to extract a useful set of extension directories for populating PYTHONPATH in some development scenarios."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Other"
# Keywords for the extension
keywords = ["kit", "fie", "dev","extensions","python"]
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.window.filepicker" = {}
# Main python module this extension provides, it will be publicly available as "import us.fie.omni.ext.pypathextract".
[[python.module]]
name = "us.fie.omni.ext.pypathextract"
| 902 |
TOML
| 29.099999 | 124 | 0.742794 |
KallPap/FRL-SHAC-Extension/README.md
|
# SHAC
This repository contains the implementation for the paper [Accelerated Policy Learning with Parallel Differentiable Simulation](https://short-horizon-actor-critic.github.io/) (ICLR 2022).
In this paper, we present a GPU-based differentiable simulation and propose a policy learning method named SHAC leveraging the developed differentiable simulation. We provide a comprehensive benchmark set for policy learning with differentiable simulation. The benchmark set contains six robotic control problems for now as shown in the figure below.
<p align="center">
<img src="figures/envs.png" alt="envs" width="800" />
</p>
## Installation
- `git clone https://github.com/NVlabs/DiffRL.git --recursive`
- The code has been tested on
- Operating System: Ubuntu 16.04, 18.04, 20.04, 21.10, 22.04
- Python Version: 3.7, 3.8
  - GPU: TITAN X, GTX 1080, RTX 2080, RTX 3080, RTX 3090, RTX 3090 Ti
#### Prerequisites
- In the project folder, create a virtual environment in Anaconda:
```
conda env create -f diffrl_conda.yml
conda activate shac
```
- dflex
```
cd dflex
pip install -e .
```
- rl_games, forked from [rl-games](https://github.com/Denys88/rl_games) (used for PPO and SAC training):
```
cd externals/rl_games
pip install -e .
```
- Install an older version of protobuf required for TensorboardX:
```
pip install protobuf==3.20.0
```
#### Test Examples
A test example can be found in the `examples` folder.
```
python test_env.py --env AntEnv
```
If the console outputs `Finish Successfully` on the last line, the installation succeeded.
## Training
Running the following command in the `examples` folder trains Ant with SHAC.
```
python train_shac.py --cfg ./cfg/shac/ant.yaml --logdir ./logs/Ant/shac
```
We also provide a one-line script, `examples/train_script.py`, to replicate the results reported in the paper for both our method and the baseline methods. The results might differ slightly from the paper due to the randomness of CUDA and different Operating System/GPU/Python versions. The plot reported in the paper was produced with a TITAN X on Ubuntu 16.04.
#### SHAC (Our Method)
For example, running the following commands in the `examples` folder trains the Ant and SNU Humanoid (Humanoid MTU in the paper) environments with SHAC, each for 5 individual seeds.
```
python train_script.py --env Ant --algo shac --num-seeds 5
```
```
python train_script.py --env SNUHumanoid --algo shac --num-seeds 5
```
#### Baseline Algorithms
For example, running the following command in the `examples` folder trains the Ant environment with the PPO implementation from RL_games for 5 individual seeds:
```
python train_script.py --env Ant --algo ppo --num-seeds 5
```
## Testing
To test the trained policy, you can pass the policy checkpoint to the training script and use the `--play` flag to indicate it is for testing. For example, the following command tests a trained policy (assuming the policy is located at `logs/Ant/shac/policy.pt`):
```
python train_shac.py --cfg ./cfg/shac/ant.yaml --checkpoint ./logs/Ant/shac/policy.pt --play [--render]
```
The `--render` flag indicates whether to export the video of the task execution. If set, the exported video is encoded in `.usd` format and stored in the `examples/output` folder. To visualize the exported `.usd` file, refer to [USD at NVIDIA](https://developer.nvidia.com/usd).
## Citation
If you find our paper or code is useful, please consider citing:
```bibtex
@inproceedings{xu2021accelerated,
title={Accelerated Policy Learning with Parallel Differentiable Simulation},
author={Xu, Jie and Makoviychuk, Viktor and Narang, Yashraj and Ramos, Fabio and Matusik, Wojciech and Garg, Animesh and Macklin, Miles},
booktitle={International Conference on Learning Representations},
  year={2022}
}
```
| 3,885 |
Markdown
| 34.327272 | 370 | 0.73668 |
KallPap/FRL-SHAC-Extension/examples/train_shac_checkpoint_gd.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# gradient-based policy optimization by actor critic method
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import argparse
import envs
import algorithms.shac_checkpoint_gd as shac_checkpoint_gd
import yaml
import torch
import numpy as np
import copy
from utils.common import *
def parse_arguments(description="Testing Args", custom_parameters=()):
    parser = argparse.ArgumentParser(description=description)
for argument in custom_parameters:
if ("name" in argument) and ("type" in argument or "action" in argument):
help_str = ""
if "help" in argument:
help_str = argument["help"]
if "type" in argument:
if "default" in argument:
parser.add_argument(argument["name"], type=argument["type"], default=argument["default"], help=help_str)
else:
print("ERROR: default must be specified if using type")
elif "action" in argument:
parser.add_argument(argument["name"], action=argument["action"], help=help_str)
else:
print()
print("ERROR: command line argument name, type/action must be defined, argument not added to parser")
print("supported keys: name, type, default, action, help")
print()
args = parser.parse_args()
if args.test:
args.play = args.test
args.train = False
elif args.play:
args.train = False
else:
args.train = True
return args
def get_args(): # TODO: delve into the arguments
custom_parameters = [
{"name": "--test", "action": "store_true", "default": False,
"help": "Run trained policy, no training"},
{"name": "--cfg", "type": str, "default": "./cfg/shac_checkpoint_gd/ant.yaml",
"help": "Configuration file for training/playing"},
{"name": "--play", "action": "store_true", "default": False,
"help": "Run trained policy, the same as test"},
{"name": "--checkpoint", "type": str, "default": "Base",
"help": "Path to the saved weights"},
{"name": "--logdir", "type": str, "default": "logs/tmp/shac_checkpoint_gd/"},
{"name": "--save-interval", "type": int, "default": 0},
{"name": "--no-time-stamp", "action": "store_true", "default": False,
"help": "whether not add time stamp at the log path"},
{"name": "--device", "type": str, "default": "cuda:0"},
{"name": "--seed", "type": int, "default": 0, "help": "Random seed"},
{"name": "--render", "action": "store_true", "default": False,
"help": "whether generate rendering file."}]
# parse arguments
args = parse_arguments(
description="SHAC_CHECKPOINT_GD",
custom_parameters=custom_parameters)
return args
if __name__ == '__main__':
args = get_args()
with open(args.cfg, 'r') as f:
cfg_train = yaml.load(f, Loader=yaml.SafeLoader)
if args.play or args.test:
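        # when only evaluating a policy, override num_actors with the player setting (default: 1)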
cfg_train["params"]["config"]["num_actors"] = cfg_train["params"]["config"].get("player", {}).get("num_actors", 1)
if not args.no_time_stamp:
args.logdir = os.path.join(args.logdir, get_time_stamp())
args.device = torch.device(args.device)
vargs = vars(args)
cfg_train["params"]["general"] = {}
for key in vargs.keys():
cfg_train["params"]["general"][key] = vargs[key]
traj_optimizer = shac_checkpoint_gd.SHAC(cfg_train)
if args.train:
traj_optimizer.train()
else:
traj_optimizer.play(cfg_train)
| 4,109 |
Python
| 34.73913 | 124 | 0.607447 |
KallPap/FRL-SHAC-Extension/examples/combine_batch_logs.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
based on https://stackoverflow.com/questions/43068200/how-to-display-the-average-of-multiple-runs-on-tensorboard
'''
import os
from collections import defaultdict
import numpy as np
import shutil
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from tensorboardX import SummaryWriter
import argparse
tag_mapping = {#'rewards0/frame': 'policy_loss/step', 'rewards0/iter': 'policy_loss/iter', 'rewards0/time': 'policy_loss/time',
'rewards0/frame': 'rewards/step', 'rewards0/iter': 'rewards/iter', 'rewards0/time': 'rewards/time',
# 'rewards/frame': 'policy_loss/step', 'rewards/iter': 'policy_loss/iter', 'rewards/time': 'policy_loss/time',
'rewards/frame': 'rewards/step', 'rewards/step': 'rewards/step', 'rewards/iter': 'rewards/iter', 'rewards/time': 'rewards/time',
'policy_loss/step': 'policy_loss/step', 'policy_loss/iter': 'policy_loss/iter', 'policy_loss/time': 'policy_loss/time',
'actor_loss/iter': 'actor_loss/iter', 'actor_loss/step': 'actor_loss/step',
# 'policy_loss/step': 'rewards/step', 'policy_loss/iter': 'rewards/iter', 'policy_loss/time': 'rewards/time',
'training_loss/step': 'training_loss/step', 'training_loss/iter': 'training_loss/iter', 'training_loss/time': 'training_loss/time',
'best_policy_loss/step': 'best_policy_loss/step',
'episode_lengths/iter': 'episode_lengths/iter', 'episode_lengths/step': 'episode_lengths/step', 'episode_lengths/frame': 'episode_lengths/step',
'value_loss/step': 'value_loss/step', 'value_loss/iter': 'value_loss/iter'}
def tabulate_events(dpath):
summary_iterators = []
for dname in os.listdir(dpath):
for subfolder_name in args.subfolder_names:
if os.path.exists(os.path.join(dpath, dname, subfolder_name)):
summary_iterators.append(EventAccumulator(os.path.join(dpath, dname, subfolder_name)).Reload())
break
tags = summary_iterators[0].Tags()['scalars']
# for it in summary_iterators:
# assert it.Tags()['scalars'] == tags
out_values = dict()
out_steps = dict()
for tag in tags:
if tag not in tag_mapping.keys():
continue
# gathering steps
steps_set = set()
for summary in summary_iterators:
for event in summary.Scalars(tag):
steps_set.add(event.step)
        steps = list(steps_set)
steps.sort()
# steps = steps[:500]
new_tag_name = tag_mapping[tag]
out_values[new_tag_name] = np.zeros((len(steps), len(summary_iterators)))
out_steps[new_tag_name] = np.array(steps)
for summary_id, summary in enumerate(summary_iterators):
events = summary.Scalars(tag)
i = 0
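            # for each step, advance i so that events[i] is the last event at or before that step (carrying its value forward)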
for step_id, step in enumerate(steps):
while i + 1 < len(events) and events[i + 1].step <= step:
i += 1
# if events[i].value > 100000. or events[i].value < -100000.:
# import IPython
# IPython.embed()
out_values[new_tag_name][step_id, summary_id] = events[i].value
return out_steps, out_values
def write_combined_events(dpath, acc_steps, acc_values, dname='combined'):
fpath = os.path.join(dpath, dname)
if os.path.exists(fpath):
shutil.rmtree(fpath)
writer = SummaryWriter(fpath)
tags = acc_values.keys()
for tag in tags:
for i in range(len(acc_values[tag])):
mean = np.array(acc_values[tag][i]).mean()
writer.add_scalar(tag, mean, acc_steps[tag][i])
writer.flush()
parser = argparse.ArgumentParser()
parser.add_argument('--batch-folder', type = str, default='path/to/batch/folder')
parser.add_argument('--subfolder-names', type = str, nargs = '+', default=['log', 'runs']) # 'runs' for rl
args = parser.parse_args()
dpath = args.batch_folder
acc_steps, acc_values = tabulate_events(dpath)
write_combined_events(dpath, acc_steps, acc_values)
| 4,683 |
Python
| 39.730434 | 160 | 0.62823 |
KallPap/FRL-SHAC-Extension/examples/test_env.py
|
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import torch
import random
import envs
from utils.common import *
import argparse
def set_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
parser = argparse.ArgumentParser()
parser.add_argument('--env', type = str, default = 'AntEnv')
parser.add_argument('--num-envs', type = int, default = 64)
parser.add_argument('--render', default = False, action = 'store_true')
args = parser.parse_args()
seeding()
env_fn = getattr(envs, args.env)
env = env_fn(num_envs = args.num_envs, \
device = 'cuda:0', \
render = args.render, \
seed = 0, \
stochastic_init = True, \
MM_caching_frequency = 16, \
no_grad = True)
obs = env.reset()
num_actions = env.num_actions
t_start = time.time()
reward_episode = 0.
for i in range(1000):
actions = torch.randn((args.num_envs, num_actions), device = 'cuda:0')
obs, reward, done, info = env.step(actions)
reward_episode += reward
t_end = time.time()
print('fps = ', 1000 * args.num_envs / (t_end - t_start))
print('mean reward = ', reward_episode.mean().detach().cpu().item())
print('Finish Successfully')
| 1,731 |
Python
| 25.646153 | 76 | 0.677643 |