Dataset columns:
file_path: string (lengths 20–202)
content: string (lengths 9–3.85M)
size: int64 (9–3.85M)
lang: string (9 classes)
avg_line_length: float64 (3.33–100)
max_line_length: int64 (8–993)
alphanum_fraction: float64 (0.26–0.93)
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/wrapTokens.cpp
//
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
//    names, trademarks, service marks, or product names of the Licensor
//    and its affiliates, except as required to comply with Section 4(c) of
//    the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
// GENERATED FILE. DO NOT EDIT.
#include <boost/python/class.hpp>
#include ".//tokens.h"

PXR_NAMESPACE_USING_DIRECTIVE

namespace {

// Helper to return a static token as a string. We wrap tokens as Python
// strings and for some reason simply wrapping the token using def_readonly
// bypasses to-Python conversion, leading to the error that there's no
// Python type for the C++ TfToken type. So we wrap this functor instead.
class _WrapStaticToken {
public:
    _WrapStaticToken(const TfToken* token) : _token(token) { }

    std::string operator()() const
    {
        return _token->GetString();
    }

private:
    const TfToken* _token;
};

template <typename T>
void
_AddToken(T& cls, const char* name, const TfToken& token)
{
    cls.add_static_property(name,
                            boost::python::make_function(
                                _WrapStaticToken(&token),
                                boost::python::return_value_policy<
                                    boost::python::return_by_value>(),
                                boost::mpl::vector1<std::string>()));
}

} // anonymous

void wrapOmniWarpSceneIndexTokens()
{
    boost::python::class_<OmniWarpSceneIndexTokensType, boost::noncopyable>
        cls("Tokens", boost::python::no_init);
    _AddToken(cls, "warpDependentPrims", OmniWarpSceneIndexTokens->warpDependentPrims);
    _AddToken(cls, "warpSourceFile", OmniWarpSceneIndexTokens->warpSourceFile);
    _AddToken(cls, "OmniWarpComputationAPI", OmniWarpSceneIndexTokens->OmniWarpComputationAPI);
}
size: 2,626 · lang: C++ · avg_line_length: 35.999999 · max_line_length: 95 · alphanum_fraction: 0.690023
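A quick usage sketch of the wrapped tokens from Python, assuming the plugin's binding module is importable as OmniWarpSceneIndex (the module name and the printed token values are assumptions, not confirmed by this file):

from OmniWarpSceneIndex import Tokens   # hypothetical module name

# each static property converts the C++ TfToken to a Python string
print(Tokens.warpSourceFile)          # e.g. "warp:sourceFile"
print(Tokens.warpDependentPrims)      # e.g. "warp:dependentPrims"
print(Tokens.OmniWarpComputationAPI)  # e.g. "OmniWarpComputationAPI"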
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationAPIAdapter.cpp
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/tf/stringUtils.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/usdImaging/usdImaging/dataSourceAttribute.h>

#include "warpComputationAPIAdapter.h"
#include "warpComputationAPI.h"

PXR_NAMESPACE_OPEN_SCOPE

TF_DEFINE_PRIVATE_TOKENS(
    _tokens,
    (warpComputation)
    (sourceFile)
    (dependentPrims)
    (simulationParams)
);

TF_REGISTRY_FUNCTION(TfType)
{
    typedef WarpComputationAPIAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory< UsdImagingAPISchemaAdapterFactory<Adapter> >();
}

// ----------------------------------------------------------------------------

namespace {

class SimulationParamsDataSource : public HdSampledDataSource
{
public:
    HD_DECLARE_DATASOURCE(SimulationParamsDataSource);

    SimulationParamsDataSource(
        const VtDictionary &dict)
    : _customData(dict)
    {
    }

    VtValue GetValue(Time shutterOffset)
    {
        return VtValue(_customData);
    }

    bool GetContributingSampleTimesForInterval(
        Time startTime, Time endTime,
        std::vector<Time> * outSampleTimes)
    {
        return false;
    }

    VtDictionary _customData;
};

class DependentPrimsDataSource : public HdPathArrayDataSource
{
public:
    HD_DECLARE_DATASOURCE(DependentPrimsDataSource);

    DependentPrimsDataSource(
        const UsdRelationship &rel)
    : _usdRel(rel)
    {
    }

    VtValue GetValue(
        HdSampledDataSource::Time shutterOffset)
    {
        return VtValue(GetTypedValue(shutterOffset));
    }

    VtArray<SdfPath> GetTypedValue(
        HdSampledDataSource::Time shutterOffset)
    {
        SdfPathVector paths;
        _usdRel.GetForwardedTargets(&paths);
        VtArray<SdfPath> vtPaths(paths.begin(), paths.end());
        return vtPaths;
    }

    bool GetContributingSampleTimesForInterval(
        HdSampledDataSource::Time startTime,
        HdSampledDataSource::Time endTime,
        std::vector<HdSampledDataSource::Time> *outSampleTimes)
    {
        return false;
    }

private:
    UsdRelationship _usdRel;
};

HD_DECLARE_DATASOURCE_HANDLES(DependentPrimsDataSource);

class _WarpComputationDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_WarpComputationDataSource);

    _WarpComputationDataSource(
        const UsdPrim &prim,
        const UsdImagingDataSourceStageGlobals &stageGlobals)
    : _api(prim)
    , _stageGlobals(stageGlobals)
    {
    }

    TfTokenVector GetNames() override
    {
        TfTokenVector result;
        result.reserve(4);
        result.push_back(_tokens->warpComputation);
        if (UsdAttribute attr = _api.GetSourceFileAttr()) {
            result.push_back(_tokens->sourceFile);
            VtDictionary customData = attr.GetCustomData();
            VtDictionary::iterator iter = customData.begin();
            if (iter != customData.end()) {
                result.push_back(_tokens->simulationParams);
            }
        }
        if (_api.GetDependentPrimsRel()) {
            result.push_back(_tokens->dependentPrims);
        }
        return result;
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        if (name == _tokens->sourceFile) {
            if (UsdAttribute attr = _api.GetSourceFileAttr()) {
                return UsdImagingDataSourceAttributeNew(attr, _stageGlobals);
            }
        } else if (name == _tokens->dependentPrims) {
            if (UsdRelationship rel = _api.GetDependentPrimsRel()) {
                return DependentPrimsDataSource::New(rel);
            }
        } else if (name == _tokens->simulationParams) {
            if (UsdAttribute attr = _api.GetSourceFileAttr()) {
                VtDictionary customData = attr.GetCustomData();
                VtDictionary::iterator iter = customData.begin();
                if (iter != customData.end()) {
                    return SimulationParamsDataSource::New(customData);
                }
            }
        }
        return nullptr;
    }

private:
    OmniWarpSceneIndexWarpComputationAPI _api;
    const UsdImagingDataSourceStageGlobals &_stageGlobals;
};

HD_DECLARE_DATASOURCE_HANDLES(_WarpComputationDataSource);

} // anonymous namespace

// ----------------------------------------------------------------------------

HdContainerDataSourceHandle
WarpComputationAPIAdapter::GetImagingSubprimData(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals &stageGlobals)
{
    OmniWarpSceneIndexWarpComputationAPI _api(prim);
    std::string pythonModuleName;
    UsdAttribute attr = _api.GetSourceFileAttr();
    attr.Get(&pythonModuleName, 0.f);

    if (pythonModuleName.length()) {
        return HdRetainedContainerDataSource::New(
            _tokens->warpComputation,
            _WarpComputationDataSource::New(prim, stageGlobals));
    }
    return nullptr;
}

#if PXR_VERSION < 2308
HdDataSourceLocatorSet
WarpComputationAPIAdapter::InvalidateImagingSubprim(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    TfTokenVector const& properties)
#else
HdDataSourceLocatorSet
WarpComputationAPIAdapter::InvalidateImagingSubprim(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    TfTokenVector const& properties,
    const UsdImagingPropertyInvalidationType invalidationType)
#endif
{
#if 0
    if (!subprim.IsEmpty() || appliedInstanceName.IsEmpty()) {
        return HdDataSourceLocatorSet();
    }

    std::string prefix = TfStringPrintf(
        "collections:%s:", appliedInstanceName.data());

    for (const TfToken &propertyName : properties) {
        if (TfStringStartsWith(propertyName.GetString(), prefix)) {
            return HdDataSourceLocator(
                _tokens->usdCollections, appliedInstanceName);
        }
    }
#endif
    return HdDataSourceLocatorSet();
}

PXR_NAMESPACE_CLOSE_SCOPE
size: 6,767 · lang: C++ · avg_line_length: 25.4375 · max_line_length: 79 · alphanum_fraction: 0.645338
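The adapter only publishes a warpComputation container when warp:sourceFile is authored on a prim with the applied API schema. A minimal authoring sketch under those assumptions (the warp:dependentPrims relationship name is inferred from the warpDependentPrims token and is not confirmed by this file):

from pxr import Usd, Sdf

stage = Usd.Stage.CreateInMemory()
mesh = stage.DefinePrim("/World/Cloth", "Mesh")

# apply the API schema by name; requires the generated schema plugin to be registered
mesh.AddAppliedSchema("OmniWarpComputationAPI")

# name of the Python module the scene index will import and drive
attr = mesh.CreateAttribute("warp:sourceFile", Sdf.ValueTypeNames.String)
attr.Set("cloth")

# optional collider geometry fed to the sim as dependent prims (name is an assumption)
rel = mesh.CreateRelationship("warp:dependentPrims")
rel.AddTarget("/World/Collider")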
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpPythonModule.cpp
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/tf/pyInvoke.h>
#include <pxr/base/tf/errorMark.h>
#include <pxr/base/tf/pyExceptionState.h>
#include <pxr/base/tf/pyInterpreter.h>
#include <pxr/imaging/hd/tokens.h>

#include "warpPythonModule.h"
#include "tokens.h"

PXR_NAMESPACE_OPEN_SCOPE

OmniWarpPythonModule::OmniWarpPythonModule(const SdfPath &primPath,
    const std::string& moduleName, UsdImagingStageSceneIndexConstRefPtr usdImagingSi)
    : _primPath(primPath),
      _moduleName(moduleName),
      _usdImagingSi(usdImagingSi)
{
}

OmniWarpPythonModule::~OmniWarpPythonModule()
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "terminate_sim", &result, _primPath);
}

void OmniWarpPythonModule::InitMesh(VtIntArray indices, VtVec3fArray vertices,
    VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams)
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "initialize_sim_mesh", &result,
        _primPath, indices, vertices, depIndices, depVertices, simParams);
}

void OmniWarpPythonModule::InitParticles(
    VtVec3fArray positions, VtIntArray depIndices,
    VtVec3fArray depVertices, VtDictionary simParams)
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "initialize_sim_particles", &result,
        _primPath, positions, depIndices, depVertices, simParams);
}

VtVec3fArray OmniWarpPythonModule::ExecSim(VtDictionary simParams)
{
    return ExecSim(simParams, VtVec3fArray());
}

VtVec3fArray OmniWarpPythonModule::ExecSim(VtDictionary simParams,
    VtVec3fArray dependentVertices)
{
    TfPyLock pyLock;
    boost::python::object result;
    float dt = 0.f;
    if (_usdImagingSi) {
        dt = _usdImagingSi->GetTime().GetValue();
    }
    if (TfPyInvokeAndReturn(_moduleName.c_str(), "exec_sim", &result,
            _primPath, dt, dependentVertices, simParams)) {
        boost::python::extract<VtVec3fArray> theResults(result);
        if (theResults.check()) {
            return theResults();
        }
    }
    return VtVec3fArray();
}

PXR_NAMESPACE_CLOSE_SCOPE
size: 2,735 · lang: C++ · avg_line_length: 30.090909 · max_line_length: 115 · alphanum_fraction: 0.729068
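TfPyInvokeAndReturn calls four module-level functions by name, so a Warp module only has to define them with compatible signatures. A minimal sketch of that contract (the sample modules later in this dump are real implementations; the identity "sim" here is a placeholder):

from pxr import Vt, Sdf

_state = {}

def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
                        dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None,
                        sim_params: dict = None):
    # capture whatever state the sim needs, keyed by prim path
    _state[primPath] = Vt.Vec3fArray(src_points)

def initialize_sim_particles(primPath: Sdf.Path, src_positions: Vt.Vec3fArray,
                             dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None,
                             sim_params: dict = None):
    _state[primPath] = Vt.Vec3fArray(src_positions)

def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None,
             sim_params: dict = None):
    # must return a Vt.Vec3fArray of new positions; here it is an identity "sim"
    return _state[primPath]

def terminate_sim(primPath: Sdf.Path):
    _state.pop(primPath, None)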
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/particles.py
# Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os

import warp as wp
import warp.sim
import warp.sim.render

import numpy as np
from pxr import Vt, Sdf

wp.init()

global_examples = {}

# need radius of sphere
class Example2:
    def __init__(self):
        self.frame_dt = 1.0 / 60
        self.frame_count = 400
        self.sim_substeps = 64
        self.sim_dt = self.frame_dt / self.sim_substeps
        self.sim_steps = self.frame_count * self.sim_substeps
        self.sim_time = 0.0
        self.radius = 0.1

        self.builder = wp.sim.ModelBuilder()
        self.builder.default_particle_radius = self.radius

    def update(self):
        self.model.particle_grid.build(self.state_0.particle_q, self.radius * 2.0)
        for s in range(self.sim_substeps):
            self.state_0.clear_forces()
            self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
            # swap states
            (self.state_0, self.state_1) = (self.state_1, self.state_0)


def terminate_sim(primPath: Sdf.Path):
    global global_examples
    global_examples[primPath] = None


def initialize_sim_particles(primPath: Sdf.Path, src_positions: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None,
    sim_params: dict = None):
    global global_examples
    global_examples[primPath] = Example2()
    for pt in src_positions:
        global_examples[primPath].builder.add_particle(pt, (5.0, 0.0, 0.0), 0.1)

    global_examples[primPath].model = global_examples[primPath].builder.finalize()
    global_examples[primPath].model.particle_kf = 25.0
    global_examples[primPath].model.soft_contact_kd = 100.0
    global_examples[primPath].model.soft_contact_kf *= 2.0

    global_examples[primPath].state_0 = global_examples[primPath].model.state()
    global_examples[primPath].state_1 = global_examples[primPath].model.state()
    global_examples[primPath].integrator = wp.sim.SemiImplicitIntegrator()


def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    # Not respecting sim_dt at all, using internal time
    global global_examples
    global_examples[primPath].update()
    return Vt.Vec3fArray.FromNumpy(global_examples[primPath].state_0.particle_q.numpy())
size: 2,841 · lang: Python · avg_line_length: 33.658536 · max_line_length: 136 · alphanum_fraction: 0.697994
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/cloth.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

###########################################################################
# Example Sim Cloth
#
# Shows a simulation of an FEM cloth model colliding against a static
# rigid body mesh using the wp.sim.ModelBuilder().
#
###########################################################################

import os
import math

import numpy as np

import warp as wp
import warp.sim
import warp.sim.render

from pxr import Usd, UsdGeom, Vt, Sdf

import sys

wp.init()

global_examples = {}


class Example:
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        self.sim_width = 64
        self.sim_height = 64

        self.frame_dt = 1.0 / 60
        self.frame_count = 400

        self.sim_substeps = 32
        self.sim_dt = self.frame_dt / self.sim_substeps
        self.sim_steps = self.frame_count * self.sim_substeps
        self.sim_time = 0.0

        builder = wp.sim.ModelBuilder()

        # sim BCs
        clothEdgeBendingStiffness = 0.01
        clothEdgeDampingStiffness = 0.0
        clothTriAreaStiffness = 1000000.0
        clothTriDampingStiffness = 100.0
        clothTriElasticStiffness = 1000000.0
        colliderContactDistance = 1.0
        colliderContactQueryRange = 100.0
        contactDampingStiffness = 10000.0
        contactElasticStiffness = 500000.0
        contactFrictionCoeff = 0.75
        contactFrictionStiffness = 10000.0
        globalScale = 0.01

        # cloth grid
        builder.add_cloth_grid(
            pos=(0.0, 50.0, -25.0),
            rot=wp.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.5),
            vel=(0.0, 0.0, 0.0),
            dim_x=self.sim_width,
            dim_y=self.sim_height,
            cell_x=1.0,
            cell_y=1.0,
            mass=0.1,
            fix_left=True,
            tri_ke=clothTriElasticStiffness * globalScale,
            tri_ka=clothTriAreaStiffness * globalScale,
            tri_kd=clothTriDampingStiffness * globalScale,
            edge_ke=clothEdgeBendingStiffness * globalScale,
            edge_kd=clothEdgeDampingStiffness * globalScale
        )

        # add collider (must have identity transform until we have xforms piped through the Hydra plugin)
        mesh = wp.sim.Mesh(points, indices)
        builder.add_shape_mesh(
            body=-1,
            mesh=mesh,
            pos=(0.0, 0.0, 0.0),
            rot=wp.quat_identity(),
            scale=(1.0, 1.0, 1.0),
            ke=1.0e2,
            kd=1.0e2,
            kf=1.0e1,
        )

        # set sim BCs
        self.model = builder.finalize()
        self.model.ground = True
        self.model.allocate_soft_contacts(self.model.particle_count)
        self.model.gravity = (0, -980, 0)
        self.model.soft_contact_ke = contactElasticStiffness * globalScale
        self.model.soft_contact_kf = contactFrictionStiffness * globalScale
        self.model.soft_contact_mu = contactFrictionCoeff
        self.model.soft_contact_kd = contactDampingStiffness * globalScale
        self.model.soft_contact_margin = colliderContactDistance * colliderContactQueryRange
        self.model.particle_radius = colliderContactDistance

        self.integrator = wp.sim.SemiImplicitIntegrator()
        self.state_0 = self.model.state()
        self.state_1 = self.model.state()

    def update(self, sim_time: float):
        wp.sim.collide(self.model, self.state_0)

        for s in range(self.sim_substeps):
            self.state_0.clear_forces()
            self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
            (self.state_0, self.state_1) = (self.state_1, self.state_0)


def terminate_sim(primPath: Sdf.Path):
    global global_examples
    global_examples[primPath] = None


def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None,
    sim_params: dict = None):
    global global_examples
    global_examples[primPath] = Example(dep_mesh_indices, dep_mesh_points)


def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    # Not respecting sim_dt at all, using internal time
    global global_examples
    global_examples[primPath].update(sim_dt)
    return Vt.Vec3fArray.FromNumpy(global_examples[primPath].state_0.particle_q.numpy())
size: 4,791 · lang: Python · avg_line_length: 33.978102 · max_line_length: 112 · alphanum_fraction: 0.625339
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/deform01.py
# Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warp as wp
import numpy as np
from pxr import Vt, Sdf


@wp.kernel
def deform(positions: wp.array(dtype=wp.vec3), t: float):
    tid = wp.tid()

    x = positions[tid]
    offset = -wp.sin(x[0]) * 0.06
    scale = wp.sin(t)

    x = x + wp.vec3(0.0, offset * scale, 0.0)

    positions[tid] = x


class Example:
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        self.mesh = wp.Mesh(
            points=wp.array(points, dtype=wp.vec3),
            indices=wp.array(indices, dtype=int),
        )

    def update(self, sim_time: float):
        wp.launch(kernel=deform, dim=len(self.mesh.points), inputs=[self.mesh.points, sim_time])

        # refit the mesh BVH to account for the deformation
        self.mesh.refit()


wp.init()

global_examples = {}


def terminate_sim(primPath: Sdf.Path):
    global global_examples
    global_examples[primPath] = None


def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None,
    sim_params: dict = None):
    global global_examples
    global_examples[primPath] = Example(src_indices, src_points)


def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    global global_examples
    # Sim expects 60 samples per second (or hydra time of 1.0)
    global_examples[primPath].update(sim_dt / 60.0)
    return Vt.Vec3fArray.FromNumpy(global_examples[primPath].mesh.points.numpy())


def is_enabled():
    return True
size: 2,140 · lang: Python · avg_line_length: 31.439393 · max_line_length: 112 · alphanum_fraction: 0.693458
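Because the entry points take plain USD types, the deformer can be smoke-tested outside Hydra. A minimal driver sketch (prim path and geometry are placeholders; assumes warp and pxr are importable and the module above is on sys.path as deform01):

from pxr import Vt, Sdf
import deform01

prim = Sdf.Path("/World/Grid")
indices = Vt.IntArray([0, 1, 2])
points = Vt.Vec3fArray([(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0)])

deform01.initialize_sim_mesh(prim, indices, points)
for frame in range(10):
    # hydra time; the module expects 60 samples per second
    deformed = deform01.exec_sim(prim, float(frame))
deform01.terminate_sim(prim)
print(deformed[0])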
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/ocean.py
# Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warp as wp
import numpy as np
from pxr import Vt, Sdf

wp.init()

sim_params_global = {
    'wave_amplitude': 1.5,
    'wave_directionality': 0.0,
    'wind_speed': 10.0,
    'water_depth': 50.0,
    'scale': 1.0,
    'direction': 0.0,
}


# warp function definitions

# fractional part of a (w.r.t. floor(a))
@wp.func
def frac(a: float):
    return a - wp.floor(a)


# square of a
@wp.func
def sqr(a: float):
    return a * a


@wp.func
def alpha_beta_spectrum(omega: float, peak_omega: float, alpha: float, beta: float, gravity: float):
    return (
        (alpha * gravity * gravity / wp.pow(omega, 5.0)) *
        wp.exp(-beta * wp.pow(peak_omega / omega, 4.0))
    )


@wp.func
def jonswap_peak_sharpening(omega: float, peak_omega: float, gamma: float):
    sigma = float(0.07)
    if omega > peak_omega:
        sigma = float(0.09)
    return wp.pow(gamma, wp.exp(-0.5 * sqr((omega - peak_omega) / (sigma * peak_omega))))


@wp.func
def jonswap_spectrum(omega: float, gravity: float, wind_speed: float, fetch_km: float, gamma: float):
    # https://wikiwaves.org/Ocean-Wave_Spectra#JONSWAP_Spectrum
    fetch = 1000.0 * fetch_km
    alpha = 0.076 * wp.pow(wind_speed * wind_speed / (gravity * fetch), 0.22)
    peak_omega = 22.0 * wp.pow(wp.abs(gravity * gravity / (wind_speed * fetch)), 1.0 / 3.0)
    return (jonswap_peak_sharpening(omega, peak_omega, gamma) *
            alpha_beta_spectrum(omega, peak_omega, alpha, 1.25, gravity))


@wp.func
def TMA_spectrum(omega: float, gravity: float, wind_speed: float, fetch_km: float, gamma: float, waterdepth: float):
    # https://dl.acm.org/doi/10.1145/2791261.2791267
    omegaH = omega * wp.sqrt(waterdepth / gravity)
    omegaH = wp.max(0.0, wp.min(2.2, omegaH))
    phi = 0.5 * omegaH * omegaH
    if omegaH > 1.0:
        phi = 1.0 - 0.5 * sqr(2.0 - omegaH)
    return phi * jonswap_spectrum(omega, gravity, wind_speed, fetch_km, gamma)


# warp kernel definitions
@wp.kernel
def update_profile(profile: wp.array(dtype=wp.vec3),
                   profile_res: int,
                   profile_data_num: int,
                   lambdaMin: float,
                   lambdaMax: float,
                   profile_extend: float,
                   time: float,
                   windspeed: float,
                   waterdepth: float):
    x = wp.tid()
    randself = wp.rand_init(7)

    # sampling parameters
    omega0 = wp.sqrt(2.0 * 3.14159 * 9.80665 / lambdaMin)
    omega1 = wp.sqrt(2.0 * 3.14159 * 9.80665 / lambdaMax)
    omega_delta = wp.abs(omega1 - omega0) / float(profile_data_num)

    # we blend three displacements for seamless spatial profile tiling
    space_pos_1 = profile_extend * float(x) / float(profile_res)
    space_pos_2 = space_pos_1 + profile_extend
    space_pos_3 = space_pos_1 - profile_extend

    p1 = wp.vec2(0.0, 0.0)
    p2 = wp.vec2(0.0, 0.0)
    p3 = wp.vec2(0.0, 0.0)
    for i in range(0, profile_data_num):
        omega = wp.abs(omega0 + (omega1 - omega0) * float(i) / float(profile_data_num))  # linear sampling of omega
        k = omega * omega / 9.80665
        phase = -time * omega + wp.randf(randself) * 2.0 * 3.14159
        amplitude = float(10000.0) * wp.sqrt(wp.abs(2.0 * omega_delta *
            TMA_spectrum(omega, 9.80665, windspeed, 100.0, 3.3, waterdepth)))
        p1 = wp.vec2(
            p1[0] + amplitude * wp.sin(phase + space_pos_1 * k),
            p1[1] - amplitude * wp.cos(phase + space_pos_1 * k)
        )
        p2 = wp.vec2(
            p2[0] + amplitude * wp.sin(phase + space_pos_2 * k),
            p2[1] - amplitude * wp.cos(phase + space_pos_2 * k)
        )
        p3 = wp.vec2(
            p3[0] + amplitude * wp.sin(phase + space_pos_3 * k),
            p3[1] - amplitude * wp.cos(phase + space_pos_3 * k)
        )

    # cubic blending coefficients
    s = float(float(x) / float(profile_res))
    c1 = float(2.0 * s * s * s - 3.0 * s * s + 1.0)
    c2 = float(-2.0 * s * s * s + 3.0 * s * s)
    disp_out = wp.vec3(
        (p1[0] + c1 * p2[0] + c2 * p3[0]) / float(profile_data_num),
        (p1[1] + c1 * p2[1] + c2 * p3[1]) / float(profile_data_num),
        0.
    )
    wp.store(profile, x, disp_out)


@wp.kernel
def update_points(out_points: wp.array(dtype=wp.vec3),
                  in_points: wp.array(dtype=wp.vec3),
                  profile: wp.array(dtype=wp.vec3),
                  profile_res: int,
                  profile_extent: float,
                  amplitude: float,
                  directionality: float,
                  direction: float,
                  antiAlias: int,
                  camPosX: float,
                  camPosY: float,
                  camPosZ: float):
    tid = wp.tid()

    p_crd = in_points[tid]
    p_crd = wp.vec3(p_crd[0], p_crd[2], p_crd[1])

    randself = wp.rand_init(7)

    disp_x = float(0.)
    disp_y = float(0.)
    disp_z = float(0.)
    w_sum = float(0.)

    direction_count = (int)(128)
    for d in range(0, direction_count):
        r = float(d) * 2. * 3.14159265359 / float(direction_count) + 0.02
        dir_x = wp.cos(r)
        dir_y = wp.sin(r)

        # directional amplitude
        t = wp.abs(direction - r)
        if (t > 3.14159265359):
            t = 2.0 * 3.14159265359 - t
        t = pow(t, 1.2)
        dirAmp = (2.0 * t * t * t - 3.0 * t * t + 1.0) * 1.0 + \
                 (-2.0 * t * t * t + 3.0 * t * t) * (1.0 - directionality)
        dirAmp = dirAmp / (1.0 + 10.0 * directionality)

        rand_phase = wp.randf(randself)
        x_crd = (p_crd[0] * dir_x + p_crd[2] * dir_y) / profile_extent + rand_phase

        pos_0 = int(wp.floor(x_crd * float(profile_res))) % profile_res
        if x_crd < 0.:
            pos_0 = pos_0 + profile_res - 1
        pos_1 = int(pos_0 + 1) % profile_res

        p_disp_0 = profile[pos_0]
        p_disp_1 = profile[pos_1]

        w = frac(x_crd * float(profile_res))
        prof_height_x = dirAmp * float((1. - w) * p_disp_0[0] + w * p_disp_1[0])
        prof_height_y = dirAmp * float((1. - w) * p_disp_0[1] + w * p_disp_1[1])

        disp_x = disp_x + dir_x * prof_height_x
        disp_y = disp_y + prof_height_y
        disp_z = disp_z + dir_y * prof_height_x
        w_sum = w_sum + 1.

    # simple anti-aliasing: reduce amplitude with increasing distance to viewpoint
    if (antiAlias > 0):
        v1 = wp.normalize(wp.vec3(p_crd[0] - camPosX,
                                  max(100.0, wp.abs(p_crd[1] - camPosY)),
                                  p_crd[2] - camPosZ))
        amplitude *= wp.sqrt(wp.abs(v1[1]))

    # write output vertex position
    outP = wp.vec3(p_crd[0] + amplitude * disp_x / w_sum,
                   p_crd[1] + amplitude * disp_y / w_sum,
                   p_crd[2] + amplitude * disp_z / w_sum)
    wp.store(out_points, tid, wp.vec3(outP[0], outP[2], outP[1]))


class Example:
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        # profile buffer initializations
        print('[Ocean deformer] Initializing profile buffer.')
        self.profile_extent = 410.0  # physical size of profile, should be around half the resolution
        self.profile_res = int(8192)
        self.profile_wavenum = int(1000)
        self.profile_CUDA = wp.zeros(self.profile_res, dtype=wp.vec3, device="cuda:0")

        self.points_in = wp.array(points, dtype=wp.vec3, device="cuda:0")
        self.points_out = wp.array(points, dtype=wp.vec3, device="cuda:0")
        print(self.points_in)
        print(self.points_out)

    def update(self, sim_time: float):
        global sim_params_global

        # params
        wave_amplitude = sim_params_global["wave_amplitude"]
        wave_directionality = sim_params_global["wave_directionality"]
        wind_speed = sim_params_global["wind_speed"]
        water_depth = sim_params_global["water_depth"]
        scale = sim_params_global["scale"]
        direction = sim_params_global["direction"]

        # Parameters
        time = float(sim_time)
        amplitude = max(0.0001, min(1000.0, float(wave_amplitude)))
        minWavelength = 0.1
        maxWavelength = 250.0
        direction = float(direction) % 6.28318530718
        directionality = max(0.0, min(1.0, 0.02 * float(wave_directionality)))
        windspeed = max(0.0, min(30.0, float(wind_speed)))
        waterdepth = max(1.0, min(1000.0, float(water_depth)))
        scale = min(10000.0, max(0.001, float(scale)))
        antiAlias = int(0)
        campos = [0.0, 0.0, 0.0]

        # create 1D profile buffer for this timestep using wave parameters stored in internal self CUDA memory
        wp.launch(
            kernel=update_profile,
            dim=self.profile_res,
            inputs=[self.profile_CUDA, int(self.profile_res), int(self.profile_wavenum),
                    float(minWavelength), float(maxWavelength), float(self.profile_extent),
                    float(time), float(windspeed), float(waterdepth)],
            outputs=[],
            device="cuda:0")

        # update point positions using the profile buffer created above
        wp.launch(
            kernel=update_points,
            dim=len(self.points_out),
            inputs=[self.points_out, self.points_in, self.profile_CUDA,
                    int(self.profile_res), float(self.profile_extent * scale),
                    float(amplitude), float(directionality), float(direction),
                    int(antiAlias), float(campos[0]), float(campos[1]), float(campos[2])],
            outputs=[],
            device="cuda:0")


global_examples = {}


def terminate_sim(primPath: Sdf.Path):
    global global_examples
    global_examples[primPath] = None


def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None,
    sim_params: dict = None):
    global global_examples
    global sim_params_global
    if sim_params:
        sim_params_global = sim_params
    global_examples[primPath] = Example(src_indices, src_points)


def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    global global_examples
    global sim_params_global
    if sim_params:
        sim_params_global = sim_params
    # Sim expects 60 samples per second (or hydra time of 1.0)
    global_examples[primPath].update(sim_dt / 60.0)
    return Vt.Vec3fArray.FromNumpy(global_examples[primPath].points_out.numpy())
size: 11,029 · lang: Python · avg_line_length: 37.838028 · max_line_length: 260 · alphanum_fraction: 0.580288
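For reference, the spectrum the update_profile kernel samples can be written compactly in standard notation (a sketch matching the functions above; ω_p is the peak frequency, d the water depth, g gravity):

\[
S_{\alpha\beta}(\omega) = \frac{\alpha g^2}{\omega^5}\exp\!\left(-\beta\left(\frac{\omega_p}{\omega}\right)^4\right),\qquad
S_J(\omega) = \gamma^{\exp\left(-\frac{(\omega-\omega_p)^2}{2\sigma^2\omega_p^2}\right)}\, S_{\alpha\beta}(\omega)\Big|_{\beta=1.25},\qquad
\sigma = \begin{cases}0.07 & \omega \le \omega_p\\ 0.09 & \omega > \omega_p\end{cases}
\]

with the TMA depth attenuation, where \(\omega_H = \omega\sqrt{d/g}\) is clamped to \([0, 2.2]\):

\[
\Phi(\omega_H) = \begin{cases}\tfrac{1}{2}\omega_H^2 & \omega_H \le 1\\ 1 - \tfrac{1}{2}(2-\omega_H)^2 & \omega_H > 1\end{cases},\qquad
S_{TMA}(\omega) = \Phi(\omega_H)\, S_J(\omega).
\]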
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/preferences.py
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
#    names, trademarks, service marks, or product names of the Licensor
#    and its affiliates, except as required to comply with Section 4(c) of
#    the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr.Usdviewq.qt import QtCore, QtGui, QtWidgets
from .preferencesUI import Ui_Preferences


class Preferences(QtWidgets.QDialog):
    def __init__(self, parent, attr):
        super(Preferences, self).__init__(parent)
        self._ui = Ui_Preferences()
        self._ui.setupUi(self)
        self._attr = attr

        metadata = self._attr.GetMetadata("customData")
        self._ui.scaleSpinBox.setValue(metadata["scale"])
        self._ui.directionSpinBox.setValue(metadata["direction"])
        self._ui.windSpeedSpinBox.setValue(metadata["wind_speed"])
        self._ui.waterDepthSpinBox.setValue(metadata["water_depth"])
        self._ui.waveAmplitudeSpinBox.setValue(metadata["wave_amplitude"])
        self._ui.waveDirectionalitySpinBox.setValue(metadata["wave_directionality"])

        self._ui.buttonBox.clicked.connect(self._buttonBoxButtonClicked)

    def _apply(self):
        self._attr.SetMetadataByDictKey('customData', 'scale', self._ui.scaleSpinBox.value())
        self._attr.SetMetadataByDictKey('customData', 'direction', self._ui.directionSpinBox.value())
        self._attr.SetMetadataByDictKey('customData', 'wind_speed', self._ui.windSpeedSpinBox.value())
        self._attr.SetMetadataByDictKey('customData', 'water_depth', self._ui.waterDepthSpinBox.value())
        self._attr.SetMetadataByDictKey('customData', 'wave_amplitude', self._ui.waveAmplitudeSpinBox.value())
        self._attr.SetMetadataByDictKey('customData', 'wave_directionality', self._ui.waveDirectionalitySpinBox.value())

    def _buttonBoxButtonClicked(self, button):
        role = self._ui.buttonBox.buttonRole(button)
        Roles = QtWidgets.QDialogButtonBox.ButtonRole
        if role == Roles.AcceptRole or role == Roles.ApplyRole:
            self._apply()
        if role == Roles.AcceptRole or role == Roles.RejectRole:
            self.close()
size: 2,923 · lang: Python · avg_line_length: 46.16129 · max_line_length: 120 · alphanum_fraction: 0.718782
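The dialog persists its values as customData on the warp:sourceFile attribute, the same dictionary the adapter surfaces as simulationParams and the ocean module receives as sim_params. A minimal round-trip sketch (prim path and values are placeholders):

from pxr import Usd, Sdf

stage = Usd.Stage.CreateInMemory()
prim = stage.DefinePrim("/World/grid/Grid", "Mesh")
attr = prim.CreateAttribute("warp:sourceFile", Sdf.ValueTypeNames.String)
attr.Set("ocean")

# what the preferences dialog does on Apply
attr.SetMetadataByDictKey("customData", "wind_speed", 15.0)

# what the adapter reads back to build simulationParams
print(attr.GetCustomData())   # {'wind_speed': 15.0}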
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/__init__.py
from pxr import Tf
from pxr.Usdviewq.plugin import PluginContainer

from .preferences import Preferences


def launchPreferences(usdviewApi):
    prim = usdviewApi.stage.GetPrimAtPath("/World/grid/Grid")
    attr = prim.GetAttribute("warp:sourceFile")
    _preferencesDlg = Preferences(usdviewApi.qMainWindow, attr)
    _preferencesDlg.show()
    _preferencesDlg = None


class OceanSimPluginContainer(PluginContainer):

    def registerPlugins(self, plugRegistry, usdviewApi):
        self._launchPreferences = plugRegistry.registerCommandPlugin(
            "OceanSimPluginContainer.launchPreferences",
            "Launch Preferences",
            launchPreferences)

    def configureView(self, plugRegistry, plugUIBuilder):
        tutMenu = plugUIBuilder.findOrCreateMenu("OceanSim")
        tutMenu.addItem(self._launchPreferences)


Tf.Type.Define(OceanSimPluginContainer)
size: 878 · lang: Python · avg_line_length: 32.807691 · max_line_length: 69 · alphanum_fraction: 0.749431
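usdview discovers a PluginContainer through a plugInfo.json next to the module. A sketch of the registration entry, assuming the package is named oceanSim (the Name and displayName values are assumptions; the bases entry must match the PluginContainer base class):

{
    "Plugins": [
        {
            "Type": "python",
            "Name": "oceanSim",
            "Info": {
                "Types": {
                    "oceanSim.OceanSimPluginContainer": {
                        "bases": ["pxr.Usdviewq.plugin.PluginContainer"],
                        "displayName": "Ocean Sim Preferences"
                    }
                }
            }
        }
    ]
}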
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/preferencesUI_pyside6.py
# -*- coding: utf-8 -*-

################################################################################
## Form generated from reading UI file 'preferencesUI.ui'
##
## Created by: Qt User Interface Compiler version 6.5.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################

from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
    QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
    QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient,
    QPainter, QPalette, QPixmap, QRadialGradient, QTransform)
from PySide6.QtWidgets import (QAbstractButton, QApplication, QDialog,
    QDialogButtonBox, QDoubleSpinBox, QFrame, QHBoxLayout, QLabel,
    QSizePolicy, QSpacerItem, QVBoxLayout, QWidget)


class Ui_Preferences(object):
    def setupUi(self, Ocean_Simulation_Settings):
        if not Ocean_Simulation_Settings.objectName():
            Ocean_Simulation_Settings.setObjectName(u"Ocean_Simulation_Settings")
        Ocean_Simulation_Settings.resize(295, 99)
        self.verticalLayout = QVBoxLayout()
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.prefsOverButtonsLayout = QVBoxLayout()
        self.prefsOverButtonsLayout.setObjectName(u"prefsOverButtonsLayout")

        self.horizontalLayout_3 = QHBoxLayout()
        self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
        self.scaleLabel = QLabel()
        self.scaleLabel.setObjectName(u"scaleLabel")
        self.horizontalLayout_3.addWidget(self.scaleLabel)
        self.horizontalSpacer_2a = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(self.horizontalSpacer_2a)
        self.scaleSpinBox = QDoubleSpinBox()
        self.scaleSpinBox.setObjectName(u"scaleSpinBox")
        self.scaleSpinBox.setDecimals(2)
        self.scaleSpinBox.setMinimum(0.000000000000000)
        self.scaleSpinBox.setValue(1.000000000000000)
        self.horizontalLayout_3.addWidget(self.scaleSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_3)

        self.horizontalLayout_4 = QHBoxLayout()
        self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
        self.directionLabel = QLabel()
        self.directionLabel.setObjectName(u"directionLabel")
        self.horizontalLayout_4.addWidget(self.directionLabel)
        self.horizontalSpacer_2b = QSpacerItem(26, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(self.horizontalSpacer_2b)
        self.directionSpinBox = QDoubleSpinBox()
        self.directionSpinBox.setObjectName(u"directionSpinBox")
        self.directionSpinBox.setDecimals(2)
        self.directionSpinBox.setMinimum(0.000000000000000)
        self.directionSpinBox.setValue(0.000000000000000)
        self.horizontalLayout_4.addWidget(self.directionSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_4)

        self.horizontalLayout_5 = QHBoxLayout()
        self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
        self.windSpeedLabel = QLabel()
        self.windSpeedLabel.setObjectName(u"windSpeedLabel")
        self.horizontalLayout_5.addWidget(self.windSpeedLabel)
        self.horizontalSpacer_2c = QSpacerItem(24, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(self.horizontalSpacer_2c)
        self.windSpeedSpinBox = QDoubleSpinBox()
        self.windSpeedSpinBox.setObjectName(u"windSpeedSpinBox")
        self.windSpeedSpinBox.setDecimals(2)
        self.windSpeedSpinBox.setMinimum(0.000000000000000)
        self.windSpeedSpinBox.setValue(10.000000000000000)
        self.horizontalLayout_5.addWidget(self.windSpeedSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_5)

        self.horizontalLayout_6 = QHBoxLayout()
        self.horizontalLayout_6.setObjectName(u"horizontalLayout_6")
        self.waterDepthLabel = QLabel()
        self.waterDepthLabel.setObjectName(u"waterDepthLabel")
        self.horizontalLayout_6.addWidget(self.waterDepthLabel)
        self.horizontalSpacer_2d = QSpacerItem(24, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_6.addItem(self.horizontalSpacer_2d)
        self.waterDepthSpinBox = QDoubleSpinBox()
        self.waterDepthSpinBox.setObjectName(u"waterDepthSpinBox")
        self.waterDepthSpinBox.setDecimals(2)
        self.waterDepthSpinBox.setMinimum(0.000000000000000)
        self.waterDepthSpinBox.setValue(50.000000000000000)
        self.horizontalLayout_6.addWidget(self.waterDepthSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_6)

        self.horizontalLayout_7 = QHBoxLayout()
        self.horizontalLayout_7.setObjectName(u"horizontalLayout_7")
        self.waveAmplitudeLabel = QLabel()
        self.waveAmplitudeLabel.setObjectName(u"waveAmplitudeLabel")
        self.horizontalLayout_7.addWidget(self.waveAmplitudeLabel)
        self.horizontalSpacer_2e = QSpacerItem(21, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_7.addItem(self.horizontalSpacer_2e)
        self.waveAmplitudeSpinBox = QDoubleSpinBox()
        self.waveAmplitudeSpinBox.setObjectName(u"waveAmplitudeSpinBox")
        self.waveAmplitudeSpinBox.setDecimals(2)
        self.waveAmplitudeSpinBox.setMinimum(0.000000000000000)
        self.waveAmplitudeSpinBox.setValue(1.500000000000000)
        self.horizontalLayout_7.addWidget(self.waveAmplitudeSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_7)

        self.horizontalLayout_8 = QHBoxLayout()
        self.horizontalLayout_8.setObjectName(u"horizontalLayout_8")
        self.waveDirectionalityLabel = QLabel()
        self.waveDirectionalityLabel.setObjectName(u"waveDirectionalityLabel")
        self.horizontalLayout_8.addWidget(self.waveDirectionalityLabel)
        self.horizontalSpacer_2f = QSpacerItem(17, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_8.addItem(self.horizontalSpacer_2f)
        self.waveDirectionalitySpinBox = QDoubleSpinBox()
        self.waveDirectionalitySpinBox.setObjectName(u"waveDirectionalitySpinBox")
        self.waveDirectionalitySpinBox.setMinimum(0.000000000000000)
        self.waveDirectionalitySpinBox.setValue(0.000000000000000)
        self.horizontalLayout_8.addWidget(self.waveDirectionalitySpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_8)

        self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        self.prefsOverButtonsLayout.addItem(self.verticalSpacer)

        self.line = QFrame()
        self.line.setObjectName(u"line")
        self.line.setFrameShape(QFrame.HLine)
        self.line.setFrameShadow(QFrame.Sunken)
        self.prefsOverButtonsLayout.addWidget(self.line)

        self.horizontalLayout_2 = QHBoxLayout()
        self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
        self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(self.horizontalSpacer)
        self.buttonBox = QDialogButtonBox()
        self.buttonBox.setObjectName(u"buttonBox")
        self.buttonBox.setStandardButtons(QDialogButtonBox.Apply|QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
        self.horizontalLayout_2.addWidget(self.buttonBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_2)

        self.verticalLayout.addLayout(self.prefsOverButtonsLayout)

        self.retranslateUi(Ocean_Simulation_Settings)

        QMetaObject.connectSlotsByName(Ocean_Simulation_Settings)
    # setupUi

    def retranslateUi(self, Ocean_Simulation_Settings):
        Ocean_Simulation_Settings.setWindowTitle(QCoreApplication.translate("Preferences", u"Ocean Simulation Settings", None))
        Ocean_Simulation_Settings.setProperty("comment", QCoreApplication.translate("Preferences", u"\n"
" Copyright 2020 Pixar\n"
"\n"
" Licensed under the Apache License, Version 2.0 (the \"Apache License\")\n"
" with the following modification; you may not use this file except in\n"
" compliance with the Apache License and the following modification to it:\n"
" Section 6. Trademarks. is deleted and replaced with:\n"
"\n"
" 6. Trademarks. This License does not grant permission to use the trade\n"
" names, trademarks, service marks, or product names of the Licensor\n"
" and its affiliates, except as required to comply with Section 4(c) of\n"
" the License and to reproduce the content of the NOTICE file.\n"
"\n"
" You may obtain a copy of the Apache License at\n"
"\n"
" http://www.apache.org/licenses/LICENSE-2.0\n"
"\n"
" Unless required by applicable law or agreed to in writing, software\n"
" distributed under the Apache License with the above modification is\n"
" distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n"
" KIND, either express or implied. See the Apache License for the specific\n"
" language governing permissions and limitations under the Apache License.\n"
" ", None))
        self.scaleLabel.setText(QCoreApplication.translate("Preferences", u"Scale", None))
        self.directionLabel.setText(QCoreApplication.translate("Preferences", u"Direction", None))
        self.windSpeedLabel.setText(QCoreApplication.translate("Preferences", u"Wind Speed", None))
        self.waterDepthLabel.setText(QCoreApplication.translate("Preferences", u"Water Depth", None))
        self.waveAmplitudeLabel.setText(QCoreApplication.translate("Preferences", u"Wave Amplitude", None))
        self.waveDirectionalityLabel.setText(QCoreApplication.translate("Preferences", u"Wave Directionality", None))
    # retranslateUi
size: 10,887 · lang: Python · avg_line_length: 46.134199 · max_line_length: 127 · alphanum_fraction: 0.669055
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedPrimDataSource.cpp
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <pxr/base/gf/transform.h>
#include <pxr/usd/usdGeom/tokens.h>
#include <pxr/imaging/hd/xformSchema.h>

#include "computedPrimDataSource.h"
#include "localPositionSchema.h"
#include "referencePositionSchema.h"

PXR_NAMESPACE_OPEN_SCOPE

HdOmniGeospatialComputedPrimDataSource::HdOmniGeospatialComputedPrimDataSource(
    HdContainerDataSourceHandle inputDataSource) :
    _inputDataSource(inputDataSource)
{
    _matrixDataSource =
        HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::New(_inputDataSource);
}

#if PXR_VERSION < 2302
bool HdOmniGeospatialComputedPrimDataSource::Has(const TfToken& name)
{
    return (name == HdXformSchemaTokens->resetXformStack) ||
        (name == HdXformSchemaTokens->matrix);
}
#endif

TfTokenVector HdOmniGeospatialComputedPrimDataSource::GetNames()
{
    // this container data source retrieves the xform tokens
    TfTokenVector result;
    result.push_back(HdXformSchemaTokens->resetXformStack);
    result.push_back(HdXformSchemaTokens->matrix);

    return result;
}

HdDataSourceBaseHandle HdOmniGeospatialComputedPrimDataSource::Get(const TfToken& name)
{
    if (_inputDataSource != nullptr)
    {
        if (name == HdXformSchemaTokens->resetXformStack)
        {
            // we don't modify the underlying time-sampled data
            // for resetXformStack, so return that directly
            HdXformSchema xformSchema = HdXformSchema::GetFromParent(_inputDataSource);
            return xformSchema.IsDefined() ? xformSchema.GetResetXformStack() : nullptr;
        }
        else if (name == HdXformSchemaTokens->matrix)
        {
            // note even if resetXformStack was true we consider
            // the geospatial data to override that
            return _matrixDataSource;
        }
    }

    return nullptr;
}

HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GeospatialMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource) :
    _inputDataSource(inputDataSource)
{
}

VtValue HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}

GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return this->_ComputeTransformedMatrix(shutterOffset);
}

bool HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime, Time endTime, std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle sources[] = {
        this->_GetMatrixSource(), this->_GetLocalPositionSource()
    };

    return HdGetMergedContributingSampleTimesForInterval(
        TfArraySize(sources), sources, startTime, endTime, outSampleTimes);
}

HdMatrixDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetMatrixSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetMatrix();
}

HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetLocalPositionSource() const
{
    return HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(_inputDataSource).GetPosition();
}

HdTokenDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetTangentPlaneSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetTangentPlane();
}

HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetReferencePositionSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetReferencePosition();
}

HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetOrientationSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetOrientation();
}

HdTokenDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageUpAxisSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetStageUpAxis();
}

HdDoubleDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageMetersPerUnitSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetStageMetersPerUnit();
}

GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }

    return GfMatrix4d(1.0);
}

GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetLocalPosition(const Time shutterOffset) const
{
    HdVec3dDataSourceHandle dataSource = this->_GetLocalPositionSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }

    return GfVec3d(1.0);
}

TfToken HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetTangentPlane() const
{
    HdTokenDataSourceHandle dataSource = this->_GetTangentPlaneSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }

    return TfToken();
}

GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetReferencePosition() const
{
    HdVec3dDataSourceHandle dataSource = this->_GetReferencePositionSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }

    return GfVec3d(1.0);
}

GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetOrientation() const
{
    HdVec3dDataSourceHandle dataSource = this->_GetOrientationSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }

    return GfVec3d(1.0);
}

TfToken HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageUpAxis() const
{
    HdTokenDataSourceHandle dataSource = this->_GetStageUpAxisSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }

    return UsdGeomTokens->y;
}

double HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageMetersPerUnit() const
{
    HdDoubleDataSourceHandle dataSource = this->_GetStageMetersPerUnitSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }

    return 0.01;
}

GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_ComputeTransformedMatrix(const Time shutterOffset) const
{
    // NOTE: in the case of the geospatially applied prim, we are completely
    // ignoring the fact that resetXformStack may be true at any given time sample
    // that is, geospatial positioning takes priority over local transformation reset

    // to compute the local position, we need to first get the geodetic reference
    TfToken targetFrame = this->_GetTangentPlane();
    GfVec3d tangentPosition = this->_GetReferencePosition();
    GfVec3d orientation = this->_GetOrientation();
    GfVec3d localPosition = this->_GetLocalPosition(shutterOffset);
    double metersPerUnit = this->_GetStageMetersPerUnit();
    TfToken upAxis = this->_GetStageUpAxis();

    // calculate the new geodetic translation
    auto enu = this->_EcefToEnu(this->_GeodeticToEcef(localPosition), tangentPosition);
    GfVec3d translation = this->_EnuToCartesian(enu, upAxis, metersPerUnit, tangentPosition);

    // we only want to replace the translation piece
    // but since the transform may have orientation and scale
    // information, we need to extract that from the existing
    // matrix first
    GfTransform currentTransform(this->_GetMatrix(shutterOffset));
    GfVec3d existingScale = currentTransform.GetScale();
    GfRotation existingRotation = currentTransform.GetRotation();
    GfRotation existingPivotOrientation = currentTransform.GetPivotOrientation();
    GfVec3d existingPivotPosition = currentTransform.GetPivotPosition();

    // now combine the new translation with the existing scale / rotation
    GfTransform newTransform(existingScale, existingPivotOrientation,
        existingRotation, existingPivotPosition, translation);

    return newTransform.GetMatrix();
}

// Geospatial transform functions
// For reference:
// https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470099728.app3
// https://en.wikipedia.org/wiki/Geographic_coordinate_conversion
// Implementation of Ferrari's solution
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GeodeticToEcef(const GfVec3d & llh) const
{
    double lambda = llh[0] * GeoConstants::radians;
    double phi = llh[1] * GeoConstants::radians;
    double sin_lambda = sin(lambda);
    double N = GeoConstants::semiMajorAxis / sqrt(1 - GeoConstants::eccentricity * sin_lambda * sin_lambda);
    double cos_lambda = cos(lambda);
    double cos_phi = cos(phi);
    double sin_phi = sin(phi);

    return PXR_NS::GfVec3d((llh[2] + N) * cos_lambda * cos_phi,
        (llh[2] + N) * cos_lambda * sin_phi,
        (llh[2] + (1 - GeoConstants::eccentricity) * N) * sin_lambda);
}

GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const
{
    double lambda = llh[0] * GeoConstants::radians;
    double phi = llh[1] * GeoConstants::radians;
    double sin_lambda = sin(lambda);
    double N = GeoConstants::semiMajorAxis / sqrt(1 - GeoConstants::eccentricity * sin_lambda * sin_lambda);
    double cos_lambda = cos(lambda);
    double cos_phi = cos(phi);
    double sin_phi = sin(phi);

    PXR_NS::GfVec3d pt((llh[2] + N) * cos_lambda * cos_phi,
        (llh[2] + N) * cos_lambda * sin_phi,
        (llh[2] + (1 - GeoConstants::eccentricity) * N) * sin_lambda);

    auto delta = ecef - pt;
    return PXR_NS::GfVec3d(-sin_phi * delta[0] + cos_phi * delta[1],
        -cos_phi * sin_lambda * delta[0] - sin_lambda * sin_phi * delta[1] + cos_lambda * delta[2],
        cos_lambda * cos_phi * delta[0] + cos_lambda * sin_phi * delta[1] + sin_lambda * delta[2]);
}

GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_EnuToCartesian(
    const GfVec3d& enu, const TfToken& upAxis, const double& metersPerUnit, const GfVec3d& reference) const
{
    auto cartesian = GfVec3d(reference[0] < 0.0 ? -enu[0] : enu[0],
        upAxis == UsdGeomTokens->y ? enu[2] : enu[1],
        upAxis == UsdGeomTokens->z ? enu[2] : enu[1]);
    cartesian /= metersPerUnit;

    return cartesian;
}

PXR_NAMESPACE_CLOSE_SCOPE
size: 11,354 · lang: C++ · avg_line_length: 35.394231 · max_line_length: 137 · alphanum_fraction: 0.747314
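For reference, _GeodeticToEcef implements the standard WGS84 geodetic-to-ECEF conversion; note that the variable the code calls lambda plays the role of the geodetic latitude φ in the usual notation. With semi-major axis a, squared first eccentricity e², and ellipsoid height h:

\[
N(\varphi) = \frac{a}{\sqrt{1 - e^2 \sin^2\varphi}},\qquad
\begin{aligned}
X &= (N + h)\cos\varphi\cos\lambda\\
Y &= (N + h)\cos\varphi\sin\lambda\\
Z &= \big((1 - e^2)N + h\big)\sin\varphi
\end{aligned}
\]

_EcefToEnu then subtracts the reference point's ECEF position and rotates the delta into the local east-north-up frame at that point, which _EnuToCartesian maps into stage axes and units.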
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionSchema.h
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_
#define HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_

#include <pxr/imaging/hd/schema.h>
#include <pxr/imaging/hd/dataSourceLocator.h>

#include "api.h"

PXR_NAMESPACE_OPEN_SCOPE

//-----------------------------------------------------------------------------

#define HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS \
    (referencePositionApi) \
    (tangentPlane) \
    (referencePosition) \
    (orientation) \
    (stageUpAxis) \
    (stageMetersPerUnit) \

TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialWGS84ReferencePositionSchemaTokens, OMNIGEOSCENEINDEX_API,
    HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS);

//-----------------------------------------------------------------------------

class HdOmniGeospatialWGS84ReferencePositionSchema : public HdSchema
{
public:
    HdOmniGeospatialWGS84ReferencePositionSchema(HdContainerDataSourceHandle container) :
        HdSchema(container) { }

    OMNIGEOSCENEINDEX_API
    HdTokenDataSourceHandle GetTangentPlane();

    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetReferencePosition();

    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetOrientation();

    OMNIGEOSCENEINDEX_API
    HdTokenDataSourceHandle GetStageUpAxis();

    OMNIGEOSCENEINDEX_API
    HdDoubleDataSourceHandle GetStageMetersPerUnit();

    OMNIGEOSCENEINDEX_API
    static HdOmniGeospatialWGS84ReferencePositionSchema GetFromParent(
        const HdContainerDataSourceHandle& fromParentContainer);

    OMNIGEOSCENEINDEX_API
    static const HdDataSourceLocator& GetDefaultLocator();

    OMNIGEOSCENEINDEX_API
    static HdContainerDataSourceHandle BuildRetained(
        const HdTokenDataSourceHandle& tangentPlane,
        const HdVec3dDataSourceHandle& referencePosition,
        const HdVec3dDataSourceHandle& orientation,
        const HdTokenDataSourceHandle& stageUpAxis,
        const HdDoubleDataSourceHandle& stageMetersPerUnit
    );
};

PXR_NAMESPACE_CLOSE_SCOPE

#endif // HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_
size: 2,662 · lang: C · avg_line_length: 32.70886 · max_line_length: 99 · alphanum_fraction: 0.730278
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/api.h
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef OMNI_GEO_SCENE_INDEX_API_H
#define OMNI_GEO_SCENE_INDEX_API_H

#include "pxr/base/arch/export.h"

#if defined(PXR_STATIC)
#   define OMNIGEOSCENEINDEX_API
#   define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...)
#   define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...)
#   define OMNIGEOSCENEINDEX_LOCAL
#else
#   if defined(OMNIGEOSCENEINDEX_EXPORTS)
#       define OMNIGEOSCENEINDEX_API ARCH_EXPORT
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
#   else
#       define OMNIGEOSCENEINDEX_API ARCH_IMPORT
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
#   endif
#   define OMNIGEOSCENEINDEX_LOCAL ARCH_HIDDEN
#endif

#endif // OMNI_GEO_SCENE_INDEX_API_H
1,544
C
39.657894
99
0.734456
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionDataSource.h
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_ #define HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_ #include <pxr/imaging/hd/dataSource.h> #include <pxr/usdImaging/usdImaging/dataSourceStageGlobals.h> #include <omniGeospatial/wGS84LocalPositionAPI.h> #include "localPositionSchema.h" PXR_NAMESPACE_OPEN_SCOPE class HdOmniGeospatialWGS84LocalPositionDataSource : public HdContainerDataSource { public: HD_DECLARE_DATASOURCE(HdOmniGeospatialWGS84LocalPositionDataSource); HdOmniGeospatialWGS84LocalPositionDataSource(const UsdPrim& prim, const UsdImagingDataSourceStageGlobals& stageGlobals); TfTokenVector GetNames() override; HdDataSourceBaseHandle Get(const TfToken& name) override; #if PXR_VERSION < 2302 bool Has(const TfToken& name) override; #endif private: OmniGeospatialWGS84LocalPositionAPI _localPositionApi; const UsdImagingDataSourceStageGlobals& _stageGlobals; }; HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialWGS84LocalPositionDataSource); PXR_NAMESPACE_CLOSE_SCOPE #endif // HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_
1,710
C
33.219999
81
0.792398
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedDependentDataSource.h
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_

#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>

PXR_NAMESPACE_OPEN_SCOPE

/// \class HdOmniGeospatialComputedDependentDataSource
///
/// A container data source mimicking that of a container data source
/// for xform data, but returning computed values based on geospatial
/// data applied to the parent (or some ancestor in the hierarchy)
/// of this prim.
///
class HdOmniGeospatialComputedDependentDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialComputedDependentDataSource);

    HdOmniGeospatialComputedDependentDataSource(HdContainerDataSourceHandle inputDataSource,
        HdContainerDataSourceHandle parentDataSource);

    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif

private:
    HdDataSourceBaseHandle _ComputeGeospatiallyAffectedXform();

private:
    HdContainerDataSourceHandle _inputDataSource;
    HdContainerDataSourceHandle _parentDataSource;
    HdMatrixDataSourceHandle _matrixDataSource;

    class _GeospatiallyAffectedMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_GeospatiallyAffectedMatrixDataSource);

        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;

    private:
        _GeospatiallyAffectedMatrixDataSource(HdContainerDataSourceHandle inputDataSource,
            HdContainerDataSourceHandle parentDataSource);

        HdMatrixDataSourceHandle _GetMatrixSource() const;
        HdBoolDataSourceHandle _GetResetXformStackSource() const;
        HdMatrixDataSourceHandle _GetParentMatrixSource() const;
        HdMatrixDataSourceHandle _GetParentOriginalMatrixSource() const;

        GfMatrix4d _GetMatrix(const Time shutterOffset) const;
        bool _GetResetXformStack(const Time shutterOffset) const;
        GfMatrix4d _GetParentMatrix(const Time shutterOffset) const;
        GfMatrix4d _GetParentOriginalMatrix(const Time shutterOffset) const;

        // geospatial transform methods
        GfMatrix4d _ComputeTransformedMatrix(const Time shutterOffset) const;

        HdContainerDataSourceHandle _inputDataSource;
        HdContainerDataSourceHandle _parentDataSource;
    };

    HD_DECLARE_DATASOURCE_HANDLES(_GeospatiallyAffectedMatrixDataSource);
};

HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialComputedDependentDataSource);

PXR_NAMESPACE_CLOSE_SCOPE

#endif // HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_
3,530
C
35.402061
92
0.768272
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialDataSource.cpp
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/xformSchema.h>

#include "geospatialDataSource.h"
#include "computedPrimDataSource.h"
#include "computedDependentDataSource.h"
#include "localPositionSchema.h"

PXR_NAMESPACE_OPEN_SCOPE

TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialDataSourceTokens,
    HDOMNIGEOSPATIALDATASOURCE_TOKENS);

HdOmniGeospatialDataSource::HdOmniGeospatialDataSource(const HdSceneIndexBase& index, const SdfPath& primPath,
    HdContainerDataSourceHandle wrappedDataSource) :
    _sceneIndex(index),
    _primPath(primPath),
    _wrappedDataSource(wrappedDataSource)
{
}

void HdOmniGeospatialDataSource::UpdateWrappedDataSource(
    HdContainerDataSourceHandle wrappedDataSource)
{
    _wrappedDataSource = wrappedDataSource;
}

#if PXR_VERSION < 2302
bool HdOmniGeospatialDataSource::Has(const TfToken& name)
{
    if (name == HdOmniGeospatialDataSourceTokens->geospatialPreservedXform)
    {
        return true;
    }

    return (_wrappedDataSource != nullptr) ? _wrappedDataSource->Has(name) : false;
}
#endif

TfTokenVector HdOmniGeospatialDataSource::GetNames()
{
    // since we only wrapped Xformables, this should
    // also return HdXformSchemaTokens->xform
    TfTokenVector result = (_wrappedDataSource == nullptr) ? TfTokenVector() : _wrappedDataSource->GetNames();
    result.push_back(HdOmniGeospatialDataSourceTokens->geospatialPreservedXform);

    return result;
}

HdDataSourceBaseHandle HdOmniGeospatialDataSource::Get(const TfToken& name)
{
    if (name == HdXformSchemaTokens->xform)
    {
        // this is an intercept of the flattened transform matrix
        // we need to dynamically compute a geospatial one
        return this->_ComputeGeospatialXform();
    }
    else if (name == HdOmniGeospatialDataSourceTokens->geospatialPreservedXform)
    {
        // this would be the original flattened matrix of the wrapped data source
        if (_wrappedDataSource != nullptr)
        {
            return _wrappedDataSource->Get(HdXformSchemaTokens->xform);
        }
    }

    // all other token values should be deferred to the wrapped data source (if any)
    if (_wrappedDataSource != nullptr)
    {
        return _wrappedDataSource->Get(name);
    }

    return nullptr;
}

bool HdOmniGeospatialDataSource::IsPrimDirtied(const HdDataSourceLocatorSet& locators)
{
    static const HdContainerDataSourceHandle containerNull(nullptr);
    if (locators.Intersects(HdXformSchema::GetDefaultLocator()))
    {
        if (HdContainerDataSource::AtomicLoad(_computedGeospatialPrimDataSource) != nullptr ||
            HdContainerDataSource::AtomicLoad(_computedGeospatialDependentDataSource) != nullptr)
        {
            HdContainerDataSource::AtomicStore(_computedGeospatialPrimDataSource, containerNull);
            HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, containerNull);

            return true;
        }
    }

    return false;
}

HdDataSourceBaseHandle HdOmniGeospatialDataSource::_ComputeGeospatialXform()
{
    // since matrices are time sampled, we actually don't compute anything
    // here, we just set up the right HdMatrixDataSources to be able to
    // compute a final value at a specific time sample when asked
    // to do that, we have two cases:
    // 1. The wrapped prim in question has a local geodetic position applied
    //    In this case, all of the information we need to compute the position
    //    is stored inside the wrapped prim itself (i.e. the geodetic root
    //    tangentPlane and geodetic position from the applied API schema)
    // 2. The wrapped prim in question does not have a local geodetic position
    //    applied, but its parent in the stage hierarchy does, which means
    //    that we need the wrapped prim plus its parent prim to be able to
    //    compute the new correct transform
    //
    // Case 1 is easy - we can detect whether we have the information or not
    // and create the right data source to return.
    //
    // Case 2 is a bit more difficult to do performantly - at the moment
    // we will walk the parent prim hierarchy to the root to determine
    // this information, but likely you would want to cache this locally
    // on the wrapped prim. We can certainly do that, but then we have to
    // be concerned about invalidating it at the right time. We'll leave this
    // as a TODO for the future.
    //
    if (this->_HasGeospatialInformation(_wrappedDataSource))
    {
        // this is case 1, and we can create a data source specifically
        // catered to do that computation
        HdContainerDataSourceHandle computedGeospatialPrimDataSource =
            HdContainerDataSource::AtomicLoad(_computedGeospatialPrimDataSource);
        if (computedGeospatialPrimDataSource != nullptr)
        {
            // we have a previously cached value so can return that directly
            return computedGeospatialPrimDataSource;
        }

        // otherwise we have to compute a new one
        // since the container responsible for the xform token
        // needs to take into account both resetXform and matrix
        // and since both of those can be time-sampled, we have to make
        // sure we can respond appropriately to any query
        // so we will need a complete view of the wrapped data source
        // to perform the computation
        computedGeospatialPrimDataSource = HdOmniGeospatialComputedPrimDataSource::New(_wrappedDataSource);
        HdContainerDataSource::AtomicStore(_computedGeospatialPrimDataSource, computedGeospatialPrimDataSource);

        return computedGeospatialPrimDataSource;
    }
    else
    {
        // this is case 2; in order to perform this transformation appropriately
        // we have to walk the parent hierarchy to find the parent with a local position
        // geospatial API attached to it - if none exists we can return the wrapped
        // data source directly, but if one does exist we need a new data source capable
        // of handling the dynamic compute at any time sample
        HdContainerDataSourceHandle computedGeospatialDependentDataSource =
            HdContainerDataSource::AtomicLoad(_computedGeospatialDependentDataSource);
        if (computedGeospatialDependentDataSource != nullptr)
        {
            // we have a previously cached value and can return that directly
            return computedGeospatialDependentDataSource;
        }

        // otherwise we have to compute a new one
        // so we need to follow the prim hierarchy up until we reach
        // a geospatially applied one (if any)
        if (_primPath != SdfPath::AbsoluteRootPath())
        {
            HdContainerDataSourceHandle geospatialDataSource = nullptr;
            for (SdfPath p = _primPath.GetParentPath(); p != SdfPath::AbsoluteRootPath(); p = p.GetParentPath())
            {
                HdSceneIndexPrim prim = _sceneIndex.GetPrim(p);
                if (this->_HasGeospatialInformation(prim.dataSource))
                {
                    // found it!
                    geospatialDataSource = prim.dataSource;

                    // stop at the nearest geospatially applied ancestor
                    break;
                }
            }

            // if we didn't find a geospatially applied parent, we don't need to do anything
            if (geospatialDataSource == nullptr)
            {
                if (_wrappedDataSource != nullptr)
                {
                    HdContainerDataSourceHandle dataSource = HdContainerDataSource::Cast(_wrappedDataSource->Get(HdXformSchemaTokens->xform));
                    if (dataSource != nullptr)
                    {
                        HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, dataSource);

                        // return the freshly stored handle rather than reading
                        // the atomic member back non-atomically
                        return dataSource;
                    }

                    return nullptr;
                }

                return nullptr;
            }

            // otherwise we need a new datasource that can perform the compute between
            // the immediate parent and the prim in question
            SdfPath parentPath = _primPath.GetParentPath();
            HdSceneIndexPrim parentSceneIndexPrim = _sceneIndex.GetPrim(parentPath);
            computedGeospatialDependentDataSource = HdOmniGeospatialComputedDependentDataSource::New(_wrappedDataSource,
                parentSceneIndexPrim.dataSource);
            HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, computedGeospatialDependentDataSource);

            return computedGeospatialDependentDataSource;
        }
        else
        {
            // it's the root path, and we don't have to do anything here
            // NOTE: this makes the assumption that root never has geospatial information applied
            if (_wrappedDataSource != nullptr)
            {
                return _wrappedDataSource->Get(HdXformSchemaTokens->xform);
            }
        }
    }

    return nullptr;
}

bool HdOmniGeospatialDataSource::_HasGeospatialInformation(HdContainerDataSourceHandle handle)
{
    HdOmniGeospatialWGS84LocalPositionSchema localPositionSchema = HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(handle);
    return localPositionSchema.IsDefined();
}

PXR_NAMESPACE_CLOSE_SCOPE
9,813
C++
40.235294
142
0.689086
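A hedged consumer-side sketch of what the wrapper above provides: the standard xform locator yields the recomputed matrix, while the preserved token still exposes the pre-geospatial flattened matrix (sceneIndex and primPath are assumed inputs, not part of the plugin):

#include <pxr/imaging/hd/xformSchema.h>
HdSceneIndexPrim prim = sceneIndex->GetPrim(primPath);  // assumed inputs
HdXformSchema xform = HdXformSchema::GetFromParent(prim.dataSource);
if (xform.IsDefined() && xform.GetMatrix() != nullptr)
{
    // the geospatially recomputed flattened transform
    GfMatrix4d geoMatrix = xform.GetMatrix()->GetTypedValue(0.0f);
}
// the original flattened matrix remains reachable for dependent computations
HdDataSourceBaseHandle preserved = prim.dataSource->Get(
    HdOmniGeospatialDataSourceTokens->geospatialPreservedXform);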
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionDataSource.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/usdImaging/usdImaging/dataSourceAttribute.h> #include "localPositionDataSource.h" PXR_NAMESPACE_OPEN_SCOPE HdOmniGeospatialWGS84LocalPositionDataSource::HdOmniGeospatialWGS84LocalPositionDataSource( const UsdPrim& prim, const UsdImagingDataSourceStageGlobals& stageGlobals) : _stageGlobals(stageGlobals) { _localPositionApi = OmniGeospatialWGS84LocalPositionAPI(prim); } #if PXR_VERSION < 2302 bool HdOmniGeospatialWGS84LocalPositionDataSource::Has(const TfToken& name) { return (name == HdOmniGeospatialWGS84LocalPositionSchemaTokens->position); } #endif TfTokenVector HdOmniGeospatialWGS84LocalPositionDataSource::GetNames() { // return the hydra attribute names this data source is responsible for TfTokenVector names; names.push_back(HdOmniGeospatialWGS84LocalPositionSchemaTokens->position); return names; } HdDataSourceBaseHandle HdOmniGeospatialWGS84LocalPositionDataSource::Get(const TfToken& name) { // retrieves the data source values for the attributes this data source // supports if (name == HdOmniGeospatialWGS84LocalPositionSchemaTokens->position) { return UsdImagingDataSourceAttribute<GfVec3d>::New( _localPositionApi.GetPositionAttr(), _stageGlobals); } // this is a name we don't support return nullptr; } PXR_NAMESPACE_CLOSE_SCOPE
1,954
C++
32.135593
93
0.772262
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedPrimDataSource.h
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_

#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>

PXR_NAMESPACE_OPEN_SCOPE

/// \class HdOmniGeospatialComputedPrimDataSource
///
/// A container data source mimicking that of a container data source
/// for xform data, but returning computed values based on geospatial
/// data applied to the prim.
///
class HdOmniGeospatialComputedPrimDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialComputedPrimDataSource);

    HdOmniGeospatialComputedPrimDataSource(HdContainerDataSourceHandle inputDataSource);

    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif

private:
    HdDataSourceBaseHandle _ComputeGeospatialXform();
    GfVec3d _GeodeticToEcef(const GfVec3d& llh) const;
    GfVec3d _EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const;
    GfVec3d _EnuToCartesian(const GfVec3d& enu, const TfToken& upAxis, const double& metersPerUnit, const GfVec3d& reference) const;

private:
    HdContainerDataSourceHandle _inputDataSource;
    HdMatrixDataSourceHandle _matrixDataSource;

    class _GeospatialMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_GeospatialMatrixDataSource);

        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;

    private:
        _GeospatialMatrixDataSource(HdContainerDataSourceHandle inputDataSource);

        HdMatrixDataSourceHandle _GetMatrixSource() const;
        HdVec3dDataSourceHandle _GetLocalPositionSource() const;
        HdTokenDataSourceHandle _GetTangentPlaneSource() const;
        HdVec3dDataSourceHandle _GetReferencePositionSource() const;
        HdVec3dDataSourceHandle _GetOrientationSource() const;
        HdTokenDataSourceHandle _GetStageUpAxisSource() const;
        HdDoubleDataSourceHandle _GetStageMetersPerUnitSource() const;

        GfMatrix4d _GetMatrix(const Time shutterOffset) const;
        GfVec3d _GetLocalPosition(const Time shutterOffset) const;
        TfToken _GetTangentPlane() const;
        GfVec3d _GetReferencePosition() const;
        GfVec3d _GetOrientation() const;
        TfToken _GetStageUpAxis() const;
        double _GetStageMetersPerUnit() const;

        // geospatial transform methods
        GfMatrix4d _ComputeTransformedMatrix(const Time shutterOffset) const;
        GfVec3d _GeodeticToEcef(const GfVec3d& llh) const;
        GfVec3d _EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const;
        GfVec3d _EnuToCartesian(const GfVec3d& enu, const TfToken& upAxis, const double& metersPerUnit, const GfVec3d& reference) const;

        struct GeoConstants
        {
            static constexpr double semiMajorAxis = 6378137.0;
            static constexpr double semiMinorAxis = 6356752.3142;
            static constexpr double flattening = 1.0 / 298.257223563;
            // note: despite the name, this is the squared first
            // eccentricity, e^2 = f * (2 - f)
            static constexpr double eccentricity = flattening * (2 - flattening);
            static constexpr double radians = M_PI / 180.0;
            static constexpr double degrees = 180.0 / M_PI;
        };

        HdContainerDataSourceHandle _inputDataSource;
    };

    HD_DECLARE_DATASOURCE_HANDLES(_GeospatialMatrixDataSource);
};

HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialComputedPrimDataSource);

PXR_NAMESPACE_CLOSE_SCOPE

#endif // HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_
4,414
C
37.72807
136
0.738106
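The header only declares _GeodeticToEcef; for reference, a standalone sketch of the textbook WGS84 conversion such a method typically performs, using the same constants as GeoConstants (this is the standard formula, not necessarily the exact implementation; the (lon, lat, alt) layout of llh is an assumption):

#include <cmath>
#include <pxr/base/gf/vec3d.h>
PXR_NAMESPACE_USING_DIRECTIVE
// Sketch: geodetic (longitude, latitude in degrees, altitude in meters) -> ECEF.
static GfVec3d GeodeticToEcefSketch(const GfVec3d& llh)
{
    constexpr double a   = 6378137.0;            // semi-major axis (m)
    constexpr double f   = 1.0 / 298.257223563;  // flattening
    constexpr double e2  = f * (2.0 - f);        // first eccentricity squared
    constexpr double rad = M_PI / 180.0;
    const double lon = llh[0] * rad;
    const double lat = llh[1] * rad;
    const double h   = llh[2];
    const double sinLat = std::sin(lat);
    // prime vertical radius of curvature at this latitude
    const double N = a / std::sqrt(1.0 - e2 * sinLat * sinLat);
    return GfVec3d((N + h) * std::cos(lat) * std::cos(lon),
                   (N + h) * std::cos(lat) * std::sin(lon),
                   (N * (1.0 - e2) + h) * sinLat);
}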
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialSceneIndex.h
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef OMNI_GEO_SCENE_INDEX_H_ #define OMNI_GEO_SCENE_INDEX_H_ #include <pxr/pxr.h> #include <pxr/usd/sdf/pathTable.h> #include <pxr/imaging/hd/filteringSceneIndex.h> #include "api.h" PXR_NAMESPACE_OPEN_SCOPE TF_DECLARE_REF_PTRS(OmniGeospatialSceneIndex); /// /// \class OmniGeospatialSceneIndex /// /// A scene index responsible for observing an input flattened scene /// index and producing a comparable scene in which geospatial transforms /// have been applied to prims with geospatial state attached to them /// and for updating the transform of their children as needed. /// /// Note that with Render Delegate 2.0 and the ability to pull data /// from a non-flattened scene, this implementation will have to be /// revisited to work with the unflattened xform representation of /// the hydra prims. /// class OmniGeospatialSceneIndex : public HdSingleInputFilteringSceneIndexBase { public: OMNIGEOSCENEINDEX_API static OmniGeospatialSceneIndexRefPtr New(const HdSceneIndexBaseRefPtr& inputSceneIndex, const HdContainerDataSourceHandle& inputArgs = nullptr); OMNIGEOSCENEINDEX_API ~OmniGeospatialSceneIndex() override; OMNIGEOSCENEINDEX_API HdSceneIndexPrim GetPrim(const SdfPath& primPath) const override; OMNIGEOSCENEINDEX_API SdfPathVector GetChildPrimPaths(const SdfPath& primPath) const override; protected: OmniGeospatialSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex, const HdContainerDataSourceHandle& inputArgs); // these three are provided by HdSingleInputFilteringSceneIndexBase // and must be overridden by inheritors virtual void _PrimsAdded(const HdSceneIndexBase& sender, const HdSceneIndexObserver::AddedPrimEntries& entries) override; virtual void _PrimsRemoved(const HdSceneIndexBase& sender, const HdSceneIndexObserver::RemovedPrimEntries& entries) override; virtual void _PrimsDirtied(const HdSceneIndexBase& sender, const HdSceneIndexObserver::DirtiedPrimEntries& entries) override; private: SdfPathTable<HdSceneIndexPrim>::_IterBoolPair _IsPrimWrapped(const SdfPath& primPath) const; HdSceneIndexPrim& _WrapPrim(const SdfPath& primPath, const HdSceneIndexPrim& hdPrim) const; void _DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators, HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries); /*HdContainerDataSourceHandle _ComputeDataSource( const SdfPath& primPath, const HdContainerDataSourceHandle& primDataSource) const; void _ComputeChildDataSources(const SdfPath& parentPath, const HdContainerDataSourceHandle& parentDataSource) const; HdContainerDataSourceHandle _ComputeMatrixDependenciesDataSource( const SdfPath& primPath) const;*/ private: // marked as mutable because it is an internal cache // that is written to on-demand from the GetPrim method // which is a const method by interface definition in HdSceneIndexBase mutable SdfPathTable<HdSceneIndexPrim> _wrappedPrims; }; PXR_NAMESPACE_CLOSE_SCOPE #endif
3,668
C
36.438775
146
0.773446
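A sketch of where this filtering scene index sits: it is created over an already-flattened input scene and queried like any other scene index (flattenedSceneIndex and the prim path are assumed/hypothetical):

#include <pxr/imaging/hd/sceneIndex.h>
OmniGeospatialSceneIndexRefPtr geoIndex =
    OmniGeospatialSceneIndex::New(flattenedSceneIndex);  // assumed input scene index
// prims are wrapped lazily on first access; non-Xformable prims pass through
HdSceneIndexPrim prim = geoIndex->GetPrim(SdfPath("/World/Building"));  // hypothetical path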
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionSchema.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/imaging/hd/retainedDataSource.h> #include "referencePositionSchema.h" PXR_NAMESPACE_OPEN_SCOPE TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialWGS84ReferencePositionSchemaTokens, HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS); HdTokenDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetTangentPlane() { return _GetTypedDataSource<HdTokenDataSource>( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane); } HdVec3dDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetReferencePosition() { return _GetTypedDataSource<HdVec3dDataSource>( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition); } HdVec3dDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetOrientation() { return _GetTypedDataSource<HdVec3dDataSource>( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation); } HdTokenDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetStageUpAxis() { return _GetTypedDataSource<HdTokenDataSource>( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis); } HdDoubleDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetStageMetersPerUnit() { return _GetTypedDataSource<HdDoubleDataSource>( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit); } HdOmniGeospatialWGS84ReferencePositionSchema HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent( const HdContainerDataSourceHandle& fromParentContainer) { if (fromParentContainer == nullptr) { return HdOmniGeospatialWGS84ReferencePositionSchema(nullptr); } return HdOmniGeospatialWGS84ReferencePositionSchema( HdContainerDataSource::Cast(fromParentContainer->Get( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi)) ); } const HdDataSourceLocator& HdOmniGeospatialWGS84ReferencePositionSchema::GetDefaultLocator() { static const HdDataSourceLocator locator( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi ); return locator; } HdContainerDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::BuildRetained( const HdTokenDataSourceHandle& tangentPlane, const HdVec3dDataSourceHandle& referencePosition, const HdVec3dDataSourceHandle& orientation, const HdTokenDataSourceHandle& stageUpAxis, const HdDoubleDataSourceHandle& stageMetersPerUnit) { TfToken names[5]; HdDataSourceBaseHandle values[5]; size_t count = 0; if (tangentPlane != nullptr) { names[count] = HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane; values[count] = tangentPlane; count++; } if (referencePosition != nullptr) { names[count] = HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition; values[count] = referencePosition; count++; } if (orientation != nullptr) { names[count] = HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation; values[count] = orientation; count++; } if (stageUpAxis != nullptr) { names[count] = HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis; 
values[count] = stageUpAxis; count++; } if (stageMetersPerUnit != nullptr) { names[count] = HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit; values[count] = stageMetersPerUnit; count++; } return HdRetainedContainerDataSource::New(count, names, values); } PXR_NAMESPACE_CLOSE_SCOPE
4,221
C++
33.048387
105
0.773513
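Since BuildRetained skips null inputs, a sparse or full container can be assembled for tests; a sketch with purely illustrative values (the token spellings below are placeholders, not authored schema values):

#include <pxr/imaging/hd/retainedDataSource.h>
HdContainerDataSourceHandle ds =
    HdOmniGeospatialWGS84ReferencePositionSchema::BuildRetained(
        HdRetainedTypedSampledDataSource<TfToken>::New(TfToken("up")),              // tangentPlane (illustrative)
        HdRetainedTypedSampledDataSource<GfVec3d>::New(GfVec3d(-122.4, 37.8, 0.0)), // lon/lat/alt
        HdRetainedTypedSampledDataSource<GfVec3d>::New(GfVec3d(0.0, 0.0, 0.0)),     // orientation
        HdRetainedTypedSampledDataSource<TfToken>::New(TfToken("Y")),               // stageUpAxis
        HdRetainedTypedSampledDataSource<double>::New(0.01));                       // stageMetersPerUnit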
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionSchema.h
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_ #define HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_ #include <pxr/imaging/hd/schema.h> #include <pxr/imaging/hd/dataSourceLocator.h> #include "api.h" PXR_NAMESPACE_OPEN_SCOPE //----------------------------------------------------------------------------- #define HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS \ (localPositionApi) \ (position) \ TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialWGS84LocalPositionSchemaTokens, OMNIGEOSCENEINDEX_API, HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS); //----------------------------------------------------------------------------- class HdOmniGeospatialWGS84LocalPositionSchema : public HdSchema { public: HdOmniGeospatialWGS84LocalPositionSchema(HdContainerDataSourceHandle container) : HdSchema(container) { } OMNIGEOSCENEINDEX_API HdVec3dDataSourceHandle GetPosition(); OMNIGEOSCENEINDEX_API static HdOmniGeospatialWGS84LocalPositionSchema GetFromParent( const HdContainerDataSourceHandle& fromParentContainer); OMNIGEOSCENEINDEX_API static const HdDataSourceLocator& GetDefaultLocator(); OMNIGEOSCENEINDEX_API static HdContainerDataSourceHandle BuildRetained( const HdVec3dDataSourceHandle& position ); }; PXR_NAMESPACE_CLOSE_SCOPE #endif // HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_
1,985
C
32.661016
95
0.716877
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionDataSource.h
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_ #define HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_ #include <pxr/imaging/hd/dataSource.h> #include <pxr/usdImaging/usdImaging/dataSourceStageGlobals.h> #include <omniGeospatial/wGS84ReferencePositionAPI.h> #include "referencePositionSchema.h" PXR_NAMESPACE_OPEN_SCOPE class HdOmniGeospatialWGS84ReferencePositionDataSource : public HdContainerDataSource { public: HD_DECLARE_DATASOURCE(HdOmniGeospatialWGS84ReferencePositionDataSource); HdOmniGeospatialWGS84ReferencePositionDataSource(const UsdPrim& prim, const UsdImagingDataSourceStageGlobals& stageGlobals); TfTokenVector GetNames() override; HdDataSourceBaseHandle Get(const TfToken& name) override; #if PXR_VERSION < 2302 bool Has(const TfToken& name) override; #endif private: OmniGeospatialWGS84ReferencePositionAPI _referencePositionApi; const UsdImagingDataSourceStageGlobals& _stageGlobals; template <typename T> class _StageDataSource : public HdTypedSampledDataSource<T> { public: HD_DECLARE_DATASOURCE(_StageDataSource<T>); VtValue GetValue(HdSampledDataSource::Time shutterOffset) override { return VtValue(GetTypedValue(shutterOffset)); } T GetTypedValue(HdSampledDataSource::Time shutterOffset) override { return _value; } bool GetContributingSampleTimesForInterval( HdSampledDataSource::Time startTime, HdSampledDataSource::Time endTime, std::vector<HdSampledDataSource::Time>* outSampleTimes) override { return false; } private: _StageDataSource(const T& value); T _value; }; }; HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialWGS84ReferencePositionDataSource); PXR_NAMESPACE_CLOSE_SCOPE #endif // HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_
2,546
C
30.060975
85
0.739199
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionAPIAdapter.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/imaging/hd/retainedDataSource.h> #include <omniGeospatial/wGS84ReferencePositionAPI.h> #include "referencePositionAPIAdapter.h" #include "referencePositionDataSource.h" #include "referencePositionSchema.h" PXR_NAMESPACE_OPEN_SCOPE TF_REGISTRY_FUNCTION(TfType) { typedef OmniGeospatialWGS84ReferencePositionAPIAdapter Adapter; TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >(); t.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter> >(); } #if PXR_VERSION >= 2302 HdContainerDataSourceHandle OmniGeospatialWGS84ReferencePositionAPIAdapter::GetImagingSubprimData( const UsdPrim& prim, const TfToken& subprim, const TfToken& appliedInstanceName, const UsdImagingDataSourceStageGlobals& stageGlobals) #else HdContainerDataSourceHandle OmniGeospatialWGS84ReferencePositionAPIAdapter::GetImagingSubprimData( const TfToken& subprim, const UsdPrim& prim, const TfToken& appliedInstanceName, const UsdImagingDataSourceStageGlobals& stageGlobals) #endif { // at the point we are invoked here, the stage scene index has already determined // that the API schema applies to the prim, so we can safely create our // data source if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty()) { // there shouldn't be a subprim or an applied instance name // if there is, we don't really know what to do with it // so we return null to indicate there is no data source // for this prim setup return nullptr; } return HdRetainedContainerDataSource::New( HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi, HdOmniGeospatialWGS84ReferencePositionDataSource::New(prim, stageGlobals) ); } #if PXR_VERSION >= 2302 HdDataSourceLocatorSet OmniGeospatialWGS84ReferencePositionAPIAdapter::InvalidateImagingSubprim( const UsdPrim& prim, const TfToken& subprim, const TfToken& appliedInstanceName, const TfTokenVector& properties) #else HdDataSourceLocatorSet OmniGeospatialWGS84ReferencePositionAPIAdapter::InvalidateImagingSubprim( const TfToken& subprim, const TfToken& appliedInstanceName, const TfTokenVector& properties) #endif { if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty()) { return HdDataSourceLocatorSet(); } TfToken geospatialPrefix("omni:geospatial:wgs84:reference"); for (const TfToken& propertyName : properties) { if (TfStringStartsWith(propertyName, geospatialPrefix)) { return HdOmniGeospatialWGS84ReferencePositionSchema::GetDefaultLocator(); } } return HdDataSourceLocatorSet(); } PXR_NAMESPACE_CLOSE_SCOPE
3,306
C++
33.810526
98
0.753781
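A sketch of the invalidation round trip: any changed attribute under the geospatial prefix dirties the whole referencePositionApi container rather than a single field (the property name and prim are assumptions; the PXR_VERSION >= 2302 signature is shown):

OmniGeospatialWGS84ReferencePositionAPIAdapter adapter;
TfTokenVector changed = {
    TfToken("omni:geospatial:wgs84:reference:referencePosition") };  // assumed attribute name
HdDataSourceLocatorSet dirtied = adapter.InvalidateImagingSubprim(
    prim, TfToken(), TfToken(), changed);  // prim assumed available
// dirtied now holds HdOmniGeospatialWGS84ReferencePositionSchema::GetDefaultLocator()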
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialDataSource.h
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_ #define HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_ #include <pxr/imaging/hd/dataSource.h> #include <pxr/imaging/hd/dataSourceTypeDefs.h> #include <pxr/imaging/hd/sceneIndex.h> #include "api.h" PXR_NAMESPACE_OPEN_SCOPE //----------------------------------------------------------------------------- #define HDOMNIGEOSPATIALDATASOURCE_TOKENS \ (geospatialPreservedXform) TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialDataSourceTokens, OMNIGEOSCENEINDEX_API, HDOMNIGEOSPATIALDATASOURCE_TOKENS); //----------------------------------------------------------------------------- /// \class HdOmniGeospatialDataSource /// /// A datasource representing a wrapped view of an existing flattened /// data source where the xform token is intercepted and a new geospatial /// matrix dynamically calculated. /// class HdOmniGeospatialDataSource : public HdContainerDataSource { public: HD_DECLARE_DATASOURCE(HdOmniGeospatialDataSource); HdOmniGeospatialDataSource(const HdSceneIndexBase& sceneIndex, const SdfPath& primPath, HdContainerDataSourceHandle wrappedDataSource); void UpdateWrappedDataSource(HdContainerDataSourceHandle wrappedDataSource); // data source overrides TfTokenVector GetNames() override; HdDataSourceBaseHandle Get(const TfToken& name) override; #if PXR_VERSION < 2302 bool Has(const TfToken& name) override; #endif // determines if the data source would be dirtied based on the locators given bool IsPrimDirtied(const HdDataSourceLocatorSet& locators); private: bool _HasGeospatialInformation(HdContainerDataSourceHandle dataSource); HdDataSourceBaseHandle _ComputeGeospatialXform(); private: const HdSceneIndexBase& _sceneIndex; SdfPath _primPath; HdContainerDataSourceHandle _wrappedDataSource; // cached computed datasources HdContainerDataSourceAtomicHandle _computedGeospatialPrimDataSource; HdContainerDataSourceAtomicHandle _computedGeospatialDependentDataSource; }; HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialDataSource); PXR_NAMESPACE_CLOSE_SCOPE #endif // HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_
2,737
C
31.987951
91
0.739496
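A sketch of how a scene index might use IsPrimDirtied when forwarding dirty notices (geoDataSource is an assumed handle to the wrapper declared above):

#include <pxr/imaging/hd/xformSchema.h>
HdDataSourceLocatorSet locators;
locators.insert(HdXformSchema::GetDefaultLocator());
if (geoDataSource->IsPrimDirtied(locators))  // assumed HdOmniGeospatialDataSourceHandle
{
    // the cached computed xform data sources were cleared;
    // the next Get(xform) call recomputes them lazily
}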
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionAPIAdapter.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/imaging/hd/retainedDataSource.h> #include <omniGeospatial/wGS84LocalPositionAPI.h> #include <omniGeospatial/wGS84ReferencePositionAPI.h> #include "localPositionAPIAdapter.h" #include "localPositionDataSource.h" #include "localPositionSchema.h" #include "referencePositionDataSource.h" #include "referencePositionSchema.h" PXR_NAMESPACE_OPEN_SCOPE TF_REGISTRY_FUNCTION(TfType) { typedef OmniGeospatialWGS84LocalPositionAPIAdapter Adapter; TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >(); t.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter> >(); } #if PXR_VERSION >= 2302 HdContainerDataSourceHandle OmniGeospatialWGS84LocalPositionAPIAdapter::GetImagingSubprimData( const UsdPrim& prim, const TfToken& subprim, const TfToken& appliedInstanceName, const UsdImagingDataSourceStageGlobals& stageGlobals) #else HdContainerDataSourceHandle OmniGeospatialWGS84LocalPositionAPIAdapter::GetImagingSubprimData( const TfToken& subprim, const UsdPrim& prim, const TfToken& appliedInstanceName, const UsdImagingDataSourceStageGlobals& stageGlobals) #endif { // at the point we are invoked here, the stage scene index has already determined // that the API schema applies to the prim, so we can safely create our // data source if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty()) { // there shouldn't be a subprim or an applied instance name // if there is, we don't really know what to do with it // so we return null to indicate there is no data source // for this prim setup return nullptr; } // to make it a bit easier, we will traverse the parent structure here to find a geodetic root // rather than traversing it in the scene index - this is because we have all of the information // we need at the point where this prim is getting processed HdDataSourceBaseHandle referencePositionDataSource = nullptr; for (UsdPrim parentPrim = prim; !parentPrim.IsPseudoRoot(); parentPrim = parentPrim.GetParent()) { if (parentPrim.HasAPI<OmniGeospatialWGS84ReferencePositionAPI>()) { // bake the geodetic root information into this local prim referencePositionDataSource = HdOmniGeospatialWGS84ReferencePositionDataSource::New(parentPrim, stageGlobals); break; } } // only process local position if we found a geodetic root - if we didn't // it means that this is an unrooted local position so we keep whatever // transform information the prim would have had otherwise if (referencePositionDataSource != nullptr) { return HdRetainedContainerDataSource::New( HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi, HdOmniGeospatialWGS84LocalPositionDataSource::New(prim, stageGlobals), HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi, referencePositionDataSource ); } return nullptr; } #if PXR_VERSION >= 2302 HdDataSourceLocatorSet OmniGeospatialWGS84LocalPositionAPIAdapter::InvalidateImagingSubprim( const UsdPrim& prim, const TfToken& subprim, const TfToken& appliedInstanceName, const 
TfTokenVector& properties) #else HdDataSourceLocatorSet OmniGeospatialWGS84LocalPositionAPIAdapter::InvalidateImagingSubprim( const TfToken& subprim, const TfToken& appliedInstanceName, const TfTokenVector& properties) #endif { if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty()) { return HdDataSourceLocatorSet(); } TfToken geospatialPrefix("omni:geospatial:wgs84:local"); for (const TfToken& propertyName : properties) { if (TfStringStartsWith(propertyName, geospatialPrefix)) { return HdOmniGeospatialWGS84LocalPositionSchema::GetDefaultLocator(); } } return HdDataSourceLocatorSet(); } PXR_NAMESPACE_CLOSE_SCOPE
4,574
C++
36.809917
122
0.740927
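The authoring pattern this adapter expects, sketched against the USD API (paths and values are illustrative, and Apply() is assumed to be the standard generated entry point for these single-apply schemas):

#include <pxr/usd/usd/stage.h>
#include <omniGeospatial/wGS84ReferencePositionAPI.h>
#include <omniGeospatial/wGS84LocalPositionAPI.h>
UsdStageRefPtr stage = UsdStage::CreateInMemory();
UsdPrim world = stage->DefinePrim(SdfPath("/World"), TfToken("Xform"));
UsdPrim site = stage->DefinePrim(SdfPath("/World/Site"), TfToken("Xform"));
// a geodetic root on an ancestor...
OmniGeospatialWGS84ReferencePositionAPI refApi =
    OmniGeospatialWGS84ReferencePositionAPI::Apply(world);  // Apply() assumed from codegen
refApi.GetReferencePositionAttr().Set(GfVec3d(-122.4, 37.8, 0.0));
// ...and a local position on the prim itself, so the parent walk above succeeds
OmniGeospatialWGS84LocalPositionAPI localApi =
    OmniGeospatialWGS84LocalPositionAPI::Apply(site);
localApi.GetPositionAttr().Set(GfVec3d(-122.4001, 37.8002, 10.0));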
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionDataSource.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/usd/usdGeom/metrics.h> #include <pxr/usd/usdGeom/tokens.h> #include <pxr/usdImaging/usdImaging/dataSourceAttribute.h> #include "referencePositionDataSource.h" PXR_NAMESPACE_OPEN_SCOPE HdOmniGeospatialWGS84ReferencePositionDataSource::HdOmniGeospatialWGS84ReferencePositionDataSource( const UsdPrim& prim, const UsdImagingDataSourceStageGlobals& stageGlobals) : _stageGlobals(stageGlobals) { _referencePositionApi = OmniGeospatialWGS84ReferencePositionAPI(prim); } #if PXR_VERSION < 2302 bool HdOmniGeospatialWGS84ReferencePositionDataSource::Has(const TfToken& name) { return (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane) || (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition) || (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation) || (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis) || (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit); } #endif TfTokenVector HdOmniGeospatialWGS84ReferencePositionDataSource::GetNames() { // return the hydra attribute names this data source is responsible for TfTokenVector names; names.push_back(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane); names.push_back(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition); names.push_back(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation); names.push_back(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis); names.push_back(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit); return names; } HdDataSourceBaseHandle HdOmniGeospatialWGS84ReferencePositionDataSource::Get(const TfToken& name) { // retrieves the data source values for the attributes this data source // supports if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane) { return UsdImagingDataSourceAttribute<TfToken>::New( _referencePositionApi.GetTangentPlaneAttr(), _stageGlobals); } else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition) { return UsdImagingDataSourceAttribute<GfVec3d>::New( _referencePositionApi.GetReferencePositionAttr(), _stageGlobals); } else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation) { return UsdImagingDataSourceAttribute<GfVec3d>::New( _referencePositionApi.GetOrientationAttr(), _stageGlobals); } else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis) { TfToken upAxis = UsdGeomTokens->y; UsdStageWeakPtr stage = _referencePositionApi.GetPrim().GetStage(); if (stage != nullptr) { upAxis = UsdGeomGetStageUpAxis(stage); } return _StageDataSource<TfToken>::New(upAxis); } else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit) { double mpu = 0.01; UsdStageWeakPtr stage = _referencePositionApi.GetPrim().GetStage(); if (stage != nullptr) { mpu = UsdGeomGetStageMetersPerUnit(stage); } return 
_StageDataSource<double>::New(mpu); } // this is a name we don't support return nullptr; } template <typename T> HdOmniGeospatialWGS84ReferencePositionDataSource::_StageDataSource<T>::_StageDataSource(const T& value) : _value(value) { } PXR_NAMESPACE_CLOSE_SCOPE
4,155
C++
38.207547
119
0.754513
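The stage-metric fallbacks above, isolated as a sketch: when no stage is reachable the data source assumes Y-up and centimeters (metersPerUnit = 0.01), matching common USD defaults (prim is assumed available):

#include <pxr/usd/usdGeom/metrics.h>
#include <pxr/usd/usdGeom/tokens.h>
TfToken upAxis = UsdGeomTokens->y;  // fallback
double metersPerUnit = 0.01;        // fallback (centimeters)
if (UsdStageWeakPtr stage = prim.GetStage())
{
    upAxis = UsdGeomGetStageUpAxis(stage);
    metersPerUnit = UsdGeomGetStageMetersPerUnit(stage);
}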
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionAPIAdapter.h
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_ #define OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_ #include <pxr/pxr.h> #include <pxr/usdImaging/usdImaging/apiSchemaAdapter.h> #include "api.h" PXR_NAMESPACE_OPEN_SCOPE class OmniGeospatialWGS84ReferencePositionAPIAdapter : public UsdImagingAPISchemaAdapter { public: using BaseAdapter = UsdImagingAPISchemaAdapter; #if PXR_VERSION >= 2302 OMNIGEOSCENEINDEX_API HdContainerDataSourceHandle GetImagingSubprimData( const UsdPrim& prim, const TfToken& subprim, const TfToken& appliedInstanceName, const UsdImagingDataSourceStageGlobals& stageGlobals ) override; #else OMNIGEOSCENEINDEX_API HdContainerDataSourceHandle GetImagingSubprimData( const TfToken& subprim, const UsdPrim& prim, const TfToken& appliedInstanceName, const UsdImagingDataSourceStageGlobals& stageGlobals ) override; #endif #if PXR_VERSION >= 2302 OMNIGEOSCENEINDEX_API HdDataSourceLocatorSet InvalidateImagingSubprim( const UsdPrim& prim, const TfToken& subprim, const TfToken& appliedInstanceName, const TfTokenVector& properties ) override; #else OMNIGEOSCENEINDEX_API HdDataSourceLocatorSet InvalidateImagingSubprim( const TfToken& subprim, const TfToken& appliedInstanceName, const TfTokenVector& properties ) override; #endif }; PXR_NAMESPACE_CLOSE_SCOPE #endif // OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_
2,144
C
30.544117
88
0.747201
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedDependentDataSource.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/imaging/hd/xformSchema.h> #include "geospatialDataSource.h" #include "computedDependentDataSource.h" PXR_NAMESPACE_OPEN_SCOPE HdOmniGeospatialComputedDependentDataSource::HdOmniGeospatialComputedDependentDataSource( HdContainerDataSourceHandle inputDataSource, HdContainerDataSourceHandle parentDataSource) : _inputDataSource(inputDataSource), _parentDataSource(parentDataSource) { _matrixDataSource = HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::New( _inputDataSource, parentDataSource); } #if PXR_VERSION < 2302 bool HdOmniGeospatialComputedDependentDataSource::Has(const TfToken& name) { return (name == HdXformSchemaTokens->resetXformStack) || (name == HdXformSchemaTokens->matrix); } #endif TfTokenVector HdOmniGeospatialComputedDependentDataSource::GetNames() { // this container data source retrieves the xform tokens TfTokenVector result; result.push_back(HdXformSchemaTokens->resetXformStack); result.push_back(HdXformSchemaTokens->matrix); return result; } HdDataSourceBaseHandle HdOmniGeospatialComputedDependentDataSource::Get(const TfToken& name) { if (_inputDataSource != nullptr) { if (name == HdXformSchemaTokens->resetXformStack) { // we don't modify the underlying time-sampled data // for resetXformStack, so return that directly HdXformSchema xformSchema = HdXformSchema::GetFromParent(_inputDataSource); return xformSchema.IsDefined() ? 
            xformSchema.GetResetXformStack() : nullptr;
        }
        else if (name == HdXformSchemaTokens->matrix)
        {
            return _matrixDataSource;
        }
    }

    return nullptr;
}

HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::_GeospatiallyAffectedMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource,
    HdContainerDataSourceHandle parentDataSource) :
    _inputDataSource(inputDataSource),
    _parentDataSource(parentDataSource)
{
}

VtValue HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}

GfMatrix4d HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return this->_ComputeTransformedMatrix(shutterOffset);
}

bool HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime, Time endTime, std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle sources[] = {
        this->_GetMatrixSource(),
        this->_GetParentMatrixSource()
    };

    return HdGetMergedContributingSampleTimesForInterval(
        TfArraySize(sources), sources, startTime, endTime, outSampleTimes);
}

HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetMatrixSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetMatrix();
}

HdBoolDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetResetXformStackSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetResetXformStack();
}

HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentMatrixSource() const
{
    return HdXformSchema::GetFromParent(_parentDataSource).GetMatrix();
}

HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentOriginalMatrixSource() const
{
    // the parent data source here should be a geospatial data source
    // but in the event it is not, this method will simply return the same
    // matrix as that of _GetParentMatrixSource
    HdOmniGeospatialDataSourceHandle geospatialDataSource = HdOmniGeospatialDataSource::Cast(_parentDataSource);
    if (geospatialDataSource != nullptr)
    {
        HdContainerDataSourceHandle xformDataSource = HdContainerDataSource::Cast(
            geospatialDataSource->Get(HdOmniGeospatialDataSourceTokens->geospatialPreservedXform));
        if (xformDataSource == nullptr)
        {
            TF_WARN("Parent data source could not retrieve preserved xform!");
            return this->_GetParentMatrixSource();
        }

        HdMatrixDataSourceHandle matrixDataSource = HdMatrixDataSource::Cast(
            xformDataSource->Get(HdXformSchemaTokens->matrix));
        if (matrixDataSource == nullptr)
        {
            TF_WARN("Xform schema not defined on preserved container data source!");
        }

        return (matrixDataSource != nullptr) ?
            matrixDataSource : this->_GetParentMatrixSource();
    }
    else
    {
        TF_WARN("Parent data source has no geospatial data source!");
    }

    return this->_GetParentMatrixSource();
}

GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }

    return GfMatrix4d(1.0);
}

bool HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetResetXformStack(const Time shutterOffset) const
{
    HdBoolDataSourceHandle dataSource = this->_GetResetXformStackSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }

    return false;
}

GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetParentMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }

    return GfMatrix4d(1.0);
}

GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentOriginalMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetParentOriginalMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }

    return GfMatrix4d(1.0);
}

GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_ComputeTransformedMatrix(const Time shutterOffset) const
{
    // this prim did not have geospatial information applied to it,
    // but it is the child of one that did, so we compute the updated
    // value based on the recomputed value of the parent
    // however, we actually only want to do this if this prim does
    // not have a resetXformStack applied
    bool resetXformStack = this->_GetResetXformStack(shutterOffset);
    if (!resetXformStack)
    {
        // to compute the affected matrix, we first need to acquire the parent information
        GfMatrix4d flattenedParentTransform = this->_GetParentMatrix(shutterOffset);
        GfMatrix4d originalParentTransform = this->_GetParentOriginalMatrix(shutterOffset);

        // since we are dealing with flattened transformations, we have to recover
        // the local transform of the prim data source in question
        // we can do this by knowing the prim's flattened transform
        // and the original transform of its parent (the _parentDataSource)
        // Let FT be the flattened transform, P the parent's flattened transform,
        // and LT the child's local transform. Under USD's row-vector convention
        // the flattened transform was computed as FT = (LT)(P), thus to recover
        // LT we divide out by P, which results in LT = (FT)(P)^-1
        GfMatrix4d inverseParentTransform = originalParentTransform.GetInverse();
        GfMatrix4d originalChildTransform = this->_GetMatrix(shutterOffset);
        GfMatrix4d childLocalTransform = originalChildTransform * inverseParentTransform;

        // once we have the local transform, we can re-apply the new
        // flattened parent transform - this is the new geospatially affected
        // transform of the child (local first, then the recomputed parent,
        // consistent with the FT = (LT)(P) convention used for the recovery)
        return childLocalTransform * flattenedParentTransform;
    }

    // if resetXformStack was true, the original flattened transform
    // of the input data source is valid here and we don't recompute
    return this->_GetMatrix(shutterOffset);
}

PXR_NAMESPACE_CLOSE_SCOPE
9,285
C++
35.996016
128
0.74238
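A small numeric check of the local-transform recovery used above, assuming USD's row-vector convention (flattened = local * parent); with pure translations the values are easy to follow:

#include <pxr/base/gf/matrix4d.h>
GfMatrix4d parent(1.0), local(1.0);
parent.SetTranslate(GfVec3d(100.0, 0.0, 0.0));
local.SetTranslate(GfVec3d(0.0, 5.0, 0.0));
GfMatrix4d flattened = local * parent;                   // translation (100, 5, 0)
GfMatrix4d recovered = flattened * parent.GetInverse();  // back to (0, 5, 0)
// re-basing onto a recomputed parent preserves the child's local offset
GfMatrix4d newParent(1.0);
newParent.SetTranslate(GfVec3d(200.0, 0.0, 0.0));
GfMatrix4d rebased = recovered * newParent;              // (200, 5, 0)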
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionSchema.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/imaging/hd/retainedDataSource.h> #include "localPositionSchema.h" PXR_NAMESPACE_OPEN_SCOPE TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialWGS84LocalPositionSchemaTokens, HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS); HdVec3dDataSourceHandle HdOmniGeospatialWGS84LocalPositionSchema::GetPosition() { return _GetTypedDataSource<HdVec3dDataSource>( HdOmniGeospatialWGS84LocalPositionSchemaTokens->position); } HdOmniGeospatialWGS84LocalPositionSchema HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent( const HdContainerDataSourceHandle& fromParentContainer) { if (fromParentContainer == nullptr) { return HdOmniGeospatialWGS84LocalPositionSchema(nullptr); } return HdOmniGeospatialWGS84LocalPositionSchema( HdContainerDataSource::Cast(fromParentContainer->Get( HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi)) ); } const HdDataSourceLocator& HdOmniGeospatialWGS84LocalPositionSchema::GetDefaultLocator() { static const HdDataSourceLocator locator( HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi ); return locator; } HdContainerDataSourceHandle HdOmniGeospatialWGS84LocalPositionSchema::BuildRetained( const HdVec3dDataSourceHandle& position) { TfToken names[1]; HdDataSourceBaseHandle values[1]; size_t count = 0; if (position != nullptr) { names[count] = HdOmniGeospatialWGS84LocalPositionSchemaTokens->position; values[count] = position; count++; } return HdRetainedContainerDataSource::New(count, names, values); } PXR_NAMESPACE_CLOSE_SCOPE
2,240
C++
31.955882
97
0.775
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialSceneIndex.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/base/work/utils.h> #include <pxr/imaging/hd/xformSchema.h> #include <pxr/imaging/hd/retainedDataSource.h> #include <pxr/imaging/hd/overlayContainerDataSource.h> #include <pxr/imaging/hd/dependenciesSchema.h> #include "geospatialSceneIndex.h" #include "referencePositionSchema.h" #include "localPositionSchema.h" #include "geospatialDataSource.h" PXR_NAMESPACE_OPEN_SCOPE TF_DEFINE_PRIVATE_TOKENS( _tokens, (positionToXform) ); OmniGeospatialSceneIndexRefPtr OmniGeospatialSceneIndex::New( const HdSceneIndexBaseRefPtr& inputSceneIndex, const HdContainerDataSourceHandle& inputArgs) { return TfCreateRefPtr(new OmniGeospatialSceneIndex(inputSceneIndex, inputArgs)); } OmniGeospatialSceneIndex::OmniGeospatialSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex, const HdContainerDataSourceHandle& inputArgs) : HdSingleInputFilteringSceneIndexBase(inputSceneIndex) { } OmniGeospatialSceneIndex::~OmniGeospatialSceneIndex() = default; HdSceneIndexPrim OmniGeospatialSceneIndex::GetPrim(const SdfPath &primPath) const { // lookup the prim to see if we have wrapped it yet auto iterBoolPair = this->_IsPrimWrapped(primPath); if (iterBoolPair.second) { // we have it wrapped already, so return the wrapped prim return iterBoolPair.first->second; } // we haven't wrapped it yet, but we only need to wrap it // if it is Xformable - geospatial transforms have the potential // to affect anything that has a transform, so even if it is // never affected (e.g. 
resetXform is true or it is not the child
    // of a geospatially applied prim) we wrap it here for simplicity's sake
    // at the cost of an extra HdSceneIndexPrim (as in some cases
    // it will even retain its original data source)
    // note that unlike the flattening scene index we wrap lazily
    // instead of walking the tree at construction time - this is because
    // there is a low chance of geospatial information being attached
    // to a prim, and in cases where the scene isn't geospatially grounded
    // but the scene index is still applied we don't want to walk the
    // whole scene
    HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(primPath);
    HdXformSchema xformSchema = HdXformSchema::GetFromParent(sceneIndexPrim.dataSource);
    if (xformSchema.IsDefined() && !xformSchema.GetResetXformStack())
    {
        return this->_WrapPrim(primPath, sceneIndexPrim);
    }

    // otherwise we don't need to wrap it and can return it directly
    return sceneIndexPrim;
}

SdfPathVector OmniGeospatialSceneIndex::GetChildPrimPaths(const SdfPath& primPath) const
{
    // no change in topology occurs as part of this scene index
    // so we can ask the input scene to get the child prim paths directly
    return this->_GetInputSceneIndex()->GetChildPrimPaths(primPath);
}

SdfPathTable<HdSceneIndexPrim>::_IterBoolPair OmniGeospatialSceneIndex::_IsPrimWrapped(const SdfPath& primPath) const
{
    bool result = false;
    const auto it = _wrappedPrims.find(primPath);
    if (it != _wrappedPrims.end())
    {
        // because SdfPathTable inserts all parents
        // when a path gets inserted, there may be an empty
        // entry in our cache if a child path was visited first
        // to verify we have to check the prim type and data source
        if (it->second.primType != TfToken() || it->second.dataSource != nullptr)
        {
            // not an auto-insertion of the parent
            result = true;
        }
    }

    return std::make_pair(it, result);
}

HdSceneIndexPrim& OmniGeospatialSceneIndex::_WrapPrim(const SdfPath& primPath, const HdSceneIndexPrim& hdPrim) const
{
    // PRECONDITION: The table must not yet contain a wrapped prim, check via _IsPrimWrapped first!
    // wrapping a scene index prim involves creating our geospatial data source to wrap the original
    // scene index prim's data source - this will allow us to intercept the xform token to return
    // a computed geospatial transform and still provide access to the original xform via the wrapped data source
    HdContainerDataSourceHandle wrappedDataSource = HdOmniGeospatialDataSource::New(*this, primPath, hdPrim.dataSource);
    const auto it = _wrappedPrims.find(primPath);
    if (it != _wrappedPrims.end())
    {
        // in this case, the entry is there, but it was auto-created
        // by SdfPathTable, meaning it should have empty entries
        TF_VERIFY(it->second.primType == TfToken());
        TF_VERIFY(it->second.dataSource == nullptr);

        it->second.primType = hdPrim.primType;
        it->second.dataSource = std::move(wrappedDataSource);

        return it->second;
    }
    else
    {
        auto iterBoolPair = _wrappedPrims.insert(
            {
                primPath,
                HdSceneIndexPrim
                {
                    hdPrim.primType,
                    std::move(wrappedDataSource)
                }
            }
        );

        return iterBoolPair.first->second;
    }
}

void OmniGeospatialSceneIndex::_PrimsAdded(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::AddedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for(const HdSceneIndexObserver::AddedPrimEntry& entry : entries)
    {
        HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(entry.primPath);

        // cache the prim if necessary
        HdXformSchema xformSchema = HdXformSchema::GetFromParent(sceneIndexPrim.dataSource);
        if (xformSchema.IsDefined() && !xformSchema.GetResetXformStack())
        {
            auto iterBoolPair = this->_IsPrimWrapped(entry.primPath);
            if (iterBoolPair.second)
            {
                // we already wrapped this prim, so we need to update it
                HdSceneIndexPrim& wrappedPrim = iterBoolPair.first->second;
                wrappedPrim.primType = entry.primType;
                if (wrappedPrim.dataSource != nullptr)
                {
                    HdOmniGeospatialDataSource::Cast(wrappedPrim.dataSource)->UpdateWrappedDataSource(sceneIndexPrim.dataSource);
                }

                // if we updated it, we have to now see if we need
                // to dirty any cached values already in the hierarchy
                static HdDataSourceLocatorSet locators =
                {
                    HdXformSchema::GetDefaultLocator()
                };

                this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
            }
            else
            {
                // we don't yet have this prim wrapped - do so now
                this->_WrapPrim(entry.primPath, sceneIndexPrim);
            }
        }
    }

    // forward on the notification
    this->_SendPrimsAdded(entries);

    // also, if we had to dirty entries because of an insertion in the middle
    // of the stage hierarchy, send those along too
    if (!dirtyEntries.empty())
    {
        this->_SendPrimsDirtied(dirtyEntries);
    }
}

void OmniGeospatialSceneIndex::_PrimsRemoved(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::RemovedPrimEntries& entries)
{
    for (const HdSceneIndexObserver::RemovedPrimEntry& entry : entries)
    {
        if (entry.primPath.IsAbsoluteRootPath())
        {
            // removing the whole scene
            _wrappedPrims.ClearInParallel();
            TfReset(_wrappedPrims);
        }
        else
        {
            auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(entry.primPath);
            for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second; it++)
            {
                WorkSwapDestroyAsync(it->second.dataSource);
            }

            if(startEndRangeIterator.first != startEndRangeIterator.second)
            {
                _wrappedPrims.erase(startEndRangeIterator.first);
            }
        }
    }

    _SendPrimsRemoved(entries);
}

void OmniGeospatialSceneIndex::_PrimsDirtied(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::DirtiedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for (const HdSceneIndexObserver::DirtiedPrimEntry& entry : entries)
    {
        HdDataSourceLocatorSet locators;
        if
(entry.dirtyLocators.Intersects(HdXformSchema::GetDefaultLocator()))
        {
            locators.insert(HdXformSchema::GetDefaultLocator());
        }

        if (!locators.IsEmpty())
        {
            this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
        }
    }

    _SendPrimsDirtied(entries);
    if (!dirtyEntries.empty())
    {
        _SendPrimsDirtied(dirtyEntries);
    }
}

void OmniGeospatialSceneIndex::_DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators,
    HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries)
{
    // find subtree range retrieves a start / end pair of children
    // in the subtree of the given prim path
    auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(primPath);
    for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second; it++)
    {
        // if we have a valid wrapper for the prim, we need to check
        // whether it needs to be dirtied - this involves checking the
        // data sources to see if they have cached data and if so
        // this indicates it needs to be updated
        if (it->second.dataSource != nullptr)
        {
            HdOmniGeospatialDataSourceHandle geospatialDataSource =
                HdOmniGeospatialDataSource::Cast(it->second.dataSource);
            if (geospatialDataSource != nullptr && geospatialDataSource->IsPrimDirtied(locators))
            {
                if (it->first != primPath)
                {
                    dirtyEntries->emplace_back(it->first, locators);
                }
            }
        }
    }
}

PXR_NAMESPACE_CLOSE_SCOPE
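// --------------------------------------------------------------------------
// [Editor's illustrative sketch - not part of the original plugin source]
// How this filtering scene index is typically chained over an input scene
// index. The constructor above ignores the inputArgs container, so nullptr
// is passed here.

PXR_NAMESPACE_USING_DIRECTIVE

static HdSceneIndexBaseRefPtr _SketchWrapWithGeospatialIndex(
    const HdSceneIndexBaseRefPtr& inputSceneIndex)
{
    // prims fetched through the returned index present recomputed,
    // geospatially corrected transforms where applicable
    return OmniGeospatialSceneIndex::New(inputSceneIndex, nullptr);
}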
10,650
C++
36.37193
129
0.665446
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/fileFormat/edfFileFormat/edfFileFormat.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "edfFileFormat.h" #include "edfData.h" PXR_NAMESPACE_OPEN_SCOPE EdfFileFormat::EdfFileFormat() : SdfFileFormat( EdfFileFormatTokens->Id, EdfFileFormatTokens->Version, EdfFileFormatTokens->Target, EdfFileFormatTokens->Extension) { } EdfFileFormat::~EdfFileFormat() { } bool EdfFileFormat::CanRead(const std::string& filePath) const { return true; } bool EdfFileFormat::Read(SdfLayer* layer, const std::string& resolvedPath, bool metadataOnly) const { // these macros emit methods defined in the Pixar namespace // but not properly scoped, so we have to use the namespace // locally here - note this isn't strictly true since we had to open // the namespace scope anyway because the macros won't allow non-Pixar namespaces // to be used because of some auto-generated content PXR_NAMESPACE_USING_DIRECTIVE if (!TF_VERIFY(layer)) { return false; } // construct the SdfAbstractData object from the file format args // and set that as the layer data - note this is a different object // from that constructed in the InitData method - this may or may // not be an issue, something to be investigated in more detail when // working through the backend - either way we associate it with the layer // so we always have a mapping from the dynamic layer and the specific // set of parameters that created it const FileFormatArguments& args = layer->GetFileFormatArguments(); SdfAbstractDataRefPtr layerData = this->InitData(args); // inform the data provider that it's time to read the content // this is a good time for it to cache data that it needs to generate // the prim / property specs when asked for them via the data apis EdfData& edfData = dynamic_cast<EdfData&>(*layerData); bool readSuccess = edfData.Read(); if (readSuccess) { this->_SetLayerData(layer, layerData); // for now, this is dynamic content read one way from a source external system // therefore we mark that the layer is read-only // later we will remove this restriction and explore what it means to edit // data that is sourced from external data formats layer->SetPermissionToSave(false); layer->SetPermissionToEdit(false); } return readSuccess; } bool EdfFileFormat::WriteToString(const SdfLayer& layer, std::string* str, const std::string& comment) const { // this POC doesn't support writing return false; } bool EdfFileFormat::WriteToStream(const SdfSpecHandle& spec, std::ostream& out, size_t indent) const { // this POC doesn't support writing return false; } SdfAbstractDataRefPtr EdfFileFormat::InitData(const FileFormatArguments& args) const { // create the data parameters object to capture what data was used to create the layer EdfDataParameters parameters = EdfDataParameters::FromFileFormatArgs(args); return EdfData::CreateFromParameters(parameters); } bool EdfFileFormat::_ShouldSkipAnonymousReload() const { return false; } bool EdfFileFormat::_ShouldReadAnonymousLayers() const { return true; } void EdfFileFormat::ComposeFieldsForFileFormatArguments(const 
std::string& assetPath,
    const PcpDynamicFileFormatContext& context,
    FileFormatArguments* args,
    VtValue* contextDependencyData) const
{
    VtValue val;
    if (context.ComposeValue(EdfFileFormatTokens->Params, &val) && val.IsHolding<VtDictionary>())
    {
        // the composition engine has composed the metadata values of the prim appropriately
        // for the currently composed stage, we read these metadata values that were composed
        // and make them part of the file format arguments to load the dependent layer
        VtDictionary dict = val.UncheckedGet<VtDictionary>();
        const VtValue* dictVal = TfMapLookupPtr(dict, EdfDataParametersTokens->dataProviderId);
        if (dictVal != nullptr)
        {
            (*args)[EdfDataParametersTokens->dataProviderId] = dictVal->UncheckedGet<std::string>();
        }

        // unfortunately, FileFormatArguments is a typedef for a map<string, string>
        // which means we have to unpack the provider arguments dictionary
        // to keep the unpacking simple, we assume for now that providerArgs
        // is itself a dictionary containing only string keys and values
        // we can remove this restriction later for simple types (using TfStringify)
        // but it would need some work (recursively) for embedded lists and dictionary values
        dictVal = TfMapLookupPtr(dict, EdfDataParametersTokens->providerArgs);
        if (dictVal != nullptr)
        {
            std::string prefix = EdfDataParametersTokens->providerArgs.GetString();
            VtDictionary providerArgs = dictVal->UncheckedGet<VtDictionary>();
            for (VtDictionary::iterator it = providerArgs.begin(); it != providerArgs.end(); it++)
            {
                (*args)[prefix + ":" + it->first] = it->second.UncheckedGet<std::string>();
            }
        }
    }
}

bool EdfFileFormat::CanFieldChangeAffectFileFormatArguments(const TfToken& field, const VtValue& oldValue,
    const VtValue& newValue, const VtValue& contextDependencyData) const
{
    const VtDictionary& oldDictionaryValue = oldValue.IsHolding<VtDictionary>() ?
        oldValue.UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
    const VtDictionary& newDictionaryValue = newValue.IsHolding<VtDictionary>() ?
        newValue.UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();

    // nothing to do if both metadata values are empty
    if (oldDictionaryValue.empty() && newDictionaryValue.empty())
    {
        return false;
    }

    // our layer is new if:
    // 1. there is a new provider
    // 2. there is a change to the value of the provider specific data
    const VtValue* oldProviderId = TfMapLookupPtr(oldDictionaryValue, EdfDataParametersTokens->dataProviderId);
    const VtValue* newProviderId = TfMapLookupPtr(newDictionaryValue, EdfDataParametersTokens->dataProviderId);
    if (oldProviderId != nullptr && newProviderId != nullptr)
    {
        if (oldProviderId->UncheckedGet<std::string>() != newProviderId->UncheckedGet<std::string>())
        {
            // different providers!
            return true;
        }
        else
        {
            // same provider, but the specific provider metadata may have changed
            // note the null checks: providerArgs may legitimately be absent from
            // either dictionary, in which case we treat it as empty
            const VtValue* oldProviderDictionaryValue = TfMapLookupPtr(oldDictionaryValue, EdfDataParametersTokens->providerArgs);
            const VtValue* newProviderDictionaryValue = TfMapLookupPtr(newDictionaryValue, EdfDataParametersTokens->providerArgs);
            const VtDictionary& oldProviderDictionary =
                (oldProviderDictionaryValue != nullptr && oldProviderDictionaryValue->IsHolding<VtDictionary>()) ?
                oldProviderDictionaryValue->UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
            const VtDictionary& newProviderDictionary =
                (newProviderDictionaryValue != nullptr && newProviderDictionaryValue->IsHolding<VtDictionary>()) ?
                newProviderDictionaryValue->UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();

            return oldProviderDictionary != newProviderDictionary;
        }
    }
    else
    {
        // one of them (or both) is a nullptr
        if (oldProviderId == nullptr && newProviderId == nullptr)
        {
            // no change to provider, don't need to check parameters
            return false;
        }

        // otherwise one changed
        return true;
    }
}

// these macros emit methods defined in the Pixar namespace
// but not properly scoped, so we have to use the namespace
// locally here
TF_DEFINE_PUBLIC_TOKENS(
    EdfFileFormatTokens,
    ((Id, "edfFileFormat"))
    ((Version, "1.0"))
    ((Target, "usd"))
    ((Extension, "edf"))
    ((Params, "EdfDataParameters"))
);

TF_REGISTRY_FUNCTION(TfType)
{
    SDF_DEFINE_FILE_FORMAT(EdfFileFormat, SdfFileFormat);
}

PXR_NAMESPACE_CLOSE_SCOPE
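// --------------------------------------------------------------------------
// [Editor's illustrative sketch - not part of the original plugin source]
// The providerArgs flattening convention used by
// ComposeFieldsForFileFormatArguments above, reduced to plain std::map so it
// can be read and run in isolation. The key names and values are
// hypothetical.

#include <map>
#include <string>

static std::map<std::string, std::string> _SketchFlattenProviderArgs()
{
    const std::map<std::string, std::string> providerArgs = {
        { "apiKey", "123" },
        { "endpoint", "https://example.com" },
    };

    std::map<std::string, std::string> args;
    args["dataProviderId"] = "someProvider";
    for (const auto& kv : providerArgs)
    {
        // nested provider arguments become "providerArgs:<key>" entries
        args["providerArgs:" + kv.first] = kv.second;
    }

    // args now holds:
    //   dataProviderId        = someProvider
    //   providerArgs:apiKey   = 123
    //   providerArgs:endpoint = https://example.com
    return args;
}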
7,937
C++
35.75
194
0.754567
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/api.h
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef OMNI_OMNIMETPROVIDER_API_H_ #define OMNI_OMNIMETPROVIDER_API_H_ #include "pxr/base/arch/export.h" #if defined(PXR_STATIC) # define OMNIMETPROVIDER_API # define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...) # define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...) # define OMNIMETPROVIDER_LOCAL #else # if defined(OMNIMETPROVIDER_EXPORTS) # define OMNIMETPROVIDER_API ARCH_EXPORT # define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__) # define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__) # else # define OMNIMETPROVIDER_API ARCH_IMPORT # define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__) # define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__) # endif # define OMNIMETPROVIDER_LOCAL ARCH_HIDDEN #endif #endif
1,498
C
38.447367
97
0.732977
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.cpp
// Copyright 2023 NVIDIA CORPORATION // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <pxr/base/tf/token.h> #include <pxr/base/vt/value.h> #include <pxr/base/js/json.h> #include <pxr/usd/sdf/path.h> #include <pxr/usd/sdf/schema.h> #include <pxr/usd/sdf/payload.h> #include <pxr/usd/sdf/primSpec.h> #include <pxr/usd/sdf/attributeSpec.h> #include <pxr/usd/usd/tokens.h> #include <edfDataProviderFactory.h> #include "omniMetProvider.h" #include <iostream> #include <curl/curl.h> PXR_NAMESPACE_OPEN_SCOPE EDF_DEFINE_DATAPROVIDER(OmniMetProvider); TF_DEFINE_PUBLIC_TOKENS( OmniMetProviderProviderArgKeys, (dataLodLevel) (deferredRead) (lod1Count) ); TF_DEFINE_PRIVATE_TOKENS( EdfFieldKeys, (EdfDataParameters) ); TF_DEFINE_PRIVATE_TOKENS( OmniMetProviderTypeNames, (AmaDepartment) (AmaObject) ); TF_DEFINE_PRIVATE_TOKENS( OmniMetProviderFieldKeys, (departmentId) (displayName) (objectID) (isHighlight) (accessionNumber) (accessionYear) (isPublicDomain) (primaryImage) (primaryImageSmall) (additionalImages) (constituents) (department) (objectName) (title) (culture) (period) (dynasty) (reign) (portfolio) (artistRole) (artistPrefix) (artistDisplayName) (artistDisplayBio) (artistSuffix) (artistAlphaSort) (artistNationality) (artistGender) (artistWikidata_URL) (artistULAN_URL) (objectDate) (objectBeginDate) (objectEndDate) (medium) (dimensions) (measurements) (creditLine) (geographyType) (city) (state) (county) (country) (region) (subregion) (locale) (locus) (excavation) (river) (classification) (rightsAndReproduction) (linkResource) (metadataDate) (repository) (objectURL) (objectWikidataURL) (isTimelineWork) (galleryNumber) ); enum struct DataLodLevel { Level0 = 0, Level1 = 1, Level2 = 2 }; // urls used to retrieve the data static const std::string DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/departments"; static const std::string OBJECTS_IN_DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects?departmentIds="; static const std::string OBJECT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects/"; static const SdfPath DATA_ROOT_PATH("/Data"); OmniMetProvider::OmniMetProvider(const EdfDataParameters& parameters) : IEdfDataProvider(parameters) { curl_global_init(CURL_GLOBAL_DEFAULT); } OmniMetProvider::~OmniMetProvider() { curl_global_cleanup(); } bool OmniMetProvider::Read(std::shared_ptr<IEdfSourceData> sourceData) { // this gives the provider a chance to load all data it needs to on first layer read // if we are parameterized for a deferred read, we do nothing and read on demand // at first ask, if it's not a deferred read, we load all appropriate content from the // back-end here if(!this->IsDeferredRead()) { // it's not a deferred read, so determine how much data we want to really load int lodLevel = this->GetDataLodLevel(); if (lodLevel == static_cast<int>(DataLodLevel::Level0)) { // load the departments this->_LoadData(false, 0, sourceData); } else if (lodLevel == static_cast<int>(DataLodLevel::Level1)) { // load the 
departments and their children // but cap the number of children at the specified level this->_LoadData(true, this->GetLod1Count(), sourceData); } else { // max lod level, load everything this->_LoadData(true, 0, sourceData); } } return true; } void OmniMetProvider::_LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData) { // load the department data std::string departmentData = this->_LoadDepartments(); std::vector<std::pair<std::string, int>> departments = this->_ParseDepartments(departmentData, sourceData); // do we want to load objects as well? if (includeObjects) { for (auto it = departments.begin(); it != departments.end(); it++) { std::vector<std::string> objectData = this->_LoadObjects(TfStringify(it->second), objectCount); for (auto itt = objectData.begin(); itt != objectData.end(); itt++) { this->_ParseObject(*itt, it->first, sourceData); } } } } std::string OmniMetProvider::_LoadDepartments() { std::string departments; CURL* departmentCurl = curl_easy_init(); if (departmentCurl != nullptr) { CURLcode resultCode; curl_easy_setopt(departmentCurl, CURLOPT_URL, DEPARTMENT_URL.c_str()); curl_easy_setopt(departmentCurl, CURLOPT_HTTPGET, 1L); curl_easy_setopt(departmentCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback); // allocate a string that we can append the result onto std::string* result = new std::string(); curl_easy_setopt(departmentCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(result)); resultCode = curl_easy_perform(departmentCurl); if (resultCode == CURLE_OK) { departments = *result; } else { TF_CODING_ERROR("Unable to load departments from '%s'!", DEPARTMENT_URL.c_str()); } // done with the callback data delete result; // done with the request handle curl_easy_cleanup(departmentCurl); } return departments; } std::vector<int> OmniMetProvider::_ParseObjectIds(const std::string& response) const { std::vector<int> objectIds; PXR_NS::JsValue jsValue = PXR_NS::JsParseString(response, nullptr); if (!jsValue.IsNull()) { PXR_NS::JsObject rootObject = jsValue.GetJsObject(); PXR_NS::JsObject::iterator it = rootObject.find("objectIDs"); if (it != rootObject.end()) { PXR_NS::JsArray jsonObjectIdArray = it->second.GetJsArray(); for (auto objectIdIt = jsonObjectIdArray.begin(); objectIdIt != jsonObjectIdArray.end(); objectIdIt++) { objectIds.push_back((*objectIdIt).GetInt()); } } else { TF_CODING_ERROR("Unable to find 'objectIDs' array in returned data '%s'!", response.c_str()); } } else { TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", response.c_str()); } return objectIds; } std::vector<std::string> OmniMetProvider::_LoadObjects(const std::string& departmentId, size_t objectCount) { // NOTE: this should be updated to make these requests in parallel in the case // where we aren't doing deferred reads // ideally we wouldn't want to initialize a new curl handle here, but since this // call can be made in the parallel prim indexing, we can't share the easy handle // across threads, so we take the overhead hit here std::vector<std::string> objects; CURL* objectCurl = curl_easy_init(); std::string url = OBJECTS_IN_DEPARTMENT_URL + departmentId; std::string* result = new std::string(); CURLcode resultCode; *result = ""; curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str()); curl_easy_setopt(objectCurl, CURLOPT_HTTPGET, 1L); curl_easy_setopt(objectCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback); curl_easy_setopt(objectCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(result)); resultCode = 
curl_easy_perform(objectCurl); if (resultCode == CURLE_OK) { // process result std::vector<int> objectIds = this->_ParseObjectIds(*result); // objectCount = 0 means load all objects // objectCount > 0 means load max that many objects size_t counter = 0; for (auto objectIdIterator = objectIds.begin(); objectIdIterator != objectIds.end() && (objectCount == 0 || counter < objectCount); objectIdIterator++) { // reset the URL and result buffer // NOTE: this should be updated to make these requests in parallel url = OBJECT_URL + TfStringify(*objectIdIterator); *result = ""; curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str()); resultCode = curl_easy_perform(objectCurl); if (resultCode == CURLE_OK) { objects.push_back(*result); } counter++; } } // done with the callback data delete result; // done with the request handle curl_easy_cleanup(objectCurl); return objects; } std::vector<std::pair<std::string, int>> OmniMetProvider::_ParseDepartments(const std::string& departmentJson, std::shared_ptr<IEdfSourceData> sourceData) { std::vector<std::pair<std::string, int>> parsedDepartments; JsValue jsValue = JsParseString(departmentJson, nullptr); if (!jsValue.IsNull()) { JsObject rootObject = jsValue.GetJsObject(); JsObject::iterator it = rootObject.find("departments"); if (it != rootObject.end()) { JsArray departments = it->second.GetJsArray(); std::string parent = DATA_ROOT_PATH.GetAsString(); for (auto departmentIt = departments.begin(); departmentIt != departments.end(); departmentIt++) { // for each department, create a prim to represent it JsObject department = (*departmentIt).GetJsObject(); int departmentId = department[OmniMetProviderFieldKeys->departmentId.GetString()].GetInt(); std::string displayName = department[OmniMetProviderFieldKeys->displayName.GetString()].GetString(); // create the prim std::string primName = TfMakeValidIdentifier(displayName); sourceData->CreatePrim(DATA_ROOT_PATH, primName, SdfSpecifier::SdfSpecifierDef, OmniMetProviderTypeNames->AmaDepartment); // create the attributes for the prim SdfPath parentPrim = SdfPath(parent + "/" + primName); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->departmentId.GetString(), SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform, VtValue(departmentId)); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->displayName.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(displayName)); parsedDepartments.push_back(std::make_pair(parentPrim.GetAsString(), departmentId)); } } else { TF_CODING_ERROR("Unable to find 'departments' array in returned data '%s'!", departmentJson.c_str()); } } else { TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", departmentJson.c_str()); } return parsedDepartments; } void OmniMetProvider::_ParseObject(const std::string& objectData, const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData) { // from the parent path given and the data contained in the JSON // object retrieved from the server, we can create the full prim JsValue jsValue = JsParseString(objectData, nullptr); if (!jsValue.IsNull()) { JsObject rootObject = jsValue.GetJsObject(); // the root object contains all of our properties that we now need // to create a prim spec for the object and a set of property // specs for it // NOTE: this code uses the "default value" of a property spec // to represent the authored value coming from the external system // We don't need to do sub-composition over the data coming // from the external system, so we 
ever only have a value or not // so if HasDefaultValue is true on the property spec, it means // there was an authored value that came from the remote system // One optimization we could do in the layer above (EdfData) is // to add schema acquisition and checking in the loop. This would allow us // to create the property spec or not depending on if the value that came in // is different from the true fallback declared in the schema // (but we'd have to change the ask for the property to check whether // the schema has the property rather than if the property spec exists) std::string objectName = rootObject[OmniMetProviderFieldKeys->objectName.GetString()].GetString(); std::string primName = TfMakeValidIdentifier(objectName) + TfStringify(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt()); // create the prim SdfPath newPrimParentPath(parentPath); sourceData->CreatePrim(newPrimParentPath, primName, SdfSpecifier::SdfSpecifierDef, OmniMetProviderTypeNames->AmaObject); // set the fact that this prim has an API schema attached to it // usdGenSchema doesn't generate a public token for the actual // API schema class name, so we hard code that here SdfPath parentPrim = SdfPath(parentPath + "/" + primName); TfTokenVector apiSchemas; apiSchemas.push_back(TfToken("OmniMetArtistAPI")); VtValue apiSchemasValue(apiSchemas); sourceData->SetField(parentPrim, UsdTokens->apiSchemas, apiSchemasValue); // create the attributes for the prim sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->objectID.GetString(), SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isHighlight.GetString(), SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->isHighlight.GetString()].GetBool())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionNumber.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->accessionNumber.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionYear.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->accessionYear.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isPublicDomain.GetString(), SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->isPublicDomain.GetString()].GetBool())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImage.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->primaryImage.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImageSmall.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->primaryImageSmall.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->department.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->department.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->title.GetString(), SdfValueTypeNames->String, 
SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->title.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->culture.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->culture.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->period.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->period.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->dynasty.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->dynasty.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->reign.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->reign.GetString()].GetString())); sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->portfolio.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->portfolio.GetString()].GetString())); // artist information complying with sample API schema std::string namespaceFieldPrefix = "omni:met:artist:"; JsObject::const_iterator i = rootObject.find(OmniMetProviderFieldKeys->artistRole.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistRole.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistRole.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistPrefix.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistPrefix.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistPrefix.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayName.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayName.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayName.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayBio.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayBio.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayBio.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistSuffix.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistSuffix.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistSuffix.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistAlphaSort.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistAlphaSort.GetString(), SdfValueTypeNames->String, 
SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistAlphaSort.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistNationality.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistNationality.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistNationality.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistGender.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistGender.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistGender.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistWikidata_URL.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistWikidata_URL.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistWikidata_URL.GetString()].GetString())); } i = rootObject.find(OmniMetProviderFieldKeys->artistULAN_URL.GetString()); if (i != rootObject.end()) { sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistULAN_URL.GetString(), SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(rootObject[OmniMetProviderFieldKeys->artistULAN_URL.GetString()].GetString())); } // note that there are quite a few additional properties that could be pulled, the above // represents only a sample of the data that is there - if you'd like to try the rest as an // exercise, you can enhance the schema attributes and read the remaining ones here } else { TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", objectData.c_str()); } } bool OmniMetProvider::ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData) { // if the parent path is the root, we need to load the departments // but only if we are in a deferred read scenario if (this->IsDeferredRead()) { SdfPath parentPrimPath = SdfPath(parentPath); int lodLevel = this->GetDataLodLevel(); if (parentPrimPath == DATA_ROOT_PATH) { // load the department data std::cout << "Loading department data..." << std::endl; std::string departmentData = this->_LoadDepartments(); std::vector<std::pair<std::string, int>> departments = this->_ParseDepartments(departmentData, sourceData); } else { VtValue typeNameValue; if(sourceData->HasField(SdfPath(parentPath), SdfFieldKeys->TypeName, &typeNameValue)) { if (typeNameValue.UncheckedGet<TfToken>() == OmniMetProviderTypeNames->AmaDepartment && this->GetDataLodLevel() != static_cast<int>(DataLodLevel::Level0)) { // it's a department, we need to load the objects // associated with the department std::string departmentIdPath = parentPath + "." + OmniMetProviderFieldKeys->departmentId.GetString(); VtValue departmentId; if (sourceData->HasAttribute(SdfPath(departmentIdPath), &departmentId)) { size_t objectCount = 0; if (lodLevel == static_cast<int>(DataLodLevel::Level1)) { objectCount = this->GetLod1Count(); } // load the object data std::cout << "Loading object data for " + parentPath + "..." 
<< std::endl;
                        std::vector<std::string> objectData = this->_LoadObjects(TfStringify(departmentId.UncheckedGet<int>()), objectCount);
                        for (auto it = objectData.begin(); it != objectData.end(); it++)
                        {
                            this->_ParseObject(*it, parentPath, sourceData);
                        }
                    }
                }
            }
        }

        return true;
    }

    return false;
}

bool OmniMetProvider::IsDataCached() const
{
    return !this->IsDeferredRead();
}

int OmniMetProvider::GetDataLodLevel() const
{
    int dataLodLevel = 0;
    EdfDataParameters parameters = this->GetParameters();
    std::unordered_map<std::string, std::string>::const_iterator it =
        parameters.providerArgs.find(OmniMetProviderProviderArgKeys->dataLodLevel);
    if (it != parameters.providerArgs.end())
    {
        dataLodLevel = TfUnstringify<int>(it->second);
        if (dataLodLevel < 0)
        {
            dataLodLevel = 0;
        }
    }

    return dataLodLevel;
}

size_t OmniMetProvider::GetLod1Count() const
{
    // although the incoming string from the parameter set
    // might be interpretable as a negative integer
    // it doesn't really make practical sense, so if
    // it is interpreted as negative, we clamp to 0
    // and return an unsigned version to the caller
    // note we parse into a signed int first - parsing directly
    // into an unsigned type would make the negativity check a no-op
    int lod1Count = 0;
    EdfDataParameters parameters = this->GetParameters();
    std::unordered_map<std::string, std::string>::const_iterator it =
        parameters.providerArgs.find(OmniMetProviderProviderArgKeys->lod1Count);
    if (it != parameters.providerArgs.end())
    {
        lod1Count = TfUnstringify<int>(it->second);
        if (lod1Count < 0)
        {
            lod1Count = 0;
        }
    }

    return static_cast<size_t>(lod1Count);
}

bool OmniMetProvider::IsDeferredRead() const
{
    bool deferredRead = false;
    EdfDataParameters parameters = this->GetParameters();
    std::unordered_map<std::string, std::string>::const_iterator it =
        parameters.providerArgs.find(OmniMetProviderProviderArgKeys->deferredRead);
    if (it != parameters.providerArgs.end())
    {
        deferredRead = TfUnstringify<bool>(it->second);
    }

    return deferredRead;
}

size_t OmniMetProvider::_CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp)
{
    // curl reports the payload size as size * nmemb bytes
    std::string* result = reinterpret_cast<std::string*>(userp);
    result->append(reinterpret_cast<const char*>(data), size * nmemb);
    return size * nmemb;
}

PXR_NAMESPACE_CLOSE_SCOPE
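// --------------------------------------------------------------------------
// [Editor's illustrative sketch - not part of the original plugin source]
// The libcurl GET-with-write-callback pattern used by _LoadDepartments and
// _LoadObjects above, reduced to a single self-contained function.

static std::string _SketchHttpGet(const std::string& url)
{
    std::string body;
    CURL* curl = curl_easy_init();
    if (curl != nullptr)
    {
        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_HTTPGET, 1L);
        // captureless lambda converts to the C function pointer curl expects
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION,
            +[](void* data, size_t size, size_t nmemb, void* userp) -> size_t {
                static_cast<std::string*>(userp)->append(
                    static_cast<const char*>(data), size * nmemb);
                return size * nmemb;
            });
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);
        if (curl_easy_perform(curl) != CURLE_OK)
        {
            body.clear();
        }
        curl_easy_cleanup(curl);
    }
    return body;
}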
27,507
C++
41.780715
159
0.662704
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.h
// Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_
#define OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_

#include <string>
#include <vector>
#include <utility>

#include <pxr/pxr.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/layer.h>
#include <pxr/usd/sdf/schema.h>

#include <iEdfDataProvider.h>

PXR_NAMESPACE_OPEN_SCOPE

TF_DECLARE_PUBLIC_TOKENS(
    OmniMetProviderProviderArgKeys,
    (dataLodLevel)
    (deferredRead)
    (lod1Count)
);

/// \class OmniMetProvider
///
/// Defines a specific EDF back-end data provider for reading information
/// from the Metropolitan Museum of Art REST APIs and converting that
/// into prim and attribute data that can be processed by USD.
///
class OmniMetProvider : public IEdfDataProvider
{
public:

    OmniMetProvider(const EdfDataParameters& parameters);
    virtual ~OmniMetProvider();

    virtual bool Read(std::shared_ptr<IEdfSourceData> sourceData) override;
    virtual bool ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData) override;
    virtual bool IsDataCached() const override;

private:

    int GetDataLodLevel() const;
    size_t GetLod1Count() const;
    bool IsDeferredRead() const;

    void _LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData);
    std::string _LoadDepartments();
    std::vector<std::string> _LoadObjects(const std::string& departmentId, size_t objectCount);
    std::vector<std::pair<std::string, int>> _ParseDepartments(const std::string& departmentJson,
        std::shared_ptr<IEdfSourceData> sourceData);
    void _ParseObject(const std::string& objectData, const std::string& parentPath,
        std::shared_ptr<IEdfSourceData> sourceData);

    // NOTE: these methods are not technically const, since they do change internal state
    // in the edfData object's layer data. This is ok, because that object is a cache
    // https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#es50-dont-cast-away-const
    // the mutable cache state is allowed to change internally while the semantics
    // of the object remain unchanged from the outside
    void _LoadDepartments(bool includeObjects) const;
    void _LoadObjects(const std::string& departmentId, const std::string& parentPath) const;
    bool _IsDepartmentDataCached() const;
    bool _IsObjectDataCached(const std::string& parentPath) const;

    void _ParseDepartments(const std::string& response) const;
    std::vector<int> _ParseObjectIds(const std::string& response) const;
    void _ParseObject(const std::string& parentPath, const std::string& response) const;

    static size_t _CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp);
};

PXR_NAMESPACE_CLOSE_SCOPE

#endif
3,321
C
37.627907
128
0.747666
NVIDIA-Omniverse/kit-osc/README.md
# OSC Omniverse Kit Extension [omni.osc]

Omniverse Kit extension for sending and receiving OSC (Open Sound Control) messages.

![demo.gif](/docs/images/demo.gif)

*The OSC control surface app running on the iPad is [TouchOSC](https://hexler.net/touchosc).*

# Getting Started

Open the Community tab in the Extensions window (`Window > Extensions`), search for `OSC`, then install and enable the `omni.osc` extension.

![extension-install](/docs/images/extension-install.png)

## Running the server

After installing and enabling the extension, you should see the following window.

![server-ui-window](/docs/images/server-ui-window.png)

Enter the private IP address of the computer running your Kit application and the desired port, then click `Start`. If you are prompted to configure your Windows Firewall, ensure that the Kit application is allowed to communicate with other devices on the private network.

![windows-firewall](/docs/images/osc-start-windows-security-alert.png)

You can find the private IP address of your computer by running `ipconfig` in the Windows terminal.

![ipconfig](/docs/images/ipconfig.png)

If you run the server on `localhost`, it can only receive messages from OSC clients running on the same machine. If you want to receive messages from OSC clients running on other devices on the same network, you must run the server on an IP address that is visible to those devices.

Once the server is running, confirm that it can successfully receive messages by inspecting the verbose console logs. It might be helpful to filter only the logs that originate from `omni.osc`.

![console-logs](/docs/images/console-logs.png)

## Receiving messages with Python

Below is a Python snippet that demonstrates how to handle OSC messages received by the server. It assumes that the OSC server configured above is running. You can paste and run the snippet directly into the Omniverse Script Editor for testing.

```python
import carb
import carb.events
import omni.osc

def on_event(event: carb.events.IEvent) -> None:
    addr, args = omni.osc.osc_message_from_carb_event(event)
    carb.log_info(f"Received OSC message: [{addr}, {args}]")

sub = omni.osc.subscribe_to_osc_event_stream(on_event)
```

## Receiving messages with ActionGraph

Search for `OSC` in the Action Graph nodes list and add the `On OSC Message` node to your graph. The node takes a single input, the OSC address path that this node will handle. This input can be a valid regular expression. Note that this input field does *not* support OSC pattern matching expressions. The node outputs an OmniGraph bundle with two attributes named `address` and `arguments` which you can access by using the `Extract Attribute` node.

![og-receive](/docs/images/og-receive.png)

You can find example USD stages that demonstrate how to configure an ActionGraph using this extension at [exts/omni.osc/data/examples](/exts/omni.osc/data/examples).

## Sending messages from Python

Since `omni.osc` depends on [python-osc](https://pypi.org/project/python-osc/), you can import this module directly in your own Python code to send OSC messages. Please see the [documentation](https://python-osc.readthedocs.io/en/latest/) for additional information and support.

```python
import random
import time

from pythonosc import udp_client

client = udp_client.SimpleUDPClient("127.0.0.1", 3334)
client.send_message("/scale", [random.random(), random.random(), random.random()])
```

You can paste and run the above snippet directly into the Omniverse Script Editor for testing.
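With the server running, you can combine the receive and send snippets above into a quick round-trip test from the Script Editor. The snippet below assumes the server is listening on `127.0.0.1:3334`; adjust the address and port to match your server settings.

```python
import carb
import carb.events
import omni.osc
from pythonosc import udp_client

def on_event(event: carb.events.IEvent) -> None:
    addr, args = omni.osc.osc_message_from_carb_event(event)
    carb.log_info(f"Round-trip OSC message: [{addr}, {args}]")

sub = omni.osc.subscribe_to_osc_event_stream(on_event)

client = udp_client.SimpleUDPClient("127.0.0.1", 3334)
client.send_message("/test", [1.0, 2.0, 3.0])
```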
## Sending messages from ActionGraph

This is not currently implemented.

# Limitations & Known Issues

- OSC Bundles are currently not supported.
- The OmniGraph `On OSC Message` node can only handle OSC messages containing lists of floating-point arguments.

# Help

The sections below should help you diagnose any potential issues you may encounter while working with the `omni.osc` extension.

## Unable to receive messages

1. First, enable verbose logs in the console (filter by the `omni.osc` extension). The server will log any messages received.
2. Confirm that the computer running the Kit application and the device sending the OSC messages are on the same network.
3. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall. Note that you may have multiple instances of kit.exe on this list. When in doubt, ensure that all of them have the appropriate permission.
![windows-firewall](/docs/images/windows-firewall.png)
4. Confirm that the Windows Defender Firewall allows incoming UDP traffic to the port in use.
5. Confirm that the device sending the OSC messages is sending the messages via UDP to the correct IP address and port.
6. Use a tool such as [Wireshark](https://www.wireshark.org/) to confirm that the computer running the Kit application is receiving UDP traffic from the device.

## Unable to send messages

1. Confirm that the computer running the Kit application and the device receiving the OSC messages are on the same network.
2. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall.
3. Confirm that the device receiving the OSC messages is able to receive incoming UDP traffic at the port in use.

# Contributing

The source code for this repository is provided as-is and we are not accepting outside contributions.

# License

- The code in this repository is licensed under the Apache License 2.0. See [LICENSE](/LICENSE).
- python-osc is licensed under the Unlicense. See [exts/omni.osc/vendor/LICENSE-python-osc](/exts/omni.osc/vendor/LICENSE-python-osc).

# Resources

- [https://opensoundcontrol.stanford.edu/spec-1_0.html](https://opensoundcontrol.stanford.edu/spec-1_0.html)
- [https://en.wikipedia.org/wiki/Open_Sound_Control](https://en.wikipedia.org/wiki/Open_Sound_Control)
- [https://python-osc.readthedocs.io/en/latest/](https://python-osc.readthedocs.io/en/latest/)
5,998
Markdown
46.992
301
0.779593
NVIDIA-Omniverse/kit-osc/exts/omni.osc/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.3.1"

# The title and description fields are primarily for displaying extension info in UI
title = "OSC (Open Sound Control)"
description = "Send and receive OSC (Open Sound Control) messages"

authors = ["NVIDIA"]
repository = "https://github.com/NVIDIA-Omniverse/kit-osc"

readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"

icon = "data/icon.png"
preview_image = "data/preview.png"

# One of the categories for UI.
category = "Other"

# Keywords for the extension
keywords = ["kit", "osc"]

[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.pipapi" = {}
"omni.graph" = {}
"omni.graph.bundle.action" = {}

# Main python module this extension provides, it will be publicly available as "import omni.osc.core".
[[python.module]]
name = "omni.osc"

[python.pipapi]
archiveDirs = ["vendor"]

[settings.exts."omni.osc"]
address = "localhost"
port = 3334

[[test]]
dependencies = ["omni.graph", "omni.kit.test"]
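# The address and port settings above are only defaults. As with any Kit
# extension setting, they can be overridden per launch using Kit's standard
# settings syntax, for example (illustrative; the executable name depends on
# your Kit app):
#
#   kit.exe --/exts/omni.osc/address="0.0.0.0" --/exts/omni.osc/port=3334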
983
TOML
22.999999
102
0.703967
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/extension.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from typing import Any, List

import carb
import carb.events
import carb.profiler
import carb.settings
import omni.ext
import omni.kit.app
from pythonosc.dispatcher import Dispatcher

from .core import carb_event_payload_from_osc_message, push_to_osc_event_stream
from .menu import OscMenu
from .server import DaemonOSCUDPServer
from .window import OscWindow


class OmniOscExt(omni.ext.IExt):
    def on_startup(self, ext_id):
        def on_start(host: str, port: int) -> bool:
            return self.server.start(host, port)

        def on_stop() -> bool:
            return self.server.stop()

        def toggle_window_visible(_arg0, _arg1) -> None:
            """
            Toggle the window visibility from the editor menu item
            """
            self.window.visible = not self.window.visible

        self.server = OmniOscExt.create_server()
        # The main UI window
        default_addr = carb.settings.get_settings().get("exts/omni.osc/address")
        default_port = carb.settings.get_settings().get("exts/omni.osc/port")
        self.window = OscWindow(
            on_start=on_start, on_stop=on_stop, default_addr=default_addr, default_port=default_port
        )
        # The editor menu entry that toggles the window visibility
        self.menu = OscMenu(on_click=toggle_window_visible)
        # Toggle the editor menu entry when the user closes the window
        self.window.set_visibility_changed_fn(lambda visible: self.menu.set_item_value(visible))

    def on_shutdown(self):
        self.window = None
        self.menu = None
        if self.server is not None:
            self.server.stop()
            self.server = None

    @staticmethod
    def create_server() -> DaemonOSCUDPServer:
        """
        Create a server that routes all OSC messages to a carbonite event stream
        """

        @carb.profiler.profile
        def on_osc_msg(addr: str, *args: List[Any]) -> None:
            """
            OSC message handler
            """
            carb.log_verbose(f"OSC message: [{addr}, {args}]")
            payload = carb_event_payload_from_osc_message(addr, args)
            push_to_osc_event_stream(payload)

        # Server
        dispatcher = Dispatcher()
        dispatcher.set_default_handler(on_osc_msg)
        return DaemonOSCUDPServer(dispatcher)
2,714
Python
34.723684
100
0.658438
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/__init__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import omni.kit.pipapi # python-osc: # - SWIPAT request: http://nvbugs/3684871 # - A copy of the source is forked to https://github.com/NVIDIA-Omniverse/python-osc # - The dependency vendored and installed from exts/omni.osc/vendor/python_osc-1.8.0-py3-none-any.whl omni.kit.pipapi.install( package="python-osc", module="pythonosc", use_online_index=False, ignore_cache=True, ignore_import_check=False ) from pythonosc import * # noqa: F401 from .core import * # noqa: F401,F403 from .extension import * # noqa: F401,F403 from .server import * # noqa: F401,F403 # NOTE(jshrake): omni.graph is an optional dependency so handle the case # that the below import fails try: from .ogn import * except Exception as e: print(f"omni.osc failed to import OGN due to {e}") pass
1,219
Python
37.124999
114
0.754717
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/core.py
## Copyright © 2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED. ## ## This software product is a proprietary product of Nvidia Corporation and its affiliates ## (the "Company") and all right, title, and interest in and to the software ## product, including all associated intellectual property rights, are and ## shall remain exclusively with the Company. ## ## This software product is governed by the End User License Agreement ## provided with the software product. from typing import Callable, Tuple import carb import carb.events import omni.ext import omni.kit.app OSC_EVENT_TYPE_NAME: str = "omni.osc" OSC_EVENT_TYPE: int = carb.events.type_from_string(OSC_EVENT_TYPE_NAME) OSC_MESSAGE_ADDRESS_STR = "address" OSC_MESSAGE_ARGUMENTS_STR = "arguments" def get_osc_event_stream() -> carb.events._events.IEventStream: """ Returns the OSC event stream """ return omni.kit.app.get_app().get_message_bus_event_stream() def push_to_osc_event_stream(payload: dict) -> None: """ Push a payload to the OSC event stream """ get_osc_event_stream().push(OSC_EVENT_TYPE, sender=0, payload=payload) def subscribe_to_osc_event_stream( cb: Callable[[carb.events._events.IEvent], None] ) -> carb.events._events.ISubscription: """ Returns a Carbonite event subscription to the OSC event stream """ return get_osc_event_stream().create_subscription_to_pop_by_type(OSC_EVENT_TYPE, cb) def carb_event_payload_from_osc_message(address: str, args: list) -> dict: """ Return a carbonite event payload suitable for pushing to the OSC event stream """ return {OSC_MESSAGE_ADDRESS_STR: address, OSC_MESSAGE_ARGUMENTS_STR: args} def osc_message_from_carb_event(e: carb.events.IEvent) -> Tuple[str, list]: """ Return the OSC message address and arguments extracted from a carbonite event payload """ return (e.payload[OSC_MESSAGE_ADDRESS_STR], e.payload[OSC_MESSAGE_ARGUMENTS_STR])
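# ---------------------------------------------------------------------------
# [Editor's illustrative usage sketch - not part of the original module]
# Round-tripping a payload through the OSC event stream using only the
# helpers defined above. In practice you would paste this into the Script
# Editor of a running Kit app; events are delivered when the app pumps the
# message bus event stream.
if __name__ == "__main__":

    def _on_event(event: carb.events.IEvent) -> None:
        address, arguments = osc_message_from_carb_event(event)
        carb.log_info(f"Handled OSC message: [{address}, {arguments}]")

    _sub = subscribe_to_osc_event_stream(_on_event)
    push_to_osc_event_stream(carb_event_payload_from_osc_message("/demo", [1.0, 2.0]))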
1,961
Python
34.672727
90
0.7231
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/server.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import threading

import carb
import carb.events
from pythonosc import osc_server
from pythonosc.dispatcher import Dispatcher


class DaemonOSCUDPServer:
    """
    Run a python-osc BlockingOSCUDPServer in a separate thread.

    Usage::

        import omni.osc.core as osc

        dispatcher = osc.Dispatcher()
        dispatcher.set_default_handler(lambda path, *args: print(f"{path}: {args}"))
        server = osc.DaemonOSCUDPServer(dispatcher)
        server.start("192.168.0.1", 3434)
        # ...
        server.stop()
    """

    def __init__(self, dispatcher: Dispatcher):
        self.dispatcher: Dispatcher = dispatcher
        self.server: osc_server.BlockingOSCUDPServer = None
        self.thread: threading.Thread = None

    def running(self) -> bool:
        """
        Returns true if the server is running
        """
        return self.thread is not None and self.thread.is_alive()

    def start(self, addr: str, port: int) -> bool:
        """
        Start the OSC server on the specified address and port.
        Does nothing if the server is already running.
        """
        if not self.running():
            carb.log_info(f"Starting OSC server on {addr}:{port}")
            try:
                self.server = osc_server.BlockingOSCUDPServer((addr, port), dispatcher=self.dispatcher)
                self.thread = threading.Thread(target=lambda: self.server.serve_forever())
                # NOTE(jshrake): Running the thread in daemon mode ensures that the thread and server
                # are properly disposed of in the event that the main thread exits unexpectedly.
                self.thread.daemon = True
                self.thread.start()
            except Exception as e:
                carb.log_error(f"Error starting OSC server: {e}")
        else:
            carb.log_info("OSC server already running")
        return self.running()

    def stop(self) -> bool:
        """
        Stops the OSC server.
        """
        if self.running():
            carb.log_info("Stopping OSC server")
            try:
                self.server.shutdown()
                self.thread.join()
            except Exception as e:
                carb.log_error(f"Error stopping OSC server: {e}")
            finally:
                self.server = None
                self.thread = None
        else:
            carb.log_info("OSC server not running")
        return self.running()
2,857
Python
34.28395
103
0.615681
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/menu.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import omni.kit.ui

MENU_PATH = "Window/OSC"


class OscMenu:
    def __init__(self, on_click):
        editor_menu = omni.kit.ui.get_editor_menu()
        if not editor_menu:
            return
        editor_menu.add_item(menu_path=MENU_PATH, on_click=on_click, toggle=True, value=True)

    def set_item_value(self, val: bool) -> None:
        editor_menu = omni.kit.ui.get_editor_menu()
        if not editor_menu:
            return
        editor_menu.set_value(MENU_PATH, val)

    def __del__(self):
        editor_menu = omni.kit.ui.get_editor_menu()
        if not editor_menu:
            return
        if editor_menu.has_item(MENU_PATH):
            editor_menu.remove_item(MENU_PATH)
1,125
Python
33.121211
93
0.672889
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/window.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from typing import Callable

import omni.ui as ui

OnStartCallback = Callable[[str, int], bool]
OnStopCallback = Callable[[], bool]


class OscWindow(ui.Window):
    def __init__(
        self, default_addr: str, default_port: int, on_start: OnStartCallback, on_stop: OnStopCallback
    ) -> None:
        super().__init__("OSC UDP Server", width=300, height=300)

        def start() -> None:
            """
            Callback when the user presses the start button
            """
            is_running = on_start(addr.as_string, port.as_int)
            running.set_value(is_running)

        def stop() -> None:
            """
            Callback when the user presses the stop button
            """
            is_running = on_stop()
            running.set_value(is_running)

        def update_running_label(label: ui.Label, running: bool) -> None:
            """
            Keep the UI label up to date with the state of the server
            """
            if running:
                label.text = f"Running UDP server @ {addr.as_string}:{port.as_int}"
                label.set_style({"color": "green"})
            else:
                label.text = "Stopped"
                label.set_style({"color": "red"})

        def toggle_enabled(field: ui.AbstractField, running: bool) -> None:
            """
            Enable or disable the input field based on the state of the server
            """
            field.enabled = not running
            color = "gray" if running else "white"
            field.set_style({"color": color})

        # Settings
        addr = ui.SimpleStringModel(default_addr)
        port = ui.SimpleIntModel(default_port)
        running = ui.SimpleBoolModel(False)

        with self.frame:
            with ui.VStack():
                label = ui.Label("", height=20)
                update_running_label(label, running.get_value_as_bool())
                running.add_value_changed_fn(lambda m: update_running_label(label, m.get_value_as_bool()))
                with ui.VStack(height=20):
                    with ui.HStack():
                        ui.Label("Address:")
                        addr_field = ui.StringField(addr)
                        toggle_enabled(addr_field, running.get_value_as_bool())
                        running.add_value_changed_fn(lambda m: toggle_enabled(addr_field, m.get_value_as_bool()))
                    ui.Spacer(height=2)
                    with ui.HStack():
                        ui.Label("Port:")
                        port_field = ui.IntField(port)
                        toggle_enabled(port_field, running.get_value_as_bool())
                        running.add_value_changed_fn(lambda m: toggle_enabled(port_field, m.get_value_as_bool()))
                with ui.VStack():
                    ui.Button("Start", clicked_fn=start)
                    ui.Button("Stop", clicked_fn=stop)
3,323
Python
39.536585
113
0.560036
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/ogn/__init__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""
Dynamically import every file in a directory tree that looks like a Python Ogn Node.
This includes linked directories, which is the mechanism by which nodes can be
hot-reloaded from the source tree.
"""

# Required to register nodes in Kit 104
try:
    import omni.graph.core as og

    og.register_ogn_nodes(__file__, "omni.osc")
except Exception:
    # Swallow any exceptions
    pass
817
Python
37.952379
113
0.774786
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/ogn/nodes/OgnOnOscEvent.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""
This is the implementation of the OGN node defined in OgnOnOscEvent.ogn
This implementation is inspired by the OgnOnCustomEvent node
See https://gitlab-master.nvidia.com/omniverse/kit/-/blob/master/kit/source/extensions/omni.graph.action/nodes/OgnOnCustomEvent.py  # noqa E501
"""
import re
from typing import Any, List, Union

import carb
import carb.events
import carb.profiler
import omni.graph.core as og
import omni.osc
from omni.osc.core import OSC_MESSAGE_ADDRESS_STR, OSC_MESSAGE_ARGUMENTS_STR

from .. import OgnOnOscEventDatabase


class OgnOnOscEventInternalState:
    """Convenience class for maintaining per-node state information"""

    def __init__(self):
        """Instantiate the per-node state information."""
        # This subscription object controls the lifetime of our callback, it will be
        # cleaned up automatically when our node is destroyed
        self.sub = None
        # Set when the callback has triggered
        self.is_set = False
        # The last event received
        self.event: Union[None, carb.events.IEvent] = None
        # The node instance handle
        self.node = None
        # The regex used to match the OSC address path
        self.osc_path_regex = ""
        # The compiled regex pattern
        self.osc_path_regex_pattern = None

    @carb.profiler.profile
    def on_event(self, event: carb.events.IEvent):
        """The event callback"""
        if event is None:
            return
        # Only handle messages with a path that matches the OSC address path regex
        osc_addr, _ = omni.osc.osc_message_from_carb_event(event)
        if self.osc_path_regex_pattern is None or not self.osc_path_regex_pattern.match(osc_addr):
            return
        self.is_set = True
        self.event = event
        # Tell the evaluator we need to be computed
        if self.node.is_valid():
            self.node.request_compute()

    @carb.profiler.profile
    def first_time_subscribe(self, node: og.Node, osc_path_regex: str) -> bool:
        """Checked call to set up carb subscription

        Args:
            node: The node instance
            osc_path_regex: The OSC address path regex to match incoming messages against

        Returns:
            True if we subscribed, False if we are already subscribed
        """
        if self.osc_path_regex != osc_path_regex:
            # osc path regex changed since we last subscribed, re-compile
            try:
                self.osc_path_regex_pattern = re.compile(osc_path_regex)
                self.osc_path_regex = osc_path_regex
            except Exception as e:
                carb.log_error(f"Error compiling OSC Address Path Regex '{osc_path_regex}': {e}")
        if self.sub is None:
            self.sub = omni.osc.subscribe_to_osc_event_stream(self.on_event)
            self.node = node
            return True
        return False

    def try_pop_event(self) -> Union[None, carb.events.IEvent]:
        """Pop the last event received, or None if there is no event to pop"""
        if self.is_set:
            self.is_set = False
            event = self.event
            self.event = None
            return event
        return None


# ======================================================================
class OgnOnOscEvent:
    """
    This node triggers when an OSC event is received that matches the OSC address path regex.
    """

    @staticmethod
    def internal_state():
        """Returns an object that will contain per-node state information"""
        return OgnOnOscEventInternalState()

    @staticmethod
    def release(node):
        state = OgnOnOscEventDatabase.OgnOnOscEventDatabase.per_node_internal_state(node)
        if state.sub:
            state.sub.unsubscribe()
            state.sub = None

    @staticmethod
    def check_all_args_are_floats(args: List[Any]) -> bool:
        """
        Returns true if the OSC message arguments has the shape of List[float]
        """
        all_args_are_float = all(isinstance(arg, float) for arg in args)
        return all_args_are_float

    @staticmethod
    @carb.profiler.profile
    def compute(db: og.Database) -> bool:
        state: OgnOnOscEventInternalState = db.internal_state
        osc_path_regex = db.inputs.path
        state.first_time_subscribe(db.node, osc_path_regex)
        event = state.try_pop_event()
        if event is None:
            return False
        try:
            addr, args = omni.osc.osc_message_from_carb_event(event)
            # Populate the output bundle
            bundle: og._impl.bundles.BundleContents = db.outputs.message
            bundle.clear()
            # Update the address attribute
            addr_attribute = bundle.insert((og.Type(og.BaseDataType.TOKEN), OSC_MESSAGE_ADDRESS_STR))
            addr_attribute.value = addr
            # Update the arguments attribute
            all_args_are_floats = OgnOnOscEvent.check_all_args_are_floats(args)
            # NOTE(jshrake): This node currently only supports OSC arguments shaped like a List[Float]
            if all_args_are_floats:
                if len(args) == 1:
                    # Argument list contains a single element, write it as a double
                    args_attribute = bundle.insert((og.Type(og.BaseDataType.DOUBLE), OSC_MESSAGE_ARGUMENTS_STR))
                    args_attribute.value = args[0]
                elif len(args) > 1:
                    # Argument list contains multiple elements, write it as a list
                    args_attribute = bundle.insert(
                        (og.Type(og.BaseDataType.DOUBLE, tuple_count=len(args), array_depth=0), OSC_MESSAGE_ARGUMENTS_STR)
                    )
                    args_attribute.value = args
            else:
                carb.log_warn(f"OnOscMessage node expected OSC message arguments to be of type List[Float], instead got {args}")
                return False
            db.outputs.execOut = og.ExecutionAttributeState.ENABLED
        except Exception as e:
            carb.log_error(f"Error in OgnOnOscEvent::compute: {e}")
            return False
        return True
6,464
Python
37.254438
150
0.629332
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/tests/tests.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import asyncio

import omni.kit.test
import omni.osc


class Test(omni.kit.test.AsyncTestCase):
    # Before running each test
    async def setUp(self):
        pass

    # After running each test
    async def tearDown(self):
        pass

    async def test_can_start_and_stop_server(self):
        server = omni.osc.DaemonOSCUDPServer(None)
        is_running = server.start("localhost", 12345)
        self.assertTrue(is_running)
        await asyncio.sleep(0.1)
        is_running = server.running()
        self.assertTrue(is_running)
        is_running = server.stop()
        self.assertFalse(is_running)

    async def test_server_can_receive_messages(self):
        server = omni.osc.OmniOscExt.create_server()
        is_running = server.start("localhost", 3337)
        self.assertTrue(is_running)
        self.count = 0

        def on_event(e) -> None:
            addr, _ = omni.osc.osc_message_from_carb_event(e)
            self.assertEqual(e.type, omni.osc.core.OSC_EVENT_TYPE)
            self.assertEqual(addr, "/filter")
            self.count += 1

        sub = omni.osc.subscribe_to_osc_event_stream(on_event)
        total_msg_count = 10

        def send_messages():
            import random

            from pythonosc import udp_client

            client = udp_client.SimpleUDPClient(address="127.0.0.1", port=3337)
            self.assertTrue(client is not None)
            for _ in range(total_msg_count):
                client.send_message("/filter", random.random())

        send_messages()
        # Wait a few seconds for the server to receive the messages
        await asyncio.sleep(3)
        # Manually pump the stream so our subscription callback executes
        omni.osc.get_osc_event_stream().pump()
        self.assertEqual(self.count, total_msg_count)
2,226
Python
34.919354
79
0.655436
NVIDIA-Omniverse/kit-osc/exts/omni.osc/docs/CHANGELOG.md
# Changelog

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

## [0.3.1] - 2023-09-28
### Changed
- Update CHANGELOG

## [0.3.0] - 2023-09-26
### Changed
- Fix OGN node registration for Kit 105.1

## [0.2.0] - 2022-09-12
### Changed
- The `On OSC Message` OmniGraph node now outputs a Bundle typed value rather than an Unknown typed value.
- Users can extract the "address" and the "arguments" of the OSC message with the `Extract Attribute` node.

## [0.1.1] - 2022-09-12
### Changed
- Updated documentation.

## [0.1.0] - 2022-09-02
### Added
- Initial release.
600
Markdown
22.115384
107
0.671667
NVIDIA-Omniverse/kit-osc/exts/omni.osc/docs/README.md
# omni.osc

Omniverse Kit extension for sending and receiving OSC (Open Sound Control) messages.
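
A minimal usage sketch from Python, based on the pattern in this extension's tests; the port value and handler name are placeholders:

```python
import omni.osc

# Start a UDP server that routes every incoming OSC message onto
# Kit's message bus as a Carbonite event.
server = omni.osc.OmniOscExt.create_server()
server.start("localhost", 3337)

# Handle each OSC message as it is popped from the event stream.
def on_event(e) -> None:
    addr, args = omni.osc.osc_message_from_carb_event(e)
    print(f"{addr}: {args}")

sub = omni.osc.subscribe_to_osc_event_stream(on_event)
```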
96
Markdown
31.333323
84
0.802083
AccelerationAgency/omniverse-extensions/exts/taa.google.spreadsheet.api/taa/google/spreadsheet/api/extension.py
import omni.ext
import omni.ui as ui
import omni.kit.app
import omni.kit.commands
import omni.kit.pipapi
import omni.usd
from typing import List
from pxr import Gf

omni.kit.pipapi.install('google-api-python-client')
omni.kit.pipapi.install('google-auth-httplib2')

from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

SPACING = 4
LABEL_WIDTH = 120


class MyExtension(omni.ext.IExt):

    data = {'translate_x': 0, 'translate_y': 0, 'translate_z': 0, 'rotate_x': 0, 'rotate_y': 0, 'rotate_z': 0, 'scale_x': 0, 'scale_y': 0, 'scale_z': 0}
    subscription = None
    stage = None
    google_sheet = None
    label_width = 50

    _source_prim_model = ui.SimpleStringModel()

    # lifecycle

    def on_startup(self, ext_id):
        print("[taa.google.spreadsheet.api] Extension starting up")

        self.stage = omni.usd.get_context().get_stage()

        self._window = ui.Window("TAA Google Spreadsheet API", width=400, height=270)
        with self._window.frame:
            with ui.VStack(height=0, spacing=SPACING):
                with ui.CollapsableFrame("Source", name="group"):
                    with ui.VStack(height=0, spacing=SPACING):
                        with ui.HStack():
                            ui.Label("Prim", name="attribute_name", width=LABEL_WIDTH)
                            ui.StringField(model=self._source_prim_model)
                            ui.Button(" S ", width=0, height=0, style={"margin": 0}, clicked_fn=self._on_get_selection, tooltip="Get From Selection")
                ui.Spacer(height=12)
                with ui.CollapsableFrame("Settings", name="group"):
                    with ui.VStack(height=0, spacing=SPACING):
                        ui.Label('Spreadsheet ID', height=20)
                        self.spreadsheet_id_field = ui.StringField(height=20)
                        ui.Label('Range', height=20)
                        self.range_field = ui.StringField(height=20)
                        ui.Label('API Key', height=20)
                        self.api_key_field = ui.StringField(height=20)
                ui.Spacer(height=12)
                self.startButton = ui.Button("Start", height=54, clicked_fn=lambda: self.start(), style={"background_color": "green"})
                self.stopButton = ui.Button("Stop", height=54, clicked_fn=lambda: self.stop(), style={"color": "red"})
                ui.Spacer(height=12)
                self.statusLabel = ui.Label('Click start to begin', height=14, style={"font_size": 12})
        self.stopButton.visible = False
        print("[taa.google.spreadsheet.api] Extension start up complete")

    def on_shutdown(self):
        print("Extension shutting down")
        self.stop()
        print("Extension shutdown complete")

    # custom methods

    def _on_get_selection(self):
        print('_on_get_selection', self.get_selection())
        self._source_prim_model.as_string = ", ".join(self.get_selection())

    def get_selection(self) -> List[str]:
        return omni.usd.get_context().get_selection().get_selected_prim_paths()

    def apply_changes(self, frame):
        try:
            # load the data from Google Spreadsheet every few seconds; this API is rate limited
            frameNumber = int(frame.payload["SWHFrameNumber"])
            if frameNumber % 180 != 0:
                return
            print('applying changes')
            self.read_data()
            # act on all selected prims
            paths = self.list_paths_of_selected_prims()
            for path in paths:
                # get reference to the prim on stage, making sure that it's valid
                prim = self.stage.GetPrimAtPath(path)
                if not prim.IsValid():
                    continue
                # transform the prim based on the settings in the Google Spreadsheet
                self.move_prim(prim)
                self.rotate_prim(prim)
                self.scale_prim(prim)
            print('changes applied successfully')
        except Exception as err:
            print(err)

    def read_config(self):
        try:
            spreadsheetId = self.spreadsheet_id_field.model.get_value_as_string()
            range = self.range_field.model.get_value_as_string()
            api_key = self.api_key_field.model.get_value_as_string()
            return (spreadsheetId, range, api_key)
        except Exception as err:
            print(err)

    def read_data(self):
        try:
            spreadsheetId, range, api_key = self.read_config()
            if self.google_sheet == None:
                service = build('sheets', 'v4', developerKey=api_key)
                self.google_sheet = service.spreadsheets()
            result = self.google_sheet.values().get(spreadsheetId=spreadsheetId, range=range).execute()
            values = result.get('values', [])
            data = toJSON(values)
            # normalize and clean data
            self.data["shape"] = data.setdefault('shape', 'Cube')
            self.data["size"] = float(data.setdefault('size', 100))
            self.data["radius"] = float(data.setdefault('radius', 100))
            self.data["translate_x"] = float(data.setdefault('translate_x', 0))
            self.data["translate_y"] = float(data.setdefault('translate_y', 0))
            self.data["translate_z"] = float(data.setdefault('translate_z', 0))
            self.data["rotate_x"] = float(data.setdefault('rotate_x', 0))
            self.data["rotate_y"] = float(data.setdefault('rotate_y', 0))
            self.data["rotate_z"] = float(data.setdefault('rotate_z', 0))
            self.data["scale_x"] = float(data.setdefault('scale_x', 1))
            self.data["scale_y"] = float(data.setdefault('scale_y', 1))
            self.data["scale_z"] = float(data.setdefault('scale_z', 1))
        except HttpError as err:
            print(err)

    def move_prim(self, prim):
        try:
            x = self.data.get('translate_x')
            y = self.data.get('translate_y')
            z = self.data.get('translate_z')
            omni.kit.commands.execute(
                'TransformPrimSRT',
                path=prim.GetPath(),
                new_translation=Gf.Vec3d(x, y, z),
            )
        except Exception as err:
            print("Failed to move prim", err)

    def rotate_prim(self, prim):
        try:
            x = self.data.get('rotate_x')
            y = self.data.get('rotate_y')
            z = self.data.get('rotate_z')
            omni.kit.commands.execute(
                'TransformPrimSRT',
                path=prim.GetPath(),
                new_rotation_euler=Gf.Vec3d(x, y, z),
            )
        except Exception as err:
            print("Failed to rotate prim", err)

    def scale_prim(self, prim):
        try:
            x = self.data.get('scale_x')
            y = self.data.get('scale_y')
            z = self.data.get('scale_z')
            omni.kit.commands.execute(
                'TransformPrimSRT',
                path=prim.GetPath(),
                new_scale=Gf.Vec3d(x, y, z),
            )
        except Exception as err:
            print("Failed to scale prim", err)

    def list_paths_of_selected_prims(self):
        try:
            paths = [i.strip() for i in self._source_prim_model.as_string.split(",")]
            if not paths:
                paths = self.get_selection()
            return paths
        except Exception as err:
            print(err)

    def start(self):
        self.read_data()

        def on_update_apply(frame):
            self.apply_changes(frame)

        self.subscription = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(on_update_apply)
        self.startButton.visible = False
        self.stopButton.visible = True
        self.statusLabel.text = "Status: started"

    def stop(self):
        if self.subscription:
            del self.subscription
        self.startButton.visible = True
        self.stopButton.visible = False
        self.statusLabel.text = "Status: stopped"


"""
Utility functions
"""


def toJSON(values):
    json = {}
    if not values:
        return json
    for row in values:
        key = row[0]
        value = row[1]
        if not key or not value:
            continue
        json[row[0]] = row[1]
    return json
8,802
Python
27.124601
152
0.527153
AccelerationAgency/omniverse-extensions/exts/taa.google.spreadsheet.api/config/extension.toml
[package]
version = "1.0.0"
title = "TAA - Google Spreadsheet API"
description = "An exploration into using Google Spreadsheet data to transform objects on the stage"
readme = "docs/README.md"
repository = ""
category = "Other"
keywords = ["taa", "google", "spreadsheet", "api", "example"]
icon = "data/taa-logo.png"

[dependencies]
"omni.kit.uiapp" = {}

[[python.module]]
name = "taa.google.spreadsheet.api"
399
TOML
23.999999
87
0.696742
AccelerationAgency/omniverse-extensions/exts/taa.omniverse.cameracreator/taa/omniverse/cameracreator/extension.py
import omni.ext
import omni.ui as ui
import omni.kit.commands as commands
import omni.kit.viewport_legacy


class MyExtension(omni.ext.IExt):

    # Lifecycle

    def on_startup(self, ext_id):
        print("[taa.omniverse.viewport] Extension starting up")

        self._window = ui.Window("TAA Quick Camera", width=200, height=200)
        with self._window.frame:
            with ui.VStack(height=0, spacing=4):
                self.perspectiveButton = ui.Button("Perspective", height=40, clicked_fn=lambda: self.create_perspective_camera(), style={"background_color": "black"})
                self.topButton = ui.Button("Top", height=40, clicked_fn=lambda: self.create_top_camera(), style={"background_color": "black"})
                self.frontButton = ui.Button("Front", height=40, clicked_fn=lambda: self.create_front_camera(), style={"background_color": "black"})
                self.rightButton = ui.Button("Right", height=40, clicked_fn=lambda: self.create_right_camera(), style={"background_color": "black"})
        print("[taa.omniverse.viewport] Extension start up complete")

    def on_shutdown(self):
        print("[taa.omniverse.viewport] Extension shutting down")
        self.stop()
        print("[taa.omniverse.viewport] Extension shutdown complete")

    # Custom methods

    def set_camera(self, path):
        omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window().set_active_camera(path)

    def rename_camera(self, name):
        cameraPath = omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window().get_active_camera()
        omni.kit.commands.execute('MovePrims', paths_to_move={cameraPath: f'/World/Camera_{name}'})

    def create_perspective_camera(self):
        print("[taa.omniverse.viewport] Creating new perspective camera")
        self.set_camera("/OmniverseKit_Persp")
        commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
        self.rename_camera("Perspective")

    def create_top_camera(self):
        print("[taa.omniverse.viewport] Creating new top-down camera")
        self.set_camera("/OmniverseKit_Top")
        commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
        self.rename_camera("Top")

    def create_front_camera(self):
        print("[taa.omniverse.viewport] Creating new front view camera")
        self.set_camera("/OmniverseKit_Front")
        commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
        self.rename_camera("Front")

    def create_right_camera(self):
        print("[taa.omniverse.viewport] Creating new right view camera")
        self.set_camera("/OmniverseKit_Right")
        commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
        self.rename_camera("Right")

    def start(self):
        print("[taa.omniverse.viewport] Starting...")

    def stop(self):
        print("[taa.omniverse.viewport] Stopping...")
2,974
Python
44.76923
165
0.675521
AccelerationAgency/omniverse-extensions/exts/taa.omniverse.cameracreator/config/extension.toml
[package]
version = "1.0.0"
title = "TAA - Omniverse Camera Creator"
description = "A simple extension that lets you quickly create cameras with a single click."
readme = "docs/README.md"
repository = ""
category = "Other"
keywords = ["taa", "viewport", "create", "camera", "view"]
icon = "data/taa-logo.png"

[dependencies]
"omni.kit.uiapp" = {}

[[python.module]]
name = "taa.omniverse.cameracreator"
405
TOML
24.374998
93
0.693827
ilanhuang/audio2face-streamgpt-public/README.md
# Stream-GPT

Stream-GPT is an Omniverse Extension that uses OpenAI's GPT-3 model to create a virtual assistant. It allows users to interact with the assistant through both text and voice, and the assistant responds in kind. The extension uses OpenAI's Whisper ASR system to transcribe audio input and Eleven Labs' API to convert the assistant's text responses into audio.

## Getting Started

### Prerequisites

- Python 3.6 or higher
- Omniverse Kit
- Omniverse Audio2Face
- OpenAI API key
- Eleven Labs API key

### Installation

1. Clone the repository:

```bash
git clone https://github.com/ilanhuang/audio2face-stream-chatgpt.git
```

2. Install the required Python packages:

```bash
pip install -r requirements.txt
```

3. Update the `sys.path.append` in `extension.py` with the correct path to the `streaming_server` directory in your local clone of the repository.

```python
sys.path.append("C:\\Users\\YourUsername\\path\\to\\stream-gpt\\pkg\\audio2face-2022.2.1\\exts\\omni.audio2face.player\\omni\\audio2face\\player\\scripts\\streaming_server")
```

4. Add the custom extension to Omniverse:
   - Go to the "Windows" tab on the top of the screen.
   - Scroll down to "Extensions".
   - Click on the gear icon to open the Extensions settings.
   - Click on the "+" button to add a new path to the custom extension.
   - A window will pop up when you turn on the extension.

5. Set your OpenAI and Eleven Labs API keys, as well as the voice_id, model_id, and the Audio2Face audio player's prim path (instance_name) in the extension's settings:
   - Open the extension and click on the "Settings" button.
   - Enter your OpenAI API key, Eleven Labs API key, voice_id, model_id and instance name in the corresponding fields. (A text file in the repository lists the available voice ids.)

## Usage

Once the application is running, you can interact with the virtual assistant through the UI. You can type your prompts into the text field and click on the "Send" button or use the "Record Audio" button to speak your prompts. The assistant will respond in the chat log and through your speakers.

You can also add a system to the GPT virtual assistant by typing it in the "System" field in the UI.

All interactions made with the extension are saved in a folder named "chat_logs" for future reference.
2,294
Markdown
40.727272
358
0.762424
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/recording_transcription.py
#Stream-GPT
#GNU - GPL Licence
#Copyright (C) <year>  <Huang I Lan & Erks - Virtual Studio>

#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

import os
import pyaudio
import wave
import keyboard
import time
from time import sleep
import openai
import datetime


def open_file(filepath):
    with open(filepath, 'r', encoding='utf-8') as infile:
        return infile.read()


def save_file(filepath, content):
    with open(filepath, 'w', encoding='utf-8') as outfile:
        outfile.write(content)


def timestamp_to_datetime(unix_time):
    return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")


def record_client_voice(output_filename, recording_status):
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    frames = []
    p = pyaudio.PyAudio()
    stream = None
    try:
        stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
        start_time = time.time()
        min_duration = 0.1
        while recording_status() or time.time() - start_time < min_duration:
            data = stream.read(CHUNK)
            frames.append(data)
    except Exception as e:
        print(f"Error while recording audio: {e}")
    finally:
        if stream is not None:
            stream.stop_stream()
            stream.close()
        p.terminate()
    wf = wave.open(output_filename, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    return output_filename


def transcribe_audio_to_text(file_path):
    with open(file_path, 'rb') as audio_file:
        transcript_response = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript_response["text"]
2,508
Python
32.013157
240
0.64673
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/transmission.py
#Stream-GPT
#GNU - GPL Licence
#Copyright (C) <year>  <Huang I Lan & Erks - Virtual Studio>

#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

import grpc
import os
import soundfile
import numpy as np
import audio2face_pb2
import audio2face_pb2_grpc
import sounddevice as sd
import time
from typing import Iterator
import requests
import queue
import threading
import carb


def generate_stream(text: str, voice_id: str, model_id: str, api_key: str, stream_chunk_size: int = 2048) -> Iterator[bytes]:
    url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}/stream"
    data = dict(text=text, model_id=model_id, voice_settings=None)
    headers = {"xi-api-key": api_key}
    response = requests.post(url, json=data, headers=headers, stream=True)
    for chunk in response.iter_content(chunk_size=stream_chunk_size):
        if chunk:
            yield chunk


def read_api_key_from_file(file_path: str) -> str:
    with open(file_path, 'r') as f:
        return f.read().strip()


def text_to_audio_stream(text, instance_name, api_key):
    print("text_to_audio_stream: start")
    settings = carb.settings.get_settings()
    voice_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/VOICE_ID")
    model_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/MODEL_ID")
    audio_stream = generate_stream(text, voice_id, model_id, api_key)
    current_dir = os.path.dirname(os.path.realpath(__file__))
    audio_filename = os.path.join(current_dir, "temp_audio_response.mp3")
    with open(audio_filename, 'wb') as f:
        for chunk in audio_stream:
            f.write(chunk)
    audio_data, samplerate = soundfile.read(audio_filename, dtype="float32")
    if len(audio_data.shape) > 1:
        audio_data = np.average(audio_data, axis=1)
    url = "localhost:50051"
    audio_queue = queue.Queue()
    audio_queue.put(audio_data)

    def audio_streamer():
        while not audio_queue.empty():
            audio_chunk = audio_queue.get()
            push_audio_track_stream(url, audio_chunk, samplerate, instance_name)

    audio_thread = threading.Thread(target=audio_streamer)
    audio_thread.start()
    os.remove(audio_filename)
    print("text_to_audio_stream: end")


def push_audio_track_stream(url, audio_data, samplerate, instance_name):
    print("push_audio_track_stream: start")
    chunk_size = samplerate // 10
    sleep_between_chunks = 0.04
    with grpc.insecure_channel(url) as channel:
        print("Channel created")
        stub = audio2face_pb2_grpc.Audio2FaceStub(channel)

        def make_generator():
            start_marker = audio2face_pb2.PushAudioRequestStart(
                samplerate=samplerate,
                instance_name=instance_name,
                block_until_playback_is_finished=False,
            )
            yield audio2face_pb2.PushAudioStreamRequest(start_marker=start_marker)
            for i in range(len(audio_data) // chunk_size + 1):
                try:
                    time.sleep(sleep_between_chunks)
                    chunk = audio_data[i * chunk_size : i * chunk_size + chunk_size]
                    yield audio2face_pb2.PushAudioStreamRequest(audio_data=chunk.astype(np.float32).tobytes())
                except Exception as e:
                    print(f"Error in generator function: {e}")
                    break

        request_generator = make_generator()
        print("Sending audio data...")
        response = stub.PushAudioStream(request_generator)
        if response.success:
            print("SUCCESS")
        else:
            print(f"ERROR: {response.message}")
    print("Channel closed")
4,203
Python
39.038095
240
0.66738
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/extension.py
#Stream-GPT
#GNU - GPL Licence
#Copyright (C) <year>  <Huang I Lan & Erks - Virtual Studio>

#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

import omni.ext
import sys

sys.path.append("C:\\Users\\ERKS 2\\Documents\\Omniverse\\ov\\pkg\\audio2face-2022.2.1\\exts\\omni.audio2face.player\\omni\\audio2face\\player\\scripts\\streaming_server")
import openai
import carb
from .window import AudioChatWindow


def open_file(filepath):
    with open(filepath, 'r', encoding='utf-8') as infile:
        return infile.read()


# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        openai.api_key = AudioChatWindow.get_openai_api_key()
        self._window = AudioChatWindow("VIRTUAL ASSISTANT", width=400, height=525)

    def on_shutdown(self):
        self._window.destroy()
        self._window = None
1,821
Python
55.937498
240
0.741351
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/chatbot.py
#Stream-GPT
#GNU - GPL Licence
#Copyright (C) <year>  <Huang I Lan & Erks - Virtual Studio>

#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

import os
import openai
import json
import numpy as np
from numpy.linalg import norm
import re
from time import time, sleep
from uuid import uuid4
import datetime

# Module-level default for the assistant's system prompt; set via set_system_content()
system_content = ''


def open_file(filepath):
    with open(filepath, 'r', encoding='utf-8') as infile:
        return infile.read()


def save_file(filepath, content):
    with open(filepath, 'w', encoding='utf-8') as outfile:
        outfile.write(content)


def load_json(filepath):
    with open(filepath, 'r', encoding='utf-8') as infile:
        return json.load(infile)


def save_json(filepath, payload):
    with open(filepath, 'w', encoding='utf-8') as outfile:
        json.dump(payload, outfile, ensure_ascii=False, sort_keys=True, indent=2)


def timestamp_to_datetime(unix_time):
    return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")


def gpt3_embedding(content, engine='text-embedding-ada-002'):
    content = content.encode(encoding='ASCII', errors='ignore').decode()  # fix any UNICODE errors
    response = openai.Embedding.create(input=content, engine=engine)
    vector = response['data'][0]['embedding']  # this is a normal list
    return vector


def chatgpt_completion(messages, model="gpt-4", temp=0.0, top_p=1.0, tokens=400, freq_pen=0.0, pres_pen=0.0):
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temp,
        max_tokens=tokens,
        top_p=top_p,
        frequency_penalty=freq_pen,
        presence_penalty=pres_pen,)
    text = response['choices'][0]['message']['content']
    tokens_used = response['usage']['total_tokens']
    filename = 'chat_%s_aibot.json' % time()
    script_dir = os.path.dirname(os.path.realpath(__file__))
    chat_logs_path = os.path.join(script_dir, 'chat_logs')
    if not os.path.exists(chat_logs_path):
        os.makedirs(chat_logs_path)
    input_message = messages[-1]['content']
    log_content = f"User:\n{input_message}\n\nAi_Bot:\n{text}\n\nTokens used: {tokens_used}"
    save_file(os.path.join(chat_logs_path, filename), log_content)
    return text


def flatten_convo(conversation):
    convo = ''
    for i in conversation:
        convo += '%s: %s\n' % (i['role'].upper(), i['content'])
    return convo.strip()


def set_openai_api_key(api_key):
    openai.api_key = api_key


def set_system_content(content):
    global system_content
    system_content = content


if __name__ == '__main__':
    convo_length = 30
    # NOTE: the original script referenced an undefined `api_key` here; reading
    # the key from an environment variable is an assumption made to keep the
    # standalone script runnable.
    set_openai_api_key(os.environ.get('OPENAI_API_KEY', ''))
    conversation = list()
    conversation.append({'role': 'system', 'content': system_content})
    counter = 0
    while True:
        # get user input, save to file
        a = input('\n\nCLIENT: ')
        conversation.append({'role': 'user', 'content': a})
        filename = 'chat_%s_client.txt' % time()
        if not os.path.exists('chat_logs'):
            os.makedirs('chat_logs')
        save_file('chat_logs/%s' % filename, a)
        flat = flatten_convo(conversation)
        # generate a response
        response = chatgpt_completion(conversation)
        conversation.append({'role': 'assistant', 'content': response})
        print('\n\nAI_Bot: %s' % response)
        # increment counter and consolidate memories
        counter += 2
        if counter >= 10:
            # reset conversation
            conversation = list()
            conversation.append({'role': 'system', 'content': system_content})
4,226
Python
35.128205
240
0.643871
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/window.py
#Stream-GPT
#GNU - GPL Licence
#Copyright (C) <year>  <Huang I Lan & Erks - Virtual Studio>

#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

import os
import omni.ui as ui
import omni.kit.commands
from omni.kit.window.popup_dialog.form_dialog import FormDialog
from .recording_transcription import record_client_voice, transcribe_audio_to_text
from .chatbot import chatgpt_completion, set_system_content
from .transmission import text_to_audio_stream
import threading
import time
import tempfile
import datetime
import carb


def save_file(filepath, content):
    with open(filepath, 'w', encoding='utf-8') as outfile:
        outfile.write(content)


def timestamp_to_datetime(unix_time):
    return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z")


class AudioChatWindow(ui.Window):
    def _build_fn(self):
        with self.frame:
            with ui.VStack():
                with ui.ScrollingFrame(height=ui.Percent(75)):
                    self.chat_log = ui.Label("", word_wrap=True)
                with ui.HStack(height=ui.Percent(10)):
                    ui.StringField(model=self._prompt_model, multiline=True)
                with ui.HStack(height=ui.Percent(10)):
                    self.record_audio_button = ui.Button("Record Audio", height=40, clicked_fn=lambda *_args, **_kwargs: self._toggle_record_audio())
                    ui.Button("Send", height=40, clicked_fn=lambda: self._send_text_prompt())
                with ui.HStack():
                    ui.Button("Settings", tooltip="Configure API Key, Instance name and Default System", width=0, height=0, clicked_fn=lambda: self._open_settings())
                    system_settings_button = ui.Button("System", height=0, width=0)
                    system_settings_button.set_clicked_fn(lambda: self.show_system_settings_menu())

    def __init__(self, title: str, **kwargs) -> None:
        self.conversation = [{"role": "system", "content": ""}]
        self.system_content_model = ui.SimpleStringModel()
        self.lock = threading.Lock()
        super().__init__(title, **kwargs)
        self._prompt_model = ui.SimpleStringModel()
        self.frame.set_build_fn(self._build_fn)

    def show_system_settings_menu(self):
        self.system_settings_menu = ui.Menu("")
        with self.system_settings_menu:
            ui.StringField(model=self.system_content_model, multiline=True)
        self.system_settings_menu.show()

    def _process_conversation(self, user_content):
        current_system_content = self.system_content_model.get_value_as_string().strip()
        if current_system_content != self.conversation[0]['content']:
            self.reset_chat()
        set_system_content(current_system_content)
        self.conversation.append({"role": "user", "content": user_content})
        response = chatgpt_completion(self.conversation)
        self.chat_log.text += f"\nUser: {user_content}\nAssistant: {response}"
        settings = carb.settings.get_settings()
        instance_name = settings.get_as_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME")
        threading.Thread(target=text_to_audio_stream, args=(response, instance_name, self.get_elevenlabs_api_key())).start()

    def _record_and_transcribe_audio(self):
        output_filename = "recorded_audio.wav"
        record_client_voice(output_filename, self.recording_status)
        transcript = transcribe_audio_to_text(output_filename)
        self._send_audio_transcript(transcript)

    def _send_audio_transcript(self, transcript):
        self.chat_log.text += "\nThinking..."
        threading.Thread(target=self._process_conversation, args=(transcript,)).start()

    def reset_chat(self):
        self.chat_log.text = ""
        self.conversation = [{"role": "system", "content": self.system_content_model.get_value_as_string().strip()}]

    def _save_settings(self, dialog):
        values = dialog.get_values()
        settings = carb.settings.get_settings()
        settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI", values["APIKey_OPEN_AI"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS", values["APIKey_ELEVEN_LABS"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/VOICE_ID", values["ELEVEN_LABS_VOICE_ID"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/MODEL_ID", values["ELEVEN_LABS_MODEL_ID"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME", values["INSTANCE_NAME"])
        dialog.hide()

    def _open_settings(self):
        settings = carb.settings.get_settings()
        apikey_open_ai = settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI")
        apikey_eleven_labs = settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS")
        voice_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/VOICE_ID")
        model_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/MODEL_ID")
        instance_name = settings.get_as_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME")

        if apikey_open_ai == "":
            apikey_open_ai = "Enter OPEN-AI API Key Here"
        if apikey_eleven_labs == "":
            apikey_eleven_labs = "Enter ELEVEN-LABS API Key Here"
        if instance_name == "":
            instance_name = "Enter Instance Name Here"
        if voice_id == "":
            voice_id = "Enter Eleven Labs Voice ID Here"
        if model_id == "":
            model_id = "Enter Eleven Labs Model ID Here"

        field_defs = [
            FormDialog.FieldDef("APIKey_OPEN_AI", "OPEN-AI API Key: ", ui.StringField, apikey_open_ai),
            FormDialog.FieldDef("APIKey_ELEVEN_LABS", "ELEVEN-LABS API Key: ", ui.StringField, apikey_eleven_labs),
            FormDialog.FieldDef("ELEVEN_LABS_VOICE_ID", "Voice ID: ", ui.StringField, voice_id),
            FormDialog.FieldDef("ELEVEN_LABS_MODEL_ID", "Model ID: ", ui.StringField, model_id),
            FormDialog.FieldDef("INSTANCE_NAME", "Instance Name: ", ui.StringField, instance_name),
        ]
        dialog = FormDialog(
            title="Settings",
            message="Your Settings: ",
            field_defs=field_defs,
            ok_handler=lambda dialog: self._save_settings(dialog))
        dialog.show()

    @staticmethod
    def get_openai_api_key():
        settings = carb.settings.get_settings()
        return settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI")

    def get_elevenlabs_api_key(self):
        settings = carb.settings.get_settings()
        return settings.get_as_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS")

    def _send_text_prompt(self):
        prompt = self._prompt_model.get_value_as_string()
        self.chat_log.text += "\nThinking..."
        threading.Thread(target=self._process_conversation, args=(prompt,)).start()
        self._prompt_model.set_value("")

    def _toggle_record_audio(self):
        if not hasattr(self, "recording"):
            self.recording = False
        self.recording = not self.recording
        if self.recording:
            # The worker thread records for as long as recording_status() is
            # True, then transcribes and sends the result once it flips False.
            self.record_audio_button.text = "Stop Recording"
            threading.Thread(target=self._record_and_transcribe_audio_alternative).start()
        else:
            self.record_audio_button.text = "Record Audio"

    def recording_status(self):
        return self.recording

    def _record_and_transcribe_audio_alternative(self):
        with self.lock:
            temp_audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
            temp_audio_filename = temp_audio_file.name
            temp_audio_file.close()
            recorded_audio_filename = record_client_voice(temp_audio_filename, self.recording_status)
            transcript = transcribe_audio_to_text(recorded_audio_filename)
            os.remove(temp_audio_filename)
            if transcript.strip():
                self._send_audio_transcript(transcript)

    def destroy(self):
        super().destroy()
        self._prompt_model = None
9,174
Python
47.036649
240
0.645193
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/pytransform/__init__.py
# These modules are also used by protection code, so that protection
# code needn't import anything
import os
import platform
import sys
import struct

# Because ctypes is new in Python 2.5, pytransform doesn't work
# before Python 2.5
#
from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
    pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
from fnmatch import fnmatch

#
# Support Platforms
#
plat_path = 'platforms'

plat_table = (
    ('windows', ('windows', 'cygwin*')),
    ('darwin', ('darwin',)),
    ('ios', ('ios',)),
    ('linux', ('linux*',)),
    ('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
    ('poky', ('poky',)),
)

arch_table = (
    ('x86', ('i?86', )),
    ('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
    ('arm', ('armv5',)),
    ('armv6', ('armv6l',)),
    ('armv7', ('armv7l',)),
    ('ppc64', ('ppc64le',)),
    ('mips32', ('mips',)),
    ('aarch32', ('aarch32',)),
    ('aarch64', ('aarch64', 'arm64'))
)

#
# Hardware type
#
HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)

#
# Global
#
_pytransform = None


class PytransformError(Exception):
    pass


def dllmethod(func):
    def wrap(*args, **kwargs):
        return func(*args, **kwargs)
    return wrap


@dllmethod
def version_info():
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('version_info', _pytransform))
    return dlfunc()


@dllmethod
def init_pytransform():
    major, minor = sys.version_info[0:2]
    # Python2.5 no sys.maxsize but sys.maxint
    # bitness = 64 if sys.maxsize > 2**32 else 32
    prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
    init_module = prototype(('init_module', _pytransform))
    ret = init_module(major, minor, pythonapi._handle)
    if (ret & 0xF000) == 0x1000:
        raise PytransformError('Initialize python wrapper failed (%d)'
                               % (ret & 0xFFF))
    return ret


@dllmethod
def init_runtime():
    prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
    _init_runtime = prototype(('init_runtime', _pytransform))
    return _init_runtime(0, 0, 0, 0)


@dllmethod
def encrypt_code_object(pubkey, co, flags, suffix=''):
    _pytransform.set_option(6, suffix.encode())
    prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
    dlfunc = prototype(('encrypt_code_object', _pytransform))
    return dlfunc(pubkey, co, flags)


@dllmethod
def generate_license_key(prikey, keysize, rcode):
    prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
    dlfunc = prototype(('generate_license_key', _pytransform))
    return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
        else dlfunc(prikey, keysize, rcode.encode())


@dllmethod
def get_registration_code():
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('get_registration_code', _pytransform))
    return dlfunc()


@dllmethod
def get_expired_days():
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('get_expired_days', _pytransform))
    return dlfunc()


@dllmethod
def clean_obj(obj, kind):
    prototype = PYFUNCTYPE(c_int, py_object, c_int)
    dlfunc = prototype(('clean_obj', _pytransform))
    return dlfunc(obj, kind)


def clean_str(*args):
    tdict = {
        'str': 0,
        'bytearray': 1,
        'unicode': 2
    }
    for obj in args:
        k = tdict.get(type(obj).__name__)
        if k is None:
            raise RuntimeError('Can not clean object: %s' % obj)
        clean_obj(obj, k)


def get_hd_info(hdtype, name=None):
    if hdtype not in range(HT_DOMAIN + 1):
        raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
    size = 256
    t_buf = c_char * size
    buf = t_buf()
    cname = c_char_p(0 if name is None
                     else name.encode('utf-8') if hasattr(name, 'encode')
                     else name)
    if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
        raise PytransformError('Get hardware information failed')
    return buf.value.decode()


def show_hd_info():
    return _pytransform.show_hd_info()


def assert_armored(*names):
    prototype = PYFUNCTYPE(py_object, py_object)
    dlfunc = prototype(('assert_armored', _pytransform))

    def wrapper(func):
        def wrap_execute(*args, **kwargs):
            dlfunc(names)
            return func(*args, **kwargs)
        return wrap_execute
    return wrapper


def check_armored(*names):
    try:
        prototype = PYFUNCTYPE(py_object, py_object)
        prototype(('assert_armored', _pytransform))(names)
        return True
    except RuntimeError:
        return False


def get_license_info():
    info = {
        'ISSUER': None,
        'EXPIRED': None,
        'HARDDISK': None,
        'IFMAC': None,
        'IFIPV4': None,
        'DOMAIN': None,
        'DATA': None,
        'CODE': None,
    }
    rcode = get_registration_code().decode()
    if rcode.startswith('*VERSION:'):
        index = rcode.find('\n')
        info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
        rcode = rcode[index+1:]
    index = 0
    if rcode.startswith('*TIME:'):
        from time import ctime
        index = rcode.find('\n')
        info['EXPIRED'] = ctime(float(rcode[6:index]))
        index += 1
    if rcode[index:].startswith('*FLAGS:'):
        index += len('*FLAGS:') + 1
        info['FLAGS'] = ord(rcode[index - 1])
    prev = None
    start = index
    for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
        index = rcode.find('*%s:' % k)
        if index > -1:
            if prev is not None:
                info[prev] = rcode[start:index]
            prev = k
            start = index + len(k) + 2
    info['CODE'] = rcode[start:]
    i = info['CODE'].find(';')
    if i > 0:
        info['DATA'] = info['CODE'][i+1:]
        info['CODE'] = info['CODE'][:i]
    return info


def get_license_code():
    return get_license_info()['CODE']


def get_user_data():
    return get_license_info()['DATA']


def _match_features(patterns, s):
    for pat in patterns:
        if fnmatch(s, pat):
            return True


def _gnu_get_libc_version():
    try:
        prototype = CFUNCTYPE(c_char_p)
        ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
        return ver.decode().split('.')
    except Exception:
        pass


def format_platform(platid=None):
    if platid:
        return os.path.normpath(platid)
    plat = platform.system().lower()
    mach = platform.machine().lower()
    for alias, platlist in plat_table:
        if _match_features(platlist, plat):
            plat = alias
            break
    if plat == 'linux':
        cname, cver = platform.libc_ver()
        if cname == 'musl':
            plat = 'musl'
        elif cname == 'libc':
            plat = 'android'
        elif cname == 'glibc':
            v = _gnu_get_libc_version()
            if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
                plat = 'centos6'
    for alias, archlist in arch_table:
        if _match_features(archlist, mach):
            mach = alias
            break
    if plat == 'windows' and mach == 'x86_64':
        bitness = struct.calcsize('P'.encode()) * 8
        if bitness == 32:
            mach = 'x86'
    return os.path.join(plat, mach)


# Load _pytransform library
def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
    path = os.path.dirname(__file__) if path is None \
        else os.path.normpath(path)

    plat = platform.system().lower()
    for alias, platlist in plat_table:
        if _match_features(platlist, plat):
            plat = alias
            break
    name = '_pytransform' + suffix
    if plat == 'linux':
        filename = os.path.abspath(os.path.join(path, name + '.so'))
    elif plat in ('darwin', 'ios'):
        filename = os.path.join(path, name + '.dylib')
    elif plat == 'windows':
        filename = os.path.join(path, name + '.dll')
    elif plat in ('freebsd', 'poky'):
        filename = os.path.join(path, name + '.so')
    else:
        filename = None

    if platid is not None and os.path.isfile(platid):
        filename = platid
    elif platid is not None or not os.path.exists(filename) or not is_runtime:
        libpath = platid if platid is not None and os.path.isabs(platid) else \
            os.path.join(path, plat_path, format_platform(platid))
        filename = os.path.join(libpath, os.path.basename(filename))

    if filename is None:
        raise PytransformError('Platform %s not supported' % plat)

    if not os.path.exists(filename):
        raise PytransformError('Could not find "%s"' % filename)

    try:
        m = cdll.LoadLibrary(filename)
    except Exception as e:
        if sys.flags.debug:
            print('Load %s failed:\n%s' % (filename, e))
        raise

    # Removed from v4.6.1
    # if plat == 'linux':
    #     m.set_option(-1, find_library('c').encode())

    if not os.path.abspath('.') == os.path.abspath(path):
        m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
    elif (not is_runtime) and sys.platform.startswith('cygwin'):
        path = os.environ['PYARMOR_CYGHOME']
        m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)

    # Required from Python3.6
    m.set_option(2, sys.byteorder.encode())

    if sys.flags.debug:
        m.set_option(3, c_char_p(1))
    m.set_option(4, c_char_p(not is_runtime))

    # Disable advanced mode by default
    m.set_option(5, c_char_p(not advanced))

    # Set suffix for private package
    if suffix:
        m.set_option(6, suffix.encode())

    return m


def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
    global _pytransform
    _pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
    return init_pytransform()


def pyarmor_runtime(path=None, suffix='', advanced=0):
    if _pytransform is not None:
        return

    try:
        pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
        init_runtime()
    except Exception as e:
        if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
            raise
        sys.stderr.write("%s\n" % str(e))
        sys.exit(1)


# ----------------------------------------------------------
# End of pytransform
# ----------------------------------------------------------

#
# Unused
#
@dllmethod
def generate_license_file(filename, priname, rcode, start=-1, count=1):
    prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
    dlfunc = prototype(('generate_project_license_files', _pytransform))
    return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
                  start, count) if sys.version_info[0] == 3 \
        else dlfunc(filename, priname, rcode, start, count)


#
# Not available from v5.6
#
def generate_capsule(licfile):
    prikey, pubkey, prolic = _generate_project_capsule()
    capkey, newkey = _generate_pytransform_key(licfile, pubkey)
    return prikey, pubkey, capkey, newkey, prolic


@dllmethod
def _generate_project_capsule():
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('generate_project_capsule', _pytransform))
    return dlfunc()


@dllmethod
def _generate_pytransform_key(licfile, pubkey):
    prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
    dlfunc = prototype(('generate_pytransform_key', _pytransform))
    return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
                  pubkey)


#
# Deprecated functions from v5.1
#
@dllmethod
def encrypt_project_files(proname, filelist, mode=0):
    prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
    dlfunc = prototype(('encrypt_project_files', _pytransform))
    return dlfunc(proname.encode(), filelist, mode)


def generate_project_capsule(licfile):
    prikey, pubkey, prolic = _generate_project_capsule()
    capkey = _encode_capsule_key_file(licfile)
    return prikey, pubkey, capkey, prolic


@dllmethod
def _encode_capsule_key_file(licfile):
    prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
    dlfunc = prototype(('encode_capsule_key_file', _pytransform))
    return dlfunc(licfile.encode(), None)


@dllmethod
def encrypt_files(key, filelist, mode=0):
    t_key = c_char * 32
    prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
    dlfunc = prototype(('encrypt_files', _pytransform))
    return dlfunc(t_key(*key), filelist, mode)


@dllmethod
def generate_module_key(pubname, key):
    t_key = c_char * 32
    prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
    dlfunc = prototype(('generate_module_key', _pytransform))
    return dlfunc(pubname.encode(), t_key(*key), None)


#
# Compatible for PyArmor v3.0
#
@dllmethod
def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
    '''Only for old version, before PyArmor 3'''
    pyarmor_init(is_runtime=1)
    prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
    _init_runtime = prototype(('init_runtime', _pytransform))
    return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)


@dllmethod
def import_module(modname, filename):
    '''Only for old version, before PyArmor 3'''
    prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
    _import_module = prototype(('import_module', _pytransform))
    return _import_module(modname.encode(), filename.encode())


@dllmethod
def exec_file(filename):
    '''Only for old version, before PyArmor 3'''
    prototype = PYFUNCTYPE(c_int, c_char_p)
    _exec_file = prototype(('exec_file', _pytransform))
    return _exec_file(filename.encode())
13,587
Python
27.07438
79
0.60499
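The pytransform module above is the runtime loader that PyArmor ships next to obfuscated scripts. A minimal usage sketch, assuming the native `_pytransform` library sits next to the module (as in a real obfuscated distribution) and a valid license file is present — not part of the original file:

```
from pytransform import pyarmor_init, get_license_info

# load the native _pytransform library and initialize the runtime
pyarmor_init()

# parse the registration code into its fields
info = get_license_info()
print(info['CODE'], info['EXPIRED'])
```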
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.2"

# Lists people or organizations that are considered the "authors" of the package.
authors = ["Huang I Lan - Erks Virtual Studio"]

# The title and description fields are primarily for displaying extension info in UI
title = "stream-gpt"
description="Extension for NVIDIA Omniverse that provides a simple chatbot UI to record audio input, transcribe it, use the transcription as a ChatGPT prompt, generate a response, convert the response to audio, and transmit it to Audio2Face via gRPC."

# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"

# URL of the extension source repository.
repository = ""

# One of categories for UI.
category = "Chatbot"

# Keywords for the extension
keywords = ["Chat_GPT", "AI_assistant"]

# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"

# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"

# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"

# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}

[python.pipapi]
requirements = [
    "pyaudio",
    "openai",
    "keyboard",
    "soundfile",
    "elevenlabs",
    "pydub",
    "gtts",
]

# Allow going to online index if package can't be found locally (not recommended)
use_online_index = true

# Main python module this extension provides, it will be publicly available as "import stream.gptchat".
[[python.module]]
name = "stream.gptchat"

[[test]]
# Extra dependencies only to be used during test run
dependencies = [
    "omni.kit.ui_test" # UI testing extension
]
2,071
TOML
32.967213
318
0.740222
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/docs/CHANGELOG.md
# Changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## [1.0.2] - 2023-07-06 - Upgraded the UI to allow users to add API keys, Voice_ID, Voice_Models, and Instance Name directly from the UI, eliminating the need for hardcoding. ## [1.0.0] - 2023-04-13 - Initial version of extension UI template with a window.
355
Markdown
28.666664
150
0.715493
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/docs/README.md
# Stream-GPT

Stream-GPT is an Omniverse Extension that uses OpenAI's GPT-3 model to create a virtual assistant. It allows users to interact with the assistant through both text and voice, and the assistant responds in kind. The extension uses OpenAI's Whisper ASR system to transcribe audio input and Eleven Labs' API to convert the assistant's text responses into audio.

## Getting Started

### Prerequisites

- OpenAI API key
- Eleven Labs API key

### SET UP

1. Set your OpenAI and Eleven Labs API keys, as well as the voice_id, model_id, and the Audio2Face audio player's prim path (instance_name) in the extension's settings:

   - Open the extension and click on the "Settings" button.
   - Enter your OpenAI API key, Eleven Labs API key, voice_id, model_id, and instance name in the corresponding fields. (A text file in the repository lists the available voice ids.)

## Usage

Once the application is running, you can interact with the virtual assistant through the UI. You can type your prompts into the text field and click on the "Send" button, or use the "Record Audio" button to speak your prompts. The assistant will respond in the chat log and through your speakers.

You can also add a system prompt to the GPT virtual assistant by typing it in the "System" field in the UI.

All interactions made with the extension are saved in a folder named "chat_logs" for future reference.
1,389
Markdown
46.931033
358
0.773938
ilanhuang/audio2face-streamgpt-public/UE5_install_files/extension.toml
[package]
version = "104.10.8"
title = "Audio2Face Exporter"
authors = ["NVIDIA"]
description="Custom Kit exporter for audio2face"
repository = ""
keywords = ["audio2face"]
category = "Animation"

readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"

[dependencies]
"omni.ui" = {optional = true}
"omni.kit.window.filepicker" = {optional = true}
"omni.graph" = {}
"omni.graph.tools" = {}
"omni.kit.menu.utils" = {optional = true}
"omni.kit.window.viewport" = {optional = true}
"omni.kit.viewport.utility" = {optional = true}
"omni.client" = {}
"omni.anim.shared" = {}
"omni.deform.shared" = {}
"omni.audio2face.common" = {}
"omni.audio2face.ui.common" = {optional = true}
"omni.audio2face.tool" = {}
"omni.services.core" = {}

[[python.module]]
name = "omni.audio2face.exporter"

[[test]]
dependencies = [
    "omni.kit.renderer.core",
    "omni.ui",
    "omni.kit.window.filepicker",
    "omni.kit.menu.utils",
    "omni.kit.window.viewport",
    "omni.kit.viewport.utility",
    "omni.audio2face.ui.common"
]
timeout = 900
stdoutFailPatterns.exclude = [
    "*failed to upload minidump*", # Exclude graphics leaks until fixed
]

[package.writeTarget]
kit = true
platform = true

[python.pipapi]
requirements = ['python-osc']
use_online_index = true
1,310
TOML
22.836363
71
0.681679
ilanhuang/audio2face-streamgpt-public/UE5_install_files/from pythonosc import udp_client.py
from pythonosc import udp_client

# ARKit-style blendshape names, in the order produced by the FACS solver.
blend = ["eyeBlinkLeft", "eyeLookDownLeft", "eyeLookInLeft", "eyeLookOutLeft", "eyeLookUpLeft",
         "eyeSquintLeft", "eyeWideLeft", "eyeBlinkRight", "eyeLookDownRight", "eyeLookInRight",
         "eyeLookOutRight", "eyeLookUpRight", "eyeSquintRight", "eyeWideRight", "jawForward",
         "jawLeft", "jawRight", "jawOpen", "mouthClose", "mouthFunnel", "mouthPucker", "mouthLeft",
         "mouthRight", "mouthSmileLeft", "mouthSmileRight", "mouthFrownLeft", "mouthFrownRight",
         "mouthDimpleLeft", "mouthDimpleRight", "mouthStretchLeft", "mouthStretchRight",
         "mouthRollLower", "mouthRollUpper", "mouthShrugLower", "mouthShrugUpper", "mouthPressLeft",
         "mouthPressRight", "mouthLowerDownLeft", "mouthLowerDownRight", "mouthUpperUpLeft",
         "mouthUpperUpRight", "browDownLeft", "browDownRight", "browInnerUp", "browOuterUpLeft",
         "browOuterUpRight", "cheekPuff", "cheekSquintLeft", "cheekSquintRight", "noseSneerLeft",
         "noseSneerRight", "tongueOut"]

client = udp_client.SimpleUDPClient('127.0.0.1', 5008)

# `outWeight` is the per-pose weight vector produced by FacsSolver.computeFacsWeights();
# it must be supplied by the caller before this snippet runs.
osc_array = outWeight.tolist()
for count, weight in enumerate(osc_array):
    client.send_message('/' + str(blend[count]), weight)

# The following belongs in the extension's config/extension.toml, not in this Python script:
# [python.pipapi]
# requirements = ['python-osc']
# use_online_index = true
1,267
Python
89.571422
910
0.708761
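The script above only shows the sending side. For reference, a matching receiver built with python-osc's dispatcher and blocking UDP server might look like the sketch below; the wildcard handler and reuse of port 5008 are assumptions for illustration, not part of the original script:

```
from pythonosc.dispatcher import Dispatcher
from pythonosc import osc_server

def on_blendshape(address, value):
    # e.g. address='/jawOpen', value=0.8
    print(address, value)

dispatcher = Dispatcher()
dispatcher.set_default_handler(on_blendshape)  # catch every blendshape address

server = osc_server.BlockingOSCUDPServer(('127.0.0.1', 5008), dispatcher)
server.serve_forever()  # blocks until interrupted
```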
ilanhuang/audio2face-streamgpt-public/UE5_install_files/facsSolver.py
import numpy as np from omni.audio2face.common import log_error, log_info, log_warn from scipy.optimize import lsq_linear from pythonosc import udp_client class FacsSolver: def __init__(self, neutral_mat, delta_mat): self.weightRegulCoeff = 3.5 self.weightRegulCoeff_scale = 10.0 self.prevRegulCoeff = 3.5 self.prevRegulCoeff_scale = 100.0 self.sparseRegulCoeff = 1.0 self.sparseRegulCoeff_scale = 0.25 self.symmetryRegulCoeff = 1.0 self.symmetryRegulCoeff_scale = 10.0 self.neutral_mat = neutral_mat self.delta_mat_orig = delta_mat self.delta_mat = delta_mat self.numPoses_orig = self.delta_mat_orig.shape[1] self.numPoses = self.numPoses_orig self.lb_orig = np.zeros(self.numPoses_orig) self.ub_orig = self.lb_orig + 1.0 self.lb = self.lb_orig.copy() self.ub = self.ub_orig.copy() self.activeIdxMap = range(self.numPoses_orig) self.activePosesBool = np.array([True for pi in range(self.numPoses_orig)], dtype=bool) self.cancelPoseIndices = np.array([-1 for pi in range(self.numPoses_orig)], dtype=int) self.symmetryPoseIndices = np.array([-1 for pi in range(self.numPoses_orig)], dtype=int) self.cancelList = [] self.symmetryList = [] self.symShapeMat = np.zeros((self.numPoses_orig, self.numPoses_orig)) self.prevWeights = np.zeros(self.numPoses_orig) # TODO L1 implementation l1RegulMat = np.ones((1, self.numPoses)) self.l1RegulMat = np.dot(l1RegulMat.T, l1RegulMat) self.compute_A_mat() def compute_A_mat(self): self.A = ( np.dot(self.delta_mat.T, self.delta_mat) + self.weightRegulCoeff * self.weightRegulCoeff_scale * np.eye(self.numPoses) + self.prevRegulCoeff * self.prevRegulCoeff_scale * np.eye(self.numPoses) + self.sparseRegulCoeff ** 2 * self.sparseRegulCoeff_scale * self.l1RegulMat + self.symmetryRegulCoeff * self.symmetryRegulCoeff_scale * self.symShapeMat ) self.A = self.A.astype(np.float64) def set_activePoses(self, activePosesBool): self.activePosesBool = activePosesBool # 1 - simple approach # self.ub *= np.array(self.activePosesBool) # 2- less computation way self.delta_mat = self.delta_mat_orig[:, self.activePosesBool] self.numPoses = self.delta_mat.shape[1] self.lb = self.lb_orig[self.activePosesBool] self.ub = self.ub_orig[self.activePosesBool] self.prevWeights = np.zeros(self.numPoses) self.activeIdxMap = [] cnt = 0 for idx in range(self.numPoses_orig): if self.activePosesBool[idx]: self.activeIdxMap.append(cnt) cnt += 1 else: self.activeIdxMap.append(-1) # update L1 regularization mat l1RegulMat = np.ones((1, self.numPoses)) self.l1RegulMat = np.dot(l1RegulMat.T, l1RegulMat) # update cancel pair index self.set_cancelPoses(self.cancelPoseIndices) # update symmetry pair index self.set_symmetryPoses(self.symmetryPoseIndices) # update self.A here def set_cancelPoses(self, cancelPoseIndices): self.cancelPoseIndices = cancelPoseIndices # filter out cancel shapes self.cancelList = [] maxIdx = np.max(self.cancelPoseIndices) if maxIdx < 0: return for ci in range(maxIdx + 1): cancelIndices = np.where(self.cancelPoseIndices == ci)[0] if len(cancelIndices) > 2: log_warn("There is more than 2 poses for a cancel index %d" % ci) break elif len(cancelIndices) < 2: log_warn("There is less than 2 poses for a cancel index %d" % ci) break self.cancelList.append(cancelIndices) # print ('cancel shape list', self.cancelList) activeCancelList = [] for pIdx1, pIdx2 in self.cancelList: if self.activePosesBool[pIdx1] and self.activePosesBool[pIdx2]: activeCancelList.append([self.activeIdxMap[pIdx1], self.activeIdxMap[pIdx2]]) # print (activeCancelList) self.cancelList = activeCancelList def set_symmetryPoses(self, 
symmetryPoseIndices):
        self.symmetryPoseIndices = symmetryPoseIndices

        self.symmetryList = []
        maxIdx = np.max(self.symmetryPoseIndices)
        if maxIdx < 0:
            self.symShapeMat = np.zeros((self.numPoses, self.numPoses))
        else:
            for ci in range(maxIdx + 1):
                symmetryIndices = np.where(self.symmetryPoseIndices == ci)[0]
                if len(symmetryIndices) > 2:
                    log_warn("There is more than 2 poses for a symmetry index %d" % ci)
                    break
                elif len(symmetryIndices) < 2:
                    log_warn("There is less than 2 poses for a symmetry index %d" % ci)
                    break
                self.symmetryList.append(symmetryIndices)

            activeSymmetryList = []
            for pIdx1, pIdx2 in self.symmetryList:
                if self.activePosesBool[pIdx1] and self.activePosesBool[pIdx2]:
                    activeSymmetryList.append([self.activeIdxMap[pIdx1], self.activeIdxMap[pIdx2]])
            self.symmetryList = activeSymmetryList

            symShapeMat = np.zeros((len(self.symmetryList), self.numPoses))
            for si, [pose1Idx, pose2Idx] in enumerate(self.symmetryList):
                symShapeMat[si, pose1Idx] = 1.0
                symShapeMat[si, pose2Idx] = -1.0
            self.symShapeMat = np.dot(symShapeMat.T, symShapeMat)

        self.compute_A_mat()

    def set_l2_regularization(self, L2=3.5):
        self.weightRegulCoeff = L2
        self.compute_A_mat()

    def set_tempo_regularization(self, temporal=3.5):
        self.prevRegulCoeff = temporal
        self.compute_A_mat()

    def set_l1_regularization(self, L1=1.0):
        self.sparseRegulCoeff = L1
        self.compute_A_mat()

    def set_symmetry_regularization(self, value=1.0):
        self.symmetryRegulCoeff = value
        self.compute_A_mat()

    def computeFacsWeights(self, point_mat):
        target_delta_mat = point_mat - self.neutral_mat
        B = (
            np.dot(self.delta_mat.T, target_delta_mat).flatten()
            + self.prevRegulCoeff * self.prevRegulCoeff_scale * self.prevWeights
        )
        B = B.astype(np.float64)
        res = lsq_linear(self.A, B, bounds=(self.lb, self.ub), lsmr_tol="auto", verbose=0, method="bvls")
        # print ('first pass:', res.x)

        if len(self.cancelList) > 0:
            # check cancelling poses -
            ub = self.ub.copy()
            lb = self.lb.copy()
            for pose1Idx, pose2Idx in self.cancelList:
                if res.x[pose1Idx] >= res.x[pose2Idx]:
                    ub[pose2Idx] = 1e-10
                else:
                    ub[pose1Idx] = 1e-10
            res = lsq_linear(self.A, B, bounds=(lb, ub), lsmr_tol="auto", verbose=0, method="bvls")

        self.prevWeights = res.x
        # print ('second pass:', res.x)

        outWeight = np.zeros(self.numPoses_orig)
        outWeight[self.activePosesBool] = res.x
        outWeight = outWeight * (outWeight > 1.0e-9)
        # print (outWeight)

        # ARKit-style blendshape names, in the solver's pose order.
        blend = ["eyeBlinkLeft", "eyeLookDownLeft", "eyeLookInLeft", "eyeLookOutLeft", "eyeLookUpLeft",
                 "eyeSquintLeft", "eyeWideLeft", "eyeBlinkRight", "eyeLookDownRight", "eyeLookInRight",
                 "eyeLookOutRight", "eyeLookUpRight", "eyeSquintRight", "eyeWideRight", "jawForward",
                 "jawLeft", "jawRight", "jawOpen", "mouthClose", "mouthFunnel", "mouthPucker",
                 "mouthLeft", "mouthRight", "mouthSmileLeft", "mouthSmileRight", "mouthFrownLeft",
                 "mouthFrownRight", "mouthDimpleLeft", "mouthDimpleRight", "mouthStretchLeft",
                 "mouthStretchRight", "mouthRollLower", "mouthRollUpper", "mouthShrugLower",
                 "mouthShrugUpper", "mouthPressLeft", "mouthPressRight", "mouthLowerDownLeft",
                 "mouthLowerDownRight", "mouthUpperUpLeft", "mouthUpperUpRight", "browDownLeft",
                 "browDownRight", "browInnerUp", "browOuterUpLeft", "browOuterUpRight", "cheekPuff",
                 "cheekSquintLeft", "cheekSquintRight", "noseSneerLeft", "noseSneerRight", "tongueOut"]

        try:
            client = udp_client.SimpleUDPClient('127.0.0.1', 27008)
            osc_array = outWeight.tolist()
            for count, weight in enumerate(osc_array):
                client.send_message('/' + str(blend[count]), weight)
        except Exception as e:
            log_error(f"Error in OSC communication: {e}")

        return outWeight
8,708
Python
41.276699
918
0.614378
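A small end-to-end sketch of how FacsSolver is typically driven: build the solver from a neutral point matrix and per-pose deltas, then hand it a deformed point matrix each frame. The shapes and synthetic data below are assumptions for illustration; in Audio2Face the matrices come from the rig, and the returned weights are bounded to [0, 1] and smoothed by the regularizers:

```
import numpy as np
# FacsSolver as defined above; in a standalone script, import it from facsSolver

rng = np.random.default_rng(0)
n_coords, n_poses = 6, 4                  # tiny synthetic rig: 2 points x 3 axes, 4 poses
neutral = rng.normal(size=(n_coords, 1))
deltas = rng.normal(size=(n_coords, n_poses))

solver = FacsSolver(neutral, deltas)
solver.set_l2_regularization(0.1)         # soften the default damping for the demo
solver.set_tempo_regularization(0.1)

# target assembled from known weights
w_true = np.array([0.5, 0.0, 0.2, 0.0])
target = neutral + (deltas @ w_true)[:, None]

weights = solver.computeFacsWeights(target)  # also streams the weights over OSC
print(weights)
```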
matthias-research/omni.fun/README.md
# omni.fun

A simple plugin for NVIDIA's Omniverse.
50
Markdown
15.999995
38
0.78
matthias-research/omni.fun/exts/omni.fun/config/extension.toml
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.0"
authors = ["Ten Minute Physics"]
title = "Fun"
description="Ten Minute Physics Omniverse extension"
readme = "docs/README.md"
repository="https://github.com/matthias-research/omni.fun"
category = "sim"
keywords = ["simulation"]
changelog="docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"

# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["Ogn*Database.py", "*/ogn*"]

[dependencies]
"omni.kit.test" = {}
"omni.kit.menu.utils" = {}
"omni.timeline" = {}
"omni.usd" = {}

# Main python module this extension provides, it will be publicly available as "import omni.fun".
[[python.module]]
name = "omni.fun"
797
TOML
24.741935
98
0.697616
matthias-research/omni.fun/exts/omni.fun/config/extension.gen.toml
[package] [package.target] python = ["cp37"] [package.publish] date = 1635811509 kitVersion = "103.0+master.0.75457a67.gitlab"
127
TOML
17.285712
45
0.732283
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/sim.py
# Copyright 2022 Matthias Müller - Ten Minute Physics, 
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics

# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import numpy as np
import math
import warp as wp
from pxr import Usd, UsdGeom, Gf, Sdf
from .usdutils import *

gravity = -9.81


@wp.struct
class SimData:
    sphere_radius: wp.array(dtype=float)
    sphere_mass: wp.array(dtype=float)
    sphere_pos: wp.array(dtype=wp.vec3)
    sphere_rot: wp.array(dtype=wp.quat)
    sphere_lin_vel: wp.array(dtype=wp.vec3)
    sphere_ang_vel: wp.array(dtype=wp.vec3)
    sphere_pos_corr: wp.array(dtype=wp.vec3)
    sphere_lin_corr: wp.array(dtype=wp.vec3)
    sphere_ang_corr: wp.array(dtype=wp.vec3)
    sphere_num_corr: wp.array(dtype=int)
    sphere_lower_bounds: wp.array(dtype=wp.vec3)
    sphere_upper_bounds: wp.array(dtype=wp.vec3)
    sphere_bvh_id: wp.uint64

    obj_mesh_id: wp.uint64
    obj_tri_ids: wp.array(dtype=int)
    obj_orig_pos: wp.array(dtype=wp.vec3)
    obj_pos: wp.array(dtype=wp.vec3)
    obj_prev_pos: wp.array(dtype=wp.vec3)
    obj_transforms: wp.array(dtype=wp.mat44)
    obj_pos_transform_nr: wp.array(dtype=int)


@wp.kernel
def dev_integrate(
        dt: float,
        gravity: wp.vec3,
        bounds_margin: float,
        sim: SimData):

    sphere_nr = wp.tid()

    pos = sim.sphere_pos[sphere_nr]
    lin_vel = sim.sphere_lin_vel[sphere_nr]
    rot = sim.sphere_rot[sphere_nr]
    ang_vel = sim.sphere_ang_vel[sphere_nr]

    # move state forward in time
    lin_vel = lin_vel + gravity * dt
    pos = pos + lin_vel * dt
    qt = wp.quat(ang_vel[0], ang_vel[1], ang_vel[2], 0.0) * (dt * 0.5)
    rot = wp.normalize(rot + qt * rot)

    sim.sphere_pos[sphere_nr] = pos
    sim.sphere_lin_vel[sphere_nr] = lin_vel
    sim.sphere_rot[sphere_nr] = rot

    # compute bounding box for bvh
    pred_pos = pos + lin_vel * dt
    lower = wp.vec3(wp.min(pos[0], pred_pos[0]), wp.min(pos[1], pred_pos[1]), wp.min(pos[2], pred_pos[2]))
    upper = wp.vec3(wp.max(pos[0], pred_pos[0]), wp.max(pos[1], pred_pos[1]), wp.max(pos[2], pred_pos[2]))
    m = bounds_margin + sim.sphere_radius[sphere_nr]
    sim.sphere_lower_bounds[sphere_nr] = lower - wp.vec3(m, m, m)
    sim.sphere_upper_bounds[sphere_nr] = upper + wp.vec3(m, m, m)


@wp.kernel
def dev_handle_sphere_sphere_collisions(
        restitution: float,
        sim: SimData):

    sphere0 = wp.tid()

    eps = 0.00001
    pos0 = sim.sphere_pos[sphere0]
    radius0 = sim.sphere_radius[sphere0]
    m0 = sim.sphere_mass[sphere0]
    w0 = 1.0 / (m0 + eps)
    vel0 = sim.sphere_lin_vel[sphere0]
    ang0 = sim.sphere_ang_vel[sphere0]

    lower = sim.sphere_lower_bounds[sphere0]
    upper = sim.sphere_upper_bounds[sphere0]

    query = wp.bvh_query_aabb(sim.sphere_bvh_id, lower, upper)
    sphere1 = int(0)

    while (wp.bvh_query_next(query, sphere1)):
        if sphere1 < sphere0:    # handle each pair only once!
            pos1 = sim.sphere_pos[sphere1]
            radius1 = sim.sphere_radius[sphere1]
            m1 = sim.sphere_mass[sphere1]
            w1 = 1.0 / (m1 + eps)
            vel1 = sim.sphere_lin_vel[sphere1]
            ang1 = sim.sphere_ang_vel[sphere1]

            min_dist = radius0 + radius1
            pos_normal = wp.normalize(pos1 - pos0)
            dist = wp.dot(pos_normal, pos1 - pos0)

            if dist < min_dist:
                # bounce
                wp.atomic_add(sim.sphere_num_corr, sphere0, 1)
                wp.atomic_add(sim.sphere_num_corr, sphere1, 1)

                pos_corr = pos_normal / (w0 + w1) * (min_dist - dist + eps)
                wp.atomic_add(sim.sphere_pos_corr, sphere0, -w0 * pos_corr)
                wp.atomic_add(sim.sphere_pos_corr, sphere1, +w1 * pos_corr)

                vn0 = wp.dot(vel0, pos_normal)
                vn1 = wp.dot(vel1, pos_normal)

                new_vn0 = (m0 * vn0 + m1 * vn1 - m1 * (vn0 - vn1) * restitution) / (m0 + m1)
                new_vn1 = (m0 * vn0 + m1 * vn1 - m0 * (vn1 - vn0) * restitution) / (m0 + m1)
                new_vn0 = wp.min(0.0, new_vn0)
                new_vn1 = wp.max(0.0, new_vn1)

                lin_corr0 = pos_normal * (new_vn0 - vn0)
                lin_corr1 = pos_normal * (new_vn1 - vn1)

                wp.atomic_add(sim.sphere_lin_corr, sphere0, lin_corr0)
                wp.atomic_add(sim.sphere_lin_corr, sphere1, lin_corr1)

                vel0 = vel0 + lin_corr0
                vel1 = vel1 + lin_corr1

                # friction
                ang_normal = wp.normalize(ang0 * m0 + ang1 * m1)
                ang_normal = wp.normalize(ang_normal - pos_normal * wp.dot(pos_normal, ang_normal))

                vt0 = wp.dot(vel0, wp.cross(ang_normal, pos_normal))
                vt1 = wp.dot(vel1, wp.cross(ang_normal, pos_normal))
                omega0 = wp.dot(ang0, ang_normal)
                omega1 = wp.dot(ang1, ang_normal)

                # v0 + (o0 - do*w0) * r0 = v1 + (o1 + do*w1) * r1
                domega = (vt1 + omega1 * radius1 - vt0 - omega0 * radius0) / (radius0 * w0 + radius1 * w1)

                ang_corr0 = ang_normal * (omega0 - domega * w0) - ang0
                ang_corr1 = ang_normal * (omega1 + domega * w1) - ang1

                ang0 = ang0 + ang_corr0
                ang1 = ang1 + ang_corr1

                wp.atomic_add(sim.sphere_ang_corr, sphere0, ang_corr0)
                wp.atomic_add(sim.sphere_ang_corr, sphere1, ang_corr1)


@wp.kernel
def dev_update_obj_pos(sim: SimData):

    id = wp.tid()
    trans_nr = sim.obj_pos_transform_nr[id]
    pos = wp.transform_point(sim.obj_transforms[trans_nr], sim.obj_orig_pos[id])
    sim.obj_prev_pos[id] = sim.obj_pos[id]
    sim.obj_pos[id] = pos


@wp.kernel
def dev_handle_sphere_obj_collisions(
        dt: float,
        restitution: float,
        sim: SimData):

    sphere_nr = wp.tid()

    pos = sim.sphere_pos[sphere_nr]
    radius = sim.sphere_radius[sphere_nr]
    vel = sim.sphere_lin_vel[sphere_nr]
    ang = sim.sphere_ang_vel[sphere_nr]

    inside = float(0.0)
    face_nr = int(0)
    u = float(0.0)
    v = float(0.0)

    found = wp.mesh_query_point(sim.obj_mesh_id, pos, radius, inside, face_nr, u, v)
    if not found:
        return

    id0 = sim.obj_tri_ids[3 * face_nr]
    id1 = sim.obj_tri_ids[3 * face_nr + 1]
    id2 = sim.obj_tri_ids[3 * face_nr + 2]

    p0 = sim.obj_pos[id0]
    p1 = sim.obj_pos[id1]
    p2 = sim.obj_pos[id2]

    closest = u * p0 + v * p1 + (1.0 - u - v) * p2

    pos_normal = wp.normalize(pos - closest)

    dist = wp.dot(pos_normal, pos - closest)
    if dist >= radius:
        return

    sim.sphere_pos[sphere_nr] = pos - pos_normal * (radius - dist)

    # velocity of the contact point on the mesh
    v0 = (p0 - sim.obj_prev_pos[id0]) / dt
    v1 = (p1 - sim.obj_prev_pos[id1]) / dt
    v2 = (p2 - sim.obj_prev_pos[id2]) / dt
    v_mesh = u * v0 + v * v1 + (1.0 - u - v) * v2

    vn_sphere = wp.dot(vel, pos_normal)
    vn_mesh = wp.dot(v_mesh, pos_normal)
    new_vn = wp.min(vn_mesh - (vn_sphere - vn_mesh) * restitution, 0.0)
    vel = vel + pos_normal * (new_vn - vn_sphere)
    sim.sphere_lin_vel[sphere_nr] = vel

    # friction
    ang_normal = wp.normalize(ang)
    ang_normal = wp.normalize(ang_normal - pos_normal * wp.dot(pos_normal, ang_normal))

    vt = wp.dot(vel, wp.cross(ang_normal, pos_normal))
    vt_mesh = wp.dot(v_mesh, wp.cross(ang_normal, pos_normal))
    omega = wp.dot(ang, ang_normal)

    # vel + (omega + do) * r = v_mesh
    domega = (vt + omega * radius - vt_mesh) / radius
    ang = ang + ang_normal * (omega - domega)
    sim.sphere_ang_vel[sphere_nr] = ang


@wp.kernel
def dev_apply_corrections(
        sim: SimData):

    sphere_nr = wp.tid()

    num = sim.sphere_num_corr[sphere_nr]
    if num > 0:
        s = 1.0 / float(num)
        sim.sphere_pos[sphere_nr] = sim.sphere_pos[sphere_nr] + sim.sphere_pos_corr[sphere_nr] * s
        sim.sphere_lin_vel[sphere_nr] = sim.sphere_lin_vel[sphere_nr] + sim.sphere_lin_corr[sphere_nr] * s
        sim.sphere_ang_vel[sphere_nr] = sim.sphere_ang_vel[sphere_nr] + sim.sphere_ang_corr[sphere_nr] * s


class Sim():

    def __init__(self, stage):
        self.paused = True
        self.stage = stage
        self.device = 'cuda'

        self.prim_cache = UsdGeom.XformCache()

        self.dev_sim_data = SimData()
        self.host_sim_data = SimData()
        self.sphere_bvh = None
        self.obj_mesh = None
        self.sphere_usd_meshes = []
        self.obj_usd_prims = []
        self.obj_usd_transforms = []
        self.initialized = False

        self.time_step = 1.0 / 30.0
        self.num_substeps = 5
        self.restitution = 0.1
        self.jacobi_scale = 0.25
        self.bounds_margin = 0.01   # extra padding for the sphere BVH bounds
        self.num_spheres = 0
        self.frame_nr = 0

    def init(self):
        if not self.stage:
            return

        obj_pos = []
        obj_pos_transform_nr = []
        obj_tri_ids = []

        sphere_pos = []
        sphere_radius = []
        sphere_mass = []

        self.sphere_usd_meshes = []

        s = 4.0 / 3.0 * 3.141592   # sphere volume factor: V = s * r^3

        print("traversing stage")

        for prim in self.stage.Traverse():
            if prim.GetTypeName() == "Mesh":
                mesh = UsdGeom.Mesh(prim)
                name = mesh.GetName()
                points = mesh.GetPointsAttr().Get(0.0)

                if name.find("sphere") == 0 or name.find("Sphere") == 0:
                    # create a sphere
                    trans_mat, trans_t = get_global_transform(prim, 0.0, False)
                    trans_points = np.array(points) @ trans_mat

                    min = np.min(trans_points, axis=0)
                    max = np.max(trans_points, axis=0)
                    radius = np.max(max - min) * 0.5
                    sphere_radius.append(radius)
                    sphere_pos.append(trans_t)
                    sphere_mass.append(s * radius * radius * radius)

                    clone = clone_prim(self.stage, prim)
                    self.sphere_usd_meshes.append(UsdGeom.Mesh(clone))
                else:
                    obj_nr = len(self.obj_usd_prims)
                    self.obj_usd_prims.append(prim)

                    # create obstacle points
                    mesh_points = np.array(points)
                    first_pos = len(obj_pos)

                    for i in range(len(mesh_points)):
                        p = mesh_points[i]
                        obj_pos.append(wp.vec3(*p))
                        obj_pos_transform_nr.append(obj_nr)

                    # create obstacle triangles (fan-triangulate each polygon)
                    mesh_poly_indices = mesh.GetFaceVertexIndicesAttr().Get(0.0)
                    mesh_face_sizes = mesh.GetFaceVertexCountsAttr().Get(0.0)

                    first_index = 0
                    for i in range(len(mesh_face_sizes)):
                        face_size = mesh_face_sizes[i]
                        for j in range(1, face_size - 1):
                            obj_tri_ids.append(first_pos + mesh_poly_indices[first_index])
                            obj_tri_ids.append(first_pos + mesh_poly_indices[first_index + j])
                            obj_tri_ids.append(first_pos + mesh_poly_indices[first_index + j + 1])
                        first_index += face_size

        # create objects warp buffers

        if len(obj_pos) > 0:
            self.dev_sim_data.obj_orig_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.obj_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.obj_prev_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.obj_pos_transform_nr = wp.array(obj_pos_transform_nr, dtype=int, device=self.device)
            self.dev_sim_data.obj_tri_ids = wp.array(obj_tri_ids, dtype=int, device=self.device)
            self.obj_mesh = wp.Mesh(self.dev_sim_data.obj_pos, self.dev_sim_data.obj_tri_ids)
            self.dev_sim_data.obj_mesh_id = self.obj_mesh.id

            num_objs = len(self.obj_usd_prims)
            mat = wp.mat44()
            self.obj_transforms = np.array([mat] * num_objs)
            self.dev_sim_data.obj_transforms = wp.zeros(shape=(num_objs), dtype=wp.mat44, device=self.device)

        # create sphere warp buffers

        self.num_spheres = len(sphere_pos)
        if self.num_spheres > 0:
            self.dev_sim_data.sphere_radius = wp.array(sphere_radius, dtype=float, device=self.device)
            self.dev_sim_data.sphere_mass = wp.array(sphere_mass, dtype=float, device=self.device)
            self.dev_sim_data.sphere_pos = wp.array(sphere_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_rot = wp.array([wp.quat_identity()] * self.num_spheres, dtype=wp.quat, device=self.device)
            self.dev_sim_data.sphere_lin_vel = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_ang_vel = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_pos_corr = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_lin_corr = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_ang_corr = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_num_corr = wp.zeros(shape=(self.num_spheres), dtype=int, device=self.device)
            self.dev_sim_data.sphere_lower_bounds = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_upper_bounds = wp.zeros(shape=(self.num_spheres), dtype=wp.vec3, device=self.device)

            self.host_sim_data.sphere_pos = wp.array(sphere_pos, dtype=wp.vec3, device="cpu")
            self.host_sim_data.sphere_rot = wp.zeros(shape=(self.num_spheres), dtype=wp.quat, device="cpu")

            # zero time step to initialize sphere bounds
            wp.launch(kernel=dev_integrate,
                      inputs=[0.0, wp.vec3(0.0, 0.0, 0.0), self.bounds_margin, self.dev_sim_data],
                      dim=self.num_spheres, device=self.device)

            self.sphere_bvh = wp.Bvh(self.dev_sim_data.sphere_lower_bounds, self.dev_sim_data.sphere_upper_bounds)
            self.dev_sim_data.sphere_bvh_id = self.sphere_bvh.id

    def simulate(self):
        if self.paused or self.num_spheres == 0:
            return

        self.frame_nr += 1

        # update objects
        if self.obj_mesh is not None:
            for i in range(len(self.obj_usd_prims)):
                self.obj_transforms[i] = get_global_transform(self.obj_usd_prims[i], 0.0, True)

            wp.copy(self.dev_sim_data.obj_transforms,
                    wp.array(self.obj_transforms, dtype=wp.mat44, copy=False, device="cpu"))

            wp.launch(kernel=dev_update_obj_pos, inputs=[self.dev_sim_data],
                      dim=len(self.dev_sim_data.obj_pos), device=self.device)

            self.obj_mesh.refit()

        # simulate spheres (gravity vector assumes a y-up stage)
        wp.launch(kernel=dev_integrate,
                  inputs=[self.time_step, wp.vec3(0.0, gravity, 0.0), self.bounds_margin, self.dev_sim_data],
                  dim=self.num_spheres, device=self.device)

        self.sphere_bvh.refit()

        self.dev_sim_data.sphere_pos_corr.zero_()
        self.dev_sim_data.sphere_lin_corr.zero_()
        self.dev_sim_data.sphere_ang_corr.zero_()
        self.dev_sim_data.sphere_num_corr.zero_()

        wp.launch(kernel=dev_handle_sphere_sphere_collisions, inputs=[self.restitution, self.dev_sim_data],
                  dim=self.num_spheres, device=self.device)

        wp.launch(kernel=dev_apply_corrections, inputs=[self.dev_sim_data],
                  dim=self.num_spheres, device=self.device)

        if self.obj_mesh is not None:
            wp.launch(kernel=dev_handle_sphere_obj_collisions,
                      inputs=[self.time_step, self.restitution, self.dev_sim_data],
                      dim=self.num_spheres, device=self.device)

        # update stage
        wp.copy(self.host_sim_data.sphere_pos, self.dev_sim_data.sphere_pos)
        wp.copy(self.host_sim_data.sphere_rot, self.dev_sim_data.sphere_rot)

        pos = self.host_sim_data.sphere_pos.numpy()
        quat = self.host_sim_data.sphere_rot.numpy()

        for i in range(self.num_spheres):
            set_transform(self.sphere_usd_meshes[i], pos[i], quat[i])

    def reset(self):
        hide_clones(self.stage)
        self.paused = True
16,580
Python
34.734914
462
0.5769
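sim.py resolves contacts Jacobi-style: each collision kernel accumulates per-sphere corrections with atomic adds plus a counter, and `dev_apply_corrections` applies the averaged sum in a second pass. The same idea in plain numpy, for two overlapping unit spheres (a minimal sketch, not the Warp code itself):

```
import numpy as np

# two equal-mass spheres of radius 1 overlapping along x
pos = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]])
radius = np.array([1.0, 1.0])
inv_mass = np.array([1.0, 1.0])

pos_corr = np.zeros_like(pos)
num_corr = np.zeros(len(pos), dtype=int)

# accumulate pairwise corrections (the kernel does this with wp.atomic_add)
for i in range(len(pos)):
    for j in range(i + 1, len(pos)):
        n = pos[j] - pos[i]
        d = np.linalg.norm(n)
        overlap = radius[i] + radius[j] - d
        if overlap > 0.0:
            n /= d
            w = inv_mass[i] + inv_mass[j]
            pos_corr[i] -= n * overlap * inv_mass[i] / w
            pos_corr[j] += n * overlap * inv_mass[j] / w
            num_corr[i] += 1
            num_corr[j] += 1

# apply averaged corrections (mirrors dev_apply_corrections)
mask = num_corr > 0
pos[mask] += pos_corr[mask] / num_corr[mask][:, None]
print(pos)  # spheres pushed apart to exactly touching
```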
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/extension.py
# Copyright 2022 Matthias Müller - Ten Minute Physics, 
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics

# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import omni.ext
import omni.kit.app
import omni.kit.ui
import os
import omni.usd
from omni import ui
from pxr import Usd
from .controls import ControlsWindow
from .sim import Sim

EXAMPLES_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../data/scenes"))


class OmniFunExtension(omni.ext.IExt):

    def on_startup(self, ext_id):
        print("fun on_startup")

        self.controls = None
        self.sim = None

        stage = omni.usd.get_context().get_stage()
        self.sim = Sim(stage)
        self.sim.init()

        editor_menu = omni.kit.ui.get_editor_menu()
        self.menu_items = []

        if editor_menu:
            self.controls_menu = editor_menu.add_item(
                f"Window/Fun/Controls",
                lambda _, value: self.show_controls(value),
                toggle=True, value=False
            )

            self.menu_items.append(editor_menu.add_item(
                f"Window/Fun/SimpleScene",
                lambda _, value: self.load_example("simple.usd"),
                toggle=False, value=False
            ))

        # self.show_controls(True)

        # set callbacks
        self.update_event_stream = omni.kit.app.get_app_interface().get_update_event_stream()
        self.stage_event_sub = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(self.on_event)

    def on_shutdown(self):
        print("fun on_shutdown")
        self.menu_items = None
        self.update_event_stream = None
        self.stage_event_sub = None
        if self.sim:
            self.sim.reset()
        self.show_controls(False)

    def init_callback(self, state):
        if state:
            stage = omni.usd.get_context().get_stage()
            self.sim = Sim(stage)
            self.sim.init()
            self.update_event_sub = self.update_event_stream.create_subscription_to_pop(self.on_update)
        else:
            if self.sim:
                self.sim.reset()
            self.sim = None

    def play_callback(self, state):
        if self.sim:
            self.sim.paused = not state

    def on_update(self, dt):
        if self.sim:
            self.sim.simulate()

    def set_controls_menu(self, visible):
        omni.kit.ui.get_editor_menu().set_value(f"Window/Fun/Controls", visible)

    def show_controls(self, is_visible):
        if is_visible:
            if self.controls is None:
                self.controls = ControlsWindow(
                    init_callback=self.init_callback,
                    play_callback=self.play_callback)
                self.controls.create_window(lambda visible: self.set_controls_menu(visible))
            self.controls.show_window()
        elif self.controls:
            self.controls.destroy_window()
            self.controls = None

    def on_event(self, event):
        if event.type == int(omni.usd.StageEventType.CLOSED):
            if self.sim:
                self.sim.reset()
        if event.type == 
int(omni.usd.StageEventType.OPENED): if self.sim: self.sim.init() def load_example(self, scene_name): def new_stage(): stage_path = os.path.normpath(os.path.join(EXAMPLES_PATH, scene_name)) omni.usd.get_context().open_stage(stage_path) if self.sim: self.sim.init() omni.kit.window.file.prompt_if_unsaved_stage(new_stage)
4,788
Python
35.007519
462
0.618421
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/gpu.py
# Copyright 2022 Matthias Müller - Ten Minute Physics, 
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics

# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import numpy as np
import warp as wp


@wp.struct
class SimData:
    spheres_pos: wp.array(dtype=wp.vec3)
    spheres_prev_pos: wp.array(dtype=wp.vec3)
    spheres_pos_corr: wp.array(dtype=wp.vec3)
    spheres_vel: wp.array(dtype=wp.vec3)
    spheres_radius: wp.array(dtype=float)
    spheres_inv_mass: wp.array(dtype=float)

    mesh_id: wp.uint64
    mesh_verts: wp.array(dtype=wp.vec3)
    mesh_tri_ids: wp.array(dtype=int)


@wp.func
def closest_point_on_triangle(
        p: wp.vec3,
        p0: wp.vec3,
        p1: wp.vec3,
        p2: wp.vec3):

    e0 = p1 - p0
    e1 = p2 - p0
    tmp = p0 - p

    a = wp.dot(e0, e0)
    b = wp.dot(e0, e1)
    c = wp.dot(e1, e1)
    d = wp.dot(e0, tmp)
    e = wp.dot(e1, tmp)
    coords = wp.vec3(b*e - c*d, b*d - a*e, a*c - b*b)

    x = 0.0
    y = 0.0

    if coords[0] <= 0.0:
        if c != 0.0:
            y = -e / c
    elif coords[1] <= 0.0:
        if a != 0.0:
            x = -d / a
    elif coords[0] + coords[1] > coords[2]:
        den = a + c - b - b
        num = c + e - b - d
        if den != 0.0:
            x = num / den
            y = 1.0 - x
    else:
        if coords[2] != 0.0:
            x = coords[0] / coords[2]
            y = coords[1] / coords[2]

    x = wp.clamp(x, 0.0, 1.0)
    y = wp.clamp(y, 0.0, 1.0)
    bary = wp.vec3(1.0 - x - y, x, y)
    return bary


@wp.kernel
def dev_integrate_spheres(
        dt: float,
        gravity: wp.vec3,
        data: SimData):

    sphere_nr = wp.tid()

    w = data.spheres_inv_mass[sphere_nr]
    if w > 0.0:
        data.spheres_vel[sphere_nr] = data.spheres_vel[sphere_nr] + gravity * dt
        data.spheres_prev_pos[sphere_nr] = data.spheres_pos[sphere_nr]
        data.spheres_pos[sphere_nr] = data.spheres_pos[sphere_nr] + data.spheres_vel[sphere_nr] * dt


def integrate_spheres(num_spheres: int, dt: float, gravity: wp.vec3, data: SimData, device):
    wp.launch(kernel=dev_integrate_spheres, inputs=[dt, gravity, data], dim=num_spheres, device=device)


@wp.kernel
def dev_update_spheres(
        dt: float,
        jacobi_scale: float,
        data: SimData):

    sphere_nr = wp.tid()

    w = data.spheres_inv_mass[sphere_nr]
    if w > 0.0:
        data.spheres_pos[sphere_nr] = data.spheres_pos[sphere_nr] + jacobi_scale * data.spheres_pos_corr[sphere_nr]
        data.spheres_vel[sphere_nr] = (data.spheres_pos[sphere_nr] - data.spheres_prev_pos[sphere_nr]) / dt


def update_spheres(num_spheres: int, dt: float, jacobi_scale: float, data: SimData, device):
    wp.launch(kernel=dev_update_spheres, inputs=[dt, jacobi_scale, data], dim=num_spheres, device=device)


@wp.kernel
def dev_solve_mesh_collisions(
        data: SimData):

    sphere_nr = wp.tid()
    w = data.spheres_inv_mass[sphere_nr]
    if w > 0.0:
        pos = data.spheres_pos[sphere_nr]
        r = data.spheres_radius[sphere_nr]

        # query bounding volume hierarchy
        bounds_lower = pos - wp.vec3(r, r, r)
        bounds_upper = pos + wp.vec3(r, r, r)

        query = wp.mesh_query_aabb(data.mesh_id, bounds_lower, bounds_upper)
        tri_nr = int(0)

        while (wp.mesh_query_aabb_next(query, tri_nr)):
            p0 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr]]
            p1 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr + 1]]
            p2 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr + 2]]

            # blend the corners with the returned barycentric coordinates
            bary = closest_point_on_triangle(pos, p0, p1, p2)
            hit = bary[0] * p0 + bary[1] * p1 + bary[2] * p2

            n = pos - hit
            d = wp.length(n)
            if d < r:
                n = wp.normalize(n)
                data.spheres_pos[sphere_nr] = data.spheres_pos[sphere_nr] + n * (r - d)


def solve_mesh_collisions(num_spheres: int, data: SimData, device):
    wp.launch(kernel=dev_solve_mesh_collisions, inputs=[data], dim=num_spheres, device=device)


@wp.kernel
def dev_solve_sphere_collisions(
        num_spheres: int,
        data: SimData):

    i0 = wp.tid()
    p0 = data.spheres_pos[i0]
    r0 = data.spheres_radius[i0]
    w0 = data.spheres_inv_mass[i0]

    # simple O(n^2) collision detection
    for i1 in range(num_spheres):
        if i1 > i0:
            p1 = data.spheres_pos[i1]
            r1 = data.spheres_radius[i1]
            w1 = data.spheres_inv_mass[i1]
            w = w0 + w1
            if w > 0.0:
                n = p1 - p0
                d = wp.length(n)
                n = wp.normalize(n)
                if d < r0 + r1:
                    corr = n * (r0 + r1 - d) / w
                    # accumulate atomically; several threads may touch the same sphere
                    wp.atomic_add(data.spheres_pos_corr, i0, -corr * w0)
                    wp.atomic_add(data.spheres_pos_corr, i1, +corr * w1)


def solve_sphere_collisions(num_spheres: int, data: SimData, device):
    wp.launch(kernel=dev_solve_sphere_collisions, inputs=[num_spheres, data], dim=num_spheres, device=device)
6,034
Python
32.342541
462
0.586841
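`closest_point_on_triangle` returns barycentric coordinates rather than a point, so callers must blend the triangle corners with them, as `dev_solve_mesh_collisions` does above. A plain-numpy transcription of the same algebra, handy for checking the math outside Warp:

```
import numpy as np

def closest_point_on_triangle_np(p, p0, p1, p2):
    # same algebra as the Warp function above, in plain numpy
    e0, e1, tmp = p1 - p0, p2 - p0, p0 - p
    a, b, c = e0 @ e0, e0 @ e1, e1 @ e1
    d, e = e0 @ tmp, e1 @ tmp
    coords = np.array([b * e - c * d, b * d - a * e, a * c - b * b])
    x = y = 0.0
    if coords[0] <= 0.0:
        if c != 0.0:
            y = -e / c
    elif coords[1] <= 0.0:
        if a != 0.0:
            x = -d / a
    elif coords[0] + coords[1] > coords[2]:
        den, num = a + c - b - b, c + e - b - d
        if den != 0.0:
            x = num / den
            y = 1.0 - x
    else:
        if coords[2] != 0.0:
            x = coords[0] / coords[2]
            y = coords[1] / coords[2]
    x, y = np.clip(x, 0.0, 1.0), np.clip(y, 0.0, 1.0)
    return np.array([1.0 - x - y, x, y])  # barycentric coordinates

tri = [np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])]
bary = closest_point_on_triangle_np(np.array([0.25, 0.25, 1.0]), *tri)
closest = bary[0] * tri[0] + bary[1] * tri[1] + bary[2] * tri[2]
print(bary, closest)  # expect the projection (0.25, 0.25, 0.0)
```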
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/controls.py
import carb
import omni.ui
import omni.usd
import omni.kit.app
from pxr import Usd, Sdf

from . import sim


class ControlsWindow:

    def __init__(self, init_callback=None, play_callback=None):
        self._window = None
        self.buttons = [
            [None, init_callback, False, "Init", "Reset"],
            [None, play_callback, False, "Play", "Pause"]]

    def __bool__(self):
        return self._window is not None

    def create_window(self, visibility_changed_fn):
        window_flags = omni.ui.WINDOW_FLAGS_NO_SCROLLBAR
        self._window = omni.ui.Window("Fun Controls", flags=window_flags, width=400, height=400, dockPreference=omni.ui.DockPreference.RIGHT_TOP)
        self._window.set_visibility_changed_fn(visibility_changed_fn)
        self.rebuild_ui()

    def show_window(self):
        self._window.visible = True

    def hide_window(self):
        self._window.visible = False

    def destroy_window(self):
        if self._window:
            self._window.visible = False
            self._window.destroy()
            self._window = None

    def button_pressed(self, button):
        state = not button[2]
        button[2] = state
        button[0].text = button[4] if state else button[3]
        button[1](state)

    def set_parameter(self, param_name, val):
        if param_name == "gravity":
            # write through to the module-level value read by the simulation
            sim.gravity = val

    def rebuild_ui(self):
        ui = omni.ui
        row_height = 20
        v_spacing = 10
        h_spacing = 20

        if self._window and self._window.visible:
            with self._window.frame:
                with ui.VStack(spacing=v_spacing, padding=50):

                    with ui.HStack(spacing=h_spacing, height=row_height):
                        for button in self.buttons:
                            button[0] = ui.Button(
                                button[3], width=100, height=15, margin=10,
                                clicked_fn=lambda button=button: self.button_pressed(button))

                    with ui.HStack(spacing=h_spacing, height=row_height):
                        ui.Label("Gravity", width=ui.Percent(50), height=10, name="Gravity")
                        slider = ui.FloatSlider(min=0.0, max=10.0, width=ui.Percent(50))
                        slider.model.add_value_changed_fn(
                            lambda val, param_name="gravity": self.set_parameter(param_name, val.get_value_as_float()))
2,487
Python
29.341463
145
0.554483
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/usdutils.py
from pxr import Usd, UsdGeom, Gf, UsdShade
import numpy as np
import warp as wp

prim_cache = None


def get_global_transform(prim, time, return_mat44):
    global prim_cache
    if prim_cache is None:
        prim_cache = UsdGeom.XformCache()
    prim_cache.SetTime(time)

    m = prim_cache.GetLocalToWorldTransform(prim)
    if return_mat44:
        return wp.mat44(
            m[0][0], m[1][0], m[2][0], m[3][0],
            m[0][1], m[1][1], m[2][1], m[3][1],
            m[0][2], m[1][2], m[2][2], m[3][2],
            m[0][3], m[1][3], m[2][3], m[3][3])
    else:
        A = np.array([
            [m[0][0], m[0][1], m[0][2]],
            [m[1][0], m[1][1], m[1][2]],
            [m[2][0], m[2][1], m[2][2]]])
        b = np.array([m[3][0], m[3][1], m[3][2]])
        return A, b


def set_transform(mesh, trans, quat):
    usd_mat = Gf.Matrix4d()
    usd_mat.SetRotateOnly(Gf.Quatd(*quat))
    usd_mat.SetTranslateOnly(Gf.Vec3d(*trans))
    xform = UsdGeom.Xform(mesh)
    xform.GetOrderedXformOps()[0].Set(usd_mat)


def clone_primvar(prim, prim_clone, name, time=0.0):
    try:
        attr = UsdGeom.Primvar(prim.GetAttribute(name))
        prim_clone.CreatePrimvar(name, attr.GetTypeName(), attr.GetInterpolation()).Set(attr.Get(time))
    except Exception:
        pass


def clone_prim(stage, prim):
    vis = prim.GetAttribute("visibility")
    if vis:
        vis.Set("invisible")

    mesh = UsdGeom.Mesh(prim)

    clone_prim_path = '/' + str(prim.GetPath()).replace("/", "_") + '_clone'
    UsdGeom.Mesh.Define(stage, clone_prim_path)
    prim_clone = UsdGeom.Mesh(stage.GetPrimAtPath(clone_prim_path))
    mesh_clone = UsdGeom.Mesh(prim_clone)
    stage.GetPrimAtPath(clone_prim_path).SetActive(True)

    xform = UsdGeom.Xform(mesh_clone)
    xform.ClearXformOpOrder()
    xform.AddXformOp(UsdGeom.XformOp.TypeTransform)

    # bake the world transform into the cloned points and normals
    trans_mat, trans_t = get_global_transform(prim, 0.0, False)
    trans_points = np.array(mesh.GetPointsAttr().Get(0.0)) @ trans_mat + trans_t

    normal_mat = np.array([
        trans_mat[0, :] / np.linalg.norm(trans_mat[0, :]),
        trans_mat[1, :] / np.linalg.norm(trans_mat[1, :]),
        trans_mat[2, :] / np.linalg.norm(trans_mat[2, :])])
    trans_normals = np.array(mesh.GetNormalsAttr().Get(0.0)) @ normal_mat

    mesh_clone.GetPointsAttr().Set(trans_points)
    mesh_clone.GetNormalsAttr().Set(trans_normals)
    mesh_clone.SetNormalsInterpolation(mesh.GetNormalsInterpolation())

    mesh_clone.GetFaceVertexIndicesAttr().Set(mesh.GetFaceVertexIndicesAttr().Get(0.0))
    mesh_clone.GetFaceVertexCountsAttr().Set(mesh.GetFaceVertexCountsAttr().Get(0.0))
    mesh_clone.GetCornerIndicesAttr().Set(mesh.GetCornerIndicesAttr().Get(0.0))
    mesh_clone.GetCornerSharpnessesAttr().Set(mesh.GetCornerSharpnessesAttr().Get(0.0))
    mesh_clone.GetCreaseIndicesAttr().Set(mesh.GetCreaseIndicesAttr().Get(0.0))
    mesh_clone.GetCreaseLengthsAttr().Set(mesh.GetCreaseLengthsAttr().Get(0.0))
    mesh_clone.GetCreaseSharpnessesAttr().Set(mesh.GetCreaseSharpnessesAttr().Get(0.0))
    mesh_clone.GetSubdivisionSchemeAttr().Set(mesh.GetSubdivisionSchemeAttr().Get(0.0))
    mesh_clone.GetInterpolateBoundaryAttr().Set(mesh.GetInterpolateBoundaryAttr().Get(0.0))
    mesh_clone.GetFaceVaryingLinearInterpolationAttr().Set(mesh.GetFaceVaryingLinearInterpolationAttr().Get(0.0))
    mesh_clone.GetTriangleSubdivisionRuleAttr().Set(mesh.GetTriangleSubdivisionRuleAttr().Get(0.0))
    mesh_clone.GetHoleIndicesAttr().Set(mesh.GetHoleIndicesAttr().Get(0.0))

    for attr in prim.GetAttributes():
        type = str(attr.GetTypeName())
        if type.find("texCoord") >= 0:
            clone_primvar(prim, prim_clone, attr.GetName())

    try:
        mat = UsdShade.MaterialBindingAPI(prim).GetDirectBinding().GetMaterial()
        UsdShade.MaterialBindingAPI(prim_clone).Bind(mat)
    except Exception:
        pass

    return prim_clone


def hide_clones(stage):
    if stage is None:
        return
    for prim in stage.Traverse():
        if str(prim.GetName()).find("_clone") >= 0:
            prim.SetActive(False)
        else:
            vis = 
prim.GetAttribute("visibility") if vis: vis.Set("inherited")
4,122
Python
34.543103
113
0.643862
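A small sketch of `set_transform` in action, runnable with the standalone `usd-core` package (or inside Kit); the stage and prim name are made up for the example. Note that it writes to the prim's first xform op, so a transform op must exist first, which is exactly what `clone_prim` sets up:

```
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
xform = UsdGeom.Xform.Define(stage, '/ball')
xform.AddXformOp(UsdGeom.XformOp.TypeTransform)   # the op set_transform writes to

# the quaternion is passed (w, x, y, z), matching Gf.Quatd's constructor
set_transform(xform.GetPrim(), trans=(0.0, 1.0, 0.0), quat=(1.0, 0.0, 0.0, 0.0))
print(stage.GetRootLayer().ExportToString())
```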
matthias-research/omni.fun/exts/omni.fun/docs/CHANGELOG.md
# CHANGELOG ## [0.1.0] - 2022-08-15 - Initial publish for alpha testing
77
Markdown
7.666666
35
0.636364
matthias-research/omni.fun/exts/omni.fun/docs/README.md
# Play [omni.fun]

A simple plugin from Ten Minute Physics.

## Documentation

None

## Source Code

None
109
Markdown
6.333333
40
0.688073
qcr/benchbot_sim_omni/pip_package_fix.py
import subprocess import sys print("HACK FIX FOR BROKEN PACKAGES") def install(package): subprocess.check_call([sys.executable, "-m", "pip", "install", package]) def uninstall(package): subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "--yes", package]) uninstall("click") install("click") uninstall("typing-extensions") install("typing-extensions")
375
Python
27.923075
87
0.717333
qcr/benchbot_sim_omni/run.py
import flask import numpy as np import os import signal from builtins import print as bprint from gevent import event, pywsgi, signal from pathlib import Path from spatialmath import SE3, UnitQuaternion print("STARTING RUN.PY IN BENCHBOT_SIM_OMNI") DEFAULT_POSE = np.array([1, 0, 0, 0, 0, 0, 0]) DIRTY_EPSILON_DIST = 1 DIRTY_EPSILON_YAW = 2 DIRTY_FILE = '/tmp/benchbot_dirty' MAP_PRIM_PATH = '/env' ROBOT_NAME = 'robot' ROBOT_PRIM_PATH = '/%s' % ROBOT_NAME ROBOT_COMPONENTS = { 'clock': '/ROS_Clock', 'diff_base': '%s/ROS_DifferentialBase' % ROBOT_PRIM_PATH, 'lidar': '%s/ROS_Lidar' % ROBOT_PRIM_PATH, 'rgbd': '%s/ROS_Camera_Stereo_Left' % ROBOT_PRIM_PATH, 'tf_sensors': '%s/ROS_Carter_Sensors_Broadcaster' % ROBOT_PRIM_PATH, 'tf': '%s/ROS_Carter_Broadcaster' % ROBOT_PRIM_PATH } UPDATE_DELAY_SECS = 3.0 def _dc_tf_to_SE3(tf): r = np.array(tf.r) return SE3(np.array(tf.p)) * UnitQuaternion(r[3], r[0:3]).SE3() def _to_SE3(pose): return SE3(pose[4::]) * UnitQuaternion(pose[0], pose[1:4]).SE3() def disable_component(prop_path): from omni.kit.commands import execute from pxr import Sdf print("DISABLING '%s.enabled'" % prop_path) execute("ChangeProperty", prop_path=Sdf.Path("%s.enabled" % prop_path), value=False, prev=None) def print(*args, **kwargs): bprint(*args, **kwargs, flush=True) class SimulatorDaemon: def __init__(self, port): self.address = 'localhost:%s' % port self.inst = None self.sim = None self.sim_i = 0 self.sim_collided = False self.sim_dirty = False self.map_usd = None self.robot_usd = None self.start_pose = None self._map_usd = None self._robot_usd = None self._start_pose = None self._dc = None self._robot = None self._robot_dc = None def check_dirty(self): delta = (_to_SE3(self.start_pose).inv() * _dc_tf_to_SE3(self._dc.get_rigid_body_pose(self._robot_dc))) return (np.linalg.norm(delta.t[0:2]) > DIRTY_EPSILON_DIST or np.abs(delta.rpy(unit='deg')[2]) > DIRTY_EPSILON_YAW) def check_collided(self): return False def open_usd(self): # Bail early if we can't act if self.inst is None: print("No simulator running. " "Stored environment USD, but not opening.") return if self.map_usd is None: print("No environment USD selected. Returning.") return # Imports must go after bail early checks pass as they throw errors # when called in an "inappropriate state" (no idea what that # corresponds to...) from omni.isaac.core.utils.stage import open_stage, update_stage # Stop simulation if running self.stop_simulation() # Update the map if self.map_usd != self._map_usd: self._dc = None self._start_pose = None self._robot = None self._robot_dc = None self._robot_usd = None open_stage(usd_path=self.map_usd) update_stage() self._map_usd = self.map_usd else: print("Skipping map load; already loaded.") # Attempt to replace the robot self.place_robot() def place_robot(self): # Bail early if we can't act if self.inst is None: print("No simulator running. " "Stored robot USD & pose, but not opening.") return if self.robot_usd is None: print("No robot USD selected. Returning.") return # Imports must go after bail early checks pass as they throw errors # when called in an "inappropriate state" (no idea what that # corresponds to...) 
from omni.isaac.core.robots import Robot from omni.isaac.core.utils.stage import (add_reference_to_stage, update_stage) # Stop simulation if running self.stop_simulation() # Add robot to the environment at the requested pose p = DEFAULT_POSE if self.start_pose is None else self.start_pose if self.robot_usd != self._robot_usd: add_reference_to_stage(usd_path=self.robot_usd, prim_path=ROBOT_PRIM_PATH) self._robot = Robot(prim_path=ROBOT_PRIM_PATH, name=ROBOT_NAME) update_stage() self._robot_usd = self.robot_usd else: print("Skipping robot load; already loaded.") if (p != self._start_pose).any(): self._robot.set_world_pose(position=p[4::], orientation=p[:4]) update_stage() self._start_pose = p else: print("Skipping robot move; already at requested pose.") # Disable auto-publishing of all robot components (we'll manually # publish at varying frequencies instead) for p in ROBOT_COMPONENTS.values(): disable_component(p) # Attempt to start the simulation self.start_simulation() def run(self): f = flask.Flask('benchbot_sim_omni') @f.route('/', methods=['GET']) def __hello(): return flask.jsonify("Hello, I am the Omniverse Sim Daemon") @f.route('/open_environment', methods=['POST']) def __open_env(): r = flask.request.json if 'environment' in r: self.map_usd = r['environment'] self.open_usd() return flask.jsonify({}) @f.route('/place_robot', methods=['POST']) def __place_robot(): r = flask.request.json if 'robot' in r: self.robot_usd = r['robot'] if 'start_pose' in r: # Probably should be regexing... self.start_pose = np.array([ float(x.strip()) for x in r['start_pose'].replace( '[', '').replace(']', '').split(',') ]) self.place_robot() return flask.jsonify({}) @f.route('/restart_sim', methods=['POST']) def __restart_sim(): self.stop_simulation() self.start_simulation() return flask.jsonify({}) @f.route('/start', methods=['POST']) def __start_inst(): self.start_instance() return flask.jsonify({}) @f.route('/start_sim', methods=['POST']) def __start_sim(): self.start_simulation() return flask.jsonify({}) @f.route('/started', methods=['GET']) def __started(): # TODO note there is a race condition (returns true before a /start # job finishes) return flask.jsonify({'started': self.inst is not None}) @f.route('/stop_sim', methods=['POST']) def __stop_sim(): self.stop_simulation() return flask.jsonify({}) # Start long-running server server = pywsgi.WSGIServer(self.address, f) evt = event.Event() for s in [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]: signal.signal(s, lambda n, frame: evt.set()) server.start() while not evt.is_set(): evt.wait(0.001) self.tick_simulator() # Cleanup self.stop_instance() def start_instance(self): print("STARTING INSTANCE!!") if not self.inst is None: print("Instance already running. Please /stop first.") return env = {} if self.map_usd is None else {"open_usd": self.map_usd} from omni.isaac.kit import SimulationApp # Start the simulator self.inst = SimulationApp({ "renderer": "RayTracedLighting", "headless": False, **env }) # Import all required modules, and configure application from omni.isaac.core.utils.extensions import enable_extension enable_extension("omni.isaac.ros_bridge") # Attempt to place the robot if we had a map if env: self.place_robot() def start_simulation(self): if self.sim is not None: self.stop_simulation() if self.inst is None or self.map_usd is None or self.robot_usd is None: print("Can't start simulation. 
Missing some required state.") return from omni.isaac.core import SimulationContext self.sim_i = 0 self.sim_collided = False self.sim_dirty = False self.sim = SimulationContext() self.sim.play() from omni.isaac.dynamic_control import _dynamic_control self._dc = _dynamic_control.acquire_dynamic_control_interface() self._robot_dc = self._dc.get_articulation_root_body( self._dc.get_object(ROBOT_PRIM_PATH)) def stop_instance(self): if self.inst is None: print("No instance is running to stop.") return self.stop_simulation() self.inst.close() self.inst = None def stop_simulation(self): if self.sim is None: print("Skipping. No running simulation to stop") return if self.inst is None: print("Skipping. No running simulator found.") return self.sim.stop() self.sim = None # TODO maybe could reuse with more guarding logic? def tick_simulator(self): # Tick simulator steps. Does less now than in 2021.2.1 due to new action graph if self.inst is None: return if self.sim is None: self.inst.update() return self.sim.step() # Tick at 10Hz CHECK DIRTY if self.sim_i % 6 == 0: if not self.sim_dirty: self.sim_dirty = self.check_dirty() if self.sim_dirty: Path(DIRTY_FILE).touch() # Tick at 1Hz CHECK COLLIDED if self.sim_i % 60 == 0: self.sim_collided = self.check_collided() self.sim_i += 1 if __name__ == '__main__': print("inside run.py __main__") sd = SimulatorDaemon(port=os.environ.get('PORT')) sd.run()
10,394
Python
30.122754
86
0.554166
qcr/benchbot_sim_omni/README.md
**NOTE: this software is part of the BenchBot software stack. For a complete working BenchBot system, please install the BenchBot software stack by following the instructions [here](https://github.com/qcr/benchbot).** # BenchBot Simulator for Omniverse-powered Isaac Sim [![BenchBot project](https://img.shields.io/badge/collection-BenchBot-%231a2857)](http://benchbot.org) [![QUT Centre for Robotics Open Source](https://github.com/qcr/qcr.github.io/raw/master/misc/badge.svg)](https://qcr.github.io) ![Primary language](https://img.shields.io/github/languages/top/qcr/benchbot_sim_omni) [![License](https://img.shields.io/github/license/qcr/benchbot_sim_omni)](./LICENSE.txt) ![BenchBot Simulator interaction with the Omniverse-powered Isaac Sim](./docs/benchbot_sim_omni.jpg) The BenchBot Simulator bindings for Omniverse-powered Isaac Sim provide a simple `run` script that makes powerful photorealistic simulations available in ROS, and controllable through a basic HTTP API. Through a single script, this package provides: - creation of, and management of, a running [Omniverse-powered Isaac Sim](https://developer.nvidia.com/isaac-sim) instance - a simple HTTP API for programmatically loading environments, placing robots, and controlling simulations - ROS topics for common mobile robot topics: transforms, odometry, command velocity, RGB images, depth images, laser scans The configuration is currently Carter specific, but could easily be extended in the future to target other robots. Also all simulator interactions come from a simple Python script that could be used as a starting point for more complex projects. ## Installation **Please see the note at the top of the page; the BenchBot ecosystem contains much more than just these bindings** There is no physical installation step for these bindings, simply install Isaac Sim, clone this repository, and install Python dependencies: 1. Follow the instructions on the [NVIDIA Isaac Sim documentation site](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html) for [installing Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_basic.html) 2. Clone this repository: ``` git clone https://github.com/qcr/benchbot_sim_omni ``` 3. Install declared Python dependencies: ``` pip install -r ./.custom_deps ``` ## Running and using the simulator bindings Simulator bindings are run through the `run` script, which will start a blank instance of the simulator with the HTTP API bound on port 10001 by default: ``` ./run ``` A simulation in environment `my_env.usd`, with robot `my_robot.usd` at position `(0,0,0)` and quaternion (w,x,y,z) `(1,0,0,0)` can then be started by the following two CURL commands: ``` curl localhost:10001/open_environment \ -H "Content-Type: application/json" \ -d '{"environment": "my_env.usd"}' curl localhost:10001/place_robot \ -H "Content-Type: application/json" \ -d '{"robot": "my_robot.usd", "start_pose": "1,0,0,0,0,0,0"}' ``` Full documentation of configuration options and HTTP API routes is available through the script's `--help` flag: ``` user@pc:~/benchbot_sim_omni/$ ./run --help run -- BenchBot simulator daemon for Omniverse-powered Isaac Sim USAGE: Start the daemon: run run -p /path/to/python.sh -P 8080 Print this help information: run [-h|--help] OPTION DETAILS: -h, --help Show this help menu. -P,--port Port the daemon will bind to. Default port of 10001 will be used if not provided. -p,--python-sh-path Path to the 'python.sh' environment script included with your Isaac Sim installation. 
Will recursively search for the script in the current directory if this flag is not provided. INTERACTING WITH THE DAEMON: The daemon responds to HTTP requests. The following routes are supported: / Returns a greeting message /open_environment Opens a new environment, with USD path specified via 'environment' data field /place_robot Places a robot at a specified pose. Robot USD is specified via 'robot' data field, and start pose via a comma-separated 7-tuple in the 'start_pose' field. Format for pose is: quat_w,quat_x,quat_y,quat_z,pos_x,pos_y,pos_z /start Starts a simulator instance (happens by default when first opened) /stop Stops a currently running simulator instance if it exists /restart Restarts the entire simulator (generally not needed) FURTHER DETAILS: Please contact the authors of BenchBot for support or to report bugs: [email protected] ``` ## Using this simulator with the BenchBot Robot Controller The [BenchBot Robot Controller](https://github.com/qcr/benchbot_robot_controller) is a hybrid ROS / HTTP script that wraps and manages running robots and their required subprocesses. It is ultimately fed configurations from [BenchBot add-ons](https://github.com/qcr/benchbot_addons) through our [BenchBot supervisor](https://github.com/qcr/benchbot_supervisor) service. These details are not needed to use the BenchBot simulator bindings, but are provided here for context; they may be helpful if you are looking for examples of more complex interactions with the simulator bindings. For example, the `carter_sim_omni.yaml` file in the [robots_sim_omni](https://github.com/benchbot-addons/robots_sim_omni) BenchBot add-on may be of interest.
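The cURL commands above are plain HTTP requests, so the daemon can also be driven programmatically. Below is a minimal sketch using Python's `requests` library; the port, routes, and data fields are taken from the help text above, while the USD file names are placeholder assumptions:

```python
# Minimal sketch: drive the simulator daemon over its HTTP API.
# Assumes the daemon is running locally on the default port (10001);
# 'my_env.usd' and 'my_robot.usd' are placeholder asset names.
import requests

DAEMON = "http://localhost:10001"

# Load an environment from a USD file
requests.post(f"{DAEMON}/open_environment",
              json={"environment": "my_env.usd"}).raise_for_status()

# Place a robot at the origin with identity orientation
# (pose format: quat_w,quat_x,quat_y,quat_z,pos_x,pos_y,pos_z)
requests.post(f"{DAEMON}/place_robot",
              json={"robot": "my_robot.usd",
                    "start_pose": "1,0,0,0,0,0,0"}).raise_for_status()

# /start, /stop, and /restart take no data fields; plain GETs
# (as cURL would issue without -d) are assumed to be sufficient
requests.get(f"{DAEMON}/stop").raise_for_status()
```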
5,559
Markdown
41.442748
370
0.729808
AndrePatri/OmniRoboGym/pyproject.toml
[build-system] requires = ["flit_core >=2,<4"] build-backend = "flit_core.buildapi" [project] name = "omni_robo_gym" version = "0.1.0" description = "" authors = [{name = "AndrePatri", email = "[email protected]"}] readme = "README.md" license = {file = "LICENSE"}
276
TOML
24.181816
73
0.666667
AndrePatri/OmniRoboGym/omnirobogym_mamba_env.yml
name: omni_robo_gym_isaac2023.1.1 channels: - defaults - pytorch - nvidia - conda-forge - omnia - robostack-staging - AndrePatri dependencies: - python=3.10 - pip - pytorch == 2.0.1 - torchvision - torchaudio - cuda-toolkit=11.7 - compilers - cmake - make - quaternion - anaconda-client - yaml-cpp - pybind11 - gtest - eigen3 - posix_ipc=1.0.4 - rospkg=1.5.0 - ros-humble-xacro - empy - python-devtools - perf_sleep - pyqt - pyqtgraph - pip: - flit - nvidia-cublas-cu11==11.11.3.6 - gym==0.26.2 - gymnasium==0.28.1 - stable_baselines3[extra]==2.0.0a10 - box2d-py - tensorboard - tensorboard-plugin-wit - protobuf - matplotlib - scipy - urdf-parser-py - multiprocess
789
YAML
15.122449
40
0.593156
AndrePatri/OmniRoboGym/meta.yaml
package: name: omni_robo_gym version: 0.1.0 source: path: . # Path to the directory containing your built distribution artifacts requirements: build: - python=3.7 - flit run: - python=3.7 about: home: https://github.com/AndrePatri/CoClusterBridge license: GPL-2.0 summary: Custom implementations of Tasks and Gyms for Omniverse Isaac Sim, based on Gymnasium. Provides easy URDF and SRDF import/cloning and simulation configuration through the Omniverse API. extra: recipe-maintainers: - AndrePatri
537
YAML
20.519999
189
0.722533
AndrePatri/OmniRoboGym/README.md
# OmniRoboGym A wrapper on top of [Omniverse Isaac Sim](https://developer.nvidia.com/isaac-sim), a photo-realistic, GPU-accelerated simulator from NVIDIA. The aim of the package is to provide an easy interface for loading floating-base robots and their configurations from URDF and SRDF into Isaac Sim, cloning them with the Isaac Sim API and, in general, simplifying simulation setup for RL-based robotics applications.
402
Markdown
79.599984
248
0.80597
AndrePatri/OmniRoboGym/LICENSE.md
GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". 
Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. 
<signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.
18,092
Markdown
52.214706
77
0.785541
AndrePatri/OmniRoboGym/omni_robo_gym/envs/isaac_env.py
# Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected]) # # This file is part of OmniRoboGym and distributed under the General Public License version 2 license. # # OmniRoboGym is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # OmniRoboGym is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>. # from omni.isaac.kit import SimulationApp import os import signal import carb import torch from abc import ABC, abstractmethod from typing import Union, Tuple, Dict from SharsorIPCpp.PySharsorIPC import VLevel from SharsorIPCpp.PySharsorIPC import LogType from SharsorIPCpp.PySharsorIPC import Journal import numpy as np # import gymnasium as gym # class IsaacSimEnv(gym.Env): class IsaacSimEnv(): def __init__( self, headless: bool, sim_device: int = 0, enable_livestream: bool = False, enable_viewport: bool = False, debug = False ) -> None: """ Initializes RL and task parameters. Args: headless (bool): Whether to run training headless. sim_device (int): GPU device ID for running physics simulation. Defaults to 0. enable_livestream (bool): Whether to enable running with livestream. enable_viewport (bool): Whether to enable rendering in headless mode. """ self.debug = debug experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.omnirobogym.kit' # experience = "" if headless: info = f"Will run in headless mode." Journal.log(self.__class__.__name__, "__init__", info, LogType.STAT, throw_when_excep = True) if enable_livestream: experience = "" elif enable_viewport: exception = f"Using viewport is not supported yet." 
Journal.log(self.__class__.__name__, "__init__", exception, LogType.EXCEP, throw_when_excep = True) else: experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.omnirobogym.headless.kit' # experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.gym.headless.kit' self._simulation_app = SimulationApp({"headless": headless, "physics_gpu": sim_device}, experience=experience) info = "Using IsaacSim experience file @ " + experience Journal.log(self.__class__.__name__, "__init__", info, LogType.STAT, throw_when_excep = True) # carb.settings.get_settings().set("/persistent/omnihydra/useSceneGraphInstancing", True) if enable_livestream: info = "Livestream enabled" Journal.log(self.__class__.__name__, "__init__", info, LogType.STAT, throw_when_excep = True) from omni.isaac.core.utils.extensions import enable_extension self._simulation_app.set_setting("/app/livestream/enabled", True) self._simulation_app.set_setting("/app/window/drawMouse", True) self._simulation_app.set_setting("/app/livestream/proto", "ws") self._simulation_app.set_setting("/app/livestream/websocket/framerate_limit", 120) self._simulation_app.set_setting("/ngx/enabled", False) enable_extension("omni.kit.livestream.native") enable_extension("omni.services.streaming.manager") # handle ctrl+c event signal.signal(signal.SIGINT, self.signal_handler) self._render = not headless or enable_livestream or enable_viewport self._record = False self.step_counter = 0 # step counter self._world = None self.metadata = None self.gpu_pipeline_enabled = False def signal_handler(self, sig, frame): self.close() def set_task(self, task, backend="torch", sim_params=None, init_sim=True) -> None: """ Creates a World object and adds Task to World. Initializes and registers task to the environment interface. Triggers task start-up. Args: task (RLTask): The task to register to the env. backend (str): Backend to use for task. Can be "numpy" or "torch". Defaults to "numpy". sim_params (dict): Simulation parameters for physics settings. Defaults to None. init_sim (Optional[bool]): Automatically starts simulation. Defaults to True. """ from omni.isaac.core.world import World # parse device based on sim_param settings if sim_params and "sim_device" in sim_params: device = sim_params["sim_device"] else: device = "cpu" physics_device_id = carb.settings.get_settings().get_as_int("/physics/cudaDevice") gpu_id = 0 if physics_device_id < 0 else physics_device_id if sim_params and "use_gpu_pipeline" in sim_params: # GPU pipeline must use GPU simulation if sim_params["use_gpu_pipeline"]: device = "cuda:" + str(gpu_id) elif sim_params and "use_gpu" in sim_params: if sim_params["use_gpu"]: device = "cuda:" + str(gpu_id) self.gpu_pipeline_enabled = sim_params["use_gpu_pipeline"] info = "Using device: " + str(device) Journal.log(self.__class__.__name__, "__init__", info, LogType.STAT, throw_when_excep = True) if (sim_params is None): info = f"No sim params provided -> defaults will be used." Journal.log(self.__class__.__name__, "set_task", info, LogType.STAT, throw_when_excep = True) sim_params = {} # defaults for integration and rendering dt if not("physics_dt" in sim_params): sim_params["physics_dt"] = 1.0/60.0 dt = sim_params["physics_dt"] info = f"Using default integration_dt of {dt} s." Journal.log(self.__class__.__name__, "set_task", info, LogType.STAT, throw_when_excep = True) if not("rendering_dt" in sim_params): sim_params["rendering_dt"] = sim_params["physics_dt"] dt = sim_params["rendering_dt"] info = f"Using default rendering_dt of {dt} s." 
Journal.log(self.__class__.__name__, "set_task", info, LogType.STAT, throw_when_excep = True) self._world = World( stage_units_in_meters=1.0, physics_dt=sim_params["physics_dt"], rendering_dt=sim_params["rendering_dt"], # dt between rendering steps. Note: rendering means rendering a frame of # the current application and not only rendering a frame to the viewports/ cameras. # So UI elements of Isaac Sim will be refereshed with this dt as well if running non-headless backend=backend, device=str(device), physics_prim_path="/physicsScene", set_defaults = False, # set to True to use the defaults settings [physics_dt = 1.0/ 60.0, # stage units in meters = 0.01 (i.e in cms), rendering_dt = 1.0 / 60.0, gravity = -9.81 m / s # ccd_enabled, stabilization_enabled, gpu dynamics turned off, # broadcast type is MBP, solver type is TGS] sim_params=sim_params ) self._sim_params = sim_params big_info = "[World] Creating task " + task.name + "\n" + \ "use_gpu_pipeline: " + str(sim_params["use_gpu_pipeline"]) + "\n" + \ "device: " + str(device) + "\n" +\ "backend: " + str(backend) + "\n" +\ "integration_dt: " + str(sim_params["physics_dt"]) + "\n" + \ "rendering_dt: " + str(sim_params["rendering_dt"]) + "\n" \ Journal.log(self.__class__.__name__, "set_task", big_info, LogType.STAT, throw_when_excep = True) ## we get the physics context to expose additional low-level ## # settings of the simulation self._physics_context = self._world.get_physics_context() self._physics_scene_path = self._physics_context.prim_path self._physics_context.enable_gpu_dynamics(True) self._physics_context.enable_stablization(True) self._physics_scene_prim = self._physics_context.get_current_physics_scene_prim() self._solver_type = self._physics_context.get_solver_type() # we set parameters, depending on sim_params dict if "gpu_max_rigid_contact_count" in sim_params: self._physics_context.set_gpu_max_rigid_contact_count(sim_params["gpu_max_rigid_contact_count"]) if "gpu_max_rigid_patch_count" in sim_params: self._physics_context.set_gpu_max_rigid_patch_count(sim_params["gpu_max_rigid_patch_count"]) if "gpu_found_lost_pairs_capacity" in sim_params: self._physics_context.set_gpu_found_lost_pairs_capacity(sim_params["gpu_found_lost_pairs_capacity"]) if "gpu_found_lost_aggregate_pairs_capacity" in sim_params: self._physics_context.set_gpu_found_lost_aggregate_pairs_capacity(sim_params["gpu_found_lost_aggregate_pairs_capacity"]) if "gpu_total_aggregate_pairs_capacity" in sim_params: self._physics_context.set_gpu_total_aggregate_pairs_capacity(sim_params["gpu_total_aggregate_pairs_capacity"]) if "gpu_max_soft_body_contacts" in sim_params: self._physics_context.set_gpu_max_soft_body_contacts(sim_params["gpu_max_soft_body_contacts"]) if "gpu_max_particle_contacts" in sim_params: self._physics_context.set_gpu_max_particle_contacts(sim_params["gpu_max_particle_contacts"]) if "gpu_heap_capacity" in sim_params: self._physics_context.set_gpu_heap_capacity(sim_params["gpu_heap_capacity"]) if "gpu_temp_buffer_capacity" in sim_params: self._physics_context.set_gpu_temp_buffer_capacity(sim_params["gpu_temp_buffer_capacity"]) if "gpu_max_num_partitions" in sim_params: self._physics_context.set_gpu_max_num_partitions(sim_params["gpu_max_num_partitions"]) # overwriting defaults # self._physics_context.set_gpu_max_rigid_contact_count(2 * self._physics_context.get_gpu_max_rigid_contact_count()) # self._physics_context.set_gpu_max_rigid_patch_count(2 * self._physics_context.get_gpu_max_rigid_patch_count()) # 
self._physics_context.set_gpu_found_lost_pairs_capacity(2 * self._physics_context.get_gpu_found_lost_pairs_capacity()) # self._physics_context.set_gpu_found_lost_aggregate_pairs_capacity(20 * self._physics_context.get_gpu_found_lost_aggregate_pairs_capacity()) # self._physics_context.set_gpu_total_aggregate_pairs_capacity(20 * self._physics_context.get_gpu_total_aggregate_pairs_capacity()) # self._physics_context.set_gpu_heap_capacity(2 * self._physics_context.get_gpu_heap_capacity()) # self._physics_context.set_gpu_temp_buffer_capacity(20 * self._physics_context.get_gpu_heap_capacity()) # self._physics_context.set_gpu_max_num_partitions(20 * self._physics_context.get_gpu_temp_buffer_capacity()) # GPU buffers self._gpu_max_rigid_contact_count = self._physics_context.get_gpu_max_rigid_contact_count() self._gpu_max_rigid_patch_count = self._physics_context.get_gpu_max_rigid_patch_count() self._gpu_found_lost_pairs_capacity = self._physics_context.get_gpu_found_lost_pairs_capacity() self._gpu_found_lost_aggregate_pairs_capacity = self._physics_context.get_gpu_found_lost_aggregate_pairs_capacity() self._gpu_total_aggregate_pairs_capacity = self._physics_context.get_gpu_total_aggregate_pairs_capacity() self._gpu_max_soft_body_contacts = self._physics_context.get_gpu_max_soft_body_contacts() self._gpu_max_particle_contacts = self._physics_context.get_gpu_max_particle_contacts() self._gpu_heap_capacity = self._physics_context.get_gpu_heap_capacity() self._gpu_temp_buffer_capacity = self._physics_context.get_gpu_temp_buffer_capacity() # self._gpu_max_num_partitions = physics_context.get_gpu_max_num_partitions() # BROKEN->method does not exist big_info2 = "[physics context]:" + "\n" + \ "gpu_max_rigid_contact_count: " + str(self._gpu_max_rigid_contact_count) + "\n" + \ "gpu_max_rigid_patch_count: " + str(self._gpu_max_rigid_patch_count) + "\n" + \ "gpu_found_lost_pairs_capacity: " + str(self._gpu_found_lost_pairs_capacity) + "\n" + \ "gpu_found_lost_aggregate_pairs_capacity: " + str(self._gpu_found_lost_aggregate_pairs_capacity) + "\n" + \ "gpu_total_aggregate_pairs_capacity: " + str(self._gpu_total_aggregate_pairs_capacity) + "\n" + \ "gpu_max_soft_body_contacts: " + str(self._gpu_max_soft_body_contacts) + "\n" + \ "gpu_max_particle_contacts: " + str(self._gpu_max_particle_contacts) + "\n" + \ "gpu_heap_capacity: " + str(self._gpu_heap_capacity) + "\n" + \ "gpu_temp_buffer_capacity: " + str(self._gpu_temp_buffer_capacity) Journal.log(self.__class__.__name__, "set_task", big_info2, LogType.STAT, throw_when_excep = True) self._scene = self._world.scene from omni.usd import get_context self._stage = get_context().get_stage() from pxr import UsdLux, Sdf, Gf, UsdPhysics, PhysicsSchemaTools # add lighting distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight")) distantLight.CreateIntensityAttr(500) self._world._current_tasks = dict() # resets registered tasks self._task = task self._task.set_world(self._world) self._task.configure_scene() self._world.add_task(self._task) self._num_envs = self._task.num_envs if sim_params and "enable_viewport" in sim_params: self._render = sim_params["enable_viewport"] Journal.log(self.__class__.__name__, "set_task", "[render]: " + str(self._render), LogType.STAT, throw_when_excep = True) # if init_sim: # self._world.reset() # after the first reset we get get all quantities # # from the scene # self._task.post_initialization_steps() # performs initializations # # steps after the fisrt world reset was called def render(self, mode="human") -> 
None: """ Step the renderer. Args: mode (str): Select mode of rendering based on OpenAI environments. """ if mode == "human": self._world.render() return None elif mode == "rgb_array": # check if viewport is enabled -- if not, then complain because we won't get any data if not self._render or not self._record: exception = f"Cannot render '{mode}' when rendering is not enabled. Please check the provided" + \ "arguments to the environment class at initialization." Journal.log(self.__class__.__name__, "__init__", exception, LogType.EXCEP, throw_when_excep = True) # obtain the rgb data rgb_data = self._rgb_annotator.get_data() # convert to numpy array rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape) # return the rgb data return rgb_data[:, :, :3] else: # gym.Env.render(self, mode=mode) return None def create_viewport_render_product(self, resolution=(1280, 720)): """Create a render product of the viewport for rendering.""" try: import omni.replicator.core as rep # create render product self._render_product = rep.create.render_product("/OmniverseKit_Persp", resolution) # create rgb annotator -- used to read data from the render product self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu") self._rgb_annotator.attach([self._render_product]) self._record = True except Exception as e: carb.log_info("omni.replicator.core could not be imported. Skipping creation of render product.") carb.log_info(str(e)) def close(self) -> None: """ Closes simulation. """ if self._simulation_app.is_running(): self._simulation_app.close() return @abstractmethod def step(self, actions = None): """ Basic implementation for stepping simulation""" pass @abstractmethod def reset(self): """ Usually resets the task and updates observations + # other custom operations. """ pass @property def num_envs(self): """ Retrieves number of environments. Returns: num_envs(int): Number of environments. """ return self._num_envs @property def simulation_app(self): """Retrieves the SimulationApp object. Returns: simulation_app(SimulationApp): SimulationApp. """ return self._simulation_app @property def get_world(self): """Retrieves the World object for simulation. Returns: world(World): Simulation World. """ return self._world @property def task(self): """Retrieves the task. Returns: task(BaseTask): Task. """ return self._task @property def render_enabled(self): """Whether rendering is enabled. Returns: render(bool): is render enabled. """ return self._render
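For reference, here is a minimal usage sketch of the environment class above; `MyTask` stands in for a concrete task implementation (e.g. an `IsaacTask` subclass from this package), and the `sim_params` keys shown are the ones consumed in `set_task`:

```python
# Minimal usage sketch for IsaacSimEnv. Assumes a working Isaac Sim
# install and that the EXP_PATH environment variable points at the
# experience (.kit) files referenced in __init__ above.
# MyTask is a placeholder for a concrete IsaacTask subclass.
env = IsaacSimEnv(headless=True, sim_device=0)

sim_params = {
    "physics_dt": 1.0 / 60.0,    # integration step [s]
    "rendering_dt": 1.0 / 60.0,  # rendering step [s]
    "use_gpu": True,             # run physics on the GPU
    "use_gpu_pipeline": True,    # keep simulation buffers on the GPU
}

task = MyTask(name="my_task",
              integration_dt=sim_params["physics_dt"],
              robot_names=["my_robot"])  # placeholder arguments
env.set_task(task, backend="torch", sim_params=sim_params)

# step() and reset() are abstract here; concrete subclasses implement
# them on top of the underlying World object.
env.close()
```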
19,383
Python
39.299376
149
0.579735
AndrePatri/OmniRoboGym/omni_robo_gym/tasks/isaac_task.py
# Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected]) # # This file is part of OmniRoboGym and distributed under the General Public License version 2 license. # # OmniRoboGym is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # OmniRoboGym is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>. # from omni.isaac.core.tasks.base_task import BaseTask from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.utils.viewports import set_camera_view from omni.isaac.core.world import World import omni.kit import numpy as np import torch from omni.importer.urdf import _urdf from omni.isaac.core.utils.prims import move_prim from omni.isaac.cloner import GridCloner import omni.isaac.core.utils.prims as prim_utils # from omni.isaac.sensor import ContactSensor from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.scenes.scene import Scene from omni_robo_gym.utils.jnt_imp_cntrl import OmniJntImpCntrl from omni_robo_gym.utils.homing import OmniRobotHomer from omni_robo_gym.utils.contact_sensor import OmniContactSensors from omni_robo_gym.utils.terrains import RlTerrains from omni_robo_gym.utils.math_utils import quat_to_omega, quaternion_difference, rel_vel from abc import abstractmethod from typing import List, Dict from SharsorIPCpp.PySharsorIPC import LogType from SharsorIPCpp.PySharsorIPC import Journal class IsaacTask(BaseTask): def __init__(self, name: str, integration_dt: float, robot_names: List[str], robot_pkg_names: List[str] = None, contact_prims: Dict[str, List] = None, contact_offsets: Dict[str, Dict[str, np.ndarray]] = None, sensor_radii: Dict[str, Dict[str, np.ndarray]] = None, num_envs = 1, device = "cuda", cloning_offset: np.array = None, fix_base: List[bool] = None, self_collide: List[bool] = None, merge_fixed: List[bool] = None, replicate_physics: bool = True, solver_position_iteration_count: int = 4, solver_velocity_iteration_count: int = 1, solver_stabilization_thresh: float = 1e-5, offset=None, env_spacing = 5.0, spawning_radius = 1.0, use_flat_ground = True, default_jnt_stiffness = 300.0, default_jnt_damping = 20.0, default_wheel_stiffness = 0.0, default_wheel_damping = 10.0, override_art_controller = False, dtype = torch.float64, debug_enabled: bool = False, verbose = False, use_diff_velocities = False) -> None: self.torch_dtype = dtype self._debug_enabled = debug_enabled self._verbose = verbose self.use_diff_velocities = use_diff_velocities self.num_envs = num_envs self._override_art_controller = override_art_controller self._integration_dt = integration_dt # just used for contact reporting self.torch_device = torch.device(device) # defaults to "cuda" ("cpu" also valid) self.using_gpu = False if self.torch_device == torch.device("cuda"): self.using_gpu = True self.robot_names = robot_names # these are (potentially) custom names to self.robot_pkg_names = robot_pkg_names # will be used to search for URDF and SRDF packages self.scene_setup_completed = False if self.robot_pkg_names is None: self.robot_pkg_names = self.robot_names # if not 
provided, robot_names are the same as robot_pkg_names else: # check dimension consistency if len(robot_names) != len(robot_pkg_names): exception = "The provided robot names list must match the length " + \ "of the provided robot package names" raise Exception(exception) if fix_base is None: self._fix_base = [False] * len(self.robot_names) else: # check dimension consistency if len(fix_base) != len(robot_pkg_names): exception = "The provided fix_base list of boolean must match the length " + \ "of the provided robot package names" raise Exception(exception) self._fix_base = fix_base if self_collide is None: self._self_collide = [False] * len(self.robot_names) else: # check dimension consistency if len(self_collide) != len(robot_pkg_names): exception = "The provided self_collide list of boolean must match the length " + \ "of the provided robot package names" raise Exception(exception) self._self_collide = self_collide if merge_fixed is None: self._merge_fixed = [False] * len(self.robot_names) else: # check dimension consistency if len(merge_fixed) != len(robot_pkg_names): exception = "The provided merge_fixed list of boolean must match the length " + \ "of the provided robot package names" raise Exception(exception) self._merge_fixed = merge_fixed self._urdf_paths = {} self._srdf_paths = {} self._robots_art_views = {} self._robots_articulations = {} self._robots_geom_prim_views = {} self._solver_position_iteration_count = solver_position_iteration_count # solver position iteration count # -> higher number makes simulation more accurate self._solver_velocity_iteration_count = solver_velocity_iteration_count self._solver_stabilization_thresh = solver_stabilization_thresh # threshold for kin. energy below which an articulatiion # "goes to sleep", i.e. it's not simulated anymore until some action wakes him up # potentially, each robot could have its own setting for the solver (not supported yet) self._solver_position_iteration_counts = {} self._solver_velocity_iteration_counts = {} self._solver_stabilization_threshs = {} self.robot_bodynames = {} self.robot_n_links = {} self.robot_n_dofs = {} self.robot_dof_names = {} self._root_p = {} self._root_q = {} self._jnts_q = {} self._root_p_prev = {} # used for num differentiation self._root_q_prev = {} # used for num differentiation self._jnts_q_prev = {} # used for num differentiation self._root_p_default = {} self._root_q_default = {} self._jnts_q_default = {} self._root_v = {} self._root_v_default = {} self._root_omega = {} self._root_omega_default = {} self._jnts_v = {} self._jnts_v_default = {} self._jnts_eff_default = {} self._root_pos_offsets = {} self._root_q_offsets = {} self.distr_offset = {} # decribed how robots within each env are distributed self.jnt_imp_controllers = {} self.homers = {} # default jnt impedance settings self.default_jnt_stiffness = default_jnt_stiffness self.default_jnt_damping = default_jnt_damping self.default_wheel_stiffness = default_wheel_stiffness self.default_wheel_damping = default_wheel_damping self.use_flat_ground = use_flat_ground self.spawning_radius = spawning_radius # [m] -> default distance between roots of robots in a single # environment self._calc_robot_distrib() # computes the offsets of robots withing each env. 
self._env_ns = "/World/envs" self._env_spacing = env_spacing # [m] self._template_env_ns = self._env_ns + "/env_0" self._cloner = GridCloner(spacing=self._env_spacing) self._cloner.define_base_env(self._env_ns) prim_utils.define_prim(self._template_env_ns) self._envs_prim_paths = self._cloner.generate_paths(self._env_ns + "/env", self.num_envs) self._cloning_offset = cloning_offset if self._cloning_offset is None: self._cloning_offset = np.array([[0, 0, 0]] * self.num_envs) self._replicate_physics = replicate_physics self._world_initialized = False self._ground_plane_prim_path = "/World/terrain" self._world = None self._world_scene = None self._world_physics_context = None self.omni_contact_sensors = {} self.contact_prims = contact_prims for robot_name in contact_prims: self.omni_contact_sensors[robot_name] = OmniContactSensors( name = robot_name, n_envs = self.num_envs, contact_prims = contact_prims, contact_offsets = contact_offsets, sensor_radii = sensor_radii, device = self.torch_device, dtype = self.torch_dtype, enable_debug=self._debug_enabled) # trigger __init__ of parent class BaseTask.__init__(self, name=name, offset=offset) self.xrdf_cmd_vals = [] # by default empty, needs to be overriden by # child class def update_jnt_imp_control_gains(self, robot_name: str, jnt_stiffness: float, jnt_damping: float, wheel_stiffness: float, wheel_damping: float, env_indxs: torch.Tensor = None): # updates joint imp. controller with new impedance values if self._debug_enabled: for_robots = "" if env_indxs is not None: if not isinstance(env_indxs, torch.Tensor): msg = "Provided env_indxs should be a torch tensor of indexes!" Journal.log(self.__class__.__name__, "update_jnt_imp_control_gains", msg, LogType.EXCEP, throw_when_excep = True) if self.using_gpu: if not env_indxs.device.type == "cuda": error = "Provided env_indxs should be on GPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) else: if not env_indxs.device.type == "cpu": error = "Provided env_indxs should be on CPU!" 
Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs.tolist()) if self._verbose: Journal.log(self.__class__.__name__, "update_jnt_imp_control_gains", f"updating joint impedances " + for_robots, LogType.STAT, throw_when_excep = True) # set jnt imp gains for the whole robot if env_indxs is None: gains_pos = torch.full((self.num_envs, \ self.jnt_imp_controllers[robot_name].n_dofs), jnt_stiffness, device = self.torch_device, dtype=self.torch_dtype) gains_vel = torch.full((self.num_envs, \ self.jnt_imp_controllers[robot_name].n_dofs), jnt_damping, device = self.torch_device, dtype=self.torch_dtype) else: gains_pos = torch.full((env_indxs.shape[0], \ self.jnt_imp_controllers[robot_name].n_dofs), jnt_stiffness, device = self.torch_device, dtype=self.torch_dtype) gains_vel = torch.full((env_indxs.shape[0], \ self.jnt_imp_controllers[robot_name].n_dofs), jnt_damping, device = self.torch_device, dtype=self.torch_dtype) self.jnt_imp_controllers[robot_name].set_gains( pos_gains = gains_pos, vel_gains = gains_vel, robot_indxs = env_indxs) # in case of wheels wheels_indxs = self.jnt_imp_controllers[robot_name].get_jnt_idxs_matching( name_pattern="wheel") if wheels_indxs is not None: if env_indxs is None: # wheels are velocity-controlled wheels_pos_gains = torch.full((self.num_envs, len(wheels_indxs)), wheel_stiffness, device = self.torch_device, dtype=self.torch_dtype) wheels_vel_gains = torch.full((self.num_envs, len(wheels_indxs)), wheel_damping, device = self.torch_device, dtype=self.torch_dtype) else: # wheels are velocity-controlled wheels_pos_gains = torch.full((env_indxs.shape[0], len(wheels_indxs)), wheel_stiffness, device = self.torch_device, dtype=self.torch_dtype) wheels_vel_gains = torch.full((env_indxs.shape[0], len(wheels_indxs)), wheel_damping, device = self.torch_device, dtype=self.torch_dtype) self.jnt_imp_controllers[robot_name].set_gains( pos_gains = wheels_pos_gains, vel_gains = wheels_vel_gains, jnt_indxs=wheels_indxs, robot_indxs = env_indxs) def update_root_offsets(self, robot_name: str, env_indxs: torch.Tensor = None): if self._debug_enabled: for_robots = "" if env_indxs is not None: if not isinstance(env_indxs, torch.Tensor): msg = "Provided env_indxs should be a torch tensor of indexes!" Journal.log(self.__class__.__name__, "update_root_offsets", msg, LogType.EXCEP, throw_when_excep = True) if self.using_gpu: if not env_indxs.device.type == "cuda": error = "Provided env_indxs should be on GPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) else: if not env_indxs.device.type == "cpu": error = "Provided env_indxs should be on CPU!" 
Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs.tolist()) if self._verbose: Journal.log(self.__class__.__name__, "update_root_offsets", f"updating root offsets " + for_robots, LogType.STAT, throw_when_excep = True) # only planar position used if env_indxs is None: self._root_pos_offsets[robot_name][:, 0:2] = self._root_p[robot_name][:, 0:2] self._root_q_offsets[robot_name][:, :] = self._root_q[robot_name] else: self._root_pos_offsets[robot_name][env_indxs, 0:2] = self._root_p[robot_name][env_indxs, 0:2] self._root_q_offsets[robot_name][env_indxs, :] = self._root_q[robot_name][env_indxs, :] def synch_default_root_states(self, robot_name: str = None, env_indxs: torch.Tensor = None): if self._debug_enabled: for_robots = "" if env_indxs is not None: if not isinstance(env_indxs, torch.Tensor): msg = "Provided env_indxs should be a torch tensor of indexes!" Journal.log(self.__class__.__name__, "synch_default_root_states", msg, LogType.EXCEP, throw_when_excep = True) if self.using_gpu: if not env_indxs.device.type == "cuda": error = "Provided env_indxs should be on GPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) else: if not env_indxs.device.type == "cpu": error = "Provided env_indxs should be on CPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs.tolist()) if self._verbose: Journal.log(self.__class__.__name__, "synch_default_root_states", f"updating default root states " + for_robots, LogType.STAT, throw_when_excep = True) if env_indxs is None: self._root_p_default[robot_name][:, :] = self._root_p[robot_name] self._root_q_default[robot_name][:, :] = self._root_q[robot_name] else: self._root_p_default[robot_name][env_indxs, :] = self._root_p[robot_name][env_indxs, :] self._root_q_default[robot_name][env_indxs, :] = self._root_q[robot_name][env_indxs, :] def post_initialization_steps(self): print("Performing post-initialization steps") self._world_initialized = True # used by other methods which nees to run # only when the world was initialized # populates robot info fields self._fill_robot_info_from_world() # initializes homing managers self._init_homing_managers() # initializes robot state data self._init_robots_state() # default robot state self._set_robots_default_jnt_config() self._set_robots_root_default_config() # initializes joint impedance controllers self._init_jnt_imp_control() # update solver options self._update_art_solver_options() self.reset() self._custom_post_init() self._get_solver_info() # get again solver option before printing everything self._print_envs_info() # debug prints def apply_collision_filters(self, physicscene_path: str, coll_root_path: str): self._cloner.filter_collisions(physicsscene_path = physicscene_path, collision_root_path = coll_root_path, prim_paths=self._envs_prim_paths, global_paths=[self._ground_plane_prim_path] # can collide with these prims ) def reset_jnt_imp_control(self, robot_name: str, env_indxs: torch.Tensor = None): if self._debug_enabled: for_robots = "" if env_indxs is not None: if not isinstance(env_indxs, torch.Tensor): Journal.log(self.__class__.__name__, "reset_jnt_imp_control", "Provided env_indxs should be a torch tensor of indexes!", LogType.EXCEP, throw_when_excep = True) if self.using_gpu: if not env_indxs.device.type == "cuda": error = "Provided env_indxs should be on 
GPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) else: if not env_indxs.device.type == "cpu": error = "Provided env_indxs should be on CPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) for_robots = f"for robot {robot_name}, indexes: " + str(env_indxs) if self._verbose: Journal.log(self.__class__.__name__, "reset_jnt_imp_control", f"resetting joint impedances " + for_robots, LogType.STAT, throw_when_excep = True) # resets all internal data, refs to defaults self.jnt_imp_controllers[robot_name].reset(robot_indxs = env_indxs) # restore current state if env_indxs is None: self.jnt_imp_controllers[robot_name].update_state(pos = self._jnts_q[robot_name][:, :], vel = self._jnts_v[robot_name][:, :], eff = None, robot_indxs = None) else: self.jnt_imp_controllers[robot_name].update_state(pos = self._jnts_q[robot_name][env_indxs, :], vel = self._jnts_v[robot_name][env_indxs, :], eff = None, robot_indxs = env_indxs) # restore default gains self.update_jnt_imp_control_gains(robot_name = robot_name, jnt_stiffness = self.default_jnt_stiffness, jnt_damping = self.default_jnt_damping, wheel_stiffness = self.default_wheel_stiffness, wheel_damping = self.default_wheel_damping, env_indxs = env_indxs) #restore jnt imp refs to homing if env_indxs is None: self.jnt_imp_controllers[robot_name].set_refs(pos_ref=self.homers[robot_name].get_homing()[:, :], robot_indxs = None) else: self.jnt_imp_controllers[robot_name].set_refs(pos_ref=self.homers[robot_name].get_homing()[env_indxs, :], robot_indxs = env_indxs) # actually applies reset commands to the articulation # self.jnt_imp_controllers[robot_name].apply_cmds() def set_world(self, world: World): if not isinstance(world, World): Journal.log(self.__class__.__name__, "configure_scene", "world should be an instance of omni.isaac.core.world.World!", LogType.EXCEP, throw_when_excep = True) self._world = world self._world_scene = self._world.scene self._world_physics_context = self._world.get_physics_context() def set_up_scene(self, scene: Scene): super().set_up_scene(scene) def configure_scene(self) -> None: # this is called automatically by the environment BEFORE # initializing the simulation if self._world is None: Journal.log(self.__class__.__name__, "configure_scene", "Did you call the set_world() method??", LogType.EXCEP, throw_when_excep = True) if not self.scene_setup_completed: for i in range(len(self.robot_names)): robot_name = self.robot_names[i] robot_pkg_name = self.robot_pkg_names[i] fix_base = self._fix_base[i] self_collide = self._self_collide[i] merge_fixed = self._merge_fixed[i] self._generate_rob_descriptions(robot_name=robot_name, robot_pkg_name=robot_pkg_name) self._import_urdf(robot_name, fix_base=fix_base, self_collide=self_collide, merge_fixed=merge_fixed) Journal.log(self.__class__.__name__, "set_up_scene", "cloning environments...", LogType.STAT, throw_when_excep = True) self._cloner.clone( source_prim_path=self._template_env_ns, prim_paths=self._envs_prim_paths, replicate_physics=self._replicate_physics, position_offsets = self._cloning_offset ) # we can clone the environment in which all the robos are Journal.log(self.__class__.__name__, "set_up_scene", "finishing scene setup...", LogType.STAT, throw_when_excep = True) for i in range(len(self.robot_names)): robot_name = self.robot_names[i] self._robots_art_views[robot_name] = ArticulationView(name = robot_name + "ArtView", prim_paths_expr = self._env_ns + "/env_.*"+ "/" + robot_name + "/base_link", 
reset_xform_properties=False) self._robots_articulations[robot_name] = self._world_scene.add(self._robots_art_views[robot_name]) # self._robots_geom_prim_views[robot_name] = GeometryPrimView(name = robot_name + "GeomView", # prim_paths_expr = self._env_ns + "/env*"+ "/" + robot_name, # # prepare_contact_sensors = True # ) # self._robots_geom_prim_views[robot_name].apply_collision_apis() # to be able to apply contact sensors if self.use_flat_ground: self._world_scene.add_default_ground_plane(z_position=0, name="terrain", prim_path= self._ground_plane_prim_path, static_friction=1.0, dynamic_friction=1.0, restitution=0.2) else: self.terrains = RlTerrains(get_current_stage()) self.terrains.get_obstacles_terrain(terrain_size=40, num_obs=100, max_height=0.4, min_size=0.5, max_size=5.0) # delete_prim(self._ground_plane_prim_path + "/SphereLight") # we remove the default spherical light # set default camera viewport position and target self._set_initial_camera_params() self.apply_collision_filters(self._world_physics_context.prim_path, "/World/collisions") # init contact sensors self._init_contact_sensors() # IMPORTANT: this has to be called # after calling the clone() method and initializing articulation views!!! self._world.reset() # reset world to make art views available self.post_initialization_steps() self.scene_setup_completed = True def post_reset(self): pass def reset(self, env_indxs: torch.Tensor = None, robot_names: List[str] =None): # we first reset all target articulations to their default state rob_names = robot_names if (robot_names is not None) else self.robot_names # resets the state of target robot and env to the defaults self.reset_state(env_indxs=env_indxs, robot_names=rob_names) # and jnt imp. controllers for i in range(len(rob_names)): self.reset_jnt_imp_control(robot_name=rob_names[i], env_indxs=env_indxs) def reset_state(self, env_indxs: torch.Tensor = None, robot_names: List[str] =None): rob_names = robot_names if (robot_names is not None) else self.robot_names if env_indxs is not None: if self._debug_enabled: if self.using_gpu: if not env_indxs.device.type == "cuda": error = "Provided env_indxs should be on GPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) else: if not env_indxs.device.type == "cpu": error = "Provided env_indxs should be on CPU!" 
Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) for i in range(len(rob_names)): robot_name = rob_names[i] # root q self._robots_art_views[robot_name].set_world_poses(positions = self._root_p_default[robot_name][env_indxs, :], orientations=self._root_q_default[robot_name][env_indxs, :], indices = env_indxs) # jnts q self._robots_art_views[robot_name].set_joint_positions(positions = self._jnts_q_default[robot_name][env_indxs, :], indices = env_indxs) # root v and omega self._robots_art_views[robot_name].set_joint_velocities(velocities = self._jnts_v_default[robot_name][env_indxs, :], indices = env_indxs) # jnts v concatenated_vel = torch.cat((self._root_v_default[robot_name][env_indxs, :], self._root_omega_default[robot_name][env_indxs, :]), dim=1) self._robots_art_views[robot_name].set_velocities(velocities = concatenated_vel, indices = env_indxs) # jnts eff self._robots_art_views[robot_name].set_joint_efforts(efforts = self._jnts_eff_default[robot_name][env_indxs, :], indices = env_indxs) else: for i in range(len(rob_names)): robot_name = rob_names[i] # root q self._robots_art_views[robot_name].set_world_poses(positions = self._root_p_default[robot_name][:, :], orientations=self._root_q_default[robot_name][:, :], indices = None) # jnts q self._robots_art_views[robot_name].set_joint_positions(positions = self._jnts_q_default[robot_name][:, :], indices = None) # root v and omega self._robots_art_views[robot_name].set_joint_velocities(velocities = self._jnts_v_default[robot_name][:, :], indices = None) # jnts v concatenated_vel = torch.cat((self._root_v_default[robot_name][:, :], self._root_omega_default[robot_name][:, :]), dim=1) self._robots_art_views[robot_name].set_velocities(velocities = concatenated_vel, indices = None) # jnts eff self._robots_art_views[robot_name].set_joint_efforts(efforts = self._jnts_eff_default[robot_name][:, :], indices = None) # we update the robots state self.get_states(env_indxs=env_indxs, robot_names=rob_names) def close(self): pass def root_pos_offsets(self, robot_name: str, env_idxs: torch.Tensor = None): if env_idxs is None: return self._root_pos_offsets[robot_name] else: return self._root_pos_offsets[robot_name][env_idxs, :] def root_q_offsets(self, robot_name: str, env_idxs: torch.Tensor = None): if env_idxs is None: return self._root_q_offsets[robot_name] else: return self._root_q_offsets[robot_name][env_idxs, :] def root_p(self, robot_name: str, env_idxs: torch.Tensor = None): if env_idxs is None: return self._root_p[robot_name] else: return self._root_p[robot_name][env_idxs, :] def root_p_rel(self, robot_name: str, env_idxs: torch.Tensor = None): rel_pos = torch.sub(self.root_p(robot_name=robot_name, env_idxs=env_idxs), self.root_pos_offsets(robot_name=robot_name, env_idxs=env_idxs)) return rel_pos def root_q(self, robot_name: str, env_idxs: torch.Tensor = None): if env_idxs is None: return self._root_q[robot_name] else: return self._root_q[robot_name][env_idxs, :] def root_q_rel(self, robot_name: str, env_idxs: torch.Tensor = None): rel_q = quaternion_difference(self.root_q_offsets(robot_name=robot_name, env_idxs=env_idxs), self.root_q(robot_name=robot_name, env_idxs=env_idxs)) return rel_q def root_v(self, robot_name: str, env_idxs: torch.Tensor = None): if env_idxs is None: return self._root_v[robot_name] else: return self._root_v[robot_name][env_idxs, :] def root_v_rel(self, robot_name: str, env_idxs: torch.Tensor = None): v_rel = rel_vel(offset_q0_q1=self.root_q_offsets(robot_name=robot_name, 
env_idxs=env_idxs),
                v0=self.root_v(robot_name=robot_name, env_idxs=env_idxs))
        return v_rel

    def root_omega(self,
            robot_name: str,
            env_idxs: torch.Tensor = None):
        if env_idxs is None:
            return self._root_omega[robot_name]
        else:
            return self._root_omega[robot_name][env_idxs, :]

    def root_omega_rel(self,
            robot_name: str,
            env_idxs: torch.Tensor = None):
        omega_rel = rel_vel(offset_q0_q1=self.root_q_offsets(robot_name=robot_name, env_idxs=env_idxs),
                v0=self.root_omega(robot_name=robot_name, env_idxs=env_idxs))
        return omega_rel

    def jnts_q(self,
            robot_name: str,
            env_idxs: torch.Tensor = None):
        if env_idxs is None:
            return self._jnts_q[robot_name]
        else:
            return self._jnts_q[robot_name][env_idxs, :]

    def jnts_v(self,
            robot_name: str,
            env_idxs: torch.Tensor = None):
        if env_idxs is None:
            return self._jnts_v[robot_name]
        else:
            return self._jnts_v[robot_name][env_idxs, :]

    def integration_dt(self):
        return self._integration_dt

    @abstractmethod
    def _xrdf_cmds(self) -> Dict:
        # this has to be implemented by the user depending on the arguments
        # the xacro description of the robot takes. The output is a dictionary
        # mapping each robot name to its list of xacro commands.
        # Example implementation:
        # def _xrdf_cmds(self):
        #   cmds = {}
        #   cmds[self.robot_names[0]] = []
        #   xrdf_cmd_vals = [True, True, True, False, False, True]
        #   legs = "true" if xrdf_cmd_vals[0] else "false"
        #   big_wheel = "true" if xrdf_cmd_vals[1] else "false"
        #   upper_body = "true" if xrdf_cmd_vals[2] else "false"
        #   velodyne = "true" if xrdf_cmd_vals[3] else "false"
        #   realsense = "true" if xrdf_cmd_vals[4] else "false"
        #   floating_joint = "true" if xrdf_cmd_vals[5] else "false" # horizon needs a floating joint
        #   cmds[self.robot_names[0]].append("legs:=" + legs)
        #   cmds[self.robot_names[0]].append("big_wheel:=" + big_wheel)
        #   cmds[self.robot_names[0]].append("upper_body:=" + upper_body)
        #   cmds[self.robot_names[0]].append("velodyne:=" + velodyne)
        #   cmds[self.robot_names[0]].append("realsense:=" + realsense)
        #   cmds[self.robot_names[0]].append("floating_joint:=" + floating_joint)
        #   return cmds
        pass

    @abstractmethod
    def pre_physics_step(self,
            actions,
            robot_name: str) -> None:
        # apply actions to simulated robot
        # to be overridden by child class depending
        # on specific needs
        pass

    def _generate_srdf(self,
            robot_name: str,
            robot_pkg_name: str):
        # we generate the SRDF where the description package is located
        import rospkg
        rospackage = rospkg.RosPack()
        descr_path = rospackage.get_path(robot_pkg_name + "_srdf")
        srdf_path = descr_path + "/srdf"
        xacro_name = robot_pkg_name
        xacro_path = srdf_path + "/" + xacro_name + ".srdf.xacro"
        self._srdf_paths[robot_name] = self._descr_dump_path + "/" + robot_name + ".srdf"
        if self._xrdf_cmds() is not None:
            cmds = self._xrdf_cmds()[robot_name]
            if cmds is None:
                xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._srdf_paths[robot_name]]
            else:
                xacro_cmd = ["xacro"] + [xacro_path] + cmds + ["-o"] + [self._srdf_paths[robot_name]]
        if self._xrdf_cmds() is None:
            xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._srdf_paths[robot_name]]
        import subprocess
        try:
            xacro_gen = subprocess.check_call(xacro_cmd)
        except Exception:
            Journal.log(self.__class__.__name__,
                "_generate_srdf",
                "failed to generate " + robot_name + "'s SRDF!!!",
                LogType.EXCEP,
                throw_when_excep = True)

    def _generate_urdf(self,
            robot_name: str,
            robot_pkg_name: str):
        # we generate the URDF where the description package is located
        import rospkg
        rospackage = rospkg.RosPack()
        descr_path = rospackage.get_path(robot_pkg_name + "_urdf")
        urdf_path = descr_path + "/urdf"
        xacro_name = robot_pkg_name
        xacro_path = urdf_path + "/" + xacro_name + ".urdf.xacro"
        self._urdf_paths[robot_name] = self._descr_dump_path + "/" + robot_name + ".urdf"
        if self._xrdf_cmds() is not None:
cmds = self._xrdf_cmds()[robot_name] if cmds is None: xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._urdf_paths[robot_name]] else: xacro_cmd = ["xacro"] + [xacro_path] + cmds + ["-o"] + [self._urdf_paths[robot_name]] if self._xrdf_cmds() is None: xacro_cmd = ["xacro"] + [xacro_path] + ["-o"] + [self._urdf_paths[robot_name]] import subprocess try: xacro_gen = subprocess.check_call(xacro_cmd) # we also generate an updated SRDF except: Journal.log(self.__class__.__name__, "_generate_urdf", "Failed to generate " + robot_name + "\'s URDF!!!", LogType.EXCEP, throw_when_excep = True) def _generate_rob_descriptions(self, robot_name: str, robot_pkg_name: str): self._descr_dump_path = "/tmp/" + f"{self.__class__.__name__}" Journal.log(self.__class__.__name__, "update_root_offsets", "generating URDF for robot "+ f"{robot_name}, of type {robot_pkg_name}...", LogType.STAT, throw_when_excep = True) self._generate_urdf(robot_name=robot_name, robot_pkg_name=robot_pkg_name) Journal.log(self.__class__.__name__, "update_root_offsets", "generating SRDF for robot "+ f"{robot_name}, of type {robot_pkg_name}...", LogType.STAT, throw_when_excep = True) # we also generate SRDF files, which are useful for control self._generate_srdf(robot_name=robot_name, robot_pkg_name=robot_pkg_name) def _import_urdf(self, robot_name: str, import_config: omni.importer.urdf._urdf.ImportConfig = _urdf.ImportConfig(), fix_base = False, self_collide = False, merge_fixed = True): Journal.log(self.__class__.__name__, "update_root_offsets", "importing robot URDF", LogType.STAT, throw_when_excep = True) _urdf.acquire_urdf_interface() # we overwrite some settings which are bound to be fixed import_config.merge_fixed_joints = merge_fixed # makes sim more stable # in case of fixed joints with light objects import_config.import_inertia_tensor = True # import_config.convex_decomp = False import_config.fix_base = fix_base import_config.self_collision = self_collide # import_config.distance_scale = 1 # import_config.make_default_prim = True # import_config.create_physics_scene = True # import_config.default_drive_strength = 1047.19751 # import_config.default_position_drive_damping = 52.35988 # import_config.default_drive_type = _urdf.UrdfJointTargetType.JOINT_DRIVE_POSITION # import URDF success, robot_prim_path_default = omni.kit.commands.execute( "URDFParseAndImportFile", urdf_path=self._urdf_paths[robot_name], import_config=import_config, ) robot_base_prim_path = self._template_env_ns + "/" + robot_name # moving default prim to base prim path for cloning move_prim(robot_prim_path_default, # from robot_base_prim_path) # to return success def _init_contact_sensors(self): for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] # creates base contact sensor (which is then cloned) self.omni_contact_sensors[robot_name].create_contact_sensors( self._world, self._env_ns ) def _init_robots_state(self): for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] pose = self._robots_art_views[robot_name].get_world_poses( clone = True) # tuple: (pos, quat) # root p (measured, previous, default) self._root_p[robot_name] = pose[0] self._root_p_prev[robot_name] = torch.clone(pose[0]) self._root_p_default[robot_name] = torch.clone(pose[0]) + self.distr_offset[robot_name] # root q (measured, previous, default) self._root_q[robot_name] = pose[1] # root orientation self._root_q_prev[robot_name] = torch.clone(pose[1]) self._root_q_default[robot_name] = torch.clone(pose[1]) # jnt q (measured, previous, default) 
self._jnts_q[robot_name] = self._robots_art_views[robot_name].get_joint_positions( clone = True) # joint positions self._jnts_q_prev[robot_name] = self._robots_art_views[robot_name].get_joint_positions( clone = True) self._jnts_q_default[robot_name] = self.homers[robot_name].get_homing(clone=True) # root v (measured, default) self._root_v[robot_name] = self._robots_art_views[robot_name].get_linear_velocities( clone = True) # root lin. velocity self._root_v_default[robot_name] = torch.full((self._root_v[robot_name].shape[0], self._root_v[robot_name].shape[1]), 0.0, dtype=self.torch_dtype, device=self.torch_device) # root omega (measured, default) self._root_omega[robot_name] = self._robots_art_views[robot_name].get_angular_velocities( clone = True) # root ang. velocity self._root_omega_default[robot_name] = torch.full((self._root_omega[robot_name].shape[0], self._root_omega[robot_name].shape[1]), 0.0, dtype=self.torch_dtype, device=self.torch_device) # joints v (measured, default) self._jnts_v[robot_name] = self._robots_art_views[robot_name].get_joint_velocities( clone = True) # joint velocities self._jnts_v_default[robot_name] = torch.full((self._jnts_v[robot_name].shape[0], self._jnts_v[robot_name].shape[1]), 0.0, dtype=self.torch_dtype, device=self.torch_device) self._jnts_eff_default[robot_name] = torch.full((self._jnts_v[robot_name].shape[0], self._jnts_v[robot_name].shape[1]), 0.0, dtype=self.torch_dtype, device=self.torch_device) self._root_pos_offsets[robot_name] = torch.zeros((self.num_envs, 3), device=self.torch_device) # reference position offses self._root_q_offsets[robot_name] = torch.zeros((self.num_envs, 4), device=self.torch_device) self._root_q_offsets[robot_name][:, 0] = 1.0 # init to valid identity quaternion self.update_root_offsets(robot_name) def _calc_robot_distrib(self): import math # we distribute robots in a single env. along the # circumference of a circle of given radius n_robots = len(self.robot_names) offset_baseangle = 2 * math.pi / n_robots for i in range(n_robots): offset_angle = offset_baseangle * (i + 1) robot_offset_wrt_center = torch.tensor([self.spawning_radius * math.cos(offset_angle), self.spawning_radius * math.sin(offset_angle), 0], device=self.torch_device, dtype=self.torch_dtype) # list with n references to the original tensor tensor_list = [robot_offset_wrt_center] * self.num_envs self.distr_offset[self.robot_names[i]] = torch.stack(tensor_list, dim=0) def _get_robots_state(self, env_indxs: torch.Tensor = None, robot_names: List[str] = None, dt: float = None, reset: bool = False): rob_names = robot_names if (robot_names is not None) else self.robot_names if env_indxs is not None: for i in range(0, len(rob_names)): robot_name = rob_names[i] pose = self._robots_art_views[robot_name].get_world_poses( clone = True, indices=env_indxs) # tuple: (pos, quat) self._root_p[robot_name][env_indxs, :] = pose[0] self._root_q[robot_name][env_indxs, :] = pose[1] # root orientation self._jnts_q[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_joint_positions( clone = True, indices=env_indxs) # joint positions if dt is None: # we get velocities from the simulation. This is not good since # these can actually represent artifacts which do not have physical meaning. # It's better to obtain them by differentiation to avoid issues with controllers, etc... self._root_v[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_linear_velocities( clone = True, indices=env_indxs) # root lin. 
velocity self._root_omega[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_angular_velocities( clone = True, indices=env_indxs) # root ang. velocity self._jnts_v[robot_name][env_indxs, :] = self._robots_art_views[robot_name].get_joint_velocities( clone = True, indices=env_indxs) # joint velocities else: # differentiate numerically if not reset: self._root_v[robot_name][env_indxs, :] = (self._root_p[robot_name][env_indxs, :] - \ self._root_p_prev[robot_name][env_indxs, :]) / dt self._root_omega[robot_name][env_indxs, :] = quat_to_omega(self._root_q[robot_name][env_indxs, :], self._root_q_prev[robot_name][env_indxs, :], dt) self._jnts_v[robot_name][env_indxs, :] = (self._jnts_q[robot_name][env_indxs, :] - \ self._jnts_q_prev[robot_name][env_indxs, :]) / dt else: # to avoid issues when differentiating numerically self._root_v[robot_name][env_indxs, :].zero_() self._root_omega[robot_name][env_indxs, :].zero_() self._jnts_v[robot_name][env_indxs, :].zero_() # update "previous" data for numerical differentiation self._root_p_prev[robot_name][env_indxs, :] = self._root_p[robot_name][env_indxs, :] self._root_q_prev[robot_name][env_indxs, :] = self._root_q[robot_name][env_indxs, :] self._jnts_q_prev[robot_name][env_indxs, :] = self._jnts_q[robot_name][env_indxs, :] else: # updating data for all environments for i in range(0, len(rob_names)): robot_name = rob_names[i] pose = self._robots_art_views[robot_name].get_world_poses( clone = True) # tuple: (pos, quat) self._root_p[robot_name][:, :] = pose[0] self._root_q[robot_name][:, :] = pose[1] # root orientation self._jnts_q[robot_name][:, :] = self._robots_art_views[robot_name].get_joint_positions( clone = True) # joint positions if dt is None: # we get velocities from the simulation. This is not good since # these can actually represent artifacts which do not have physical meaning. # It's better to obtain them by differentiation to avoid issues with controllers, etc... self._root_v[robot_name][:, :] = self._robots_art_views[robot_name].get_linear_velocities( clone = True) # root lin. velocity self._root_omega[robot_name][:, :] = self._robots_art_views[robot_name].get_angular_velocities( clone = True) # root ang. velocity self._jnts_v[robot_name][:, :] = self._robots_art_views[robot_name].get_joint_velocities( clone = True) # joint velocities else: # differentiate numerically if not reset: self._root_v[robot_name][:, :] = (self._root_p[robot_name][:, :] - \ self._root_p_prev[robot_name][:, :]) / dt self._root_omega[robot_name][:, :] = quat_to_omega(self._root_q[robot_name][:, :], self._root_q_prev[robot_name][:, :], dt) self._jnts_v[robot_name][:, :] = (self._jnts_q[robot_name][:, :] - \ self._jnts_q_prev[robot_name][:, :]) / dt # self._jnts_v[robot_name][:, :].zero_() else: # to avoid issues when differentiating numerically self._root_v[robot_name][:, :].zero_() self._root_omega[robot_name][:, :].zero_() self._jnts_v[robot_name][:, :].zero_() # update "previous" data for numerical differentiation self._root_p_prev[robot_name][:, :] = self._root_p[robot_name][:, :] self._root_q_prev[robot_name][:, :] = self._root_q[robot_name][:, :] self._jnts_q_prev[robot_name][:, :] = self._jnts_q[robot_name][:, :] def get_states(self, env_indxs: torch.Tensor = None, robot_names: List[str] = None): if self.use_diff_velocities: self._get_robots_state(dt = self.integration_dt(), env_indxs = env_indxs, robot_names = robot_names) # updates robot states # but velocities are obtained via num. 
differentiation else: self._get_robots_state(env_indxs = env_indxs, robot_names = robot_names) # velocities directly from simulator (can # introduce relevant artifacts, making them unrealistic) def _custom_post_init(self): # can be overridden by child class pass def _set_robots_default_jnt_config(self): # setting Isaac's internal defaults. Useful is resetting # whole scenes or views, but single env reset has to be implemented # manueally # we use the homing of the robots if (self._world_initialized): for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] homing = self.homers[robot_name].get_homing() self._robots_art_views[robot_name].set_joints_default_state(positions= homing, velocities = torch.zeros((homing.shape[0], homing.shape[1]), \ dtype=self.torch_dtype, device=self.torch_device), efforts = torch.zeros((homing.shape[0], homing.shape[1]), \ dtype=self.torch_dtype, device=self.torch_device)) else: Journal.log(self.__class__.__name__, "_set_robots_default_jnt_config", "Before calling __set_robots_default_jnt_config(), you need to reset the World" + \ " at least once and call post_initialization_steps()", LogType.EXCEP, throw_when_excep = True) def _set_robots_root_default_config(self): if (self._world_initialized): for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] self._robots_art_views[robot_name].set_default_state(positions = self._root_p_default[robot_name], orientations = self._root_q_default[robot_name]) else: Journal.log(self.__class__.__name__, "_generate_urdf", "Before calling _set_robots_root_default_config(), you need to reset the World" + \ " at least once and call post_initialization_steps()", LogType.EXCEP, throw_when_excep = True) return True def _get_solver_info(self): for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] self._solver_position_iteration_counts[robot_name] = self._robots_art_views[robot_name].get_solver_position_iteration_counts() self._solver_velocity_iteration_counts[robot_name] = self._robots_art_views[robot_name].get_solver_velocity_iteration_counts() self._solver_stabilization_threshs[robot_name] = self._robots_art_views[robot_name].get_stabilization_thresholds() def _update_art_solver_options(self): # sets new solver iteration options for specifc articulations self._get_solver_info() # gets current solver info for the articulations of the # environments, so that dictionaries are filled properly if (self._world_initialized): for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] # increase by a factor self._solver_position_iteration_counts[robot_name] = torch.full((self.num_envs,), self._solver_position_iteration_count) self._solver_velocity_iteration_counts[robot_name] = torch.full((self.num_envs,), self._solver_velocity_iteration_count) self._solver_stabilization_threshs[robot_name] = torch.full((self.num_envs,), self._solver_stabilization_thresh) self._robots_art_views[robot_name].set_solver_position_iteration_counts(self._solver_position_iteration_counts[robot_name]) self._robots_art_views[robot_name].set_solver_velocity_iteration_counts(self._solver_velocity_iteration_counts[robot_name]) self._robots_art_views[robot_name].set_stabilization_thresholds(self._solver_stabilization_threshs[robot_name]) self._get_solver_info() # gets again solver info for articulation, so that it's possible to debug if # the operation was successful else: Journal.log(self.__class__.__name__, "_set_robots_default_jnt_config", "Before calling update_art_solver_options(), you need to 
reset the World at least once!", LogType.EXCEP, throw_when_excep = True) def _print_envs_info(self): if (self._world_initialized): print("TASK INFO:") for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] task_info = f"[{robot_name}]" + "\n" + \ "bodies: " + str(self._robots_art_views[robot_name].body_names) + "\n" + \ "n. prims: " + str(self._robots_art_views[robot_name].count) + "\n" + \ "prims names: " + str(self._robots_art_views[robot_name].prim_paths) + "\n" + \ "n. bodies: " + str(self._robots_art_views[robot_name].num_bodies) + "\n" + \ "n. dofs: " + str(self._robots_art_views[robot_name].num_dof) + "\n" + \ "dof names: " + str(self._robots_art_views[robot_name].dof_names) + "\n" + \ "solver_position_iteration_counts: " + str(self._solver_position_iteration_counts[robot_name]) + "\n" + \ "solver_velocity_iteration_counts: " + str(self._solver_velocity_iteration_counts[robot_name]) + "\n" + \ "stabiliz. thresholds: " + str(self._solver_stabilization_threshs[robot_name]) # print("dof limits: " + str(self._robots_art_views[robot_name].get_dof_limits())) # print("effort modes: " + str(self._robots_art_views[robot_name].get_effort_modes())) # print("dof gains: " + str(self._robots_art_views[robot_name].get_gains())) # print("dof max efforts: " + str(self._robots_art_views[robot_name].get_max_efforts())) # print("dof gains: " + str(self._robots_art_views[robot_name].get_gains())) # print("physics handle valid: " + str(self._robots_art_views[robot_name].is_physics_handle_valid()) Journal.log(self.__class__.__name__, "_print_envs_info", task_info, LogType.STAT, throw_when_excep = True) else: Journal.log(self.__class__.__name__, "_set_robots_default_jnt_config", "Before calling __print_envs_info(), you need to reset the World at least once!", LogType.EXCEP, throw_when_excep = True) def _fill_robot_info_from_world(self): if self._world_initialized: for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] self.robot_bodynames[robot_name] = self._robots_art_views[robot_name].body_names self.robot_n_links[robot_name] = self._robots_art_views[robot_name].num_bodies self.robot_n_dofs[robot_name] = self._robots_art_views[robot_name].num_dof self.robot_dof_names[robot_name] = self._robots_art_views[robot_name].dof_names else: Journal.log(self.__class__.__name__, "_fill_robot_info_from_world", "Before calling _fill_robot_info_from_world(), you need to reset the World at least once!", LogType.EXCEP, throw_when_excep = True) def _init_homing_managers(self): if self._world_initialized: for i in range(0, len(self.robot_names)): robot_name = self.robot_names[i] self.homers[robot_name] = OmniRobotHomer(articulation=self._robots_art_views[robot_name], srdf_path=self._srdf_paths[robot_name], device=self.torch_device, dtype=self.torch_dtype) else: exception = "you should reset the World at least once and call the " + \ "post_initialization_steps() method before initializing the " + \ "homing manager." 
Journal.log(self.__class__.__name__,
                "_init_homing_managers",
                exception,
                LogType.EXCEP,
                throw_when_excep = True)

    def _init_jnt_imp_control(self):
        if self._world_initialized:
            for i in range(0, len(self.robot_names)):
                robot_name = self.robot_names[i]
                # creates impedance controller
                self.jnt_imp_controllers[robot_name] = OmniJntImpCntrl(articulation=self._robots_art_views[robot_name],
                                        default_pgain = self.default_jnt_stiffness, # defaults
                                        default_vgain = self.default_jnt_damping,
                                        override_art_controller=self._override_art_controller,
                                        filter_dt = None,
                                        filter_BW = 50,
                                        device= self.torch_device,
                                        dtype=self.torch_dtype,
                                        enable_safety=True,
                                        enable_profiling=self._debug_enabled,
                                        urdf_path=self._urdf_paths[robot_name],
                                        debug_checks = self._debug_enabled)
                self.reset_jnt_imp_control(robot_name)
        else:
            exception = "you should reset the World at least once and call the " + \
                "post_initialization_steps() method before initializing the " + \
                "joint impedance controller."
            Journal.log(self.__class__.__name__,
                "_init_jnt_imp_control",
                exception,
                LogType.EXCEP,
                throw_when_excep = True)

    def _set_initial_camera_params(self,
                    camera_position=[10, 10, 3],
                    camera_target=[0, 0, 0]):
        set_camera_view(eye=camera_position,
            target=camera_target,
            camera_prim_path="/OmniverseKit_Persp")
68,642
Python
47.995717
142
0.49324
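A minimal sketch of how a concrete task could implement the abstract _xrdf_cmds() hook documented above; the class, the robot name and the xacro argument names are illustrative placeholders, not part of the original sources:

from typing import Dict, List

class MyRobotTask:  # stand-in for a subclass of the task class above

    robot_names = ["myrobot"]  # hypothetical robot name

    def _xrdf_cmds(self) -> Dict[str, List[str]]:
        # one list of xacro <arg>:=<value> pairs per robot name
        cmds: Dict[str, List[str]] = {}
        cmds[self.robot_names[0]] = [
            "legs:=true",           # hypothetical xacro args: they must match
            "floating_joint:=true", # the robot's .urdf.xacro description
        ]
        return cmds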
AndrePatri/OmniRoboGym/omni_robo_gym/tests/test_lunar_lander_stable_bs3.py
# Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected]) # # This file is part of OmniRoboGym and distributed under the General Public License version 2 license. # # OmniRoboGym is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # OmniRoboGym is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>. # import gymnasium as gym from stable_baselines3 import DQN from stable_baselines3.common.evaluation import evaluate_policy # Create environment env = gym.make("LunarLander-v2", render_mode="rgb_array") # Instantiate the agent model = DQN("MlpPolicy", env, verbose=1) # Train the agent and display a progress bar model.learn(total_timesteps=int(2e5), progress_bar=True) # Save the agent model.save("dqn_lunar") del model # delete trained model to demonstrate loading # Load the trained agent # NOTE: if you have loading issue, you can pass `print_system_info=True` # to compare the system on which the model was trained vs the current one # model = DQN.load("dqn_lunar", env=env, print_system_info=True) model = DQN.load("dqn_lunar", env=env) # Evaluate the agent # NOTE: If you use wrappers with your environment that modify rewards, # this will be reflected here. To evaluate with original rewards, # wrap environment in a "Monitor" wrapper before other wrappers. mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10) # Enjoy trained agent vec_env = model.get_env() obs = vec_env.reset() n_pred_iterations = 100000 for i in range(n_pred_iterations): action, _states = model.predict(obs, deterministic=True) obs, rewards, dones, info = vec_env.step(action) vec_env.render("human")
2,169
Python
38.454545
102
0.751498
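As the NOTE in the script above mentions, wrapping the environment in a Monitor before any reward-modifying wrappers makes evaluate_policy report the original episode rewards; a minimal sketch:

import gymnasium as gym
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.evaluation import evaluate_policy

eval_env = Monitor(gym.make("LunarLander-v2"))  # Monitor first, other wrappers after
# mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10)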
AndrePatri/OmniRoboGym/omni_robo_gym/tests/create_terrain_demo.py
# Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected]) # # This file is part of OmniRoboGym and distributed under the General Public License version 2 license. # # OmniRoboGym is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # OmniRoboGym is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>. # # Copyright (c) 2018-2022, NVIDIA Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os, sys SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.append(SCRIPT_DIR) import omni from omni.isaac.kit import SimulationApp import numpy as np simulation_app = SimulationApp({"headless": False}) from omni.isaac.core.tasks import BaseTask from omni.isaac.core import World from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.utils.prims import define_prim from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.materials import PreviewSurface from omni.isaac.cloner import GridCloner from pxr import UsdLux, UsdShade, Sdf from omni_robo_gym.utils.terrain_utils import * from omni_robo_gym.utils.terrains import RlTerrains class TerrainsTest(BaseTask): def __init__(self, name) -> None: BaseTask.__init__(self, name=name) self._device = "cpu" def set_up_scene(self, scene) -> None: self._stage = get_current_stage() distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight")) distantLight.CreateIntensityAttr(2000) self.terrains = RlTerrains(self._stage) self.terrains.get_obstacles_terrain( terrain_size = 40.0, num_obs = 200, max_height = 0.5, min_size = 0.5, max_size = 5.0,) super().set_up_scene(scene) return def post_reset(self): a = 1 def get_observations(self): pass def calculate_metrics(self) -> None: pass def is_done(self) -> None: pass if __name__ == "__main__": world = World( stage_units_in_meters=1.0, rendering_dt=1.0/60.0, backend="torch", device="cpu", ) terrain_creation_task = TerrainsTest(name="CustomTerrain", ) world.add_task(terrain_creation_task) world.reset() while simulation_app.is_running(): if world.is_playing(): if world.current_time_step_index == 0: world.reset(soft=True) world.step(render=True) else: world.step(render=True) simulation_app.close()
4,763
Python
33.773722
102
0.672475
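The demo above runs with a viewer; for batch runs the same script can be started headless through the standard SimulationApp flag (note that, as the script already does, the app must be created before importing any omni.isaac.core module):

from omni.isaac.kit import SimulationApp

simulation_app = SimulationApp({"headless": True})  # no rendering window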
AndrePatri/OmniRoboGym/omni_robo_gym/utils/contact_sensor.py
import torch import numpy as np from omni.isaac.sensor import ContactSensor from typing import List, Dict from omni.isaac.core.world import World from omni.isaac.core.prims import RigidPrimView, RigidContactView from SharsorIPCpp.PySharsorIPC import LogType from SharsorIPCpp.PySharsorIPC import Journal class OmniContactSensors: def __init__(self, name: str, # robot name for which contact sensors are to be created n_envs: int, # number of environments contact_prims: Dict[str, List] = None, contact_offsets: Dict[str, Dict[str, np.ndarray]] = None, sensor_radii: Dict[str, Dict[str, np.ndarray]] = None, device = "cuda", dtype = torch.float64, enable_debug: bool = False, filter_paths: List[str] = ["/World/terrain/GroundPlane/CollisionPlane"]): # contact sensors abstraction for a single robot # over multiple environments self._filter_paths = filter_paths self._enable_debug = enable_debug self.n_envs = n_envs self.device = device if self.device == "cuda": self.using_gpu = True else: self.using_gpu = False self.dtype = dtype self.name = name self.contact_radius_default = 0.003 # parses contact dictionaries and checks for issues self._parse_contact_dicts(self.name, contact_prims, contact_offsets, sensor_radii) self.n_sensors = len(self.contact_prims) self.in_contact = torch.full((n_envs, self.n_sensors), False, device = self.device, dtype=torch.bool) self.force_norm = torch.full((n_envs, self.n_sensors), -1.0, device = self.device, dtype=self.dtype) self.n_contacts = torch.full((n_envs, self.n_sensors), 0, device = self.device, dtype=torch.int) self.contact_sensors = [[None] * self.n_sensors] * n_envs # outer: environment, # inner: contact sensor, ordered as in contact_prims self.contact_geom_prim_views = [None] * self.n_sensors # self.contact_views = [None] * self.n_sensors def _parse_contact_dicts(self, name: str, contact_prims: Dict[str, List], contact_offsets: Dict[str, Dict[str, np.ndarray]], sensor_radii: Dict[str, Dict[str, np.ndarray]]): try: self.contact_prims = contact_prims[name] except: Journal.log(self.__class__.__name__, "_parse_contact_dicts", f"Could not find key {name} in contact_prims dictionary.", LogType.EXCEP, throw_when_excep = True) try: self.contact_offsets = contact_offsets[name] except: Journal.log(self.__class__.__name__, "_parse_contact_dicts", f"Could not find key {name} in contact_offsets dictionary.", LogType.EXCEP, throw_when_excep = True) try: self.sensor_radii = sensor_radii[name] except: Journal.log(self.__class__.__name__, "_parse_contact_dicts", f"Could not find key {name} in sensor_radii dictionary.", LogType.EXCEP, throw_when_excep = True) contact_offsets_ok = all(item in self.contact_offsets for item in self.contact_prims) sensor_radii_ok = all(item in self.sensor_radii for item in self.contact_prims) if not contact_offsets_ok: warning = f"Provided contact_offsets dictionary does not posses all the necessary keys. " + \ f"It should contain all of [{' '.join(self.contact_prims)}]. \n" + \ f"Resetting all offsets to zero..." Journal.log(self.__class__.__name__, "_parse_contact_dicts", warning, LogType.WARN, throw_when_excep = True) for i in range(0, len(self.contact_prims)): self.contact_offsets[self.contact_prims[i]] = np.array([0.0, 0.0, 0.0]) if not sensor_radii_ok: warning = f"Provided sensor_radii dictionary does not posses all the necessary keys. " + \ f"It should contain all of [{' '.join(self.contact_prims)}]. \n" + \ f"Resetting all radii to {self.contact_radius_default} ..." 
Journal.log(self.__class__.__name__, "_parse_contact_dicts", warning, LogType.WARN, throw_when_excep = True) for i in range(0, len(self.contact_prims)): self.sensor_radii[self.contact_prims[i]] = self.contact_radius_default def create_contact_sensors(self, world: World, envs_namespace: str): robot_name = self.name contact_link_names = self.contact_prims for sensor_idx in range(0, self.n_sensors): # we create views of the contact links for all envs if self.contact_geom_prim_views[sensor_idx] is None: self.contact_geom_prim_views[sensor_idx] = RigidPrimView(prim_paths_expr=envs_namespace + "/env_.*/" + robot_name + \ "/" + contact_link_names[sensor_idx], name= self.name + "RigidPrimView" + contact_link_names[sensor_idx], contact_filter_prim_paths_expr= self._filter_paths, prepare_contact_sensors=True, track_contact_forces = True, disable_stablization = False, reset_xform_properties=False, max_contact_count = self.n_envs ) world.scene.add(self.contact_geom_prim_views[sensor_idx]) # for env_idx in range(0, self.n_envs): # # env_idx = 0 # create contact sensors for base env only # for sensor_idx in range(0, self.n_sensors): # contact_link_prim_path = envs_namespace + f"/env_{env_idx}" + \ # "/" + robot_name + \ # "/" + contact_link_names[sensor_idx] # sensor_prim_path = contact_link_prim_path + \ # "/contact_sensor" # contact sensor prim path # print(f"[{self.__class__.__name__}]" + f"[{self.journal.status}]" + ": creating contact sensor at " + # f"{sensor_prim_path}...") # contact_sensor = ContactSensor( # prim_path=sensor_prim_path, # name=f"{robot_name}{env_idx}_{contact_link_names[sensor_idx]}_contact_sensor", # min_threshold=0, # max_threshold=10000000, # radius=self.sensor_radii[contact_link_names[sensor_idx]], # translation=self.contact_offsets[contact_link_names[sensor_idx]], # position=None # ) # self.contact_sensors[env_idx][sensor_idx] = world.scene.add(contact_sensor) # self.contact_sensors[env_idx][sensor_idx].add_raw_contact_data_to_frame() # print(f"[{self.__class__.__name__}]" + f"[{self.journal.status}]" + ": contact sensor at " + # f"{sensor_prim_path} created.") def get(self, dt: float, contact_link: str, env_indxs: torch.Tensor = None, clone = False): index = -1 try: index = self.contact_prims.index(contact_link) except: exception = f"[{self.__class__.__name__}]" + f"[{self.journal.exception}]" + \ f"could not find contact link {contact_link} in contact list {' '.join(self.contact_prims)}." Journal.log(self.__class__.__name__, "get", exception, LogType.EXCEP, throw_when_excep = True) if env_indxs is None: return self.contact_geom_prim_views[index].get_net_contact_forces(clone = clone, dt = dt).view(self.n_envs, 3) else: if self._enable_debug: if env_indxs is not None: if not isinstance(env_indxs, torch.Tensor): msg = "Provided env_indxs should be a torch tensor of indexes!" Journal.log(self.__class__.__name__, "get", msg, LogType.EXCEP, throw_when_excep = True) if not len(env_indxs.shape) == 1: msg = "Provided robot_indxs should be a 1D torch tensor!" Journal.log(self.__class__.__name__, "get", msg, LogType.EXCEP, throw_when_excep = True) if self.using_gpu: if not env_indxs.device.type == "cuda": error = "Provided env_indxs should be on GPU!" Journal.log(self.__class__.__name__, "_step_jnt_imp_control", error, LogType.EXCEP, True) else: if not env_indxs.device.type == "cpu": error = "Provided env_indxs should be on CPU!" 
Journal.log(self.__class__.__name__,
                            "get",
                            error,
                            LogType.EXCEP,
                            True)
            return self.contact_geom_prim_views[index].get_net_contact_forces(clone = clone,
                        dt = dt).view(self.n_envs, 3)[env_indxs, :]
10,792
Python
43.415638
133
0.47424
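A hedged construction sketch for OmniContactSensors; the robot name, link names and numeric values are illustrative, and the commented creation/query calls require a running Isaac Sim World with cloned environments:

import numpy as np
import torch

contact_prims = {"myrobot": ["wheel_1", "wheel_2"]}  # hypothetical contact links
contact_offsets = {"myrobot": {"wheel_1": np.zeros(3), "wheel_2": np.zeros(3)}}
sensor_radii = {"myrobot": {"wheel_1": 0.003, "wheel_2": 0.003}}

sensors = OmniContactSensors(name="myrobot",
            n_envs=4,
            contact_prims=contact_prims,
            contact_offsets=contact_offsets,
            sensor_radii=sensor_radii,
            device="cpu",
            dtype=torch.float32)
# sensors.create_contact_sensors(world, envs_namespace="/World/envs")
# forces = sensors.get(dt=1.0/60.0, contact_link="wheel_1")  # (n_envs, 3) net forces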
AndrePatri/OmniRoboGym/omni_robo_gym/utils/math_utils.py
import torch import time import torch.nn.functional as F def normalize_quaternion(q): # Normalizes the quaternion return q / torch.norm(q, dim=-1, keepdim=True) def quaternion_difference(q1, q2): """ Compute the quaternion difference needed to rotate from q1 to q2 """ def quat_conjugate(q): # Computes the conjugate of a quaternion w, x, y, z = q.unbind(-1) return torch.stack([w, -x, -y, -z], dim=-1) q1_conj = quat_conjugate(q1) return quaternion_multiply(q2, q1_conj) def quaternion_multiply(q1, q2): """ Multiply two quaternions. """ w1, x1, y1, z1 = q1.unbind(-1) w2, x2, y2, z2 = q2.unbind(-1) return torch.stack([ w1*w2 - x1*x2 - y1*y2 - z1*z2, w1*x2 + x1*w2 + y1*z2 - z1*y2, w1*y2 - x1*z2 + y1*w2 + z1*x2, w1*z2 + x1*y2 - y1*x2 + z1*w2 ], dim=-1) def quaternion_to_angular_velocity(q_diff, dt): """ Convert a quaternion difference to an angular velocity vector. """ angle = 2 * torch.arccos(q_diff[..., 0].clamp(-1.0, 1.0)) # Clamping for numerical stability axis = q_diff[..., 1:] norm = axis.norm(dim=-1, keepdim=True) norm = torch.where(norm > 0, norm, torch.ones_like(norm)) axis = axis / norm angle = angle.unsqueeze(-1) # Add an extra dimension for broadcasting return (angle / dt) * axis def quat_to_omega(q0, q1, dt): """ Convert quaternion pairs to angular velocities """ if q0.shape != q1.shape: raise ValueError("Tensor shapes do not match in quat_to_omega.") # Normalize quaternions and compute differences q0_normalized = normalize_quaternion(q0) q1_normalized = normalize_quaternion(q1) q_diff = quaternion_difference(q0_normalized, q1_normalized) return quaternion_to_angular_velocity(q_diff, dt) def rel_vel(offset_q0_q1, v0): # Calculate relative linear velocity in frame q1 from linear velocity in frame q0 using quaternions. # Ensure the quaternion is normalized offset_q0_q1 = F.normalize(offset_q0_q1, p=2, dim=0) # Convert the linear velocity vector to a quaternion v0_q = torch.cat([torch.tensor([0]), v0]) # Rotate the linear velocity quaternion using the orientation offset quaternion rotated_velocity_quaternion = quaternion_multiply(offset_q0_q1, v0_q) offset_q0_q1_inverse = torch.cat([offset_q0_q1[0:1], -offset_q0_q1[1:]]) # Multiply by the conjugate of the orientation offset quaternion to obtain the result in frame f1 v1_q = quaternion_multiply(rotated_velocity_quaternion, offset_q0_q1_inverse) # Extract the linear velocity vector from the quaternion result v1 = v1_q[1:] return v1 # Example usage n_envs = 100 # Number of environments dt = 0.1 # Time step # Random example tensors for initial and final orientations q_initial = torch.randn(n_envs, 4) q_final = torch.randn(n_envs, 4) start_time = time.perf_counter() # Convert to angular velocities omega = quat_to_omega(q_initial, q_final, dt) end_time = time.perf_counter() elapsed_time = end_time - start_time print(f"Time taken to compute angular velocities: {elapsed_time:.6f} seconds")
3,149
Python
32.870967
104
0.668466
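A quick sanity check for quat_to_omega: a pure rotation of 0.1 rad about the z axis over dt seconds should yield an angular velocity of roughly [0, 0, 0.1/dt]:

import math
import torch
from omni_robo_gym.utils.math_utils import quat_to_omega

dt = 0.01
theta = 0.1  # [rad]
q0 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # identity quaternion (w, x, y, z)
q1 = torch.tensor([[math.cos(theta / 2), 0.0, 0.0, math.sin(theta / 2)]])
omega = quat_to_omega(q0, q1, dt)
print(omega)  # ~ tensor([[0., 0., 10.]])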
AndrePatri/OmniRoboGym/omni_robo_gym/utils/rt_factor.py
import time

class RtFactor():

    def __init__(self,
            dt_nom: float,
            window_size: int):

        self._it_counter = 0

        self._dt_nom = dt_nom

        self._start_time = time.perf_counter()

        self._current_rt_factor = 0.0

        self._window_size = window_size

        self._real_time = 0
        self._nom_time = 0

    def update(self):

        self._real_time = time.perf_counter() - self._start_time

        self._it_counter += 1

        self._nom_time += self._dt_nom

        self._current_rt_factor = self._nom_time / self._real_time

    def reset_due(self):

        return (self._it_counter+1) % self._window_size == 0

    def get_avrg_step_time(self):

        return self._real_time / self._window_size

    def get_dt_nom(self):

        return self._dt_nom

    def get_nom_time(self):

        return self._nom_time

    def get(self):

        return self._current_rt_factor

    def reset(self):

        self._it_counter = 0
        self._nom_time = 0
        self._start_time = time.perf_counter()
1,096
Python
17.913793
66
0.530109
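A minimal usage sketch for RtFactor in a stepping loop; time.sleep stands in for an actual world.step() call:

import time
from omni_robo_gym.utils.rt_factor import RtFactor

rt = RtFactor(dt_nom=1.0 / 60.0, window_size=60)
for _ in range(240):
    time.sleep(1.0 / 120.0)  # placeholder for the real simulation step
    rt.update()
    if rt.reset_due():
        # here nominal time advances ~2x faster than wall time -> RT factor ~ 2
        print(f"RT factor: {rt.get():.2f}, avg step time: {rt.get_avrg_step_time() * 1000:.2f} ms")
        rt.reset()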
AndrePatri/OmniRoboGym/omni_robo_gym/utils/urdf_helpers.py
import xml.etree.ElementTree as ET class UrdfLimitsParser: def __init__(self, urdf_path, joint_names, backend = "numpy", device = "cpu"): self.urdf_path = urdf_path self.joint_names = joint_names self.limits_matrix = None self.backend = backend self.device = device if self.backend == "numpy" and \ self.device != "cpu": raise Exception("When using numpy backend, only cpu device is supported!") self.parse_urdf() def parse_urdf(self): tree = ET.parse(self.urdf_path) root = tree.getroot() num_joints = len(self.joint_names) self.limits_matrix = None self.inf = None if self.backend == "numpy": import numpy as np self.limits_matrix = np.full((num_joints, 6), np.nan) self.inf = np.inf elif self.backend == "torch": import torch self.limits_matrix = torch.full((num_joints, 6), torch.nan, device=self.device) self.inf = torch.inf else: raise Exception("Backend not supported") for joint_name in self.joint_names: joint_element = root.find(".//joint[@name='{}']".format(joint_name)) if joint_element is not None: limit_element = joint_element.find('limit') jnt_index = self.joint_names.index(joint_name) # position limits q_lower = float(limit_element.get('lower', - self.inf)) q_upper = float(limit_element.get('upper', self.inf)) # effort limits effort_limit = float(limit_element.get('effort', self.inf)) # vel limits velocity_limit = float(limit_element.get('velocity', self.inf)) self.limits_matrix[jnt_index, 0] = q_lower self.limits_matrix[jnt_index, 3] = q_upper self.limits_matrix[jnt_index, 1] = - abs(velocity_limit) self.limits_matrix[jnt_index, 4] = abs(velocity_limit) self.limits_matrix[jnt_index, 2] = - abs(effort_limit) self.limits_matrix[jnt_index, 5] = abs(effort_limit) def get_limits_matrix(self): return self.limits_matrix
2,425
Python
28.228915
91
0.524536
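A self-contained usage sketch for UrdfLimitsParser with a throwaway single-joint URDF (joint name and limit values are illustrative):

import tempfile
from omni_robo_gym.utils.urdf_helpers import UrdfLimitsParser

urdf = """<robot name="demo">
  <joint name="j1" type="revolute">
    <limit lower="-1.0" upper="1.0" effort="50.0" velocity="2.0"/>
  </joint>
</robot>"""

with tempfile.NamedTemporaryFile("w", suffix=".urdf", delete=False) as f:
    f.write(urdf)
    urdf_path = f.name

parser = UrdfLimitsParser(urdf_path, joint_names=["j1"], backend="numpy")
print(parser.get_limits_matrix())
# columns: [q_lower, -|v_max|, -|eff_max|, q_upper, +|v_max|, +|eff_max|]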
AndrePatri/OmniRoboGym/omni_robo_gym/utils/homing.py
# Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected]) # # This file is part of OmniRoboGym and distributed under the General Public License version 2 license. # # OmniRoboGym is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # OmniRoboGym is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>. # from omni.isaac.core.articulations.articulation_view import ArticulationView import torch import xml.etree.ElementTree as ET from SharsorIPCpp.PySharsorIPC import LogType from SharsorIPCpp.PySharsorIPC import Journal class OmniRobotHomer: def __init__(self, articulation: ArticulationView, srdf_path: str, backend = "torch", device: torch.device = torch.device("cpu"), dtype = torch.float64): self.torch_dtype = dtype if not articulation.initialized: exception = f"the provided articulation is not initialized properly!" Journal.log(self.__class__.__name__, "__init__", exception, LogType.EXCEP, throw_when_excep = True) self._articulation = articulation self.srdf_path = srdf_path self._device = device self.num_robots = self._articulation.count self.n_dofs = self._articulation.num_dof self.jnts_names = self._articulation.dof_names self.joint_idx_map = {} for joint in range(0, self.n_dofs): self.joint_idx_map[self.jnts_names[joint]] = joint if (backend != "torch"): print(f"[{self.__class__.__name__}]" + f"[{self.journal.info}]" + ": forcing torch backend. Other backends are not yet supported.") self._backend = "torch" self._homing = torch.full((self.num_robots, self.n_dofs), 0.0, device = self._device, dtype=self.torch_dtype) # homing configuration # open srdf and parse the homing field with open(srdf_path, 'r') as file: self._srdf_content = file.read() try: self._srdf_root = ET.fromstring(self._srdf_content) # Now 'root' holds the root element of the XML tree. # You can navigate through the XML tree to extract the tags and their values. # Example: To find all elements with a specific tag, you can use: # elements = root.findall('.//your_tag_name') # Example: If you know the specific structure of your .SRDF file, you can extract # the data accordingly, for instance: # for child in root: # if child.tag == 'some_tag_name': # tag_value = child.text # # Do something with the tag value. # elif child.tag == 'another_tag_name': # # Handle another tag. 
except ET.ParseError as e:
            Journal.log(self.__class__.__name__,
                "__init__",
                "could not read SRDF properly!!",
                LogType.WARN,
                throw_when_excep = False)

        # Find all the 'joint' elements within 'group_state' with the name attribute and their values
        joints = self._srdf_root.findall(".//group_state[@name='home']/joint")

        self._homing_map = {}

        for joint in joints:
            joint_name = joint.attrib['name']
            joint_value = joint.attrib['value']
            self._homing_map[joint_name] = float(joint_value)

        self._assign2homing()

    def _assign2homing(self):
        for joint in list(self._homing_map.keys()):
            if joint in self.joint_idx_map:
                self._homing[:, self.joint_idx_map[joint]] = torch.full((self.num_robots, 1),
                                            self._homing_map[joint],
                                            device = self._device,
                                            dtype=self.torch_dtype).flatten()
            else:
                Journal.log(self.__class__.__name__,
                    "_assign2homing",
                    "joint " + f"{joint}" + " is not present in the articulation. It will be ignored.",
                    LogType.WARN,
                    throw_when_excep = False)

    def get_homing(self,
            clone: bool = False):
        if not clone:
            return self._homing
        else:
            return self._homing.clone()
5,070
Python
36.286764
144
0.554438
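The core of OmniRobotHomer's SRDF handling is the extraction of the 'home' group_state; a standalone sketch of just that step, with a toy SRDF string:

import xml.etree.ElementTree as ET

srdf = """<robot name="demo">
  <group_state name="home" group="chains">
    <joint name="j1" value="0.5"/>
    <joint name="j2" value="-0.3"/>
  </group_state>
</robot>"""

root = ET.fromstring(srdf)
homing_map = {j.attrib["name"]: float(j.attrib["value"])
              for j in root.findall(".//group_state[@name='home']/joint")}
print(homing_map)  # {'j1': 0.5, 'j2': -0.3}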
AndrePatri/OmniRoboGym/omni_robo_gym/utils/jnt_imp_cntrl.py
# Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
# 
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
# 
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# 
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
# 
import torch

from typing import List
from enum import Enum

from omni.isaac.core.articulations.articulation_view import ArticulationView

from omni_robo_gym.utils.urdf_helpers import UrdfLimitsParser

import time

from SharsorIPCpp.PySharsorIPC import LogType
from SharsorIPCpp.PySharsorIPC import Journal

class FirstOrderFilter:

    # a class implementing a simple first order filter

    def __init__(self,
            dt: float,
            filter_BW: float = 0.1,
            rows: int = 1,
            cols: int = 1,
            device: torch.device = torch.device("cpu"),
            dtype = torch.double):

        self._torch_dtype = dtype

        self._torch_device = device

        self._dt = dt

        self._rows = rows
        self._cols = cols

        self._filter_BW = filter_BW

        import math
        self._gain = 2 * math.pi * self._filter_BW

        self.yk = torch.zeros((self._rows, self._cols), device = self._torch_device,
                        dtype=self._torch_dtype)
        self.ykm1 = torch.zeros((self._rows, self._cols), device = self._torch_device,
                        dtype=self._torch_dtype)

        self.refk = torch.zeros((self._rows, self._cols), device = self._torch_device,
                        dtype=self._torch_dtype)
        self.refkm1 = torch.zeros((self._rows, self._cols), device = self._torch_device,
                        dtype=self._torch_dtype)

        self._kh2 = self._gain * self._dt / 2.0
        self._coeff_ref = self._kh2 * 1/ (1 + self._kh2)
        self._coeff_km1 = (1 - self._kh2) / (1 + self._kh2)

    def update(self,
               refk: torch.Tensor = None):

        if refk is not None:
            self.refk[:, :] = refk

        self.yk[:, :] = torch.add(torch.mul(self.ykm1, self._coeff_km1),
                            torch.mul(torch.add(self.refk, self.refkm1),
                                    self._coeff_ref))

        self.refkm1[:, :] = self.refk
        self.ykm1[:, :] = self.yk

    def reset(self,
            idxs: torch.Tensor = None):

        if idxs is None:
            # reset data for all rows
            self.yk[:, :] = torch.zeros((self._rows, self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)
            self.ykm1[:, :] = torch.zeros((self._rows, self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)
            self.refk[:, :] = torch.zeros((self._rows, self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)
            self.refkm1[:, :] = torch.zeros((self._rows, self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)
        else:
            # only reset the rows selected by idxs
            self.yk[idxs, :] = torch.zeros((idxs.shape[0], self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)
            self.ykm1[idxs, :] = torch.zeros((idxs.shape[0], self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)
            self.refk[idxs, :] = torch.zeros((idxs.shape[0], self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)
            self.refkm1[idxs, :] = torch.zeros((idxs.shape[0], self._cols), device = self._torch_device,
                            dtype=self._torch_dtype)

    def get(self):

        return self.yk

class JntSafety:

    def __init__(self,
            urdf_parser: UrdfLimitsParser):

        self.limits_parser = urdf_parser

        self.limit_matrix = self.limits_parser.get_limits_matrix()

    def apply(self,
            q_cmd=None,
v_cmd=None, eff_cmd=None):

        if q_cmd is not None:
            self.saturate_tensor(q_cmd, position=True)
        if v_cmd is not None:
            self.saturate_tensor(v_cmd, velocity=True)
        if eff_cmd is not None:
            self.saturate_tensor(eff_cmd, effort=True)

    def has_nan(self, tensor):
        return torch.any(torch.isnan(tensor))

    def saturate_tensor(self, tensor, position=False, velocity=False, effort=False):

        if self.has_nan(tensor):
            exception = "Found NaN elements in provided tensor!"
            Journal.log(self.__class__.__name__,
                "saturate_tensor",
                exception,
                LogType.EXCEP,
                throw_when_excep = False)
            # replace NaN values with infinity, so that we can clamp them
            tensor[:, :] = torch.nan_to_num(tensor, nan=torch.inf)

        if position:
            tensor[:, :] = torch.clamp(tensor[:, :],
                                min=self.limit_matrix[:, 0], max=self.limit_matrix[:, 3])
        elif velocity:
            tensor[:, :] = torch.clamp(tensor[:, :],
                                min=self.limit_matrix[:, 1], max=self.limit_matrix[:, 4])
        elif effort:
            tensor[:, :] = torch.clamp(tensor[:, :],
                                min=self.limit_matrix[:, 2], max=self.limit_matrix[:, 5])

class OmniJntImpCntrl:

    class IndxState(Enum):

        NONE = -1
        VALID = 1
        INVALID = 0

    def __init__(self,
            articulation: ArticulationView,
            default_pgain = 300.0,
            default_vgain = 30.0,
            backend = "torch",
            device: torch.device = torch.device("cpu"),
            filter_BW = 50.0, # [Hz]
            filter_dt = None, # [s] should correspond to the dt between samples
            override_art_controller = False,
            init_on_creation = False,
            dtype = torch.double,
            enable_safety = True,
            urdf_path: str = None,
            enable_profiling: bool = False,
            debug_checks: bool = False):

        self._torch_dtype = dtype
        self._torch_device = device

        self.enable_profiling = enable_profiling
        self._debug_checks = debug_checks

        # debug data
        self.profiling_data = {}
        self.profiling_data["time_to_update_state"] = -1.0
        self.profiling_data["time_to_set_refs"] = -1.0
        self.profiling_data["time_to_apply_cmds"] = -1.0
        self.start_time = None
        if self.enable_profiling:
            self.start_time = time.perf_counter()

        self.enable_safety = enable_safety
        self.limiter = None
        self.robot_limits = None
        self.urdf_path = urdf_path

        # whether to override Isaac's internal joint articulation PD controller or not
        self.override_art_controller = override_art_controller

        # init. articulation's gains and refs as soon as the controller is created
        self.init_art_on_creation = init_on_creation

        self.gains_initialized = False
        self.refs_initialized = False

        self._default_pgain = default_pgain
        self._default_vgain = default_vgain

        self._filter_BW = filter_BW
        self._filter_dt = filter_dt

        # used to actually apply control signals to the robot
        self._articulation_view = articulation

        if not self._articulation_view.initialized:
            exception = "The provided articulation_view is not initialized properly!"
            Journal.log(self.__class__.__name__,
                "__init__",
                exception,
                LogType.EXCEP,
                throw_when_excep = True)

        self._valid_signal_types = ["pos_ref", "vel_ref", "eff_ref", # references
                "pos", "vel", "eff", # measurements (necessary if overriding Isaac's art. controller)
                "pgain", "vgain"]

        self.num_robots = self._articulation_view.count
        self.n_dofs = self._articulation_view.num_dof
        self.jnts_names = self._articulation_view.dof_names

        if backend != "torch":
            warning = "The only supported backend is torch!"
            Journal.log(self.__class__.__name__,
                "__init__",
                warning,
                LogType.WARN,
                throw_when_excep = True)
        self._backend = "torch"

        if self.enable_safety:
            if self.urdf_path is None:
                exception = "If enable_safety is set to True, a urdf_path should be provided too!"
                Journal.log(self.__class__.__name__,
                    "__init__",
                    exception,
                    LogType.EXCEP,
                    throw_when_excep = True)
            self.robot_limits = UrdfLimitsParser(urdf_path=self.urdf_path,
                                    joint_names=self.jnts_names,
                                    backend=self._backend,
                                    device=self._torch_device)
            self.limiter = JntSafety(urdf_parser=self.robot_limits)

        self._pos_err = None
        self._vel_err = None
        self._pos = None
        self._vel = None
        self._eff = None
        self._imp_eff = None

        self._filter_available = False
        if filter_dt is not None:
            self._filter_BW = filter_BW
            self._filter_dt = filter_dt
            self._pos_ref_filter = FirstOrderFilter(dt=self._filter_dt,
                                    filter_BW=self._filter_BW,
                                    rows=self.num_robots,
                                    cols=self.n_dofs,
                                    device=self._torch_device,
                                    dtype=self._torch_dtype)
            self._vel_ref_filter = FirstOrderFilter(dt=self._filter_dt,
                                    filter_BW=self._filter_BW,
                                    rows=self.num_robots,
                                    cols=self.n_dofs,
                                    device=self._torch_device,
                                    dtype=self._torch_dtype)
            self._eff_ref_filter = FirstOrderFilter(dt=self._filter_dt,
                                    filter_BW=self._filter_BW,
                                    rows=self.num_robots,
                                    cols=self.n_dofs,
                                    device=self._torch_device,
                                    dtype=self._torch_dtype)
            self._filter_available = True
        else:
            warning = "No filter dt provided -> reference filter will not be used!"
            Journal.log(self.__class__.__name__,
                "__init__",
                warning,
                LogType.WARN,
                throw_when_excep = True)

        self.reset() # initialize data

    def update_state(self,
            pos: torch.Tensor = None,
            vel: torch.Tensor = None,
            eff: torch.Tensor = None,
            robot_indxs: torch.Tensor = None,
            jnt_indxs: torch.Tensor = None):

        if self.enable_profiling:
            self.start_time = time.perf_counter()

        selector = self._gen_selector(robot_indxs=robot_indxs,
                        jnt_indxs=jnt_indxs) # only checks and throws if debug_checks

        if pos is not None:
            self._validate_signal(signal = pos,
                    selector = selector,
                    name="pos") # does nothing if not debug_checks
            self._pos[selector] = pos
        if vel is not None:
            self._validate_signal(signal = vel,
                    selector = selector,
                    name="vel")
            self._vel[selector] = vel
        if eff is not None:
            self._validate_signal(signal = eff,
                    selector = selector,
                    name="eff")
            self._eff[selector] = eff

        if self.enable_profiling:
            self.profiling_data["time_to_update_state"] = \
                time.perf_counter() - self.start_time

    def set_gains(self,
            pos_gains: torch.Tensor = None,
            vel_gains: torch.Tensor = None,
            robot_indxs: torch.Tensor = None,
            jnt_indxs: torch.Tensor = None):

        selector = self._gen_selector(robot_indxs=robot_indxs,
                        jnt_indxs=jnt_indxs) # only checks and throws if debug_checks

        if pos_gains is not None:
            self._validate_signal(signal = pos_gains,
                    selector = selector,
                    name="pos_gains")
            self._pos_gains[selector] = pos_gains
            if not self.override_art_controller:
                self._articulation_view.set_gains(kps = self._pos_gains)
        if vel_gains is not None:
            self._validate_signal(signal = vel_gains,
                    selector = selector,
                    name="vel_gains")
            self._vel_gains[selector] = vel_gains
            if not self.override_art_controller:
                self._articulation_view.set_gains(kds = self._vel_gains)

    def set_refs(self,
            eff_ref: torch.Tensor = None,
            pos_ref: torch.Tensor = None,
            vel_ref: torch.Tensor = None,
            robot_indxs: torch.Tensor = None,
            jnt_indxs: torch.Tensor = None):

        if self.enable_profiling:
            self.start_time = time.perf_counter()

        selector = self._gen_selector(robot_indxs=robot_indxs,
                        jnt_indxs=jnt_indxs) # only checks and throws if debug_checks

        if eff_ref is not None:
            self._validate_signal(signal = eff_ref,
                    selector = selector,
                    name="eff_ref")
            self._eff_ref[selector] = eff_ref
        if pos_ref is not None:
            self._validate_signal(signal = pos_ref,
                    selector = selector,
                    name="pos_ref")
            self._pos_ref[selector] = pos_ref
        if vel_ref is not None:
            self._validate_signal(signal = vel_ref,
                    selector = selector,
                    name="vel_ref")
            self._vel_ref[selector] = vel_ref

        if self.enable_profiling:
            self.profiling_data["time_to_set_refs"] = time.perf_counter() - self.start_time

    def apply_cmds(self,
            filter = False):

        # initialize gains and refs if not done previously

        if self.enable_profiling:
            self.start_time = time.perf_counter()

        if not self.gains_initialized:
            self._apply_init_gains_to_art()
        if not self.refs_initialized:
            self._apply_init_refs_to_art()

        if filter and self._filter_available:
            self._pos_ref_filter.update(self._pos_ref)
            self._vel_ref_filter.update(self._vel_ref)
            self._eff_ref_filter.update(self._eff_ref)

            # we first filter, then apply safety
            eff_ref_filt = self._eff_ref_filter.get()
            pos_ref_filt = self._pos_ref_filter.get()
            vel_ref_filt = self._vel_ref_filter.get()

            if self.limiter is not None:
                # saturating ref cmds
                self.limiter.apply(q_cmd=pos_ref_filt,
                        v_cmd=vel_ref_filt,
                        eff_cmd=eff_ref_filt)

            if not self.override_art_controller:
                # using omniverse's articulation PD controller
                self._articulation_view.set_joint_efforts(eff_ref_filt)
                self._articulation_view.set_joint_position_targets(pos_ref_filt)
                self._articulation_view.set_joint_velocity_targets(vel_ref_filt)
            else:
                # impedance torque computed explicitly
                self._pos_err = torch.sub(self._pos_ref_filter.get(), self._pos)
                self._vel_err = torch.sub(self._vel_ref_filter.get(), self._vel)
                self._imp_eff = torch.add(self._eff_ref_filter.get(),
                                    torch.add(
                                        torch.mul(self._pos_gains, self._pos_err),
                                        torch.mul(self._vel_gains, self._vel_err)))
                # torch.cuda.synchronize()

                # we also make the resulting imp eff safe
                if self.limiter is not None:
                    self.limiter.apply(eff_cmd=self._imp_eff)

                # apply only effort (comprehensive of all imp. terms)
                self._articulation_view.set_joint_efforts(self._imp_eff)
        else:
            # we first apply safety to reference joint cmds
            if self.limiter is not None:
                self.limiter.apply(q_cmd=self._pos_ref,
                        v_cmd=self._vel_ref,
                        eff_cmd=self._eff_ref)

            if not self.override_art_controller:
                # using omniverse's articulation PD controller
                self._articulation_view.set_joint_efforts(self._eff_ref)
                self._articulation_view.set_joint_position_targets(self._pos_ref)
                self._articulation_view.set_joint_velocity_targets(self._vel_ref)
            else:
                # impedance torque computed explicitly
                self._pos_err = torch.sub(self._pos_ref, self._pos)
                self._vel_err = torch.sub(self._vel_ref, self._vel)
                self._imp_eff = torch.add(self._eff_ref,
                                    torch.add(
                                        torch.mul(self._pos_gains, self._pos_err),
                                        torch.mul(self._vel_gains, self._vel_err)))
                # torch.cuda.synchronize()

                # we also make the resulting imp eff safe
                if self.limiter is not None:
                    self.limiter.apply(eff_cmd=self._imp_eff)

                # apply only effort (comprehensive of all imp. terms)
                self._articulation_view.set_joint_efforts(self._imp_eff)

        if self.enable_profiling:
            self.profiling_data["time_to_apply_cmds"] = \
                time.perf_counter() - self.start_time

    def get_jnt_names_matching(self, name_pattern: str):
        return [jnt for jnt in self.jnts_names if name_pattern in jnt]

    def get_jnt_idxs_matching(self, name_pattern: str):

        jnts_names = self.get_jnt_names_matching(name_pattern)
        jnt_idxs = [self.jnts_names.index(jnt) for jnt in jnts_names]
        if len(jnt_idxs) != 0:
            return torch.tensor(jnt_idxs,
                        dtype=torch.int64,
                        device=self._torch_device)
        else:
            return None

    def pos_gains(self):
        return self._pos_gains

    def vel_gains(self):
        return self._vel_gains

    def eff_ref(self):
        return self._eff_ref

    def pos_ref(self):
        return self._pos_ref

    def vel_ref(self):
        return self._vel_ref

    def pos_err(self):
        return self._pos_err

    def vel_err(self):
        return self._vel_err

    def pos(self):
        return self._pos

    def vel(self):
        return self._vel

    def eff(self):
        return self._eff

    def imp_eff(self):
        return self._imp_eff

    def reset(self,
            robot_indxs: torch.Tensor = None):

        self.gains_initialized = False
        self.refs_initialized = False

        self._all_dofs_idxs = torch.tensor([i for i in range(0, self.n_dofs)],
                                dtype=torch.int64,
                                device=self._torch_device)
        self._all_robots_idxs = torch.tensor([i for i in range(0, self.num_robots)],
                                dtype=torch.int64,
                                device=self._torch_device)

        if robot_indxs is None: # reset all data
            # we assume diagonal joint impedance gain matrices, so we can
            # save on memory and only store the diagonal
            self._pos_gains = torch.full((self.num_robots, self.n_dofs),
                                self._default_pgain,
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self._vel_gains = torch.full((self.num_robots, self.n_dofs),
                                self._default_vgain,
                                device = self._torch_device,
                                dtype=self._torch_dtype)

            self._eff_ref = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self._pos_ref = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self._vel_ref = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)

            self._pos_err = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self._vel_err = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)

            self._pos = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self._vel = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self._eff = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
            self._imp_eff = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)

            if self._filter_available:
                self._pos_ref_filter.reset()
                self._vel_ref_filter.reset()
                self._eff_ref_filter.reset()
        else: # only reset some robots
            if self._debug_checks:
                self._validate_selectors(robot_indxs=robot_indxs) # throws if checks not satisfied
            n_envs = robot_indxs.shape[0]

            # we assume diagonal joint impedance gain matrices, so we can
            # save on memory and only store the diagonal
            self._pos_gains[robot_indxs, :] = torch.full((n_envs, self.n_dofs),
                                    self._default_pgain,
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)
            self._vel_gains[robot_indxs, :] = torch.full((n_envs, self.n_dofs),
                                    self._default_vgain,
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)

            self._eff_ref[robot_indxs, :] = 0
            self._pos_ref[robot_indxs, :] = 0
            self._vel_ref[robot_indxs, :] = 0

            # if self.override_art_controller:
            # saving memory (these are not necessary if not overriding Isaac's art. controller)
            self._pos_err[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs),
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)
            self._vel_err[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs),
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)
            self._pos[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs),
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)
            self._vel[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs),
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)
            self._eff[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs),
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)
            self._imp_eff[robot_indxs, :] = torch.zeros((n_envs, self.n_dofs),
                                    device = self._torch_device,
                                    dtype=self._torch_dtype)

            if self._filter_available:
                self._pos_ref_filter.reset(idxs = robot_indxs)
                self._vel_ref_filter.reset(idxs = robot_indxs)
                self._eff_ref_filter.reset(idxs = robot_indxs)

        if self.init_art_on_creation:
            # will use updated gains/refs based on reset (non updated gains/refs will be the same)
            self._apply_init_gains_to_art()
            self._apply_init_refs_to_art()

    def _apply_init_gains_to_art(self):

        if not self.gains_initialized:
            if not self.override_art_controller:
                self._articulation_view.set_gains(kps = self._pos_gains,
                                    kds = self._vel_gains)
            else:
                # setting Isaac's PD controller gains to 0
                no_gains = torch.zeros((self.num_robots, self.n_dofs),
                                device = self._torch_device,
                                dtype=self._torch_dtype)
                self._articulation_view.set_gains(kps = no_gains,
                                    kds = no_gains)
            self.gains_initialized = True

    def _apply_init_refs_to_art(self):

        if not self.refs_initialized:
            if not self.override_art_controller:
                self._articulation_view.set_joint_efforts(self._eff_ref)
                self._articulation_view.set_joint_position_targets(self._pos_ref)
                self._articulation_view.set_joint_velocity_targets(self._vel_ref)
            else:
                self._articulation_view.set_joint_efforts(self._eff_ref)
            self.refs_initialized = True

    def _validate_selectors(self,
            robot_indxs: torch.Tensor = None,
            jnt_indxs: torch.Tensor = None):

        if robot_indxs is not None:
            robot_indxs_shape = robot_indxs.shape
            if not (len(robot_indxs_shape) == 1 and \
                robot_indxs.dtype == torch.int64 and \
                bool(torch.min(robot_indxs) >= 0) and \
                bool(torch.max(robot_indxs) < self.num_robots) and \
                robot_indxs.device.type == self._torch_device.type): # sanity checks
                error = "Mismatch in provided selector \n" + \
                    "robot_indxs_shape -> " + f"{len(robot_indxs_shape)}" + " VS" + " expected -> " + f"{1}" + "\n" + \
                    "robot_indxs.dtype -> " + f"{robot_indxs.dtype}" + " VS" + " expected -> " + f"{torch.int64}" + "\n" + \
                    "torch.min(robot_indxs) >= 0 -> " + f"{bool(torch.min(robot_indxs) >= 0)}" + " VS" + f" {True}" + "\n" + \
                    "torch.max(robot_indxs) < self.num_robots -> " + f"{torch.max(robot_indxs)}" + " VS" + f" {self.num_robots}" + "\n" + \
                    "robot_indxs.device -> " + f"{robot_indxs.device.type}" + " VS" + " expected -> " + f"{self._torch_device.type}" + "\n"
                Journal.log(self.__class__.__name__,
                    "_validate_selectors",
                    error,
                    LogType.EXCEP,
                    throw_when_excep = True)

        if jnt_indxs is not None:
            jnt_indxs_shape = jnt_indxs.shape
            if not (len(jnt_indxs_shape) == 1 and \
                jnt_indxs.dtype == torch.int64 and \
                bool(torch.min(jnt_indxs) >= 0) and \
                bool(torch.max(jnt_indxs) < self.n_dofs) and \
                jnt_indxs.device.type == self._torch_device.type): # sanity checks
                error = "Mismatch in provided selector \n" + \
                    "jnt_indxs_shape -> " + f"{len(jnt_indxs_shape)}" + " VS" + " expected -> " + f"{1}" + "\n" + \
                    "jnt_indxs.dtype -> " + f"{jnt_indxs.dtype}" + " VS" + " expected -> " + f"{torch.int64}" + "\n" + \
                    "torch.min(jnt_indxs) >= 0 -> " + f"{bool(torch.min(jnt_indxs) >= 0)}" + " VS" + f" {True}" + "\n" + \
                    "torch.max(jnt_indxs) < self.n_dofs -> " + f"{torch.max(jnt_indxs)}" + " VS" + f" {self.n_dofs}" + "\n" + \
                    "jnt_indxs.device -> " + f"{jnt_indxs.device.type}" + " VS" + " expected -> " + f"{self._torch_device.type}" + "\n"
                Journal.log(self.__class__.__name__,
                    "_validate_selectors",
                    error,
                    LogType.EXCEP,
                    throw_when_excep = True)

    def _validate_signal(self,
            signal: torch.Tensor,
            selector: torch.Tensor = None,
            name: str = "signal"):

        if self._debug_checks:
            signal_shape = signal.shape
            selector_shape = selector[0].shape
            if not (signal_shape[0] == selector_shape[0] and \
                signal_shape[1] == selector_shape[1] and \
                signal.device.type == self._torch_device.type and \
                signal.dtype == self._torch_dtype):
                big_error = f"Mismatch in provided signal [{name}" + "] and/or selector \n" + \
                    "signal rows -> " + f"{signal_shape[0]}" + " VS" + " expected rows -> " + f"{selector_shape[0]}" + "\n" + \
                    "signal cols -> " + f"{signal_shape[1]}" + " VS" + " expected cols -> " + f"{selector_shape[1]}" + "\n" + \
                    "signal dtype -> " + f"{signal.dtype}" + " VS" + " expected -> " + f"{self._torch_dtype}" + "\n" + \
                    "signal device -> " + f"{signal.device.type}" + " VS" + " expected type -> " + f"{self._torch_device.type}"
                Journal.log(self.__class__.__name__,
                    "_validate_signal",
                    big_error,
                    LogType.EXCEP,
                    throw_when_excep = True)

    def _gen_selector(self,
            robot_indxs: torch.Tensor = None,
            jnt_indxs: torch.Tensor = None):

        if self._debug_checks:
            self._validate_selectors(robot_indxs=robot_indxs,
                    jnt_indxs=jnt_indxs) # throws if not valid
        if robot_indxs is None:
            robot_indxs = self._all_robots_idxs
        if jnt_indxs is None:
            jnt_indxs = self._all_dofs_idxs

        return torch.meshgrid((robot_indxs, jnt_indxs),
                    indexing="ij")
32,884
Python
40.157697
139
0.485282
AndrePatri/OmniRoboGym/omni_robo_gym/utils/terrains.py
# Copyright (C) 2023 Andrea Patrizi (AndrePatri, [email protected])
#
# This file is part of OmniRoboGym and distributed under the General Public License version 2 license.
#
# OmniRoboGym is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OmniRoboGym is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OmniRoboGym. If not, see <http://www.gnu.org/licenses/>.
#
import os, sys

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)

import numpy as np

from omni_robo_gym.utils.terrain_utils import *
from pxr import Usd

class RlTerrains():

    def __init__(self,
            stage: Usd.Stage):

        self._stage = stage

    def get_wave_terrain(self,
            terrain_size = 40,
            num_waves = 10,
            amplitude = 1,
            position = np.array([0.0, 0.0, 0.0])):

        # creates a terrain
        num_terrains = 1
        terrain_width = terrain_size
        terrain_length = terrain_size
        horizontal_scale = 0.25 # [m]
        vertical_scale = 0.005 # [m]

        num_rows = int(terrain_width/horizontal_scale)
        num_cols = int(terrain_length/horizontal_scale)

        heightfield = np.zeros((num_terrains * num_rows, num_cols), dtype=np.int16)

        def new_sub_terrain():
            return SubTerrain(width=num_rows,
                        length=num_cols,
                        vertical_scale=vertical_scale,
                        horizontal_scale=horizontal_scale)

        heightfield[0:num_rows, :] = wave_terrain(new_sub_terrain(),
                                        num_waves=num_waves,
                                        amplitude=amplitude).height_field_raw

        vertices, triangles = convert_heightfield_to_trimesh(heightfield,
                                    horizontal_scale=horizontal_scale,
                                    vertical_scale=vertical_scale,
                                    slope_threshold=1.5)

        position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
        orientation = np.array([0.70711, 0.0, 0.0, -0.70711])

        add_terrain_to_stage(stage=self._stage,
                vertices=vertices,
                triangles=triangles,
                position=position,
                orientation=orientation)

    def get_sloped_terrain(self,
            terrain_size = 40,
            slope = -0.5,
            position = np.array([0.0, 0.0, 0.0])):

        # creates a terrain
        num_terrains = 1
        terrain_width = terrain_size
        terrain_length = terrain_size
        horizontal_scale = 0.25 # [m]
        vertical_scale = 0.005 # [m]

        num_rows = int(terrain_width/horizontal_scale)
        num_cols = int(terrain_length/horizontal_scale)

        heightfield = np.zeros((num_terrains * num_rows, num_cols), dtype=np.int16)

        def new_sub_terrain():
            return SubTerrain(width=num_rows,
                        length=num_cols,
                        vertical_scale=vertical_scale,
                        horizontal_scale=horizontal_scale)

        heightfield[0:num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(),
                                        slope=slope).height_field_raw

        vertices, triangles = convert_heightfield_to_trimesh(heightfield,
                                    horizontal_scale=horizontal_scale,
                                    vertical_scale=vertical_scale,
                                    slope_threshold=1.5)

        position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
        orientation = np.array([0.70711, 0.0, 0.0, -0.70711])

        add_terrain_to_stage(stage=self._stage,
                vertices=vertices,
                triangles=triangles,
                position=position,
                orientation=orientation)

    def get_stairs_terrain(self,
            terrain_size = 40,
            step_width = 0.75,
            step_height = -0.5,
            position = np.array([0.0, 0.0, 0.0])):

        # creates a terrain
        num_terrains = 1
        terrain_width = terrain_size
        terrain_length = terrain_size
        horizontal_scale = 0.25 # [m]
        vertical_scale = 0.005 # [m]

        num_rows = int(terrain_width/horizontal_scale)
        num_cols = int(terrain_length/horizontal_scale)

        heightfield = np.zeros((num_terrains * num_rows, num_cols), dtype=np.int16)

        def new_sub_terrain():
            return SubTerrain(width=num_rows,
                        length=num_cols,
                        vertical_scale=vertical_scale,
                        horizontal_scale=horizontal_scale)

        heightfield[0:num_rows, :] = stairs_terrain(new_sub_terrain(),
                                        step_width=step_width,
                                        step_height=step_height).height_field_raw

        vertices, triangles = convert_heightfield_to_trimesh(heightfield,
                                    horizontal_scale=horizontal_scale,
                                    vertical_scale=vertical_scale,
                                    slope_threshold=1.5)

        position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
        orientation = np.array([0.70711, 0.0, 0.0, -0.70711])

        add_terrain_to_stage(stage=self._stage,
                vertices=vertices,
                triangles=triangles,
                position=position,
                orientation=orientation)

    def get_random_terrain(self,
            terrain_size = 40,
            min_height = -0.2,
            max_height = 0.2,
            step = 0.2,
            downsampled_scale=0.5,
            position = np.array([0.0, 0.0, 0.0])):

        # creates a terrain
        num_terrains = 1
        terrain_width = terrain_size
        terrain_length = terrain_size
        horizontal_scale = 0.25 # [m]
        vertical_scale = 0.005 # [m]

        num_rows = int(terrain_width/horizontal_scale)
        num_cols = int(terrain_length/horizontal_scale)

        heightfield = np.zeros((num_terrains * num_rows, num_cols), dtype=np.int16)

        def new_sub_terrain():
            return SubTerrain(width=num_rows,
                        length=num_cols,
                        vertical_scale=vertical_scale,
                        horizontal_scale=horizontal_scale)

        heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(),
                                        min_height=min_height,
                                        max_height=max_height,
                                        step=step,
                                        downsampled_scale=downsampled_scale).height_field_raw

        vertices, triangles = convert_heightfield_to_trimesh(heightfield,
                                    horizontal_scale=horizontal_scale,
                                    vertical_scale=vertical_scale,
                                    slope_threshold=1.5)

        position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
        orientation = np.array([0.70711, 0.0, 0.0, -0.70711])

        add_terrain_to_stage(stage=self._stage,
                vertices=vertices,
                triangles=triangles,
                position=position,
                orientation=orientation)

    def get_obstacles_terrain(self,
            terrain_size = 40.0,
            num_obs = 50,
            max_height = 0.5,
            min_size = 0.5,
            max_size = 5.0,
            position = np.array([0.0, 0.0, 0.0])):

        # creates a terrain with discrete obstacles
        num_terrains = 1
        terrain_width = terrain_size
        terrain_length = terrain_size
        horizontal_scale = 0.25 # [m]
        vertical_scale = 0.005 # [m]

        num_rows = int(terrain_width/horizontal_scale)
        num_cols = int(terrain_length/horizontal_scale)

        heightfield = np.zeros((num_terrains * num_rows, num_cols), dtype=np.int16)

        def new_sub_terrain():
            return SubTerrain(width=num_rows,
                        length=num_cols,
                        vertical_scale=vertical_scale,
                        horizontal_scale=horizontal_scale)

        heightfield[0:num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(),
                                        max_height=max_height,
                                        min_size=min_size,
                                        max_size=max_size,
                                        num_rects=num_obs).height_field_raw

        vertices, triangles = convert_heightfield_to_trimesh(heightfield,
                                    horizontal_scale=horizontal_scale,
                                    vertical_scale=vertical_scale,
                                    slope_threshold=1.5)

        position = np.array([-terrain_width/2.0, terrain_length/2.0, 0]) + position
        orientation = np.array([0.70711, 0.0, 0.0, -0.70711])

        add_terrain_to_stage(stage=self._stage,
                vertices=vertices,
                triangles=triangles,
                position=position,
                orientation=orientation)

    def post_reset(self):
        pass

    def get_observations(self):
        pass

    def calculate_metrics(self) -> None:
        pass

    def is_done(self) -> None:
        pass
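
# Minimal usage sketch (not part of the original class); assumes a running
# Omniverse app so that a USD stage can be retrieved. Argument values are
# illustrative only:
#
#     import omni.usd
#     terrains = RlTerrains(omni.usd.get_context().get_stage())
#     terrains.get_obstacles_terrain(terrain_size=40.0,
#                         num_obs=100,
#                         max_height=0.4,
#                         min_size=0.5,
#                         max_size=5.0)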
9,922
Python
36.730038
160
0.528926
AndrePatri/OmniRoboGym/docs/isaac2023.1.0_issues.md
### Some bugs of Isaac2023.1.0 which can be easily fixed

#### 1.0 Nucleus blocking function makes startup super slow

Easy temporary fix: modify /home/username/.local/share/ov/pkg/isaac_sim-2023.1.0/exts/omni.isaac.core/omni/isaac/core/utils/nucleus.py. Change lines 178 to 198 (the check_server function) to the following:

```python
def check_server(server: str, path: str, timeout: float = 10.0) -> bool:
    """Check a specific server for a path

    Args:
        server (str): Name of Nucleus server
        path (str): Path to search

    Returns:
        bool: True if folder is found
    """
    carb.log_info("Checking path: {}{}".format(server, path))
    # Increase hang detection timeout
    if "localhost" not in server:
        omni.client.set_hang_detection_time_ms(10000)
        result, _ = omni.client.stat("{}{}".format(server, path))
        if result == Result.OK:
            carb.log_info("Success: {}{}".format(server, path))
            return True
    carb.log_info("Failure: {}{} not accessible".format(server, path))
    return False
```

#### 2.0 Grid Cloner bug

See `docs/grid_cloner_bugfix.py` for more details

#### 3.0 Contact sensor bug

When cloning environments, it's not possible to create contact sensors on the cloned environments because a check on the `collision_API` enabled flag fails. Removing the check seems to resolve the problem without any major or noticeable issues.
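
For reference, this is the kind of call that fails on an unpatched install once `env_0` has been cloned to `env_1` (a trimmed-down, illustrative version of `docs/contact_sensor_bugfix/contact_sensor.py`; prim path and thresholds are just examples):

```python
from omni.isaac.sensor import ContactSensor

# fails on stock Isaac2023.1.0 because the collision-API check does not
# hold for cloned prims; works once the check is removed
sensor = ContactSensor(
    prim_path="/World/envs/env_1/Ant/right_back_foot/contact_sensor",
    name="cloned_env_contact_sensor",
    min_threshold=0,
    max_threshold=10000000,
    radius=0.1,
)
```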
1,413
Markdown
39.399999
240
0.683652
AndrePatri/OmniRoboGym/docs/grid_cloner_bugfix/grid_cloner.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

from typing import List, Union

import numpy as np
import omni.usd
import torch
from omni.isaac.cloner import Cloner
from pxr import Gf, UsdGeom


class GridCloner(Cloner):
    """This is a specialized Cloner class that will automatically generate clones in a grid fashion."""

    def __init__(self, spacing: float, num_per_row: int = -1):
        """
        Args:
            spacing (float): Spacing between clones.
            num_per_row (int): Number of clones to place in a row. Defaults to sqrt(num_clones).
        """
        self._spacing = spacing
        self._num_per_row = num_per_row
        Cloner.__init__(self)

    def clone(
        self,
        source_prim_path: str,
        prim_paths: List[str],
        position_offsets: np.ndarray = None,
        orientation_offsets: np.ndarray = None,
        replicate_physics: bool = False,
        base_env_path: str = None,
        root_path: str = None,
        copy_from_source: bool = False,
    ):
        """Creates clones in a grid fashion. Positions of clones are computed automatically.

        Args:
            source_prim_path (str): Path of source object.
            prim_paths (List[str]): List of destination paths.
            position_offsets (np.ndarray): Positions to be applied as local translations on top of computed clone position.
                Defaults to None, no offset will be applied.
            orientation_offsets (np.ndarray): Orientations to be applied as local rotations for each clone.
                Defaults to None, no offset will be applied.
            replicate_physics (bool): Uses omni.physics replication. This will replicate physics properties directly for paths beginning with root_path and skip physics parsing for anything under the base_env_path.
            base_env_path (str): Path to namespace for all environments. Required if replicate_physics=True and define_base_env() not called.
            root_path (str): Prefix path for each environment. Required if replicate_physics=True and generate_paths() not called.
            copy_from_source: (bool): Setting this to False will inherit all clones from the source prim; any changes made to the source prim will be reflected in the clones.
                Setting this to True will make copies of the source prim when creating new clones; changes to the source prim will not be reflected in clones. Defaults to False. Note that setting this to True will take longer to execute.

        Returns:
            positions (List): Computed positions of all clones.
        """
        num_clones = len(prim_paths)

        self._num_per_row = int(np.sqrt(num_clones)) if self._num_per_row == -1 else self._num_per_row
        num_rows = np.ceil(num_clones / self._num_per_row)
        num_cols = np.ceil(num_clones / num_rows)

        row_offset = 0.5 * self._spacing * (num_rows - 1)
        col_offset = 0.5 * self._spacing * (num_cols - 1)

        stage = omni.usd.get_context().get_stage()

        positions = []
        orientations = []

        for i in range(num_clones):
            # compute transform
            row = i // num_cols
            col = i % num_cols
            x = row_offset - row * self._spacing
            y = col * self._spacing - col_offset

            up_axis = UsdGeom.GetStageUpAxis(stage)
            position = [x, y, 0] if up_axis == UsdGeom.Tokens.z else [x, 0, y]
            orientation = Gf.Quatd.GetIdentity()

            if position_offsets is not None:
                translation = position_offsets[i] + position
            else:
                translation = position

            if orientation_offsets is not None:
                orientation = (
                    Gf.Quatd(orientation_offsets[i][0].item(), Gf.Vec3d(orientation_offsets[i][1:].tolist()))
                    * orientation
                )
            else:
                orientation = [
                    orientation.GetReal(),
                    orientation.GetImaginary()[0],
                    orientation.GetImaginary()[1],
                    orientation.GetImaginary()[2],
                ]

            positions.append(translation)
            orientations.append(orientation)

        super().clone(
            source_prim_path=source_prim_path,
            prim_paths=prim_paths,
            positions=positions,
            orientations=orientations,
            replicate_physics=replicate_physics,
            base_env_path=base_env_path,
            root_path=root_path,
            copy_from_source=copy_from_source,
        )

        return positions
5,073
Python
40.590164
246
0.606742
AndrePatri/OmniRoboGym/docs/contact_sensor_bugfix/contact_sensor.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

from omni.isaac.kit import SimulationApp

simulation_app = SimulationApp({"headless": False})

import argparse
import sys

import carb
import numpy as np
from omni.isaac.core import World
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.sensor import ContactSensor
from omni.isaac.cloner import GridCloner
import omni.isaac.core.utils.prims as prim_utils

parser = argparse.ArgumentParser()
parser.add_argument("--test", default=False, action="store_true", help="Run in test mode")
args, unknown = parser.parse_known_args()

assets_root_path = get_assets_root_path()
if assets_root_path is None:
    carb.log_error("Could not find Isaac Sim assets folder")
    simulation_app.close()
    sys.exit()

my_world = World(stage_units_in_meters=1.0)
my_world.scene.add_default_ground_plane()

asset_path = assets_root_path + "/Isaac/Robots/Ant/ant.usd"
add_reference_to_stage(usd_path=asset_path, prim_path="/World/envs/env_0/Ant")

ant = my_world.scene.add(Articulation(prim_path="/World/envs/env_0/Ant/torso",
                name="ant",
                translation=np.array([0, 0, 1.5])))

ant_foot_prim_names = ["right_back_foot", "left_back_foot", "front_right_foot", "front_left_foot"]

translations = np.array(
    [[0.38202, -0.40354, -0.0887], [-0.4, -0.40354, -0.0887], [-0.4, 0.4, -0.0887], [0.4, 0.4, -0.0887]]
)

# moving def prim
# move_prim(robot_prim_path_default, # from
#           robot_base_prim_path) # to

num_envs = 3
env_ns = "/World/envs"
env_spacing = 15 # [m]
template_env_ns = env_ns + "/env_0"

cloner = GridCloner(spacing=env_spacing)
cloner.define_base_env(env_ns)
envs_prim_paths = cloner.generate_paths(env_ns + "/env", num_envs)

cloner.clone(
    source_prim_path=template_env_ns,
    prim_paths=envs_prim_paths,
    replicate_physics=True,
    position_offsets = None
)

ant_sensors = []
for i in range(4):
    ant_sensors.append(
        my_world.scene.add(
            ContactSensor(
                prim_path="/World/envs/env_0/Ant/" + ant_foot_prim_names[i] + "/contact_sensor",
                name="ant_contact_sensor_{}".format(i),
                min_threshold=0,
                max_threshold=10000000,
                radius=0.1,
                translation=translations[i],
            )
        )
    )

ant_sensors[0].add_raw_contact_data_to_frame()

ant_sensors2 = []
for i in range(4):
    ant_sensors2.append(
        my_world.scene.add(
            ContactSensor(
                prim_path="/World/envs/env_1/Ant/" + ant_foot_prim_names[i] + "/contact_sensor",
                name="ant_contact_sensor2_{}".format(i),
                min_threshold=0,
                max_threshold=10000000,
                radius=0.1,
                translation=translations[i],
            )
        )
    )

ant_sensors2[0].add_raw_contact_data_to_frame()

my_world.reset()

while simulation_app.is_running():
    my_world.step(render=True)
    if my_world.is_playing():
        print(ant_sensors2[0].get_current_frame())
        if my_world.current_time_step_index == 0:
            my_world.reset()

simulation_app.close()
3,638
Python
30.370689
126
0.657779
AndrePatri/OmniRoboGym/docs/sim_substepping_reset_issue/test_substepping_when_reset.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

import numpy as np
import torch

def get_device(sim_params):

    if "sim_device" in sim_params:
        device = sim_params["sim_device"]
    else:
        device = "cpu"
        physics_device_id = carb.settings.get_settings().get_as_int("/physics/cudaDevice")
        gpu_id = 0 if physics_device_id < 0 else physics_device_id
        if sim_params and "use_gpu_pipeline" in sim_params:
            # GPU pipeline must use GPU simulation
            if sim_params["use_gpu_pipeline"]:
                device = "cuda:" + str(gpu_id)
        elif sim_params and "use_gpu" in sim_params:
            if sim_params["use_gpu"]:
                device = "cuda:" + str(gpu_id)

    return device

def sim_parameters():

    # simulation parameters
    sim_params = {}

    # device settings
    sim_params["use_gpu_pipeline"] = True # disabling gpu pipeline is necessary to be able
    # to retrieve some quantities from the simulator which, otherwise, would have random values
    sim_params["use_gpu"] = True # does this actually do anything?
    if sim_params["use_gpu_pipeline"]:
        sim_params["device"] = "cuda"
    else:
        sim_params["device"] = "cpu"
    device = sim_params["device"]

    # sim_params["dt"] = 1.0/100.0 # physics_dt?
    sim_params["physics_dt"] = 1.0/400.0 # physics_dt?
    sim_params["rendering_dt"] = sim_params["physics_dt"]
    sim_params["substeps"] = 1 # number of physics steps to be taken for each rendering step
    sim_params["gravity"] = np.array([0.0, 0.0, -9.81])
    sim_params["enable_scene_query_support"] = False
    sim_params["use_fabric"] = True # Enable/disable reading of physics buffers directly. Default is True.
    sim_params["replicate_physics"] = True
    # sim_params["worker_thread_count"] = 4
    sim_params["solver_type"] = 1 # 0: PGS, 1: TGS, defaults to TGS. PGS faster but TGS more stable
    sim_params["enable_stabilization"] = True
    # sim_params["bounce_threshold_velocity"] = 0.2
    # sim_params["friction_offset_threshold"] = 0.04
    # sim_params["friction_correlation_distance"] = 0.025
    # sim_params["enable_sleeping"] = True

    # Per-actor settings ( can override in actor_options )
    sim_params["solver_position_iteration_count"] = 4 # defaults to 4
    sim_params["solver_velocity_iteration_count"] = 1 # defaults to 1
    sim_params["sleep_threshold"] = 0.0 # Mass-normalized kinetic energy threshold below which an actor
    # may go to sleep. Allowed range [0, max_float).
    sim_params["stabilization_threshold"] = 1e-5

    # Per-body settings ( can override in actor_options )
    # sim_params["enable_gyroscopic_forces"] = True
    # sim_params["density"] = 1000 # density to be used for bodies that do not specify mass or density
    # sim_params["max_depenetration_velocity"] = 100.0
    # sim_params["solver_velocity_iteration_count"] = 1

    # GPU buffers settings
    # sim_params["gpu_max_rigid_contact_count"] = 512 * 1024
    # sim_params["gpu_max_rigid_patch_count"] = 80 * 1024
    # sim_params["gpu_found_lost_pairs_capacity"] = 1024
    # sim_params["gpu_found_lost_aggregate_pairs_capacity"] = 1024
    # sim_params["gpu_total_aggregate_pairs_capacity"] = 1024
    # sim_params["gpu_max_soft_body_contacts"] = 1024 * 1024
    # sim_params["gpu_max_particle_contacts"] = 1024 * 1024
    # sim_params["gpu_heap_capacity"] = 64 * 1024 * 1024
    # sim_params["gpu_temp_buffer_capacity"] = 16 * 1024 * 1024
    # sim_params["gpu_max_num_partitions"] = 8

    return sim_params

def reset_state(art_view, idxs: torch.Tensor):

    # root q
    art_view.set_world_poses(positions = root_p_default[idxs, :],
                orientations=root_q_default[idxs, :],
                indices = idxs)
    # jnts q
    art_view.set_joint_positions(positions = jnts_q_default[idxs, :],
                indices = idxs)
    # jnts v
    art_view.set_joint_velocities(velocities = jnts_v_default[idxs, :],
                indices = idxs)
    # root v and omega
    concatenated_vel = torch.cat((root_v_default[idxs, :],
                            root_omega_default[idxs, :]), dim=1)
    art_view.set_velocities(velocities = concatenated_vel,
                indices = idxs)
    # jnts eff
    art_view.set_joint_efforts(efforts = jnts_eff_default[idxs, :],
                indices = idxs)

def get_robot_state(art_view):

    pose = art_view.get_world_poses(clone = True) # tuple: (pos, quat)

    # root p (measured, previous, default)
    root_p = pose[0]
    # root q (measured, previous, default)
    root_q = pose[1] # root orientation
    # jnt q (measured, previous, default)
    jnts_q = art_view.get_joint_positions(clone = True) # joint positions
    # root v (measured, default)
    root_v = art_view.get_linear_velocities(clone = True) # root lin. velocity
    # root omega (measured, default)
    root_omega = art_view.get_angular_velocities(clone = True) # root ang. velocity
    # joints v (measured, default)
    jnts_v = art_view.get_joint_velocities(clone = True) # joint velocities

    jnts_eff = art_view.get_measured_joint_efforts(clone = True)

    return root_p, root_q, jnts_q, root_v, root_omega, jnts_v, jnts_eff

from omni.isaac.kit import SimulationApp
import carb
import os

experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.omnirobogym.headless.kit'

sim_params = sim_parameters()

num_envs = 2
headless = True

simulation_app = SimulationApp({"headless": headless,
                            "physics_gpu": 0},
                        experience=experience)

from omni.isaac.core import World
from omni.isaac.core.articulations import ArticulationView
from omni.importer.urdf import _urdf

# urdf import config
import_config = _urdf.ImportConfig()
import_config.merge_fixed_joints = True
import_config.import_inertia_tensor = True
import_config.fix_base = False
import_config.self_collision = False

my_world = World(stage_units_in_meters=1.0,
            physics_dt=sim_params["physics_dt"],
            rendering_dt=sim_params["rendering_dt"],
            backend="torch",
            device=str(get_device(sim_params=sim_params)),
            physics_prim_path="/physicsScene",
            set_defaults = False,
            sim_params=sim_params)

# create initial robot
import omni.isaac.core.utils.prims as prim_utils

# create GridCloner instance
env_ns = "/World/envs"
template_env_ns = env_ns + "/env" # a single env. may contain multiple robots
base_env = template_env_ns + "_0"
base_robot_path = base_env + "/panda"

# get path to resource
from omni.isaac.core.utils.extensions import get_extension_path_from_name
extension_path = get_extension_path_from_name("omni.importer.urdf")

# import URDF at default prim path
import omni.kit
success, robot_prim_path_default = omni.kit.commands.execute(
    "URDFParseAndImportFile",
    urdf_path=extension_path + "/data/urdf/robots/franka_description/robots/panda_arm.urdf",
    import_config=import_config,
)

# moving default prim to base prim path (for potential cloning)
from omni.isaac.core.utils.prims import move_prim
prim_utils.define_prim(base_env)
move_prim(robot_prim_path_default, # from
    base_robot_path) # to

# cloning
from omni.isaac.cloner import GridCloner
cloner = GridCloner(spacing=6)
_envs_prim_paths = cloner.generate_paths(template_env_ns, num_envs)

position_offsets = np.array([[0.0, 0.0, 0.6]] * num_envs)
cloner.clone(
    source_prim_path=base_env,
    prim_paths=_envs_prim_paths,
    base_env_path=base_env,
    position_offsets=position_offsets,
    replicate_physics=True
)

# Prim paths structure:
# World/envs/env_0/panda/panda_link0/...

# this only in 2023.1.0
art_view = ArticulationView(name = "Panda" + "ArtView",
                    prim_paths_expr = env_ns + "/env_.*" + "/panda/panda_link0",
                    reset_xform_properties=False # required as per doc. when cloning
                    )
# moreover, robots are not cloned at different locations

my_world.scene.add(art_view)

ground_plane_prim_path = "/World/terrain"
my_world.scene.add_default_ground_plane(z_position=0,
                        name="terrain",
                        prim_path= ground_plane_prim_path,
                        static_friction=0.5,
                        dynamic_friction=0.5,
                        restitution=0.8)

cloner.filter_collisions(physicsscene_path = my_world.get_physics_context().prim_path,
                collision_root_path = "/World/collisions",
                prim_paths=_envs_prim_paths,
                global_paths=[ground_plane_prim_path] # can collide with these prims
                )

my_world.reset()

# init default state from measurements
root_p, root_q, jnts_q, root_v, \
    root_omega, jnts_v, jnts_eff = get_robot_state(art_view)

root_p_default = torch.clone(root_p)
root_q_default = torch.clone(root_q)
jnts_q_default = torch.clone(jnts_q)
jnts_v_default = torch.clone(jnts_v)
root_omega_default = torch.clone(root_omega)
root_v_default = torch.clone(root_v)
jnts_eff_default = torch.clone(jnts_eff).zero_()

# default values
root_p_default[:, 0] = 0
root_p_default[:, 1] = 0
root_p_default[:, 2] = 0.5
root_q_default[:, 0] = 0.0
root_q_default[:, 1] = 0.0
root_q_default[:, 2] = 0.0
root_q_default[:, 3] = 1.0
jnts_q_default[:, :] = 1.0
jnts_v_default[:, :] = 0.0
root_omega_default[:, :] = 0.0
root_v_default[:, :] = 0.0

no_gains = torch.zeros((num_envs, jnts_eff_default.shape[1]),
                device = get_device(sim_params),
                dtype=torch.float32)
art_view.set_gains(kps = no_gains, kds = no_gains)

print("Extension path: " + str(extension_path))
print("Prim paths: " + str(art_view.prim_paths))

reset_every_n_steps = 100
just_reset = False

for i in range(0, 1000):

    if ((i + 1) % reset_every_n_steps) == 0:
        print("resetting to default")
        reset_state(art_view,
                torch.tensor([0], dtype=torch.int))
        just_reset = True

    my_world.step()

    # retrieve state
    root_p, root_q, jnts_q, root_v, \
        root_omega, jnts_v, jnts_eff = get_robot_state(art_view)

    # if just_reset: # check we have reset correctly
    print("measured")
    print(jnts_q)
    print("default")
    print(jnts_q_default)

simulation_app.close()
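
# A check along these lines (hypothetical, not part of the original script)
# makes the reset issue this test is about easier to spot than eyeballing the
# printed tensors:
#
#     if just_reset:
#         if not torch.allclose(jnts_q, jnts_q_default, atol=1e-3):
#             print("WARNING: reset did not restore the default joint positions")
#         just_reset = False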
11,081
Python
34.06962
120
0.624222
abizovnuralem/go2_omniverse/terrain_cfg.py
# Copyright (c) 2024, RoboVerse community
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from terrain_generator_cfg import TerrainGeneratorCfg
import omni.isaac.orbit.terrains as terrain_gen


ROUGH_TERRAINS_CFG = TerrainGeneratorCfg(
    size=(8.0, 8.0),
    border_width=0.0,
    num_rows=1,
    num_cols=2,
    horizontal_scale=0.1,
    vertical_scale=0.005,
    slope_threshold=0.75,
    use_cache=False,
    sub_terrains={
        "pyramid_stairs": terrain_gen.MeshPyramidStairsTerrainCfg(
            proportion=0.2,
            step_height_range=(0.05, 0.23),
            step_width=0.3,
            platform_width=3.0,
            border_width=1.0,
            holes=False,
        ),
        "pyramid_stairs_inv": terrain_gen.MeshInvertedPyramidStairsTerrainCfg(
            proportion=0.2,
            step_height_range=(0.05, 0.23),
            step_width=0.3,
            platform_width=3.0,
            border_width=1.0,
            holes=False,
        ),
    },
)
2,217
Python
38.607142
80
0.700947
abizovnuralem/go2_omniverse/agent_cfg.py
# Copyright (c) 2024, RoboVerse community
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

unitree_go2_agent_cfg = {
    'seed': 42,
    'device': 'cuda',
    'num_steps_per_env': 24,
    'max_iterations': 15000,
    'empirical_normalization': False,
    'policy': {
        'class_name': 'ActorCritic',
        'init_noise_std': 1.0,
        'actor_hidden_dims': [512, 256, 128],
        'critic_hidden_dims': [512, 256, 128],
        'activation': 'elu'
    },
    'algorithm': {
        'class_name': 'PPO',
        'value_loss_coef': 1.0,
        'use_clipped_value_loss': True,
        'clip_param': 0.2,
        'entropy_coef': 0.01,
        'num_learning_epochs': 5,
        'num_mini_batches': 4,
        'learning_rate': 0.001,
        'schedule': 'adaptive',
        'gamma': 0.99,
        'lam': 0.95,
        'desired_kl': 0.01,
        'max_grad_norm': 1.0
    },
    'save_interval': 50,
    'experiment_name': 'unitree_go2_rough',
    'run_name': '',
    'logger': 'tensorboard',
    'neptune_project': 'orbit',
    'wandb_project': 'orbit',
    'resume': False,
    'load_run': '.*',
    'load_checkpoint': 'model_.*.pt'
}
2,562
Python
40.338709
80
0.613193
abizovnuralem/go2_omniverse/main.py
# Copyright (c) 2024, RoboVerse community
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Script to play a checkpoint of an RL agent from RSL-RL."""

from __future__ import annotations

"""Launch Isaac Sim Simulator first."""

import argparse

from omni.isaac.orbit.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Play an RL agent with RSL-RL.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default="Isaac-Velocity-Rough-Unitree-Go2-v0", help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

import omni

ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros2_bridge", True)

"""Rest everything follows."""

import os
import math
import gymnasium as gym
import torch
import carb
import usdrt.Sdf

from omni.isaac.orbit_tasks.utils import get_checkpoint_path
from omni.isaac.orbit_tasks.utils.wrappers.rsl_rl import (
    RslRlOnPolicyRunnerCfg,
    RslRlVecEnvWrapper
)
from omni.isaac.orbit_assets.unitree import UNITREE_GO2_CFG
from omni.isaac.orbit.envs import RLTaskEnvCfg
import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import ArticulationCfg, AssetBaseCfg
from omni.isaac.orbit.managers import CurriculumTermCfg as CurrTerm
from omni.isaac.orbit.managers import EventTermCfg as EventTerm
from omni.isaac.orbit.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.orbit.managers import ObservationTermCfg as ObsTerm
from omni.isaac.orbit.managers import RewardTermCfg as RewTerm
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.managers import TerminationTermCfg as DoneTerm
from omni.isaac.orbit.scene import InteractiveSceneCfg
from omni.isaac.orbit.sensors import ContactSensorCfg, RayCasterCfg, patterns, CameraCfg
from omni.isaac.orbit.terrains import TerrainImporterCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.noise import AdditiveUniformNoiseCfg as Unoise
import omni.isaac.orbit_tasks.locomotion.velocity.mdp as mdp
import omni.appwindow  # Contains handle to keyboard

from rsl_rl.runners import OnPolicyRunner

from typing import Literal
from dataclasses import MISSING

from omnigraph import create_front_cam_omnigraph
from agent_cfg import unitree_go2_agent_cfg
from terrain_cfg import ROUGH_TERRAINS_CFG

base_command = [0, 0, 0]


@configclass
class MySceneCfg(InteractiveSceneCfg):
    """Configuration for the terrain scene with a legged robot."""

    # ground terrain
    terrain = TerrainImporterCfg(
        prim_path="/World/ground",
        terrain_type="generator",
        terrain_generator=ROUGH_TERRAINS_CFG,
        max_init_terrain_level=5,
        collision_group=-1,
        physics_material=sim_utils.RigidBodyMaterialCfg(
            friction_combine_mode="multiply",
            restitution_combine_mode="multiply",
            static_friction=1.0,
            dynamic_friction=1.0,
        ),
        visual_material=sim_utils.MdlFileCfg(
            mdl_path="{NVIDIA_NUCLEUS_DIR}/Materials/Base/Architecture/Shingles_01.mdl",
            project_uvw=True,
        ),
        debug_vis=False,
    )
    # robots
    robot: ArticulationCfg = MISSING
    # sensors
    camera = CameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
        update_period=0.1,
        height=480,
        width=640,
        data_types=["rgb", "distance_to_image_plane"],
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
        ),
        offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
    )
    height_scanner = RayCasterCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",
        offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)),
        attach_yaw_only=True,
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]),
        debug_vis=False,
        mesh_prim_paths=["/World/ground"],
    )
    contact_forces = ContactSensorCfg(prim_path="{ENV_REGEX_NS}/Robot/.*", history_length=3, track_air_time=True)
    # lights
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )
    sky_light = AssetBaseCfg(
        prim_path="/World/skyLight",
        spawn=sim_utils.DomeLightCfg(color=(0.13, 0.13, 0.13), intensity=1000.0),
    )


def constant_commands(env: RLTaskEnvCfg) -> torch.Tensor:
    """The generated command from the command generator."""
    global base_command
    return torch.tensor([base_command], device=env.device).repeat(env.num_envs, 1)


@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
        projected_gravity = ObsTerm(
            func=mdp.projected_gravity,
            noise=Unoise(n_min=-0.05, n_max=0.05),
        )
        velocity_commands = ObsTerm(func=constant_commands)
        joint_pos = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel = ObsTerm(func=mdp.joint_vel_rel)
        actions = ObsTerm(func=mdp.last_action)
        height_scan = ObsTerm(
            func=mdp.height_scan,
            params={"sensor_cfg": SceneEntityCfg("height_scanner")},
            clip=(-1.0, 1.0),
        )

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()


@configclass
class ActionsCfg:
    """Action specifications for the MDP."""

    joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True)


@configclass
class CommandsCfg:
    """Command specifications for the MDP."""

    base_velocity = mdp.UniformVelocityCommandCfg(
        asset_name="robot",
        resampling_time_range=(0.0, 0.0),
        rel_standing_envs=0.02,
        rel_heading_envs=1.0,
        heading_command=True,
        heading_control_stiffness=0.5,
        debug_vis=True,
        ranges=mdp.UniformVelocityCommandCfg.Ranges(
            lin_vel_x=(0.0, 0.0), lin_vel_y=(0.0, 0.0), ang_vel_z=(0.0, 0.0), heading=(0, 0)
        ),
    )


@configclass
class RewardsCfg:
    """Reward terms for the MDP."""

    # -- task
    track_lin_vel_xy_exp = RewTerm(
        func=mdp.track_lin_vel_xy_exp, weight=1.0, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
    )
    track_ang_vel_z_exp = RewTerm(
        func=mdp.track_ang_vel_z_exp, weight=0.5, params={"command_name": "base_velocity", "std": math.sqrt(0.25)}
    )
    # -- penalties
    lin_vel_z_l2 = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0)
    ang_vel_xy_l2 = RewTerm(func=mdp.ang_vel_xy_l2, weight=-0.05)
    dof_torques_l2 = RewTerm(func=mdp.joint_torques_l2, weight=-1.0e-5)
    dof_acc_l2 = RewTerm(func=mdp.joint_acc_l2, weight=-2.5e-7)
    action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
    feet_air_time = RewTerm(
        func=mdp.feet_air_time,
        weight=0.125,
        params={
            "sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*FOOT"),
            "command_name": "base_velocity",
            "threshold": 0.5,
        },
    )
    undesired_contacts = RewTerm(
        func=mdp.undesired_contacts,
        weight=-1.0,
        params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names=".*THIGH"), "threshold": 1.0},
    )
    # -- optional penalties
    flat_orientation_l2 = RewTerm(func=mdp.flat_orientation_l2, weight=0.0)
    dof_pos_limits = RewTerm(func=mdp.joint_pos_limits, weight=0.0)


@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    time_out = DoneTerm(func=mdp.time_out, time_out=True)
    base_contact = DoneTerm(
        func=mdp.illegal_contact,
        params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0},
    )


@configclass
class EventCfg:
    """Configuration for events."""

    # startup
    physics_material = EventTerm(
        func=mdp.randomize_rigid_body_material,
        mode="startup",
        params={
            "asset_cfg": SceneEntityCfg("robot", body_names=".*"),
            "static_friction_range": (0.8, 0.8),
            "dynamic_friction_range": (0.6, 0.6),
            "restitution_range": (0.0, 0.0),
            "num_buckets": 64,
        },
    )


@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""

    terrain_levels = CurrTerm(func=mdp.terrain_levels_vel)


@configclass
class ViewerCfg:
    """Configuration of the scene viewport camera."""

    eye: tuple[float, float, float] = (7.5, 7.5, 7.5)
    lookat: tuple[float, float, float] = (0.0, 0.0, 0.0)
    cam_prim_path: str = "/OmniverseKit_Persp"
    resolution: tuple[int, int] = (1920, 1080)
    origin_type: Literal["world", "env", "asset_root"] = "world"
    env_index: int = 0
    asset_name: str | None = None


@configclass
class LocomotionVelocityRoughEnvCfg(RLTaskEnvCfg):
    """Configuration for the locomotion velocity-tracking environment."""

    # Scene settings
    scene: MySceneCfg = MySceneCfg(num_envs=4096, env_spacing=2.5)
    viewer: ViewerCfg = ViewerCfg()
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    events: EventCfg = EventCfg()
    curriculum: CurriculumCfg = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 4
        self.episode_length_s = 20.0
        # simulation settings
        self.sim.dt = 0.005
        self.sim.disable_contact_processing = True
        self.sim.physics_material = self.scene.terrain.physics_material
        # update sensor update periods
        # we tick all the sensors based on the smallest update period (physics update period)
        if self.scene.height_scanner is not None:
            self.scene.height_scanner.update_period = self.decimation * self.sim.dt
        if self.scene.contact_forces is not None:
            self.scene.contact_forces.update_period = self.sim.dt

        # check if terrain levels curriculum is enabled - if so, enable curriculum for terrain generator
        # this generates terrains with increasing difficulty and is useful for training
        if getattr(self.curriculum, "terrain_levels", None) is not None:
            if self.scene.terrain.terrain_generator is not None:
                self.scene.terrain.terrain_generator.curriculum = True
        else:
            if self.scene.terrain.terrain_generator is not None:
                self.scene.terrain.terrain_generator.curriculum = False


@configclass
class UnitreeGo2RoughEnvCfg(LocomotionVelocityRoughEnvCfg):
    def __post_init__(self):
        # post init of parent
        super().__post_init__()

        self.scene.robot = UNITREE_GO2_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
        self.scene.height_scanner.prim_path = "{ENV_REGEX_NS}/Robot/base"
        # reduce action scale
        self.actions.joint_pos.scale = 0.25

        # rewards
        self.rewards.feet_air_time.params["sensor_cfg"].body_names = ".*_foot"
        self.rewards.feet_air_time.weight = 0.01
        self.rewards.undesired_contacts = None
        self.rewards.dof_torques_l2.weight = -0.0002
        self.rewards.track_lin_vel_xy_exp.weight = 1.5
        self.rewards.track_ang_vel_z_exp.weight = 0.75
        self.rewards.dof_acc_l2.weight = -2.5e-7

        # terminations
        self.terminations.base_contact.params["sensor_cfg"].body_names = "base"

        # create ros2 camera stream omnigraph
        create_front_cam_omnigraph()


def sub_keyboard_event(event, *args, **kwargs) -> bool:
    global base_command
    if event.type == carb.input.KeyboardEventType.KEY_PRESS:
        if event.input.name == 'W':
            base_command = [1, 0, 0]
        if event.input.name == 'S':
            base_command = [-1, 0, 0]
        if event.input.name == 'A':
            base_command = [0, 1, 0]
        if event.input.name == 'D':
            base_command = [0, -1, 0]
        if event.input.name == 'Q':
            base_command = [0, 0, 1]
        if event.input.name == 'E':
            base_command = [0, 0, -1]
    elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
        base_command = [0, 0, 0]
    return True


def main():
    # acquire input interface
    _input = carb.input.acquire_input_interface()
    _appwindow = omni.appwindow.get_default_app_window()
    _keyboard = _appwindow.get_keyboard()
    _sub_keyboard = _input.subscribe_to_keyboard_events(_keyboard, sub_keyboard_event)

    """Play with RSL-RL agent."""
    # parse configuration
    env_cfg = UnitreeGo2RoughEnvCfg()
    env_cfg.scene.num_envs = 1
    agent_cfg: RslRlOnPolicyRunnerCfg = unitree_go2_agent_cfg

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg)
    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env)

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg["experiment_name"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    resume_path = get_checkpoint_path(log_root_path, agent_cfg["load_run"], agent_cfg["load_checkpoint"])
    print(f"[INFO]: Loading model checkpoint from: {resume_path}")

    # load previously trained model
    ppo_runner = OnPolicyRunner(env, agent_cfg, log_dir=None, device=agent_cfg["device"])
    ppo_runner.load(resume_path)

    # obtain the trained policy for inference
    policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)

    # reset environment
    obs, _ = env.get_observations()
    # simulate environment
    while simulation_app.is_running():
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions = policy(obs)
            # env stepping
            obs, _, _, _ = env.step(actions)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
16,627
Python
34.529914
118
0.669333