file_path
stringlengths 21
202
| content
stringlengths 13
1.02M
| size
int64 13
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 5.43
98.5
| max_line_length
int64 12
993
| alphanum_fraction
float64 0.27
0.91
|
---|---|---|---|---|---|---|
NVIDIA-Omniverse/usd-plugin-samples/CHANGES.md | # Changelog
## 3.0.0
- Added several examples for Hydra 2 scene index plugins
- Fixed issue in build plugInfo.json file configuration for debug builds
- Updated dependencies to stock USD 23.05
- Updated openssl and libcurl dependencies
## 2.0.0
- Added support for general USD plugins beyond schemas
- Updated repo_usd to support flexible build files
- Updated dependencies to USD 22.11 and Python 3.10
- Added sample for dynamic payloads and file format plugins
## 1.0.0
- Initial open source release | 507 | Markdown | 25.736841 | 72 | 0.773176 |
NVIDIA-Omniverse/usd-plugin-samples/repo.toml | # common settings for repo_usd for all USD plug-ins
[repo_usd]
usd_root = "${root}/_build/usd-deps/nv-usd/%{config}"
usd_python_root = "${root}/_build/usd-deps/python"
generate_plugin_buildfiles = true
plugin_buildfile_format = "cmake"
generate_root_buildfile = true
# this tells repo_usd about our codeful schema extension
[repo_usd.plugin.omniExampleSchema]
schema_file = "${root}/src/usd-plugins/schema/omniExampleSchema/schema.usda"
plugin_dir = "${root}/src/usd-plugins/schema/omniExampleSchema"
generate_dir = "${root}/src/usd-plugins/schema/omniExampleSchema/generated"
install_root = "${root}/_install/%{platform}/%{config}/omniExampleSchema"
library_prefix = "OmniExample"
usd_lib_dependencies = [
"arch",
"tf",
"vt",
"sdf",
"usd"
]
# this tells repo_usd about our codeless schema extension
[repo_usd.plugin.omniExampleCodelessSchema]
schema_file = "${root}/src/usd-plugins/schema/omniExampleCodelessSchema/schema.usda"
plugin_dir = "${root}/src/usd-plugins/schema/omniExampleCodelessSchema"
generate_dir = "${root}/src/usd-plugins/schema/omniExampleCodelessSchema/generated"
install_root = "${root}/_install/%{platform}/%{config}/omniExampleCodelessSchema"
is_codeless = true
# this tells repo_usd about the codeless schema for use by
# our file format / dynamic payload infrastructure
[repo_usd.plugin.omniMetSchema]
schema_file = "${root}/src/usd-plugins/schema/omniMetSchema/schema.usda"
plugin_dir = "${root}/src/usd-plugins/schema/omniMetSchema"
generate_dir = "${root}/src/usd-plugins/schema/omniMetSchema/generated"
install_root = "${root}/_install/%{platform}/%{config}/omniMetSchema"
is_codeless = true
# this tells repo_usd about our file format plugin
[repo_usd.plugin.edfFileFormat]
plugin_dir = "${root}/src/usd-plugins/fileFormat/edfFileFormat"
install_root = "${root}/_install/%{platform}/%{config}/edfFileFormat"
include_dir = "include/edfFileFormat"
additional_include_dirs = [
"../../../../_build/usd-deps/nv-usd/%{config}/include/tbb"
]
public_headers = [
"api.h",
"iEdfDataProvider.h",
"edfDataProviderFactory.h"
]
private_headers = [
"edfData.h",
"edfPluginManager.h",
"edfFileFormat.h"
]
cpp_files = [
"edfData.cpp",
"edfDataProviderFactory.cpp",
"edfPluginManager.cpp",
"edfFileFormat.cpp",
"iEdfDataProvider.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"plug",
"vt",
"gf",
"sdf",
"js",
"pcp"
]
# this tells repo_usd about our EDF provider implementing the back-end
# functionality to fulfill the dynamic payload
[repo_usd.plugin.omniMetProvider]
plugin_dir = "${root}/src/usd-plugins/dynamicPayload/omniMetProvider"
install_root = "${root}/_install/%{platform}/%{config}/omniMetProvider"
include_dir = "include/omniMetProvider"
additional_include_dirs = [
"../../../../src/usd-plugins/fileFormat/edfFileFormat",
"../../../../_build/target-deps/libcurl/include"
]
preprocessor_defines = [
"CURL_STATICLIB"
]
depends_on = [
"edfFileFormat"
]
private_headers = [
"api.h",
"omniMetProvider.h"
]
cpp_files = [
"omniMetProvider.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"plug",
"vt",
"gf",
"sdf",
"js",
"pcp",
"usd"
]
[repo_usd.plugin.omniMetProvider."platform:windows-x86_64"]
additional_libs = [
"edfFileFormat",
"zlib",
"ws2_32",
"crypt32"
]
additional_static_libs = [
"libcurl"
]
additional_library_dirs = [
"../../../../_install/%{platform}/%{config}/edfFileFormat/lib",
"../../../../_build/target-deps/libcurl/lib",
"../../../../_build/target-deps/zlib/lib/rt_dynamic/release"
]
[repo_usd.plugin.omniMetProvider."platform:linux-x86_64"]
additional_libs = [
"edfFileFormat"
]
additional_static_libs = [
"curl",
"ssl",
"crypto",
"z"
]
additional_library_dirs = [
"../../../../_install/%{platform}/%{config}/edfFileFormat/lib",
"../../../../_build/target-deps/libcurl/lib",
"../../../../_build/target-deps/zlib/lib",
"../../../../_build/target-deps/openssl/lib"
]
[repo_usd.plugin.omniMetProvider."platform:linux-aarch64"]
additional_libs = [
"edfFileFormat"
]
additional_static_libs = [
"curl",
"ssl",
"crypto",
"z"
]
additional_library_dirs = [
"../../../../_install/%{platform}/%{config}/edfFileFormat/lib",
"../../../../_build/target-deps/libcurl/lib",
"../../../../_build/target-deps/zlib/lib",
"../../../../_build/target-deps/openssl/lib"
]
[repo_usd.plugin.omniGeoSceneIndex]
plugin_dir = "${root}/src/hydra-plugins/omniGeoSceneIndex"
install_root = "${root}/_install/%{platform}/%{config}/omniGeoSceneIndex"
include_dir = "include/omniGeoSceneIndex"
private_headers = [
"api.h",
"computedDependentDataSource.h",
"computedPrimDataSource.h",
"geospatialDataSource.h",
"geospatialSceneIndex.h",
"geospatialSceneIndexPlugin.h",
"localPositionAPIAdapter.h",
"localPositionDataSource.h",
"localPositionSchema.h",
"referencePositionAPIAdapter.h",
"referencePositionDataSource.h",
"referencePositionSchema.h"
]
cpp_files = [
"computedDependentDataSource.cpp",
"computedPrimDataSource.cpp",
"geospatialDataSource.cpp",
"geospatialSceneIndex.cpp",
"geospatialSceneIndexPlugin.cpp",
"localPositionAPIAdapter.cpp",
"localPositionDataSource.cpp",
"localPositionSchema.cpp",
"referencePositionAPIAdapter.cpp",
"referencePositionDataSource.cpp",
"referencePositionSchema.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"work",
"plug",
"vt",
"gf",
"hd",
"hf",
"sdf",
"usd",
"usdGeom",
"usdImaging"
]
additional_include_dirs = [
"${root}/_build/target-deps/omni-geospatial/include"
]
additional_library_dirs = [
"${root}/_build/target-deps/omni-geospatial/bin"
]
additional_libs = [
"omniGeospatial"
]
[repo_usd.plugin.omniMetricsAssembler]
plugin_dir = "${root}/src/hydra-plugins/omniMetricsAssembler"
install_root = "${root}/_install/%{platform}/%{config}/omniMetricsAssembler"
include_dir = "include/omniMetricsAssembler"
private_headers = [
"api.h",
"metricsAdapter.h",
"metricsDataSource.h",
"metricsDoubleDataSource.h",
"metricsSceneIndex.h",
"metricsSceneIndexPlugin.h",
"metricsSchema.h"
]
cpp_files = [
"metricsAdapter.cpp",
"metricsDataSource.cpp",
"metricsDoubleDataSource.cpp",
"metricsSceneIndex.cpp",
"metricsSceneIndexPlugin.cpp",
"metricsSchema.cpp"
]
resource_files = [
"plugInfo.json"
]
usd_lib_dependencies = [
"arch",
"tf",
"work",
"plug",
"vt",
"gf",
"hd",
"hf",
"sdf",
"usd",
"usdGeom",
"usdImaging"
]
[repo_usd.plugin.omniWarpSceneIndex]
plugin_dir = "${root}/src/hydra-plugins/omniWarpSceneIndex"
schema_file = "${root}/src/hydra-plugins/omniWarpSceneIndex/schema.usda"
library_prefix = "OmniWarpSceneIndex"
install_root = "${root}/_install/%{platform}/%{config}/omniWarpSceneIndex"
include_dir = "include/omniWarpSceneIndex"
private_headers = [
"api.h",
"tokens.h",
"warpComputationAPI.h",
"warpComputationAPIAdapter.h",
"warpComputationSchema.h",
"warpPythonModule.h",
"warpSceneIndex.h",
"warpSceneIndexPlugin.h"
]
cpp_files = [
"tokens.cpp",
"warpComputationAPI.cpp",
"warpComputationAPIAdapter.cpp",
"warpComputationSchema.cpp",
"warpPythonModule.cpp",
"warpSceneIndex.cpp",
"warpSceneIndexPlugin.cpp",
"moduleDeps.cpp"
]
pymodule_cpp_files = [
"module.cpp",
"wrapTokens.cpp",
"wrapWarpComputationAPI.cpp"
]
pymodule_files = [
"__init__.py",
"oceanSim/__init__.py",
"oceanSim/preferences.py",
"oceanSim/preferencesUI.py",
"warpModules/__init__.py",
"warpModules/cloth.py",
"warpModules/deform01.py",
"warpModules/deform02.py",
"warpModules/ocean.py",
"warpModules/particles.py"
]
resource_files = [
"plugInfo.json",
"schema.usda"
]
usd_lib_dependencies = [
"arch",
"tf",
"gf",
"plug",
"trace",
"vt",
"work",
"hio",
"garch",
"glf",
"hd",
"hdsi",
"hdx",
"hf",
"pxOsd",
"sdf",
"sdr",
"usd",
"usdGeom",
"usdShade",
"usdImaging",
] | 8,346 | TOML | 23.991018 | 84 | 0.654325 |
NVIDIA-Omniverse/usd-plugin-samples/deps/usd-deps.packman.xml | <project toolsVersion="5.6">
<dependency name="nv-usd" linkPath="../_build/usd-deps/nv-usd/${config}">
<package name="usd.py310.${platform}.usdview.${config}" version="0.23.05-tc.47+v23.05.b53573ea" />
</dependency>
<dependency name="python" linkPath="../_build/usd-deps/python">
<package name="python" version="3.10.13+nv1-${platform}" />
</dependency>
</project> | 380 | XML | 46.624994 | 102 | 0.665789 |
NVIDIA-Omniverse/usd-plugin-samples/src/kit-extension/exts/omni.example.schema/config/extension.toml | [core]
# tells kit that we shouldn't hot reload this extension
reloadable = false
# Load at the start, load all schemas with order -100 (with order -1000 the USD libs are loaded)
# this is necessary (as is setting the extension to auto load)
# so that the schemas get loaded into the UsdSchemaRegistry early enough
order = -100
[package]
# all packages should have this information so it is displayed in the UI properly
author = "NVIDIA USD Core Team"
repository = "https://github.com/NVIDIA-Omniverse/kit-sample-usd-schema"
category = "USD"
title = "USD Example Schema"
version = "1.0.0"
description="Kit extension illustrating how to package a schema extension for use in kit."
keywords = ["schema", "usd"]
readme = "docs/README.md"
changelog = "docs/CHANGES.md"
icon = "data/icon.png"
[dependencies]
# depends on core USD libraries being loaded
"omni.usd.libs" = {}
# when an extension is requested to be enabled, kit will load the python modules
# that are specified here in the order they are specified
# we specify two different python modules, the first is the module we create
# this one will register the plugin with USD so it knows in what module to
# find our schema types
[[python.module]]
name = "omni.example.schema"
# the second extension is the schema python module itself
# this is the module that developers will import to use the schema in Python
[[python.module]]
name = "OmniExampleSchema"
# this tells kit to load these C++ libraries when the extension loads
# (kit will also try to unload them when the extension is unloaded)
# note that this is required to also make loading the schema python module work
# (importing the schema python module will look for the C++ library as a dependency)
# if you don't load the C++ lib here, your PATH / LD_LIBRARY_PATH variables
# should contain the path to your C++ dll otherwise the python module will
# not load properly!
[[native.library]]
path = "OmniExampleSchema/lib/${lib_prefix}omniExampleSchema${lib_ext}" | 1,982 | TOML | 40.312499 | 96 | 0.757316 |
NVIDIA-Omniverse/usd-plugin-samples/src/kit-extension/exts/omni.example.schema/omni/example/schema/__init__.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pxr import Plug
# this extension is responsible for loading both plug-ins that represent the
# example codeful and codeless schema extensions
plugin_root = os.path.join(os.path.dirname(__file__), "..", "..",
"..", "OmniExampleSchema", "resources")
Plug.Registry().RegisterPlugins(plugin_root)
plugin_root = os.path.join(os.path.dirname(__file__), "..", "..",
"..", "OmniExampleCodelessSchema", "resources") | 1,012 | Python | 41.208332 | 76 | 0.737154 |
NVIDIA-Omniverse/usd-plugin-samples/src/kit-extension/exts/omni.example.schema/docs/CHANGES.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.0] - 2023-01-19
- Initial version of an illustrative example of a kit extension loading a set of USD schema extensions | 222 | Markdown | 30.857138 | 102 | 0.734234 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/api.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_ASSEMBLER_API_H
#define OMNI_METRICS_ASSEMBLER_API_H
#include "pxr/base/arch/export.h"
#if defined(PXR_STATIC)
# define OMNIMETRICSASSEMBLER_API
# define OMNIMETRICSASSEMBLER_API_TEMPLATE_CLASS(...)
# define OMNIMETRICSASSEMBLER_API_TEMPLATE_STRUCT(...)
# define OMNIMETRICSASSEMBLER_LOCAL
#else
# if defined(OMNIMETRICSASSEMBLER_EXPORTS)
# define OMNIMETRICSASSEMBLER_API ARCH_EXPORT
# define OMNIMETRICSASSEMBLER_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
# define OMNIMETRICSASSEMBLER_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
# else
# define OMNIMETRICSASSEMBLER_API ARCH_IMPORT
# define OMNIMETRICSASSEMBLER_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
# define OMNIMETRICSASSEMBLER_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
# endif
# define OMNIMETRICSASSEMBLER_LOCAL ARCH_HIDDEN
#endif
#endif // OMNI_METRICS_ASSEMBLER_API_H
| 1,592 | C | 40.921052 | 102 | 0.743719 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_METRICS_DATA_SOURCE_H_
#define HD_OMNI_METRICS_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
#include <pxr/imaging/hd/sceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNIMETRICSDATASOURCE_TOKENS \
(metricsPreservedXform)
TF_DECLARE_PUBLIC_TOKENS(HdOmniMetricsDataSourceTokens, OMNIMETRICSASSEMBLER_API,
HDOMNIMETRICSDATASOURCE_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniMetricsDataSource
///
/// A datasource representing a wrapped view of an existing flattened
/// datasource where the xform token is intercepted and a new metric-corrected
/// transform matrix is dynamically computed.
///
class HdOmniMetricsDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniMetricsDataSource);

    // wraps the data source of the prim at primPath from the given scene index
    HdOmniMetricsDataSource(const HdSceneIndexBase& sceneIndex, const SdfPath& primPath,
        HdContainerDataSourceHandle wrappedDataSource);

    // swaps the underlying wrapped data source for a new one
    void UpdateWrappedDataSource(HdContainerDataSourceHandle wrappedDataSource);

    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;

    // determines if the data source would be dirtied based on the locators given
    bool IsPrimDirtied(const HdDataSourceLocatorSet& locators);

private:
    // whether the given container carries metrics information
    bool _HasMetricsInformation(HdContainerDataSourceHandle dataSource);
    HdBoolDataSourceHandle _GetInputResetXformStackSource();
    HdDataSourceBaseHandle _ComputeCorrectedXform();

private:
    const HdSceneIndexBase& _sceneIndex;
    SdfPath _primPath;
    HdContainerDataSourceHandle _wrappedDataSource;

    // cached computed datasources
    HdContainerDataSourceAtomicHandle _computedCorrectedXformDataSource;

    // NOTE(review): appears to produce a metrics-corrected (or preserved)
    // matrix on sampling — see _ComputeCorrectedMatrix / _GetMpuCorrective
    class _MetricsCorrectedMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_MetricsCorrectedMatrixDataSource);

        _MetricsCorrectedMatrixDataSource(HdContainerDataSourceHandle inputDataSource,
            HdContainerDataSourceHandle parentDataSource,
            bool isMetricsCorrectiveSource);

        // typed sampled data source overrides
        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;

    private:
        HdMatrixDataSourceHandle _GetInputMatrixDataSource() const;
        HdMatrixDataSourceHandle _GetParentMatrixDataSource() const;
        HdMatrixDataSourceHandle _GetMetricsPreservedMatrixDataSource() const;
        HdMatrixDataSourceHandle _GetParentMetricsPreservedMatrixDataSource() const;
        GfMatrix4d _ComputeCorrectedMatrix(Time shutterOffset);
        GfMatrix4d _GetMpuCorrective();

        HdContainerDataSourceHandle _inputDataSource;
        HdContainerDataSourceHandle _parentDataSource;
        bool _isMetricsCorrectiveSource;
    };

    HD_DECLARE_DATASOURCE_HANDLES(_MetricsCorrectedMatrixDataSource);
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniMetricsDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_METRICS_DATA_SOURCE_H_ | 3,949 | C | 34.267857 | 88 | 0.736389 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndexPlugin.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/sceneIndexPluginRegistry.h>
#include "metricsSceneIndexPlugin.h"
#include "metricsSceneIndex.h"
PXR_NAMESPACE_OPEN_SCOPE
// private token for the name under which this scene index plugin
// is registered with Hydra (used by the registry functions below)
TF_DEFINE_PRIVATE_TOKENS(
    _tokens,
    ((sceneIndexPluginName, "OmniMetricsSceneIndexPlugin"))
);
// registers the OmniMetricsSceneIndexPlugin type with the scene index
// plugin registry so Hydra can discover and instantiate it
TF_REGISTRY_FUNCTION(TfType)
{
    HdSceneIndexPluginRegistry::Define<OmniMetricsSceneIndexPlugin>();
}
TF_REGISTRY_FUNCTION(HdSceneIndexPlugin)
{
    // insertion phase 1 requests a spot very early in the chain of
    // scene indices appended by registered plugins
    const HdSceneIndexPluginRegistry::InsertionPhase insertionPhase = 1;
    // register this scene index plugin with all renderers
    // (the empty renderer name means "all") and try to insert
    // ourselves early in the phases at the start
    HdSceneIndexPluginRegistry::GetInstance().RegisterSceneIndexForRenderer(
        "",
        _tokens->sceneIndexPluginName,
        nullptr,
        insertionPhase,
        HdSceneIndexPluginRegistry::InsertionOrderAtStart);
}
OmniMetricsSceneIndexPlugin::OmniMetricsSceneIndexPlugin() = default;
// creates the OmniMetricsSceneIndex over the given input scene,
// forwarding any input arguments through unchanged
HdSceneIndexBaseRefPtr OmniMetricsSceneIndexPlugin::_AppendSceneIndex(
    const HdSceneIndexBaseRefPtr& inputScene,
    const HdContainerDataSourceHandle& inputArgs)
{
    return OmniMetricsSceneIndex::New(inputScene, inputArgs);
}
PXR_NAMESPACE_CLOSE_SCOPE | 1,755 | C++ | 31.518518 | 76 | 0.768091 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include "metricsSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniMetricsSchemaTokens, HDOMNI_METRICS_SCHEMA_TOKENS);
// constructs the schema as a view over the given container data source
HdOmniMetricsSchema::HdOmniMetricsSchema(HdContainerDataSourceHandle container) :
    HdSchema(container)
{
}
// Retrieves the data source holding the layer's metersPerUnit value,
// or a null handle when the underlying container has no such entry.
HdDoubleDataSourceHandle HdOmniMetricsSchema::GetLayerMpu()
{
    const TfToken& layerMpuToken = HdOmniMetricsSchemaTokens->layerMpu;
    return _GetTypedDataSource<HdDoubleDataSource>(layerMpuToken);
}
// Retrieves the data source holding the stage's metersPerUnit value,
// or a null handle when the underlying container has no such entry.
HdDoubleDataSourceHandle HdOmniMetricsSchema::GetStageMpu()
{
    const TfToken& stageMpuToken = HdOmniMetricsSchemaTokens->stageMpu;
    return _GetTypedDataSource<HdDoubleDataSource>(stageMpuToken);
}
// Builds a retained container holding the layer and stage metersPerUnit
// values; an entry is added only for each handle that is non-null, so the
// resulting container may have zero, one, or two members.
HdContainerDataSourceHandle HdOmniMetricsSchema::BuildRetained(
    const HdDoubleDataSourceHandle& layerMpu,
    const HdDoubleDataSourceHandle& stageMpu)
{
    TfToken entryNames[2];
    HdDataSourceBaseHandle entryValues[2];
    size_t entryCount = 0;

    if (layerMpu)
    {
        entryNames[entryCount] = HdOmniMetricsSchemaTokens->layerMpu;
        entryValues[entryCount] = layerMpu;
        entryCount++;
    }

    if (stageMpu)
    {
        entryNames[entryCount] = HdOmniMetricsSchemaTokens->stageMpu;
        entryValues[entryCount] = stageMpu;
        entryCount++;
    }

    return HdRetainedContainerDataSource::New(entryCount, entryNames, entryValues);
}
// Extracts the metrics container from the parent container (stored under
// the 'metrics' token) and wraps it in a schema; yields a schema over a
// null container when no parent container is provided.
HdOmniMetricsSchema HdOmniMetricsSchema::GetFromParent(const HdContainerDataSourceHandle& fromParentContainer)
{
    if (!fromParentContainer)
    {
        return HdOmniMetricsSchema(nullptr);
    }

    HdDataSourceBaseHandle metricsEntry = fromParentContainer->Get(HdOmniMetricsSchemaTokens->metrics);
    return HdOmniMetricsSchema(HdContainerDataSource::Cast(metricsEntry));
}
// returns the default data source locator for metrics data:
// a single-element locator rooted at the 'metrics' token
const HdDataSourceLocator& HdOmniMetricsSchema::GetDefaultLocator()
{
    static const HdDataSourceLocator locator(HdOmniMetricsSchemaTokens->metrics);
    return locator;
}
// records the layer MPU data source to include in the built container;
// returns *this to allow fluent chaining
HdOmniMetricsSchema::Builder& HdOmniMetricsSchema::Builder::SetLayerMpu(const HdDoubleDataSourceHandle& layerMpu)
{
    _layerMpu = layerMpu;
    return *this;
}
// records the stage MPU data source to include in the built container;
// returns *this to allow fluent chaining
HdOmniMetricsSchema::Builder& HdOmniMetricsSchema::Builder::SetStageMpu(const HdDoubleDataSourceHandle& stageMpu)
{
    _stageMpu = stageMpu;
    return *this;
}
// Assembles the retained metrics container from whatever values were
// recorded through the setters (null handles are simply omitted).
HdContainerDataSourceHandle HdOmniMetricsSchema::Builder::Build()
{
    return HdOmniMetricsSchema::BuildRetained(_layerMpu, _stageMpu);
}
PXR_NAMESPACE_CLOSE_SCOPE | 2,762 | C++ | 28.08421 | 113 | 0.761405 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndexPlugin.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_SCENE_INDEX_PLUGIN_H_
#define OMNI_METRICS_SCENE_INDEX_PLUGIN_H_
#include <pxr/pxr.h>
#include <pxr/imaging/hd/sceneIndexPlugin.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
///
/// \class OmniMetricsSceneIndexPlugin
///
/// Defines the Hydra 2.0 scene index plugin that creates
/// the OmniMetricsSceneIndex.
///
class OmniMetricsSceneIndexPlugin : public HdSceneIndexPlugin
{
public:
OmniMetricsSceneIndexPlugin();
protected:
HdSceneIndexBaseRefPtr _AppendSceneIndex(const HdSceneIndexBaseRefPtr& inputScene,
const HdContainerDataSourceHandle& inputArgs) override;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_METRICS_SCENE_INDEX_PLUGIN_H_ | 1,275 | C | 29.380952 | 86 | 0.763137 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDoubleDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_METRICS_DOUBLE_DATA_SOURCE_H_
#define HD_OMNI_METRICS_DOUBLE_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
PXR_NAMESPACE_OPEN_SCOPE
///
/// \class HdOmniMetricsDoubleDataSource
///
/// Concrete implementation for a simple data source that
/// holds a uniform double value.
///
class HdOmniMetricsDoubleDataSource : public HdDoubleDataSource
{
public:
HD_DECLARE_DATASOURCE(HdOmniMetricsDoubleDataSource);
VtValue GetValue(Time shutterOffset) override;
double GetTypedValue(Time shutterOffset) override;
bool GetContributingSampleTimesForInterval(
Time startTime,
Time endTime,
std::vector<Time>* outSampleTimes) override;
private:
HdOmniMetricsDoubleDataSource(double value);
double _value;
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniMetricsDoubleDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_METRICS_DOUBLE_DATA_SOURCE_H_ | 1,550 | C | 29.411764 | 75 | 0.759355 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDoubleDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "metricsDoubleDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// stores the constant double value this data source will report
HdOmniMetricsDoubleDataSource::HdOmniMetricsDoubleDataSource(double value) : _value(value)
{
}
// Returns the held value boxed in a VtValue; delegates to the typed
// accessor so both paths report the same number.
VtValue HdOmniMetricsDoubleDataSource::GetValue(Time shutterOffset)
{
    double sampledValue = GetTypedValue(shutterOffset);
    return VtValue(sampledValue);
}
// returns the stored value; the value is uniform over time,
// so shutterOffset is intentionally ignored
double HdOmniMetricsDoubleDataSource::GetTypedValue(Time shutterOffset)
{
    return _value;
}
// Reports that no time samples contribute over the interval (the held
// value is uniform); outSampleTimes is left untouched.
bool HdOmniMetricsDoubleDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    return false;
}
PXR_NAMESPACE_CLOSE_SCOPE | 1,178 | C++ | 27.756097 | 90 | 0.773345 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_SCHEMA_H_
#define OMNI_METRICS_SCHEMA_H_
#include <pxr/imaging/hd/schema.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNI_METRICS_SCHEMA_TOKENS \
(metrics) \
(layerMpu) \
(stageMpu)
TF_DECLARE_PUBLIC_TOKENS(HdOmniMetricsSchemaTokens, OMNIMETRICSASSEMBLER_API,
HDOMNI_METRICS_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniMetricsSchema
///
/// Hydra schema exposing metrics (metersPerUnit) information recorded
/// for a prim: the MPU of a contributing layer and the MPU of the stage.
///
class HdOmniMetricsSchema : public HdSchema
{
public:
    // wraps the given container data source in this schema view
    HdOmniMetricsSchema(HdContainerDataSourceHandle container);

    // returns the layer metersPerUnit data source (may be null)
    OMNIMETRICSASSEMBLER_API
    HdDoubleDataSourceHandle GetLayerMpu();

    // returns the stage metersPerUnit data source (may be null)
    OMNIMETRICSASSEMBLER_API
    HdDoubleDataSourceHandle GetStageMpu();

    // builds a retained container holding whichever of the two
    // handles are non-null
    OMNIMETRICSASSEMBLER_API
    static HdContainerDataSourceHandle
    BuildRetained(
        const HdDoubleDataSourceHandle& layerMpu,
        const HdDoubleDataSourceHandle& stageMpu);

    // fluent helper for assembling the retained metrics container
    class Builder
    {
    public:
        OMNIMETRICSASSEMBLER_API
        Builder& SetLayerMpu(const HdDoubleDataSourceHandle& layerMpu);

        OMNIMETRICSASSEMBLER_API
        Builder& SetStageMpu(const HdDoubleDataSourceHandle& stageMpu);

        // assembles the container from the values set so far
        OMNIMETRICSASSEMBLER_API
        HdContainerDataSourceHandle Build();

    private:
        HdDoubleDataSourceHandle _layerMpu;
        HdDoubleDataSourceHandle _stageMpu;
    };

    // retrieves the metrics container from the parent container
    // (stored under the 'metrics' token) and wraps it in a schema
    OMNIMETRICSASSEMBLER_API
    static HdOmniMetricsSchema GetFromParent(
        const HdContainerDataSourceHandle& fromParentContainer);

    // default locator rooted at the 'metrics' token
    OMNIMETRICSASSEMBLER_API
    static const HdDataSourceLocator& GetDefaultLocator();
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // end OMNI_METRICS_SCHEMA_H_ | 2,292 | C | 28.025316 | 79 | 0.692845 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdGeom/xformable.h>
#include <pxr/usd/usdGeom/sphere.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/imaging/hd/overlayContainerDataSource.h>
#include <pxr/imaging/hd/xformSchema.h>
#include "metricsAdapter.h"
#include "metricsDoubleDataSource.h"
#include "metricsSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// registers OmniMetricsAssemblerAdapter with the TfType system and
// installs the factory UsdImaging uses to instantiate the adapter
TF_REGISTRY_FUNCTION(TfType)
{
    typedef OmniMetricsAssemblerAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter> >();
}
// no resources owned beyond the base class; nothing to release here
OmniMetricsAssemblerAdapter::~OmniMetricsAssemblerAdapter()
{
}
// Detects a metrics (MPU) divergence for sphere prims: compares the
// metersPerUnit of the layer holding the strongest `radius` opinion
// against the stage's metersPerUnit. When they differ, returns a
// retained container carrying both values under the 'metrics' token;
// otherwise returns null (no data source contribution).
HdContainerDataSourceHandle OmniMetricsAssemblerAdapter::GetImagingSubprimData(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
{
    if (prim.IsA<UsdGeomSphere>())
    {
        double stageMpu = 0.0;
        UsdStageRefPtr stage = prim.GetStage();
        if (!this->_GetMpuFromLayer(stage->GetRootLayer(), stageMpu))
        {
            // no explicitly authored MPU, so assume the documented
            // default value of centimeters
            // Open Issue: interesting case is when it isn't defined on
            // another layer - should we assume documented default of cm
            // or assume this means we should use the stage MPU?
            stageMpu = 0.01;
        }
        // this PoC only looks at Spheres as a simplification of a much more general problem
        // in this case, an MPU divergence is defined as layer.MPU != stage.MPU for the layer
        // containing the site of the strongest opinion of the `radius` property of the sphere
        UsdGeomSphere sphere = UsdGeomSphere(prim);
        UsdAttribute radiusAttr = sphere.GetRadiusAttr();
        // GetPropertyStack will give us the property specs for the attribute
        // in strongest to weakest order
        SdfPropertySpecHandleVector propertySpecs = radiusAttr.GetPropertyStack(UsdTimeCode::Default());
        if (propertySpecs.size() != 0)
        {
            // only need to process if there are any property specs for the attribute
            // and we only want the strongest
            // Open Issue: may need to take into account whether the property is blocked
            // which would indicate that it has no authored value
            SdfPropertySpecHandle strongestSpec = propertySpecs[0];
            SdfLayerHandle targetLayer = strongestSpec->GetLayer();
            double layerMpu = 0.0;
            if (!this->_GetMpuFromLayer(targetLayer, layerMpu))
            {
                // no explicitly authored layerMpu, so assume
                // it's in the same MPU as the stage
                return nullptr;
            }
            // are the layer MPU and stage MPU different? if so, we have a metrics divergence
            if (layerMpu != stageMpu)
            {
                // there is a divergence, we record this information
                // in a hydra data source and send that data source back
                HdDataSourceBaseHandle metricsDataSource = HdOmniMetricsSchema::Builder()
                    .SetLayerMpu(HdOmniMetricsDoubleDataSource::New(layerMpu))
                    .SetStageMpu(HdOmniMetricsDoubleDataSource::New(stageMpu))
                    .Build();
                return HdRetainedContainerDataSource::New(
                    HdOmniMetricsSchemaTokens->metrics,
                    metricsDataSource);
            }
        }
        else
        {
            // in this case, there are no authored values for the property spec
            // this one is semantically tricky, because we rely on a (potential)
            // fallback value from the schema - but we have no layer target on which
            // this is technically assigned. As such, we assume that the fallback
            // value is defined on the root layer itself.
            TF_STATUS("No property specs in the property stack for the radius attribute!");
        }
    }
    return nullptr;
}
// Reports which data source locators must be invalidated for the given prim
// when any of its properties change; only spheres are considered by this PoC.
HdDataSourceLocatorSet OmniMetricsAssemblerAdapter::InvalidateImagingSubprim(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
{
    // anything other than a sphere requires no metrics-related invalidation
    if (!prim.IsA<UsdGeomSphere>())
    {
        return HdDataSourceLocatorSet();
    }
    // dirty the sphere's transform so any corrective can be recomputed
    static const HdDataSourceLocatorSet sphereLocators {
        HdXformSchema::GetDefaultLocator()
    };
    return sphereLocators;
}
bool OmniMetricsAssemblerAdapter::_GetMpuFromLayer(const SdfLayerHandle& layer, double& mpu)
{
SdfDataRefPtr metadata = layer->GetMetadata();
VtValue mpuValue;
if (metadata->Has(SdfPath::AbsoluteRootPath(), UsdGeomTokens->metersPerUnit, &mpuValue))
{
mpu = mpuValue.Get<double>();
}
else
{
TF_WARN("Unable to retrieve MPU metadata from layer!");
return false;
}
return true;
}
PXR_NAMESPACE_CLOSE_SCOPE | 5,659 | C++ | 36.986577 | 104 | 0.661778 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndex.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_SCENE_INDEX_H_
#define OMNI_METRICS_SCENE_INDEX_H_
#include <pxr/pxr.h>
#include <pxr/usd/sdf/pathTable.h>
#include <pxr/imaging/hd/filteringSceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniMetricsSceneIndex);
///
/// \class OmniMetricsSceneIndex
///
/// A scene index responsible for observing an input flattened scene
/// index and producing a comparable scene in which metrics correctives
/// have been added to the appropriate places in the scene hierarchy
/// to correct for metrics divergences.
///
/// Note that with Render Delegate 2.0 and the ability to pull data
/// from a non-flattened scene, this implementation will have to be
/// revisited to work with the unflattened xform representation of
/// the hydra prims.
///
class OmniMetricsSceneIndex : public HdSingleInputFilteringSceneIndexBase
{
public:
    /// Creates a new metrics scene index filtering the given input scene.
    /// \p inputArgs is accepted for interface symmetry (see the .cpp; it is
    /// not currently consumed).
    OMNIMETRICSASSEMBLER_API
    static OmniMetricsSceneIndexRefPtr New(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs = nullptr);
    OMNIMETRICSASSEMBLER_API
    ~OmniMetricsSceneIndex() override;
    /// Returns the wrapped (metrics-aware) prim for \p primPath, falling
    /// back to the input scene's prim if no wrapper was recorded.
    OMNIMETRICSASSEMBLER_API
    HdSceneIndexPrim GetPrim(const SdfPath& primPath) const override;
    /// Forwards directly to the input scene; this filter changes no topology.
    OMNIMETRICSASSEMBLER_API
    SdfPathVector GetChildPrimPaths(const SdfPath& primPath) const override;
protected:
    OmniMetricsSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs);
    // these three are provided by HdSingleInputFilteringSceneIndexBase
    // and must be overridden by inheritors
    virtual void _PrimsAdded(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::AddedPrimEntries& entries) override;
    virtual void _PrimsRemoved(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::RemovedPrimEntries& entries) override;
    virtual void _PrimsDirtied(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::DirtiedPrimEntries& entries) override;
private:
    // invalidates cached corrected xforms across the subtree rooted at primPath,
    // collecting the affected prim paths into dirtyEntries
    void _DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators, HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries);
    // wraps primPath and its whole subtree with metrics data sources
    void _WrapPrimsRecursively(const SdfPath& primPath);
private:
    // wraps all prims in the scene with a metrics data source
    SdfPathTable<HdSceneIndexPrim> _wrappedPrims;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 2,972 | C | 33.97647 | 146 | 0.773217 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsAdapter.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_METRICS_ASSEMBLER_ADAPTER_H_
#define OMNI_METRICS_ASSEMBLER_ADAPTER_H_
#include <pxr/pxr.h>
#include <pxr/usdImaging/usdImaging/apiSchemaAdapter.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
///
/// \class OmniMetricsAssemblerAdapter
///
/// Stage scene index adapter which has the opportunity to evaluate each
/// prim in a scene to determine if the prim has divergent metrics present.
///
/// We use a trick here that a null adapter, while deriving from UsdImagingAPISchemaAdapter
/// gets a call for each USD prim traversed in the scene by the stage scene index. These
/// are known as "keyless adapters" and are supported by the UsdImagingAdapterRegistry.
///
class OmniMetricsAssemblerAdapter : public UsdImagingAPISchemaAdapter
{
public:
    OMNIMETRICSASSEMBLER_API
    ~OmniMetricsAssemblerAdapter() override;
    using BaseAdapter = UsdImagingAPISchemaAdapter;
    /// Returns a metrics data source for sphere prims whose strongest radius
    /// opinion lives on a layer with an MPU diverging from the stage's MPU;
    /// returns nullptr for all other prims (see the .cpp for details).
    OMNIMETRICSASSEMBLER_API
    HdContainerDataSourceHandle GetImagingSubprimData(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const UsdImagingDataSourceStageGlobals& stageGlobals
    ) override;
    /// Reports the locators to invalidate for a changed prim; for spheres
    /// this is the xform locator so correctives can be recomputed.
    OMNIMETRICSASSEMBLER_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const TfTokenVector& properties
    ) override;
private:
    ///
    /// Retrieves the MPU value from the layer and returns it in mpu.
    ///
    /// Returns true if the MPU value was able to be retrieved from the layer
    /// and false otherwise.
    ///
    bool _GetMpuFromLayer(const SdfLayerHandle& layer, double& mpu);
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_METRICS_ASSEMBLER_ADAPTER_H_ | 2,348 | C | 31.178082 | 91 | 0.739779 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/xformSchema.h>
#include "metricsDataSource.h"
#include "metricsSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniMetricsDataSourceTokens,
HDOMNIMETRICSDATASOURCE_TOKENS);
// Wraps the given prim-level container data source so xform queries can be
// intercepted; keeps a reference to the owning scene index for parent lookups.
HdOmniMetricsDataSource::HdOmniMetricsDataSource(const HdSceneIndexBase& index, const SdfPath& primPath,
    HdContainerDataSourceHandle wrappedDataSource) :
    _sceneIndex(index),
    _primPath(primPath),
    _wrappedDataSource(wrappedDataSource)
{
}
// Replaces the underlying wrapped data source (used when a prim is re-added
// over an existing wrapper). Does not clear the cached corrected xform;
// callers rely on IsPrimDirtied for invalidation.
void HdOmniMetricsDataSource::UpdateWrappedDataSource(
    HdContainerDataSourceHandle wrappedDataSource)
{
    _wrappedDataSource = wrappedDataSource;
}
// Returns every name supported by the wrapped data source plus one extra
// token under which the original (uncorrected) flattened transform can be
// retrieved; corrected parents are consulted through that token during
// child computations.
TfTokenVector HdOmniMetricsDataSource::GetNames()
{
    TfTokenVector names;
    if (_wrappedDataSource != nullptr)
    {
        names = _wrappedDataSource->GetNames();
    }
    names.push_back(HdOmniMetricsDataSourceTokens->metricsPreservedXform);
    return names;
}
// Serves data source children by name. Intercepts the flattened xform to
// return the (potentially) metrics-corrected transform, exposes the
// original transform under the preserved-xform token, and defers every
// other name to the wrapped data source.
HdDataSourceBaseHandle HdOmniMetricsDataSource::Get(const TfToken& name)
{
    if (name == HdXformSchemaTokens->xform)
    {
        // intercept: compute (or fetch cached) metrics-corrected transform
        return this->_ComputeCorrectedXform();
    }
    if (_wrappedDataSource == nullptr)
    {
        return nullptr;
    }
    if (name == HdOmniMetricsDataSourceTokens->metricsPreservedXform)
    {
        // the original flattened matrix of the wrapped data source
        return _wrappedDataSource->Get(HdXformSchemaTokens->xform);
    }
    return _wrappedDataSource->Get(name);
}
bool HdOmniMetricsDataSource::IsPrimDirtied(const HdDataSourceLocatorSet& locators)
{
static const HdContainerDataSourceHandle containerNull(nullptr);
if (locators.Intersects(HdXformSchema::GetDefaultLocator()))
{
if (HdContainerDataSource::AtomicLoad(_computedCorrectedXformDataSource) != nullptr)
{
HdContainerDataSource::AtomicStore(_computedCorrectedXformDataSource, containerNull);
return true;
}
}
return false;
}
// Produces (and caches) the xform container data source for this prim:
// either a metrics-corrected transform (when this prim or an ancestor has
// metrics information) or the original flattened transform pass-through.
HdDataSourceBaseHandle HdOmniMetricsDataSource::_ComputeCorrectedXform()
{
    // there are two cases to consider on the underlying wrapped data source:
    // 1. The wrapped data source has metrics information.
    //    This means that the adapter determined there was a metrics
    //    divergence in the layers for the stage and the strongest
    //    opinionated layer for the xformOpOrder attribute. In this case
    //    it means that we have to correct the divergence directly by
    //    computing a new flattened local transform for the hydra prim
    // 2. The wrapped data source does not have metrics information.
    //    This means that either the underlying prim has no Xformable data
    //    at all or that there was no metrics divergence detected.
    //    However, it could be the child of a divergent prim, and since
    //    all xforms have been flattened by the flattening scene index
    //    prior to us wrapping the data, we need to compute a new flattened
    //    matrix that takes into account the changes on the parent.
    //
    // the tricky thing is the dirtying associated with the cached data -
    // computing whether a prim with divergence changed directly is easy
    // but that change also invalidates the children (recursively)
    // if we have already cached the value, and the cache is valid
    // return the computed cached value rather than recompute it
    HdContainerDataSourceHandle computedCorrectedXformDataSource =
        HdContainerDataSource::AtomicLoad(_computedCorrectedXformDataSource);
    if (computedCorrectedXformDataSource != nullptr)
    {
        return computedCorrectedXformDataSource;
    }
    if (this->_HasMetricsInformation(_wrappedDataSource))
    {
        // in this case, we need the parent's flattened transform to recover
        // the original local transform of the prim, once we have the original
        // local transform we can apply the corrective as the last xformOp
        // then reflatten by multiplying the parent transform again
        SdfPath parentPath = _primPath.GetParentPath();
        HdSceneIndexPrim parentPrim = _sceneIndex.GetPrim(parentPath);
        computedCorrectedXformDataSource = HdXformSchema::Builder()
            .SetMatrix(HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::New(
                _wrappedDataSource, parentPrim.dataSource, true))
            .SetResetXformStack(this->_GetInputResetXformStackSource())
            .Build();
    }
    else
    {
        HdContainerDataSourceHandle metricsDataSource = nullptr;
        if (_primPath == SdfPath::AbsoluteRootPath())
        {
            // just directly get whatever the absolute root path has
            computedCorrectedXformDataSource = HdContainerDataSource::Cast(_wrappedDataSource->Get(HdXformSchemaTokens->xform));
        }
        else
        {
            // walk ancestors looking for the nearest one carrying metrics
            // information - its correction must be propagated downward
            for(SdfPath p = _primPath.GetParentPath(); p != SdfPath::AbsoluteRootPath(); p = p.GetParentPath())
            {
                HdSceneIndexPrim prim = _sceneIndex.GetPrim(p);
                if (this->_HasMetricsInformation(prim.dataSource))
                {
                    // a parent along the chain did have a metrics
                    // corrected xform, so we will need to recompute
                    metricsDataSource = prim.dataSource;
                    break;
                }
            }
            if (metricsDataSource != nullptr)
            {
                // compute a new flattened xform from the parent
                SdfPath parentPath = _primPath.GetParentPath();
                HdSceneIndexPrim parentPrim = _sceneIndex.GetPrim(parentPath);
                computedCorrectedXformDataSource = HdXformSchema::Builder()
                    .SetMatrix(HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::New(
                        _wrappedDataSource, parentPrim.dataSource, false))
                    .SetResetXformStack(this->_GetInputResetXformStackSource())
                    .Build();
            }
            else
            {
                // no parent in the chain had a metrics corrected xform
                // so the result is really just the original flattened matrix
                computedCorrectedXformDataSource = HdContainerDataSource::Cast(_wrappedDataSource->Get(HdXformSchemaTokens->xform));
            }
        }
    }
    // cache the data source we intend to use
    HdContainerDataSource::AtomicStore(_computedCorrectedXformDataSource, computedCorrectedXformDataSource);
    return computedCorrectedXformDataSource;
}
// True when the container carries a defined metrics schema, i.e. the adapter
// detected an MPU divergence for the underlying prim.
bool HdOmniMetricsDataSource::_HasMetricsInformation(HdContainerDataSourceHandle handle)
{
    return HdOmniMetricsSchema::GetFromParent(handle).IsDefined();
}
// Passes through the wrapped data source's resetXformStack flag (nullptr
// when there is no wrapped data source or the flag is absent).
HdBoolDataSourceHandle HdOmniMetricsDataSource::_GetInputResetXformStackSource()
{
    if (_wrappedDataSource == nullptr)
    {
        return nullptr;
    }
    return HdBoolDataSource::Cast(
        _wrappedDataSource->Get(HdXformSchemaTokens->resetXformStack));
}
// Captures the prim's own (input) and parent containers; when
// isMetricsCorrectiveSource is true this prim itself diverged and the MPU
// corrective is applied to its recovered local transform.
HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_MetricsCorrectedMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource,
    HdContainerDataSourceHandle parentDataSource,
    bool isMetricsCorrectiveSource) :
    _inputDataSource(inputDataSource),
    _parentDataSource(parentDataSource),
    _isMetricsCorrectiveSource(isMetricsCorrectiveSource)
{
}
// Boxes the typed matrix result for the untyped sampled-data-source API.
VtValue HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}
// Computes the corrected flattened matrix on demand for the given sample time.
GfMatrix4d HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return this->_ComputeCorrectedMatrix(shutterOffset);
}
// Merges the sample times of both inputs (the prim's own matrix and the
// parent's matrix), since a change in either affects the corrected result.
bool HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle sources[] = {
        this->_GetInputMatrixDataSource(),
        this->_GetParentMatrixDataSource()
    };
    return HdGetMergedContributingSampleTimesForInterval(
        TfArraySize(sources),
        sources,
        startTime,
        endTime,
        outSampleTimes
    );
}
// Returns the parent's pre-correction (preserved) matrix data source when
// the parent is wrapped and exposes one; otherwise falls back to the
// parent's current matrix.
HdMatrixDataSourceHandle HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetParentMetricsPreservedMatrixDataSource() const
{
    HdOmniMetricsDataSourceHandle metricsDataSource = HdOmniMetricsDataSource::Cast(_parentDataSource);
    if (metricsDataSource != nullptr)
    {
        HdContainerDataSourceHandle xformDataSource =
            HdContainerDataSource::Cast(
                metricsDataSource->Get(HdOmniMetricsDataSourceTokens->metricsPreservedXform));
        if (xformDataSource == nullptr)
        {
            // wrapped parent has no preserved xform; use its current one
            return this->_GetParentMatrixDataSource();
        }
        HdMatrixDataSourceHandle matrixDataSource = HdMatrixDataSource::Cast(
            xformDataSource->Get(HdXformSchemaTokens->matrix));
        if (matrixDataSource == nullptr)
        {
            TF_WARN("Xform schema not defined on preserved container data source!");
        }
        return (matrixDataSource != nullptr) ? matrixDataSource : this->_GetParentMatrixDataSource();
    }
    // if it didn't have metrics information attached
    // just get the original matrix
    return this->_GetParentMatrixDataSource();
}
// Returns the parent's current matrix data source, or nullptr when the
// parent container carries no defined xform schema.
HdMatrixDataSourceHandle HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetParentMatrixDataSource() const
{
    HdXformSchema parentXform = HdXformSchema::GetFromParent(_parentDataSource);
    return parentXform.IsDefined() ? parentXform.GetMatrix() : nullptr;
}
// Returns the prim's own flattened matrix data source from the wrapped input.
HdMatrixDataSourceHandle HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetInputMatrixDataSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetMatrix();
}
// Computes the final flattened matrix for the wrapped prim: recovers its
// original local transform from the flattened input and the parent's
// preserved (pre-correction) transform, optionally applies the MPU
// corrective, then reflattens against the parent's current (possibly
// corrected) transform.
//
// Fix: the original code re-declared `GfMatrix4d parentMatrix` inside the
// corrective branch, shadowing the outer preserved-parent matrix; the
// locals are renamed so each matrix's role is unambiguous (behavior is
// unchanged).
GfMatrix4d HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_ComputeCorrectedMatrix(Time shutterOffset)
{
    // since we are dealing with flattened transformations, we have to recover
    // the local transform of the input data source in question
    // we can do this by knowing the prim's flattened transform
    // and the original transform of its parent (the _parentDataSource)
    // Let FT be the flattened transform, P be the transform of the parent,
    // and LT be the child's local transform. The flattened transform would
    // then have been computed as FT = (P)(LT), thus to recover LT we divide
    // out by P, which results in LT = (FT) / (P) = FT * (P)^-1
    // so we need the inverse of the original parent transform
    HdMatrixDataSourceHandle parentPreservedMatrixDataSource = this->_GetParentMetricsPreservedMatrixDataSource();
    HdMatrixDataSourceHandle parentMatrixDataSource = this->_GetParentMatrixDataSource();
    HdMatrixDataSourceHandle inputMatrixDataSource = this->_GetInputMatrixDataSource();
    GfMatrix4d preservedParentMatrix = (parentPreservedMatrixDataSource != nullptr) ?
        parentPreservedMatrixDataSource->GetTypedValue(shutterOffset) :
        GfMatrix4d(1.0);
    GfMatrix4d currentFlattenedTransform = inputMatrixDataSource->GetTypedValue(shutterOffset);
    GfMatrix4d originalLocalTransform = currentFlattenedTransform * preservedParentMatrix.GetInverse();
    // the parent's current (possibly metrics-corrected) flattened transform,
    // used below to reflatten the recovered local transform
    GfMatrix4d updatedParentMatrix = (parentMatrixDataSource != nullptr) ?
        parentMatrixDataSource->GetTypedValue(shutterOffset) :
        GfMatrix4d(1.0);
    // do we need to apply a corrective?
    if (_isMetricsCorrectiveSource)
    {
        // this prim itself diverged - apply the uniform scale corrective
        // to the recovered local transform before reflattening
        GfMatrix4d mpuCorrective = this->_GetMpuCorrective();
        GfMatrix4d correctedTransform = originalLocalTransform * mpuCorrective;
        return updatedParentMatrix * correctedTransform;
    }
    // no local corrective necessary, just reconcatenate with the new parent
    // transform to form the final new flattened child
    return updatedParentMatrix * originalLocalTransform;
}
// Builds the uniform scale matrix correcting the MPU divergence
// (layer MPU / stage MPU). Returns identity - with a warning - when the
// metrics information is missing, incomplete, or would divide by zero.
GfMatrix4d HdOmniMetricsDataSource::_MetricsCorrectedMatrixDataSource::_GetMpuCorrective()
{
    // retrieve the layer and stage MPU values from the wrapped prim
    HdOmniMetricsSchema metricsSchema = HdOmniMetricsSchema::GetFromParent(_inputDataSource);
    if (!metricsSchema.IsDefined())
    {
        TF_WARN("MPU divergency was detected but data source has no metrics information!");
        return GfMatrix4d(1.0);
    }
    auto layerMpuDataSource = metricsSchema.GetLayerMpu();
    auto stageMpuDataSource = metricsSchema.GetStageMpu();
    // the schema builder allows either value to be left unset; guard against
    // dereferencing a null handle
    if (layerMpuDataSource == nullptr || stageMpuDataSource == nullptr)
    {
        TF_WARN("Metrics schema is defined but MPU values are missing!");
        return GfMatrix4d(1.0);
    }
    double layerMpu = layerMpuDataSource->GetTypedValue(0.0);
    double stageMpu = stageMpuDataSource->GetTypedValue(0.0);
    // a zero stage MPU would otherwise produce an infinite / NaN scale
    if (stageMpu == 0.0)
    {
        TF_WARN("Stage MPU is zero; cannot compute MPU corrective!");
        return GfMatrix4d(1.0);
    }
    GfMatrix4d uniformScaleTransform(1.0);
    uniformScaleTransform.SetScale(layerMpu / stageMpu);
    return uniformScaleTransform;
}
PXR_NAMESPACE_CLOSE_SCOPE | 14,468 | C++ | 38.859504 | 135 | 0.709773 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniMetricsAssembler/metricsSceneIndex.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/work/utils.h>
#include <pxr/imaging/hd/xformSchema.h>
#include "metricsSceneIndex.h"
#include "metricsDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Factory: creates a ref-counted metrics scene index over the input scene.
// inputArgs is forwarded to the constructor (currently unused there).
OmniMetricsSceneIndexRefPtr OmniMetricsSceneIndex::New(
    const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs)
{
    return TfCreateRefPtr(new OmniMetricsSceneIndex(inputSceneIndex, inputArgs));
}
// Eagerly wraps every prim already present in the input scene so GetPrim
// can serve metrics-aware data sources from the start.
OmniMetricsSceneIndex::OmniMetricsSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs) :
    HdSingleInputFilteringSceneIndexBase(inputSceneIndex)
{
    _WrapPrimsRecursively(SdfPath::AbsoluteRootPath());
}
OmniMetricsSceneIndex::~OmniMetricsSceneIndex() = default;
// Serves the wrapped (metrics-aware) prim for the path; falls back to the
// input scene's prim defensively, since every prim should have been wrapped.
HdSceneIndexPrim OmniMetricsSceneIndex::GetPrim(const SdfPath &primPath) const
{
    const auto entry = _wrappedPrims.find(primPath);
    if (entry != _wrappedPrims.end())
    {
        return entry->second;
    }
    // defensive fallback - there shouldn't be an unwrapped prim
    return this->_GetInputSceneIndex()->GetPrim(primPath);
}
// Topology is unchanged by this filter, so child paths come straight from
// the input scene.
SdfPathVector OmniMetricsSceneIndex::GetChildPrimPaths(const SdfPath& primPath) const
{
    // no change in topology occurs as part of this scene index
    // so we can ask the input scene to get the child prim paths directly
    return this->_GetInputSceneIndex()->GetChildPrimPaths(primPath);
}
// Wraps (or rewraps) each added prim with a metrics data source and forwards
// the add notification downstream.
void OmniMetricsSceneIndex::_PrimsAdded(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::AddedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for(const HdSceneIndexObserver::AddedPrimEntry& entry : entries)
    {
        HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(entry.primPath);
        HdContainerDataSourceHandle dataSource = sceneIndexPrim.dataSource;
        // attempt to insert a wrapped version for this prim
        auto it = _wrappedPrims.insert(
            {
                entry.primPath,
                HdSceneIndexPrim()
            });
        // get a reference to the inserted prim
        // this will be the existing one if insertion failed
        HdSceneIndexPrim &prim = it.first->second;
        prim.primType = entry.primType;
        // if the wrapper does exist, we have to update the data source
        if (prim.dataSource != nullptr)
        {
            HdOmniMetricsDataSource::Cast(prim.dataSource)->UpdateWrappedDataSource(dataSource);
        }
        else
        {
            // new insertion, so it wasn't wrapped previously
            // wrap the data source here
            prim.dataSource = HdOmniMetricsDataSource::New(*this, entry.primPath, dataSource);
        }
        // if this was a new insertion in the middle of the hierarchy
        // we need to invalidate descendent flattened attributes
        if (!it.second)
        {
            // Open Issue: we don't handle this here, because it's just a PoC
            // looking at spheres, but in general, we would need to build a set
            // containing the locators we are interested in (at minimum this would
            // be the transform of the prim itself, HdXformSchemaTokens->xform)
            // and make sure the entire prim hierarchy is dirtied if the data source
            // associated is dirtied based on that locator
            // since this likely requires a plug-in system to solve metrics assembly
            // generically, we defer this to a more general solution
        }
    }
    // forward on the notification
    this->_SendPrimsAdded(entries);
    // also, if we had to dirty entries because of an insertion in the middle
    // of the stage hierarchy, send those along too
    if (!dirtyEntries.empty())
    {
        this->_SendPrimsDirtied(dirtyEntries);
    }
}
// Tears down the wrappers for each removed subtree (asynchronously
// destroying their data sources) and forwards the removal downstream.
void OmniMetricsSceneIndex::_PrimsRemoved(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::RemovedPrimEntries& entries)
{
    for (const HdSceneIndexObserver::RemovedPrimEntry& entry : entries)
    {
        if (entry.primPath.IsAbsoluteRootPath())
        {
            // removing the whole scene
            _wrappedPrims.ClearInParallel();
            TfReset(_wrappedPrims);
        }
        else
        {
            auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(entry.primPath);
            // queue destruction of each wrapper's data source off-thread
            for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second; it++)
            {
                WorkSwapDestroyAsync(it->second.dataSource);
            }
            // erasing the subtree root drops the whole subtree from the table
            if(startEndRangeIterator.first != startEndRangeIterator.second)
            {
                _wrappedPrims.erase(startEndRangeIterator.first);
            }
        }
    }
    _SendPrimsRemoved(entries);
}
// For xform-affecting dirties, invalidates cached corrected transforms in
// the affected subtrees and forwards both the original and any additional
// dirty notifications downstream.
void OmniMetricsSceneIndex::_PrimsDirtied(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::DirtiedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for (const HdSceneIndexObserver::DirtiedPrimEntry& entry : entries)
    {
        HdDataSourceLocatorSet locators;
        if (entry.dirtyLocators.Intersects(HdXformSchema::GetDefaultLocator()))
        {
            locators.insert(HdXformSchema::GetDefaultLocator());
        }
        // Open Issue: what about the radius locator? we would need that, but
        // it depends on where our scene index resides - it may already have
        // been converted by the ImplicitSceneIndex into a mesh (and it's hard
        // to know where exactly our scene index will be inserted)
        // we don't solve it here because a general metrics assembler wouldn't
        // be considering spheres only, so we defer that to a more general solution
        if (!locators.IsEmpty())
        {
            this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
        }
    }
    _SendPrimsDirtied(entries);
    // descendants whose cached values were invalidated get their own notice
    if (!dirtyEntries.empty())
    {
        _SendPrimsDirtied(dirtyEntries);
    }
}
// Walks the wrapped subtree rooted at primPath, invalidating each wrapper's
// cached corrected xform that intersects the given locators, and appends a
// dirty entry for every affected descendant (primPath itself is reported by
// the caller).
//
// Fix: the unwrapped-entry branch used `it = it++;`, which for a class-type
// iterator assigns the pre-increment value back and never advances - an
// infinite loop. Replaced with a plain pre-increment.
void OmniMetricsSceneIndex::_DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators,
    HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries)
{
    // find subtree range retrieves a start end pair of children
    // in the subtree of the given prim path
    auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(primPath);
    for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second;)
    {
        HdOmniMetricsDataSourceHandle dataSource = HdOmniMetricsDataSource::Cast(it->second.dataSource);
        if (dataSource != nullptr)
        {
            if (dataSource->IsPrimDirtied(locators))
            {
                // a cached value was invalidated; report this prim unless it
                // is the subtree root the caller already reported
                if (it->first != primPath)
                {
                    dirtyEntries->emplace_back(it->first, locators);
                }
                ++it;
            }
            else
            {
                // nothing cached here, so no descendant depends on a stale
                // value - skip this whole subtree
                it = it.GetNextSubtree();
            }
        }
        else
        {
            // entry isn't one of our wrappers; just advance
            ++it;
        }
    }
}
// Depth-first walk of the input scene from primPath, inserting a metrics
// wrapper for every prim into the path table.
void OmniMetricsSceneIndex::_WrapPrimsRecursively(const SdfPath& primPath)
{
    HdSceneIndexPrim prim = this->_GetInputSceneIndex()->GetPrim(primPath);
    HdOmniMetricsDataSourceHandle wrappedDataSource = HdOmniMetricsDataSource::New(*this, primPath, prim.dataSource);
    _wrappedPrims.insert(
        {
            primPath,
            HdSceneIndexPrim
            {
                prim.primType,
                std::move(wrappedDataSource)
            }
        }
    );
    for (const SdfPath& childPath : this->_GetInputSceneIndex()->GetChildPrimPaths(primPath))
    {
        this->_WrapPrimsRecursively(childPath);
    }
}
PXR_NAMESPACE_CLOSE_SCOPE | 8,306 | C++ | 34.5 | 117 | 0.663255 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpPythonModule.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_PYTHON_MODULE_H
#define OMNI_WARP_SCENE_INDEX_WARP_PYTHON_MODULE_H
#include <string>
#include <pxr/pxr.h>
#include <pxr/base/tf/declarePtrs.h>
#include <pxr/base/vt/value.h>
#include <pxr/imaging/hd/meshSchema.h>
#include <pxr/usdImaging/usdImaging/stageSceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniWarpPythonModule);
///
/// \class OmniWarpPythonModule
///
/// Bridges a Hydra prim to a named Python module (presumably a NVIDIA Warp
/// simulation script - TODO confirm, the implementation is not visible
/// here). Mesh or particle state is handed over once via InitMesh /
/// InitParticles, after which ExecSim is called to obtain updated point
/// positions.
///
class OmniWarpPythonModule
{
public:
    /// Constructs a module wrapper for the prim at primPath, binding it to
    /// the Python module named moduleName.
    OmniWarpPythonModule(const SdfPath &primPath, const std::string& moduleName,
        UsdImagingStageSceneIndexConstRefPtr usdImagingSi);
    ~OmniWarpPythonModule();
    /// Initializes the simulation with mesh topology plus dependent-prim
    /// geometry and simulation parameters.
    void InitMesh(VtIntArray indices, VtVec3fArray vertices,
        VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams);
    /// Initializes the simulation with a particle point cloud plus
    /// dependent-prim geometry and simulation parameters.
    void InitParticles(VtVec3fArray positions,
        VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams);
    /// Runs the simulation and returns updated point positions.
    VtVec3fArray ExecSim(VtDictionary simParams);
    /// Overload that also supplies current dependent-prim vertices.
    VtVec3fArray ExecSim(VtDictionary simParams, VtVec3fArray dependentVertices);
private:
    std::string _moduleName;   // name of the Python module to invoke
    SdfPath _primPath;         // prim this module drives
    UsdImagingStageSceneIndexConstRefPtr _usdImagingSi;  // stage scene index handle
};
using OmniWarpPythonModuleSharedPtr = std::shared_ptr<class OmniWarpPythonModule>;
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_WARP_SCENE_INDEX_WARP_PYTHON_MODULE_H | 1,956 | C | 29.107692 | 82 | 0.75818 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpSceneIndexPlugin.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/imaging/hd/sceneIndexPluginRegistry.h>
#include <pxr/imaging/hio/glslfx.h>
#include "warpSceneIndexPlugin.h"
#include "warpSceneIndex.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PRIVATE_TOKENS(
_tokens,
((sceneIndexPluginName, "Omni_WarpSceneIndexPlugin")));
static const char* const _pluginDisplayName = "GL";
// Registers the plugin type with the TfType system so the scene index
// plugin registry can instantiate it.
TF_REGISTRY_FUNCTION(TfType)
{
    HdSceneIndexPluginRegistry::Define<
        Omni_WarpSceneIndexPlugin>();
}
// Registers this scene index for the "GL" renderer so it is inserted at the
// start of insertion phase 0 of the render index's scene index chain.
TF_REGISTRY_FUNCTION(HdSceneIndexPlugin)
{
    const HdSceneIndexPluginRegistry::InsertionPhase insertionPhase = 0;
    HdSceneIndexPluginRegistry::GetInstance().RegisterSceneIndexForRenderer(
        _pluginDisplayName, _tokens->sceneIndexPluginName, nullptr,
        insertionPhase, HdSceneIndexPluginRegistry::InsertionOrderAtStart);
}
Omni_WarpSceneIndexPlugin::
Omni_WarpSceneIndexPlugin() = default;
Omni_WarpSceneIndexPlugin::
~Omni_WarpSceneIndexPlugin() = default;
// Called by the registry to splice this plugin's scene index into the
// chain; constructs an OmniWarpSceneIndex over the upstream scene.
// inputArgs is unused - the warp scene index takes no construction args.
HdSceneIndexBaseRefPtr
Omni_WarpSceneIndexPlugin::_AppendSceneIndex(
    const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs)
{
    TF_UNUSED(inputArgs);
    return OmniWarpSceneIndex::New(
        inputSceneIndex);
}
PXR_NAMESPACE_CLOSE_SCOPE
| 1,807 | C++ | 28.16129 | 76 | 0.767571 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_SCHEMA_H
#define OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_SCHEMA_H
#include <pxr/imaging/hd/schema.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
// Token identifiers for the "warpComputation" container data source and
// its child fields (python source file, dependent prims, sim parameters).
//
// Fix: the last macro element previously ended with a line-continuation
// backslash, so the macro definition silently extended onto whatever line
// followed it; if the separating blank line is ever removed, the
// TF_DECLARE_PUBLIC_TOKENS invocation gets swallowed into the macro and
// the build breaks. The final element must not carry a continuation.
#define OMNIWARPCOMPUTATION_SCHEMA_TOKENS \
    (warpComputation) \
    (sourceFile) \
    (dependentPrims) \
    (simulationParams)

TF_DECLARE_PUBLIC_TOKENS(OmniWarpComputationSchemaTokens, OMNIWARPSCENEINDEX_API,
    OMNIWARPCOMPUTATION_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
/// \class OmniWarpComputationSchema
///
/// Schema-level view over the "warpComputation" container data source
/// attached to a prim. Exposes the Warp python source file to execute,
/// the prims this computation depends on, and an opaque dictionary of
/// simulation parameters.
class OmniWarpComputationSchema : public HdSchema
{
public:
    /// Wraps \p container, expected to be the "warpComputation" child
    /// container of a prim-level data source.
    OmniWarpComputationSchema(HdContainerDataSourceHandle container)
    : HdSchema(container) {}

    //ACCESSORS

    /// Path/name of the python module implementing the simulation.
    OMNIWARPSCENEINDEX_API
    HdStringDataSourceHandle GetSourceFile();
    /// Paths of prims whose geometry feeds this computation (may be null).
    OMNIWARPSCENEINDEX_API
    HdPathArrayDataSourceHandle GetDependentPrims();
    /// Opaque sampled source holding a VtDictionary of user sim parameters.
    OMNIWARPSCENEINDEX_API
    HdSampledDataSourceHandle GetSimulationParams();

    // RETRIEVING AND CONSTRUCTING

    /// Builds a container data source which includes the provided child data
    /// sources. Parameters with nullptr values are excluded. This is a
    /// low-level interface. For cases in which it's desired to define
    /// the container with a sparse set of child fields, the Builder class
    /// is often more convenient and readable.
    OMNIWARPSCENEINDEX_API
    static HdContainerDataSourceHandle
    BuildRetained(
        const HdStringDataSourceHandle &sourceFile,
        const HdPathArrayDataSourceHandle &dependentPrims,
        const HdSampledDataSourceHandle &simulationParams
    );

    /// \class OmniWarpComputationSchema::Builder
    ///
    /// Utility class for setting sparse sets of child data source fields to be
    /// filled as arguments into BuildRetained. Because all setter methods
    /// return a reference to the instance, this can be used in the "builder
    /// pattern" form.
    class Builder
    {
    public:
        OMNIWARPSCENEINDEX_API
        Builder &SetSourceFile(
            const HdStringDataSourceHandle &sourceFile);
        OMNIWARPSCENEINDEX_API
        Builder &SetDependentPrims(
            const HdPathArrayDataSourceHandle &dependentPrims);
        // Fix: this setter was missing the OMNIWARPSCENEINDEX_API export
        // macro that its siblings carry, so the symbol was not exported
        // from the library on platforms where the macro expands to a
        // dllexport/visibility attribute.
        OMNIWARPSCENEINDEX_API
        Builder &SetSimulationParams(
            const HdSampledDataSourceHandle &simulationParams);

        /// Returns a container data source containing the members set thus far.
        OMNIWARPSCENEINDEX_API
        HdContainerDataSourceHandle Build();

    private:
        HdStringDataSourceHandle _sourceFile;
        HdPathArrayDataSourceHandle _dependentPrims;
        HdSampledDataSourceHandle _simulationParams;
    };

    /// Retrieves a container data source with the schema's default name token
    /// "warpComputation" from the parent container and constructs a
    /// OmniWarpComputationSchema instance.
    /// Because the requested container data source may not exist, the result
    /// should be checked with IsDefined() or a bool comparison before use.
    OMNIWARPSCENEINDEX_API
    static OmniWarpComputationSchema GetFromParent(
        const HdContainerDataSourceHandle &fromParentContainer);

    /// Returns a token where the container representing this schema is found in
    /// a container by default.
    OMNIWARPSCENEINDEX_API
    static const TfToken &GetSchemaToken();

    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the container representing this schema is found by default.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetDefaultLocator();

    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the source file can be found.
    /// This is often useful for checking intersection against the
    /// HdDataSourceLocatorSet sent with HdDataSourceObserver::PrimsDirtied.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetSourceFileLocator();

    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the dependent prims can be found.
    /// This is often useful for checking intersection against the
    /// HdDataSourceLocatorSet sent with HdDataSourceObserver::PrimsDirtied.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetDependentPrimsLocator();

    /// Returns an HdDataSourceLocator (relative to the prim-level data source)
    /// where the simulation params can be found.
    /// This is often useful for checking intersection against the
    /// HdDataSourceLocatorSet sent with HdDataSourceObserver::PrimsDirtied.
    OMNIWARPSCENEINDEX_API
    static const HdDataSourceLocator &GetSimulationParamsLocator();
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 5,329 | C | 36.013889 | 81 | 0.719835 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationAPIAdapter.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_API_ADAPTER_H
#define OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_API_ADAPTER_H
#include <pxr/usdImaging/usdImaging/apiSchemaAdapter.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
// Adapter that translates the WarpComputationAPI applied USD schema into
// Hydra data sources (the "warpComputation" container consumed by
// OmniWarpSceneIndex), and reports invalidation when its properties change.
class WarpComputationAPIAdapter : public UsdImagingAPISchemaAdapter
{
public:
    using BaseAdapter = UsdImagingAPISchemaAdapter;

    // Returns the container data source contributed by this API schema for
    // the given (prim, subprim, instance-name) triple.
    OMNIWARPSCENEINDEX_API
    HdContainerDataSourceHandle GetImagingSubprimData(
            UsdPrim const& prim,
            TfToken const& subprim,
            TfToken const& appliedInstanceName,
            const UsdImagingDataSourceStageGlobals &stageGlobals) override;

// USD 23.08 added an UsdImagingPropertyInvalidationType parameter to
// InvalidateImagingSubprim; keep both overrides so the plugin builds
// against pre- and post-23.08 USD.
#if PXR_VERSION < 2308
    OMNIWARPSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
            UsdPrim const& prim,
            TfToken const& subprim,
            TfToken const& appliedInstanceName,
            TfTokenVector const& properties) override;
#else
    OMNIWARPSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
            UsdPrim const& prim,
            TfToken const& subprim,
            TfToken const& appliedInstanceName,
            TfTokenVector const& properties,
            UsdImagingPropertyInvalidationType invalidationType) override;
#endif
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_WARP_SCENE_INDEX_WARP_COMPUTATION_API_ADAPTER_H
| 1,927 | C | 31.677966 | 75 | 0.732226 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpSceneIndex.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <string>
#include <pxr/base/tf/pyInvoke.h>
#include <pxr/base/tf/errorMark.h>
#include <pxr/base/tf/pyExceptionState.h>
#include <pxr/base/tf/pyInterpreter.h>
#include <pxr/imaging/hd/primvarSchema.h>
#include <pxr/imaging/hd/primvarsSchema.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/imaging/hd/tokens.h>
#include <pxr/imaging/hd/meshSchema.h>
#include "pxr/imaging/hd/instancerTopologySchema.h"
#include "warpSceneIndex.h"
#include "tokens.h"
#ifdef PXR_PYTHON_SUPPORT_ENABLED
#include <pxr/base/tf/pyInterpreter.h>
#endif // PXR_PYTHON_SUPPORT_ENABLED
PXR_NAMESPACE_OPEN_SCOPE
// Extracts the user-supplied simulation-parameters dictionary from a prim's
// "warpComputation" container. Returns an empty VtDictionary when the prim
// has no warp computation, no simulationParams entry, or the entry does not
// hold a VtDictionary.
static VtDictionary
GetSimulationParams(HdContainerDataSourceHandle ds)
{
    VtDictionary vtSimParams;
    // GetFromParent tolerates a null container, so this is safe even when
    // the caller passes an empty data source handle (the previous version
    // dereferenced `ds` unconditionally).
    OmniWarpComputationSchema warpSchema =
        OmniWarpComputationSchema::GetFromParent(ds);
    if (warpSchema)
    {
        // Null-check the member data source directly instead of scanning
        // GetNames(): GetSimulationParams() returns a null handle when the
        // field is absent, and dereferencing it unchecked would crash.
        if (HdSampledDataSourceHandle paramsDs = warpSchema.GetSimulationParams())
        {
            VtValue metaData = paramsDs->GetValue(0);
            if (metaData.IsHolding<VtDictionary>())
            {
                vtSimParams = metaData.UncheckedGet<VtDictionary>();
            }
        }
    }
    return vtSimParams;
}
// Depth-first search through a graph of (possibly filtering) scene indices
// for the UsdImagingStageSceneIndex, which owns the stage/frame-time state.
// Returns a null ref ptr when none is found.
static UsdImagingStageSceneIndexRefPtr
FindUsdImagingSceneIndex(const std::vector<HdSceneIndexBaseRefPtr>& inputScenes)
{
    for (HdSceneIndexBaseRefPtr const& sceneIdx : inputScenes)
    {
        // Direct hit: this input is the stage scene index itself.
        if (UsdImagingStageSceneIndexRefPtr const imagingSi =
                TfDynamic_cast<UsdImagingStageSceneIndexRefPtr>(sceneIdx))
        {
            return imagingSi;
        }
        // Otherwise recurse into filtering scene indices' own inputs.
        if (HdFilteringSceneIndexBaseRefPtr const filteringSi =
                TfDynamic_cast<HdFilteringSceneIndexBaseRefPtr>(sceneIdx))
        {
            if (UsdImagingStageSceneIndexRefPtr const found =
                    FindUsdImagingSceneIndex(filteringSi->GetInputScenes()))
            {
                return found;
            }
        }
    }
    return UsdImagingStageSceneIndexRefPtr();
}
// Factory: creates a ref-counted OmniWarpSceneIndex filtering the given
// input scene index.
OmniWarpSceneIndexRefPtr
OmniWarpSceneIndex::New(
    const HdSceneIndexBaseRefPtr &inputSceneIndex)
{
    return TfCreateRefPtr(
        new OmniWarpSceneIndex(
            inputSceneIndex));
}

// Private constructor; use New(). The base class wires up observation of
// the single input scene index.
OmniWarpSceneIndex::OmniWarpSceneIndex(
    const HdSceneIndexBaseRefPtr &inputSceneIndex)
: HdSingleInputFilteringSceneIndexBase(inputSceneIndex)
{
}
/// A convenience data source implementing the primvar schema from
/// a triple of primvar value, interpolation and role. The latter two
/// are given as tokens. The value can be given either as data source
/// or as thunk returning a data source which is evaluated on each
/// Get.
class _PrimvarDataSource final : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_PrimvarDataSource);

    // The three child names a primvar container exposes.
    TfTokenVector GetNames() override {
        return {HdPrimvarSchemaTokens->primvarValue,
                HdPrimvarSchemaTokens->interpolation,
                HdPrimvarSchemaTokens->role};
    }

    // Dispatches the three primvar fields; interpolation and role are
    // converted to shared token data sources on demand.
    HdDataSourceBaseHandle Get(const TfToken &name) override {
        if (name == HdPrimvarSchemaTokens->primvarValue) {
            return _primvarValueSrc;
        }
        if (name == HdPrimvarSchemaTokens->interpolation) {
            return
                HdPrimvarSchema::BuildInterpolationDataSource(
                    _interpolation);
        }
        if (name == HdPrimvarSchemaTokens->role) {
            return
                HdPrimvarSchema::BuildRoleDataSource(
                    _role);
        }
        return nullptr;
    }

private:
    _PrimvarDataSource(
        const HdDataSourceBaseHandle &primvarValueSrc,
        const TfToken &interpolation,
        const TfToken &role)
      : _primvarValueSrc(primvarValueSrc)
      , _interpolation(interpolation)
      , _role(role)
    {
    }

    HdDataSourceBaseHandle _primvarValueSrc;
    TfToken _interpolation;
    TfToken _role;
};
/// Sampled data source that replaces a mesh's "points" primvar value with
/// the output of the Warp simulation. When the computation declares a
/// dependent prim, that prim's current points are forwarded into the sim.
class _PointsDataSource : public HdVec3fArrayDataSource
{
public:
    HD_DECLARE_DATASOURCE(_PointsDataSource);

    VtValue GetValue(const Time shutterOffset) override {
        return VtValue(GetTypedValue(shutterOffset));
    }

    VtVec3fArray GetTypedValue(const Time shutterOffset) override
    {
        // If a dependent prim with a points primvar exists, hand its
        // current positions to the simulation; otherwise run without them.
        HdPrimvarsSchema depPrimVarsSchema = HdPrimvarsSchema::GetFromParent(_depDs);
        if (depPrimVarsSchema)
        {
            HdPrimvarSchema depPrimVar = depPrimVarsSchema.GetPrimvar(HdTokens->points);
            if (depPrimVar)
            {
                HdSampledDataSourceHandle valueDataSource = depPrimVar.GetPrimvarValue();
                auto pointsVt = valueDataSource->GetValue(0.f);
                VtVec3fArray pointsArray = pointsVt.UncheckedGet<VtArray<GfVec3f>>();
                return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs), pointsArray);
            }
        }
        return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs));
    }

    bool GetContributingSampleTimesForInterval(
        const Time startTime,
        const Time endTime,
        std::vector<Time> * const outSampleTimes) override
    {
        // Single-sample output; no additional sample times to contribute.
        return false;
    }

private:
    _PointsDataSource(HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &simParamsDataSource,
        const HdContainerDataSourceHandle &depDataSource)
      : _schema(primVarSchema),
        _pythonModule(pythonModule),
        _simParamsDs(simParamsDataSource),
        _depDs(depDataSource)
    {
    }

    // Fix: _schema was previously a reference member bound to the ctor
    // argument, which dangles once the creating data source (and its
    // by-value schema) is released while this object is still alive.
    // HdSchema is a thin handle wrapper, so storing by value is cheap.
    // Members are also declared in initialization order to avoid
    // -Wreorder surprises.
    HdPrimvarsSchema _schema;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle const _simParamsDs;
    HdContainerDataSourceHandle const _depDs;
};
/// Sampled data source that replaces an instancer's per-instance positions
/// with the output of the Warp simulation. When the computation declares a
/// dependent prim, that prim's current points are forwarded into the sim.
class _InstancePositionsDataSource : public HdVec3fArrayDataSource
{
public:
    HD_DECLARE_DATASOURCE(_InstancePositionsDataSource);

    VtValue GetValue(const Time shutterOffset) override {
        return VtValue(GetTypedValue(shutterOffset));
    }

    VtVec3fArray GetTypedValue(const Time shutterOffset) override
    {
        // If a dependent prim with a points primvar exists, hand its
        // current positions to the simulation; otherwise run without them.
        HdPrimvarsSchema depPrimVarsSchema = HdPrimvarsSchema::GetFromParent(_depDs);
        if (depPrimVarsSchema)
        {
            HdPrimvarSchema depPrimVar = depPrimVarsSchema.GetPrimvar(HdTokens->points);
            if (depPrimVar)
            {
                HdSampledDataSourceHandle valueDataSource = depPrimVar.GetPrimvarValue();
                auto pointsVt = valueDataSource->GetValue(0.f);
                VtVec3fArray pointsArray = pointsVt.UncheckedGet<VtArray<GfVec3f>>();
                return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs), pointsArray);
            }
        }
        return _pythonModule->ExecSim(GetSimulationParams(_simParamsDs));
    }

    bool GetContributingSampleTimesForInterval(
        const Time startTime,
        const Time endTime,
        std::vector<Time> * const outSampleTimes) override
    {
        // Single-sample output; no additional sample times to contribute.
        return false;
    }

private:
    _InstancePositionsDataSource(HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource,
        const HdContainerDataSourceHandle &simParamsDataSource)
      : _schema(primVarSchema),
        _pythonModule(pythonModule),
        _depDs(depDataSource),
        _simParamsDs(simParamsDataSource)
    {
    }

    // Fix: _schema was previously a reference member bound to the ctor
    // argument, which dangles once the creating data source is released
    // while this object is still alive. HdSchema is a thin handle wrapper,
    // so storing by value is cheap. Members are declared in
    // initialization order.
    HdPrimvarsSchema _schema;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle _depDs;
    HdContainerDataSourceHandle _simParamsDs;
};
// Container overlay for a mesh's primvars container: intercepts "points"
// and serves it from a Warp-simulated _PointsDataSource; every other
// primvar is passed through to the wrapped input container.
class _MeshPrimVarsOverrideDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_MeshPrimVarsOverrideDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        if (name == HdTokens->points)
        {
            // Wrap the simulated points in a primvar-shaped container
            // (vertex interpolation, point role).
            return _PrimvarDataSource::New(
                _PointsDataSource::New(_schema, _pythonModule, _simParamsDs, _depDs),
                HdPrimvarSchemaTokens->vertex,
                HdPrimvarSchemaTokens->point);
        }
        HdDataSourceBaseHandle result = _inputDs->Get(name);
        return result;
    }

private:
    // simParamsDataSource is the prim-level container (holds the
    // warpComputation entry); depDataSource is the dependent prim's
    // container, and may be null.
    _MeshPrimVarsOverrideDataSource(const HdContainerDataSourceHandle &primDataSource,
        HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &simParamsDataSource,
        const HdContainerDataSourceHandle &depDataSource)
    : _schema(primVarSchema),
        _pythonModule(pythonModule),
        _inputDs(primDataSource),
        _simParamsDs(simParamsDataSource),
        _depDs(depDataSource)
    {
    }

    // NOTE(review): members initialize in declaration order, which differs
    // from the init-list order above; harmless here since all initializers
    // are independent parameters, but it triggers -Wreorder.
    HdPrimvarsSchema _schema;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle const _depDs;
    HdContainerDataSourceHandle const _inputDs;
    HdContainerDataSourceHandle const _simParamsDs;
};
// Container overlay for an instancer's primvars container: intercepts the
// "translate" instance primvar and serves it from a Warp-simulated
// _InstancePositionsDataSource; everything else passes through.
class _InstancerPrimVarsOverrideDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_InstancerPrimVarsOverrideDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        if (name == HdInstancerTokens->translate)
        {
            // Wrap the simulated positions in a primvar-shaped container
            // (instance interpolation, vector role).
            return _PrimvarDataSource::New(
                _InstancePositionsDataSource::New(_schema, _pythonModule, _depDs, _simParamsDs),
                HdPrimvarSchemaTokens->instance,
                HdPrimvarRoleTokens->vector);
        }
        HdDataSourceBaseHandle result = _inputDs->Get(name);
        return result;
    }

private:
    // depDataSource is the dependent prim's container (may be null);
    // simParamsDataSource is the prototype prim's container holding the
    // warpComputation entry.
    _InstancerPrimVarsOverrideDataSource(const HdContainerDataSourceHandle &primDataSource,
        HdPrimvarsSchema &primVarSchema, OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource,
        const HdContainerDataSourceHandle &simParamsDataSource)
    : _schema(primVarSchema),
        _pythonModule(pythonModule),
        _inputDs(primDataSource),
        _depDs(depDataSource),
        _simParamsDs(simParamsDataSource)
    {
    }

    HdPrimvarsSchema _schema;
    HdContainerDataSourceHandle _depDs;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle const _inputDs;
    HdContainerDataSourceHandle const _simParamsDs;
};
// Prim-level overlay for a warp-simulated mesh: swaps the prim's primvars
// container for a _MeshPrimVarsOverrideDataSource so that "points" comes
// from the simulation; all other prim fields pass through.
class _WarpMeshDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_WarpMeshDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        // We append our token for the WarpMesh python file token
        // We do our init for indices here. Only on reload?
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        auto result = _inputDs->Get(name);
        if (name == HdPrimvarsSchemaTokens->primvars)
        {
            auto primVarSchema = HdPrimvarsSchema::GetFromParent(_inputDs);
            if (auto primVarContainer = HdContainerDataSource::Cast(result))
            {
                return _MeshPrimVarsOverrideDataSource::New(primVarContainer, primVarSchema, _pythonModule, _inputDs, _depDs);
            }
        }
        return result;
    }

private:
    _WarpMeshDataSource(const SdfPath& primPath,
        const HdContainerDataSourceHandle &primDataSource,
        OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource)
      : _primPath(primPath),
        _inputDs(primDataSource),
        _pythonModule(pythonModule),
        _depDs(depDataSource)
    {
    }

    // Fix: _primPath was previously `const SdfPath&`, a reference member
    // bound to the caller's argument — a dangling reference once that
    // argument goes out of scope while this data source is still held by
    // Hydra. Store the path by value (SdfPath is a cheap handle).
    // Members are declared in initialization order.
    const SdfPath _primPath;
    HdContainerDataSourceHandle _inputDs;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle _depDs;
};
// Prim-level overlay for a warp-simulated instancer: swaps the prim's
// primvars container for an _InstancerPrimVarsOverrideDataSource so that
// instance translations come from the simulation; other fields pass through.
class _WarpInstancerDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_WarpInstancerDataSource);

    TfTokenVector GetNames() override
    {
        if (!_inputDs) {
            return {};
        }
        // We append our token for the WarpMesh python file token
        // We do our init for indices here. Only on reload?
        return _inputDs->GetNames();
    }

    HdDataSourceBaseHandle Get(const TfToken &name) override
    {
        auto result = _inputDs->Get(name);
        if (name == HdPrimvarsSchemaTokens->primvars)
        {
            auto primVarSchema = HdPrimvarsSchema::GetFromParent(_inputDs);
            if (auto primVarContainer = HdContainerDataSource::Cast(result))
            {
                return _InstancerPrimVarsOverrideDataSource::New(primVarContainer, primVarSchema, _pythonModule, _depDs, _simParamsDs);
            }
        }
        return result;
    }

private:
    // simParamsDataSource is the prototype prim's container holding the
    // warpComputation entry; depDataSource may be null.
    _WarpInstancerDataSource(const SdfPath& primPath,
        const HdContainerDataSourceHandle &primDataSource,
        OmniWarpPythonModuleSharedPtr pythonModule,
        const HdContainerDataSourceHandle &depDataSource,
        const HdContainerDataSourceHandle &simParamsDataSource)
      : _primPath(primPath),
        _inputDs(primDataSource),
        _pythonModule(pythonModule),
        _depDs(depDataSource),
        _simParamsDs(simParamsDataSource)
    {
    }

    // Fix: _primPath was previously `const SdfPath&`, a reference member
    // bound to the caller's argument — a dangling reference once that
    // argument goes out of scope while this data source is still held by
    // Hydra. Store the path by value (SdfPath is a cheap handle).
    // Members are declared in initialization order.
    const SdfPath _primPath;
    HdContainerDataSourceHandle _inputDs;
    OmniWarpPythonModuleSharedPtr _pythonModule;
    HdContainerDataSourceHandle _depDs;
    HdContainerDataSourceHandle _simParamsDs;
};
// Wraps meshes (and instancers whose prototypes) carry a warpComputation
// container with overlay data sources that substitute simulated points /
// instance positions. All other prims pass through unchanged.
HdSceneIndexPrim
OmniWarpSceneIndex::GetPrim(const SdfPath& primPath) const
{
    HdSceneIndexPrim prim = _GetInputSceneIndex()->GetPrim(primPath);
    if (prim.primType == HdPrimTypeTokens->mesh && prim.dataSource)
    {
        if (OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(prim.dataSource))
        {
            // Resolve the (first) dependent prim's data source, if any,
            // so the simulation can read its points.
            HdContainerDataSourceHandle _depDs;
            if (HdPathArrayDataSourceHandle dependentsDs = warpSchema.GetDependentPrims())
            {
                VtArray<SdfPath> dependentPrims = dependentsDs->GetTypedValue(0);
                if (dependentPrims.size())
                {
                    auto depPrim = _GetInputSceneIndex()->GetPrim(dependentPrims[0]);
                    if (depPrim.dataSource)
                    {
                        _depDs = depPrim.dataSource;
                    }
                }
            }
            prim.dataSource = _WarpMeshDataSource::New(
                primPath, prim.dataSource, GetWarpPythonModule(primPath), _depDs);
        }
    }
    else if (prim.primType == HdPrimTypeTokens->instancer && prim.dataSource)
    {
        // For instancers, the warpComputation lives on a prototype prim;
        // scan the prototypes for one that carries it.
        HdInstancerTopologySchema topologySchema = HdInstancerTopologySchema::GetFromParent(prim.dataSource);
        if (HdPathArrayDataSourceHandle const ds = topologySchema.GetPrototypes())
        {
            auto protoTypes = ds->GetTypedValue(0.0f);
            for (size_t i = 0; i < protoTypes.size(); ++i)
            {
                auto protoPrim = _GetInputSceneIndex()->GetPrim(protoTypes[i]);
                OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(protoPrim.dataSource);
                if (warpSchema)
                {
                    // Look for particles to be dependent on a mesh
                    HdContainerDataSourceHandle _depDs;
                    if (HdPathArrayDataSourceHandle dependentsDs = warpSchema.GetDependentPrims())
                    {
                        VtArray<SdfPath> dependentPrims = dependentsDs->GetTypedValue(0);
                        if (dependentPrims.size())
                        {
                            auto depPrim = _GetInputSceneIndex()->GetPrim(dependentPrims[0]);
                            if (depPrim.dataSource)
                            {
                                _depDs = depPrim.dataSource;
                            }
                        }
                    }
                    // NOTE(review): only the first prototype with a warp
                    // computation wins; no break here, so a later prototype
                    // would re-wrap the data source — confirm intended.
                    prim.dataSource = _WarpInstancerDataSource::New(
                        primPath, prim.dataSource, GetWarpPythonModule(primPath), _depDs, protoPrim.dataSource);
                }
            }
        }
    }
    return prim;
}
// Pure pass-through: this filter does not add or remove prims.
SdfPathVector
OmniWarpSceneIndex::GetChildPrimPaths(const SdfPath &primPath) const
{
    return _GetInputSceneIndex()->GetChildPrimPaths(primPath);
}
// On prim addition, instantiate a Warp python module for every mesh (or
// instancer prototype) that carries a warpComputation container, seeding it
// with the prim's topology and rest points. Entries are always forwarded
// downstream unchanged.
void OmniWarpSceneIndex::_PrimsAdded(
    const HdSceneIndexBase &sender,
    const HdSceneIndexObserver::AddedPrimEntries &entries)
{
    if (!_IsObserved()) {
        return;
    }
    for (const HdSceneIndexObserver::AddedPrimEntry& entry : entries)
    {
        if (entry.primType == HdPrimTypeTokens->mesh)
        {
            auto prim = _GetInputSceneIndex()->GetPrim(entry.primPath);
            HdMeshSchema meshSchema = HdMeshSchema::GetFromParent(prim.dataSource);
            HdPrimvarsSchema primVarsSchema = HdPrimvarsSchema::GetFromParent(prim.dataSource);
            OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(prim.dataSource);
            if (meshSchema && warpSchema && primVarsSchema)
            {
                assert(GetWarpPythonModule(entry.primPath) == nullptr);
                HdMeshTopologySchema meshTopologySchema = meshSchema.GetTopology();
                UsdImagingStageSceneIndexRefPtr usdImagingSi;
                if (auto filteringIdx = dynamic_cast<HdFilteringSceneIndexBase const*>(&sender))
                {
                    // SceneIndexPlugins do not have access to the current stage/frame time.
                    // Only the UsdImagingStageSceneIndex has this. We store this for each Mesh,
                    // nullptr is a valid value. If valid, warp simulation can use the exact
                    // stage time. If null, the warp has to emulate frame time
                    usdImagingSi = FindUsdImagingSceneIndex(filteringIdx->GetInputScenes());
                }
                auto vtSimParams = GetSimulationParams(prim.dataSource);
                HdPrimvarSchema origPoints = primVarsSchema.GetPrimvar(HdTokens->points);
                CreateWarpPythonModule(entry.primPath, warpSchema, meshTopologySchema, origPoints, usdImagingSi, vtSimParams);
            }
        }
        else if (entry.primType == HdPrimTypeTokens->instancer)
        {
            auto prim = _GetInputSceneIndex()->GetPrim(entry.primPath);
            HdPrimvarsSchema primVarSchema = HdPrimvarsSchema::GetFromParent(prim.dataSource);
            HdInstancerTopologySchema topologySchema = HdInstancerTopologySchema::GetFromParent(prim.dataSource);
            HdPathArrayDataSourceHandle const ds = topologySchema.GetPrototypes();
            if (primVarSchema && ds)
            {
                // Find the first prototype that carries a warp computation
                // and build one python module for this instancer.
                auto protoTypes = ds->GetTypedValue(0.0f);
                for (size_t i = 0; i < protoTypes.size(); ++i)
                {
                    auto protoPrim = _GetInputSceneIndex()->GetPrim(protoTypes[i]);
                    if (protoPrim.primType == TfToken())
                    {
                        continue;
                    }
                    OmniWarpComputationSchema warpSchema = OmniWarpComputationSchema::GetFromParent(protoPrim.dataSource);
                    if (warpSchema)
                    {
                        assert(GetWarpPythonModule(entry.primPath) == nullptr);
                        UsdImagingStageSceneIndexRefPtr usdImagingSi;
                        if (auto filteringIdx = dynamic_cast<HdFilteringSceneIndexBase const*>(&sender))
                        {
                            // SceneIndexPlugins do not have access to the current stage/frame time.
                            // Only the UsdImagingStageSceneIndex has this. We store this for each Mesh,
                            // nullptr is a valid value. If valid, warp simulation can use the exact
                            // stage time. If null, the warp has to emulate frame time
                            usdImagingSi = FindUsdImagingSceneIndex(filteringIdx->GetInputScenes());
                        }
                        auto vtSimParams = GetSimulationParams(protoPrim.dataSource);
                        HdPrimvarSchema positionsPos = primVarSchema.GetPrimvar(HdInstancerTokens->translate);
                        CreateWarpPythonModule(entry.primPath, warpSchema, positionsPos, usdImagingSi, vtSimParams);
                        break;
                    }
                }
            }
        }
    }
    _SendPrimsAdded(entries);
    return;
}
// Drops cached python modules for any prim at or below a removed path,
// then forwards the removal notice downstream.
void
OmniWarpSceneIndex::_PrimsRemoved(
    const HdSceneIndexBase &sender,
    const HdSceneIndexObserver::RemovedPrimEntries &entries)
{
    if (!_IsObserved()) {
        return;
    }
    // Erase-while-iterating: advance via the iterator returned by erase()
    // and only increment manually when nothing was erased this pass.
    _WarpPythonModuleMap::iterator it = _pythonModuleMap.begin();
    while (it != _pythonModuleMap.end())
    {
        bool bErased = false;
        for (const HdSceneIndexObserver::RemovedPrimEntry& entry : entries)
        {
            if (it->first.HasPrefix(entry.primPath))
            {
                bErased = true;
                it = _pythonModuleMap.erase(it);
                break;
            }
        }
        if (!bErased)
        {
            it++;
        }
    }
    _SendPrimsRemoved(entries);
}
// Invalidates (erases) cached python modules when a prim's authored points
// or warp source file are dirtied, so the module is rebuilt on next access;
// then forwards the dirty notice downstream.
void
OmniWarpSceneIndex::_PrimsDirtied(
    const HdSceneIndexBase &sender,
    const HdSceneIndexObserver::DirtiedPrimEntries &entries)
{
    if (!_IsObserved()) {
        return;
    }
    // Locators whose dirtiness requires recreating the python module.
    static const HdDataSourceLocatorSet pointDeformLocators
    {
        // Fix: the primvar is named "points" (HdTokens->points); the old
        // code appended HdPrimvarSchemaTokens->point — the *role* token —
        // producing "primvars/point", which never matches a dirtied
        // "primvars/points" locator.
        HdPrimvarsSchema::GetDefaultLocator().Append(
            HdTokens->points),
        // Fix: GetSourceFileLocator() is already rooted at
        // "warpComputation"; appending it to GetDefaultLocator() produced
        // the bogus locator "warpComputation/warpComputation/sourceFile",
        // so source-file edits never invalidated the module.
        OmniWarpComputationSchema::GetSourceFileLocator()
    };
    // If mesh original points or python module path changes
    // remove our _pythonModule for this prim and allow
    // it to be re-created
    //+++ Multithreaded access to _pythonModuleMap
    _WarpPythonModuleMap::iterator it = _pythonModuleMap.begin();
    while (it != _pythonModuleMap.end())
    {
        bool bErased = false;
        for (const HdSceneIndexObserver::DirtiedPrimEntry &entry : entries)
        {
            if (it->first.HasPrefix(entry.primPath))
            {
                if (pointDeformLocators.Intersects(entry.dirtyLocators))
                {
                    bErased = true;
                    it = _pythonModuleMap.erase(it);
                    break;
                }
            }
        }
        if (!bErased)
        {
            it++;
        }
    }
    _SendPrimsDirtied(entries);
}
// Returns the cached python module for primPath, or a null shared_ptr when
// none has been created yet.
OmniWarpPythonModuleSharedPtr
OmniWarpSceneIndex::GetWarpPythonModule(const SdfPath &primPath) const
{
    //+++ Multithreaded access to _pythonModuleMap
    const auto entry = _pythonModuleMap.find(primPath);
    return entry == _pythonModuleMap.end()
        ? OmniWarpPythonModuleSharedPtr(nullptr)
        : entry->second;
}
// Mesh variant: creates (and caches) the python module for primPath,
// initializing it with the mesh's topology indices, rest points, any
// dependent-mesh data, and the simulation parameters.
OmniWarpPythonModuleSharedPtr
OmniWarpSceneIndex::CreateWarpPythonModule(const SdfPath &primPath,
    OmniWarpComputationSchema& warpSchema,
    HdMeshTopologySchema& topologySchema,
    HdPrimvarSchema& primVarSchema,
    UsdImagingStageSceneIndexRefPtr usdImagingSi,
    VtDictionary vtSimParams)
{
    //+++ Multithreaded access to _pythonModuleMap
    std::string moduleName = warpSchema.GetSourceFile()->GetTypedValue(0);
    HdIntArrayDataSourceHandle faceIndicesDs = topologySchema.GetFaceVertexIndices();
    VtIntArray indices = faceIndicesDs->GetTypedValue(0.f);
    HdSampledDataSourceHandle valueDataSource = primVarSchema.GetPrimvarValue();
    auto pointsVt = valueDataSource->GetValue(0.f);
    VtVec3fArray pointsArray = pointsVt.UncheckedGet<VtArray<GfVec3f>>();

    // Force terminate of old module
    // (resetting the map slot drops the last reference before the
    // replacement module is constructed — keep this ordering).
    _pythonModuleMap[primPath] = nullptr;

    OmniWarpPythonModuleSharedPtr pythonModule =
        std::make_shared<OmniWarpPythonModule>(primPath, moduleName, usdImagingSi);
    VtIntArray depIndices;
    VtVec3fArray depPointsArray;
    GetDependentMeshData(warpSchema, depIndices, depPointsArray);
    pythonModule->InitMesh(indices, pointsArray, depIndices, depPointsArray, vtSimParams);
    _pythonModuleMap[primPath] = pythonModule;
    return _pythonModuleMap.find(primPath)->second;
}

// Instancer/particles variant: creates (and caches) the python module for
// primPath, initializing it with the instance positions, any dependent-mesh
// data, and the simulation parameters.
OmniWarpPythonModuleSharedPtr
OmniWarpSceneIndex::CreateWarpPythonModule(const SdfPath &primPath,
    OmniWarpComputationSchema& warpSchema,
    HdPrimvarSchema& primVarSchema,
    UsdImagingStageSceneIndexRefPtr usdImagingSi,
    VtDictionary vtSimParams)
{
    //+++ Multithreaded access to _pythonModuleMap
    std::string moduleName = warpSchema.GetSourceFile()->GetTypedValue(0);

    // Force terminate of old module
    _pythonModuleMap[primPath] = nullptr;

    HdSampledDataSourceHandle valueDataSource = primVarSchema.GetPrimvarValue();
    auto positionsVt = valueDataSource->GetValue(0.f);
    VtVec3fArray positionsArray = positionsVt.UncheckedGet<VtArray<GfVec3f>>();
    OmniWarpPythonModuleSharedPtr pythonModule =
        std::make_shared<OmniWarpPythonModule>(primPath, moduleName, usdImagingSi);

    VtIntArray indices;
    VtVec3fArray pointsArray;
    GetDependentMeshData(warpSchema, indices, pointsArray);
    pythonModule->InitParticles(positionsArray, indices, pointsArray, vtSimParams);
    _pythonModuleMap[primPath] = pythonModule;
    return _pythonModuleMap.find(primPath)->second;
}
// Fills outIndices/outVertices with the dependent mesh's face-vertex
// indices and current points. Outputs are left untouched when the
// computation has no dependent prims or the prim lacks the data.
void
OmniWarpSceneIndex::GetDependentMeshData(OmniWarpComputationSchema warpSchema, VtIntArray& outIndices, VtVec3fArray& outVertices)
{
    VtArray<SdfPath> dependentPrims;
    if (HdPathArrayDataSourceHandle dependentsDs = warpSchema.GetDependentPrims())
    {
        dependentPrims = dependentsDs->GetTypedValue(0);
    }
    if (dependentPrims.empty())
    {
        return;
    }
    //+++ Only support a single dependent prim
    HdSceneIndexPrim depPrim = _GetInputSceneIndex()->GetPrim(dependentPrims[0]);
    if (!depPrim.dataSource)
    {
        return;
    }
    // Current point positions of the dependent mesh.
    if (HdPrimvarsSchema depPrimVarsSchema = HdPrimvarsSchema::GetFromParent(depPrim.dataSource))
    {
        if (HdPrimvarSchema depPrimVar = depPrimVarsSchema.GetPrimvar(HdTokens->points))
        {
            HdSampledDataSourceHandle valueDataSource = depPrimVar.GetPrimvarValue();
            VtValue pointsVt = valueDataSource->GetValue(0.f);
            outVertices = pointsVt.UncheckedGet<VtArray<GfVec3f>>();
        }
    }
    // Face-vertex indices of the dependent mesh's topology.
    if (HdMeshSchema meshSchema = HdMeshSchema::GetFromParent(depPrim.dataSource))
    {
        HdMeshTopologySchema topologySchema = meshSchema.GetTopology();
        HdIntArrayDataSourceHandle faceIndicesDs = topologySchema.GetFaceVertexIndices();
        outIndices = faceIndicesDs->GetTypedValue(0.f);
    }
}
PXR_NAMESPACE_CLOSE_SCOPE | 27,828 | C++ | 34.496173 | 135 | 0.650496 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/__init__.py | from pxr import Tf
# Tf.PreparePythonModule didn't make its way into USD until 21.08 —
# older versions import the compiled module manually and call
# Tf.PrepareModule to splice its bindings into this package's namespace.
if hasattr(Tf, "PreparePythonModule"):
    Tf.PreparePythonModule()
else:
    from . import _omniWarpSceneIndex
    Tf.PrepareModule(_omniWarpSceneIndex, locals())
# Keep the package namespace clean; Tf is only needed at import time.
del Tf
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/trace/trace.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include "warpComputationSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Instantiate the public tokens declared in warpComputationSchema.h.
TF_DEFINE_PUBLIC_TOKENS(OmniWarpComputationSchemaTokens,
    OMNIWARPCOMPUTATION_SCHEMA_TOKENS);

// Each accessor returns the typed child data source, or a null handle when
// the field is absent from the wrapped container.
HdStringDataSourceHandle
OmniWarpComputationSchema::GetSourceFile()
{
    return _GetTypedDataSource<HdStringDataSource>(
        OmniWarpComputationSchemaTokens->sourceFile);
}

HdPathArrayDataSourceHandle
OmniWarpComputationSchema::GetDependentPrims()
{
    return _GetTypedDataSource<HdPathArrayDataSource>(
        OmniWarpComputationSchemaTokens->dependentPrims);
}

HdSampledDataSourceHandle
OmniWarpComputationSchema::GetSimulationParams()
{
    return _GetTypedDataSource<HdSampledDataSource>(
        OmniWarpComputationSchemaTokens->simulationParams);
}
// Builds the "warpComputation" container from up to three child data
// sources; null arguments are simply omitted from the container.
HdContainerDataSourceHandle
OmniWarpComputationSchema::BuildRetained(
    const HdStringDataSourceHandle &sourceFile,
    const HdPathArrayDataSourceHandle &dependentPrims,
    const HdSampledDataSourceHandle &simulationParams
)
{
    // Pack only the non-null members into parallel name/value arrays.
    TfToken names[3];
    HdDataSourceBaseHandle values[3];
    size_t count = 0;
    auto append = [&](const TfToken &name, const HdDataSourceBaseHandle &value) {
        names[count] = name;
        values[count] = value;
        ++count;
    };
    if (sourceFile) {
        append(OmniWarpComputationSchemaTokens->sourceFile, sourceFile);
    }
    if (dependentPrims) {
        append(OmniWarpComputationSchemaTokens->dependentPrims, dependentPrims);
    }
    if (simulationParams) {
        append(OmniWarpComputationSchemaTokens->simulationParams, simulationParams);
    }
    return HdRetainedContainerDataSource::New(count, names, values);
}
/*static*/
// Wraps the "warpComputation" child of a prim-level container; tolerates a
// null parent (result will be undefined — check with bool before use).
OmniWarpComputationSchema
OmniWarpComputationSchema::GetFromParent(
    const HdContainerDataSourceHandle &fromParentContainer)
{
    return OmniWarpComputationSchema(
        fromParentContainer
        ? HdContainerDataSource::Cast(fromParentContainer->Get(
                OmniWarpComputationSchemaTokens->warpComputation))
        : nullptr);
}

/*static*/
// Token naming this schema's container within a prim-level data source.
const TfToken &
OmniWarpComputationSchema::GetSchemaToken()
{
    return OmniWarpComputationSchemaTokens->warpComputation;
}

/*static*/
// Prim-relative locator of the whole "warpComputation" container.
const HdDataSourceLocator &
OmniWarpComputationSchema::GetDefaultLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation
    );
    return locator;
}
/*static*/
// Prim-relative locator "warpComputation/sourceFile". Note these member
// locators already include the container prefix — do not append them to
// GetDefaultLocator().
const HdDataSourceLocator &
OmniWarpComputationSchema::GetSourceFileLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation,
        OmniWarpComputationSchemaTokens->sourceFile
    );
    return locator;
}

/*static*/
// Prim-relative locator "warpComputation/dependentPrims".
const HdDataSourceLocator &
OmniWarpComputationSchema::GetDependentPrimsLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation,
        OmniWarpComputationSchemaTokens->dependentPrims
    );
    return locator;
}

/*static*/
// Prim-relative locator "warpComputation/simulationParams".
const HdDataSourceLocator &
OmniWarpComputationSchema::GetSimulationParamsLocator()
{
    static const HdDataSourceLocator locator(
        OmniWarpComputationSchemaTokens->warpComputation,
        OmniWarpComputationSchemaTokens->simulationParams
    );
    return locator;
}
// --- Builder ---------------------------------------------------------------
// Each setter records the corresponding member data source and returns
// *this so calls can be chained; Build() assembles the retained container.
OmniWarpComputationSchema::Builder &
OmniWarpComputationSchema::Builder::SetSourceFile(
    const HdStringDataSourceHandle &sourceFile)
{
    _sourceFile = sourceFile;
    return *this;
}
OmniWarpComputationSchema::Builder &
OmniWarpComputationSchema::Builder::SetDependentPrims(
    const HdPathArrayDataSourceHandle &dependentPrims)
{
    _dependentPrims = dependentPrims;
    return *this;
}
OmniWarpComputationSchema::Builder &
OmniWarpComputationSchema::Builder::SetSimulationParams(
    const HdSampledDataSourceHandle &simulationParams)
{
    _simulationParams = simulationParams;
    return *this;
}
// Builds the container from whichever members were set; unset members are
// omitted (see BuildRetained).
HdContainerDataSourceHandle
OmniWarpComputationSchema::Builder::Build()
{
    return OmniWarpComputationSchema::BuildRetained(
        _sourceFile,
        _dependentPrims,
        _simulationParams
    );
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,653 | C++ | 26.702381 | 75 | 0.762089 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpSceneIndex.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef OMNI_WARP_SCENE_INDEX_WARP_SCENE_INDEX_H
#define OMNI_WARP_SCENE_INDEX_WARP_SCENE_INDEX_H
#include <pxr/pxr.h>
#include <pxr/imaging/hd/filteringSceneIndex.h>
#include <pxr/usdImaging/usdImaging/stageSceneIndex.h>
#include "pxr/imaging/hd/primvarSchema.h"
#include "pxr/imaging/hd/meshSchema.h"
#include "api.h"
#include "warpPythonModule.h"
#include "warpComputationSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniWarpSceneIndex);
class _PointsDataSource;
class _WarpMeshDataSource;
///
/// \class OmniWarpSceneIndex
///
/// A filtering scene index that drives Warp-based Python simulations for
/// prims carrying warp computation data. Each such prim gets its own
/// OmniWarpPythonModule instance (created lazily, keyed by prim path) that
/// is used when serving GetPrim() and when reacting to prim notices.
///
class OmniWarpSceneIndex :
    public HdSingleInputFilteringSceneIndexBase
{
public:
    OMNIWARPSCENEINDEX_API
    static OmniWarpSceneIndexRefPtr
    New(const HdSceneIndexBaseRefPtr &inputSceneIndex);
    // Returns the prim at primPath, potentially with simulation results
    // overriding data from the input scene index.
    OMNIWARPSCENEINDEX_API
    HdSceneIndexPrim GetPrim(const SdfPath &primPath) const override;
    OMNIWARPSCENEINDEX_API
    SdfPathVector GetChildPrimPaths(const SdfPath &primPath) const override;
protected:
    OmniWarpSceneIndex(
        const HdSceneIndexBaseRefPtr &inputSceneIndex);
    // Observer hooks forwarded from the input scene index.
    void _PrimsAdded(
        const HdSceneIndexBase &sender,
        const HdSceneIndexObserver::AddedPrimEntries &entries) override;
    void _PrimsRemoved(
        const HdSceneIndexBase &sender,
        const HdSceneIndexObserver::RemovedPrimEntries &entries) override;
    void _PrimsDirtied(
        const HdSceneIndexBase &sender,
        const HdSceneIndexObserver::DirtiedPrimEntries &entries) override;
private:
    // Looks up the Python module previously created for primPath.
    OmniWarpPythonModuleSharedPtr GetWarpPythonModule(const SdfPath &primPath) const;
    // Overload for mesh prims (topology + primvar driven).
    OmniWarpPythonModuleSharedPtr CreateWarpPythonModule(const SdfPath &primPath,
        OmniWarpComputationSchema& warpSchema,
        HdMeshTopologySchema& topologySchema,
        HdPrimvarSchema& primVarSchema,
        UsdImagingStageSceneIndexRefPtr usdImagingSi,
        VtDictionary vtSimParams);
    // Overload for point-based prims (primvar driven only).
    OmniWarpPythonModuleSharedPtr CreateWarpPythonModule(const SdfPath &primPath,
        OmniWarpComputationSchema& warpSchema,
        HdPrimvarSchema& primVarSchema,
        UsdImagingStageSceneIndexRefPtr usdImagingSi,
        VtDictionary vtSimParams);
    // Gathers topology/points of the prims referenced by the schema's
    // dependentPrims relationship.
    void GetDependentMeshData(OmniWarpComputationSchema warpSchema,
        VtIntArray& outIndices,
        VtVec3fArray& outVertices);
    // Each prim with a WarpComputationAPI gets its own Python module instance.
    typedef std::unordered_map<SdfPath, OmniWarpPythonModuleSharedPtr, SdfPath::Hash> _WarpPythonModuleMap;
    mutable _WarpPythonModuleMap _pythonModuleMap;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_WARP_SCENE_INDEX_WARP_SCENE_INDEX_H | 3,199 | C | 31.323232 | 107 | 0.760863 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/wrapTokens.cpp | //
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
// GENERATED FILE. DO NOT EDIT.
#include <boost/python/class.hpp>
#include ".//tokens.h"
PXR_NAMESPACE_USING_DIRECTIVE
namespace {
// Helper to return a static token as a string. We wrap tokens as Python
// strings and for some reason simply wrapping the token using def_readonly
// bypasses to-Python conversion, leading to the error that there's no
// Python type for the C++ TfToken type. So we wrap this functor instead.
class _WrapStaticToken {
public:
    _WrapStaticToken(const TfToken* token) : _token(token) { }
    // Callable used as the property getter: converts the token to std::string.
    std::string operator()() const
    {
        return _token->GetString();
    }
private:
    const TfToken* _token;
};
// Adds a read-only static string property named `name` to the wrapped class,
// backed by `token` via the functor above.
template <typename T>
void
_AddToken(T& cls, const char* name, const TfToken& token)
{
    cls.add_static_property(name,
                            boost::python::make_function(
                                _WrapStaticToken(&token),
                                boost::python::return_value_policy<
                                    boost::python::return_by_value>(),
                                boost::mpl::vector1<std::string>()));
}
} // anonymous
void wrapOmniWarpSceneIndexTokens()
{
boost::python::class_<OmniWarpSceneIndexTokensType, boost::noncopyable>
cls("Tokens", boost::python::no_init);
_AddToken(cls, "warpDependentPrims", OmniWarpSceneIndexTokens->warpDependentPrims);
_AddToken(cls, "warpSourceFile", OmniWarpSceneIndexTokens->warpSourceFile);
_AddToken(cls, "OmniWarpComputationAPI", OmniWarpSceneIndexTokens->OmniWarpComputationAPI);
}
| 2,626 | C++ | 35.999999 | 95 | 0.690023 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpComputationAPIAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/tf/stringUtils.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/usdImaging/usdImaging/dataSourceAttribute.h>
#include "warpComputationAPIAdapter.h"
#include "warpComputationAPI.h"
PXR_NAMESPACE_OPEN_SCOPE
// Tokens for the members this adapter publishes under the prim's
// "warpComputation" container data source.
TF_DEFINE_PRIVATE_TOKENS(
    _tokens,
    (warpComputation)
    (sourceFile)
    (dependentPrims)
    (simulationParams)
);
// Registers WarpComputationAPIAdapter with the TfType system so usdImaging
// can instantiate it for prims that have the applied API schema.
TF_REGISTRY_FUNCTION(TfType)
{
    typedef WarpComputationAPIAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory< UsdImagingAPISchemaAdapterFactory<Adapter> >();
}
// ----------------------------------------------------------------------------
namespace
{
class SimulationParamsDataSource : public HdSampledDataSource
{
public:
HD_DECLARE_DATASOURCE(SimulationParamsDataSource);
SimulationParamsDataSource(
const VtDictionary &dict)
: _customData(dict)
{
}
VtValue
GetValue(Time shutterOffset)
{
return VtValue(_customData);
}
bool
GetContributingSampleTimesForInterval(
Time startTime,
Time endTime,
std::vector<Time> * outSampleTimes)
{
return false;
}
VtDictionary _customData;
};
class DependentPrimsDataSource : public HdPathArrayDataSource
{
public:
HD_DECLARE_DATASOURCE(DependentPrimsDataSource);
DependentPrimsDataSource(
const UsdRelationship &rel)
: _usdRel(rel)
{
}
VtValue
GetValue(
HdSampledDataSource::Time shutterOffset)
{
return VtValue(GetTypedValue(shutterOffset));
}
VtArray<SdfPath>
GetTypedValue(
HdSampledDataSource::Time shutterOffset)
{
SdfPathVector paths;
_usdRel.GetForwardedTargets(&paths);
VtArray<SdfPath> vtPaths(paths.begin(), paths.end());
return vtPaths;
}
bool
GetContributingSampleTimesForInterval(
HdSampledDataSource::Time startTime,
HdSampledDataSource::Time endTime,
std::vector<HdSampledDataSource::Time> *outSampleTimes)
{
return false;
}
private:
UsdRelationship _usdRel;
};
HD_DECLARE_DATASOURCE_HANDLES(DependentPrimsDataSource);
// Container data source serving the warp computation members read from the
// applied API schema on a USD prim: sourceFile, dependentPrims, and (when
// the sourceFile attribute carries customData) simulationParams.
class _WarpComputationDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(_WarpComputationDataSource);
    _WarpComputationDataSource(
        const UsdPrim &prim,
        const UsdImagingDataSourceStageGlobals &stageGlobals)
    : _api(prim)
    , _stageGlobals(stageGlobals)
    {
    }
    TfTokenVector GetNames() override
    {
        TfTokenVector result;
        result.reserve(4);
        // NOTE(review): warpComputation is advertised here but Get() below
        // has no handler for it and returns null — confirm whether callers
        // rely on seeing this name.
        result.push_back(_tokens->warpComputation);
        if (UsdAttribute attr = _api.GetSourceFileAttr()) {
            result.push_back(_tokens->sourceFile);
            // simulationParams is only present when the sourceFile
            // attribute has non-empty customData.
            VtDictionary customData = attr.GetCustomData();
            VtDictionary::iterator iter = customData.begin();
            if (iter != customData.end())
            {
                result.push_back(_tokens->simulationParams);
            }
        }
        if (_api.GetDependentPrimsRel()) {
            result.push_back(_tokens->dependentPrims);
        }
        return result;
    }
    HdDataSourceBaseHandle Get(const TfToken &name) override {
        if (name == _tokens->sourceFile)
        {
            if (UsdAttribute attr = _api.GetSourceFileAttr())
            {
                return UsdImagingDataSourceAttributeNew(attr, _stageGlobals);
            }
        }
        else if (name == _tokens->dependentPrims)
        {
            if (UsdRelationship rel = _api.GetDependentPrimsRel())
            {
                return DependentPrimsDataSource::New(rel);
            }
        }
        else if (name == _tokens->simulationParams)
        {
            if (UsdAttribute attr = _api.GetSourceFileAttr())
            {
                // Non-empty customData on sourceFile becomes the
                // simulation-parameter dictionary.
                VtDictionary customData = attr.GetCustomData();
                VtDictionary::iterator iter = customData.begin();
                if (iter != customData.end())
                {
                    return SimulationParamsDataSource::New(customData);
                }
            }
        }
        return nullptr;
    }
private:
    OmniWarpSceneIndexWarpComputationAPI _api;
    const UsdImagingDataSourceStageGlobals &_stageGlobals;
};
HD_DECLARE_DATASOURCE_HANDLES(_WarpComputationDataSource);
} // anonymous namespace
// ----------------------------------------------------------------------------
// Contributes a "warpComputation" container data source for prims whose
// sourceFile attribute is authored and non-empty; returns null (no
// contribution) otherwise.
HdContainerDataSourceHandle
WarpComputationAPIAdapter::GetImagingSubprimData(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals &stageGlobals)
{
    OmniWarpSceneIndexWarpComputationAPI _api(prim);
    // Guard against prims without the attribute: calling Get() on an
    // invalid UsdAttribute raises a coding error and can never succeed.
    UsdAttribute attr = _api.GetSourceFileAttr();
    std::string pythonModuleName;
    if (attr && attr.Get(&pythonModuleName, 0.f) && !pythonModuleName.empty())
    {
        return HdRetainedContainerDataSource::New(
            _tokens->warpComputation,
            _WarpComputationDataSource::New(
                prim, stageGlobals));
    }
    return nullptr;
}
// Invalidation callback. USD 23.08 added an invalidationType parameter, so
// the signature is selected on PXR_VERSION; both variants share one body.
#if PXR_VERSION < 2308
HdDataSourceLocatorSet
WarpComputationAPIAdapter::InvalidateImagingSubprim(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    TfTokenVector const& properties)
#else
HdDataSourceLocatorSet
WarpComputationAPIAdapter::InvalidateImagingSubprim(
    UsdPrim const& prim,
    TfToken const& subprim,
    TfToken const& appliedInstanceName,
    TfTokenVector const& properties,
    const UsdImagingPropertyInvalidationType invalidationType)
#endif
{
// NOTE(review): invalidation is effectively disabled. The dead code below
// appears to be template material from another adapter — it references
// _tokens->usdCollections, which is not defined in this file's tokens and
// would not compile if enabled.
#if 0
    if (!subprim.IsEmpty() || appliedInstanceName.IsEmpty()) {
        return HdDataSourceLocatorSet();
    }
    std::string prefix = TfStringPrintf(
        "collections:%s:", appliedInstanceName.data());
    for (const TfToken &propertyName : properties) {
        if (TfStringStartsWith(propertyName.GetString(), prefix)) {
            return HdDataSourceLocator(
                _tokens->usdCollections, appliedInstanceName);
        }
    }
#endif
    return HdDataSourceLocatorSet();
}
PXR_NAMESPACE_CLOSE_SCOPE
| 6,767 | C++ | 25.4375 | 79 | 0.645338 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpPythonModule.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <pxr/base/tf/pyInvoke.h>
#include <pxr/base/tf/errorMark.h>
#include <pxr/base/tf/pyExceptionState.h>
#include <pxr/base/tf/pyInterpreter.h>
#include <pxr/imaging/hd/tokens.h>
#include "warpPythonModule.h"
#include "tokens.h"
PXR_NAMESPACE_OPEN_SCOPE
// Binds one prim to one Warp Python module. The stage scene index handle is
// used later to query the current stage time in ExecSim().
OmniWarpPythonModule::OmniWarpPythonModule(const SdfPath &primPath,
    const std::string& moduleName, UsdImagingStageSceneIndexConstRefPtr usdImagingSi)
    : _primPath(primPath),
    _moduleName(moduleName),
    _usdImagingSi(usdImagingSi)
{
}
// Notifies the Python module that the simulation for this prim is going
// away. TfPyLock acquires the GIL for the boost::python call.
OmniWarpPythonModule::~OmniWarpPythonModule()
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "terminate_sim", &result, _primPath);
}
// Invokes the module's "initialize_sim_mesh" with the prim's topology and
// points plus the dependent mesh data and simulation parameters.
void OmniWarpPythonModule::InitMesh(VtIntArray indices, VtVec3fArray vertices,
    VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams)
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "initialize_sim_mesh", &result, _primPath, indices, vertices,
        depIndices, depVertices, simParams);
}
// Invokes the module's "initialize_sim_particles" for point-based prims.
void OmniWarpPythonModule::InitParticles(
    VtVec3fArray positions, VtIntArray depIndices, VtVec3fArray depVertices, VtDictionary simParams)
{
    TfPyLock pyLock;
    boost::python::object result;
    TfPyInvokeAndReturn(_moduleName.c_str(), "initialize_sim_particles", &result,
        _primPath, positions, depIndices, depVertices, simParams);
}
// Overload for computations without a dependent mesh: forwards an empty
// vertex array to the two-argument overload.
VtVec3fArray OmniWarpPythonModule::ExecSim(VtDictionary simParams)
{
    return ExecSim(simParams, VtVec3fArray());
}
// Invokes "exec_sim" in the configured Python module, passing the current
// stage time (0 when no stage scene index is available), the dependent
// vertices, and the simulation parameters. Returns the simulated points,
// or an empty array when the call fails or yields a non-Vec3f result.
VtVec3fArray OmniWarpPythonModule::ExecSim(VtDictionary simParams, VtVec3fArray dependentVertices)
{
    TfPyLock lock;
    float simTime = 0.f;
    if (_usdImagingSi)
    {
        simTime = _usdImagingSi->GetTime().GetValue();
    }
    boost::python::object pyResult;
    const bool invoked = TfPyInvokeAndReturn(
        _moduleName.c_str(), "exec_sim", &pyResult,
        _primPath, simTime, dependentVertices, simParams);
    if (invoked)
    {
        boost::python::extract<VtVec3fArray> points(pyResult);
        if (points.check())
        {
            return points();
        }
    }
    return VtVec3fArray();
}
PXR_NAMESPACE_CLOSE_SCOPE | 2,735 | C++ | 30.090909 | 115 | 0.729068 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/particles.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import warp as wp
import warp.sim
import warp.sim.render
import numpy as np
from pxr import Vt, Sdf
wp.init()
global_examples = {}
# NOTE(review): "need radius of sphere" -- presumably self.radius below
# should be derived from the source prim; confirm intent.
class Example2:
    """Fixed-step Warp particle simulation (60 fps, 64 substeps per frame).

    Construction only prepares timing constants and a ModelBuilder; the
    model, ping-pong states, and integrator are attached afterwards by
    initialize_sim_particles().
    """
    def __init__(self):
        self.frame_dt = 1.0 / 60
        self.frame_count = 400
        self.sim_substeps = 64
        self.sim_dt = self.frame_dt / self.sim_substeps
        self.sim_steps = self.frame_count * self.sim_substeps
        self.sim_time = 0.0
        self.radius = 0.1
        self.builder = wp.sim.ModelBuilder()
        self.builder.default_particle_radius = self.radius
    def update(self):
        """Advance the simulation by one rendered frame."""
        # Rebuild the particle neighborhood grid before integrating.
        self.model.particle_grid.build(self.state_0.particle_q, self.radius * 2.0)
        for s in range(self.sim_substeps):
            self.state_0.clear_forces()
            self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
            # swap states
            (self.state_0, self.state_1) = (self.state_1, self.state_0)
def terminate_sim(primPath: Sdf.Path):
    """Release the simulation associated with primPath.

    Popping the entry (instead of storing None, which leaked a key per
    terminated prim) lets the Example2 instance be garbage collected and
    keeps the registry from growing without bound.
    """
    global global_examples
    global_examples.pop(primPath, None)
def initialize_sim_particles(primPath: Sdf.Path,
    src_positions: Vt.Vec3fArray, dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Create and register a particle simulation for primPath.

    One particle is emitted per source position with initial velocity
    (5, 0, 0) and mass 0.1; contact stiffness/damping are then tuned before
    the two ping-pong states are allocated.
    """
    global global_examples
    example = Example2()
    global_examples[primPath] = example
    for position in src_positions:
        example.builder.add_particle(position, (5.0, 0.0, 0.0), 0.1)
    example.model = example.builder.finalize()
    example.model.particle_kf = 25.0
    example.model.soft_contact_kd = 100.0
    example.model.soft_contact_kf *= 2.0
    example.state_0 = example.model.state()
    example.state_1 = example.model.state()
    example.integrator = wp.sim.SemiImplicitIntegrator()
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Step the particle simulation for primPath and return its positions."""
    # sim_dt is ignored; the example advances by its own fixed step.
    global global_examples
    example = global_examples[primPath]
    example.update()
    return Vt.Vec3fArray.FromNumpy(example.state_0.particle_q.numpy())
| 2,841 | Python | 33.658536 | 136 | 0.697994 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/cloth.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Cloth
#
# Shows a simulation of an FEM cloth model colliding against a static
# rigid body mesh using the wp.sim.ModelBuilder().
#
###########################################################################
import os
import math
import numpy as np
import warp as wp
import warp.sim
import warp.sim.render
from pxr import Usd, UsdGeom, Vt, Sdf
import sys
wp.init()
global_examples = {}
class Example:
    """FEM cloth (procedural grid) colliding with a static mesh collider.

    The collider geometry comes from the dependent prim's indices/points;
    the cloth itself is a generated 64x64 grid pinned on its left edge.
    """
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        self.sim_width = 64
        self.sim_height = 64
        # Fixed-step timing: each rendered frame advances 32 substeps.
        self.frame_dt = 1.0 / 60
        self.frame_count = 400
        self.sim_substeps = 32
        self.sim_dt = self.frame_dt / self.sim_substeps
        self.sim_steps = self.frame_count * self.sim_substeps
        self.sim_time = 0.0
        builder = wp.sim.ModelBuilder()
        # sim BCs
        clothEdgeBendingStiffness = 0.01
        clothEdgeDampingStiffness = 0.0
        clothTriAreaStiffness = 1000000.0
        clothTriDampingStiffness = 100.0
        clothTriElasticStiffness = 1000000.0
        colliderContactDistance = 1.0
        colliderContactQueryRange = 100.0
        contactDampingStiffness = 10000.0
        contactElasticStiffness = 500000.0
        contactFrictionCoeff = 0.75
        contactFrictionStiffness = 10000.0
        globalScale = 0.01
        # cloth grid
        builder.add_cloth_grid(
            pos=(0.0, 50.0, -25.0),
            rot=wp.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.5),
            vel=(0.0, 0.0, 0.0),
            dim_x=self.sim_width,
            dim_y=self.sim_height,
            cell_x=1.0,
            cell_y=1.0,
            mass=0.1,
            fix_left=True,
            tri_ke=clothTriElasticStiffness * globalScale,
            tri_ka=clothTriAreaStiffness * globalScale,
            tri_kd=clothTriDampingStiffness * globalScale,
            edge_ke=clothEdgeBendingStiffness * globalScale,
            edge_kd=clothEdgeDampingStiffness * globalScale
        )
        # add collider (must have identity transform until we have xforms
        # piped through the Hydra plugin)
        mesh = wp.sim.Mesh(points, indices)
        builder.add_shape_mesh(
            body=-1,
            mesh=mesh,
            pos=(0.0, 0.0, 0.0),
            rot=wp.quat_identity(),
            scale=(1.0, 1.0, 1.0),
            ke=1.0e2,
            kd=1.0e2,
            kf=1.0e1,
        )
        # set sim BCs
        self.model = builder.finalize()
        self.model.ground = True
        self.model.allocate_soft_contacts(self.model.particle_count)
        # NOTE(review): gravity of -980 implies centimeter units — confirm.
        self.model.gravity = (0, -980, 0)
        self.model.soft_contact_ke = contactElasticStiffness * globalScale
        self.model.soft_contact_kf = contactFrictionStiffness * globalScale
        self.model.soft_contact_mu = contactFrictionCoeff
        self.model.soft_contact_kd = contactDampingStiffness * globalScale
        self.model.soft_contact_margin = colliderContactDistance * colliderContactQueryRange
        self.model.particle_radius = colliderContactDistance
        self.integrator = wp.sim.SemiImplicitIntegrator()
        self.state_0 = self.model.state()
        self.state_1 = self.model.state()
    def update(self, sim_time: float):
        """Advance one frame: collide then integrate sim_substeps substeps."""
        wp.sim.collide(self.model, self.state_0)
        for s in range(self.sim_substeps):
            self.state_0.clear_forces()
            self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
            # ping-pong the two states
            (self.state_0, self.state_1) = (self.state_1, self.state_0)
def terminate_sim(primPath: Sdf.Path):
    """Release the cloth simulation associated with primPath.

    Popping the entry (instead of storing None, which leaked a key per
    terminated prim) lets the Example instance be garbage collected.
    """
    global global_examples
    global_examples.pop(primPath, None)
def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Create and register a cloth simulation for primPath.

    The source mesh arguments are unused: the cloth grid is procedural and
    the dependent mesh acts as the static collider.
    """
    global global_examples
    if dep_mesh_indices is None or dep_mesh_points is None:
        # Fail with a clear message instead of a cryptic error inside
        # wp.sim.Mesh when no collider relationship was authored.
        raise ValueError(
            "cloth.initialize_sim_mesh requires dependent mesh indices and "
            "points (authored via the dependentPrims relationship)")
    global_examples[primPath] = Example(dep_mesh_indices, dep_mesh_points)
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Step the cloth simulation and return the current particle positions."""
    # sim_dt is forwarded but the example steps by its own internal clock.
    global global_examples
    example = global_examples[primPath]
    example.update(sim_dt)
    return Vt.Vec3fArray.FromNumpy(example.state_0.particle_q.numpy())
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/deform01.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warp as wp
import numpy as np
from pxr import Vt, Sdf
# Warp kernel: sinusoidally displaces each point along Y, in place. The
# per-point offset is driven by the point's X coordinate and scaled by
# sin(t), producing a time-varying ripple.
@wp.kernel
def deform(positions: wp.array(dtype=wp.vec3), t: float):
    tid = wp.tid()
    x = positions[tid]
    offset = -wp.sin(x[0]) * 0.06
    scale = wp.sin(t)
    x = x + wp.vec3(0.0, offset * scale, 0.0)
    positions[tid] = x
class Example:
    """Wraps a source mesh in a wp.Mesh and ripples it via the deform kernel."""
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        self.mesh = wp.Mesh(
            points=wp.array(points, dtype=wp.vec3),
            indices=wp.array(indices, dtype=int),
        )
    def update(self, sim_time: float):
        """Deform the mesh points for sim_time (seconds-like units)."""
        wp.launch(kernel=deform, dim=len(self.mesh.points), inputs=[self.mesh.points, sim_time])
        # refit the mesh BVH to account for the deformation
        self.mesh.refit()
wp.init()
global_examples = {}
def terminate_sim(primPath: Sdf.Path):
    """Release the deformer associated with primPath.

    Popping the entry (instead of storing None, which leaked a key per
    terminated prim) lets the Example instance be garbage collected.
    """
    global global_examples
    global_examples.pop(primPath, None)
def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Create and register a deformer for primPath from the source mesh.

    Dependent-mesh arguments and sim_params are accepted for interface
    parity with the other warp modules but are unused here.
    """
    global global_examples
    global_examples[primPath] = Example(src_indices, src_points)
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Advance the deformer and return the updated mesh points."""
    global global_examples
    # Hydra delivers 60 samples per second; convert to kernel time units.
    example = global_examples[primPath]
    example.update(sim_dt / 60.0)
    return Vt.Vec3fArray.FromNumpy(example.mesh.points.numpy())
def is_enabled():
    """Opt-in hook — presumably queried by the host plugin (confirm); this
    module is always enabled."""
    return True
| 2,140 | Python | 31.439393 | 112 | 0.693458 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/warpModules/ocean.py | # Copyright 2023 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warp as wp
import numpy as np
from pxr import Vt, Sdf
wp.init()
# Default wave parameters; replaced wholesale by the authored customData
# dict when initialize_sim_mesh() receives sim_params.
sim_params_global = {
    'wave_amplitude': 1.5,
    'wave_directionality': 0.0,
    'wind_speed': 10.0,
    'water_depth': 50.0,
    'scale': 1.0,
    'direction': 0.0,
}
#warp function definitions
# fractional part of a (w.r.t. floor(a))
@wp.func
def frac(a: float):
    return a - wp.floor(a)
# square of a
@wp.func
def sqr(a: float):
    return a * a
# Generic alpha/beta wave spectrum evaluated at angular frequency omega.
@wp.func
def alpha_beta_spectrum(omega: float,
                        peak_omega: float,
                        alpha: float,
                        beta: float,
                        gravity: float):
    return ( (alpha * gravity * gravity / wp.pow(omega, 5.0)) * wp.exp(- beta * wp.pow(peak_omega/omega, 4.0)) )
# JONSWAP peak-sharpening factor gamma^exp (sigma switches at the peak).
@wp.func
def jonswap_peak_sharpening(omega: float,
                            peak_omega: float,
                            gamma: float):
    sigma = float(0.07)
    if omega > peak_omega:
        sigma = float(0.09)
    return wp.pow(gamma, wp.exp(- 0.5 * sqr( (omega - peak_omega) / (sigma * peak_omega)) ))
@wp.func
def jonswap_spectrum(omega: float,
                     gravity: float,
                     wind_speed: float,
                     fetch_km: float,
                     gamma: float):
    #https://wikiwaves.org/Ocean-Wave_Spectra#JONSWAP_Spectrum
    fetch = 1000.0 * fetch_km
    alpha = 0.076 * wp.pow(wind_speed * wind_speed / (gravity * fetch), 0.22)
    peak_omega = 22.0 * wp.pow(wp.abs(gravity * gravity / (wind_speed * fetch)), 1.0/3.0)
    return (jonswap_peak_sharpening(omega, peak_omega, gamma) * alpha_beta_spectrum(omega, peak_omega, alpha, 1.25, gravity))
# TMA spectrum: JONSWAP attenuated for finite water depth.
# https://dl.acm.org/doi/10.1145/2791261.2791267
@wp.func
def TMA_spectrum(omega: float,
                 gravity: float,
                 wind_speed: float,
                 fetch_km: float,
                 gamma: float,
                 waterdepth: float):
    # Depth-limited attenuation factor phi, clamped via omegaH in [0, 2.2].
    omegaH = omega * wp.sqrt(waterdepth / gravity)
    omegaH = wp.max(0.0, wp.min(2.2, omegaH))
    phi = 0.5 * omegaH * omegaH
    if omegaH > 1.0:
        phi = 1.0 - 0.5 * sqr(2.0 - omegaH)
    # (stray trailing semicolon removed; not idiomatic Python)
    return phi * jonswap_spectrum(omega, gravity, wind_speed, fetch_km, gamma)
#warp kernel definitions
# Builds a 1D, tileable displacement profile for the current time by summing
# wave components sampled linearly in omega between the wavelengths
# [lambdaMin, lambdaMax]; three phase-shifted copies are cubically blended
# so the profile tiles seamlessly over profile_extend.
@wp.kernel
def update_profile(profile: wp.array(dtype=wp.vec3),
                   profile_res: int,
                   profile_data_num: int,
                   lambdaMin: float,
                   lambdaMax: float,
                   profile_extend: float,
                   time: float,
                   windspeed: float,
                   waterdepth: float
                   ):
    x = wp.tid()
    randself = wp.rand_init(7)
    # sampling parameters
    omega0 = wp.sqrt(2.0 * 3.14159 * 9.80665 / lambdaMin)
    omega1 = wp.sqrt(2.0 * 3.14159 * 9.80665 / lambdaMax)
    omega_delta = wp.abs(omega1 - omega0) / float(profile_data_num)
    # we blend three displacements for seamless spatial profile tiling
    space_pos_1 = profile_extend * float(x) / float(profile_res)
    space_pos_2 = space_pos_1 + profile_extend
    space_pos_3 = space_pos_1 - profile_extend
    p1 = wp.vec2(0.0,0.0)
    p2 = wp.vec2(0.0,0.0)
    p3 = wp.vec2(0.0,0.0)
    for i in range(0, profile_data_num):
        omega = wp.abs(omega0 + (omega1 - omega0) * float(i) / float(profile_data_num)) # linear sampling of omega
        k = omega * omega / 9.80665
        phase = -time * omega + wp.randf(randself) * 2.0 * 3.14159
        amplitude = float(10000.0) * wp.sqrt(wp.abs(2.0 * omega_delta * TMA_spectrum(omega, 9.80665, windspeed, 100.0, 3.3, waterdepth)))
        p1 = wp.vec2( p1[0] + amplitude * wp.sin(phase + space_pos_1 * k), p1[1] - amplitude * wp.cos(phase + space_pos_1 * k) )
        p2 = wp.vec2( p2[0] + amplitude * wp.sin(phase + space_pos_2 * k), p2[1] - amplitude * wp.cos(phase + space_pos_2 * k) )
        p3 = wp.vec2( p3[0] + amplitude * wp.sin(phase + space_pos_3 * k), p3[1] - amplitude * wp.cos(phase + space_pos_3 * k) )
    # cubic blending coefficients
    s = float(float(x) / float(profile_res))
    c1 = float(2.0 * s * s * s - 3.0 * s * s + 1.0)
    c2 = float(-2.0 * s * s * s + 3.0 * s * s)
    disp_out = wp.vec3( (p1[0] + c1 * p2[0] + c2 * p3[0]) / float(profile_data_num), (p1[1] + c1 * p2[1] + c2 * p3[1]) / float(profile_data_num), 0. )
    wp.store(profile, x, disp_out)
# Displaces each input point by sampling the 1D profile along many
# directions (Gerstner-style sum). Note in/out points are swizzled from
# (x, y, z) to (x, z, y) on read and back on write.
@wp.kernel
def update_points(out_points: wp.array(dtype=wp.vec3),
                  in_points: wp.array(dtype=wp.vec3),
                  profile: wp.array(dtype=wp.vec3),
                  profile_res: int,
                  profile_extent: float,
                  amplitude: float,
                  directionality: float,
                  direction: float,
                  antiAlias: int,
                  camPosX: float,
                  camPosY: float,
                  camPosZ: float):
    tid = wp.tid()
    p_crd = in_points[tid]
    p_crd = wp.vec3(p_crd[0], p_crd[2], p_crd[1])
    randself = wp.rand_init(7)
    disp_x = float(0.)
    disp_y = float(0.)
    disp_z = float(0.)
    w_sum = float(0.)
    direction_count = (int)(128)
    for d in range(0, direction_count):
        r = float(d) * 2. * 3.14159265359 / float(direction_count) + 0.02
        dir_x = wp.cos(r)
        dir_y = wp.sin(r)
        # directional amplitude
        t = wp.abs( direction - r )
        if (t > 3.14159265359):
            t = 2.0 * 3.14159265359 - t
        t = pow(t, 1.2)
        dirAmp = (2.0 * t * t * t - 3.0 * t * t + 1.0) * 1.0 + (- 2.0 * t * t * t + 3.0 * t * t) * (1.0 - directionality)
        dirAmp = dirAmp / (1.0 + 10.0 * directionality)
        rand_phase = wp.randf(randself)
        x_crd = (p_crd[0] * dir_x + p_crd[2] * dir_y) / profile_extent + rand_phase
        pos_0 = int(wp.floor(x_crd * float(profile_res))) % profile_res
        if x_crd < 0.:
            pos_0 = pos_0 + profile_res - 1
        pos_1 = int(pos_0 + 1) % profile_res
        p_disp_0 = profile[pos_0]
        p_disp_1 = profile[pos_1]
        # linear interpolation between the two nearest profile samples
        w = frac( x_crd * float(profile_res) )
        prof_height_x = dirAmp * float((1. - w) * p_disp_0[0] + w * p_disp_1[0])
        prof_height_y = dirAmp * float((1. - w) * p_disp_0[1] + w * p_disp_1[1])
        disp_x = disp_x + dir_x * prof_height_x
        disp_y = disp_y + prof_height_y
        disp_z = disp_z + dir_y * prof_height_x
        w_sum = w_sum + 1.
    # simple anti-aliasing: reduce amplitude with increasing distance to viewpoint
    if (antiAlias > 0):
        v1 = wp.normalize( wp.vec3( p_crd[0] - camPosX, max( 100.0, wp.abs(p_crd[1] - camPosY)), p_crd[2] - camPosZ) )
        amplitude *= wp.sqrt( wp.abs(v1[1]) )
    # write output vertex position
    outP = wp.vec3(p_crd[0] + amplitude * disp_x / w_sum, p_crd[1] + amplitude * disp_y / w_sum, p_crd[2] + amplitude * disp_z / w_sum)
    wp.store(out_points, tid, wp.vec3(outP[0], outP[2], outP[1]))
class Example:
    """Ocean deformer: keeps the undeformed points plus a CUDA profile
    buffer, and rewrites points_out from points_in every update().

    Wave behavior is read from the module-level sim_params_global dict.
    """
    def __init__(self, indices: Vt.IntArray, points: Vt.Vec3fArray):
        # profile buffer initializations
        print('[Ocean deformer] Initializing profile buffer.')
        self.profile_extent = 410.0 #physical size of profile, should be around half the resolution
        self.profile_res = int(8192)
        self.profile_wavenum = int(1000)
        self.profile_CUDA = wp.zeros(self.profile_res, dtype=wp.vec3, device="cuda:0")
        self.points_in = wp.array(points, dtype=wp.vec3, device="cuda:0")
        self.points_out = wp.array(points, dtype=wp.vec3, device="cuda:0")
        print(self.points_in)
        print(self.points_out)
    def update(self, sim_time: float):
        """Rebuild the wave profile for sim_time and displace the points."""
        global sim_params_global
        # params
        wave_amplitude = sim_params_global["wave_amplitude"]
        wave_directionality = sim_params_global["wave_directionality"]
        wind_speed = sim_params_global["wind_speed"]
        water_depth = sim_params_global["water_depth"]
        scale = sim_params_global["scale"]
        direction = sim_params_global["direction"]
        # Parameters (clamped to sane simulation ranges)
        time = float(sim_time)
        amplitude = max(0.0001, min(1000.0, float(wave_amplitude)))
        minWavelength = 0.1
        maxWavelength = 250.0
        direction = float(direction) % 6.28318530718
        directionality = max(0.0, min(1.0, 0.02 * float(wave_directionality)))
        windspeed = max(0.0, min(30.0, float(wind_speed)))
        waterdepth = max(1.0, min(1000.0, float(water_depth)))
        scale = min(10000.0, max(0.001, float(scale)))
        antiAlias = int(0)
        campos = [0.0, 0.0, 0.0]
        # create 1D profile buffer for this timestep using wave parameters stored in internal self CUDA memory
        wp.launch(
            kernel=update_profile,
            dim=self.profile_res,
            inputs=[self.profile_CUDA, int(self.profile_res), int(self.profile_wavenum), float(minWavelength), float(maxWavelength), float(self.profile_extent), float(time), float(windspeed), float(waterdepth)],
            outputs=[],
            device="cuda:0")
        # update point positions using the profile buffer created above
        wp.launch(
            kernel=update_points,
            dim=len(self.points_out),
            inputs=[self.points_out, self.points_in, self.profile_CUDA, int(self.profile_res), float(self.profile_extent*scale), float(amplitude), float(directionality), float(direction), int(antiAlias), float(campos[0]), float(campos[1]), float(campos[2]) ],
            outputs=[],
            device="cuda:0")
# Registry of live simulations, keyed by the prim path that owns them.
global_examples = {}

def terminate_sim(primPath: Sdf.Path):
    """Release the simulation state held for `primPath`.

    The previous implementation assigned ``None`` into the dict, which kept
    the key (and a stale entry) alive forever; popping actually removes the
    entry so repeated create/terminate cycles do not grow the registry.
    """
    global global_examples
    global_examples.pop(primPath, None)
def initialize_sim_mesh(primPath: Sdf.Path, src_indices: Vt.IntArray, src_points: Vt.Vec3fArray,
    dep_mesh_indices: Vt.IntArray = None, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Create (or replace) the ocean simulation registered for `primPath`.

    `dep_mesh_indices` / `dep_mesh_points` are accepted for interface
    compatibility but are currently unused.
    """
    global sim_params_global
    global global_examples
    # A non-empty params dict replaces the module-wide parameter set.
    if sim_params:
        sim_params_global = sim_params
    sim = Example(src_indices, src_points)
    global_examples[primPath] = sim
def exec_sim(primPath: Sdf.Path, sim_dt: float, dep_mesh_points: Vt.Vec3fArray = None, sim_params: dict = None):
    """Step the simulation registered for `primPath` and return the displaced points."""
    global sim_params_global
    global global_examples
    # A non-empty params dict replaces the module-wide parameter set.
    if sim_params:
        sim_params_global = sim_params
    example = global_examples[primPath]
    # The sim is authored for 60 samples per second (hydra time of 1.0).
    example.update(sim_dt / 60.0)
    return Vt.Vec3fArray.FromNumpy(example.points_out.numpy())
| 11,029 | Python | 37.838028 | 260 | 0.580288 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/preferences.py | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr.Usdviewq.qt import QtCore, QtGui, QtWidgets
from .preferencesUI import Ui_Preferences
class Preferences(QtWidgets.QDialog):
    """Dialog that edits the ocean-sim settings stored in the 'customData'
    metadata dictionary of a USD attribute.

    The original listed the six customData keys twice (once in ``__init__``,
    once in ``_apply``), so adding a setting required touching both methods
    in lockstep; the load and save paths now share a single key->widget map.
    """

    # customData key -> name of the Ui_Preferences spin box that edits it.
    _FIELDS = {
        "scale": "scaleSpinBox",
        "direction": "directionSpinBox",
        "wind_speed": "windSpeedSpinBox",
        "water_depth": "waterDepthSpinBox",
        "wave_amplitude": "waveAmplitudeSpinBox",
        "wave_directionality": "waveDirectionalitySpinBox",
    }

    def __init__(self, parent, attr):
        """Build the UI and populate it from `attr`'s customData metadata."""
        super(Preferences, self).__init__(parent)
        self._ui = Ui_Preferences()
        self._ui.setupUi(self)
        self._attr = attr
        metadata = self._attr.GetMetadata("customData")
        for key, boxName in self._FIELDS.items():
            getattr(self._ui, boxName).setValue(metadata[key])
        self._ui.buttonBox.clicked.connect(self._buttonBoxButtonClicked)

    def _apply(self):
        # Write every edited value back into the attribute's customData dict.
        for key, boxName in self._FIELDS.items():
            self._attr.SetMetadataByDictKey('customData', key, getattr(self._ui, boxName).value())

    def _buttonBoxButtonClicked(self, button):
        """Apply on OK/Apply; close on OK/Cancel."""
        role = self._ui.buttonBox.buttonRole(button)
        Roles = QtWidgets.QDialogButtonBox.ButtonRole
        if role == Roles.AcceptRole or role == Roles.ApplyRole:
            self._apply()
        if role == Roles.AcceptRole or role == Roles.RejectRole:
            self.close()
| 2,923 | Python | 46.16129 | 120 | 0.718782 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/__init__.py | from pxr import Tf
from pxr.Usdviewq.plugin import PluginContainer
from .preferences import Preferences
def launchPreferences(usdviewApi):
    """Open the ocean-sim preferences dialog for the hard-coded grid prim."""
    gridPrim = usdviewApi.stage.GetPrimAtPath("/World/grid/Grid")
    sourceFileAttr = gridPrim.GetAttribute("warp:sourceFile")
    dialog = Preferences(usdviewApi.qMainWindow, sourceFileAttr)
    dialog.show()
    # Drop our local reference; the Qt parent (qMainWindow) keeps the
    # dialog alive after show().
    dialog = None
class OceanSimPluginContainer(PluginContainer):
    """usdview plugin container that adds an 'OceanSim' menu with a single
    command for launching the ocean-sim preferences dialog."""
    def registerPlugins(self, plugRegistry, usdviewApi):
        # Register the command; the returned handle is reused in configureView.
        self._launchPreferences = plugRegistry.registerCommandPlugin(
            "OceanSimPluginContainer.launchPreferences",
            "Launch Preferences",
            launchPreferences)
    def configureView(self, plugRegistry, plugUIBuilder):
        # Attach the command to the (possibly pre-existing) OceanSim menu.
        tutMenu = plugUIBuilder.findOrCreateMenu("OceanSim")
        tutMenu.addItem(self._launchPreferences)
# usdview discovers plugin containers through the Tf type registry.
Tf.Type.Define(OceanSimPluginContainer)
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniWarpSceneIndex/oceanSim/preferencesUI_pyside6.py | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'preferencesUI.ui'
##
## Created by: Qt User Interface Compiler version 6.5.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor,
QFont, QFontDatabase, QGradient, QIcon,
QImage, QKeySequence, QLinearGradient, QPainter,
QPalette, QPixmap, QRadialGradient, QTransform)
from PySide6.QtWidgets import (QAbstractButton, QApplication, QDialog, QDialogButtonBox,
QDoubleSpinBox, QFrame, QHBoxLayout, QLabel,
QSizePolicy, QSpacerItem, QVBoxLayout, QWidget)
class Ui_Preferences(object):
    """Generated by pyside6-uic from preferencesUI.ui.

    NOTE(review): do not hand-edit the widget construction below -- it will be
    overwritten the next time the .ui file is recompiled. Each horizontalLayout
    block builds one labeled QDoubleSpinBox row for a simulation setting.
    """
    def setupUi(self, Ocean_Simulation_Settings):
        if not Ocean_Simulation_Settings.objectName():
            Ocean_Simulation_Settings.setObjectName(u"Ocean_Simulation_Settings")
        Ocean_Simulation_Settings.resize(295, 99)
        self.verticalLayout = QVBoxLayout()
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.prefsOverButtonsLayout = QVBoxLayout()
        self.prefsOverButtonsLayout.setObjectName(u"prefsOverButtonsLayout")
        # Scale row
        self.horizontalLayout_3 = QHBoxLayout()
        self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
        self.scaleLabel = QLabel()
        self.scaleLabel.setObjectName(u"scaleLabel")
        self.horizontalLayout_3.addWidget(self.scaleLabel)
        self.horizontalSpacer_2a = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(self.horizontalSpacer_2a)
        self.scaleSpinBox = QDoubleSpinBox()
        self.scaleSpinBox.setObjectName(u"scaleSpinBox")
        self.scaleSpinBox.setDecimals(2)
        self.scaleSpinBox.setMinimum(0.000000000000000)
        self.scaleSpinBox.setValue(1.000000000000000)
        self.horizontalLayout_3.addWidget(self.scaleSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_3)
        # Direction row
        self.horizontalLayout_4 = QHBoxLayout()
        self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
        self.directionLabel = QLabel()
        self.directionLabel.setObjectName(u"directionLabel")
        self.horizontalLayout_4.addWidget(self.directionLabel)
        self.horizontalSpacer_2b = QSpacerItem(26, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(self.horizontalSpacer_2b)
        self.directionSpinBox = QDoubleSpinBox()
        self.directionSpinBox.setObjectName(u"directionSpinBox")
        self.directionSpinBox.setDecimals(2)
        self.directionSpinBox.setMinimum(0.000000000000000)
        self.directionSpinBox.setValue(0.000000000000000)
        self.horizontalLayout_4.addWidget(self.directionSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_4)
        # Wind speed row
        self.horizontalLayout_5 = QHBoxLayout()
        self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
        self.windSpeedLabel = QLabel()
        self.windSpeedLabel.setObjectName(u"windSpeedLabel")
        self.horizontalLayout_5.addWidget(self.windSpeedLabel)
        self.horizontalSpacer_2c = QSpacerItem(24, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(self.horizontalSpacer_2c)
        self.windSpeedSpinBox = QDoubleSpinBox()
        self.windSpeedSpinBox.setObjectName(u"windSpeedSpinBox")
        self.windSpeedSpinBox.setDecimals(2)
        self.windSpeedSpinBox.setMinimum(0.000000000000000)
        self.windSpeedSpinBox.setValue(10.000000000000000)
        self.horizontalLayout_5.addWidget(self.windSpeedSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_5)
        # Water depth row
        self.horizontalLayout_6 = QHBoxLayout()
        self.horizontalLayout_6.setObjectName(u"horizontalLayout_6")
        self.waterDepthLabel = QLabel()
        self.waterDepthLabel.setObjectName(u"waterDepthLabel")
        self.horizontalLayout_6.addWidget(self.waterDepthLabel)
        self.horizontalSpacer_2d = QSpacerItem(24, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_6.addItem(self.horizontalSpacer_2d)
        self.waterDepthSpinBox = QDoubleSpinBox()
        self.waterDepthSpinBox.setObjectName(u"waterDepthSpinBox")
        self.waterDepthSpinBox.setDecimals(2)
        self.waterDepthSpinBox.setMinimum(0.000000000000000)
        self.waterDepthSpinBox.setValue(50.000000000000000)
        self.horizontalLayout_6.addWidget(self.waterDepthSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_6)
        # Wave amplitude row
        self.horizontalLayout_7 = QHBoxLayout()
        self.horizontalLayout_7.setObjectName(u"horizontalLayout_7")
        self.waveAmplitudeLabel = QLabel()
        self.waveAmplitudeLabel.setObjectName(u"waveAmplitudeLabel")
        self.horizontalLayout_7.addWidget(self.waveAmplitudeLabel)
        self.horizontalSpacer_2e = QSpacerItem(21, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_7.addItem(self.horizontalSpacer_2e)
        self.waveAmplitudeSpinBox = QDoubleSpinBox()
        self.waveAmplitudeSpinBox.setObjectName(u"waveAmplitudeSpinBox")
        self.waveAmplitudeSpinBox.setDecimals(2)
        self.waveAmplitudeSpinBox.setMinimum(0.000000000000000)
        self.waveAmplitudeSpinBox.setValue(1.500000000000000)
        self.horizontalLayout_7.addWidget(self.waveAmplitudeSpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_7)
        # Wave directionality row
        self.horizontalLayout_8 = QHBoxLayout()
        self.horizontalLayout_8.setObjectName(u"horizontalLayout_8")
        self.waveDirectionalityLabel = QLabel()
        self.waveDirectionalityLabel.setObjectName(u"waveDirectionalityLabel")
        self.horizontalLayout_8.addWidget(self.waveDirectionalityLabel)
        self.horizontalSpacer_2f = QSpacerItem(17, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_8.addItem(self.horizontalSpacer_2f)
        self.waveDirectionalitySpinBox = QDoubleSpinBox()
        self.waveDirectionalitySpinBox.setObjectName(u"waveDirectionalitySpinBox")
        self.waveDirectionalitySpinBox.setMinimum(0.000000000000000)
        self.waveDirectionalitySpinBox.setValue(0.000000000000000)
        self.horizontalLayout_8.addWidget(self.waveDirectionalitySpinBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_8)
        # Bottom stretch, separator and OK/Cancel/Apply buttons
        self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
        self.prefsOverButtonsLayout.addItem(self.verticalSpacer)
        self.line = QFrame()
        self.line.setObjectName(u"line")
        self.line.setFrameShape(QFrame.HLine)
        self.line.setFrameShadow(QFrame.Sunken)
        self.prefsOverButtonsLayout.addWidget(self.line)
        self.horizontalLayout_2 = QHBoxLayout()
        self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
        self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(self.horizontalSpacer)
        self.buttonBox = QDialogButtonBox()
        self.buttonBox.setObjectName(u"buttonBox")
        self.buttonBox.setStandardButtons(QDialogButtonBox.Apply|QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
        self.horizontalLayout_2.addWidget(self.buttonBox)
        self.prefsOverButtonsLayout.addLayout(self.horizontalLayout_2)
        self.verticalLayout.addLayout(self.prefsOverButtonsLayout)
        self.retranslateUi(Ocean_Simulation_Settings)
        QMetaObject.connectSlotsByName(Ocean_Simulation_Settings)
    # setupUi
    def retranslateUi(self, Ocean_Simulation_Settings):
        Ocean_Simulation_Settings.setWindowTitle(QCoreApplication.translate("Preferences", u"Ocean Simulation Settings", None))
        Ocean_Simulation_Settings.setProperty("comment", QCoreApplication.translate("Preferences", u"\n"
"            Copyright 2020 Pixar \n"
"             \n"
"            Licensed under the Apache License, Version 2.0 (the \"Apache License\") \n"
"            with the following modification; you may not use this file except in \n"
"            compliance with the Apache License and the following modification to it: \n"
"            Section 6. Trademarks. is deleted and replaced with: \n"
"             \n"
"            6. Trademarks. This License does not grant permission to use the trade \n"
"            names, trademarks, service marks, or product names of the Licensor \n"
"            and its affiliates, except as required to comply with Section 4(c) of \n"
"            the License and to reproduce the content of the NOTI"
                        "CE file. \n"
"             \n"
"            You may obtain a copy of the Apache License at \n"
"             \n"
"            http://www.apache.org/licenses/LICENSE-2.0 \n"
"             \n"
"            Unless required by applicable law or agreed to in writing, software \n"
"            distributed under the Apache License with the above modification is \n"
"            distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY \n"
"            KIND, either express or implied. See the Apache License for the specific \n"
"            language governing permissions and limitations under the Apache License. \n"
"            ", None))
        self.scaleLabel.setText(QCoreApplication.translate("Preferences", u"Scale", None))
        self.directionLabel.setText(QCoreApplication.translate("Preferences", u"Direction", None))
        self.windSpeedLabel.setText(QCoreApplication.translate("Preferences", u"Wind Speed", None))
        self.waterDepthLabel.setText(QCoreApplication.translate("Preferences", u"Water Depth", None))
        self.waveAmplitudeLabel.setText(QCoreApplication.translate("Preferences", u"Wave Amplitude", None))
        self.waveDirectionalityLabel.setText(QCoreApplication.translate("Preferences", u"Wave Directionality", None))
    # retranslateUi
| 10,887 | Python | 46.134199 | 127 | 0.669055 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedPrimDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/gf/transform.h>
#include <pxr/usd/usdGeom/tokens.h>
#include <pxr/imaging/hd/xformSchema.h>
#include "computedPrimDataSource.h"
#include "localPositionSchema.h"
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Wraps the prim's original container data source and builds the lazy
// matrix data source that performs the geospatial xform computation.
HdOmniGeospatialComputedPrimDataSource::HdOmniGeospatialComputedPrimDataSource(
    HdContainerDataSourceHandle inputDataSource) :
    _inputDataSource(inputDataSource)
{
    _matrixDataSource =
        HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::New(_inputDataSource);
}
#if PXR_VERSION < 2302
// Pre-23.02 USD still requires the Has() membership query on data sources;
// this container only ever exposes the two xform schema fields.
bool HdOmniGeospatialComputedPrimDataSource::Has(const TfToken& name)
{
    return (name == HdXformSchemaTokens->resetXformStack) ||
        (name == HdXformSchemaTokens->matrix);
}
#endif
TfTokenVector HdOmniGeospatialComputedPrimDataSource::GetNames()
{
    // This computed prim exposes exactly the two xform schema fields,
    // in the same order as the original push_back sequence.
    return {
        HdXformSchemaTokens->resetXformStack,
        HdXformSchemaTokens->matrix
    };
}
// Returns resetXformStack untouched from the underlying source, and the
// computed geospatial matrix data source for the matrix field; anything
// else (or a null input source) yields nullptr.
HdDataSourceBaseHandle HdOmniGeospatialComputedPrimDataSource::Get(const TfToken& name)
{
    if (_inputDataSource != nullptr)
    {
        if (name == HdXformSchemaTokens->resetXformStack)
        {
            // we don't modify the underlying time-sampled data
            // for resetXformStack, so return that directly
            HdXformSchema xformSchema = HdXformSchema::GetFromParent(_inputDataSource);
            return xformSchema.IsDefined() ? xformSchema.GetResetXformStack() : nullptr;
        }
        else if (name == HdXformSchemaTokens->matrix)
        {
            // note even if resetXformStack was true we consider
            // the geospatial data to override that
            return _matrixDataSource;
        }
    }
    return nullptr;
}
// Constructor simply captures the prim's container data source; all
// computation happens lazily in GetTypedValue.
HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GeospatialMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource) : _inputDataSource(inputDataSource)
{
}
// Type-erased access: box the computed matrix in a VtValue.
VtValue HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}
// Computes the geospatially transformed matrix for this shutter offset.
GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return this->_ComputeTransformedMatrix(shutterOffset);
}
// The computed matrix varies whenever either the authored matrix or the
// geospatial local position varies, so the contributing sample times are
// the merged sample times of those two sources.
bool HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle sources[] = {
        this->_GetMatrixSource(),
        this->_GetLocalPositionSource()
    };
    return HdGetMergedContributingSampleTimesForInterval(
        TfArraySize(sources),
        sources,
        startTime,
        endTime,
        outSampleTimes);
}
// --- raw data source accessors -------------------------------------------
// Each of these pulls the relevant schema from the wrapped container data
// source and returns the handle for one field; handles may be null when the
// schema is not present on the prim (callers below supply fallbacks).
HdMatrixDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetMatrixSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetMatrix();
}
HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetLocalPositionSource() const
{
    return HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(_inputDataSource).GetPosition();
}
HdTokenDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetTangentPlaneSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetTangentPlane();
}
HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetReferencePositionSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetReferencePosition();
}
HdVec3dDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetOrientationSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetOrientation();
}
HdTokenDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageUpAxisSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetStageUpAxis();
}
HdDoubleDataSourceHandle HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageMetersPerUnitSource() const
{
    return HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(_inputDataSource).GetStageMetersPerUnit();
}
// --- typed value accessors with fallbacks --------------------------------
// Each of these samples the corresponding data source, falling back to a
// default when the source is absent.
// Authored matrix, or identity when no xform matrix is present.
GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetMatrix(const Time shutterOffset) const
{
    HdMatrixDataSourceHandle dataSource = this->_GetMatrixSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }
    return GfMatrix4d(1.0);
}
// NOTE(review): fallback is (1,1,1) rather than the origin -- confirm this
// is intentional for a missing local position.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetLocalPosition(const Time shutterOffset) const
{
    HdVec3dDataSourceHandle dataSource = this->_GetLocalPositionSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(shutterOffset);
    }
    return GfVec3d(1.0);
}
// Tangent plane token (sampled at offset 0; treated as time-invariant).
TfToken HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetTangentPlane() const
{
    HdTokenDataSourceHandle dataSource = this->_GetTangentPlaneSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }
    return TfToken();
}
// Geodetic reference position, falling back to (1,1,1) when unauthored.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetReferencePosition() const
{
    HdVec3dDataSourceHandle dataSource = this->_GetReferencePositionSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }
    return GfVec3d(1.0);
}
// Reference orientation, falling back to (1,1,1) when unauthored.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetOrientation() const
{
    HdVec3dDataSourceHandle dataSource = this->_GetOrientationSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }
    return GfVec3d(1.0);
}
// Stage up axis token; USD's default up axis (y) when unauthored.
TfToken HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageUpAxis() const
{
    HdTokenDataSourceHandle dataSource = this->_GetStageUpAxisSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }
    return UsdGeomTokens->y;
}
// Stage metersPerUnit; 0.01 (centimeters) when unauthored.
double HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GetStageMetersPerUnit() const
{
    HdDoubleDataSourceHandle dataSource = this->_GetStageMetersPerUnitSource();
    if (dataSource != nullptr)
    {
        return dataSource->GetTypedValue(0.0f);
    }
    return 0.01;
}
// Replaces the translation of the prim's authored transform with a
// translation computed from its geospatial (WGS84) local position:
// geodetic -> ECEF -> ENU (about the reference position) -> stage cartesian.
// Scale, rotation and pivot of the authored transform are preserved.
GfMatrix4d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_ComputeTransformedMatrix(const Time shutterOffset) const
{
    // NOTE: in the case of the geospatially applied prim, we are completely
    // ignoring the fact that resetXformStack may be true at any given time sample
    // that is, geospatial positioning takes priority over local transformation reset
    // to compute the local position, we need to first get the geodetic reference
    // NOTE(review): targetFrame and orientation are fetched but never used
    // below -- presumably reserved for future tangent-plane handling; confirm.
    TfToken targetFrame = this->_GetTangentPlane();
    GfVec3d tangentPosition = this->_GetReferencePosition();
    GfVec3d orientation = this->_GetOrientation();
    GfVec3d localPosition = this->_GetLocalPosition(shutterOffset);
    double metersPerUnit = this->_GetStageMetersPerUnit();
    TfToken upAxis = this->_GetStageUpAxis();
    // calculate the new geodetic translation
    auto enu = this->_EcefToEnu(this->_GeodeticToEcef(localPosition), tangentPosition);
    GfVec3d translation = this->_EnuToCartesian(enu, upAxis, metersPerUnit, tangentPosition);
    // we only want to replace the translation piece
    // but since the transform may have orientation and scale
    // information, we need to extract that from the existing
    // matrix first
    GfTransform currentTransform(this->_GetMatrix(shutterOffset));
    GfVec3d existingScale = currentTransform.GetScale();
    GfRotation existingRotation = currentTransform.GetRotation();
    GfRotation existingPivotOrientation = currentTransform.GetPivotOrientation();
    GfVec3d existingPivotPosition = currentTransform.GetPivotPosition();
    // now combine the new translation with the existing scale / rotation
    GfTransform newTransform(existingScale, existingPivotOrientation,
        existingRotation, existingPivotPosition, translation);
    return newTransform.GetMatrix();
}
// Geospatial transform functions
// For reference:
// https://onlinelibrary.wiley.com/doi/pdf/10.1002/9780470099728.app3
// https://en.wikipedia.org/wiki/Geographic_coordinate_conversion
// Implementation of Ferrari's solution
// Converts a geodetic position llh = (latitude deg, longitude deg, height)
// into Earth-Centered-Earth-Fixed coordinates (WGS84 ellipsoid).
// NOTE(review): the original named component 0 "lambda" and component 1 "phi"
// (the reverse of the usual convention) but used component 0 as the latitude
// in the prime-vertical radius; the math is unchanged here, only renamed.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_GeodeticToEcef(const GfVec3d & llh) const
{
    double latRad = llh[0] * GeoConstants::radians;
    double lonRad = llh[1] * GeoConstants::radians;
    double sinLat = sin(latRad);
    double cosLat = cos(latRad);
    double sinLon = sin(lonRad);
    double cosLon = cos(lonRad);
    // Prime vertical radius of curvature at this latitude.
    double N = GeoConstants::semiMajorAxis / sqrt(1 - GeoConstants::eccentricity * sinLat * sinLat);
    return PXR_NS::GfVec3d(
        (llh[2] + N) * cosLat * cosLon,
        (llh[2] + N) * cosLat * sinLon,
        (llh[2] + (1 - GeoConstants::eccentricity) * N) * sinLat);
}
// Converts an ECEF point into the local East-North-Up frame whose origin is
// the geodetic position llh = (latitude deg, longitude deg, height).
// The original duplicated the entire geodetic->ECEF conversion inline to
// compute the frame origin; it now reuses _GeodeticToEcef, which evaluates
// the identical formula, so the two conversions cannot drift apart.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const
{
    double latRad = llh[0] * GeoConstants::radians;
    double lonRad = llh[1] * GeoConstants::radians;
    double sinLat = sin(latRad);
    double cosLat = cos(latRad);
    double sinLon = sin(lonRad);
    double cosLon = cos(lonRad);
    // Offset from the ENU frame origin, then rotate into east/north/up.
    GfVec3d delta = ecef - this->_GeodeticToEcef(llh);
    return PXR_NS::GfVec3d(
        -sinLon * delta[0] + cosLon * delta[1],
        -cosLon * sinLat * delta[0] - sinLat * sinLon * delta[1] + cosLat * delta[2],
        cosLat * cosLon * delta[0] + cosLat * sinLon * delta[1] + sinLat * delta[2]);
}
// Maps an ENU displacement into stage cartesian coordinates, honoring the
// stage up axis (USD permits only y-up or z-up) and converting meters into
// stage units. The "up" component (enu[2]) lands on whichever stage axis is
// up; the other of y/z receives the north component (enu[1]).
// NOTE(review): east is mirrored when the reference longitude/latitude
// component 0 is negative -- confirm the intended hemisphere convention.
GfVec3d HdOmniGeospatialComputedPrimDataSource::_GeospatialMatrixDataSource::_EnuToCartesian(
    const GfVec3d& enu,
    const TfToken& upAxis,
    const double& metersPerUnit,
    const GfVec3d& reference) const
{
    auto cartesian = GfVec3d(reference[0] < 0.0 ? -enu[0] : enu[0],
        upAxis == UsdGeomTokens->y ? enu[2] : enu[1],
        upAxis == UsdGeomTokens->z ? enu[2] : enu[1]);
    cartesian /= metersPerUnit;
    return cartesian;
}
PXR_NAMESPACE_CLOSE_SCOPE | 11,354 | C++ | 35.394231 | 137 | 0.747314 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_
#define HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_
#include <pxr/imaging/hd/schema.h>
#include <pxr/imaging/hd/dataSourceLocator.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS \
(referencePositionApi) \
(tangentPlane) \
(referencePosition) \
(orientation) \
(stageUpAxis) \
(stageMetersPerUnit) \
TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialWGS84ReferencePositionSchemaTokens, OMNIGEOSCENEINDEX_API,
HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniGeospatialWGS84ReferencePositionSchema
///
/// Hydra schema wrapper for the WGS84 reference-position container: the
/// tangent plane token, the geodetic reference position / orientation, and
/// the stage metrics (up axis, metersPerUnit) needed to map geospatial
/// coordinates into stage space. Accessors return null handles when the
/// corresponding field is not present in the container.
class HdOmniGeospatialWGS84ReferencePositionSchema : public HdSchema
{
public:
    HdOmniGeospatialWGS84ReferencePositionSchema(HdContainerDataSourceHandle container)
    : HdSchema(container) { }
    OMNIGEOSCENEINDEX_API
    HdTokenDataSourceHandle GetTangentPlane();
    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetReferencePosition();
    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetOrientation();
    OMNIGEOSCENEINDEX_API
    HdTokenDataSourceHandle GetStageUpAxis();
    OMNIGEOSCENEINDEX_API
    HdDoubleDataSourceHandle GetStageMetersPerUnit();
    // Locates this schema's container on a parent prim container.
    OMNIGEOSCENEINDEX_API
    static HdOmniGeospatialWGS84ReferencePositionSchema GetFromParent(
        const HdContainerDataSourceHandle& fromParentContainer);
    // Data source locator addressing this schema within a prim.
    OMNIGEOSCENEINDEX_API
    static const HdDataSourceLocator& GetDefaultLocator();
    // Builds a retained container holding the five fields.
    OMNIGEOSCENEINDEX_API
    static HdContainerDataSourceHandle BuildRetained(
        const HdTokenDataSourceHandle& tangentPlane,
        const HdVec3dDataSourceHandle& referencePosition,
        const HdVec3dDataSourceHandle& orientation,
        const HdTokenDataSourceHandle& stageUpAxis,
        const HdDoubleDataSourceHandle& stageMetersPerUnit
    );
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_SCHEMA_H_ | 2,662 | C | 32.70886 | 99 | 0.730278 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/api.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GEO_SCENE_INDEX_API_H
#define OMNI_GEO_SCENE_INDEX_API_H
#include "pxr/base/arch/export.h"
// Standard USD-style export/visibility macros: empty for static builds,
// otherwise export when building this library and import when consuming it.
#if defined(PXR_STATIC)
#   define OMNIGEOSCENEINDEX_API
#   define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...)
#   define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...)
#   define OMNIGEOSCENEINDEX_LOCAL
#else
#   if defined(OMNIGEOSCENEINDEX_EXPORTS)
#       define OMNIGEOSCENEINDEX_API ARCH_EXPORT
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
#   else
#       define OMNIGEOSCENEINDEX_API ARCH_IMPORT
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIGEOSCENEINDEX_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
#   endif
#   define OMNIGEOSCENEINDEX_LOCAL ARCH_HIDDEN
#endif
#endif // OMNI_GEO_SCENE_INDEX_API_H
| 1,544 | C | 39.657894 | 99 | 0.734456 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/usdImaging/usdImaging/dataSourceStageGlobals.h>
#include <omniGeospatial/wGS84LocalPositionAPI.h>
#include "localPositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
/// \class HdOmniGeospatialWGS84LocalPositionDataSource
///
/// Container data source backed by a prim's OmniGeospatialWGS84LocalPositionAPI
/// applied schema; exposes the local-position field(s) to Hydra, resolving
/// time-sampled values through the stage globals.
class HdOmniGeospatialWGS84LocalPositionDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialWGS84LocalPositionDataSource);
    HdOmniGeospatialWGS84LocalPositionDataSource(const UsdPrim& prim,
        const UsdImagingDataSourceStageGlobals& stageGlobals);
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif
private:
    // Applied-API accessor for the source prim's geospatial data.
    OmniGeospatialWGS84LocalPositionAPI _localPositionApi;
    // Stage-wide context (e.g. current time) used to sample attributes.
    const UsdImagingDataSourceStageGlobals& _stageGlobals;
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialWGS84LocalPositionDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_DATA_SOURCE_H_ | 1,710 | C | 33.219999 | 81 | 0.792398 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedDependentDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
PXR_NAMESPACE_OPEN_SCOPE
/// \class HdOmniGeospatialComputedDependentDataSource
///
/// A datasource representing a container data source mimicing
/// that of a container data source for xform data, but returning
/// computed values based on geospatial data applied to the parent
/// (or some parent in the hierarchy) of this prim.
///
class HdOmniGeospatialComputedDependentDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialComputedDependentDataSource);
    // inputDataSource: the prim's own (wrapped) data source
    // parentDataSource: the data source of the geospatially affected parent
    HdOmniGeospatialComputedDependentDataSource(HdContainerDataSourceHandle inputDataSource,
        HdContainerDataSourceHandle parentDataSource);
    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    // Has() was removed from the data source API in USD 23.02
    bool Has(const TfToken& name) override;
#endif
private:
    // builds the lazily evaluated matrix data source returned for xform queries
    HdDataSourceBaseHandle _ComputeGeospatiallyAffectedXform();
private:
    HdContainerDataSourceHandle _inputDataSource;
    HdContainerDataSourceHandle _parentDataSource;
    HdMatrixDataSourceHandle _matrixDataSource;
    // Matrix data source that, per time sample, rebases the prim's matrix
    // onto the parent's geospatially computed transform.
    class _GeospatiallyAffectedMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_GeospatiallyAffectedMatrixDataSource);
        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;
    private:
        _GeospatiallyAffectedMatrixDataSource(HdContainerDataSourceHandle inputDataSource,
            HdContainerDataSourceHandle parentDataSource);
        // typed accessors into the wrapped input / parent containers
        HdMatrixDataSourceHandle _GetMatrixSource() const;
        HdBoolDataSourceHandle _GetResetXformStackSource() const;
        HdMatrixDataSourceHandle _GetParentMatrixSource() const;
        HdMatrixDataSourceHandle _GetParentOriginalMatrixSource() const;
        // sampled-value helpers over the accessors above
        GfMatrix4d _GetMatrix(const Time shutterOffset) const;
        bool _GetResetXformStack(const Time shutterOffset) const;
        GfMatrix4d _GetParentMatrix(const Time shutterOffset) const;
        GfMatrix4d _GetParentOriginalMatrix(const Time shutterOffset) const;
        // geospatial transform methods
        GfMatrix4d _ComputeTransformedMatrix(const Time shutterOffset) const;
        HdContainerDataSourceHandle _inputDataSource;
        HdContainerDataSourceHandle _parentDataSource;
    };
    HD_DECLARE_DATASOURCE_HANDLES(_GeospatiallyAffectedMatrixDataSource);
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialComputedDependentDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_COMPUTED_DEPENDENT_DATA_SOURCE_H_ | 3,530 | C | 35.402061 | 92 | 0.768272 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/xformSchema.h>
#include "geospatialDataSource.h"
#include "computedPrimDataSource.h"
#include "computedDependentDataSource.h"
#include "localPositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialDataSourceTokens,
HDOMNIGEOSPATIALDATASOURCE_TOKENS);
// Wraps an Xformable prim's data source so its transform can be resolved
// geospatially on demand.
//
// index: the owning scene index, used later to look up ancestor prims.
// primPath: path of the wrapped prim within the scene index.
// wrappedDataSource: the original (flattened) prim-level data source.
HdOmniGeospatialDataSource::HdOmniGeospatialDataSource(const HdSceneIndexBase& index, const SdfPath& primPath,
    HdContainerDataSourceHandle wrappedDataSource) :
    _sceneIndex(index),
    _primPath(primPath),
    _wrappedDataSource(wrappedDataSource)
{
}
// Replaces the wrapped data source while keeping this wrapper object (and
// any handles to it) alive.
void HdOmniGeospatialDataSource::UpdateWrappedDataSource(
    HdContainerDataSourceHandle wrappedDataSource)
{
    _wrappedDataSource = wrappedDataSource;
}
#if PXR_VERSION < 2302
// Pre-23.02 hydra API: reports whether Get(name) can produce a value.
// Always true for the geospatialPreservedXform token this wrapper adds;
// all other names defer to the wrapped data source.
bool HdOmniGeospatialDataSource::Has(const TfToken& name)
{
    if (name == HdOmniGeospatialDataSourceTokens->geospatialPreservedXform)
    {
        return true;
    }
    return (_wrappedDataSource != nullptr) ? _wrappedDataSource->Has(name) : false;
}
#endif
// Returns the wrapped data source's names plus the wrapper's own
// geospatialPreservedXform token. Since only Xformables are wrapped,
// the wrapped list should already contain HdXformSchemaTokens->xform.
TfTokenVector HdOmniGeospatialDataSource::GetNames()
{
    TfTokenVector names;
    if (_wrappedDataSource != nullptr)
    {
        names = _wrappedDataSource->GetNames();
    }
    names.push_back(HdOmniGeospatialDataSourceTokens->geospatialPreservedXform);
    return names;
}
// Answers hydra queries against this wrapper.
//
// - xform: intercepted; returns a lazily computed geospatial transform
//   (see _ComputeGeospatialXform) rather than the flattened one.
// - geospatialPreservedXform: the wrapped prim's ORIGINAL flattened xform,
//   preserved so downstream consumers can still reach it.
// - anything else: deferred to the wrapped data source.
HdDataSourceBaseHandle HdOmniGeospatialDataSource::Get(const TfToken& name)
{
    if (name == HdXformSchemaTokens->xform)
    {
        // this is an intercept of the flattened transform matrix
        // we need to dynamically compute a geospatial one
        return this->_ComputeGeospatialXform();
    }
    else if (name == HdOmniGeospatialDataSourceTokens->geospatialPreservedXform)
    {
        // this would be the original flattened matrix of the wrapped data source
        if (_wrappedDataSource != nullptr)
        {
            return _wrappedDataSource->Get(HdXformSchemaTokens->xform);
        }
    }
    // all other token values are deferred to the wrapped data source (if any)
    if (_wrappedDataSource != nullptr)
    {
        return _wrappedDataSource->Get(name);
    }
    return nullptr;
}
// Drops the cached computed xform data sources when the prim's xform is
// dirtied. Returns true when a cached value was actually cleared (so the
// caller knows a re-pull will recompute), false when nothing was cached.
bool HdOmniGeospatialDataSource::IsPrimDirtied(const HdDataSourceLocatorSet& locators)
{
    static const HdContainerDataSourceHandle containerNull(nullptr);
    // only the xform locator feeds the cached computations
    if (locators.Intersects(HdXformSchema::GetDefaultLocator()))
    {
        if (HdContainerDataSource::AtomicLoad(_computedGeospatialPrimDataSource) != nullptr ||
            HdContainerDataSource::AtomicLoad(_computedGeospatialDependentDataSource) != nullptr)
        {
            // atomically clear both caches; _ComputeGeospatialXform lazily
            // rebuilds them on the next Get
            HdContainerDataSource::AtomicStore(_computedGeospatialPrimDataSource, containerNull);
            HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, containerNull);
            return true;
        }
    }
    return false;
}
// Returns a data source answering the HdXformSchemaTokens->xform query with
// a geospatially resolved transform.
//
// Matrices are time sampled, so nothing is evaluated here; the returned
// data sources perform the computation per time sample. Two cases:
// 1. The wrapped prim itself has a local geodetic position applied: all
//    information needed (geodetic root tangentFrame + geodetic position)
//    lives on the prim, so a computed prim data source suffices.
// 2. The prim has no local geodetic position but some ancestor does: the
//    transform must be recomputed from this prim plus its parent, so a
//    computed dependent data source is created.
//
// Results are cached atomically and invalidated via IsPrimDirtied.
HdDataSourceBaseHandle HdOmniGeospatialDataSource::_ComputeGeospatialXform()
{
    if (this->_HasGeospatialInformation(_wrappedDataSource))
    {
        // case 1: return the cached computed data source if we have one
        HdContainerDataSourceHandle computedGeospatialPrimDataSource =
            HdContainerDataSource::AtomicLoad(_computedGeospatialPrimDataSource);
        if (computedGeospatialPrimDataSource != nullptr)
        {
            return computedGeospatialPrimDataSource;
        }
        // the container responsible for the xform token must account for
        // both resetXform and matrix, and since both can be time-sampled it
        // needs the complete wrapped data source to answer any query
        computedGeospatialPrimDataSource = HdOmniGeospatialComputedPrimDataSource::New(_wrappedDataSource);
        HdContainerDataSource::AtomicStore(_computedGeospatialPrimDataSource, computedGeospatialPrimDataSource);
        return computedGeospatialPrimDataSource;
    }
    // case 2: return the cached dependent data source if we have one
    HdContainerDataSourceHandle computedGeospatialDependentDataSource =
        HdContainerDataSource::AtomicLoad(_computedGeospatialDependentDataSource);
    if (computedGeospatialDependentDataSource != nullptr)
    {
        return computedGeospatialDependentDataSource;
    }
    if (_primPath == SdfPath::AbsoluteRootPath())
    {
        // NOTE: assumes the root never has geospatial information applied
        if (_wrappedDataSource != nullptr)
        {
            return _wrappedDataSource->Get(HdXformSchemaTokens->xform);
        }
        return nullptr;
    }
    // walk the parent hierarchy looking for a geospatially applied ancestor
    // TODO: this walk runs on every cache miss; caching the ancestor lookup
    // per prim would be faster but needs careful invalidation
    HdContainerDataSourceHandle geospatialDataSource = nullptr;
    for (SdfPath p = _primPath.GetParentPath(); p != SdfPath::AbsoluteRootPath(); p = p.GetParentPath())
    {
        HdSceneIndexPrim prim = _sceneIndex.GetPrim(p);
        if (this->_HasGeospatialInformation(prim.dataSource))
        {
            // found it - stop at the NEAREST geospatial ancestor; the
            // previous code kept walking and silently ended up with the
            // farthest ancestor instead
            geospatialDataSource = prim.dataSource;
            break;
        }
    }
    if (geospatialDataSource == nullptr)
    {
        // no geospatially applied ancestor: cache and return the wrapped
        // prim's own flattened xform unchanged
        if (_wrappedDataSource != nullptr)
        {
            HdContainerDataSourceHandle dataSource =
                HdContainerDataSource::Cast(_wrappedDataSource->Get(HdXformSchemaTokens->xform));
            if (dataSource != nullptr)
            {
                HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, dataSource);
                return dataSource;
            }
        }
        return nullptr;
    }
    // otherwise create a data source that performs the compute between the
    // immediate parent and the prim in question
    SdfPath parentPath = _primPath.GetParentPath();
    HdSceneIndexPrim parentSceneIndexPrim = _sceneIndex.GetPrim(parentPath);
    computedGeospatialDependentDataSource = HdOmniGeospatialComputedDependentDataSource::New(_wrappedDataSource,
        parentSceneIndexPrim.dataSource);
    HdContainerDataSource::AtomicStore(_computedGeospatialDependentDataSource, computedGeospatialDependentDataSource);
    return computedGeospatialDependentDataSource;
}
// Returns true when the given container carries the WGS84 local-position
// schema (i.e. the prim has local geodetic position data applied).
bool HdOmniGeospatialDataSource::_HasGeospatialInformation(HdContainerDataSourceHandle handle)
{
    return HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(handle).IsDefined();
}
PXR_NAMESPACE_CLOSE_SCOPE | 9,813 | C++ | 40.235294 | 142 | 0.689086 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/usdImaging/usdImaging/dataSourceAttribute.h>
#include "localPositionDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Builds a data source exposing the prim's WGS84 local-position attribute.
//
// prim: the USD prim the OmniGeospatialWGS84LocalPositionAPI is applied to.
// stageGlobals: stage-global state (e.g. time) used when sampling attributes.
//
// The API schema wrapper is direct-initialized in the member initializer
// list rather than default-constructed and reassigned in the body.
HdOmniGeospatialWGS84LocalPositionDataSource::HdOmniGeospatialWGS84LocalPositionDataSource(
    const UsdPrim& prim,
    const UsdImagingDataSourceStageGlobals& stageGlobals) :
    _localPositionApi(prim),
    _stageGlobals(stageGlobals)
{
}
#if PXR_VERSION < 2302
// Pre-23.02 hydra API: the only name this data source can answer is the
// local-position token.
bool HdOmniGeospatialWGS84LocalPositionDataSource::Has(const TfToken& name)
{
    return (name == HdOmniGeospatialWGS84LocalPositionSchemaTokens->position);
}
#endif
// Returns the hydra attribute names this data source is responsible for;
// the only one is the local-position token.
TfTokenVector HdOmniGeospatialWGS84LocalPositionDataSource::GetNames()
{
    return TfTokenVector{ HdOmniGeospatialWGS84LocalPositionSchemaTokens->position };
}
// Returns the value data source for a supported attribute name, or null
// for any name this data source does not provide.
//
// For the position token, the USD attribute is wrapped in a sampled
// data source so hydra can evaluate it at arbitrary shutter offsets.
HdDataSourceBaseHandle HdOmniGeospatialWGS84LocalPositionDataSource::Get(const TfToken& name)
{
    if (name == HdOmniGeospatialWGS84LocalPositionSchemaTokens->position)
    {
        return UsdImagingDataSourceAttribute<GfVec3d>::New(
            _localPositionApi.GetPositionAttr(), _stageGlobals);
    }
    // this is a name we don't support
    return nullptr;
}
PXR_NAMESPACE_CLOSE_SCOPE | 1,954 | C++ | 32.135593 | 93 | 0.772262 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedPrimDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
PXR_NAMESPACE_OPEN_SCOPE
/// \class HdOmniGeospatialComputedPrimDataSource
///
/// A datasource representing a container data source mimicing
/// that of a container data source for xform data, but returning
/// computed values based on geospatial data applied to the prim.
///
class HdOmniGeospatialComputedPrimDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialComputedPrimDataSource);
    // inputDataSource: the wrapped prim's complete data source, which must
    // carry the geospatial local-position / reference-position information
    HdOmniGeospatialComputedPrimDataSource(HdContainerDataSourceHandle inputDataSource);
    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    // Has() was removed from the data source API in USD 23.02
    bool Has(const TfToken& name) override;
#endif
private:
    // builds the lazily evaluated matrix data source returned for xform queries
    HdDataSourceBaseHandle _ComputeGeospatialXform();
    // geodetic (lat/lon/height) -> Earth-Centered Earth-Fixed coordinates
    GfVec3d _GeodeticToEcef(const GfVec3d& llh) const;
    // ECEF -> local East-North-Up frame around the given geodetic point
    GfVec3d _EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const;
    // ENU -> stage cartesian space, honoring up axis and meters-per-unit
    GfVec3d _EnuToCartesian(const GfVec3d& enu, const TfToken& upAxis, const double& metersPerUnit, const GfVec3d& reference) const;
private:
    HdContainerDataSourceHandle _inputDataSource;
    HdMatrixDataSourceHandle _matrixDataSource;
    // Matrix data source that performs the geospatial transform per time sample.
    class _GeospatialMatrixDataSource : public HdMatrixDataSource
    {
    public:
        HD_DECLARE_DATASOURCE(_GeospatialMatrixDataSource);
        VtValue GetValue(Time shutterOffset) override;
        GfMatrix4d GetTypedValue(Time shutterOffset) override;
        bool GetContributingSampleTimesForInterval(
            Time startTime,
            Time endTime,
            std::vector<Time>* outSampleTimes) override;
    private:
        _GeospatialMatrixDataSource(HdContainerDataSourceHandle inputDataSource);
        // typed accessors into the wrapped container
        HdMatrixDataSourceHandle _GetMatrixSource() const;
        HdVec3dDataSourceHandle _GetLocalPositionSource() const;
        HdTokenDataSourceHandle _GetTangentPlaneSource() const;
        HdVec3dDataSourceHandle _GetReferencePositionSource() const;
        HdVec3dDataSourceHandle _GetOrientationSource() const;
        HdTokenDataSourceHandle _GetStageUpAxisSource() const;
        HdDoubleDataSourceHandle _GetStageMetersPerUnitSource() const;
        // sampled-value helpers over the accessors above
        GfMatrix4d _GetMatrix(const Time shutterOffset) const;
        GfVec3d _GetLocalPosition(const Time shutterOffset) const;
        TfToken _GetTangentPlane() const;
        GfVec3d _GetReferencePosition() const;
        GfVec3d _GetOrientation() const;
        TfToken _GetStageUpAxis() const;
        double _GetStageMetersPerUnit() const;
        // geospatial transform methods
        GfMatrix4d _ComputeTransformedMatrix(const Time shutterOffset) const;
        GfVec3d _GeodeticToEcef(const GfVec3d& llh) const;
        GfVec3d _EcefToEnu(const GfVec3d& ecef, const GfVec3d& llh) const;
        GfVec3d _EnuToCartesian(const GfVec3d& enu, const TfToken& upAxis, const double& metersPerUnit, const GfVec3d& reference) const;
        // WGS84 ellipsoid parameters (meters / unitless)
        struct GeoConstants
        {
            static constexpr double semiMajorAxis = 6378137.0;
            static constexpr double semiMinorAxis = 6356752.3142;
            static constexpr double flattening = 1.0 / 298.257223563;
            // NOTE(review): despite the name, f*(2-f) is the SQUARE of the
            // first eccentricity (e^2), not e itself — verify usage sites
            static constexpr double eccentricity = flattening * (2 - flattening);
            // NOTE(review): M_PI is not standard C++; assumes <cmath> with
            // the needed defines is pulled in by an earlier include — confirm
            static constexpr double radians = M_PI / 180.0;
            static constexpr double degrees = 180.0 / M_PI;
        };
        HdContainerDataSourceHandle _inputDataSource;
    };
    HD_DECLARE_DATASOURCE_HANDLES(_GeospatialMatrixDataSource);
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialComputedPrimDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_COMPUTED_PRIM_DATA_SOURCE_H_ | 4,414 | C | 37.72807 | 136 | 0.738106 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialSceneIndex.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GEO_SCENE_INDEX_H_
#define OMNI_GEO_SCENE_INDEX_H_
#include <pxr/pxr.h>
#include <pxr/usd/sdf/pathTable.h>
#include <pxr/imaging/hd/filteringSceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DECLARE_REF_PTRS(OmniGeospatialSceneIndex);
///
/// \class OmniGeospatialSceneIndex
///
/// A scene index responsible for observing an input flattened scene
/// index and producing a comparable scene in which geospatial transforms
/// have been applied to prims with geospatial state attached to them
/// and for updating the transform of their children as needed.
///
/// Note that with Render Delegate 2.0 and the ability to pull data
/// from a non-flattened scene, this implementation will have to be
/// revisited to work with the unflattened xform representation of
/// the hydra prims.
///
class OmniGeospatialSceneIndex : public HdSingleInputFilteringSceneIndexBase
{
public:
    // Factory; inputArgs is currently unused by callers in this plugin.
    OMNIGEOSCENEINDEX_API
    static OmniGeospatialSceneIndexRefPtr New(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs = nullptr);
    OMNIGEOSCENEINDEX_API
    ~OmniGeospatialSceneIndex() override;
    // Returns the (possibly wrapped) prim at primPath.
    OMNIGEOSCENEINDEX_API
    HdSceneIndexPrim GetPrim(const SdfPath& primPath) const override;
    OMNIGEOSCENEINDEX_API
    SdfPathVector GetChildPrimPaths(const SdfPath& primPath) const override;
protected:
    OmniGeospatialSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
        const HdContainerDataSourceHandle& inputArgs);
    // these three are provided by HdSingleInputFilteringSceneIndexBase
    // and must be overridden by inheritors
    virtual void _PrimsAdded(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::AddedPrimEntries& entries) override;
    virtual void _PrimsRemoved(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::RemovedPrimEntries& entries) override;
    virtual void _PrimsDirtied(const HdSceneIndexBase& sender,
        const HdSceneIndexObserver::DirtiedPrimEntries& entries) override;
private:
    // returns the cache iterator plus whether primPath is already wrapped
    SdfPathTable<HdSceneIndexPrim>::_IterBoolPair _IsPrimWrapped(const SdfPath& primPath) const;
    // wraps hdPrim's data source and records it in the cache
    HdSceneIndexPrim& _WrapPrim(const SdfPath& primPath, const HdSceneIndexPrim& hdPrim) const;
    // propagates dirtiness for primPath and descendants into dirtyEntries
    void _DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators, HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries);
    /*HdContainerDataSourceHandle _ComputeDataSource(
        const SdfPath& primPath,
        const HdContainerDataSourceHandle& primDataSource) const;
    void _ComputeChildDataSources(const SdfPath& parentPath,
        const HdContainerDataSourceHandle& parentDataSource) const;
    HdContainerDataSourceHandle _ComputeMatrixDependenciesDataSource(
        const SdfPath& primPath) const;*/
private:
    // marked as mutable because it is an internal cache
    // that is written to on-demand from the GetPrim method
    // which is a const method by interface definition in HdSceneIndexBase
    mutable SdfPathTable<HdSceneIndexPrim> _wrappedPrims;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 3,668 | C | 36.438775 | 146 | 0.773446 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialWGS84ReferencePositionSchemaTokens,
HDOMNIGEOSPATIALWGS84REFERENCEPOSITION_SCHEMA_TOKENS);
// Returns the tangent-plane token data source, or null if the schema
// container does not hold one.
HdTokenDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetTangentPlane()
{
    return _GetTypedDataSource<HdTokenDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane);
}
// Returns the reference-position (Vec3d) data source, or null if absent.
HdVec3dDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetReferencePosition()
{
    return _GetTypedDataSource<HdVec3dDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition);
}
// Returns the orientation (Vec3d) data source, or null if absent.
HdVec3dDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetOrientation()
{
    return _GetTypedDataSource<HdVec3dDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation);
}
// Returns the stage up-axis token data source, or null if absent.
HdTokenDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetStageUpAxis()
{
    return _GetTypedDataSource<HdTokenDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis);
}
// Returns the stage meters-per-unit (double) data source, or null if absent.
HdDoubleDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::GetStageMetersPerUnit()
{
    return _GetTypedDataSource<HdDoubleDataSource>(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit);
}
// Extracts this schema's container from a prim-level container data source.
// A null parent (or a parent without the referencePositionApi entry) yields
// a schema wrapping a null container, for which IsDefined() is false.
HdOmniGeospatialWGS84ReferencePositionSchema HdOmniGeospatialWGS84ReferencePositionSchema::GetFromParent(
    const HdContainerDataSourceHandle& fromParentContainer)
{
    HdContainerDataSourceHandle schemaContainer =
        (fromParentContainer == nullptr)
            ? nullptr
            : HdContainerDataSource::Cast(fromParentContainer->Get(
                HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi));
    return HdOmniGeospatialWGS84ReferencePositionSchema(schemaContainer);
}
// Returns the locator at which this schema's container is nested inside a
// prim-level data source (the referencePositionApi token).
const HdDataSourceLocator& HdOmniGeospatialWGS84ReferencePositionSchema::GetDefaultLocator()
{
    static const HdDataSourceLocator locator(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi
    );
    return locator;
}
// Builds a retained container holding only the entries whose data source
// was actually supplied (non-null), preserving the parameter order.
HdContainerDataSourceHandle HdOmniGeospatialWGS84ReferencePositionSchema::BuildRetained(
    const HdTokenDataSourceHandle& tangentPlane,
    const HdVec3dDataSourceHandle& referencePosition,
    const HdVec3dDataSourceHandle& orientation,
    const HdTokenDataSourceHandle& stageUpAxis,
    const HdDoubleDataSourceHandle& stageMetersPerUnit)
{
    TfToken names[5];
    HdDataSourceBaseHandle values[5];
    size_t count = 0;
    auto appendIfSet = [&](const TfToken& name, const HdDataSourceBaseHandle& value)
    {
        if (value != nullptr)
        {
            names[count] = name;
            values[count] = value;
            ++count;
        }
    };
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane, tangentPlane);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition, referencePosition);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation, orientation);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis, stageUpAxis);
    appendIfSet(HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit, stageMetersPerUnit);
    return HdRetainedContainerDataSource::New(count, names, values);
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,221 | C++ | 33.048387 | 105 | 0.773513 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionSchema.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_
#define HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_
#include <pxr/imaging/hd/schema.h>
#include <pxr/imaging/hd/dataSourceLocator.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
// Token list for the local-position schema. NOTE: the last list entry must
// NOT carry a line continuation, otherwise the #define would splice the
// TF_DECLARE_PUBLIC_TOKENS invocation below into the macro body.
#define HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS \
    (localPositionApi) \
    (position)

TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialWGS84LocalPositionSchemaTokens, OMNIGEOSCENEINDEX_API,
    HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniGeospatialWGS84LocalPositionSchema
///
/// Schema view over the container data source holding a prim's WGS84
/// local geodetic position.
class HdOmniGeospatialWGS84LocalPositionSchema : public HdSchema
{
public:
    HdOmniGeospatialWGS84LocalPositionSchema(HdContainerDataSourceHandle container)
    : HdSchema(container) { }
    // Returns the position (Vec3d) data source, or null if absent.
    OMNIGEOSCENEINDEX_API
    HdVec3dDataSourceHandle GetPosition();
    // Extracts the schema container from a prim-level data source; wraps
    // null when the prim has no local-position entry.
    OMNIGEOSCENEINDEX_API
    static HdOmniGeospatialWGS84LocalPositionSchema GetFromParent(
        const HdContainerDataSourceHandle& fromParentContainer);
    // Locator of this schema's container within a prim data source.
    OMNIGEOSCENEINDEX_API
    static const HdDataSourceLocator& GetDefaultLocator();
    // Builds a retained container for the given position data source.
    OMNIGEOSCENEINDEX_API
    static HdContainerDataSourceHandle BuildRetained(
        const HdVec3dDataSourceHandle& position
    );
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_LOCAL_POSITION_SCHEMA_H_ | 1,985 | C | 32.661016 | 95 | 0.716877 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/usdImaging/usdImaging/dataSourceStageGlobals.h>
#include <omniGeospatial/wGS84ReferencePositionAPI.h>
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
/// \class HdOmniGeospatialWGS84ReferencePositionDataSource
///
/// Container data source exposing the WGS84 reference-position attributes
/// of a prim (applied via OmniGeospatialWGS84ReferencePositionAPI) to hydra.
class HdOmniGeospatialWGS84ReferencePositionDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialWGS84ReferencePositionDataSource);
    HdOmniGeospatialWGS84ReferencePositionDataSource(const UsdPrim& prim,
        const UsdImagingDataSourceStageGlobals& stageGlobals);
    // container data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    // Has() was removed from the data source API in USD 23.02
    bool Has(const TfToken& name) override;
#endif
private:
    // schema accessor for the prim's applied reference-position API
    OmniGeospatialWGS84ReferencePositionAPI _referencePositionApi;
    // stage-global state (e.g. time) used when sampling attributes
    const UsdImagingDataSourceStageGlobals& _stageGlobals;
    // Constant-valued sampled data source: always returns the value it was
    // constructed with, independent of the shutter offset.
    template <typename T>
    class _StageDataSource : public HdTypedSampledDataSource<T>
    {
    public:
        HD_DECLARE_DATASOURCE(_StageDataSource<T>);
        VtValue GetValue(HdSampledDataSource::Time shutterOffset) override
        {
            return VtValue(GetTypedValue(shutterOffset));
        }
        T GetTypedValue(HdSampledDataSource::Time shutterOffset) override
        {
            return _value;
        }
        // constant value => no time samples contribute to any interval
        bool GetContributingSampleTimesForInterval(
            HdSampledDataSource::Time startTime,
            HdSampledDataSource::Time endTime,
            std::vector<HdSampledDataSource::Time>* outSampleTimes) override
        {
            return false;
        }
    private:
        _StageDataSource(const T& value);
        T _value;
    };
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialWGS84ReferencePositionDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_DATA_SOURCE_H_ | 2,546 | C | 30.060975 | 85 | 0.739199 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionAPIAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include <omniGeospatial/wGS84ReferencePositionAPI.h>
#include "referencePositionAPIAdapter.h"
#include "referencePositionDataSource.h"
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
// Registers the adapter with the Tf type system so usdImaging can discover
// and instantiate it via the API schema adapter factory.
TF_REGISTRY_FUNCTION(TfType)
{
    typedef OmniGeospatialWGS84ReferencePositionAPIAdapter Adapter;
    TfType t = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter> >();
    t.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter> >();
}
#if PXR_VERSION >= 2302
// Returns the hydra data source contribution for a prim carrying the WGS84
// reference-position API schema (23.02+ signature).
HdContainerDataSourceHandle OmniGeospatialWGS84ReferencePositionAPIAdapter::GetImagingSubprimData(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#else
// Pre-23.02 signature: same behavior, different parameter order.
HdContainerDataSourceHandle OmniGeospatialWGS84ReferencePositionAPIAdapter::GetImagingSubprimData(
    const TfToken& subprim,
    const UsdPrim& prim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#endif
{
    // at the point we are invoked here, the stage scene index has already determined
    // that the API schema applies to the prim, so we can safely create our
    // data source
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        // there shouldn't be a subprim or an applied instance name;
        // if there is, we don't know what to do with it, so we return
        // null to indicate there is no data source for this prim setup
        return nullptr;
    }
    // nest the data source under the schema's container token so consumers
    // can find it via the schema's default locator
    return HdRetainedContainerDataSource::New(
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi,
        HdOmniGeospatialWGS84ReferencePositionDataSource::New(prim, stageGlobals)
    );
}
#if PXR_VERSION >= 2302
HdDataSourceLocatorSet OmniGeospatialWGS84ReferencePositionAPIAdapter::InvalidateImagingSubprim(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#else
HdDataSourceLocatorSet OmniGeospatialWGS84ReferencePositionAPIAdapter::InvalidateImagingSubprim(
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#endif
{
    // subprims / applied instances are never populated by this adapter,
    // so nothing can be invalidated for them
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        return HdDataSourceLocatorSet();
    }
    // a change to any property in the omni:geospatial:wgs84:reference
    // namespace dirties the entire reference position container
    static const TfToken geospatialPrefix("omni:geospatial:wgs84:reference");
    for (const TfToken& changedProperty : properties)
    {
        if (TfStringStartsWith(changedProperty, geospatialPrefix))
        {
            return HdOmniGeospatialWGS84ReferencePositionSchema::GetDefaultLocator();
        }
    }
    return HdDataSourceLocatorSet();
}
PXR_NAMESPACE_CLOSE_SCOPE | 3,306 | C++ | 33.810526 | 98 | 0.753781 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialDataSource.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_
#define HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_
#include <pxr/imaging/hd/dataSource.h>
#include <pxr/imaging/hd/dataSourceTypeDefs.h>
#include <pxr/imaging/hd/sceneIndex.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
//-----------------------------------------------------------------------------
#define HDOMNIGEOSPATIALDATASOURCE_TOKENS \
(geospatialPreservedXform)
TF_DECLARE_PUBLIC_TOKENS(HdOmniGeospatialDataSourceTokens, OMNIGEOSCENEINDEX_API,
HDOMNIGEOSPATIALDATASOURCE_TOKENS);
//-----------------------------------------------------------------------------
/// \class HdOmniGeospatialDataSource
///
/// A datasource representing a wrapped view of an existing flattened
/// data source where the xform token is intercepted and a new geospatial
/// matrix dynamically calculated.
///
class HdOmniGeospatialDataSource : public HdContainerDataSource
{
public:
    HD_DECLARE_DATASOURCE(HdOmniGeospatialDataSource);
    // Wraps \p wrappedDataSource (the prim's existing data source) for the
    // prim at \p primPath belonging to \p sceneIndex.
    HdOmniGeospatialDataSource(const HdSceneIndexBase& sceneIndex, const SdfPath& primPath,
        HdContainerDataSourceHandle wrappedDataSource);
    // Replaces the underlying wrapped data source (e.g. when the prim is
    // re-added) without recreating this wrapper.
    void UpdateWrappedDataSource(HdContainerDataSourceHandle wrappedDataSource);
    // data source overrides
    TfTokenVector GetNames() override;
    HdDataSourceBaseHandle Get(const TfToken& name) override;
#if PXR_VERSION < 2302
    bool Has(const TfToken& name) override;
#endif
    // determines if the data source would be dirtied based on the locators given
    bool IsPrimDirtied(const HdDataSourceLocatorSet& locators);
private:
    bool _HasGeospatialInformation(HdContainerDataSourceHandle dataSource);
    HdDataSourceBaseHandle _ComputeGeospatialXform();
private:
    const HdSceneIndexBase& _sceneIndex;             // scene index this prim belongs to
    SdfPath _primPath;                               // path of the wrapped prim
    HdContainerDataSourceHandle _wrappedDataSource;  // original (pre-geospatial) data source
    // cached computed datasources
    HdContainerDataSourceAtomicHandle _computedGeospatialPrimDataSource;
    HdContainerDataSourceAtomicHandle _computedGeospatialDependentDataSource;
};
HD_DECLARE_DATASOURCE_HANDLES(HdOmniGeospatialDataSource);
PXR_NAMESPACE_CLOSE_SCOPE
#endif // HD_OMNI_GEOSPATIAL_DATA_SOURCE_H_ | 2,737 | C | 31.987951 | 91 | 0.739496 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionAPIAdapter.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include <omniGeospatial/wGS84LocalPositionAPI.h>
#include <omniGeospatial/wGS84ReferencePositionAPI.h>
#include "localPositionAPIAdapter.h"
#include "localPositionDataSource.h"
#include "localPositionSchema.h"
#include "referencePositionDataSource.h"
#include "referencePositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_REGISTRY_FUNCTION(TfType)
{
    // Register the adapter with the TfType system and install the factory
    // UsdImaging uses to instantiate it for the applied API schema.
    using Adapter = OmniGeospatialWGS84LocalPositionAPIAdapter;
    TfType adapterType = TfType::Define<Adapter, TfType::Bases<Adapter::BaseAdapter>>();
    adapterType.SetFactory<UsdImagingAPISchemaAdapterFactory<Adapter>>();
}
#if PXR_VERSION >= 2302
HdContainerDataSourceHandle OmniGeospatialWGS84LocalPositionAPIAdapter::GetImagingSubprimData(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#else
HdContainerDataSourceHandle OmniGeospatialWGS84LocalPositionAPIAdapter::GetImagingSubprimData(
    const TfToken& subprim,
    const UsdPrim& prim,
    const TfToken& appliedInstanceName,
    const UsdImagingDataSourceStageGlobals& stageGlobals)
#endif
{
    // The stage scene index has already determined that the API schema
    // applies to the prim; only reject the cases we cannot interpret
    // (subprims and applied instance names have no meaning here).
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        return nullptr;
    }
    // Walk up the namespace hierarchy (starting at the prim itself) to find
    // a "geodetic root" - an ancestor carrying the WGS84 reference position.
    // Doing the traversal here is cheaper than doing it in the scene index,
    // since we already have everything we need while this prim is processed.
    UsdPrim ancestor = prim;
    while (!ancestor.IsPseudoRoot())
    {
        if (ancestor.HasAPI<OmniGeospatialWGS84ReferencePositionAPI>())
        {
            // bake the geodetic root information into this local prim
            // alongside its own local position data
            return HdRetainedContainerDataSource::New(
                HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi,
                HdOmniGeospatialWGS84LocalPositionDataSource::New(prim, stageGlobals),
                HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePositionApi,
                HdOmniGeospatialWGS84ReferencePositionDataSource::New(ancestor, stageGlobals));
        }
        ancestor = ancestor.GetParent();
    }
    // No geodetic root found: this is an unrooted local position, so keep
    // whatever transform information the prim would otherwise have had.
    return nullptr;
}
#if PXR_VERSION >= 2302
HdDataSourceLocatorSet OmniGeospatialWGS84LocalPositionAPIAdapter::InvalidateImagingSubprim(
    const UsdPrim& prim,
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#else
HdDataSourceLocatorSet OmniGeospatialWGS84LocalPositionAPIAdapter::InvalidateImagingSubprim(
    const TfToken& subprim,
    const TfToken& appliedInstanceName,
    const TfTokenVector& properties)
#endif
{
    // subprims / applied instances are never populated by this adapter,
    // so nothing can be invalidated for them
    if (!subprim.IsEmpty() || !appliedInstanceName.IsEmpty())
    {
        return HdDataSourceLocatorSet();
    }
    // a change to any property in the omni:geospatial:wgs84:local namespace
    // dirties the entire local position container
    static const TfToken geospatialPrefix("omni:geospatial:wgs84:local");
    for (const TfToken& changedProperty : properties)
    {
        if (TfStringStartsWith(changedProperty, geospatialPrefix))
        {
            return HdOmniGeospatialWGS84LocalPositionSchema::GetDefaultLocator();
        }
    }
    return HdDataSourceLocatorSet();
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,574 | C++ | 36.809917 | 122 | 0.740927 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/usd/usdGeom/metrics.h>
#include <pxr/usd/usdGeom/tokens.h>
#include <pxr/usdImaging/usdImaging/dataSourceAttribute.h>
#include "referencePositionDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Constructs a data source that reads its values from the
// OmniGeospatialWGS84ReferencePositionAPI applied to \p prim, resolving
// attribute values through \p stageGlobals.
HdOmniGeospatialWGS84ReferencePositionDataSource::HdOmniGeospatialWGS84ReferencePositionDataSource(
    const UsdPrim& prim,
    const UsdImagingDataSourceStageGlobals& stageGlobals) :
    _stageGlobals(stageGlobals)
{
    _referencePositionApi = OmniGeospatialWGS84ReferencePositionAPI(prim);
}
#if PXR_VERSION < 2302
bool HdOmniGeospatialWGS84ReferencePositionDataSource::Has(const TfToken& name)
{
return (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane) ||
(name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition) ||
(name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation) ||
(name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis) ||
(name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit);
}
#endif
TfTokenVector HdOmniGeospatialWGS84ReferencePositionDataSource::GetNames()
{
    // the hydra attribute names this data source is responsible for
    return {
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis,
        HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit
    };
}
// Returns the data source for one of the names advertised by GetNames():
// the three schema attributes are forwarded as time-sampled attribute data
// sources, while the two stage-level entries are captured as fixed values.
// Returns null for unsupported names.
HdDataSourceBaseHandle HdOmniGeospatialWGS84ReferencePositionDataSource::Get(const TfToken& name)
{
    // retrieves the data source values for the attributes this data source
    // supports
    if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->tangentPlane)
    {
        return UsdImagingDataSourceAttribute<TfToken>::New(
            _referencePositionApi.GetTangentPlaneAttr(), _stageGlobals);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->referencePosition)
    {
        return UsdImagingDataSourceAttribute<GfVec3d>::New(
            _referencePositionApi.GetReferencePositionAttr(), _stageGlobals);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->orientation)
    {
        return UsdImagingDataSourceAttribute<GfVec3d>::New(
            _referencePositionApi.GetOrientationAttr(), _stageGlobals);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageUpAxis)
    {
        // stage metadata rather than a schema attribute; fall back to Y-up
        // when the prim has no stage
        TfToken upAxis = UsdGeomTokens->y;
        UsdStageWeakPtr stage = _referencePositionApi.GetPrim().GetStage();
        if (stage != nullptr)
        {
            upAxis = UsdGeomGetStageUpAxis(stage);
        }
        return _StageDataSource<TfToken>::New(upAxis);
    }
    else if (name == HdOmniGeospatialWGS84ReferencePositionSchemaTokens->stageMetersPerUnit)
    {
        // stage metadata; fall back to 0.01 (centimeters) when the prim has
        // no stage
        double mpu = 0.01;
        UsdStageWeakPtr stage = _referencePositionApi.GetPrim().GetStage();
        if (stage != nullptr)
        {
            mpu = UsdGeomGetStageMetersPerUnit(stage);
        }
        return _StageDataSource<double>::New(mpu);
    }
    // this is a name we don't support
    return nullptr;
}
// _StageDataSource simply holds the fixed \p value captured at construction
// time (used to expose stage-level metadata such as up axis and
// meters-per-unit as sampled data sources).
template <typename T>
HdOmniGeospatialWGS84ReferencePositionDataSource::_StageDataSource<T>::_StageDataSource(const T& value) : _value(value)
{
}
PXR_NAMESPACE_CLOSE_SCOPE | 4,155 | C++ | 38.207547 | 119 | 0.754513 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/referencePositionAPIAdapter.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_
#define OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_
#include <pxr/pxr.h>
#include <pxr/usdImaging/usdImaging/apiSchemaAdapter.h>
#include "api.h"
PXR_NAMESPACE_OPEN_SCOPE
/// \class OmniGeospatialWGS84ReferencePositionAPIAdapter
///
/// UsdImaging API schema adapter exposing the WGS84 reference position
/// applied API schema to Hydra as a container data source.
///
class OmniGeospatialWGS84ReferencePositionAPIAdapter : public UsdImagingAPISchemaAdapter
{
public:
    using BaseAdapter = UsdImagingAPISchemaAdapter;
    // Returns the container data source for the applied schema on the given
    // prim (parameter order differs across USD versions).
#if PXR_VERSION >= 2302
    OMNIGEOSCENEINDEX_API
    HdContainerDataSourceHandle GetImagingSubprimData(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const UsdImagingDataSourceStageGlobals& stageGlobals
    ) override;
#else
    OMNIGEOSCENEINDEX_API
    HdContainerDataSourceHandle GetImagingSubprimData(
        const TfToken& subprim,
        const UsdPrim& prim,
        const TfToken& appliedInstanceName,
        const UsdImagingDataSourceStageGlobals& stageGlobals
    ) override;
#endif
    // Translates changed USD properties into dirtied Hydra data source
    // locators (parameter order differs across USD versions).
#if PXR_VERSION >= 2302
    OMNIGEOSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
        const UsdPrim& prim,
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const TfTokenVector& properties
    ) override;
#else
    OMNIGEOSCENEINDEX_API
    HdDataSourceLocatorSet InvalidateImagingSubprim(
        const TfToken& subprim,
        const TfToken& appliedInstanceName,
        const TfTokenVector& properties
    ) override;
#endif
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif // OMNI_GEOSPATIAL_WGS84_REFERENCE_POSITION_API_ADAPTER_H_ | 2,144 | C | 30.544117 | 88 | 0.747201 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/computedDependentDataSource.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/xformSchema.h>
#include "geospatialDataSource.h"
#include "computedDependentDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
// Wraps \p inputDataSource (the dependent prim's original data source) and
// \p parentDataSource (its geospatially recomputed parent) and builds the
// matrix data source that lazily computes the dependent transform.
HdOmniGeospatialComputedDependentDataSource::HdOmniGeospatialComputedDependentDataSource(
    HdContainerDataSourceHandle inputDataSource,
    HdContainerDataSourceHandle parentDataSource) :
    _inputDataSource(inputDataSource),
    _parentDataSource(parentDataSource)
{
    _matrixDataSource =
        HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::New(
            _inputDataSource, parentDataSource);
}
#if PXR_VERSION < 2302
// Pre-23.02 container interface: this data source only answers for the two
// xform tokens.
bool HdOmniGeospatialComputedDependentDataSource::Has(const TfToken& name)
{
    return (name == HdXformSchemaTokens->resetXformStack) ||
        (name == HdXformSchemaTokens->matrix);
}
#endif
TfTokenVector HdOmniGeospatialComputedDependentDataSource::GetNames()
{
    // this container data source exposes only the xform tokens
    return { HdXformSchemaTokens->resetXformStack, HdXformSchemaTokens->matrix };
}
HdDataSourceBaseHandle HdOmniGeospatialComputedDependentDataSource::Get(const TfToken& name)
{
    if (_inputDataSource == nullptr)
    {
        return nullptr;
    }
    if (name == HdXformSchemaTokens->matrix)
    {
        // hand out the lazily computed geospatially-affected matrix
        return _matrixDataSource;
    }
    if (name == HdXformSchemaTokens->resetXformStack)
    {
        // resetXformStack is never modified by this data source, so the
        // underlying time-sampled data is forwarded directly
        HdXformSchema xformSchema = HdXformSchema::GetFromParent(_inputDataSource);
        return xformSchema.IsDefined() ? xformSchema.GetResetXformStack() : nullptr;
    }
    return nullptr;
}
// Captures the input (dependent prim) and parent data sources the matrix is
// computed from; all computation happens lazily in GetTypedValue.
HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::_GeospatiallyAffectedMatrixDataSource(
    HdContainerDataSourceHandle inputDataSource,
    HdContainerDataSourceHandle parentDataSource) :
    _inputDataSource(inputDataSource),
    _parentDataSource(parentDataSource)
{
}
// Type-erased access: boxes the computed matrix in a VtValue.
VtValue HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetValue(Time shutterOffset)
{
    return VtValue(this->GetTypedValue(shutterOffset));
}
// Typed access: the flattened transform recomputed from the geospatially
// affected parent.
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetTypedValue(Time shutterOffset)
{
    return this->_ComputeTransformedMatrix(shutterOffset);
}
// Merges the contributing sample times of the prim's own matrix and the
// parent's recomputed matrix, since the computed value depends on both.
bool HdOmniGeospatialComputedDependentDataSource::_GeospatiallyAffectedMatrixDataSource::GetContributingSampleTimesForInterval(
    Time startTime,
    Time endTime,
    std::vector<Time>* outSampleTimes)
{
    HdSampledDataSourceHandle sources[] = {
        this->_GetMatrixSource(),
        this->_GetParentMatrixSource()
    };
    return HdGetMergedContributingSampleTimesForInterval(
        TfArraySize(sources),
        sources,
        startTime,
        endTime,
        outSampleTimes);
}
// Source for the wrapped prim's own flattened matrix.
HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetMatrixSource() const
{
    return HdXformSchema::GetFromParent(_inputDataSource).GetMatrix();
}
// Source for the wrapped prim's resetXformStack flag.
HdBoolDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetResetXformStackSource() const
{
    return HdXformSchema::GetFromParent(_parentDataSource).GetResetXformStack();
}
// Source for the parent's (geospatially recomputed) flattened matrix.
HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentMatrixSource() const
{
    return HdXformSchema::GetFromParent(_parentDataSource).GetMatrix();
}
// Source for the parent's ORIGINAL (pre-geospatial-computation) flattened
// matrix, which HdOmniGeospatialDataSource preserves under the
// geospatialPreservedXform token. Falls back to the recomputed parent
// matrix (with a warning) when the preserved xform cannot be retrieved.
HdMatrixDataSourceHandle HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentOriginalMatrixSource() const
{
    // the parent data source here should be a geospatial data source
    // but in the event it is not, this method will simply return the same
    // matrix as that of _GetParentMatrixSource
    HdOmniGeospatialDataSourceHandle geospatialDataSource =
        HdOmniGeospatialDataSource::Cast(_parentDataSource);
    if (geospatialDataSource != nullptr)
    {
        HdContainerDataSourceHandle xformDataSource =
            HdContainerDataSource::Cast(
                geospatialDataSource->Get(HdOmniGeospatialDataSourceTokens->geospatialPreservedXform));
        if (xformDataSource == nullptr)
        {
            TF_WARN("Parent data source could not retrieve preserved xform!");
            return this->_GetParentMatrixSource();
        }
        HdMatrixDataSourceHandle matrixDataSource = HdMatrixDataSource::Cast(
            xformDataSource->Get(HdXformSchemaTokens->matrix));
        if (matrixDataSource == nullptr)
        {
            TF_WARN("Xform schema not defined on preserved container data source!");
        }
        return (matrixDataSource != nullptr) ? matrixDataSource : this->_GetParentMatrixSource();
    }
    else
    {
        TF_WARN("Parent data source has no geospatial data source!");
    }
    return this->_GetParentMatrixSource();
}
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetMatrix(const Time shutterOffset) const
{
HdMatrixDataSourceHandle dataSource = this->_GetMatrixSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(shutterOffset);
}
return GfMatrix4d(1.0);
}
bool HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetResetXformStack(const Time shutterOffset) const
{
HdBoolDataSourceHandle dataSource = this->_GetResetXformStackSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(shutterOffset);
}
return false;
}
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentMatrix(const Time shutterOffset) const
{
HdMatrixDataSourceHandle dataSource = this->_GetParentMatrixSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(shutterOffset);
}
return GfMatrix4d(1.0);
}
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_GetParentOriginalMatrix(const Time shutterOffset) const
{
HdMatrixDataSourceHandle dataSource = this->_GetParentOriginalMatrixSource();
if (dataSource != nullptr)
{
return dataSource->GetTypedValue(shutterOffset);
}
return GfMatrix4d(1.0);
}
GfMatrix4d HdOmniGeospatialComputedDependentDataSource::
_GeospatiallyAffectedMatrixDataSource::_ComputeTransformedMatrix(const Time shutterOffset) const
{
    // This prim did not have geospatial information applied to it, but it is
    // the child of one that did, so we recompute its flattened transform from
    // the recomputed (geospatial) transform of the parent. We only do this
    // when the prim does not reset the xform stack.
    bool resetXformStack = this->_GetResetXformStack(shutterOffset);
    if (resetXformStack)
    {
        // if resetXformStack is true, the original flattened transform of
        // the input data source remains authoritative and is not recomputed
        return this->_GetMatrix(shutterOffset);
    }
    // to compute the affected matrix, we first need to acquire both the
    // recomputed and the original flattened parent transforms
    GfMatrix4d flattenedParentTransform = this->_GetParentMatrix(shutterOffset);
    GfMatrix4d originalParentTransform = this->_GetParentOriginalMatrix(shutterOffset);
    // Since we are dealing with flattened transforms, we have to recover the
    // local transform of the prim in question. GfMatrix4d composes with row
    // vectors, so a flattened (local-to-world) transform is FT = LT * P,
    // where LT is the child's local transform and P the parent's flattened
    // transform (see UsdGeomXformCache). Hence LT = FT * P^-1.
    GfMatrix4d inverseParentTransform = originalParentTransform.GetInverse();
    GfMatrix4d originalChildTransform = this->_GetMatrix(shutterOffset);
    GfMatrix4d childLocalTransform = originalChildTransform * inverseParentTransform;
    // Re-apply the new flattened parent transform on the parent side:
    // FT' = LT * P'.
    // BUG FIX: this previously returned flattenedParentTransform *
    // childLocalTransform (i.e. P' * LT), which is the wrong composition
    // order for USD's row-vector convention and was inconsistent with the
    // recovery step above; the two only agreed when the matrices commuted
    // (e.g. pure translations).
    return childLocalTransform * flattenedParentTransform;
}
PXR_NAMESPACE_CLOSE_SCOPE | 9,285 | C++ | 35.996016 | 128 | 0.74238 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/localPositionSchema.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/imaging/hd/retainedDataSource.h>
#include "localPositionSchema.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdOmniGeospatialWGS84LocalPositionSchemaTokens,
HDOMNIGEOSPATIALWGS84LOCALPOSITION_SCHEMA_TOKENS);
// Returns the data source holding the local position value, or null when the
// schema container does not carry one.
HdVec3dDataSourceHandle HdOmniGeospatialWGS84LocalPositionSchema::GetPosition()
{
    return _GetTypedDataSource<HdVec3dDataSource>(
        HdOmniGeospatialWGS84LocalPositionSchemaTokens->position);
}
HdOmniGeospatialWGS84LocalPositionSchema HdOmniGeospatialWGS84LocalPositionSchema::GetFromParent(
    const HdContainerDataSourceHandle& fromParentContainer)
{
    // Pull the local-position container out of the parent container; a null
    // parent yields a schema wrapping a null container.
    HdContainerDataSourceHandle container = nullptr;
    if (fromParentContainer != nullptr)
    {
        container = HdContainerDataSource::Cast(fromParentContainer->Get(
            HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi));
    }
    return HdOmniGeospatialWGS84LocalPositionSchema(container);
}
// Locator at which the local position container is published on a prim.
const HdDataSourceLocator& HdOmniGeospatialWGS84LocalPositionSchema::GetDefaultLocator()
{
    static const HdDataSourceLocator locator(
        HdOmniGeospatialWGS84LocalPositionSchemaTokens->localPositionApi
    );
    return locator;
}
HdContainerDataSourceHandle HdOmniGeospatialWGS84LocalPositionSchema::BuildRetained(
    const HdVec3dDataSourceHandle& position)
{
    // Build a retained container holding only the entries that were provided
    // (position is the sole member of this schema).
    if (position == nullptr)
    {
        return HdRetainedContainerDataSource::New(0, nullptr, nullptr);
    }
    TfToken names[] = { HdOmniGeospatialWGS84LocalPositionSchemaTokens->position };
    HdDataSourceBaseHandle values[] = { position };
    return HdRetainedContainerDataSource::New(1, names, values);
}
PXR_NAMESPACE_CLOSE_SCOPE | 2,240 | C++ | 31.955882 | 97 | 0.775 |
NVIDIA-Omniverse/usd-plugin-samples/src/hydra-plugins/omniGeoSceneIndex/geospatialSceneIndex.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/work/utils.h>
#include <pxr/imaging/hd/xformSchema.h>
#include <pxr/imaging/hd/retainedDataSource.h>
#include <pxr/imaging/hd/overlayContainerDataSource.h>
#include <pxr/imaging/hd/dependenciesSchema.h>
#include "geospatialSceneIndex.h"
#include "referencePositionSchema.h"
#include "localPositionSchema.h"
#include "geospatialDataSource.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PRIVATE_TOKENS(
_tokens,
(positionToXform)
);
// Creates a ref-counted geospatial scene index filtering \p inputSceneIndex.
// \p inputArgs is accepted for API symmetry but not used by the filter.
OmniGeospatialSceneIndexRefPtr OmniGeospatialSceneIndex::New(
    const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs)
{
    return TfCreateRefPtr(new OmniGeospatialSceneIndex(inputSceneIndex, inputArgs));
}
OmniGeospatialSceneIndex::OmniGeospatialSceneIndex(const HdSceneIndexBaseRefPtr& inputSceneIndex,
    const HdContainerDataSourceHandle& inputArgs) :
    HdSingleInputFilteringSceneIndexBase(inputSceneIndex)
{
}
OmniGeospatialSceneIndex::~OmniGeospatialSceneIndex() = default;
// Returns the (possibly wrapped) scene index prim at \p primPath. Xformable
// prims are lazily wrapped in an HdOmniGeospatialDataSource so their
// transform can be geospatially recomputed on demand.
HdSceneIndexPrim OmniGeospatialSceneIndex::GetPrim(const SdfPath &primPath) const
{
    // lookup the prim to see if we have wrapped it yet
    auto iterBoolPair = this->_IsPrimWrapped(primPath);
    if (iterBoolPair.second)
    {
        // we have it wrapped already, so return the wrapped prim
        return iterBoolPair.first->second;
    }
    // we haven't wrapped it yet, but we only need to wrap it
    // if it is Xformable - geospatial transforms have the potential
    // to affect anything that has a transform, so even if it is
    // never affected (e.g. resetXform is true or it is not the child
    // of a geospatially applied prim) we wrap it here for simplicity
    // sake at the cost of an extra HdSceneIndexPrim (as in some cases
    // it will even retain its original data source)
    // note that unlike the flattening scene index we wrap lazily
    // instead of walking the tree at construction time - this is because
    // there is a low chance of geospatial information being attached
    // to a prim and in cases where the scene isn't geospatially grounded
    // but the scene index is still applied we don't want to walk the
    // whole scene
    HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(primPath);
    HdXformSchema xformSchema = HdXformSchema::GetFromParent(sceneIndexPrim.dataSource)
    if (xformSchema.IsDefined() && !xformSchema.GetResetXformStack())
    {
        return this->_WrapPrim(primPath, sceneIndexPrim);
    }
    // otherwise we don't need to wrap it and can return it directly
    return sceneIndexPrim;
}
SdfPathVector OmniGeospatialSceneIndex::GetChildPrimPaths(const SdfPath& primPath) const
{
    // Topology is unchanged by this filter, so delegate directly upstream.
    return _GetInputSceneIndex()->GetChildPrimPaths(primPath);
}
// Looks up \p primPath in the wrapped-prim cache. Returns the find iterator
// and a flag that is true only when the entry is a genuinely wrapped prim
// (SdfPathTable auto-creates empty ancestor entries, which do not count).
SdfPathTable<HdSceneIndexPrim>::_IterBoolPair OmniGeospatialSceneIndex::_IsPrimWrapped(const SdfPath& primPath) const
{
    bool result = false;
    const auto it = _wrappedPrims.find(primPath);
    if (it != _wrappedPrims.end())
    {
        // because SdfPathTable inserts all parents
        // when a path gets inserted, there may be an empty
        // entry in our cache if a child path was visited first
        // to verify we have to check the prim type and data source
        if (it->second.primType != TfToken() || it->second.dataSource != nullptr)
        {
            // not an auto-insertion of the parent
            result = true;
        }
    }
    return std::make_pair(it, result);
}
// Inserts (or fills in an auto-created slot for) a wrapped version of
// \p hdPrim at \p primPath and returns a reference to the cached entry.
HdSceneIndexPrim& OmniGeospatialSceneIndex::_WrapPrim(const SdfPath& primPath, const HdSceneIndexPrim& hdPrim) const
{
    // PRECONDITION: The table must not yet contain a wrapped prim, check via _IsPrimWrapped first!
    // wrapping a scene index prim involves creating our geospatial data source to wrap the original
    // scene index prim's data source - this will allow us to intercept the xform token to return
    // a computed geospatial transform and still provide access to the original xform via the wrapped data source
    HdContainerDataSourceHandle wrappedDataSource = HdOmniGeospatialDataSource::New(*this, primPath, hdPrim.dataSource);
    const auto it = _wrappedPrims.find(primPath);
    if (it != _wrappedPrims.end())
    {
        // in this case, the entry is there, but it was auto-created
        // by SdfPathTable, meaning it should have empty entries
        TF_VERIFY(it->second.primType == TfToken());
        TF_VERIFY(it->second.dataSource == nullptr);
        it->second.primType = hdPrim.primType;
        it->second.dataSource = std::move(wrappedDataSource);
        return it->second;
    }
    else
    {
        auto iterBoolPair = _wrappedPrims.insert(
            {
                primPath,
                HdSceneIndexPrim {
                    hdPrim.primType,
                    std::move(wrappedDataSource)
                }
            }
        );
        return iterBoolPair.first->second;
    }
}
// Observer callback: wraps (or re-wraps) newly added xformable prims and
// dirties cached xform data for any descendants affected by a re-add in the
// middle of the hierarchy, then forwards the notices downstream.
void OmniGeospatialSceneIndex::_PrimsAdded(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::AddedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for(const HdSceneIndexObserver::AddedPrimEntry& entry : entries)
    {
        HdSceneIndexPrim sceneIndexPrim = this->_GetInputSceneIndex()->GetPrim(entry.primPath);
        // cache the prim if necessary
        HdXformSchema xformSchema = HdXformSchema::GetFromParent(sceneIndexPrim.dataSource);
        if (xformSchema.IsDefined() && !xformSchema.GetResetXformStack())
        {
            auto iterBoolPair = this->_IsPrimWrapped(entry.primPath);
            if (iterBoolPair.second)
            {
                // we already wrapped this prim, so we need to update it
                HdSceneIndexPrim& wrappedPrim = iterBoolPair.first->second;
                wrappedPrim.primType = entry.primType;
                if (wrappedPrim.dataSource != nullptr)
                {
                    HdOmniGeospatialDataSource::Cast(wrappedPrim.dataSource)->UpdateWrappedDataSource(sceneIndexPrim.dataSource);
                }
                // if we updated it, we have to now see if we need
                // to dirty any cached values already in the hierarchy
                static HdDataSourceLocatorSet locators = {
                    HdXformSchema::GetDefaultLocator()
                };
                this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
            }
            else
            {
                // we don't yet have this prim wrapped - do so now
                this->_WrapPrim(entry.primPath, sceneIndexPrim);
            }
        }
    }
    // forward on the notification
    this->_SendPrimsAdded(entries);
    // also, if we had to dirty entries because of an insertion in the middle
    // of the stage hierarchy, send those along too
    if (!dirtyEntries.empty())
    {
        this->_SendPrimsDirtied(dirtyEntries);
    }
}
// Observer callback: invoked when prims are removed from the input scene
// index. Drops the corresponding wrapped entries (destroying their data
// sources asynchronously) and forwards the notification downstream.
void OmniGeospatialSceneIndex::_PrimsRemoved(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::RemovedPrimEntries& entries)
{
    for (const HdSceneIndexObserver::RemovedPrimEntry& entry : entries)
    {
        if (entry.primPath.IsAbsoluteRootPath())
        {
            // removing the whole scene
            _wrappedPrims.ClearInParallel();
            TfReset(_wrappedPrims);
        }
        else
        {
            // release the data source of every entry in the removed subtree
            auto startEndRangeIterator = _wrappedPrims.FindSubtreeRange(entry.primPath);
            for (auto it = startEndRangeIterator.first; it != startEndRangeIterator.second; it++)
            {
                WorkSwapDestroyAsync(it->second.dataSource);
            }

            // NOTE(review): erasing the subtree's first entry is relied on to
            // remove the entire subtree from the SdfPathTable - confirm
            // against the SdfPathTable::erase documentation
            if(startEndRangeIterator.first != startEndRangeIterator.second)
            {
                _wrappedPrims.erase(startEndRangeIterator.first);
            }
        }
    }

    _SendPrimsRemoved(entries);
}
// Observer callback: invoked when prims are dirtied in the input scene
// index. When the dirtied locators include the xform, descendants holding
// cached values are dirtied as well. The incoming notification is forwarded
// first, followed by any additional entries generated for descendants.
void OmniGeospatialSceneIndex::_PrimsDirtied(const HdSceneIndexBase& sender,
    const HdSceneIndexObserver::DirtiedPrimEntries& entries)
{
    HdSceneIndexObserver::DirtiedPrimEntries dirtyEntries;
    for (const HdSceneIndexObserver::DirtiedPrimEntry& entry : entries)
    {
        // only xform changes are relevant to this index
        HdDataSourceLocatorSet locators;
        if (entry.dirtyLocators.Intersects(HdXformSchema::GetDefaultLocator()))
        {
            locators.insert(HdXformSchema::GetDefaultLocator());
        }

        if (!locators.IsEmpty())
        {
            this->_DirtyHierarchy(entry.primPath, locators, &dirtyEntries);
        }
    }

    _SendPrimsDirtied(entries);
    if (!dirtyEntries.empty())
    {
        _SendPrimsDirtied(dirtyEntries);
    }
}
// Walks the wrapped-prim subtree rooted at primPath and records a dirty
// entry for every descendant whose geospatial data source reports cached
// values covered by the given locators. The root prim itself is excluded,
// since the caller's own notification already covers it.
void OmniGeospatialSceneIndex::_DirtyHierarchy(const SdfPath& primPath, const HdDataSourceLocatorSet& locators,
    HdSceneIndexObserver::DirtiedPrimEntries* dirtyEntries)
{
    // FindSubtreeRange yields a [begin, end) iterator pair spanning primPath
    // and all of its descendants in the wrapped-prim table
    auto subtreeRange = _wrappedPrims.FindSubtreeRange(primPath);
    for (auto entryIt = subtreeRange.first; entryIt != subtreeRange.second; ++entryIt)
    {
        // entries without a data source carry no cached values to invalidate
        if (entryIt->second.dataSource == nullptr)
        {
            continue;
        }

        // IsPrimDirtied tells us whether this wrapper holds cached data
        // affected by the locators and therefore needs a dirty notice
        HdOmniGeospatialDataSourceHandle geospatialDataSource =
            HdOmniGeospatialDataSource::Cast(entryIt->second.dataSource);
        if (geospatialDataSource != nullptr && geospatialDataSource->IsPrimDirtied(locators))
        {
            if (entryIt->first != primPath)
            {
                dirtyEntries->emplace_back(entryIt->first, locators);
            }
        }
    }
}
PXR_NAMESPACE_CLOSE_SCOPE | 10,650 | C++ | 36.37193 | 129 | 0.665446 |
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/fileFormat/edfFileFormat/edfFileFormat.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "edfFileFormat.h"
#include "edfData.h"
PXR_NAMESPACE_OPEN_SCOPE
// Constructs the file format, registering its identifier, version,
// composition target, and file extension with the base SdfFileFormat.
EdfFileFormat::EdfFileFormat() : SdfFileFormat(
    EdfFileFormatTokens->Id,
    EdfFileFormatTokens->Version,
    EdfFileFormatTokens->Target,
    EdfFileFormatTokens->Extension)
{
}

EdfFileFormat::~EdfFileFormat()
{
}
// Always claims the file is readable; actual validation happens in Read()
// when the data provider attempts to load the content.
bool EdfFileFormat::CanRead(const std::string& filePath) const
{
    return true;
}
// Populates the dynamic layer: builds an EdfData object from the layer's
// file format arguments, asks the data provider to read its content, and on
// success installs that data into the layer as read-only content.
// resolvedPath and metadataOnly are unused - content comes entirely from
// the backing data provider.
bool EdfFileFormat::Read(SdfLayer* layer, const std::string& resolvedPath, bool metadataOnly) const
{
    // these macros emit methods defined in the Pixar namespace
    // but not properly scoped, so we have to use the namespace
    // locally here - note this isn't strictly true since we had to open
    // the namespace scope anyway because the macros won't allow non-Pixar namespaces
    // to be used because of some auto-generated content
    PXR_NAMESPACE_USING_DIRECTIVE
    if (!TF_VERIFY(layer))
    {
        return false;
    }

    // construct the SdfAbstractData object from the file format args
    // and set that as the layer data - note this is a different object
    // from that constructed in the InitData method - this may or may
    // not be an issue, something to be investigated in more detail when
    // working through the backend - either way we associate it with the layer
    // so we always have a mapping from the dynamic layer and the specific
    // set of parameters that created it
    const FileFormatArguments& args = layer->GetFileFormatArguments();
    SdfAbstractDataRefPtr layerData = this->InitData(args);

    // inform the data provider that it's time to read the content
    // this is a good time for it to cache data that it needs to generate
    // the prim / property specs when asked for them via the data apis
    EdfData& edfData = dynamic_cast<EdfData&>(*layerData);
    bool readSuccess = edfData.Read();
    if (readSuccess)
    {
        this->_SetLayerData(layer, layerData);

        // for now, this is dynamic content read one way from a source external system
        // therefore we mark that the layer is read-only
        // later we will remove this restriction and explore what it means to edit
        // data that is sourced from external data formats
        layer->SetPermissionToSave(false);
        layer->SetPermissionToEdit(false);
    }

    return readSuccess;
}
// Serialization is not supported by this proof-of-concept format; both
// write entry points unconditionally report failure.
bool EdfFileFormat::WriteToString(const SdfLayer& layer, std::string* str, const std::string& comment) const
{
    // this POC doesn't support writing
    return false;
}

bool EdfFileFormat::WriteToStream(const SdfSpecHandle& spec, std::ostream& out, size_t indent) const
{
    // this POC doesn't support writing
    return false;
}
// Creates the layer's backing SdfAbstractData object by unpacking the file
// format arguments into an EdfDataParameters set and handing it to EdfData.
SdfAbstractDataRefPtr EdfFileFormat::InitData(const FileFormatArguments& args) const
{
    // create the data parameters object to capture what data was used to create the layer
    EdfDataParameters parameters = EdfDataParameters::FromFileFormatArgs(args);
    return EdfData::CreateFromParameters(parameters);
}
// Anonymous layers of this format must not skip reload.
bool EdfFileFormat::_ShouldSkipAnonymousReload() const
{
    return false;
}

// Anonymous layers of this format should be read like regular layers.
bool EdfFileFormat::_ShouldReadAnonymousLayers() const
{
    return true;
}
// Converts the composed EdfDataParameters metadata on the payload prim into
// file format arguments used to open the dynamic layer. The provider id is
// copied directly; each entry of the providerArgs sub-dictionary is flattened
// into a "providerArgs:<key>" argument.
void EdfFileFormat::ComposeFieldsForFileFormatArguments(const std::string& assetPath, const PcpDynamicFileFormatContext& context, FileFormatArguments* args, VtValue* contextDependencyData) const
{
    VtValue val;
    if (context.ComposeValue(EdfFileFormatTokens->Params, &val) && val.IsHolding<VtDictionary>())
    {
        // the composition engine has composed the metadata values of the prim appropriately
        // for the currently composed stage, we read these metadata values that were composed
        // and make them part of the file format arguments to load the dependent layer
        VtDictionary dict = val.UncheckedGet<VtDictionary>();
        const VtValue* dictVal = TfMapLookupPtr(dict, EdfDataParametersTokens->dataProviderId);
        if (dictVal != nullptr)
        {
            (*args)[EdfDataParametersTokens->dataProviderId] = dictVal->UncheckedGet<std::string>();
        }

        // unfortunately, FileFormatArguments is a typedef for a map<string, string>
        // which means we have to unpack the provider arguments dictionary
        // to keep the unpacking simple, we assume for now that the providerArgs
        // is itself a dictionary containing only string pairs and values
        // we can remove this restriction later for simple types (using TfStringify)
        // but would need some work (recursively) for embedded lists and dictionary values
        dictVal = TfMapLookupPtr(dict, EdfDataParametersTokens->providerArgs);
        if (dictVal != nullptr)
        {
            std::string prefix = EdfDataParametersTokens->providerArgs.GetString();
            VtDictionary providerArgs = dictVal->UncheckedGet<VtDictionary>();
            for (VtDictionary::iterator it = providerArgs.begin(); it != providerArgs.end(); it++)
            {
                (*args)[prefix + ":" + it->first] = it->second.UncheckedGet<std::string>();
            }
        }
    }
}
// Determines whether a change to the EdfDataParameters metadata field
// requires the dynamic layer to be regenerated. Returns true when the data
// provider id changed (including appearing or disappearing) or when the
// provider-specific arguments dictionary changed; false when neither value
// is present or when both are unchanged.
bool EdfFileFormat::CanFieldChangeAffectFileFormatArguments(const TfToken& field, const VtValue& oldValue, const VtValue& newValue, const VtValue& contextDependencyData) const
{
    const VtDictionary& oldDictionaryValue = oldValue.IsHolding<VtDictionary>() ?
        oldValue.UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
    const VtDictionary& newDictionaryValue = newValue.IsHolding<VtDictionary>() ?
        newValue.UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();

    // nothing to do if both metadata values are empty
    if (oldDictionaryValue.empty() && newDictionaryValue.empty())
    {
        return false;
    }

    // our layer is new if:
    // 1. there is a new provider
    // 2. there is a change to the value of the provider specific data
    const VtValue* oldProviderId =
        TfMapLookupPtr(oldDictionaryValue, EdfDataParametersTokens->dataProviderId);
    const VtValue* newProviderId =
        TfMapLookupPtr(newDictionaryValue, EdfDataParametersTokens->dataProviderId);
    if (oldProviderId != nullptr && newProviderId != nullptr)
    {
        if (oldProviderId->UncheckedGet<std::string>() != newProviderId->UncheckedGet<std::string>())
        {
            // different providers!
            return true;
        }

        // same provider, but the specific provider metadata may have changed;
        // the providerArgs key may be absent from either dictionary, so the
        // lookup results must be null-checked before dereferencing
        // (the original code dereferenced these pointers unchecked, which
        // crashed when a dictionary carried only a provider id)
        const VtValue* oldProviderDictionaryValue =
            TfMapLookupPtr(oldDictionaryValue, EdfDataParametersTokens->providerArgs);
        const VtValue* newProviderDictionaryValue =
            TfMapLookupPtr(newDictionaryValue, EdfDataParametersTokens->providerArgs);
        const VtDictionary& oldProviderDictionary =
            (oldProviderDictionaryValue != nullptr && oldProviderDictionaryValue->IsHolding<VtDictionary>()) ?
                oldProviderDictionaryValue->UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
        const VtDictionary& newProviderDictionary =
            (newProviderDictionaryValue != nullptr && newProviderDictionaryValue->IsHolding<VtDictionary>()) ?
                newProviderDictionaryValue->UncheckedGet<VtDictionary>() : VtGetEmptyDictionary();
        return oldProviderDictionary != newProviderDictionary;
    }

    // one of them (or both) are nullptrs
    if (oldProviderId == nullptr && newProviderId == nullptr)
    {
        // no change to provider, don't need to check parameters
        return false;
    }

    // otherwise one changed
    return true;
}
// these macros emit methods defined in the Pixar namespace
// but not properly scoped, so we have to use the namespace
// locally here
// Public tokens describing the file format: registry id, version,
// composition target, file extension, and the metadata field name that
// carries the dynamic layer's parameters.
TF_DEFINE_PUBLIC_TOKENS(
    EdfFileFormatTokens,
    ((Id, "edfFileFormat"))
    ((Version, "1.0"))
    ((Target, "usd"))
    ((Extension, "edf"))
    ((Params, "EdfDataParameters"))
);

// Registers EdfFileFormat with the Sdf file format plugin registry.
TF_REGISTRY_FUNCTION(TfType)
{
    SDF_DEFINE_FILE_FORMAT(EdfFileFormat, SdfFileFormat);
}
PXR_NAMESPACE_CLOSE_SCOPE | 7,937 | C++ | 35.75 | 194 | 0.754567 |
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/api.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_OMNIMETPROVIDER_API_H_
#define OMNI_OMNIMETPROVIDER_API_H_
#include "pxr/base/arch/export.h"
// Standard Pixar arch export/import decoration:
// - static builds need no symbol decoration at all
// - when building this library (OMNIMETPROVIDER_EXPORTS defined) symbols
//   are exported; otherwise consumers of the library import them
#if defined(PXR_STATIC)
#   define OMNIMETPROVIDER_API
#   define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...)
#   define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...)
#   define OMNIMETPROVIDER_LOCAL
#else
#   if defined(OMNIMETPROVIDER_EXPORTS)
#       define OMNIMETPROVIDER_API ARCH_EXPORT
#       define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
#   else
#       define OMNIMETPROVIDER_API ARCH_IMPORT
#       define OMNIMETPROVIDER_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
#       define OMNIMETPROVIDER_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
#   endif
#   define OMNIMETPROVIDER_LOCAL ARCH_HIDDEN
#endif
#endif | 1,498 | C | 38.447367 | 97 | 0.732977 |
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.cpp | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <pxr/base/tf/token.h>
#include <pxr/base/vt/value.h>
#include <pxr/base/js/json.h>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/sdf/schema.h>
#include <pxr/usd/sdf/payload.h>
#include <pxr/usd/sdf/primSpec.h>
#include <pxr/usd/sdf/attributeSpec.h>
#include <pxr/usd/usd/tokens.h>
#include <edfDataProviderFactory.h>
#include "omniMetProvider.h"
#include <iostream>
#include <curl/curl.h>
PXR_NAMESPACE_OPEN_SCOPE
// Registers OmniMetProvider with the EDF data provider factory so the
// edf file format can instantiate it by id.
EDF_DEFINE_DATAPROVIDER(OmniMetProvider);

// Keys accepted in the providerArgs dictionary:
// dataLodLevel - how much data to load eagerly (see DataLodLevel below)
// deferredRead - defer loading until ReadChildren is called
// lod1Count    - per-department object cap applied at LOD level 1
TF_DEFINE_PUBLIC_TOKENS(
    OmniMetProviderProviderArgKeys,
    (dataLodLevel)
    (deferredRead)
    (lod1Count)
);

// Name of the metadata field carrying the data parameters.
TF_DEFINE_PRIVATE_TOKENS(
    EdfFieldKeys,
    (EdfDataParameters)
);

// USD type names for the prims authored by this provider.
TF_DEFINE_PRIVATE_TOKENS(
    OmniMetProviderTypeNames,
    (AmaDepartment)
    (AmaObject)
);

// Attribute / JSON field names used when parsing Met API responses.
TF_DEFINE_PRIVATE_TOKENS(
    OmniMetProviderFieldKeys,
    (departmentId)
    (displayName)
    (objectID)
    (isHighlight)
    (accessionNumber)
    (accessionYear)
    (isPublicDomain)
    (primaryImage)
    (primaryImageSmall)
    (additionalImages)
    (constituents)
    (department)
    (objectName)
    (title)
    (culture)
    (period)
    (dynasty)
    (reign)
    (portfolio)
    (artistRole)
    (artistPrefix)
    (artistDisplayName)
    (artistDisplayBio)
    (artistSuffix)
    (artistAlphaSort)
    (artistNationality)
    (artistGender)
    (artistWikidata_URL)
    (artistULAN_URL)
    (objectDate)
    (objectBeginDate)
    (objectEndDate)
    (medium)
    (dimensions)
    (measurements)
    (creditLine)
    (geographyType)
    (city)
    (state)
    (county)
    (country)
    (region)
    (subregion)
    (locale)
    (locus)
    (excavation)
    (river)
    (classification)
    (rightsAndReproduction)
    (linkResource)
    (metadataDate)
    (repository)
    (objectURL)
    (objectWikidataURL)
    (isTimelineWork)
    (galleryNumber)
);

// Eager-load granularity used by Read():
// Level0 - departments only
// Level1 - departments plus up to lod1Count objects each
// Level2 - departments plus all of their objects
enum struct DataLodLevel
{
    Level0 = 0,
    Level1 = 1,
    Level2 = 2
};

// urls used to retrieve the data
static const std::string DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/departments";
static const std::string OBJECTS_IN_DEPARTMENT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects?departmentIds=";
static const std::string OBJECT_URL = "https://collectionapi.metmuseum.org/public/collection/v1/objects/";

// root prim under which all department / object prims are authored
static const SdfPath DATA_ROOT_PATH("/Data");
// Initializes libcurl's global state for this provider instance.
// NOTE(review): curl_global_init / curl_global_cleanup are paired per
// instance here; per libcurl docs these calls are not thread-safe -
// confirm providers are only constructed/destroyed serially.
OmniMetProvider::OmniMetProvider(const EdfDataParameters& parameters) : IEdfDataProvider(parameters)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
}

OmniMetProvider::~OmniMetProvider()
{
    curl_global_cleanup();
}
// Called on first layer read. For eager (non-deferred) reads, loads
// department and object data up front according to the configured LOD
// level; for deferred reads, does nothing and lets ReadChildren pull data
// on demand. Always reports success.
bool OmniMetProvider::Read(std::shared_ptr<IEdfSourceData> sourceData)
{
    // this gives the provider a chance to load all data it needs to on first layer read
    // if we are parameterized for a deferred read, we do nothing and read on demand
    // at first ask, if it's not a deferred read, we load all appropriate content from the
    // back-end here
    if(!this->IsDeferredRead())
    {
        // it's not a deferred read, so determine how much data we want to really load
        int lodLevel = this->GetDataLodLevel();
        if (lodLevel == static_cast<int>(DataLodLevel::Level0))
        {
            // load the departments
            this->_LoadData(false, 0, sourceData);
        }
        else if (lodLevel == static_cast<int>(DataLodLevel::Level1))
        {
            // load the departments and their children
            // but cap the number of children at the specified level
            this->_LoadData(true, this->GetLod1Count(), sourceData);
        }
        else
        {
            // max lod level, load everything
            this->_LoadData(true, 0, sourceData);
        }
    }

    return true;
}
// Performs the actual (non-deferred) load: always pulls the department
// list, and optionally pulls objects beneath each department.
// includeObjects - whether to also load objects for each department
// objectCount    - maximum objects to load per department (0 = no cap)
// sourceData     - destination for the created prim / attribute specs
void OmniMetProvider::_LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData)
{
    // departments come back as (prim path, department id) pairs
    const std::string departmentJson = this->_LoadDepartments();
    const std::vector<std::pair<std::string, int>> departments =
        this->_ParseDepartments(departmentJson, sourceData);

    if (!includeObjects)
    {
        return;
    }

    for (const std::pair<std::string, int>& department : departments)
    {
        const std::vector<std::string> objectJsonBlobs =
            this->_LoadObjects(TfStringify(department.second), objectCount);
        for (const std::string& objectJson : objectJsonBlobs)
        {
            this->_ParseObject(objectJson, department.first, sourceData);
        }
    }
}
// Fetches the raw department list JSON from the Met collection API.
// Returns the response body on success, or an empty string when the curl
// handle could not be created or the request failed (a TF_CODING_ERROR is
// also emitted on request failure).
std::string OmniMetProvider::_LoadDepartments()
{
    std::string departments;
    CURL* departmentCurl = curl_easy_init();
    if (departmentCurl != nullptr)
    {
        // accumulate the response into a stack-local string instead of the
        // original new/delete pair - this cannot leak on an early exit and
        // avoids a needless heap allocation
        std::string result;
        curl_easy_setopt(departmentCurl, CURLOPT_URL, DEPARTMENT_URL.c_str());
        curl_easy_setopt(departmentCurl, CURLOPT_HTTPGET, 1L);
        curl_easy_setopt(departmentCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback);
        curl_easy_setopt(departmentCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(&result));

        CURLcode resultCode = curl_easy_perform(departmentCurl);
        if (resultCode == CURLE_OK)
        {
            departments = std::move(result);
        }
        else
        {
            TF_CODING_ERROR("Unable to load departments from '%s'!", DEPARTMENT_URL.c_str());
        }

        // done with the request handle
        curl_easy_cleanup(departmentCurl);
    }

    return departments;
}
std::vector<int> OmniMetProvider::_ParseObjectIds(const std::string& response) const
{
std::vector<int> objectIds;
PXR_NS::JsValue jsValue = PXR_NS::JsParseString(response, nullptr);
if (!jsValue.IsNull())
{
PXR_NS::JsObject rootObject = jsValue.GetJsObject();
PXR_NS::JsObject::iterator it = rootObject.find("objectIDs");
if (it != rootObject.end())
{
PXR_NS::JsArray jsonObjectIdArray = it->second.GetJsArray();
for (auto objectIdIt = jsonObjectIdArray.begin(); objectIdIt != jsonObjectIdArray.end(); objectIdIt++)
{
objectIds.push_back((*objectIdIt).GetInt());
}
}
else
{
TF_CODING_ERROR("Unable to find 'objectIDs' array in returned data '%s'!", response.c_str());
}
}
else
{
TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", response.c_str());
}
return objectIds;
}
std::vector<std::string> OmniMetProvider::_LoadObjects(const std::string& departmentId, size_t objectCount)
{
// NOTE: this should be updated to make these requests in parallel in the case
// where we aren't doing deferred reads
// ideally we wouldn't want to initialize a new curl handle here, but since this
// call can be made in the parallel prim indexing, we can't share the easy handle
// across threads, so we take the overhead hit here
std::vector<std::string> objects;
CURL* objectCurl = curl_easy_init();
std::string url = OBJECTS_IN_DEPARTMENT_URL + departmentId;
std::string* result = new std::string();
CURLcode resultCode;
*result = "";
curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str());
curl_easy_setopt(objectCurl, CURLOPT_HTTPGET, 1L);
curl_easy_setopt(objectCurl, CURLOPT_WRITEFUNCTION, OmniMetProvider::_CurlWriteCallback);
curl_easy_setopt(objectCurl, CURLOPT_WRITEDATA, reinterpret_cast<void*>(result));
resultCode = curl_easy_perform(objectCurl);
if (resultCode == CURLE_OK)
{
// process result
std::vector<int> objectIds = this->_ParseObjectIds(*result);
// objectCount = 0 means load all objects
// objectCount > 0 means load max that many objects
size_t counter = 0;
for (auto objectIdIterator = objectIds.begin(); objectIdIterator != objectIds.end() && (objectCount == 0 || counter < objectCount); objectIdIterator++)
{
// reset the URL and result buffer
// NOTE: this should be updated to make these requests in parallel
url = OBJECT_URL + TfStringify(*objectIdIterator);
*result = "";
curl_easy_setopt(objectCurl, CURLOPT_URL, url.c_str());
resultCode = curl_easy_perform(objectCurl);
if (resultCode == CURLE_OK)
{
objects.push_back(*result);
}
counter++;
}
}
// done with the callback data
delete result;
// done with the request handle
curl_easy_cleanup(objectCurl);
return objects;
}
// Parses the department-list JSON, authoring one AmaDepartment prim (with
// departmentId and displayName attributes) under /Data per entry.
// Returns (prim path, department id) pairs for the departments created.
std::vector<std::pair<std::string, int>> OmniMetProvider::_ParseDepartments(const std::string& departmentJson,
    std::shared_ptr<IEdfSourceData> sourceData)
{
    std::vector<std::pair<std::string, int>> parsedDepartments;
    JsValue jsValue = JsParseString(departmentJson, nullptr);
    if (!jsValue.IsNull())
    {
        JsObject rootObject = jsValue.GetJsObject();
        JsObject::iterator it = rootObject.find("departments");
        if (it != rootObject.end())
        {
            JsArray departments = it->second.GetJsArray();
            std::string parent = DATA_ROOT_PATH.GetAsString();
            for (auto departmentIt = departments.begin(); departmentIt != departments.end(); departmentIt++)
            {
                // for each department, create a prim to represent it
                // NOTE(review): operator[] on JsObject default-constructs a
                // null value when the key is absent - assumes the API always
                // returns departmentId / displayName; confirm
                JsObject department = (*departmentIt).GetJsObject();
                int departmentId = department[OmniMetProviderFieldKeys->departmentId.GetString()].GetInt();
                std::string displayName = department[OmniMetProviderFieldKeys->displayName.GetString()].GetString();

                // create the prim, using a sanitized display name as the prim name
                std::string primName = TfMakeValidIdentifier(displayName);
                sourceData->CreatePrim(DATA_ROOT_PATH, primName, SdfSpecifier::SdfSpecifierDef,
                    OmniMetProviderTypeNames->AmaDepartment);

                // create the attributes for the prim
                SdfPath parentPrim = SdfPath(parent + "/" + primName);
                sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->departmentId.GetString(),
                    SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform, VtValue(departmentId));
                sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->displayName.GetString(),
                    SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform, VtValue(displayName));

                parsedDepartments.push_back(std::make_pair(parentPrim.GetAsString(), departmentId));
            }
        }
        else
        {
            TF_CODING_ERROR("Unable to find 'departments' array in returned data '%s'!", departmentJson.c_str());
        }
    }
    else
    {
        TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", departmentJson.c_str());
    }

    return parsedDepartments;
}
void OmniMetProvider::_ParseObject(const std::string& objectData, const std::string& parentPath,
std::shared_ptr<IEdfSourceData> sourceData)
{
// from the parent path given and the data contained in the JSON
// object retrieved from the server, we can create the full prim
JsValue jsValue = JsParseString(objectData, nullptr);
if (!jsValue.IsNull())
{
JsObject rootObject = jsValue.GetJsObject();
// the root object contains all of our properties that we now need
// to create a prim spec for the object and a set of property
// specs for it
// NOTE: this code uses the "default value" of a property spec
// to represent the authored value coming from the external system
// We don't need to do sub-composition over the data coming
// from the external system, so we ever only have a value or not
// so if HasDefaultValue is true on the property spec, it means
// there was an authored value that came from the remote system
// One optimization we could do in the layer above (EdfData) is
// to add schema acquisition and checking in the loop. This would allow us
// to create the property spec or not depending on if the value that came in
// is different from the true fallback declared in the schema
// (but we'd have to change the ask for the property to check whether
// the schema has the property rather than if the property spec exists)
std::string objectName = rootObject[OmniMetProviderFieldKeys->objectName.GetString()].GetString();
std::string primName = TfMakeValidIdentifier(objectName) +
TfStringify(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt());
// create the prim
SdfPath newPrimParentPath(parentPath);
sourceData->CreatePrim(newPrimParentPath, primName, SdfSpecifier::SdfSpecifierDef,
OmniMetProviderTypeNames->AmaObject);
// set the fact that this prim has an API schema attached to it
// usdGenSchema doesn't generate a public token for the actual
// API schema class name, so we hard code that here
SdfPath parentPrim = SdfPath(parentPath + "/" + primName);
TfTokenVector apiSchemas;
apiSchemas.push_back(TfToken("OmniMetArtistAPI"));
VtValue apiSchemasValue(apiSchemas);
sourceData->SetField(parentPrim, UsdTokens->apiSchemas, apiSchemasValue);
// create the attributes for the prim
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->objectID.GetString(),
SdfValueTypeNames->Int, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->objectID.GetString()].GetInt()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isHighlight.GetString(),
SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->isHighlight.GetString()].GetBool()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionNumber.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->accessionNumber.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->accessionYear.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->accessionYear.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->isPublicDomain.GetString(),
SdfValueTypeNames->Bool, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->isPublicDomain.GetString()].GetBool()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImage.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->primaryImage.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->primaryImageSmall.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->primaryImageSmall.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->department.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->department.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->title.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->title.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->culture.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->culture.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->period.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->period.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->dynasty.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->dynasty.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->reign.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->reign.GetString()].GetString()));
sourceData->CreateAttribute(parentPrim, OmniMetProviderFieldKeys->portfolio.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->portfolio.GetString()].GetString()));
// artist information complying with sample API schema
std::string namespaceFieldPrefix = "omni:met:artist:";
JsObject::const_iterator i = rootObject.find(OmniMetProviderFieldKeys->artistRole.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistRole.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistRole.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistPrefix.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistPrefix.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistPrefix.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayName.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayName.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayName.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistDisplayBio.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistDisplayBio.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistDisplayBio.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistSuffix.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistSuffix.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistSuffix.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistAlphaSort.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistAlphaSort.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistAlphaSort.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistNationality.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistNationality.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistNationality.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistGender.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistGender.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistGender.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistWikidata_URL.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistWikidata_URL.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistWikidata_URL.GetString()].GetString()));
}
i = rootObject.find(OmniMetProviderFieldKeys->artistULAN_URL.GetString());
if (i != rootObject.end())
{
sourceData->CreateAttribute(parentPrim, namespaceFieldPrefix + OmniMetProviderFieldKeys->artistULAN_URL.GetString(),
SdfValueTypeNames->String, SdfVariability::SdfVariabilityUniform,
VtValue(rootObject[OmniMetProviderFieldKeys->artistULAN_URL.GetString()].GetString()));
}
// note that there are quite a few additional properties that could be pulled, the above
// represents only a sample of the data that is there - if you'd like to try the rest as an
// exercise, you can enhance the schema attributes and read the remaining ones here
}
else
{
TF_CODING_ERROR("Data returned '%s' was not JSON or was empty!", objectData.c_str());
}
}
// On-demand loading entry point used for deferred reads. For the /Data
// root, loads the department list; for an AmaDepartment prim (when the LOD
// level permits), loads and parses that department's objects. Returns true
// only when the provider is in deferred-read mode; returns false otherwise.
bool OmniMetProvider::ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData)
{
    // if the parent path is the root, we need to load the departments
    // but only if we are in a deferred read scenario
    if (this->IsDeferredRead())
    {
        SdfPath parentPrimPath = SdfPath(parentPath);
        int lodLevel = this->GetDataLodLevel();
        if (parentPrimPath == DATA_ROOT_PATH)
        {
            // load the department data
            std::cout << "Loading department data..." << std::endl;
            std::string departmentData = this->_LoadDepartments();
            std::vector<std::pair<std::string, int>> departments = this->_ParseDepartments(departmentData,
                sourceData);
        }
        else
        {
            // only department prims have children, and only when the LOD
            // level asks for objects at all
            VtValue typeNameValue;
            if(sourceData->HasField(SdfPath(parentPath), SdfFieldKeys->TypeName, &typeNameValue))
            {
                if (typeNameValue.UncheckedGet<TfToken>() == OmniMetProviderTypeNames->AmaDepartment &&
                    this->GetDataLodLevel() != static_cast<int>(DataLodLevel::Level0))
                {
                    // it's a department, we need to load the objects
                    // associated with the department
                    std::string departmentIdPath = parentPath + "." + OmniMetProviderFieldKeys->departmentId.GetString();
                    VtValue departmentId;
                    if (sourceData->HasAttribute(SdfPath(departmentIdPath), &departmentId))
                    {
                        // LOD level 1 caps the number of objects loaded
                        size_t objectCount = 0;
                        if (lodLevel == static_cast<int>(DataLodLevel::Level1))
                        {
                            objectCount = this->GetLod1Count();
                        }

                        // load the object data
                        std::cout << "Loading object data for " + parentPath + "..." << std::endl;
                        std::vector<std::string> objectData = this->_LoadObjects(TfStringify(departmentId.UncheckedGet<int>()), objectCount);
                        for (auto it = objectData.begin(); it != objectData.end(); it++)
                        {
                            this->_ParseObject(*it, parentPath, sourceData);
                        }
                    }
                }
            }
        }

        return true;
    }

    return false;
}
// Reports whether all data was cached up front by Read(). Data is fully
// cached exactly when deferred reads are disabled; with deferred reads
// enabled, children are fetched lazily via ReadChildren().
bool OmniMetProvider::IsDataCached() const
{
    const bool deferred = this->IsDeferredRead();
    return !deferred;
}
int OmniMetProvider::GetDataLodLevel() const
{
int dataLodLevel = 0;
EdfDataParameters parameters = this->GetParameters();
std::unordered_map<std::string, std::string>::const_iterator it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->dataLodLevel);
if (it != parameters.providerArgs.end())
{
dataLodLevel = TfUnstringify<int>(it->second);
if (dataLodLevel < 0)
{
dataLodLevel = 0;
}
}
return dataLodLevel;
}
// Returns the object-count cap used at LOD level 1, taken from the
// `lod1Count` provider argument. Missing or negative values yield 0.
size_t OmniMetProvider::GetLod1Count() const
{
    // although the incoming string from the parameter set
    // might be interpretable as a negative integer
    // it doesn't really make practical sense, so if
    // it is interpreted as negative, we clamp to 0
    // and return an unsigned version to the caller
    //
    // BUGFIX: the parse result must be held in a signed int first. Assigning
    // TfUnstringify<int> directly to a size_t would wrap negative values to
    // huge positive counts and make the `< 0` clamp unreachable (it always
    // compared false on an unsigned type).
    int lod1Count = 0;
    EdfDataParameters parameters = this->GetParameters();
    const auto it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->lod1Count);
    if (it != parameters.providerArgs.end())
    {
        lod1Count = TfUnstringify<int>(it->second);
        if (lod1Count < 0)
        {
            lod1Count = 0;
        }
    }

    return static_cast<size_t>(lod1Count);
}
bool OmniMetProvider::IsDeferredRead() const
{
bool deferredRead = false;
EdfDataParameters parameters = this->GetParameters();
std::unordered_map<std::string, std::string>::const_iterator it = parameters.providerArgs.find(OmniMetProviderProviderArgKeys->deferredRead);
if (it != parameters.providerArgs.end())
{
deferredRead = TfUnstringify<bool>(it->second);
}
return deferredRead;
}
// libcurl write callback: appends the received bytes to the std::string
// supplied via CURLOPT_WRITEDATA.
//
// BUGFIX: the callback receives size * nmemb bytes and must return the number
// of bytes actually handled; any other return value makes libcurl abort the
// transfer with CURLE_WRITE_ERROR. The previous code used nmemb alone, which
// only worked because libcurl currently documents size == 1 — the documented
// contract is size * nmemb.
size_t OmniMetProvider::_CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp)
{
    const size_t byteCount = size * nmemb;
    std::string* result = reinterpret_cast<std::string*>(userp);
    result->append(reinterpret_cast<const char* const>(data), byteCount);
    return byteCount;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 27,507 | C++ | 41.780715 | 159 | 0.662704 |
NVIDIA-Omniverse/usd-plugin-samples/src/usd-plugins/dynamicPayload/omniMetProvider/omniMetProvider.h | // Copyright 2023 NVIDIA CORPORATION
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_
#define OMNI_OMNIMETPROVIDER_OMNIMETPROVIDER_H_
#include <string>
#include <vector>
#include <utility>
#include <pxr/pxr.h>
#include <pxr/base/tf/token.h>
#include <pxr/usd/sdf/layer.h>
#include <pxr/usd/sdf/schema.h>
#include <iEdfDataProvider.h>
PXR_NAMESPACE_OPEN_SCOPE
// Tokens naming the arguments accepted through the EDF payload's provider
// argument dictionary (see GetDataLodLevel / IsDeferredRead / GetLod1Count
// in the implementation).
TF_DECLARE_PUBLIC_TOKENS(
    OmniMetProviderProviderArgKeys,
    (dataLodLevel)
    (deferredRead)
    (lod1Count)
);

/// \class OmniMetProvider
///
/// Defines a specific EDF back-end data provider for reading information
/// from the Metropolitan Museum of Art REST APIs and converting that
/// into prim and attribute data that can be processed by USD.
///
class OmniMetProvider : public IEdfDataProvider
{
public:
    /// Constructs a provider configured by the given EDF payload parameters.
    OmniMetProvider(const EdfDataParameters& parameters);
    virtual ~OmniMetProvider();

    /// Performs the initial (non-deferred) read of the back-end data.
    virtual bool Read(std::shared_ptr<IEdfSourceData> sourceData) override;
    /// Lazily loads the children of `parentPath` when deferred reads are enabled.
    virtual bool ReadChildren(const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData) override;
    /// True when all data was cached up front (i.e. reads are not deferred).
    virtual bool IsDataCached() const override;

private:
    /// Level-of-detail requested via the `dataLodLevel` provider argument.
    int GetDataLodLevel() const;
    /// Object-count cap used at LOD level 1 (`lod1Count` provider argument).
    size_t GetLod1Count() const;
    /// True when the `deferredRead` provider argument parses as true.
    bool IsDeferredRead() const;
    /// Loads departments (and optionally objects) into `sourceData`.
    void _LoadData(bool includeObjects, size_t objectCount, std::shared_ptr<IEdfSourceData> sourceData);
    /// Fetches the raw department JSON from the REST API.
    std::string _LoadDepartments();
    /// Fetches raw object JSON strings for a department, capped at `objectCount` (0 = no cap).
    std::vector<std::string> _LoadObjects(const std::string& departmentId, size_t objectCount);
    /// Parses department JSON into prims; returns (name, id) pairs.
    std::vector<std::pair<std::string, int>> _ParseDepartments(const std::string& departmentJson,
        std::shared_ptr<IEdfSourceData> sourceData);
    /// Parses a single object's JSON into a prim beneath `parentPath`.
    void _ParseObject(const std::string& objectData, const std::string& parentPath, std::shared_ptr<IEdfSourceData> sourceData);

    // NOTE: these methods are not technically const, since they do change internal state
    // in the edfData object's layer data. This is ok, because that object is a cache
    // https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#es50-dont-cast-away-const
    // the mutuable cache state is allowed to change internally and still keep the semantics
    // of the object not changing from the outside
    void _LoadDepartments(bool includeObjects) const;
    void _LoadObjects(const std::string& departmentId, const std::string& parentPath) const;
    bool _IsDepartmentDataCached() const;
    bool _IsObjectDataCached(const std::string& parentPath) const;
    void _ParseDepartments(const std::string& response) const;
    std::vector<int> _ParseObjectIds(const std::string& response) const;
    void _ParseObject(const std::string& parentPath, const std::string& response) const;

    /// libcurl CURLOPT_WRITEFUNCTION callback; `userp` is a std::string*.
    static size_t _CurlWriteCallback(void* data, size_t size, size_t nmemb, void* userp);
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif | 3,321 | C | 37.627907 | 128 | 0.747666 |
NVIDIA-Omniverse/kit-osc/README.md | # OSC Omniverse Kit Extension [omni.osc]
Omniverse Kit extension for sending and receiving OSC (Open Sound Control) messages.

*The OSC control surface app running on the iPad is [TouchOSC](https://hexler.net/touchosc).*
# Getting Started
Open the Community tab in the Extensions window (`Window > Extensions`), search for `OSC`, then install and enable the `omni.osc` extension.

## Running the server
After installing and enabling the extension, you should see the following window.

Enter the private IP address of the computer running your Kit application and the desired port, then click `Start`. If you are prompted to configure your Windows Firewall, ensure that the Kit application is allowed to communicate with other devices on the private network.

You can find the private IP address of your computer by running `ipconfig` in the Windows terminal.

If you run the server on `localhost`, that means the server can only receive messages from OSC clients running on the same machine. If you want to receive messages from OSC clients running on other devices on the same network, you must run the server on an IP address that is visible to those devices.
Once the server is running, confirm that it can successfully receive messages by inspecting the verbose console logs. It might be helpful to filter only the logs that originate from `omni.osc`.

## Receiving messages with Python
Below is a python snippet that demonstrates how to handle OSC messages received by the server. It assumes that the OSC server configured above is running. You can paste and run the below snippet directly into the Omniverse Script Editor for testing.
```python
import carb
import carb.events
import omni.osc
def on_event(event: carb.events.IEvent) -> None:
addr, args = omni.osc.osc_message_from_carb_event(event)
carb.log_info(f"Received OSC message: [{addr}, {args}]")
sub = omni.osc.subscribe_to_osc_event_stream(on_event)
```
## Receiving messages with ActionGraph
Search for `OSC` in the Action Graph nodes list and add the `On OSC Message` node to your graph. The node takes a single input,
the OSC address path that this node will handle. This input can be a valid regular expression. Note that this input field does *not* support
OSC pattern matching expressions. The node outputs an OmniGraph bundle with two attributes named `address` and `arguments` which you
can access by using the `Extract Attribute` node.

You can find example USD stages that demonstrate how to configure an ActionGraph using this extension at [exts/omni.osc/data/examples](/exts/omni.osc/data/examples).
## Sending messages from Python
Since `omni.osc` depends on [python-osc](https://pypi.org/project/python-osc/), you can import this module directly in
your own Python code to send OSC messages. Please see the [documentation](https://python-osc.readthedocs.io/en/latest/) for additional
information and support.
```python
import random
import time
from pythonosc import udp_client
client = udp_client.SimpleUDPClient("127.0.0.1", 3334)
client.send_message("/scale", [random.random(), random.random(), random.random()])
```
You can paste and run the above snippet directly into the Omniverse Script Editor for testing.
## Sending messages from ActionGraph
This is not currently implemented.
## Limitations & Known Issues
- OSC Bundles are currently not supported.
- The OmniGraph `On OSC Message` node can only handle OSC messages containing lists of floating-point arguments.
# Help
The below sections should help you diagnose any potential issues you may encounter while working with `omni.osc` extension.
## Unable to receive messages
1. First, enable verbose logs in the console (filter by the `omni.osc` extension). The server will log any messages received.
2. Confirm that the computer running the Kit application and the device sending the OSC messages are on the same network.
3. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall. Note that
you may have multiple instances of kit.exe on this list. When in doubt, ensure that all of them have the appropriate permission.

4. Confirm that the Windows Defender Firewall allows incoming UDP traffic to the port in use.
5. Confirm that the device sending the OSC messages is sending the messages via UDP to the correct IP address and port.
6. Use a tool such as [wireshark](https://www.wireshark.org/) to confirm that the computer running the Kit application is receiving UDP traffic from the device.
## Unable to send messages
1. Confirm that the computer running the Kit application and the device receiving the OSC messages are on the same network.
2. Confirm that kit.exe is allowed to communicate with the private network through the Windows Defender Firewall.
3. Confirm that the device receiving the OSC messages is able to receive incoming UDP traffic at the port in use.
# Contributing
The source code for this repository is provided as-is and we are not accepting outside contributions.
# License
- The code in this repository is licensed under the Apache License 2.0. See [LICENSE](/LICENSE).
- python-osc is licensed under the Unlicense. See [exts/omni.osc/vendor/LICENSE-python-osc](/exts/omni.osc/vendor/LICENSE-python-osc).
# Resources
- [https://opensoundcontrol.stanford.edu/spec-1_0.html](https://opensoundcontrol.stanford.edu/spec-1_0.html)
- [https://en.wikipedia.org/wiki/Open_Sound_Control](https://en.wikipedia.org/wiki/Open_Sound_Control)
- [https://python-osc.readthedocs.io/en/latest/](https://python-osc.readthedocs.io/en/latest/)
| 5,998 | Markdown | 46.992 | 301 | 0.779593 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "0.3.1"
# The title and description fields are primarily for displaying extension info in UI
title = "OSC (Open Sound Control)"
description="Send and receive OSC (Open Sound Control) messages"
authors = ["NVIDIA"]
repository = "https://github.com/NVIDIA-Omniverse/kit-osc"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
icon = "data/icon.png"
preview_image = "data/preview.png"
# One of categories for UI.
category = "Other"
# Keywords for the extension
keywords = ["kit", "osc"]
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.pipapi" = {}
"omni.graph" = {}
"omni.graph.bundle.action" = {}
# Main python module this extension provides, it will be publicly available as "import omni.osc.core".
[[python.module]]
name = "omni.osc"
[python.pipapi]
archiveDirs = ["vendor"]
[settings.exts."omni.osc"]
address = "localhost"
port = 3334
[[test]]
dependencies = ["omni.graph", "omni.kit.test"]
| 983 | TOML | 22.999999 | 102 | 0.703967 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/extension.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Any, List
import carb
import carb.events
import carb.profiler
import omni.ext
import omni.kit.app
from pythonosc.dispatcher import Dispatcher
from .core import carb_event_payload_from_osc_message, push_to_osc_event_stream
from .menu import OscMenu
from .server import DaemonOSCUDPServer
from .window import OscWindow
class OmniOscExt(omni.ext.IExt):
    """Kit extension entry point for omni.osc.

    Owns the OSC UDP server, the main UI window, and the editor menu item
    that toggles the window's visibility. All three are created on startup
    and torn down on shutdown.
    """

    def on_startup(self, ext_id):
        def on_start(host: str, port: int) -> bool:
            """Start the server; returns True when it is running afterwards."""
            return self.server.start(host, port)

        def on_stop() -> bool:
            """Stop the server; returns True when it is still running afterwards."""
            return self.server.stop()

        def toggle_window_visible(_arg0, _arg1) -> None:
            """
            Toggle the window visibility from the editor menu item
            """
            self.window.visible = not self.window.visible

        self.server = OmniOscExt.create_server()
        # The main UI window, seeded with the address/port from the extension settings
        default_addr = carb.settings.get_settings().get("exts/omni.osc/address")
        default_port = carb.settings.get_settings().get("exts/omni.osc/port")
        self.window = OscWindow(
            on_start=on_start, on_stop=on_stop, default_addr=default_addr, default_port=default_port
        )
        # The editor menu entry that toggles the window visibility
        self.menu = OscMenu(on_click=toggle_window_visible)
        # Toggle the editor menu entry when the user closes the window
        self.window.set_visibility_changed_fn(lambda visible: self.menu.set_item_value(visible))

    def on_shutdown(self):
        self.window = None
        self.menu = None
        if self.server is not None:
            self.server.stop()
            self.server = None

    # BUGFIX: this was previously an undecorated function in the class body.
    # It only worked because all callers invoke it via the class
    # (OmniOscExt.create_server()); calling it on an instance would have
    # passed `self` as an unexpected argument and raised TypeError.
    @staticmethod
    def create_server() -> DaemonOSCUDPServer:
        """
        Create a server that routes all OSC messages to a carbonite event stream
        """

        @carb.profiler.profile
        def on_osc_msg(addr: str, *args: List[Any]) -> None:
            """
            OSC message handler: forward every received message to the
            shared OSC carbonite event stream.
            """
            carb.log_verbose(f"OSC message: [{addr}, {args}]")
            payload = carb_event_payload_from_osc_message(addr, args)
            push_to_osc_event_stream(payload)

        # Route every incoming message through the handler above
        dispatcher = Dispatcher()
        dispatcher.set_default_handler(on_osc_msg)
        return DaemonOSCUDPServer(dispatcher)
| 2,714 | Python | 34.723684 | 100 | 0.658438 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.kit.pipapi
# python-osc:
# - SWIPAT request: http://nvbugs/3684871
# - A copy of the source is forked to https://github.com/NVIDIA-Omniverse/python-osc
# - The dependency vendored and installed from exts/omni.osc/vendor/python_osc-1.8.0-py3-none-any.whl
omni.kit.pipapi.install(
package="python-osc", module="pythonosc", use_online_index=False, ignore_cache=True, ignore_import_check=False
)
from pythonosc import * # noqa: F401
from .core import * # noqa: F401,F403
from .extension import * # noqa: F401,F403
from .server import * # noqa: F401,F403
# NOTE(jshrake): omni.graph is an optional dependency, so tolerate a failed
# OGN import and keep the rest of the extension functional without the
# OmniGraph nodes. (Removed a redundant `pass` that followed the print.)
try:
    from .ogn import *
except Exception as e:
    print(f"omni.osc failed to import OGN due to {e}")
| 1,219 | Python | 37.124999 | 114 | 0.754717 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/core.py | ## Copyright © 2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
##
## This software product is a proprietary product of Nvidia Corporation and its affiliates
## (the "Company") and all right, title, and interest in and to the software
## product, including all associated intellectual property rights, are and
## shall remain exclusively with the Company.
##
## This software product is governed by the End User License Agreement
## provided with the software product.
from typing import Callable, Tuple
import carb
import carb.events
import omni.ext
import omni.kit.app
# Name and derived numeric id of the carbonite event type carrying OSC messages
OSC_EVENT_TYPE_NAME: str = "omni.osc"
OSC_EVENT_TYPE: int = carb.events.type_from_string(OSC_EVENT_TYPE_NAME)

# Keys used in the carbonite event payload dict (see
# carb_event_payload_from_osc_message / osc_message_from_carb_event below)
OSC_MESSAGE_ADDRESS_STR = "address"
OSC_MESSAGE_ARGUMENTS_STR = "arguments"
def get_osc_event_stream() -> carb.events._events.IEventStream:
    """
    Return the Kit message-bus event stream used for all OSC traffic.
    """
    app = omni.kit.app.get_app()
    return app.get_message_bus_event_stream()
def push_to_osc_event_stream(payload: dict) -> None:
    """
    Publish a single OSC payload dict onto the shared OSC event stream.
    """
    stream = get_osc_event_stream()
    stream.push(OSC_EVENT_TYPE, sender=0, payload=payload)
def subscribe_to_osc_event_stream(
    cb: Callable[[carb.events._events.IEvent], None]
) -> carb.events._events.ISubscription:
    """
    Subscribe ``cb`` to OSC events; the returned Carbonite subscription
    controls the lifetime of the callback.
    """
    stream = get_osc_event_stream()
    return stream.create_subscription_to_pop_by_type(OSC_EVENT_TYPE, cb)
def carb_event_payload_from_osc_message(address: str, args: list) -> dict:
    """
    Build a carbonite event payload dict from an OSC address and argument list,
    suitable for pushing to the OSC event stream.
    """
    payload = {
        OSC_MESSAGE_ADDRESS_STR: address,
        OSC_MESSAGE_ARGUMENTS_STR: args,
    }
    return payload
def osc_message_from_carb_event(e: carb.events.IEvent) -> Tuple[str, list]:
    """
    Extract the (address, arguments) pair of an OSC message from a carbonite
    event's payload.
    """
    payload = e.payload
    return (payload[OSC_MESSAGE_ADDRESS_STR], payload[OSC_MESSAGE_ARGUMENTS_STR])
| 1,961 | Python | 34.672727 | 90 | 0.7231 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/server.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import threading
import carb
import carb.events
from pythonosc import osc_server
from pythonosc.dispatcher import Dispatcher
class DaemonOSCUDPServer:
    """
    Run a python-osc BlockingOSCUDPServer on a background daemon thread.

    Usage::

        import omni.osc.core as osc

        dispatcher = osc.Dispatcher()
        dispatcher.set_default_handler(lambda addr, *args: print(f"{addr}: {args}"))
        server = osc.DaemonOSCUDPServer(dispatcher)
        server.start("192.168.0.1", 3434)
        # ...
        server.stop()
    """

    def __init__(self, dispatcher: Dispatcher):
        # python-osc dispatcher that routes incoming messages to handlers
        self.dispatcher: Dispatcher = dispatcher
        # The UDP server; non-None only after a successful start()
        self.server: osc_server.BlockingOSCUDPServer = None
        # Thread running the server's serve_forever loop
        self.thread: threading.Thread = None

    def running(self) -> bool:
        """
        Returns true if the server is running
        """
        return self.thread is not None and self.thread.is_alive()

    def start(self, addr: str, port: int) -> bool:
        """
        Start the OSC server on the specified address and port.
        Does nothing if the server is already running.

        Returns True when the server is running after the call.
        """
        if not self.running():
            carb.log_info(f"Starting OSC server on {addr}:{port}")
            try:
                self.server = osc_server.BlockingOSCUDPServer((addr, port), dispatcher=self.dispatcher)
                # NOTE(jshrake): Running the thread in daemon mode ensures that the thread and server
                # are properly disposed of in the event that the main thread exits unexpectedly.
                self.thread = threading.Thread(target=self.server.serve_forever, daemon=True)
                self.thread.start()
            except Exception as e:
                carb.log_error(f"Error starting OSC server: {e}")
                # Don't leave a half-initialized server/thread behind on failure
                self.server = None
                self.thread = None
        else:
            carb.log_info("OSC server already running")
        return self.running()

    def stop(self) -> bool:
        """
        Stops the OSC server.

        Returns True when the server is still running after the call
        (i.e. False indicates a successful shutdown).
        """
        if self.running():
            carb.log_info("Stopping OSC server")
            try:
                self.server.shutdown()
                self.thread.join()
            except Exception as e:
                carb.log_error(f"Error stopping OSC server: {e}")
            finally:
                self.server = None
                self.thread = None
        else:
            carb.log_info("OSC server not running")
        return self.running()
| 2,857 | Python | 34.28395 | 103 | 0.615681 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/menu.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import omni.kit.ui
MENU_PATH = "Window/OSC"
class OscMenu:
    """Editor menu item (Window/OSC) that toggles the OSC window's visibility."""

    def __init__(self, on_click):
        menu = omni.kit.ui.get_editor_menu()
        if menu:
            menu.add_item(menu_path=MENU_PATH, on_click=on_click, toggle=True, value=True)

    def set_item_value(self, val: bool) -> None:
        """Set the checked state of the menu item (e.g. when the window closes)."""
        menu = omni.kit.ui.get_editor_menu()
        if menu:
            menu.set_value(MENU_PATH, val)

    def __del__(self):
        menu = omni.kit.ui.get_editor_menu()
        if menu and menu.has_item(MENU_PATH):
            menu.remove_item(MENU_PATH)
| 1,125 | Python | 33.121211 | 93 | 0.672889 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/window.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from typing import Callable
import omni.ui as ui
OnStartCallback = Callable[[str, int], bool]
OnStopCallback = Callable[[], bool]
class OscWindow(ui.Window):
    """Main UI window for configuring and controlling the OSC UDP server.

    Shows an address field, a port field, a running/stopped status label,
    and Start/Stop buttons. The window does not own the server; it reports
    user intent through the ``on_start``/``on_stop`` callbacks, each of which
    returns the server's running state afterwards.
    """

    def __init__(
        self, default_addr: str, default_port: int, on_start: OnStartCallback, on_stop: OnStopCallback
    ) -> None:
        super().__init__("OSC UDP Server", width=300, height=300)

        def start() -> None:
            """
            Callback when the user presses the start button
            """
            is_running = on_start(addr.as_string, port.as_int)
            running.set_value(is_running)

        def stop() -> None:
            """
            Callback when the user presses the stop button
            """
            is_running = on_stop()
            running.set_value(is_running)

        def update_running_label(label: ui.Label, running: bool) -> None:
            """
            Keep the UI label up to date with the state of the server
            """
            if running:
                label.text = f"Running UDP server @ {addr.as_string}:{port.as_int}"
                label.set_style({"color": "green"})
            else:
                label.text = "Stopped"
                label.set_style({"color": "red"})

        def toggle_enabled(field: ui.AbstractField, running: bool) -> None:
            """
            Enable or disable the input field based on the state of the server
            """
            field.enabled = not running
            color = "gray" if running else "white"
            field.set_style({"color": color})

        # Settings models; value-changed subscriptions below keep the label
        # and input fields in sync with the `running` state.
        addr = ui.SimpleStringModel(default_addr)
        port = ui.SimpleIntModel(default_port)
        running = ui.SimpleBoolModel(False)
        with self.frame:
            with ui.VStack():
                # Status label at the top
                label = ui.Label("", height=20)
                update_running_label(label, running.get_value_as_bool())
                running.add_value_changed_fn(lambda m: update_running_label(label, m.get_value_as_bool()))
                # Address/port inputs; disabled while the server is running
                with ui.VStack(height=20):
                    with ui.HStack():
                        ui.Label("Address:")
                        addr_field = ui.StringField(addr)
                        toggle_enabled(addr_field, running.get_value_as_bool())
                        running.add_value_changed_fn(lambda m: toggle_enabled(addr_field, m.get_value_as_bool()))
                    ui.Spacer(height=2)
                    with ui.HStack():
                        ui.Label("Port:")
                        port_field = ui.IntField(port)
                        toggle_enabled(port_field, running.get_value_as_bool())
                        running.add_value_changed_fn(lambda m: toggle_enabled(port_field, m.get_value_as_bool()))
                # Start/Stop controls
                with ui.VStack():
                    ui.Button("Start", clicked_fn=start)
                    ui.Button("Stop", clicked_fn=stop)
| 3,323 | Python | 39.536585 | 113 | 0.560036 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/ogn/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
Dynamically import every file in a directory tree that looks like a Python Ogn Node.
This includes linked directories, which is the mechanism by which nodes can be hot-reloaded from the source tree.
"""
# Required to register nodes in Kit 104: explicitly register every OGN node
# found next to this file under the "omni.osc" extension.
try:
    import omni.graph.core as og

    og.register_ogn_nodes(__file__, "omni.osc")
except Exception:
    # Swallow any exceptions: omni.graph is an optional dependency, and the
    # rest of omni.osc should keep working without OmniGraph support.
    pass
| 817 | Python | 37.952379 | 113 | 0.774786 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/ogn/nodes/OgnOnOscEvent.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""
This is the implementation of the OGN node defined in OgnOnOscEvent.ogn
This implementation is inspired by the OgnOnCustomEvent node
See https://gitlab-master.nvidia.com/omniverse/kit/-/blob/master/kit/source/extensions/omni.graph.action/nodes/OgnOnCustomEvent.py # noqa E501
"""
import re
from typing import Any, List, Union
import carb
import carb.events
import carb.profiler
import omni.graph.core as og
import omni.osc
from omni.osc.core import OSC_MESSAGE_ADDRESS_STR, OSC_MESSAGE_ARGUMENTS_STR
from .. import OgnOnOscEventDatabase
class OgnOnOscEventInternalState:
    """Convenience class for maintaining per-node state information.

    Holds the OSC event-stream subscription, the most recently matched event,
    and the compiled address-path regex for one OnOscEvent node instance.
    """

    def __init__(self):
        """Instantiate the per-node state information."""
        # This subscription object controls the lifetime of our callback, it will be
        # cleaned up automatically when our node is destroyed
        self.sub = None
        # Set when the callback has triggered
        self.is_set = False
        # The last event received
        self.event: Union[None, carb.events.IEvent] = None
        # The node instance handle
        self.node = None
        # The regex used to match the OSC address path
        self.osc_path_regex = ""
        # The compiled regex pattern
        self.osc_path_regex_pattern = None

    @carb.profiler.profile
    def on_event(self, event: carb.events.IEvent):
        """The event callback: stash matching events and request a compute."""
        if event is None:
            return
        # Only handle messages with a path that matches the OSC address path regex
        osc_addr, _ = omni.osc.osc_message_from_carb_event(event)
        if self.osc_path_regex_pattern is None or not self.osc_path_regex_pattern.match(osc_addr):
            return
        # Only the latest matching event is kept; an unconsumed earlier event
        # is overwritten here.
        self.is_set = True
        self.event = event
        # Tell the evaluator we need to be computed
        if self.node.is_valid():
            self.node.request_compute()

    @carb.profiler.profile
    def first_time_subscribe(self, node: og.Node, osc_path_regex: str) -> bool:
        """Checked call to set up carb subscription
        Args:
            node: The node instance
            osc_path_regex: The regex matched against incoming OSC address paths
        Returns:
            True if we subscribed, False if we are already subscribed
        """
        if self.osc_path_regex != osc_path_regex:
            # osc path regex changed since we last subscribed, re-compile
            # NOTE(review): if compilation fails, the previously compiled
            # pattern (if any) stays in effect and the error is logged on
            # every compute until the input regex is fixed.
            try:
                self.osc_path_regex_pattern = re.compile(osc_path_regex)
                self.osc_path_regex = osc_path_regex
            except Exception as e:
                carb.log_error(f"Error compiling OSC Address Path Regex '{osc_path_regex}': {e}")
        if self.sub is None:
            self.sub = omni.osc.subscribe_to_osc_event_stream(self.on_event)
            self.node = node
            return True
        return False

    def try_pop_event(self) -> Union[None, carb.events.IEvent]:
        """Pop the last event received, or None if there is no event to pop"""
        if self.is_set:
            self.is_set = False
            event = self.event
            self.event = None
            return event
        return None
# ======================================================================
class OgnOnOscEvent:
    """
    This node triggers when an OSC event is received that matches the OSC address path regex.

    On a match it emits a bundle with an "address" token attribute and an
    "arguments" double attribute (scalar for one argument, tuple for several).
    """

    @staticmethod
    def internal_state():
        """Returns an object that will contain per-node state information"""
        return OgnOnOscEventInternalState()

    @staticmethod
    def release(node):
        # Tear down the per-node event-stream subscription when the node goes away
        state = OgnOnOscEventDatabase.OgnOnOscEventDatabase.per_node_internal_state(node)
        if state.sub:
            state.sub.unsubscribe()
            state.sub = None

    @staticmethod
    def check_all_args_are_floats(args: List[Any]) -> bool:
        """
        Returns true if the OSC message arguments has the shape of List[float]
        """
        # NOTE(review): vacuously True for an empty argument list.
        all_args_are_float = all(isinstance(arg, float) for arg in args)
        return all_args_are_float

    @staticmethod
    @carb.profiler.profile
    def compute(db: og.Database) -> bool:
        state: OgnOnOscEventInternalState = db.internal_state
        osc_path_regex = db.inputs.path
        # (Re)subscribe / recompile lazily; return value intentionally unused
        state.first_time_subscribe(db.node, osc_path_regex)
        event = state.try_pop_event()
        if event is None:
            return False
        try:
            addr, args = omni.osc.osc_message_from_carb_event(event)
            # Populate the output bundle
            bundle: og._impl.bundles.BundleContents = db.outputs.message
            bundle.clear()
            # Update the address attribute
            addr_attribute = bundle.insert((og.Type(og.BaseDataType.TOKEN), OSC_MESSAGE_ADDRESS_STR))
            addr_attribute.value = addr
            # Update the arguments attribute
            all_args_are_floats = OgnOnOscEvent.check_all_args_are_floats(args)
            # NOTE(jshrake): This node currently only supports OSC arguments shaped like a List[Float]
            if all_args_are_floats:
                if len(args) == 1:
                    # Argument list contains a single element, write it as a double
                    args_attribute = bundle.insert((og.Type(og.BaseDataType.DOUBLE), OSC_MESSAGE_ARGUMENTS_STR))
                    args_attribute.value = args[0]
                elif len(args) > 1:
                    # Argument list contains multiple element, write it as a list
                    args_attribute = bundle.insert((og.Type(og.BaseDataType.DOUBLE, tuple_count=len(args), array_depth=0), OSC_MESSAGE_ARGUMENTS_STR))
                    args_attribute.value = args
                # NOTE(review): a zero-argument message falls through both
                # branches — the bundle then carries only "address" but
                # execOut still fires; presumably intentional, worth confirming.
            else:
                carb.log_warn(f"OnOscMessage node expected OSC message arguments to be of type List[Float], instead got {args}")
                return False
            db.outputs.execOut = og.ExecutionAttributeState.ENABLED
        except Exception as e:
            carb.log_error(f"Error in OgnOnOscEvent::compute: {e}")
            return False
        return True
| 6,464 | Python | 37.254438 | 150 | 0.629332 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/omni/osc/tests/tests.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import asyncio
import omni.kit.test
import omni.osc
class Test(omni.kit.test.AsyncTestCase):
    """Async integration tests for the omni.osc UDP server and its carb event stream."""
    # Before running each test
    async def setUp(self):
        pass
    # After running each test
    async def tearDown(self):
        pass
    async def test_can_start_and_stop_server(self):
        # A server constructed without a dispatcher should still start and stop cleanly.
        server = omni.osc.DaemonOSCUDPServer(None)
        is_running = server.start("localhost", 12345)
        self.assertTrue(is_running)
        # Give the daemon a moment to spin up before polling its status.
        await asyncio.sleep(0.1)
        is_running = server.running()
        self.assertTrue(is_running)
        # stop() reports the new (stopped) running state.
        is_running = server.stop()
        self.assertFalse(is_running)
    async def test_server_can_receive_messages(self):
        server = omni.osc.OmniOscExt.create_server()
        is_running = server.start("localhost", 3337)
        self.assertTrue(is_running)
        self.count = 0
        def on_event(e) -> None:
            # Every received event must be an OSC event addressed to /filter.
            addr, _ = omni.osc.osc_message_from_carb_event(e)
            self.assertEqual(e.type, omni.osc.core.OSC_EVENT_TYPE)
            self.assertEqual(addr, "/filter")
            self.count += 1
        sub = omni.osc.subscribe_to_osc_event_stream(on_event)
        total_msg_count = 10
        def send_messages():
            # Post messages at the server with a plain pythonosc UDP client.
            import random
            from pythonosc import udp_client
            client = udp_client.SimpleUDPClient(address="127.0.0.1", port=3337)
            self.assertTrue(client is not None)
            for _ in range(total_msg_count):
                client.send_message("/filter", random.random())
        send_messages()
        # Wait a few seconds for the server to receive the messages
        await asyncio.sleep(3)
        # Manually pump the stream so our subscription callback executes
        omni.osc.get_osc_event_stream().pump()
        self.assertEqual(self.count, total_msg_count)
| 2,226 | Python | 34.919354 | 79 | 0.655436 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [0.3.1] - 2023-09-28
### Changed
- Update CHANGELOG
## [0.3.0] - 2023-09-26
### Changed
- Fix OGN node registration for Kit 105.1
## [0.2.0] - 2022-09-12
### Changed
- The `On OSC Message` OmniGraph node now outputs a Bundle typed value rather than an Unknown typed value.
- Users can extract the "address" and the "arguments" of the OSC message with the `Extract Attribute` node.
## [0.1.1] - 2022-09-12
### Changed
- Updated documentation.
## [0.1.0] - 2022-09-02
### Added
- Initial release.
| 600 | Markdown | 22.115384 | 107 | 0.671667 |
NVIDIA-Omniverse/kit-osc/exts/omni.osc/docs/README.md | # omni.osc
Omniverse Kit extension for sending and receiving OSC (Open Sound Control) messages. | 96 | Markdown | 31.333323 | 84 | 0.802083 |
AccelerationAgency/omniverse-extensions/exts/taa.google.spreadsheet.api/taa/google/spreadsheet/api/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands
from typing import List
from pxr import Gf
omni.kit.pipapi.install('google-api-python-client')
omni.kit.pipapi.install('google-auth-httplib2')
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
SPACING = 4
LABEL_WIDTH = 120
class MyExtension(omni.ext.IExt):
    """Extension that periodically reads transform values (translate/rotate/scale)
    from a Google Spreadsheet and applies them to the targeted prims while started.
    """
    label_width = 50

    # lifecycle
    def on_startup(self, ext_id):
        """Initialize per-instance state and build the extension window."""
        print("[taa.google.spreadsheet.api] Extension starting up")
        # Instance state. These were previously class attributes; the mutable
        # dict in particular would have been shared across instances.
        self.data = {'translate_x': 0, 'translate_y': 0, 'translate_z': 0, 'rotate_x': 0, 'rotate_y': 0, 'rotate_z': 0, 'scale_x': 0, 'scale_y': 0, 'scale_z': 0}
        self.subscription = None
        self.google_sheet = None
        self.stage = omni.usd.get_context().get_stage()
        self._source_prim_model = ui.SimpleStringModel()
        self._window = ui.Window("TAA Google Spreadsheet API", width=400, height=270)
        with self._window.frame:
            with ui.VStack(height=0, spacing=SPACING):
                with ui.CollapsableFrame("Source", name="group"):
                    with ui.VStack(height=0, spacing=SPACING):
                        with ui.HStack():
                            ui.Label("Prim", name="attribute_name", width=LABEL_WIDTH)
                            ui.StringField(model=self._source_prim_model)
                            ui.Button(" S ", width=0, height=0, style={"margin": 0}, clicked_fn=self._on_get_selection, tooltip="Get From Selection")
                ui.Spacer(height=12)
                with ui.CollapsableFrame("Settings", name="group"):
                    with ui.VStack(height=0, spacing=SPACING):
                        ui.Label('Spreadsheet ID', height=20)
                        self.spreadsheet_id_field = ui.StringField(height=20)
                        ui.Label('Range', height=20)
                        self.range_field = ui.StringField(height=20)
                        ui.Label('API Key', height=20)
                        self.api_key_field = ui.StringField(height=20)
                ui.Spacer(height=12)
                self.startButton = ui.Button("Start", height=54, clicked_fn=lambda: self.start(), style={"background_color": "green"})
                self.stopButton = ui.Button("Stop", height=54, clicked_fn=lambda: self.stop(), style={"color": "red"})
                ui.Spacer(height=12)
                self.statusLabel = ui.Label('Click start to begin', height=14, style={"font_size": 12})
        self.stopButton.visible = False
        print("[taa.google.spreadsheet.api] Extension start up complete")

    def on_shutdown(self):
        """Stop polling and release the update subscription."""
        print("Extension shutting down")
        self.stop()
        print("Extension shutdown complete")

    # custom methods
    def _on_get_selection(self):
        """Copy the current viewport selection into the source-prim field."""
        print('_on_get_selection', self.get_selection())
        self._source_prim_model.as_string = ", ".join(self.get_selection())

    def get_selection(self) -> List[str]:
        """Return the paths of the prims currently selected on the stage."""
        return omni.usd.get_context().get_selection().get_selected_prim_paths()

    def apply_changes(self, frame):
        """Per-frame callback: every 180th frame, refresh the sheet data and apply
        the transforms to each targeted prim (the Sheets API is rate limited)."""
        try:
            frameNumber = int(frame.payload["SWHFrameNumber"])
            if frameNumber % 180 != 0:
                return
            print('applying changes')
            self.read_data()
            # Re-fetch the stage in case a different USD file was opened after startup.
            stage = omni.usd.get_context().get_stage()
            # act on all targeted prims
            paths = self.list_paths_of_selected_prims()
            for path in paths:
                # get a reference to the prim on stage, making sure that it's valid
                prim = stage.GetPrimAtPath(path)
                if not prim.IsValid():
                    continue
                # transform the prim based on the settings in the Google Spreadsheet
                self.move_prim(prim)
                self.rotate_prim(prim)
                self.scale_prim(prim)
            print('changes applied successfully')
        except Exception as err:
            print(err)

    def read_config(self):
        """Read (spreadsheet_id, cell_range, api_key) from the settings fields."""
        try:
            spreadsheet_id = self.spreadsheet_id_field.model.get_value_as_string()
            cell_range = self.range_field.model.get_value_as_string()
            api_key = self.api_key_field.model.get_value_as_string()
            return (spreadsheet_id, cell_range, api_key)
        except Exception as err:
            # Returning None makes the caller's tuple-unpack raise, which is
            # caught and logged by apply_changes.
            print(err)

    def read_data(self):
        """Fetch the configured sheet range and normalize it into self.data."""
        try:
            spreadsheet_id, cell_range, api_key = self.read_config()
            # Build the Sheets client once and cache it for later polls.
            if self.google_sheet is None:
                service = build('sheets', 'v4', developerKey=api_key)
                self.google_sheet = service.spreadsheets()
            result = self.google_sheet.values().get(spreadsheetId=spreadsheet_id, range=cell_range).execute()
            values = result.get('values', [])
            data = toJSON(values)
            # normalize and clean data, falling back to defaults for missing keys
            self.data["shape"] = data.setdefault('shape', 'Cube')
            self.data["size"] = float(data.setdefault('size', 100))
            self.data["radius"] = float(data.setdefault('radius', 100))
            self.data["translate_x"] = float(data.setdefault('translate_x', 0))
            self.data["translate_y"] = float(data.setdefault('translate_y', 0))
            self.data["translate_z"] = float(data.setdefault('translate_z', 0))
            self.data["rotate_x"] = float(data.setdefault('rotate_x', 0))
            self.data["rotate_y"] = float(data.setdefault('rotate_y', 0))
            self.data["rotate_z"] = float(data.setdefault('rotate_z', 0))
            self.data["scale_x"] = float(data.setdefault('scale_x', 1))
            self.data["scale_y"] = float(data.setdefault('scale_y', 1))
            self.data["scale_z"] = float(data.setdefault('scale_z', 1))
        except HttpError as err:
            print(err)

    def move_prim(self, prim):
        """Translate *prim* to the sheet's translate_x/y/z values."""
        try:
            x = self.data.get('translate_x')
            y = self.data.get('translate_y')
            z = self.data.get('translate_z')
            omni.kit.commands.execute('TransformPrimSRT',
                path=prim.GetPath(),
                new_translation=Gf.Vec3d(x, y, z),
            )
        except Exception as err:
            print("Failed to move prim", err)

    def rotate_prim(self, prim):
        """Rotate *prim* (Euler XYZ, degrees) to the sheet's rotate_x/y/z values."""
        try:
            x = self.data.get('rotate_x')
            y = self.data.get('rotate_y')
            z = self.data.get('rotate_z')
            omni.kit.commands.execute('TransformPrimSRT',
                path=prim.GetPath(),
                new_rotation_euler=Gf.Vec3d(x, y, z),
            )
        except Exception as err:
            # BUG FIX: message previously read "Failed to rotate prime".
            print("Failed to rotate prim", err)

    def scale_prim(self, prim):
        """Scale *prim* to the sheet's scale_x/y/z values."""
        try:
            x = self.data.get('scale_x')
            y = self.data.get('scale_y')
            z = self.data.get('scale_z')
            omni.kit.commands.execute('TransformPrimSRT',
                path=prim.GetPath(),
                new_scale=Gf.Vec3d(x, y, z),
            )
        except Exception as err:
            print("Failed to scale prim", err)

    def list_paths_of_selected_prims(self):
        """Return the prim paths typed into the source field, or, when that field
        is empty, the current viewport selection."""
        try:
            # BUG FIX: splitting an empty string yields [''] (truthy), so the
            # selection fallback never ran; drop empty entries first.
            paths = [i.strip() for i in self._source_prim_model.as_string.split(",") if i.strip()]
            if not paths:
                paths = self.get_selection()
            return paths
        except Exception as err:
            print(err)

    def start(self):
        """Prime the data once, then subscribe to per-frame app updates."""
        self.read_data()
        def on_update_apply(frame):
            self.apply_changes(frame)
        self.subscription = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(on_update_apply)
        self.startButton.visible = False
        self.stopButton.visible = True
        self.statusLabel.text = "Status: started"

    def stop(self):
        """Unsubscribe from app updates and restore the start button."""
        # BUG FIX: `del self.subscription` removed the attribute, so a second
        # stop() (e.g. via on_shutdown) raised AttributeError. Dropping the
        # reference with None still unsubscribes.
        if self.subscription:
            self.subscription = None
        self.startButton.visible = True
        self.stopButton.visible = False
        self.statusLabel.text = "Status: stopped"
"""
Utility functions
"""
def toJSON(values):
    """Convert a Sheets values payload (list of rows) into a dict that maps each
    row's first cell to its second cell.

    Rows with fewer than two cells, or with an empty key or value, are skipped.
    Returns {} for a falsy/empty payload.
    """
    json = {}
    if not values:
        return json
    for row in values:
        # BUG FIX: a row with a single cell used to raise IndexError on row[1].
        if len(row) < 2:
            continue
        key = row[0]
        value = row[1]
        if not key or not value:
            continue
        json[key] = value
    return json
| 8,802 | Python | 27.124601 | 152 | 0.527153 |
AccelerationAgency/omniverse-extensions/exts/taa.google.spreadsheet.api/config/extension.toml | [package]
version = "1.0.0"
title = "TAA - Google Spreadsheet API"
description="An exploration into using Google Spreadsheet data to transform objects on the stage"
readme = "docs/README.md"
repository = ""
category = "Other"
keywords = ["taa", "google", "spreadsheet", "api", "example"]
icon = "data/taa-logo.png"
[dependencies]
"omni.kit.uiapp" = {}
[[python.module]]
name = "taa.google.spreadsheet.api" | 399 | TOML | 23.999999 | 87 | 0.696742 |
AccelerationAgency/omniverse-extensions/exts/taa.omniverse.cameracreator/taa/omniverse/cameracreator/extension.py | import omni.ext
import omni.ui as ui
import omni.kit.commands as commands
class MyExtension(omni.ext.IExt):
    """Extension that adds one-click creation of cameras from the built-in
    perspective/top/front/right viewport views."""

    # Lifecycle
    def on_startup(self, ext_id):
        """Build the window with one button per camera preset."""
        print("[taa.omniverse.viewport] Extension starting up")
        self._window = ui.Window("TAA Quick Camera", width=200, height=200)
        with self._window.frame:
            with ui.VStack(height=0, spacing=4):
                self.perspectiveButton = ui.Button("Perspective", height=40, clicked_fn=lambda: self.create_perspective_camera(), style={"background_color": "black"})
                self.topButton = ui.Button("Top", height=40, clicked_fn=lambda: self.create_top_camera(), style={"background_color": "black"})
                self.frontButton = ui.Button("Front", height=40, clicked_fn=lambda: self.create_front_camera(), style={"background_color": "black"})
                self.rightButton = ui.Button("Right", height=40, clicked_fn=lambda: self.create_right_camera(), style={"background_color": "black"})
        print("[taa.omniverse.viewport] Extension start up complete")

    def on_shutdown(self):
        print("[taa.omniverse.viewport] Extension shutting down")
        self.stop()
        print("[taa.omniverse.viewport] Extension shutdown complete")

    # Custom methods
    def set_camera(self, path):
        """Make the camera prim at *path* the active viewport camera."""
        omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window().set_active_camera(path)

    def rename_camera(self, name):
        """Move the active viewport camera prim to /World/Camera_<name>."""
        cameraPath = omni.kit.viewport_legacy.get_viewport_interface().get_viewport_window().get_active_camera()
        omni.kit.commands.execute('MovePrims', paths_to_move={cameraPath: f'/World/Camera_{name}'})

    def _create_camera_from_view(self, view_path, name, description):
        """Shared implementation for the four create_*_camera entry points:
        activate the built-in view at *view_path*, duplicate it as a new
        camera, and rename it to /World/Camera_<name>."""
        print(f"[taa.omniverse.viewport] Creating new {description} camera")
        self.set_camera(view_path)
        commands.execute('DuplicateFromActiveViewportCameraCommand', viewport_name='Viewport')
        self.rename_camera(name)

    def create_perspective_camera(self):
        self._create_camera_from_view("/OmniverseKit_Persp", "Perspective", "perspective")

    def create_top_camera(self):
        self._create_camera_from_view("/OmniverseKit_Top", "Top", "top-down")

    def create_front_camera(self):
        self._create_camera_from_view("/OmniverseKit_Front", "Front", "front view")

    def create_right_camera(self):
        self._create_camera_from_view("/OmniverseKit_Right", "Right", "right view")

    def start(self):
        print("[taa.omniverse.viewport] Starting...")

    def stop(self):
        print("[taa.omniverse.viewport] Stopping...")
| 2,974 | Python | 44.76923 | 165 | 0.675521 |
AccelerationAgency/omniverse-extensions/exts/taa.omniverse.cameracreator/config/extension.toml | [package]
version = "1.0.0"
title = "TAA - Omniverse Camera Creator"
description = "A simple extension that lets you quickly create cameras with a single click."
readme = "docs/README.md"
repository = ""
category = "Other"
keywords = ["taa", "viewport", "create", "camera", "view"]
icon = "data/taa-logo.png"
[dependencies]
"omni.kit.uiapp" = {}
[[python.module]]
name = "taa.omniverse.cameracreator" | 405 | TOML | 24.374998 | 93 | 0.693827 |
ilanhuang/audio2face-streamgpt-public/README.md | # Stream-GPT
Stream-GPT is an Omniverse Extension that uses OpenAI's GPT-3 model to create a virtual assistant. It allows users to interact with the assistant through both text and voice, and the assistant responds in kind. The extension uses OpenAI's Whisper ASR system to transcribe audio input and Eleven Labs' API to convert the assistant's text responses into audio.
## Getting Started
### Prerequisites
- Python 3.6 or higher
- Omniverse Kit
- Omniverse Audio2Face
- OpenAI API key
- Eleven Labs API key
### Installation
1. Clone the repository:
```bash
git clone https://github.com/ilanhuang/audio2face-stream-chatgpt.git
```
2. Install the required Python packages:
```bash
pip install -r requirements.txt
```
3. Update the `sys.path.append` in `extension.py` with the correct path to the `streaming_server` directory in your local clone of the repository.
```python
sys.path.append("C:\\Users\\YourUsername\\path\\to\\stream-gpt\\pkg\\audio2face-2022.2.1\\exts\\omni.audio2face.player\omni\\audio2face\\player\\scripts\\streaming_server")
```
4. Add the custom extension to Omniverse:
- Go to the "Windows" tab on the top of the screen.
- Scroll down to "Extensions".
- Click on the gear icon to open the Extensions settings.
- Click on the "+" button to add a new path to the custom extension.
- A window will pop up when you turn on the extension.
5. Set your OpenAI and Eleven Labs API keys, as well as the voice_id, model_id, and the Audio2Face's audioplayer's prim path (instance_name) in the extension's settings:
- Open the extension and click on the "Settings" button.
- Enter your OpenAI API key, Eleven Labs API key, voice_id, model_id and instance name in the corresponding fields. (A text file in the repository lists the available voice ids.)
## Usage
Once the application is running, you can interact with the virtual assistant through the UI. You can type your prompts into the text field and click on the "Send" button or use the "Record Audio" button to speak your prompts. The assistant will respond in the chat log and through your speakers.
You can also add a system to the GPT virtual assistant by typing it in the "System" field in the UI.
All interactions made with the extension are saved in a folder named "chat_logs" for future reference. | 2,294 | Markdown | 40.727272 | 358 | 0.762424 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/recording_transcription.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import pyaudio
import wave
import keyboard
import time
from time import sleep
import openai
import datetime
def open_file(filepath):
    """Read the UTF-8 text file at *filepath* and return its full contents."""
    with open(filepath, 'r', encoding='utf-8') as infile:
        contents = infile.read()
    return contents
def save_file(filepath, content):
    """Write *content* to *filepath* as UTF-8 text, replacing any existing file."""
    outfile = open(filepath, 'w', encoding='utf-8')
    try:
        outfile.write(content)
    finally:
        outfile.close()
def timestamp_to_datetime(unix_time):
    """Format a Unix timestamp as e.g. 'Monday, January 01, 2024 at 09:00AM ' (local time)."""
    moment = datetime.datetime.fromtimestamp(unix_time)
    return moment.strftime("%A, %B %d, %Y at %I:%M%p %Z")
def record_client_voice(output_filename, recording_status):
    """Record 16 kHz mono 16-bit audio from the default input device to a WAV file.

    recording_status is a zero-argument callable polled every chunk; capture
    continues while it returns True, with a 0.1 s minimum duration.
    Returns output_filename.
    """
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    frames = []
    p = pyaudio.PyAudio()
    stream = None
    try:
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)
        start_time = time.time()
        min_duration = 0.1
        # The minimum duration guards against an immediate toggle-off
        # producing an empty recording.
        while recording_status() or time.time() - start_time < min_duration:
            data = stream.read(CHUNK)
            frames.append(data)
    except Exception as e:
        print(f"Error while recording audio: {e}")
    finally:
        # Release the audio device even if recording failed partway.
        if stream is not None:
            stream.stop_stream()
            stream.close()
        p.terminate()
    # Write whatever was captured out as a standard PCM WAV file.
    wf = wave.open(output_filename, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    return output_filename
def transcribe_audio_to_text(file_path):
    """Transcribe the audio file at *file_path* with OpenAI Whisper and return the text."""
    audio_file = open(file_path, 'rb')
    try:
        transcript_response = openai.Audio.transcribe("whisper-1", audio_file)
    finally:
        audio_file.close()
    return transcript_response["text"]
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/transmission.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import grpc
import os
import soundfile
import numpy as np
import audio2face_pb2
import audio2face_pb2_grpc
import sounddevice as sd
import time
from typing import Iterator
import requests
import queue
import threading
import carb
def generate_stream(text: str, voice_id: str, model_id: str, api_key: str, stream_chunk_size: int = 2048) -> Iterator[bytes]:
    """Stream TTS audio for *text* from the Eleven Labs endpoint, yielding raw byte chunks."""
    endpoint = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}/stream"
    payload = {"text": text, "model_id": model_id, "voice_settings": None}
    response = requests.post(endpoint, json=payload, headers={"xi-api-key": api_key}, stream=True)
    for chunk in response.iter_content(chunk_size=stream_chunk_size):
        # iter_content can yield keep-alive empty chunks; skip those.
        if chunk:
            yield chunk
def read_api_key_from_file(file_path: str) -> str:
    """Return the API key stored in *file_path*, with surrounding whitespace removed."""
    with open(file_path, 'r') as f:
        raw = f.read()
    return raw.strip()
def text_to_audio_stream(text, instance_name, api_key):
    """Convert *text* to speech via Eleven Labs and stream the resulting audio to
    the Audio2Face gRPC server for the player prim named *instance_name*.

    Voice and model IDs are read from persisted carb settings. Returns None;
    the gRPC push happens on a background thread.
    """
    print("text_to_audio_stream: start")
    settings = carb.settings.get_settings()
    voice_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/VOICE_ID")
    model_id = settings.get_as_string("/persistent/exts/omni.example.streamgpt/MODEL_ID")
    audio_stream = generate_stream(text, voice_id, model_id, api_key)
    # Buffer the whole TTS response into a temporary MP3 next to this module.
    current_dir = os.path.dirname(os.path.realpath(__file__))
    audio_filename = os.path.join(current_dir, "temp_audio_response.mp3")
    with open(audio_filename, 'wb') as f:
        for chunk in audio_stream:
            f.write(chunk)
    audio_data, samplerate = soundfile.read(audio_filename, dtype="float32")
    # Downmix multi-channel audio to a single mono track.
    if len(audio_data.shape) > 1:
        audio_data = np.average(audio_data, axis=1)
    url = "localhost:50051"
    audio_queue = queue.Queue()
    audio_queue.put(audio_data)
    def audio_streamer():
        # Drain the queue on a worker thread so this call returns quickly.
        while not audio_queue.empty():
            audio_chunk = audio_queue.get()
            push_audio_track_stream(url, audio_chunk, samplerate, instance_name)
    audio_thread = threading.Thread(target=audio_streamer)
    audio_thread.start()
    # NOTE(review): the temp file is removed while the streaming thread may still
    # be running; audio_data is already in memory so this appears safe — confirm.
    os.remove(audio_filename)
    print("text_to_audio_stream: end")
def push_audio_track_stream(url, audio_data, samplerate, instance_name):
    """Push mono float32 PCM samples to Audio2Face's gRPC streaming endpoint,
    sending ~0.1 s chunks paced by a short sleep between sends.

    url: host:port of the Audio2Face gRPC server.
    instance_name: prim path of the target audio player instance.
    """
    print("push_audio_track_stream: start")
    chunk_size = samplerate // 10  # samples per chunk (~100 ms of audio)
    sleep_between_chunks = 0.04
    with grpc.insecure_channel(url) as channel:
        print("Channel created")
        stub = audio2face_pb2_grpc.Audio2FaceStub(channel)
        def make_generator():
            # The first request carries only metadata; audio follows in
            # subsequent requests on the same stream.
            start_marker = audio2face_pb2.PushAudioRequestStart(
                samplerate=samplerate,
                instance_name=instance_name,
                block_until_playback_is_finished=False,
            )
            yield audio2face_pb2.PushAudioStreamRequest(start_marker=start_marker)
            for i in range(len(audio_data) // chunk_size + 1):
                try:
                    time.sleep(sleep_between_chunks)
                    chunk = audio_data[i * chunk_size : i * chunk_size + chunk_size]
                    yield audio2face_pb2.PushAudioStreamRequest(audio_data=chunk.astype(np.float32).tobytes())
                except Exception as e:
                    print(f"Error in generator function: {e}")
                    break
        request_generator = make_generator()
        print("Sending audio data...")
        response = stub.PushAudioStream(request_generator)
        if response.success:
            print("SUCCESS")
        else:
            print(f"ERROR: {response.message}")
        print("Channel closed")
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/extension.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import omni.ext
import sys
sys.path.append("C:\\Users\\ERKS 2\\Documents\\Omniverse\\ov\\pkg\\audio2face-2022.2.1\\exts\\omni.audio2face.player\omni\\audio2face\\player\\scripts\\streaming_server")
import openai
import carb
from .window import AudioChatWindow
def open_file(filepath):
    """Return the full contents of the UTF-8 text file at *filepath*."""
    with open(filepath, 'r', encoding='utf-8') as infile:
        text = infile.read()
    return text
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    """Extension entry point: owns the assistant window for the extension's lifetime."""
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        # Configure the OpenAI client from persisted settings before any request is made.
        openai.api_key = AudioChatWindow.get_openai_api_key()
        self._window = AudioChatWindow("VIRTUAL ASSISTANT", width=400, height=525)
    def on_shutdown(self):
        # Drop the window so its UI resources are released with the extension.
        self._window.destroy()
        self._window = None
| 1,821 | Python | 55.937498 | 240 | 0.741351 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/chatbot.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import openai
import json
import numpy as np
from numpy.linalg import norm
import re
from time import time,sleep
from uuid import uuid4
import datetime
def open_file(filepath):
    """Read and return the contents of the UTF-8 text file at *filepath*."""
    infile = open(filepath, 'r', encoding='utf-8')
    try:
        return infile.read()
    finally:
        infile.close()
def save_file(filepath, content):
    """Overwrite *filepath* with *content*, encoded as UTF-8."""
    with open(filepath, mode='w', encoding='utf-8') as handle:
        handle.write(content)
def load_json(filepath):
    """Parse the UTF-8 JSON file at *filepath* and return the decoded object."""
    with open(filepath, 'r', encoding='utf-8') as infile:
        data = json.load(infile)
    return data
def save_json(filepath, payload):
    """Serialize *payload* as pretty-printed JSON (sorted keys, 2-space indent) to *filepath*."""
    text = json.dumps(payload, ensure_ascii=False, sort_keys=True, indent=2)
    with open(filepath, 'w', encoding='utf-8') as outfile:
        outfile.write(text)
def timestamp_to_datetime(unix_time):
    """Render a Unix timestamp as a human-readable local date/time string."""
    when = datetime.datetime.fromtimestamp(unix_time)
    return when.strftime("%A, %B %d, %Y at %I:%M%p %Z")
def gpt3_embedding(content, engine='text-embedding-ada-002'):
    """Return the embedding vector (a plain list of floats) for *content*.

    Non-ASCII characters are stripped first to sidestep encoding issues.
    """
    cleaned = content.encode(encoding='ASCII', errors='ignore').decode()
    response = openai.Embedding.create(input=cleaned, engine=engine)
    return response['data'][0]['embedding']
def chatgpt_completion(messages, model="gpt-4", temp=0.0, top_p=1.0, tokens=400, freq_pen=0.0, pres_pen=0.0):
    """Send *messages* (OpenAI chat format) to the chat completions API and
    return the assistant's reply text.

    Side effect: writes a timestamped log containing the last user message,
    the reply, and the token count to a chat_logs/ folder next to this module.
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temp,
        max_tokens=tokens,
        top_p=top_p,
        frequency_penalty=freq_pen,
        presence_penalty=pres_pen,)
    text = response['choices'][0]['message']['content']
    tokens_used = response['usage']['total_tokens']
    # Log the exchange; the filename embeds the current time for uniqueness.
    filename = 'chat_%s_aibot.json' % time()
    script_dir = os.path.dirname(os.path.realpath(__file__))
    chat_logs_path = os.path.join(script_dir, 'chat_logs')
    if not os.path.exists(chat_logs_path):
        os.makedirs(chat_logs_path)
    input_message = messages[-1]['content']
    log_content = f"User:\n{input_message}\n\nAi_Bot:\n{text}\n\nTokens used: {tokens_used}"
    save_file(os.path.join(chat_logs_path, filename), log_content)
    return text
def flatten_convo(conversation):
    """Render a chat history as newline-separated 'ROLE: content' lines."""
    lines = ['%s: %s' % (turn['role'].upper(), turn['content']) for turn in conversation]
    return '\n'.join(lines).strip()
def set_openai_api_key(api_key):
    """Configure the module-level OpenAI client with *api_key*."""
    openai.api_key = api_key
def set_system_content(content):
    """Set the module-global system prompt used when (re)starting a conversation."""
    global system_content
    system_content = content
if __name__ == '__main__':
    # Standalone REPL driver for the chatbot.
    # BUG FIX: `api_key` and `system_content` were undefined names here
    # (NameError at startup); read them from the environment instead.
    convo_length = 30
    api_key = os.environ.get('OPENAI_API_KEY', '')
    system_content = os.environ.get('CHATBOT_SYSTEM_CONTENT', 'You are a helpful assistant.')
    set_openai_api_key(api_key)
    set_system_content(system_content)
    conversation = list()
    conversation.append({'role': 'system', 'content': system_content})
    counter = 0
    while True:
        # get user input, save to file
        a = input('\n\nCLIENT: ')
        conversation.append({'role': 'user', 'content': a})
        filename = 'chat_%s_client.txt' % time()
        if not os.path.exists('chat_logs'):
            os.makedirs('chat_logs')
        save_file('chat_logs/%s' % filename, a)
        # generate a response
        response = chatgpt_completion(conversation)
        conversation.append({'role': 'assistant', 'content': response})
        print('\n\nAI_Bot: %s' % response)
        # increment counter; periodically reset the conversation so the
        # context window stays bounded
        counter += 2
        if counter >= 10:
            conversation = list()
            conversation.append({'role': 'system', 'content': system_content})
            # BUG FIX: reset the counter too, otherwise every later turn
            # wiped the conversation immediately.
            counter = 0
| 4,226 | Python | 35.128205 | 240 | 0.643871 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/window.py | #Stream-GPT
#GNU - GLP Licence
#Copyright (C) <year> <Huang I Lan & Erks - Virtual Studio>
#This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import omni.ui as ui
import omni.kit.commands
from omni.kit.window.popup_dialog.form_dialog import FormDialog
from time import time
from .recording_transcription import record_client_voice, transcribe_audio_to_text
from .chatbot import chatgpt_completion, set_system_content
from .transmission import text_to_audio_stream
import threading
import time
import tempfile
import datetime
import carb
def save_file(filepath, content):
    """Write *content* to *filepath* as UTF-8 text, replacing any existing file."""
    with open(filepath, "w", encoding="utf-8") as handle:
        handle.write(content)
def timestamp_to_datetime(unix_time):
    """Format a Unix timestamp like 'Friday, June 07, 2024 at 03:15PM ' (local time)."""
    moment = datetime.datetime.fromtimestamp(unix_time)
    return moment.strftime("%A, %B %d, %Y at %I:%M%p %Z")
class AudioChatWindow(ui.Window):
    def _build_fn(self):
        """Build the window UI: chat transcript, prompt field, record/send buttons,
        and the settings/system footer."""
        with self.frame:
            with ui.VStack():
                # The scrollable transcript takes most of the vertical space.
                with ui.ScrollingFrame(height=ui.Percent(75)):
                    self.chat_log = ui.Label("", word_wrap=True)
                with ui.HStack(height=ui.Percent(10)):
                    ui.StringField(model=self._prompt_model, multiline=True)
                with ui.HStack(height=ui.Percent(10)):
                    self.record_audio_button = ui.Button("Record Audio", height=40, clicked_fn=lambda *_args, **_kwargs: self._toggle_record_audio())
                    ui.Button("Send", height=40, clicked_fn=lambda: self._send_text_prompt())
                with ui.HStack():
                    ui.Button("Settings", tooltip="Configure API Key, Instance name and Default System", width=0, height=0, clicked_fn=lambda: self._open_settings())
                    system_settings_button = ui.Button("System", height=0, width=0)
                    system_settings_button.set_clicked_fn(lambda: self.show_system_settings_menu())
    def __init__(self, title: str, **kwargs) -> None:
        # Conversation history in OpenAI chat format; index 0 is the system turn.
        self.conversation = [{"role": "system", "content": ""}]
        self.system_content_model = ui.SimpleStringModel()
        # NOTE(review): this lock is created but not used by the methods visible
        # here — confirm whether it is still needed.
        self.lock = threading.Lock()
        super().__init__(title, **kwargs)
        self._prompt_model = ui.SimpleStringModel()
        # Defer UI construction to _build_fn, invoked by the window framework.
        self.frame.set_build_fn(self._build_fn)
    def show_system_settings_menu(self):
        """Pop up a small menu holding a multiline editor for the system prompt."""
        self.system_settings_menu = ui.Menu("")
        with self.system_settings_menu:
            # The field edits system_content_model in place; no OK button needed.
            ui.StringField(model=self.system_content_model, multiline=True)
        self.system_settings_menu.show()
def _toggle_record_audio(self):
if not hasattr(self, "recording"):
self.recording = False
if not self.recording:
self.recording = True
threading.Thread(target=self._record_and_transcribe_audio).start()
else:
self.recording = False
    def _process_conversation(self, user_content):
        """Run one chat turn: sync the system prompt, query the model, update the
        transcript, and stream the spoken reply to Audio2Face on a worker thread."""
        current_system_content = self.system_content_model.get_value_as_string().strip()
        # If the user edited the system prompt, restart the conversation with it.
        if current_system_content != self.conversation[0]['content']:
            self.reset_chat()
        set_system_content(current_system_content)
        self.conversation.append({"role": "user", "content": user_content})
        # NOTE(review): the assistant reply is never appended to
        # self.conversation, so later turns lack it — confirm if intentional.
        response = chatgpt_completion(self.conversation)
        self.chat_log.text += f"\nUser: {user_content}\nAssistant: {response}"
        settings = carb.settings.get_settings()
        instance_name = settings.get_as_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME")
        # Speak the reply without blocking this thread.
        threading.Thread(target=text_to_audio_stream, args=(response, instance_name, self.get_elevenlabs_api_key())).start()
def _record_and_transcribe_audio(self):
output_filename = "recorded_audio.wav"
record_client_voice(output_filename)
transcript = transcribe_audio_to_text(output_filename)
self._send_audio_transcript(transcript)
    def _send_audio_transcript(self, transcript):
        """Show a progress hint, then process the transcribed prompt off the UI thread."""
        self.chat_log.text += "\nThinking..."
        threading.Thread(target=self._process_conversation, args=(transcript,)).start()
def reset_chat(self):
self.chat_log.text = ""
self.conversation = [{"role": "system", "content": self.system_content_model.get_value_as_string().strip()}]
    def _save_settings(self, dialog):
        """Persist the settings dialog's values to carb persistent settings, then close it."""
        values = dialog.get_values()
        settings = carb.settings.get_settings()
        settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI", values["APIKey_OPEN_AI"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS", values["APIKey_ELEVEN_LABS"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/VOICE_ID", values["ELEVEN_LABS_VOICE_ID"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/MODEL_ID", values["ELEVEN_LABS_MODEL_ID"])
        settings.set_string("/persistent/exts/omni.example.streamgpt/INSTANCE_NAME", values["INSTANCE_NAME"])
        dialog.hide()
def _open_settings(self):
    """Open the settings dialog pre-filled with stored values or placeholder hints.

    Empty (never-saved) settings fall back to "Enter ... Here" placeholder
    text so the user can see which fields still need values.
    """
    settings = carb.settings.get_settings()
    base = "/persistent/exts/omni.example.streamgpt/"
    # get_as_string returns "" for unset keys; `or` substitutes the placeholder,
    # replacing the original repetitive `if x == "":` chains.
    apikey_open_ai = settings.get_as_string(base + "APIKey_OPEN_AI") or "Enter OPEN-AI API Key Here"
    apikey_eleven_labs = settings.get_as_string(base + "APIKey_ELEVEN_LABS") or "Enter ELEVEN-LABS API Key Here"
    voice_id = settings.get_as_string(base + "VOICE_ID") or "Enter Eleven Labs Voice ID Here"
    model_id = settings.get_as_string(base + "MODEL_ID") or "Enter Eleven Labs Model ID Here"
    instance_name = settings.get_as_string(base + "INSTANCE_NAME") or "Enter Instance Name Here"
    field_defs = [
        FormDialog.FieldDef("APIKey_OPEN_AI", "OPEN-AI API Key: ", ui.StringField, apikey_open_ai),
        FormDialog.FieldDef("APIKey_ELEVEN_LABS", "ELEVEN-LABS API Key: ", ui.StringField, apikey_eleven_labs),
        FormDialog.FieldDef("ELEVEN_LABS_VOICE_ID", "Voice ID: ", ui.StringField, voice_id),
        FormDialog.FieldDef("ELEVEN_LABS_MODEL_ID", "Model ID: ", ui.StringField, model_id),
        FormDialog.FieldDef("INSTANCE_NAME", "Instance Name: ", ui.StringField, instance_name),
    ]
    dialog = FormDialog(
        title="Settings",
        message="Your Settings: ",
        field_defs=field_defs,
        ok_handler=lambda dialog: self._save_settings(dialog))
    dialog.show()
@staticmethod
def get_openai_api_key():
    """Return the stored OpenAI API key ("" when unset)."""
    return carb.settings.get_settings().get_as_string(
        "/persistent/exts/omni.example.streamgpt/APIKey_OPEN_AI")
def get_elevenlabs_api_key(self):
    """Return the stored Eleven Labs API key ("" when unset)."""
    return carb.settings.get_settings().get_as_string(
        "/persistent/exts/omni.example.streamgpt/APIKey_ELEVEN_LABS")
def _send_text_prompt(self):
    """Submit the typed prompt to the chat pipeline and clear the input field."""
    prompt = self._prompt_model.get_value_as_string()
    self.chat_log.text += "\nThinking..."
    worker = threading.Thread(target=self._process_conversation, args=(prompt,))
    worker.start()
    self._prompt_model.set_value("")
def _toggle_record_audio(self):
    """Flip the recording flag and update the record button label."""
    # Lazily initialise the flag the first time the button is pressed.
    if not hasattr(self, "recording"):
        self.recording = False
    self.recording = not self.recording
    if self.recording:
        self.record_audio_button.text = "Stop Recording"
    else:
        self.record_audio_button.text = "Record Audio"
        # NOTE(review): the capture/transcribe worker starts only when the flag
        # is toggled OFF, and the worker polls recording_status() to decide when
        # to stop — confirm the capture loop is not meant to start on the first
        # (arming) click instead.
        threading.Thread(target=self._record_and_transcribe_audio_alternative).start()
def recording_status(self):
    # Polled by the audio capture loop to decide when to stop recording.
    return self.recording
def _record_and_transcribe_audio_alternative(self):
    """Record to a temporary wav until recording stops, transcribe it, then clean up.

    Serialised with self.lock so overlapping button presses cannot run two
    capture sessions at once.
    """
    with self.lock:
        # delete=False so the closed file can be reopened by name (required on
        # Windows); it is removed explicitly below.
        temp_audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
        temp_audio_filename = temp_audio_file.name
        temp_audio_file.close()
        recorded_audio_filename = record_client_voice(temp_audio_filename, self.recording_status)
        transcript = transcribe_audio_to_text(recorded_audio_filename)
        os.remove(temp_audio_filename)
        # Ignore empty transcriptions (silence or aborted recordings).
        if transcript.strip():
            self._send_audio_transcript(transcript)
def destroy(self):
super().destroy()
self._prompt_model = None | 9,174 | Python | 47.036649 | 240 | 0.645193 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/stream/gptchat/pytransform/__init__.py | # These modules are also used by protection code, so that protection
# code needn't import anything
import os
import platform
import sys
import struct
# Because ctypes is new from Python 2.5, so pytransform doesn't work
# before Python 2.5
#
from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
from fnmatch import fnmatch
#
# Support Platforms
#
# Each entry maps a canonical platform/arch alias to the fnmatch patterns
# that platform.system() / platform.machine() values are matched against.
#
plat_path = 'platforms'
plat_table = (
    ('windows', ('windows', 'cygwin*')),
    ('darwin', ('darwin',)),
    ('ios', ('ios',)),
    ('linux', ('linux*',)),
    ('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
    ('poky', ('poky',)),
)
arch_table = (
    ('x86', ('i?86', )),
    ('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
    ('arm', ('armv5',)),
    ('armv6', ('armv6l',)),
    ('armv7', ('armv7l',)),
    ('ppc64', ('ppc64le',)),
    ('mips32', ('mips',)),
    ('aarch32', ('aarch32',)),
    ('aarch64', ('aarch64', 'arm64'))
)
#
# Hardware type
#
# Selector constants for get_hd_info(): disk serial, MAC address, IPv4/IPv6
# address, and domain name.
HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)
#
# Global
#
# Handle to the loaded _pytransform shared library; set by pyarmor_init().
_pytransform = None
# Raised for failures originating in the _pytransform native library.
class PytransformError(Exception):
    pass
def dllmethod(func):
    """Decorator marking a function as a thin wrapper over a _pytransform export.

    Fix: use functools.wraps so the wrapper keeps the wrapped function's
    __name__, __doc__ and module (the original wrapper hid them, which broke
    introspection and help()).
    """
    import functools

    @functools.wraps(func)
    def wrap(*args, **kwargs):
        return func(*args, **kwargs)
    return wrap
@dllmethod
def version_info():
    """Return the version information object reported by the native library."""
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('version_info', _pytransform))
    return dlfunc()
@dllmethod
def init_pytransform():
    """Bind the native library to this interpreter; returns the native status code."""
    major, minor = sys.version_info[0:2]
    # Python2.5 no sys.maxsize but sys.maxint
    # bitness = 64 if sys.maxsize > 2**32 else 32
    prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
    init_module = prototype(('init_module', _pytransform))
    ret = init_module(major, minor, pythonapi._handle)
    # Return codes in the 0x1xxx range encode wrapper-initialisation failures.
    if (ret & 0xF000) == 0x1000:
        raise PytransformError('Initialize python wrapper failed (%d)'
                               % (ret & 0xFFF))
    return ret
@dllmethod
def init_runtime():
    """Initialise the protection runtime (all four hook flags disabled)."""
    prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
    _init_runtime = prototype(('init_runtime', _pytransform))
    return _init_runtime(0, 0, 0, 0)
@dllmethod
def encrypt_code_object(pubkey, co, flags, suffix=''):
    """Encrypt a code object with `pubkey`; `suffix` names a private package."""
    _pytransform.set_option(6, suffix.encode())
    prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
    dlfunc = prototype(('encrypt_code_object', _pytransform))
    return dlfunc(pubkey, co, flags)
@dllmethod
def generate_license_key(prikey, keysize, rcode):
    """Generate a license key from a private key; rcode is encoded on Python 3."""
    prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
    dlfunc = prototype(('generate_license_key', _pytransform))
    return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
        else dlfunc(prikey, keysize, rcode.encode())
@dllmethod
def get_registration_code():
    """Return the raw registration/license text stored in the native library."""
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('get_registration_code', _pytransform))
    return dlfunc()
@dllmethod
def get_expired_days():
    """Return the remaining license lifetime as reported by the native library."""
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('get_expired_days', _pytransform))
    return dlfunc()
@dllmethod
def clean_obj(obj, kind):
    """Wipe an object's buffer in place; `kind` selects the native clear routine."""
    prototype = PYFUNCTYPE(c_int, py_object, c_int)
    dlfunc = prototype(('clean_obj', _pytransform))
    return dlfunc(obj, kind)
def clean_str(*args):
    """Wipe the in-memory contents of each string-like argument via clean_obj.

    Raises RuntimeError for arguments that are not str, bytearray or unicode.
    """
    kind_by_typename = {'str': 0, 'bytearray': 1, 'unicode': 2}
    for obj in args:
        type_name = type(obj).__name__
        if type_name not in kind_by_typename:
            raise RuntimeError('Can not clean object: %s' % obj)
        clean_obj(obj, kind_by_typename[type_name])
def get_hd_info(hdtype, name=None):
    """Query hardware information from the native library.

    Args:
        hdtype: one of HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN.
        name: optional device/interface name (str or bytes).

    Returns:
        The hardware information decoded to str.

    Raises:
        RuntimeError: if hdtype is out of range.
        PytransformError: if the native query fails.
    """
    if hdtype not in range(HT_DOMAIN + 1):
        raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
    size = 256
    t_buf = c_char * size
    buf = t_buf()
    # Bug fix: the original tested hasattr('name', 'encode') — the literal
    # string, which is always true — so a bytes `name` crashed on .encode().
    # Test the actual argument instead.
    cname = c_char_p(0 if name is None
                     else name.encode('utf-8') if hasattr(name, 'encode')
                     else name)
    if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
        raise PytransformError('Get hardware information failed')
    return buf.value.decode()
def show_hd_info():
    # Print all hardware information (implemented natively, writes to stdout).
    return _pytransform.show_hd_info()
def assert_armored(*names):
    """Decorator factory: verify `names` are still obfuscated before each call.

    The native check (dlfunc) is bound once, at decoration time; it raises
    inside wrap_execute when any named object has been tampered with.
    Fix: functools.wraps preserves the decorated function's metadata.
    """
    import functools
    prototype = PYFUNCTYPE(py_object, py_object)
    dlfunc = prototype(('assert_armored', _pytransform))

    def wrapper(func):
        @functools.wraps(func)
        def wrap_execute(*args, **kwargs):
            dlfunc(names)
            return func(*args, **kwargs)
        return wrap_execute
    return wrapper
def check_armored(*names):
    """Like assert_armored but returns True/False instead of raising."""
    try:
        prototype = PYFUNCTYPE(py_object, py_object)
        prototype(('assert_armored', _pytransform))(names)
        return True
    except RuntimeError:
        return False
def get_license_info():
    """Parse the registration text into a dict of license fields.

    Returns a dict with ISSUER, EXPIRED, HARDDISK, IFMAC, IFIPV4, DOMAIN,
    DATA and CODE keys (plus FLAGS when present); absent fields stay None.
    """
    info = {
        'ISSUER': None,
        'EXPIRED': None,
        'HARDDISK': None,
        'IFMAC': None,
        'IFIPV4': None,
        'DOMAIN': None,
        'DATA': None,
        'CODE': None,
    }
    rcode = get_registration_code().decode()
    # Optional header line: '*VERSION:<issuer>...\n'
    if rcode.startswith('*VERSION:'):
        index = rcode.find('\n')
        info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
        rcode = rcode[index+1:]
    index = 0
    # Optional expiry line: '*TIME:<epoch seconds>\n'
    if rcode.startswith('*TIME:'):
        from time import ctime
        index = rcode.find('\n')
        info['EXPIRED'] = ctime(float(rcode[6:index]))
        index += 1
    # Optional single-byte flags field.
    if rcode[index:].startswith('*FLAGS:'):
        index += len('*FLAGS:') + 1
        info['FLAGS'] = ord(rcode[index - 1])
    # Remaining '*KEY:value' sections: each field's value runs until the next
    # marker; CODE takes the tail of the string.
    prev = None
    start = index
    for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
        index = rcode.find('*%s:' % k)
        if index > -1:
            if prev is not None:
                info[prev] = rcode[start:index]
            prev = k
            start = index + len(k) + 2
    info['CODE'] = rcode[start:]
    # A 'code;data' CODE field packs optional user data after the semicolon.
    i = info['CODE'].find(';')
    if i > 0:
        info['DATA'] = info['CODE'][i+1:]
        info['CODE'] = info['CODE'][:i]
    return info
def get_license_code():
    # Convenience accessor for the license CODE field.
    return get_license_info()['CODE']
def get_user_data():
    # Convenience accessor for the user DATA field.
    return get_license_info()['DATA']
def _match_features(patterns, s):
for pat in patterns:
if fnmatch(s, pat):
return True
def _gnu_get_libc_version():
    """Return the glibc version split on '.', or None when unavailable.

    NOTE(review): cdll.LoadLibrary('') resolves gnu_get_libc_version from the
    running process on glibc systems; elsewhere the call fails and the
    bare `except` swallows it, yielding None — confirm this is intended.
    """
    try:
        prototype = CFUNCTYPE(c_char_p)
        ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
        return ver.decode().split('.')
    except Exception:
        pass
def format_platform(platid=None):
    """Return the canonical '<platform>/<machine>' library sub-path.

    An explicit `platid` short-circuits detection and is returned normalised.
    """
    if platid:
        return os.path.normpath(platid)
    plat = platform.system().lower()
    mach = platform.machine().lower()
    # Canonicalise the platform name via the fnmatch alias table.
    for alias, platlist in plat_table:
        if _match_features(platlist, plat):
            plat = alias
            break
    if plat == 'linux':
        # Distinguish musl/Android/old-glibc Linux flavours by libc.
        cname, cver = platform.libc_ver()
        if cname == 'musl':
            plat = 'musl'
        elif cname == 'libc':
            plat = 'android'
        elif cname == 'glibc':
            # glibc older than 2.14 gets the dedicated centos6 build.
            v = _gnu_get_libc_version()
            if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
                plat = 'centos6'
    # Canonicalise the machine name via the fnmatch alias table.
    for alias, archlist in arch_table:
        if _match_features(archlist, mach):
            mach = alias
            break
    if plat == 'windows' and mach == 'x86_64':
        # 32-bit Python on 64-bit Windows still needs the x86 library.
        bitness = struct.calcsize('P'.encode()) * 8
        if bitness == 32:
            mach = 'x86'
    return os.path.join(plat, mach)
# Load _pytransform library
def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
    """Locate, load and configure the _pytransform shared library.

    Args:
        path: directory to search (defaults to this module's directory).
        is_runtime: nonzero when loading for an obfuscated script at runtime.
        platid: explicit platform id or direct path to the library file.
        suffix: private-package suffix appended to the library name.
        advanced: enable the native library's advanced mode.

    Returns:
        The loaded ctypes CDLL handle, with options 1-6 already set.

    Raises:
        PytransformError: when the platform is unsupported or the file is missing.
    """
    path = os.path.dirname(__file__) if path is None \
        else os.path.normpath(path)
    plat = platform.system().lower()
    for alias, platlist in plat_table:
        if _match_features(platlist, plat):
            plat = alias
            break
    name = '_pytransform' + suffix
    # Platform-specific shared-library file name.
    if plat == 'linux':
        filename = os.path.abspath(os.path.join(path, name + '.so'))
    elif plat in ('darwin', 'ios'):
        filename = os.path.join(path, name + '.dylib')
    elif plat == 'windows':
        filename = os.path.join(path, name + '.dll')
    elif plat in ('freebsd', 'poky'):
        filename = os.path.join(path, name + '.so')
    else:
        filename = None
    # An explicit platid may be a direct file path, or select a
    # 'platforms/<plat>/<arch>' sub-directory.
    if platid is not None and os.path.isfile(platid):
        filename = platid
    elif platid is not None or not os.path.exists(filename) or not is_runtime:
        libpath = platid if platid is not None and os.path.isabs(platid) else \
            os.path.join(path, plat_path, format_platform(platid))
        filename = os.path.join(libpath, os.path.basename(filename))
    if filename is None:
        raise PytransformError('Platform %s not supported' % plat)
    if not os.path.exists(filename):
        raise PytransformError('Could not find "%s"' % filename)
    try:
        m = cdll.LoadLibrary(filename)
    except Exception as e:
        if sys.flags.debug:
            print('Load %s failed:\n%s' % (filename, e))
        raise
    # Removed from v4.6.1
    # if plat == 'linux':
    #     m.set_option(-1, find_library('c').encode())
    # Option 1: tell the library where its data files live.
    if not os.path.abspath('.') == os.path.abspath(path):
        m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
    elif (not is_runtime) and sys.platform.startswith('cygwin'):
        path = os.environ['PYARMOR_CYGHOME']
        m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
    # Required from Python3.6
    m.set_option(2, sys.byteorder.encode())
    if sys.flags.debug:
        m.set_option(3, c_char_p(1))
    m.set_option(4, c_char_p(not is_runtime))
    # Disable advanced mode by default
    m.set_option(5, c_char_p(not advanced))
    # Set suffix for private package
    if suffix:
        m.set_option(6, suffix.encode())
    return m
def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
    """Load the native library into the module-level handle and bind this interpreter."""
    global _pytransform
    _pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
    return init_pytransform()
def pyarmor_runtime(path=None, suffix='', advanced=0):
    """Runtime entry point called by obfuscated scripts; idempotent.

    On failure the process exits with status 1 unless debug mode is on or a
    test harness set sys._catch_pyarmor, in which case the exception propagates.
    """
    # Already initialised — nothing to do.
    if _pytransform is not None:
        return
    try:
        pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
        init_runtime()
    except Exception as e:
        if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
            raise
        sys.stderr.write("%s\n" % str(e))
        sys.exit(1)
# ----------------------------------------------------------
# End of pytransform
# ----------------------------------------------------------
#
# Unused
#
@dllmethod
def generate_license_file(filename, priname, rcode, start=-1, count=1):
    """Write `count` project license files starting at serial `start` (unused)."""
    prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
    dlfunc = prototype(('generate_project_license_files', _pytransform))
    return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
                  start, count) if sys.version_info[0] == 3 \
        else dlfunc(filename, priname, rcode, start, count)
#
# Not available from v5.6
#
def generate_capsule(licfile):
    """Build a full project capsule: keys, license key pair and project license."""
    prikey, pubkey, prolic = _generate_project_capsule()
    capkey, newkey = _generate_pytransform_key(licfile, pubkey)
    return prikey, pubkey, capkey, newkey, prolic
@dllmethod
def _generate_project_capsule():
    # Returns (private key, public key, project license) from the native library.
    prototype = PYFUNCTYPE(py_object)
    dlfunc = prototype(('generate_project_capsule', _pytransform))
    return dlfunc()
@dllmethod
def _generate_pytransform_key(licfile, pubkey):
    # Derive the capsule/new key pair from a license file and public key.
    prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
    dlfunc = prototype(('generate_pytransform_key', _pytransform))
    return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
                  pubkey)
#
# Deprecated functions from v5.1
#
@dllmethod
def encrypt_project_files(proname, filelist, mode=0):
    """Encrypt all project files in `filelist` (deprecated since v5.1)."""
    prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
    dlfunc = prototype(('encrypt_project_files', _pytransform))
    return dlfunc(proname.encode(), filelist, mode)
def generate_project_capsule(licfile):
    """Deprecated capsule builder kept for pre-v5.1 callers."""
    prikey, pubkey, prolic = _generate_project_capsule()
    capkey = _encode_capsule_key_file(licfile)
    return prikey, pubkey, capkey, prolic
@dllmethod
def _encode_capsule_key_file(licfile):
    # Encode a license file into a capsule key blob.
    prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
    dlfunc = prototype(('encode_capsule_key_file', _pytransform))
    return dlfunc(licfile.encode(), None)
@dllmethod
def encrypt_files(key, filelist, mode=0):
    """Encrypt files with a raw 32-byte key (deprecated)."""
    t_key = c_char * 32
    prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
    dlfunc = prototype(('encrypt_files', _pytransform))
    return dlfunc(t_key(*key), filelist, mode)
@dllmethod
def generate_module_key(pubname, key):
    """Generate a per-module key from a public name and raw 32-byte key (deprecated)."""
    t_key = c_char * 32
    prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
    dlfunc = prototype(('generate_module_key', _pytransform))
    return dlfunc(pubname.encode(), t_key(*key), None)
#
# Compatible for PyArmor v3.0
#
@dllmethod
def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
    '''Only for old version, before PyArmor 3'''
    pyarmor_init(is_runtime=1)
    prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
    _init_runtime = prototype(('init_runtime', _pytransform))
    return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)
@dllmethod
def import_module(modname, filename):
    '''Only for old version, before PyArmor 3'''
    prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
    _import_module = prototype(('import_module', _pytransform))
    return _import_module(modname.encode(), filename.encode())
@dllmethod
def exec_file(filename):
    '''Only for old version, before PyArmor 3'''
    prototype = PYFUNCTYPE(c_int, c_char_p)
    _exec_file = prototype(('exec_file', _pytransform))
    return _exec_file(filename.encode())
| 13,587 | Python | 27.07438 | 79 | 0.60499 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.2"
# Lists people or organizations that are considered the "authors" of the package.
authors = ["Huang I Lan - Erks Virtual Studio"]
# The title and description fields are primarily for displaying extension info in UI
title = "stream-gpt"
description="Extension for NVIDIA Omniverse that provides a simple chatbot UI to record audio inputs, transcribe them, use transcriptions as ChatGPT prompts, generate responses, convert responses to audio, and transmit them to Audio2Face via gRPC, while maintaining your original scripting style and modular system."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = ""
# One of categories for UI.
category = "Chatbot"
# Keywords for the extension
keywords = ["Chat_GPT", "AI_assistant"]
# Location of change log file in target (final) folder of extension, relative to the root.
# More info on writing changelog: https://keepachangelog.com/en/1.0.0/
changelog="docs/CHANGELOG.md"
# Preview image and icon. Folder named "data" automatically goes in git lfs (see .gitattributes file).
# Preview image is shown in "Overview" of Extensions window. Screenshot of an extension might be a good preview image.
preview_image = "data/preview.png"
# Icon is shown in Extensions window, it is recommended to be square, of size 256x256.
icon = "data/icon.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
[python.pipapi]
requirements = [
"pyaudio",
"openai",
"keyboard",
"soundfile",
"elevenlabs",
"pydub",
"gtts",
]
# Allow going to online index if package can't be found locally (not recommended)
use_online_index = true
# Main python module this extension provides, it will be publicly available as "import stream.gptchat".
[[python.module]]
name = "stream.gptchat"
[[test]]
# Extra dependencies only to be used during test run
dependencies = [
"omni.kit.ui_test" # UI testing extension
]
| 2,071 | TOML | 32.967213 | 318 | 0.740222 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/docs/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.0.2] - 2023-07-06
- Upgraded the UI to allow users to add API keys, Voice_ID, Voice_Models, and Instance Name directly from the UI, eliminating the need for hardcoding.
## [1.0.0] - 2023-04-13
- Initial version of extension UI template with a window.
| 355 | Markdown | 28.666664 | 150 | 0.715493 |
ilanhuang/audio2face-streamgpt-public/exts/stream.gptchat/docs/README.md | # Stream-GPT
Stream-GPT is an Omniverse Extension that uses OpenAI's GPT-3 model to create a virtual assistant. It allows users to interact with the assistant through both text and voice, and the assistant responds in kind. The extension uses OpenAI's Whisper ASR system to transcribe audio input and Eleven Labs' API to convert the assistant's text responses into audio.
## Getting Started
### Prerequisites
- OpenAI API key
- Eleven Labs API key
### SET UP
1. Set your OpenAI and Eleven Labs API keys, as well as the voice_id, model_id, and the Audio2Face's audioplayer's prim path (instance_name) in the extension's settings:
- Open the extension and click on the "Settings" button.
- Enter your OpenAI API key, Eleven Labs API key, voice_id, model_id and instance name in the corresponding fields. (A text file in the repository lists the available voice ids.)
## Usage
Once the application is running, you can interact with the virtual assistant through the UI. You can type your prompts into the text field and click on the "Send" button or use the "Record Audio" button to speak your prompts. The assistant will respond in the chat log and through your speakers.
You can also add a system to the GPT virtual assistant by typing it in the "System" field in the UI.
All interactions made with the extension are saved in a folder named "chat_logs" for future reference.
| 1,389 | Markdown | 46.931033 | 358 | 0.773938 |
ilanhuang/audio2face-streamgpt-public/UE5_install_files/extension.toml | [package]
version = "104.10.8"
title = "Audio2Face Exporter"
authors = ["NVIDIA"]
description="Custom Kit exporter for audio2face"
repository = ""
keywords = ["audio2face"]
category = "Animation"
readme = "docs/README.md"
changelog = "docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.ui" = {optional = true}
"omni.kit.window.filepicker" = {optional = true}
"omni.graph" = {}
"omni.graph.tools" = {}
"omni.kit.menu.utils" = {optional = true}
"omni.kit.window.viewport" = {optional = true}
"omni.kit.viewport.utility" = {optional = true}
"omni.client" = {}
"omni.anim.shared" = {}
"omni.deform.shared" = {}
"omni.audio2face.common" = {}
"omni.audio2face.ui.common" = {optional = true}
"omni.audio2face.tool" = {}
"omni.services.core"={}
[[python.module]]
name = "omni.audio2face.exporter"
[[test]]
dependencies = [
"omni.kit.renderer.core",
"omni.ui",
"omni.kit.window.filepicker",
"omni.kit.menu.utils",
"omni.kit.window.viewport",
"omni.kit.viewport.utility",
"omni.audio2face.ui.common"
]
timeout = 900
stdoutFailPatterns.exclude = [
"*failed to upload minidump*", # Exclude grahics leaks until fixed
]
[package.writeTarget]
kit = true
platform = true
[python.pipapi]
requirements = ['python-osc']
use_online_index = true | 1,310 | TOML | 22.836363 | 71 | 0.681679 |
ilanhuang/audio2face-streamgpt-public/UE5_install_files/from pythonosc import udp_client.py | from pythonosc import udp_client
blend = ["eyeBlinkLeft", "eyeLookDownLeft", "eyeLookInLeft", "eyeLookOutLeft", "eyeLookUpLeft", "eyeSquintLeft", "eyeWideLeft", "eyeBlinkRight", "eyeLookDownRight", "eyeLookInRight", "eyeLookOutRight", "eyeLookUpRight", "eyeSquintRight", "eyeWideRight", "jawForward", "jawLeft", "jawRight", "jawOpen", "mouthClose", "mouthFunnel", "mouthPucker", "mouthLeft", "mouthRight", "mouthSmileLeft", "mouthSmileRight", "mouthFrownLeft", "mouthFrownRight", "mouthDimpleLeft", "mouthDimpleRight", "mouthStretchLeft", "mouthStretchRight", "mouthRollLower", "mouthRollUpper", "mouthShrugLower", "mouthShrugUpper", "mouthPressLeft", "mouthPressRight", "mouthLowerDownLeft", "mouthLowerDownRight", "mouthUpperUpLeft", "mouthUpperUpRight", "browDownLeft", "browDownRight", "browInnerUp", "browOuterUpLeft", "browOuterUpRight", "cheekPuff", "cheekSquintLeft", "cheekSquintRight", "noseSneerLeft", "noseSneerRight", "tongueOut"]
# Stream the solved blendshape weights over OSC to a local listener.
client = udp_client.SimpleUDPClient('127.0.0.1', 5008)
# NOTE(review): `outWeight` is not defined in this snippet — it is expected to
# be in scope where this fragment is pasted (the facsSolver weight output);
# confirm before use.
osc_array = outWeight.tolist()
count = 0
for i in osc_array:
    # One OSC message per ARKit blendshape, addressed as /<blendshape name>.
    client.send_message('/' + str(blend[count]), i)
    count += 1
[python.pipapi]
requirements = ['python-osc']
use_online_index = true | 1,267 | Python | 89.571422 | 910 | 0.708761 |
ilanhuang/audio2face-streamgpt-public/UE5_install_files/facsSolver.py | import numpy as np
from omni.audio2face.common import log_error, log_info, log_warn
from scipy.optimize import lsq_linear
from pythonosc import udp_client
class FacsSolver:
def __init__(self, neutral_mat, delta_mat):
    """Bounded least-squares FACS blendshape solver.

    Args:
        neutral_mat: vertex data of the neutral pose.
        delta_mat: per-pose vertex deltas, one column per blendshape pose
            (numPoses is taken from its second dimension).
    """
    # Regularisation strengths: user-tunable coefficient x fixed internal scale.
    self.weightRegulCoeff = 3.5
    self.weightRegulCoeff_scale = 10.0
    self.prevRegulCoeff = 3.5
    self.prevRegulCoeff_scale = 100.0
    self.sparseRegulCoeff = 1.0
    self.sparseRegulCoeff_scale = 0.25
    self.symmetryRegulCoeff = 1.0
    self.symmetryRegulCoeff_scale = 10.0
    self.neutral_mat = neutral_mat
    self.delta_mat_orig = delta_mat
    self.delta_mat = delta_mat
    self.numPoses_orig = self.delta_mat_orig.shape[1]
    self.numPoses = self.numPoses_orig
    # Box constraints: each solved weight stays in [0, 1].
    self.lb_orig = np.zeros(self.numPoses_orig)
    self.ub_orig = self.lb_orig + 1.0
    self.lb = self.lb_orig.copy()
    self.ub = self.ub_orig.copy()
    # Mapping from original pose index to active (compacted) index; -1 = inactive.
    self.activeIdxMap = range(self.numPoses_orig)
    self.activePosesBool = np.array([True for pi in range(self.numPoses_orig)], dtype=bool)
    # Pair indices (-1 = unpaired) for mutually-cancelling and left/right poses.
    self.cancelPoseIndices = np.array([-1 for pi in range(self.numPoses_orig)], dtype=int)
    self.symmetryPoseIndices = np.array([-1 for pi in range(self.numPoses_orig)], dtype=int)
    self.cancelList = []
    self.symmetryList = []
    self.symShapeMat = np.zeros((self.numPoses_orig, self.numPoses_orig))
    # Previous frame's solution, used for temporal smoothing.
    self.prevWeights = np.zeros(self.numPoses_orig)
    # TODO L1 implementation
    l1RegulMat = np.ones((1, self.numPoses))
    self.l1RegulMat = np.dot(l1RegulMat.T, l1RegulMat)
    self.compute_A_mat()
def compute_A_mat(self):
    """Assemble the constant normal-equations matrix A for the bounded solve.

    A = D^T D + L2 term + temporal term + L1 surrogate + symmetry term, where
    D is the active delta matrix.  Must be re-run whenever the active pose set
    or any regularisation coefficient changes.
    """
    self.A = (
        np.dot(self.delta_mat.T, self.delta_mat)
        + self.weightRegulCoeff * self.weightRegulCoeff_scale * np.eye(self.numPoses)
        + self.prevRegulCoeff * self.prevRegulCoeff_scale * np.eye(self.numPoses)
        + self.sparseRegulCoeff ** 2 * self.sparseRegulCoeff_scale * self.l1RegulMat
        + self.symmetryRegulCoeff * self.symmetryRegulCoeff_scale * self.symShapeMat
    )
    # lsq_linear expects float64.
    self.A = self.A.astype(np.float64)
def set_activePoses(self, activePosesBool):
    """Restrict the solve to the poses flagged True and rebuild dependent state."""
    self.activePosesBool = activePosesBool
    # 1 - simple approach
    # self.ub *= np.array(self.activePosesBool)
    # 2- less computation way
    # Compact the delta matrix and bounds to active columns only.
    self.delta_mat = self.delta_mat_orig[:, self.activePosesBool]
    self.numPoses = self.delta_mat.shape[1]
    self.lb = self.lb_orig[self.activePosesBool]
    self.ub = self.ub_orig[self.activePosesBool]
    self.prevWeights = np.zeros(self.numPoses)
    # Rebuild original-index -> active-index map (-1 marks inactive poses).
    self.activeIdxMap = []
    cnt = 0
    for idx in range(self.numPoses_orig):
        if self.activePosesBool[idx]:
            self.activeIdxMap.append(cnt)
            cnt += 1
        else:
            self.activeIdxMap.append(-1)
    # update L1 regularization mat
    l1RegulMat = np.ones((1, self.numPoses))
    self.l1RegulMat = np.dot(l1RegulMat.T, l1RegulMat)
    # update cancel pair index
    self.set_cancelPoses(self.cancelPoseIndices)
    # update symmetry pair index
    self.set_symmetryPoses(self.symmetryPoseIndices)  # update self.A here
def set_cancelPoses(self, cancelPoseIndices):
    """Register mutually-cancelling pose pairs.

    cancelPoseIndices assigns each pose a cancel-group id (-1 = none); each
    group must contain exactly two poses.  The stored cancelList uses active
    (compacted) indices.
    """
    self.cancelPoseIndices = cancelPoseIndices
    # filter out cancel shapes
    self.cancelList = []
    maxIdx = np.max(self.cancelPoseIndices)
    # No groups defined at all.
    if maxIdx < 0:
        return
    for ci in range(maxIdx + 1):
        cancelIndices = np.where(self.cancelPoseIndices == ci)[0]
        if len(cancelIndices) > 2:
            log_warn("There is more than 2 poses for a cancel index %d" % ci)
            break
        elif len(cancelIndices) < 2:
            log_warn("There is less than 2 poses for a cancel index %d" % ci)
            break
        self.cancelList.append(cancelIndices)
    # print ('cancel shape list', self.cancelList)
    # Keep only pairs where both poses are active, remapped to active indices.
    activeCancelList = []
    for pIdx1, pIdx2 in self.cancelList:
        if self.activePosesBool[pIdx1] and self.activePosesBool[pIdx2]:
            activeCancelList.append([self.activeIdxMap[pIdx1], self.activeIdxMap[pIdx2]])
    # print (activeCancelList)
    self.cancelList = activeCancelList
def set_symmetryPoses(self, symmetryPoseIndices):
    """Register symmetric pose pairs and rebuild the symmetry penalty matrix.

    symmetryPoseIndices assigns each pose a symmetry-group id (-1 = none);
    each group must contain exactly two poses.  Ends by recomputing A.
    """
    self.symmetryPoseIndices = symmetryPoseIndices
    self.symmetryList = []
    maxIdx = np.max(self.symmetryPoseIndices)
    if maxIdx < 0:
        # No pairs: zero penalty matrix.
        self.symShapeMat = np.zeros((self.numPoses, self.numPoses))
    else:
        for ci in range(maxIdx + 1):
            symmetryIndices = np.where(self.symmetryPoseIndices == ci)[0]
            if len(symmetryIndices) > 2:
                log_warn("There is more than 2 poses for a cancel index %d" % ci)
                break
            elif len(symmetryIndices) < 2:
                log_warn("There is less than 2 poses for a cancel index %d" % ci)
                break
            self.symmetryList.append(symmetryIndices)
        # Keep only pairs with both poses active, remapped to active indices.
        activeSymmetryList = []
        for pIdx1, pIdx2 in self.symmetryList:
            if self.activePosesBool[pIdx1] and self.activePosesBool[pIdx2]:
                activeSymmetryList.append([self.activeIdxMap[pIdx1], self.activeIdxMap[pIdx2]])
        self.symmetryList = activeSymmetryList
        # Each row penalises the difference (w1 - w2) of a symmetric pair.
        symShapeMat = np.zeros((len(self.symmetryList), self.numPoses))
        for si, [pose1Idx, pose2Idx] in enumerate(self.symmetryList):
            symShapeMat[si, pose1Idx] = 1.0
            symShapeMat[si, pose2Idx] = -1.0
        self.symShapeMat = np.dot(symShapeMat.T, symShapeMat)
    self.compute_A_mat()
def set_l2_regularization(self, L2=3.5):
    # Penalises large weights overall; rebuilds A.
    self.weightRegulCoeff = L2
    self.compute_A_mat()
def set_tempo_regularization(self, temporal=3.5):
    # Penalises deviation from the previous frame's weights (temporal smoothing).
    self.prevRegulCoeff = temporal
    self.compute_A_mat()
def set_l1_regularization(self, L1=1.0):
    # Encourages sparse weight vectors (few simultaneously-active poses).
    self.sparseRegulCoeff = L1
    self.compute_A_mat()
def set_symmetry_regularization(self, value=1.0):
    # Penalises asymmetric activation of paired left/right poses.
    self.symmetryRegulCoeff = value
    self.compute_A_mat()
def computeFacsWeights(self, point_mat):
target_delta_mat = point_mat - self.neutral_mat
B = (
np.dot(self.delta_mat.T, target_delta_mat).flatten()
+ self.prevRegulCoeff * self.prevRegulCoeff_scale * self.prevWeights
)
B = B.astype(np.float64)
res = lsq_linear(self.A, B, bounds=(self.lb, self.ub), lsmr_tol="auto", verbose=0, method="bvls")
# print ('first pass:', res.x)
if len(self.cancelList) > 0:
# check cancelling poses -
ub = self.ub.copy()
lb = self.lb.copy()
for pose1Idx, pose2Idx in self.cancelList:
if res.x[pose1Idx] >= res.x[pose2Idx]:
ub[pose2Idx] = 1e-10
else:
ub[pose1Idx] = 1e-10
res = lsq_linear(self.A, B, bounds=(lb, ub), lsmr_tol="auto", verbose=0, method="bvls")
self.prevWeights = res.x
# print ('second pass:', res.x)
outWeight = np.zeros(self.numPoses_orig)
outWeight[self.activePosesBool] = res.x
outWeight = outWeight * (outWeight > 1.0e-9)
# print (outWeight)
blend = ["eyeBlinkLeft", "eyeLookDownLeft", "eyeLookInLeft", "eyeLookOutLeft", "eyeLookUpLeft", "eyeSquintLeft", "eyeWideLeft", "eyeBlinkRight", "eyeLookDownRight", "eyeLookInRight", "eyeLookOutRight", "eyeLookUpRight", "eyeSquintRight", "eyeWideRight", "jawForward", "jawLeft", "jawRight", "jawOpen", "mouthClose", "mouthFunnel", "mouthPucker", "mouthLeft", "mouthRight", "mouthSmileLeft", "mouthSmileRight", "mouthFrownLeft", "mouthFrownRight", "mouthDimpleLeft", "mouthDimpleRight", "mouthStretchLeft", "mouthStretchRight", "mouthRollLower", "mouthRollUpper", "mouthShrugLower", "mouthShrugUpper", "mouthPressLeft", "mouthPressRight", "mouthLowerDownLeft", "mouthLowerDownRight", "mouthUpperUpLeft", "mouthUpperUpRight", "browDownLeft", "browDownRight", "browInnerUp", "browOuterUpLeft", "browOuterUpRight", "cheekPuff", "cheekSquintLeft", "cheekSquintRight", "noseSneerLeft", "noseSneerRight", "tongueOut"]
try:
client = udp_client.SimpleUDPClient('127.0.0.1', 27008)
osc_array = outWeight.tolist()
count = 0
for i in osc_array:
client.send_message('/' + str(blend[count]), i)
count += 1
except Exception as e:
log_error(f"Error in OSC communication: {e}") | 8,708 | Python | 41.276699 | 918 | 0.614378 |
matthias-research/omni.fun/README.md | # omni.fun
A simple plugin for NVIDIA's Omniverse
| 50 | Markdown | 15.999995 | 38 | 0.78 |
matthias-research/omni.fun/exts/omni.fun/config/extension.toml |
[package]
# Semantic Versioning is used: https://semver.org/
version = "0.1.0"
authors = ["Ten Minute Physics"]
title = "Fun"
description="Ten Minute Physics Omniverse extension"
readme = "docs/README.md"
repository="https://github.com/matthias-research/omni.fun"
category = "sim"
keywords = ["simulation"]
changelog="docs/CHANGELOG.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
# Watch the .ogn files for hot reloading (only works for Python files)
[fswatcher.patterns]
include = ["*.ogn", "*.py"]
exclude = ["Ogn*Database.py", "*/ogn*"]
[dependencies]
"omni.kit.test" = {}
"omni.kit.menu.utils" = {}
"omni.timeline" = {}
"omni.usd" = {}
# Main python module this extension provides, it will be publicly available as "import omni.play".
[[python.module]]
name = "omni.fun"
| 797 | TOML | 24.741935 | 98 | 0.697616 |
matthias-research/omni.fun/exts/omni.fun/config/extension.gen.toml | [package]
[package.target]
python = ["cp37"]
[package.publish]
date = 1635811509
kitVersion = "103.0+master.0.75457a67.gitlab"
| 127 | TOML | 17.285712 | 45 | 0.732283 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/sim.py | # Copyright 2022 Matthias Müller - Ten Minute Physics,
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import math
import warp as wp
from pxr import Usd, UsdGeom, Gf, Sdf
from .usdutils import *
gravity = -9.81
@wp.struct
class SimData:
    # Device-side simulation state shared by the kernels below.
    # --- per-sphere rigid-body state ---
    sphere_radius: wp.array(dtype=float)
    sphere_mass: wp.array(dtype=float)
    sphere_pos: wp.array(dtype=wp.vec3)
    sphere_rot: wp.array(dtype=wp.quat)
    sphere_lin_vel: wp.array(dtype=wp.vec3)
    sphere_ang_vel: wp.array(dtype=wp.vec3)
    # --- accumulated collision corrections (applied after summation) ---
    sphere_pos_corr: wp.array(dtype=wp.vec3)
    sphere_lin_corr: wp.array(dtype=wp.vec3)
    sphere_ang_corr: wp.array(dtype=wp.vec3)
    sphere_num_corr: wp.array(dtype=int)
    # --- per-sphere AABBs feeding the sphere BVH ---
    sphere_lower_bounds: wp.array(dtype=wp.vec3)
    sphere_upper_bounds: wp.array(dtype=wp.vec3)
    sphere_bvh_id: wp.uint64
    # --- collision-object mesh data ---
    obj_mesh_id: wp.uint64
    obj_tri_ids: wp.array(dtype=int)
    obj_orig_pos: wp.array(dtype=wp.vec3)
    obj_pos: wp.array(dtype=wp.vec3)
    obj_prev_pos: wp.array(dtype=wp.vec3)
    # NOTE(review): presumably maps mesh points to their driving transform in
    # obj_transforms — confirm against the (unseen) update code.
    obj_transforms: wp.array(dtype=wp.mat44)
    obj_pos_transform_nr: wp.array(dtype=int)
@wp.kernel
def dev_integrate(
        dt: float,
        gravity: wp.vec3,
        bounds_margin: float,
        sim: SimData):
    # One explicit (symplectic Euler) time step for a single sphere, followed
    # by an update of its swept AABB used to (re)build the sphere BVH.
    sphere_nr = wp.tid()
    pos = sim.sphere_pos[sphere_nr]
    lin_vel = sim.sphere_lin_vel[sphere_nr]
    rot = sim.sphere_rot[sphere_nr]
    ang_vel = sim.sphere_ang_vel[sphere_nr]
    # move state forward in time
    lin_vel = lin_vel + gravity * dt
    pos = pos + lin_vel * dt
    # quaternion integration: dq/dt = 0.5 * (omega, 0) * q, then re-normalize
    qt = wp.quat(ang_vel[0], ang_vel[1], ang_vel[2], 0.0) * (dt * 0.5)
    rot = wp.normalize(rot + qt * rot)
    sim.sphere_pos[sphere_nr] = pos
    sim.sphere_lin_vel[sphere_nr] = lin_vel
    sim.sphere_rot[sphere_nr] = rot
    # compute bounding box for bvh: cover the current and the predicted
    # position, inflated by the radius plus a safety margin
    pred_pos = pos + lin_vel * dt
    lower = wp.vec3(wp.min(pos[0], pred_pos[0]), wp.min(pos[1], pred_pos[1]), wp.min(pos[2], pred_pos[2]))
    upper = wp.vec3(wp.max(pos[0], pred_pos[0]), wp.max(pos[1], pred_pos[1]), wp.max(pos[2], pred_pos[2]))
    m = bounds_margin + sim.sphere_radius[sphere_nr]
    sim.sphere_lower_bounds[sphere_nr] = lower - wp.vec3(m, m, m)
    sim.sphere_upper_bounds[sphere_nr] = upper + wp.vec3(m, m, m)
@wp.kernel
def dev_handle_sphere_sphere_collisions(
        restitution: float,
        sim: SimData):
    # Resolve sphere-sphere contacts. Each thread handles one sphere, queries
    # the BVH for overlap candidates and accumulates position / linear /
    # angular corrections atomically (Jacobi style); they are averaged and
    # applied later by dev_apply_corrections.
    sphere0 = wp.tid()
    eps = 0.00001
    pos0 = sim.sphere_pos[sphere0]
    radius0 = sim.sphere_radius[sphere0]
    m0 = sim.sphere_mass[sphere0]
    w0 = 1.0 / (m0 + eps)
    vel0 = sim.sphere_lin_vel[sphere0]   # fixed: was sim.lin_vel (field does not exist)
    ang0 = sim.sphere_ang_vel[sphere0]   # fixed: was sim.ang_vel
    lower = sim.sphere_lower_bounds[sphere0]
    upper = sim.sphere_upper_bounds[sphere0]
    query = wp.bvh_query_aabb(sim.sphere_bvh_id, lower, upper)  # fixed: was sim.spheres_bvh_id
    sphere1 = int(0)
    while (wp.bvh_query_next(query, sphere1)):
        if sphere1 < sphere0:  # handle each pair only once!
            pos1 = sim.sphere_pos[sphere1]
            radius1 = sim.sphere_radius[sphere1]
            m1 = sim.sphere_mass[sphere1]
            w1 = 1.0 / (m1 + eps)
            vel1 = sim.sphere_lin_vel[sphere1]  # fixed: was sim.lin_vel
            ang1 = sim.sphere_ang_vel[sphere1]  # fixed: was sim.ang_vel
            min_dist = radius0 + radius1
            pos_normal = wp.normalize(pos1 - pos0)
            dist = wp.dot(pos_normal, pos1 - pos0)
            if dist < min_dist:
                # positional correction: push the spheres apart along the
                # contact normal, weighted by inverse masses
                wp.atomic_add(sim.sphere_num_corr, sphere0, 1)
                wp.atomic_add(sim.sphere_num_corr, sphere1, 1)
                pos_corr = pos_normal / (w0 + w1) * (min_dist - dist + eps)
                wp.atomic_add(sim.sphere_pos_corr, sphere0, -w0 * pos_corr)  # fixed: was sim.pos_corr
                wp.atomic_add(sim.sphere_pos_corr, sphere1, +w1 * pos_corr)  # fixed: was sim.pos_corr
                # 1d elastic collision with restitution along the normal;
                # clamped so the pair only ever separates
                vn0 = wp.dot(vel0, pos_normal)
                vn1 = wp.dot(vel1, pos_normal)
                new_vn0 = (m0 * vn0 + m1 * vn1 - m1 * (vn0 - vn1) * restitution) / (m0 + m1)
                new_vn1 = (m0 * vn0 + m1 * vn1 - m0 * (vn1 - vn0) * restitution) / (m0 + m1)
                new_vn0 = wp.min(0.0, new_vn0)
                new_vn1 = wp.max(0.0, new_vn1)
                lin_corr0 = pos_normal * (new_vn0 - vn0)
                lin_corr1 = pos_normal * (new_vn1 - vn1)
                wp.atomic_add(sim.sphere_lin_corr, sphere0, lin_corr0)
                wp.atomic_add(sim.sphere_lin_corr, sphere1, lin_corr1)
                vel0 = vel0 + lin_corr0
                vel1 = vel1 + lin_corr1
                # friction / rolling: couple the spins about a common tangential
                # axis. NOTE(review): ang_normal is NaN when both spheres have
                # zero spin — confirm whether that can occur after startup.
                ang_normal = wp.normalize(ang0 * m0 + ang1 * m1)
                ang_normal = wp.normalize(ang_normal - pos_normal * wp.dot(pos_normal, ang_normal))  # fixed: wp.nomralize typo
                vt0 = wp.dot(vel0, wp.cross(ang_normal, pos_normal))
                vt1 = wp.dot(vel1, wp.cross(ang_normal, pos_normal))
                omega0 = wp.dot(ang0, ang_normal)
                omega1 = wp.dot(ang1, ang_normal)
                # v0 + (o0 - do*w0) * r0 = v1 + (o1 + do*w1) * r1
                domega = (vt1 + omega1 * radius1 - vt0 - omega0 * radius0) / (radius0 * w0 + radius1 * w1)
                ang_corr0 = ang_normal * (omega0 - domega * w0) - ang0
                ang_corr1 = ang_normal * (omega1 + domega * w1) - ang1
                ang0 = ang0 + ang_corr0
                ang1 = ang1 + ang_corr1
                wp.atomic_add(sim.sphere_ang_corr, sphere0, ang_corr0)
                wp.atomic_add(sim.sphere_ang_corr, sphere1, ang_corr1)
@wp.kernel
def dev_update_obj_pos(sim: SimData):
    # Advance one obstacle vertex: apply its object's current world transform
    # to the rest-pose position, keeping the old position for velocity
    # estimation in the sphere-obstacle collision kernel.
    id = wp.tid()
    trans_nr = sim.obj_pos_transform_nr[id]          # fixed: was sim.pos_transform_nr
    # fixed: mat44 * vec3 is not a warp operator; transform the point explicitly
    pos = wp.transform_point(sim.obj_transforms[trans_nr], sim.obj_orig_pos[id])
    sim.obj_prev_pos[id] = sim.obj_pos[id]           # fixed: was sim.prev_pos / sim.pos
    sim.obj_pos[id] = pos
@wp.kernel
def dev_handle_sphere_obj_collisions(
        dt: float,
        restitution: float,
        sim: SimData):
    # Collide one sphere against the (possibly moving) obstacle mesh: project
    # the sphere out of the surface, apply a restitution bounce along the
    # contact normal and a rolling/friction correction about the contact axis.
    sphere_nr = wp.tid()
    pos = sim.sphere_pos[sphere_nr]
    radius = sim.sphere_radius[sphere_nr]
    vel = sim.sphere_lin_vel[sphere_nr]   # fixed: was sim.lin_vel (field does not exist)
    ang = sim.sphere_ang_vel[sphere_nr]   # fixed: was sim.ang_vel
    inside = float(0.0)
    face_nr = int(0)
    u = float(0.0)
    v = float(0.0)
    found = wp.mesh_query_point(sim.obj_mesh_id, pos, radius, inside, face_nr, u, v)
    if not found:
        return
    id0 = sim.obj_tri_ids[3 * face_nr]
    id1 = sim.obj_tri_ids[3 * face_nr + 1]
    id2 = sim.obj_tri_ids[3 * face_nr + 2]
    p0 = sim.obj_pos[id0]
    p1 = sim.obj_pos[id1]
    p2 = sim.obj_pos[id2]
    # closest point on the hit triangle
    # NOTE(review): assumes (u, v) weight p0/p1 — confirm against the warp
    # mesh_query_point barycentric convention
    closest = u * p0 + v * p1 + (1.0 - u - v) * p2
    pos_normal = wp.normalize(pos - closest)
    dist = wp.dot(pos_normal, pos - closest)
    if dist >= radius:
        return
    # project the sphere out of the surface
    sim.sphere_pos[sphere_nr] = pos - pos_normal * (radius - dist)
    # mesh velocity at the contact, estimated from the previous frame
    v0 = (p0 - sim.obj_prev_pos[id0]) / dt   # fixed: was sim.mesh_prev_points
    v1 = (p1 - sim.obj_prev_pos[id1]) / dt
    v2 = (p2 - sim.obj_prev_pos[id2]) / dt
    # fixed: removed a dead duplicate assignment; keep the form whose weights
    # match the closest-point barycentrics
    v_mesh = u * v0 + v * v1 + (1.0 - u - v) * v2
    # bounce along the contact normal (relative to the moving mesh)
    vn_sphere = wp.dot(vel, pos_normal)
    vn_mesh = wp.dot(v_mesh, pos_normal)
    new_vn = wp.min(vn_mesh - (vn_sphere - vn_mesh) * restitution, 0.0)
    vel = vel + pos_normal * (new_vn - vn_sphere)    # fixed: used undefined name `v`
    sim.sphere_lin_vel[sphere_nr] = vel
    # friction / rolling about the tangential spin axis
    ang_normal = wp.normalize(ang)
    # fixed: projected `ang` instead of `ang_normal`, and wp.nomralize typo
    ang_normal = wp.normalize(ang_normal - pos_normal * wp.dot(pos_normal, ang_normal))
    vt = wp.dot(vel, wp.cross(ang_normal, pos_normal))
    # fixed: v_mesh is a vec3 — compare tangential components, not the vector
    vt_mesh = wp.dot(v_mesh, wp.cross(ang_normal, pos_normal))
    omega = wp.dot(ang, ang_normal)
    # rolling constraint: vt + (omega - domega) * radius = vt_mesh
    domega = (vt + omega * radius - vt_mesh) / radius
    ang = ang + ang_normal * (omega - domega)
    sim.sphere_ang_vel[sphere_nr] = ang
@wp.kernel
def dev_apply_corrections(
        sim: SimData):
    # Average the accumulated Jacobi corrections over the number of contacts
    # a sphere participated in, then apply them to its state.
    sphere_nr = wp.tid()
    num = sim.sphere_num_corr[sphere_nr]
    if num > 0:
        s = 1.0 / float(num)
        sim.sphere_pos[sphere_nr] += sim.sphere_pos_corr[sphere_nr] * s
        sim.sphere_lin_vel[sphere_nr] += sim.sphere_lin_corr[sphere_nr] * s
        sim.sphere_ang_vel[sphere_nr] += sim.sphere_ang_corr[sphere_nr] * s
class Sim():
    """CPU-side driver of the sphere/obstacle simulation.

    Scans the USD stage for meshes, treats meshes whose name starts with
    'sphere'/'Sphere' as dynamic spheres and everything else as obstacle
    geometry, uploads the data to warp and steps the simulation per frame.
    """

    def __init__(self, stage):
        self.paused = True
        self.stage = stage
        self.device = 'cuda'
        self.prim_cache = UsdGeom.XformCache()

        self.dev_sim_data = SimData()
        self.host_sim_data = SimData()
        self.sphere_bvh = None            # fixed: was declared as spheres_bvh but used as sphere_bvh
        self.obj_mesh = None
        self.sphere_usd_meshes = []
        self.obj_usd_prims = []           # fixed: was also referenced as object_usd_prims
        self.obj_usd_transforms = []
        self.initialized = False
        self.time_step = 1.0 / 30.0
        self.num_substeps = 5             # NOTE(review): currently unused by simulate()
        self.restitution = 0.1
        self.jacobi_scale = 0.25
        self.bounds_margin = 0.1          # safety margin added to the sphere AABBs
        self.num_spheres = 0
        self.frame_nr = 0

    def init(self):
        """Scan the stage, build sphere / obstacle buffers and upload them to warp."""
        if not self.stage:
            return

        obj_pos = []
        obj_pos_transform_nr = []
        obj_tri_ids = []
        sphere_pos = []
        sphere_radius = []
        sphere_mass = []
        self.sphere_usd_meshes = []
        self.obj_usd_prims = []

        s = 4.0 / 3.0 * 3.141592  # sphere volume factor (unit density assumed)

        print("traversing stage")

        for prim in self.stage.Traverse():
            if prim.GetTypeName() != "Mesh":
                continue
            mesh = UsdGeom.Mesh(prim)
            name = mesh.GetName()
            # fixed: mesh_points was used before it was assigned
            mesh_points = np.array(mesh.GetPointsAttr().Get(0.0))

            # fixed: the original condition (find != 0 or find != 0) was always
            # true, so every mesh became a sphere
            if name.startswith("sphere") or name.startswith("Sphere"):
                # dynamic sphere: radius from the world-space bounding box
                trans_mat, trans_t = get_global_transform(prim, 0.0, False)
                trans_points = mesh_points @ trans_mat
                lo = np.min(trans_points, axis=0)
                hi = np.max(trans_points, axis=0)
                radius = np.max(hi - lo) * 0.5
                sphere_radius.append(radius)
                sphere_pos.append(trans_t)
                sphere_mass.append(s * radius * radius * radius)

                # simulate a hidden world-space clone so the original prim stays intact
                clone = clone_prim(self.stage, prim)
                self.sphere_usd_meshes.append(UsdGeom.Mesh(clone))
            else:
                # obstacle: collect transformed-vertex bookkeeping and triangles
                obj_nr = len(self.obj_usd_prims)
                self.obj_usd_prims.append(prim)

                first_pos = len(obj_pos)
                for p in mesh_points:
                    obj_pos.append(wp.vec3(*p))
                    obj_pos_transform_nr.append(obj_nr)

                # fan-triangulate each polygon
                poly_indices = mesh.GetFaceVertexIndicesAttr().Get(0.0)
                face_sizes = mesh.GetFaceVertexCountsAttr().Get(0.0)
                first_index = 0
                for face_size in face_sizes:
                    for j in range(1, face_size - 1):
                        obj_tri_ids.append(first_pos + poly_indices[first_index])
                        obj_tri_ids.append(first_pos + poly_indices[first_index + j])
                        obj_tri_ids.append(first_pos + poly_indices[first_index + j + 1])
                    first_index += face_size

        # create objects warp buffers
        if len(obj_pos) > 0:
            self.dev_sim_data.obj_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.obj_prev_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)  # fixed: pbj_prev_pos typo
            self.dev_sim_data.obj_orig_pos = wp.array(obj_pos, dtype=wp.vec3, device=self.device)
            self.dev_sim_data.obj_pos_transform_nr = wp.array(obj_pos_transform_nr, dtype=int, device=self.device)
            self.dev_sim_data.obj_tri_ids = wp.array(obj_tri_ids, dtype=int, device=self.device)
            self.obj_mesh = wp.Mesh(self.dev_sim_data.obj_pos, self.dev_sim_data.obj_tri_ids)
            self.dev_sim_data.obj_mesh_id = self.obj_mesh.id

            num_objs = len(self.obj_usd_prims)
            self.obj_transforms = np.array([wp.mat44()] * num_objs)
            self.dev_sim_data.obj_transforms = wp.zeros(shape=(num_objs), dtype=wp.mat44, device=self.device)

        # create sphere warp buffers
        self.num_spheres = len(sphere_pos)
        if self.num_spheres > 0:
            n = self.num_spheres
            self.dev_sim_data.sphere_radius = wp.array(sphere_radius, dtype=float, device=self.device)
            self.dev_sim_data.sphere_mass = wp.array(sphere_mass, dtype=float, device=self.device)  # fixed: masses were never uploaded
            self.dev_sim_data.sphere_pos = wp.array(sphere_pos, dtype=wp.vec3, device=self.device)
            # fixed: buffer names now match SimData; rotations start as identity
            # quaternions (a zero quat would NaN in dev_integrate's normalize)
            self.dev_sim_data.sphere_rot = wp.array([wp.quat_identity()] * n, dtype=wp.quat, device=self.device)
            self.dev_sim_data.sphere_lin_vel = wp.zeros(shape=(n), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_ang_vel = wp.zeros(shape=(n), dtype=wp.vec3, device=self.device)
            # fixed: correction buffers used by the collision kernels were never allocated
            self.dev_sim_data.sphere_pos_corr = wp.zeros(shape=(n), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_lin_corr = wp.zeros(shape=(n), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_ang_corr = wp.zeros(shape=(n), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_num_corr = wp.zeros(shape=(n), dtype=int, device=self.device)
            self.dev_sim_data.sphere_lower_bounds = wp.zeros(shape=(n), dtype=wp.vec3, device=self.device)
            self.dev_sim_data.sphere_upper_bounds = wp.zeros(shape=(n), dtype=wp.vec3, device=self.device)

            # host-side staging buffers for the USD write-back
            self.host_sim_data.sphere_pos = wp.array(sphere_pos, dtype=wp.vec3, device="cpu")
            self.host_sim_data.sphere_rot = wp.zeros(shape=(n), dtype=wp.quat, device="cpu")

            # zero time step to initialize sphere bounds for the bvh
            # (fixed: kernel reference and missing bounds_margin argument)
            wp.launch(kernel=dev_integrate,
                inputs=[0.0, wp.vec3(0.0, 0.0, 0.0), self.bounds_margin, self.dev_sim_data],
                dim=n, device=self.device)

            self.sphere_bvh = wp.Bvh(self.dev_sim_data.sphere_lower_bounds, self.dev_sim_data.sphere_upper_bounds)
            self.dev_sim_data.sphere_bvh_id = self.sphere_bvh.id

    def simulate(self):
        """Advance the simulation by one frame and write results back to USD."""
        if self.paused or self.num_spheres == 0:
            return
        self.frame_nr += 1
        # fixed: removed a leftover debug print + `return` that made the rest
        # of this method unreachable

        # update obstacles from their (possibly animated) USD transforms
        if self.obj_mesh is not None:
            for i in range(len(self.obj_usd_prims)):
                self.obj_transforms[i] = get_global_transform(self.obj_usd_prims[i], 0.0, True)
            # fixed: dtype was wp.array(wp.mat44)
            wp.copy(self.dev_sim_data.obj_transforms,
                wp.array(self.obj_transforms, dtype=wp.mat44, copy=False, device="cpu"))
            wp.launch(kernel=dev_update_obj_pos,
                inputs=[self.dev_sim_data],
                dim=len(self.dev_sim_data.obj_pos), device=self.device)
            self.obj_mesh.refit()

        # simulate spheres (fixed: self.gravity was undefined; gravity acts
        # along -y — TODO confirm the stage's up axis)
        wp.launch(kernel=dev_integrate,
            inputs=[self.time_step, wp.vec3(0.0, gravity, 0.0), self.bounds_margin, self.dev_sim_data],
            dim=self.num_spheres, device=self.device)

        self.sphere_bvh.refit()

        self.dev_sim_data.sphere_pos_corr.zero_()
        self.dev_sim_data.sphere_lin_corr.zero_()
        self.dev_sim_data.sphere_ang_corr.zero_()
        self.dev_sim_data.sphere_num_corr.zero_()

        wp.launch(kernel=dev_handle_sphere_sphere_collisions,
            inputs=[self.restitution, self.dev_sim_data],
            dim=self.num_spheres, device=self.device)

        wp.launch(kernel=dev_apply_corrections,
            inputs=[self.dev_sim_data],
            dim=self.num_spheres, device=self.device)

        if self.obj_mesh is not None:
            wp.launch(kernel=dev_handle_sphere_obj_collisions,
                inputs=[self.time_step, self.restitution, self.dev_sim_data],
                dim=self.num_spheres, device=self.device)

        # update stage: copy state to the host and move the sphere clones
        wp.copy(self.host_sim_data.sphere_pos, self.dev_sim_data.sphere_pos)
        wp.copy(self.host_sim_data.sphere_rot, self.dev_sim_data.sphere_rot)
        pos = self.host_sim_data.sphere_pos.numpy()   # fixed: .numpy() was called on the struct
        quat = self.host_sim_data.sphere_rot.numpy()
        for i in range(self.num_spheres):
            # fixed: the whole mesh list was passed instead of element i
            set_transform(self.sphere_usd_meshes[i], pos[i], quat[i])

    def reset(self):
        """Hide the simulation clones, restore the original prims and pause."""
        hide_clones(self.stage)
        self.paused = True
| 16,580 | Python | 34.734914 | 462 | 0.5769 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/extension.py | # Copyright 2022 Matthias Müller - Ten Minute Physics,
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import omni.ext
import os
import omni.usd
from omni import ui
from pxr import Usd
from .controls import ControlsWindow
from .sim import Sim
EXAMPLES_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../data/scenes"))
class OmniFunExtension(omni.ext.IExt):
    """Kit extension entry point: wires the controls window, menu items,
    stage events and per-frame update to the Sim instance."""

    def on_startup(self, ext_id):
        # Called by Kit when the extension loads: create the sim for the
        # current stage and register the menu entries.
        print("fun on_startup")
        setattr(self, "controls", None)
        setattr(self, "sim", None)
        stage = omni.usd.get_context().get_stage()
        self.sim = Sim(stage)
        self.sim.init()
        editor_menu = omni.kit.ui.get_editor_menu()
        self.menu_items = []
        if editor_menu:
            self.controls_menu = editor_menu.add_item(
                f"Window/Fun/Controls",
                lambda _, value: self.show_controls(value),
                toggle=True, value=False
            )
            self.menu_items.append(editor_menu.add_item(
                f"Window/Fun/SimpleScene",
                lambda _, value: self.load_example("simple.usd"),
                toggle=False, value=False
            ))
        # self.show_controls(True)
        # set callbacks
        self.update_event_stream = omni.kit.app.get_app_interface().get_update_event_stream()
        self.stage_event_sub = omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(self.on_event)

    def on_shutdown(self):
        # Release menu items and subscriptions, stop the sim and close the UI.
        print("fun on_shutdown")
        self.menu_items = None
        self.update_event_stream = None
        self.stage_event_sub = None
        if self.sim:
            self.sim.reset()
        self.show_controls(False)

    def init_callback(self, state):
        # Controls window "Init/Reset" button.
        # NOTE(review): `if self.sim:` only re-creates the sim when one already
        # exists — confirm whether a fresh Sim should be created unconditionally
        # when state is True.
        if state:
            stage = omni.usd.get_context().get_stage()
            if self.sim:
                self.sim = Sim(stage)
            # subscribe to per-frame updates while initialized
            self.update_event_sub = self.update_event_stream.create_subscription_to_pop(self.on_update)
        else:
            if self.sim:
                self.sim.reset()
            self.sim = None

    def play_callback(self, state):
        # Controls window "Play/Pause" button.
        if self.sim:
            self.sim.paused = not state

    def on_update(self, dt):
        # Per-frame Kit update tick; advances the simulation.
        if self.sim:
            self.sim.simulate()

    def set_controls_menu(self, visible):
        # Keep the menu checkbox in sync with the window visibility.
        omni.kit.ui.get_editor_menu().set_value(f"Window/Fun/Controls", visible)

    def show_controls(self, is_visible):
        # Create/show or destroy the controls window.
        if is_visible:
            if not hasattr(self, "controls"):
                setattr(self, "controls", None)
            if self.controls is None:
                self.controls = ControlsWindow(
                    init_callback=self.init_callback,
                    play_callback=self.play_callback)
                self.controls.create_window(lambda visible: self.set_controls_menu(visible))
                self.controls.show_window()
            else:
                self.controls.show_window()
        elif self.controls:
            self.controls.destroy_window()
            self.controls = None

    def on_event(self, event):
        # Stage lifecycle events: reset on close, re-scan on open.
        if event.type == int(omni.usd.StageEventType.CLOSED):
            if self.sim:
                self.sim.reset()
        if event.type == int(omni.usd.StageEventType.OPENED):
            if self.sim:
                self.sim.init()

    def load_example(self, scene_name):
        # Open one of the bundled example stages (prompting to save first).
        # NOTE(review): relies on omni.kit.window.file being importable via the
        # omni namespace — no explicit import is visible in this file.
        def new_stage():
            stage_path = os.path.normpath(os.path.join(EXAMPLES_PATH, scene_name))
            omni.usd.get_context().open_stage(stage_path)
            if self.sim:
                self.sim.init()
        omni.kit.window.file.prompt_if_unsaved_stage(new_stage)
| 4,788 | Python | 35.007519 | 462 | 0.618421 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/gpu.py | # Copyright 2022 Matthias Müller - Ten Minute Physics,
# https://www.youtube.com/c/TenMinutePhysics
# www.matthiasMueller.info/tenMinutePhysics
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import warp as wp
@wp.struct
class SimData:
    # GPU-side state for the simple PBD sphere solver in this module.
    # spheres_* arrays are indexed by sphere number; a sphere with
    # spheres_inv_mass == 0 is static.
    spheres_pos: wp.array(dtype=wp.vec3)
    spheres_prev_pos: wp.array(dtype=wp.vec3)   # positions before the integration step
    spheres_pos_corr: wp.array(dtype=wp.vec3)   # accumulated Jacobi position corrections
    spheres_vel: wp.array(dtype=wp.vec3)
    spheres_radius: wp.array(dtype=float)
    spheres_inv_mass: wp.array(dtype=float)
    # static collision mesh
    mesh_id: wp.uint64
    mesh_verts: wp.array(dtype=wp.vec3)
    mesh_tri_ids: wp.array(dtype=int)
@wp.func
def closest_point_on_triangle(
    p: wp.vec3, p0: wp.vec3, p1: wp.vec3, p2: wp.vec3):
    # Returns the point on triangle (p0, p1, p2) closest to p.
    # Fixed: the function previously returned the barycentric coordinates,
    # but its caller subtracts the result from a position — i.e. it expects
    # the closest point itself.
    e0 = p1 - p0
    e1 = p2 - p0
    tmp = p0 - p
    a = wp.dot(e0, e0)
    b = wp.dot(e0, e1)
    c = wp.dot(e1, e1)
    d = wp.dot(e0, tmp)
    e = wp.dot(e1, tmp)
    coords = wp.vec3(b*e - c*d, b*d - a*e, a*c - b*b)
    x = 0.0
    y = 0.0
    z = 0.0
    # region tests on the unnormalized barycentric coordinates
    # NOTE(review): not all edge regions are handled separately; the final
    # clamp approximates the remaining cases — confirm acceptable for contacts
    if coords[0] <= 0.0:
        if c != 0.0:
            y = -e / c
    elif coords[1] <= 0.0:
        if a != 0.0:
            x = -d / a
    elif coords[0] + coords[1] > coords[2]:
        den = a + c - b - b
        num = c + e - b - d
        if den != 0.0:
            x = num / den
            y = 1.0 - x
    else:
        if coords[2] != 0.0:
            x = coords[0] / coords[2]
            y = coords[1] / coords[2]
    x = wp.clamp(x, 0.0, 1.0)
    y = wp.clamp(y, 0.0, 1.0)
    bary = wp.vec3(1.0 - x - y, x, y)
    # convert barycentrics to the actual surface point
    return p0 * bary[0] + p1 * bary[1] + p2 * bary[2]
@wp.kernel
def dev_integrate_spheres(
    dt: float,
    gravity: wp.vec3,
    data: SimData):
    # Symplectic Euler step for one sphere; static spheres (inv mass 0) are skipped.
    sphere_nr = wp.tid()
    w = data.spheres_inv_mass[sphere_nr]
    if w > 0.0:
        data.spheres_vel[sphere_nr] += gravity * dt
        # remember the pre-step position; dev_update_spheres derives the new
        # velocity from the positional change (PBD style)
        data.spheres_prev_pos[sphere_nr] = data.spheres_pos[sphere_nr]
        data.spheres_pos[sphere_nr] += data.spheres_vel[sphere_nr] * dt
def integrate_spheres(num_spheres: int, dt: float, gravity: wp.vec3, data: SimData, device):
    """Launch the integration kernel over all spheres on the given device."""
    kernel_args = [dt, gravity, data]
    wp.launch(dev_integrate_spheres, dim=num_spheres, inputs=kernel_args, device=device)
@wp.kernel
def dev_update_spheres(
    dt: float,
    jacobi_scale: float,
    data: SimData):
    # Apply the accumulated, scaled position correction and derive the new
    # velocity from the positional change over the step (PBD style).
    sphere_nr = wp.tid()
    w = data.spheres_inv_mass[sphere_nr]
    if w > 0.0:
        # fixed: the correction array was added without the per-sphere index
        data.spheres_pos[sphere_nr] = data.spheres_pos[sphere_nr] + jacobi_scale * data.spheres_pos_corr[sphere_nr]
        data.spheres_vel[sphere_nr] = (data.spheres_pos[sphere_nr] - data.spheres_prev_pos[sphere_nr]) / dt
def update_spheres(num_spheres: int, dt: float, jacobi_scale: float, data: SimData, device):
    """Launch the correction/velocity-update kernel over all spheres."""
    kernel_args = [dt, jacobi_scale, data]
    wp.launch(dev_update_spheres, dim=num_spheres, inputs=kernel_args, device=device)
@wp.kernel
def dev_solve_mesh_collisions(
    data: SimData):
    # Push each dynamic sphere out of the static collision mesh.
    sphere_nr = wp.tid()
    w = data.spheres_inv_mass[sphere_nr]
    if w > 0.0:
        pos = data.spheres_pos[sphere_nr]
        r = data.spheres_radius[sphere_nr]
        # query bounding volume hierarchy with the sphere's AABB
        bounds_lower = pos - wp.vec3(r, r, r)
        bounds_upper = pos + wp.vec3(r, r, r)
        query = wp.mesh_query_aabb(data.mesh_id, bounds_lower, bounds_upper)
        tri_nr = int(0)
        while (wp.mesh_query_aabb_next(query, tri_nr)):
            p0 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr]]
            p1 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr + 1]]
            p2 = data.mesh_verts[data.mesh_tri_ids[3*tri_nr + 2]]
            # closest surface point on the candidate triangle
            hit = closest_point_on_triangle(pos, p0, p1, p2)
            n = pos - hit
            d = wp.length(n)
            if d < r:
                # project the sphere out along the contact normal
                n = wp.normalize(n)
                data.spheres_pos[sphere_nr] = data.spheres_pos[sphere_nr] + n * (r - d)
def solve_mesh_collisions(num_spheres: int, data: SimData, device):
    """Launch the sphere-vs-mesh collision kernel over all spheres."""
    wp.launch(dev_solve_mesh_collisions, dim=num_spheres, inputs=[data], device=device)
@wp.kernel
def dev_solve_sphere_collisions(
    num_spheres: int,
    data: SimData):
    # Accumulate pairwise separation corrections between overlapping spheres.
    # Corrections are applied later (scaled) by dev_update_spheres.
    i0 = wp.tid()
    p0 = data.spheres_pos[i0]
    r0 = data.spheres_radius[i0]
    w0 = data.spheres_inv_mass[i0]

    # simple O(n^2) collision detection: thread i0 checks all pairs with i1 > i0
    for i1 in range(num_spheres):
        if i1 > i0:
            p1 = data.spheres_pos[i1]
            r1 = data.spheres_radius[i1]
            w1 = data.spheres_inv_mass[i1]
            w = w0 + w1
            if w > 0.0:
                n = p1 - p0
                d = wp.length(n)
                # fixed: overlap test before normalizing (wp.noramlize typo) and
                # guard against coincident centers
                if d < r0 + r1 and d > 0.0:
                    n = wp.normalize(n)
                    corr = n * (r0 + r1 - d) / w
                    # fixed: field is spheres_pos_corr (spheres_corr does not
                    # exist), and i1 must move the opposite way, weighted by w1
                    data.spheres_pos_corr[i0] = data.spheres_pos_corr[i0] - corr * w0
                    data.spheres_pos_corr[i1] = data.spheres_pos_corr[i1] + corr * w1
def solve_sphere_collisions(num_spheres: int, data: SimData, device):
    """Launch the O(n^2) sphere-vs-sphere collision kernel."""
    kernel_args = [num_spheres, data]
    wp.launch(dev_solve_sphere_collisions, dim=num_spheres, inputs=kernel_args, device=device)
| 6,034 | Python | 32.342541 | 462 | 0.586841 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/controls.py | import carb
import omni.ui
import omni.usd
import omni.kit.app
from pxr import Usd, Sdf
from .sim import gravity
class ControlsWindow:
    """Dockable UI window with Init/Reset and Play/Pause buttons plus
    simulation parameter sliders."""

    def __init__(self, init_callback=None, play_callback=None):
        self._window = None
        # button state rows: [ui.Button, callback, toggled?, label_off, label_on]
        self.buttons = [
            [None, init_callback, False, "Init", "Reset"],
            [None, play_callback, False, "Play", "Pause"]]

    def __bool__(self):
        # truthy while the window object exists
        return self._window is not None

    def create_window(self, visibility_changed_fn):
        """Create the omni.ui window and build its widgets."""
        window_flags = omni.ui.WINDOW_FLAGS_NO_SCROLLBAR
        self._window = omni.ui.Window("Fun Controls", flags=window_flags, width=400, height=400, dockPreference=omni.ui.DockPreference.RIGHT_TOP)
        self._window.set_visibility_changed_fn(visibility_changed_fn)
        self.rebuild_ui()

    def show_window(self):
        self._window.visible = True

    def hide_window(self):
        self._window.visible = False

    def destroy_window(self):
        """Hide and destroy the window, dropping the reference."""
        if self._window:
            self._window.visible = False
            self._window.destroy()
            self._window = None

    def button_pressed(self, button):
        """Toggle a button's state, swap its label and invoke its callback."""
        state = not button[2]
        button[2] = state
        button[0].text = button[4] if state else button[3]
        button[1](state)

    def set_parameter(self, param_name, val):
        """Forward a UI parameter change to the sim module.

        Fixed: assigning to a bare local named `gravity` (imported via
        `from .sim import gravity`) was a no-op; write through the module
        attribute so the simulation actually sees the new value.
        """
        from . import sim
        if param_name == "gravity":
            sim.gravity = val

    def rebuild_ui(self):
        """(Re)build the window contents: one row of buttons, one slider row."""
        ui = omni.ui
        row_height = 20
        v_spacing = 10
        h_spacing = 20
        if self._window and self._window.visible:
            with self._window.frame:
                with ui.VStack(spacing=v_spacing, padding=50):
                    with ui.HStack(spacing=h_spacing, height=row_height):
                        for button in self.buttons:
                            button[0] = ui.Button(
                                button[3], width=100, height=15, margin=10,
                                clicked_fn=lambda button=button: self.button_pressed(button))
                    with ui.HStack(spacing=h_spacing, height=row_height):
                        ui.Label("Gravity", width=ui.Percent(50), height=10, name="Gravity")
                        slider = ui.FloatSlider(min=0.0, max=10.0, width=ui.Percent(50))
                        slider.model.add_value_changed_fn(
                            lambda val, param_name="gravity": self.set_parameter(param_name, val.get_value_as_float()))
| 2,487 | Python | 29.341463 | 145 | 0.554483 |
matthias-research/omni.fun/exts/omni.fun/omni/fun/scripts/usdutils.py | from pxr import Usd, UsdGeom, Gf, UsdShade
import numpy as np
import warp as wp
prim_cache = None
def get_global_transform(prim, time, return_mat44):
    """Return prim's local-to-world transform at `time`.

    If return_mat44 is True the result is a wp.mat44 (transposed copy of the
    row-vector USD matrix); otherwise a (3x3 numpy matrix, translation vector)
    pair is returned, to be used as `points @ A + b`.
    """
    # fixed: without this declaration, the assignment below made prim_cache a
    # local and the `is None` read raised UnboundLocalError
    global prim_cache
    if prim_cache is None:
        prim_cache = UsdGeom.XformCache()
    prim_cache.SetTime(time)
    m = prim_cache.GetLocalToWorldTransform(prim)
    if return_mat44:
        return wp.mat44(
            m[0][0], m[1][0], m[2][0], m[3][0],
            m[0][1], m[1][1], m[2][1], m[3][1],
            m[0][2], m[1][2], m[2][2], m[3][2],
            m[0][3], m[1][3], m[2][3], m[3][3])
    else:
        A = np.array([[m[0][0], m[0][1], m[0][2]], [m[1][0], m[1][1], m[1][2]], [m[2][0], m[2][1], m[2][2]]])
        b = np.array([m[3][0], m[3][1], m[3][2]])
        return A, b
def set_transform(mesh, trans, quat):
    # Write a rigid transform (translation + rotation quaternion) into the
    # prim's first xform op (assumes the single transform op added by clone_prim).
    # NOTE(review): Gf.Quatd takes (w, i, j, k) while warp quats are (x, y, z, w)
    # — confirm the component order matches at the call sites.
    usd_mat = Gf.Matrix4d()
    usd_mat.SetRotateOnly(Gf.Quatd(*quat))
    usd_mat.SetTranslateOnly(Gf.Vec3d(*trans))
    xform = UsdGeom.Xform(mesh)
    xform.GetOrderedXformOps()[0].Set(usd_mat)
def clone_primvar(prim, prim_clone, name, time=0.0):
    """Best-effort copy of one primvar from prim to prim_clone.

    Fixed: this module-level function had a spurious `self` first parameter,
    so the three-argument call in clone_prim raised a TypeError.
    """
    try:
        attr = UsdGeom.Primvar(prim.GetAttribute(name))
        prim_clone.CreatePrimvar(name, attr.GetTypeName(), attr.GetInterpolation()).Set(attr.Get(time))
    except:
        # missing or incompatible primvars are silently skipped (best effort)
        pass
def clone_prim(stage, prim):
    """Create a hidden, world-space clone of a mesh prim for simulation.

    The source prim is made invisible; the clone gets the source geometry with
    the world transform baked into its points/normals, a single transform op
    (driven per frame by the sim), the source topology attributes and material.
    Returns the cloned UsdGeom.Mesh.
    """
    vis = prim.GetAttribute("visibility")
    if vis:
        vis.Set("invisible")

    mesh = UsdGeom.Mesh(prim)
    clone_prim_path = '/' + str(prim.GetPath()).replace("/", "_") + '_clone'

    UsdGeom.Mesh.Define(stage, clone_prim_path)
    prim_clone = UsdGeom.Mesh(stage.GetPrimAtPath(clone_prim_path))
    mesh_clone = UsdGeom.Mesh(prim_clone)
    stage.GetPrimAtPath(clone_prim_path).SetActive(True)

    # single transform op, overwritten each frame by set_transform
    xform = UsdGeom.Xform(mesh_clone)
    xform.ClearXformOpOrder()
    xform.AddXformOp(UsdGeom.XformOp.TypeTransform)

    # bake the source prim's world transform into the cloned geometry.
    # fixed: the (A, b) numpy form is required here — the previous call with
    # return_mat44=True returned a single wp.mat44, which neither unpacks into
    # two values nor supports the numpy operations below
    trans_mat, trans_t = get_global_transform(prim, 0.0, False)
    points = np.array(mesh.GetPointsAttr().Get(0.0))
    trans_points = points @ trans_mat + trans_t

    # rotate normals with the row-normalized matrix (drops scale)
    normal_mat = np.array([
        trans_mat[0, :] / np.linalg.norm(trans_mat[0, :]),
        trans_mat[1, :] / np.linalg.norm(trans_mat[1, :]),
        trans_mat[2, :] / np.linalg.norm(trans_mat[2, :])])
    trans_normals = np.array(mesh.GetNormalsAttr().Get(0.0)) @ normal_mat

    mesh_clone.GetPointsAttr().Set(trans_points)
    mesh_clone.GetNormalsAttr().Set(trans_normals)
    mesh_clone.SetNormalsInterpolation(mesh.GetNormalsInterpolation())

    # copy topology and subdivision attributes verbatim
    mesh_clone.GetFaceVertexIndicesAttr().Set(mesh.GetFaceVertexIndicesAttr().Get(0.0))
    mesh_clone.GetFaceVertexCountsAttr().Set(mesh.GetFaceVertexCountsAttr().Get(0.0))
    mesh_clone.GetCornerIndicesAttr().Set(mesh.GetCornerIndicesAttr().Get(0.0))
    mesh_clone.GetCornerSharpnessesAttr().Set(mesh.GetCornerSharpnessesAttr().Get(0.0))
    mesh_clone.GetCreaseIndicesAttr().Set(mesh.GetCreaseIndicesAttr().Get(0.0))
    mesh_clone.GetCreaseLengthsAttr().Set(mesh.GetCreaseLengthsAttr().Get(0.0))
    mesh_clone.GetCreaseSharpnessesAttr().Set(mesh.GetCreaseSharpnessesAttr().Get(0.0))
    mesh_clone.GetSubdivisionSchemeAttr().Set(mesh.GetSubdivisionSchemeAttr().Get(0.0))
    mesh_clone.GetInterpolateBoundaryAttr().Set(mesh.GetInterpolateBoundaryAttr().Get(0.0))
    mesh_clone.GetFaceVaryingLinearInterpolationAttr().Set(mesh.GetFaceVaryingLinearInterpolationAttr().Get(0.0))
    mesh_clone.GetTriangleSubdivisionRuleAttr().Set(mesh.GetTriangleSubdivisionRuleAttr().Get(0.0))
    mesh_clone.GetHoleIndicesAttr().Set(mesh.GetHoleIndicesAttr().Get(0.0))

    # copy texture-coordinate primvars
    for attr in prim.GetAttributes():
        type = str(attr.GetTypeName())
        if type.find("texCoord") >= 0:
            clone_primvar(prim, prim_clone, attr.GetName())

    # bind the source material (best effort)
    try:
        mat = UsdShade.MaterialBindingAPI(prim).GetDirectBinding().GetMaterial()
        UsdShade.MaterialBindingAPI(prim_clone).Bind(mat)
    except:
        pass

    return prim_clone
def hide_clones(stage):
    """Deactivate simulation clones and restore visibility of all other prims."""
    if stage is None:
        return
    for prim in stage.Traverse():
        if "_clone" in str(prim.GetName()):
            prim.SetActive(False)
            continue
        vis_attr = prim.GetAttribute("visibility")
        if vis_attr:
            vis_attr.Set("inherited")
| 4,122 | Python | 34.543103 | 113 | 0.643862 |
matthias-research/omni.fun/exts/omni.fun/docs/CHANGELOG.md | # CHANGELOG
## [0.1.0] - 2022-08-15
- Initial publish for alpha testing
| 77 | Markdown | 7.666666 | 35 | 0.636364 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.