file_path (string, 20–202 chars) | content (string, 9–3.85M chars) | size (int64, 9–3.85M) | lang (9 classes) | avg_line_length (float64, 3.33–100) | max_line_length (int64, 8–993) | alphanum_fraction (float64, 0.26–0.93)
---|---|---|---|---|---|---
Road-Balance/RB_WheeledRobotExample/README.md
|
# Tutorial code for adding wheeled robots, accompanying the YouTube video
| 58 |
Markdown
| 57.999942 | 58 | 0.827586 |
Road-Balance/RB_WheeledRobotExample/RBWheeledRobotExample_python/ui_builder.py
|
# This software contains source code provided by NVIDIA Corporation.
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import numpy as np
import omni.timeline
import omni.ui as ui
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.objects.cuboid import FixedCuboid
from omni.isaac.core.prims import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.utils.stage import add_reference_to_stage, create_new_stage, get_current_stage
from omni.isaac.core.world import World
from omni.isaac.ui.element_wrappers import CollapsableFrame, StateButton
from omni.isaac.ui.element_wrappers.core_connectors import LoadButton, ResetButton
from omni.isaac.ui.ui_utils import get_style
from omni.usd import StageEventType
from pxr import Sdf, UsdLux
from .scenario import ExampleScenario
class UIBuilder:
def __init__(self):
# Frames are sub-windows that can contain multiple UI elements
self.frames = []
# UI elements created using a UIElementWrapper instance
self.wrapped_ui_elements = []
# Get access to the timeline to control stop/pause/play programmatically
self._timeline = omni.timeline.get_timeline_interface()
# Run initialization for the provided example
self._on_init()
###################################################################################
# The Functions Below Are Called Automatically By extension.py
###################################################################################
def on_menu_callback(self):
"""Callback for when the UI is opened from the toolbar.
This is called directly after build_ui().
"""
pass
def on_timeline_event(self, event):
"""Callback for Timeline events (Play, Pause, Stop)
Args:
event (omni.timeline.TimelineEventType): Event Type
"""
if event.type == int(omni.timeline.TimelineEventType.STOP):
# When the user hits the stop button through the UI, they will inevitably discover edge cases where things break
# For complete robustness, the user should resolve those edge cases here
# In general, for extensions based on this template, there is no value in having the user click the play/stop
# button instead of using the Load/Reset/Run buttons provided.
self._scenario_state_btn.reset()
self._scenario_state_btn.enabled = False
def on_physics_step(self, step: float):
"""Callback for Physics Step.
Physics steps only occur when the timeline is playing
Args:
step (float): Size of physics step
"""
pass
def on_stage_event(self, event):
"""Callback for Stage Events
Args:
event (omni.usd.StageEventType): Event Type
"""
if event.type == int(StageEventType.OPENED):
# If the user opens a new stage, the extension should completely reset
self._reset_extension()
def cleanup(self):
"""
Called when the stage is closed or the extension is hot reloaded.
Perform any necessary cleanup such as removing active callback functions
Buttons imported from omni.isaac.ui.element_wrappers implement a cleanup function that should be called
"""
for ui_elem in self.wrapped_ui_elements:
ui_elem.cleanup()
def build_ui(self):
"""
Build a custom UI tool to run your extension.
This function will be called any time the UI window is closed and reopened.
"""
world_controls_frame = CollapsableFrame("World Controls", collapsed=False)
with world_controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
self._load_btn = LoadButton(
"Load Button", "LOAD", setup_scene_fn=self._setup_scene, setup_post_load_fn=self._setup_scenario
)
self._load_btn.set_world_settings(physics_dt=1 / 60.0, rendering_dt=1 / 60.0)
self.wrapped_ui_elements.append(self._load_btn)
self._reset_btn = ResetButton(
"Reset Button", "RESET", pre_reset_fn=None, post_reset_fn=self._on_post_reset_btn
)
self._reset_btn.enabled = False
self.wrapped_ui_elements.append(self._reset_btn)
run_scenario_frame = CollapsableFrame("Run Scenario")
with run_scenario_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
self._scenario_state_btn = StateButton(
"Run Scenario",
"RUN",
"STOP",
on_a_click_fn=self._on_run_scenario_a_text,
on_b_click_fn=self._on_run_scenario_b_text,
physics_callback_fn=self._update_scenario,
)
self._scenario_state_btn.enabled = False
self.wrapped_ui_elements.append(self._scenario_state_btn)
######################################################################################
# Functions Below This Point Support The Provided Example And Can Be Deleted/Replaced
######################################################################################
def _on_init(self):
self._articulation = None
self._cuboid = None
self._scenario = ExampleScenario()
def _add_light_to_stage(self):
"""
A new stage does not have a light by default. This function creates a spherical light
"""
sphereLight = UsdLux.SphereLight.Define(get_current_stage(), Sdf.Path("/World/SphereLight"))
sphereLight.CreateRadiusAttr(2)
sphereLight.CreateIntensityAttr(100000)
XFormPrim(str(sphereLight.GetPath())).set_world_pose([6.5, 0, 12])
def _setup_scene(self):
"""
This function is attached to the Load Button as the setup_scene_fn callback.
On pressing the Load Button, a new instance of World() is created and then this function is called.
The user should now load their assets onto the stage and add them to the World Scene.
In this example, a new stage is loaded explicitly, and all assets are reloaded.
If the user is relying on hot-reloading and does not want to reload assets every time,
they may perform a check here to see if their desired assets are already on the stage,
and avoid loading anything if they are. In this case, the user would still need to add
their assets to the World (which has low overhead). See commented code section in this function.
"""
# Load the UR10e
robot_prim_path = "/ur10e"
path_to_robot_usd = get_assets_root_path() + "/Isaac/Robots/UniversalRobots/ur10e/ur10e.usd"
# Do not reload assets when hot reloading. This should only be done while extension is under development.
# if not is_prim_path_valid(robot_prim_path):
# create_new_stage()
# add_reference_to_stage(path_to_robot_usd, robot_prim_path)
# else:
# print("Robot already on Stage")
create_new_stage()
self._add_light_to_stage()
add_reference_to_stage(path_to_robot_usd, robot_prim_path)
# Create a cuboid
self._cuboid = FixedCuboid(
"/Scenario/cuboid", position=np.array([0.3, 0.3, 0.5]), size=0.05, color=np.array([255, 0, 0])
)
self._articulation = Articulation(robot_prim_path)
# Add user-loaded objects to the World
world = World.instance()
world.scene.add(self._articulation)
world.scene.add(self._cuboid)
def _setup_scenario(self):
"""
This function is attached to the Load Button as the setup_post_load_fn callback.
The user may assume that their assets have been loaded by their setup_scene_fn callback, that
their objects are properly initialized, and that the timeline is paused on timestep 0.
In this example, a scenario is initialized which will move each robot joint one at a time in a loop while moving the
provided prim in a circle around the robot.
"""
self._reset_scenario()
# UI management
self._scenario_state_btn.reset()
self._scenario_state_btn.enabled = True
self._reset_btn.enabled = True
def _reset_scenario(self):
self._scenario.teardown_scenario()
self._scenario.setup_scenario(self._articulation, self._cuboid)
def _on_post_reset_btn(self):
"""
This function is attached to the Reset Button as the post_reset_fn callback.
The user may assume that their objects are properly initialized, and that the timeline is paused on timestep 0.
They may also assume that objects that were added to the World.Scene have been moved to their default positions.
I.e. the cube prim will move back to the position it was in when it was created in self._setup_scene().
"""
self._reset_scenario()
# UI management
self._scenario_state_btn.reset()
self._scenario_state_btn.enabled = True
def _update_scenario(self, step: float):
"""This function is attached to the Run Scenario StateButton.
This function was passed in as the physics_callback_fn argument.
This means that when the a_text "RUN" is pressed, a subscription is made to call this function on every physics step.
When the b_text "STOP" is pressed, the physics callback is removed.
Args:
step (float): The dt of the current physics step
"""
self._scenario.update_scenario(step)
def _on_run_scenario_a_text(self):
"""
This function is attached to the Run Scenario StateButton.
This function was passed in as the on_a_click_fn argument.
It is called when the StateButton is clicked while saying a_text "RUN".
This function simply plays the timeline, which means that physics steps will start happening. After the world is loaded or reset,
the timeline is paused, which means that no physics steps will occur until the user makes it play either programmatically or
through the left-hand UI toolbar.
"""
self._timeline.play()
def _on_run_scenario_b_text(self):
"""
This function is attached to the Run Scenario StateButton.
This function was passed in as the on_b_click_fn argument.
It is called when the StateButton is clicked while saying b_text "STOP".
Pausing the timeline on b_text is not strictly necessary for this example to run.
Clicking "STOP" will cancel the physics subscription that updates the scenario, which means that
the robot will stop getting new commands and the cube will stop updating without needing to
pause at all. The reason that the timeline is paused here is to prevent the robot being carried
forward by momentum for a few frames after the physics subscription is canceled. Pausing here makes
this example prettier, but if curious, the user should observe what happens when this line is removed.
"""
self._timeline.pause()
def _reset_extension(self):
"""This is called when the user opens a new stage from self.on_stage_event().
All state should be reset.
"""
self._on_init()
self._reset_ui()
def _reset_ui(self):
self._scenario_state_btn.reset()
self._scenario_state_btn.enabled = False
self._reset_btn.enabled = False
| 12,160 |
Python
| 43.874539 | 138 | 0.6375 |
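The `ui_builder.py` row above delegates all robot motion to a scenario object imported from `.scenario`. That file is not part of this row, but the calls made on it (`setup_scenario(articulation, cuboid)`, `teardown_scenario()`, `update_scenario(step)`) pin down the interface it must expose. Below is a minimal sketch of such a scenario class under those assumptions; the joint-sweep motion is purely illustrative and is not the actual `ExampleScenario` shipped with the template.

```python
import numpy as np


class MinimalScenario:
    """Hypothetical stand-in for the scenario object consumed by UIBuilder."""

    def __init__(self):
        self._articulation = None
        self._object = None
        self._time = 0.0

    def setup_scenario(self, articulation, object_prim):
        # Called from UIBuilder._reset_scenario() after Load/Reset.
        self._articulation = articulation
        self._object = object_prim
        self._time = 0.0

    def teardown_scenario(self):
        # Drop references so the next setup starts from a clean state.
        self._articulation = None
        self._object = None
        self._time = 0.0

    def update_scenario(self, step: float):
        # Called once per physics step while the "RUN" StateButton is active.
        if self._articulation is None:
            return
        self._time += step
        # Illustrative motion only: sweep the first joint sinusoidally.
        positions = self._articulation.get_joint_positions()
        positions[0] = 0.5 * np.sin(self._time)
        self._articulation.set_joint_positions(positions)
```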
Road-Balance/RB_WheeledRobotExample/config/extension.toml
|
[core]
reloadable = true
order = 0
[package]
version = "1.0.0"
category = "Simulation"
title = "RBWheeledRobotExample"
description = ""
authors = ["NVIDIA"]
repository = ""
keywords = []
changelog = "docs/CHANGELOG.md"
readme = "docs/README.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
[dependencies]
"omni.kit.uiapp" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotLimoDiff"
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotLimoDiffROS2"
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotLimoAckermannROS2"
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotLimoAckermannTwistROS2"
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotsKaya"
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotSummit"
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotSummitO3Wheel"
[[python.module]]
name = "RBWheeledRobotExample_python.WheeledRobotSummitO3WheelROS2"
| 1,047 |
TOML
| 21.297872 | 72 | 0.762178 |
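Each `[[python.module]]` entry in the `extension.toml` above names a Python module that Kit imports when the extension is enabled; by convention such a module exposes a subclass of `omni.ext.IExt`. The sketch below shows what one of those modules might minimally contain; the class name and window title are placeholders, not the actual Road-Balance implementation.

```python
import omni.ext
import omni.ui as ui


class WheeledRobotExampleExtension(omni.ext.IExt):
    """Hypothetical entry point for one [[python.module]] listed in extension.toml."""

    def on_startup(self, ext_id):
        # Build a small window when Kit enables the extension.
        self._window = ui.Window("Wheeled Robot Example", width=300, height=200)
        with self._window.frame:
            with ui.VStack():
                ui.Label("Extension loaded")

    def on_shutdown(self):
        # Release UI resources when the extension is disabled or hot reloaded.
        if self._window:
            self._window.destroy()
            self._window = None
```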
Road-Balance/RB_WheeledRobotExample/docs/CHANGELOG.md
|
# Changelog
## [0.1.0] - 2024-05-14
### Added
- Initial version of RBWheeledRobotExample Extension
| 102 |
Markdown
| 11.874999 | 52 | 0.696078 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/README.md
|
# Isaac ROS cuMotion
NVIDIA accelerated packages for arm motion planning and control
<div align="center"><a class="reference internal image-reference" href="https://media.githubusercontent.com/media/NVIDIA-ISAAC-ROS/.github/main/resources/isaac_ros_docs/repositories_and_packages/isaac_ros_cumotion/cumotion_ur10_demo.gif/"><img alt="image" src="https://media.githubusercontent.com/media/NVIDIA-ISAAC-ROS/.github/main/resources/isaac_ros_docs/repositories_and_packages/isaac_ros_cumotion/cumotion_ur10_demo.gif/" width="600px"/></a></div>
## Overview
[Isaac ROS cuMotion](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_cumotion) provides CUDA-accelerated manipulation
capabilities for robots in ROS 2, enabling faster processing speeds and real-time performance
that are crucial to complex robotics tasks such as motion planning.
It provides two main capabilities: motion generation for robot
arms via an integration of cuMotion into MoveIt 2, and segmentation of the robot from depth streams
using cuMotion’s kinematics and geometry processing functions to accurately identify and filter out robot parts.
This allows one to reconstruct obstacles in the environment without spurious contributions from the robot itself.
The key advantages of using Isaac ROS cuMotion are:
* Increased Efficiency: CUDA acceleration significantly speeds up processing times,
allowing complex computations, such as collision avoidance, to run in real time.
* Enhanced Precision: Accurate motion planning and segmentation allow for better
performance in tasks requiring fine manipulation and detailed environmental interaction.
* Improved Flexibility: Modular design allows easy integration with existing ROS 2 setups,
such as configurations using MoveIt 2, enabling customization and scalability using familiar
tooling.
The Isaac ROS cuMotion repository currently contains the following packages:
`isaac_ros_cumotion`:
: This package contains the cuMotion planner node and the robot segmentation node.
`isaac_ros_cumotion_examples`:
: This package contains various examples demonstrating use of cuMotion with MoveIt.
`isaac_ros_cumotion_moveit`:
: This package provides a plugin for MoveIt 2 that exposes cuMotion as an external planner, leveraging `isaac_ros_cumotion`.
Isaac ROS cuMotion is also featured as part of [Isaac Manipulator](https://nvidia-isaac-ros.github.io/reference_workflows/isaac_manipulator/index.html).
---
## Documentation
Please visit the [Isaac ROS Documentation](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/index.html) to learn how to use this repository.
---
## Packages
* [`isaac_ros_cumotion`](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_cumotion/index.html)
* [Motion Generation](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_cumotion/index.html#motion-generation)
* [Robot Segmentation](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_cumotion/index.html#robot-segmentation)
* [`isaac_ros_cumotion_moveit`](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_cumotion_moveit/index.html)
* [Quickstart](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_cumotion_moveit/index.html#quickstart)
* [Try More Examples](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_cumotion_moveit/index.html#try-more-examples)
* [Troubleshooting](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_cumotion_moveit/index.html#troubleshooting)
* [`isaac_ros_esdf_visualizer`](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_esdf_visualizer/index.html)
* [Overview](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_esdf_visualizer/index.html#overview)
* [API](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_esdf_visualizer/index.html#api)
* [`isaac_ros_moveit_goal_setter`](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_moveit_goal_setter/index.html)
* [Overview](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_moveit_goal_setter/index.html#overview)
* [API](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_moveit_goal_setter/index.html#api)
* [`isaac_ros_moveit_goal_setter_interfaces`](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_cumotion/isaac_ros_moveit_goal_setter_interfaces/index.html)
## Latest
Update 2024-05-30: Initial release
| 4,775 |
Markdown
| 69.235293 | 453 | 0.802723 |
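The `isaac_ros_cumotion` planner node shown later in this dataset serves the standard `moveit_msgs/action/MoveGroup` action on `cumotion/move_group` (see `cumotion_planner.py`), so it can also be exercised without the MoveIt plugin. The sketch below is a minimal, hedged example of sending a joint-space goal with `rclpy`; the joint names and target values are placeholders for a UR-style arm and are not taken from the repository.

```python
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node

from moveit_msgs.action import MoveGroup
from moveit_msgs.msg import Constraints, JointConstraint


class CumotionGoalClient(Node):
    """Send one joint-space goal to the cuMotion planner action server."""

    def __init__(self):
        super().__init__('cumotion_goal_client')
        self._client = ActionClient(self, MoveGroup, 'cumotion/move_group')

    def send_joint_goal(self, joint_names, positions):
        goal = MoveGroup.Goal()
        # Leaving the start state empty makes the planner read /joint_states instead.
        constraints = Constraints()
        for name, position in zip(joint_names, positions):
            jc = JointConstraint()
            jc.joint_name = name
            jc.position = position
            jc.tolerance_above = 0.01
            jc.tolerance_below = 0.01
            jc.weight = 1.0
            constraints.joint_constraints.append(jc)
        goal.request.goal_constraints.append(constraints)
        goal.request.max_velocity_scaling_factor = 0.5
        goal.request.max_acceleration_scaling_factor = 0.5
        self._client.wait_for_server()
        return self._client.send_goal_async(goal)


def main():
    rclpy.init()
    node = CumotionGoalClient()
    # Placeholder joint names/values for a UR arm; adjust to the robot actually loaded.
    future = node.send_joint_goal(
        ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
         'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint'],
        [0.0, -1.57, 1.57, 0.0, 0.0, 0.0])
    rclpy.spin_until_future_complete(node, future)
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
```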
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_robot_description/setup.py
|
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from glob import glob
import os
from setuptools import find_packages, setup
package_name = 'isaac_ros_cumotion_robot_description'
setup(
name=package_name,
version='3.0.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/ament_index/resource_index/packages', ['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(
os.path.join('share', package_name, 'xrdf'),
glob(os.path.join('xrdf', '*.xrdf')),
),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Isaac ROS Maintainers',
maintainer_email='[email protected]',
description='Package containing XRDF (extended robot description format) files for '
'various robots',
license='Apache-2.0',
tests_require=['pytest'],
)
| 1,569 |
Python
| 33.130434 | 88 | 0.690886 |
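Because the `data_files` entry above installs every `*.xrdf` file to `share/isaac_ros_cumotion_robot_description/xrdf`, a consumer can resolve those files through the ament index at runtime; `cumotion_planner.py` later in this dataset does exactly this. A minimal sketch follows (the file name `ur5e.xrdf` is an assumption, not a confirmed filename):

```python
import os

from ament_index_python.packages import get_package_share_directory


def xrdf_path(robot_file: str) -> str:
    """Return the absolute path of an installed XRDF file (assumed to exist)."""
    share_dir = get_package_share_directory('isaac_ros_cumotion_robot_description')
    return os.path.join(share_dir, 'xrdf', robot_file)


if __name__ == '__main__':
    # 'ur5e.xrdf' is a placeholder; list the installed xrdf directory to see real names.
    print(xrdf_path('ur5e.xrdf'))
```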
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/curobo_core/setup.py
|
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from setuptools import find_namespace_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
package_name = 'curobo_core'
extra_cuda_args = {
'nvcc': [
'--threads=8',
'-O3',
'--ftz=true',
'--fmad=true',
'--prec-div=false',
'--prec-sqrt=false',
]
}
# create a list of modules to be compiled:
ext_modules = [
CUDAExtension(
'curobo.curobolib.lbfgs_step_cu',
[
'curobo/src/curobo/curobolib/cpp/lbfgs_step_cuda.cpp',
'curobo/src/curobo/curobolib/cpp/lbfgs_step_kernel.cu',
],
extra_compile_args=extra_cuda_args,
),
CUDAExtension(
'curobo.curobolib.kinematics_fused_cu',
[
'curobo/src/curobo/curobolib/cpp/kinematics_fused_cuda.cpp',
'curobo/src/curobo/curobolib/cpp/kinematics_fused_kernel.cu',
],
extra_compile_args=extra_cuda_args,
),
CUDAExtension(
'curobo.curobolib.geom_cu',
[
'curobo/src/curobo/curobolib/cpp/geom_cuda.cpp',
'curobo/src/curobo/curobolib/cpp/sphere_obb_kernel.cu',
'curobo/src/curobo/curobolib/cpp/pose_distance_kernel.cu',
'curobo/src/curobo/curobolib/cpp/self_collision_kernel.cu',
],
extra_compile_args=extra_cuda_args,
),
CUDAExtension(
'curobo.curobolib.line_search_cu',
[
'curobo/src/curobo/curobolib/cpp/line_search_cuda.cpp',
'curobo/src/curobo/curobolib/cpp/line_search_kernel.cu',
'curobo/src/curobo/curobolib/cpp/update_best_kernel.cu',
],
extra_compile_args=extra_cuda_args,
),
CUDAExtension(
'curobo.curobolib.tensor_step_cu',
[
'curobo/src/curobo/curobolib/cpp/tensor_step_cuda.cpp',
'curobo/src/curobo/curobolib/cpp/tensor_step_kernel.cu',
],
extra_compile_args=extra_cuda_args,
),
]
setup(
name=package_name,
version='3.0.0',
packages=find_namespace_packages(where='curobo/src'),
package_dir={'': 'curobo/src'},
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Isaac ROS Maintainers',
maintainer_email='[email protected]',
description='This package wraps the cuRobo library as a ROS 2 package. cuRobo serves as the current backend for cuMotion.',
license='NVIDIA Isaac ROS Software License',
tests_require=['pytest'],
entry_points={
'console_scripts': [
],
},
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension},
package_data={
'curobo': ['**/*.*'],
},
include_package_data=True,
)
| 3,583 |
Python
| 32.185185 | 127 | 0.634105 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_moveit_goal_setter/launch/isaac_ros_goal_setter.launch.py
|
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES',
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import launch
from launch.actions import DeclareLaunchArgument
from launch.substitutions import Command, FindExecutable, LaunchConfiguration, PathJoinSubstitution
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
def get_robot_description():
ur_type = LaunchConfiguration('ur_type')
robot_ip = LaunchConfiguration('robot_ip')
joint_limit_params = PathJoinSubstitution(
[FindPackageShare('ur_description'), 'config', ur_type, 'joint_limits.yaml']
)
kinematics_params = PathJoinSubstitution(
[FindPackageShare('ur_description'), 'config', ur_type, 'default_kinematics.yaml']
)
physical_params = PathJoinSubstitution(
[FindPackageShare('ur_description'), 'config', ur_type, 'physical_parameters.yaml']
)
visual_params = PathJoinSubstitution(
[FindPackageShare('ur_description'), 'config', ur_type, 'visual_parameters.yaml']
)
robot_description_content = Command(
[
PathJoinSubstitution([FindExecutable(name='xacro')]), ' ',
PathJoinSubstitution([FindPackageShare('ur_description'), 'urdf', 'ur.urdf.xacro']),
' ', 'robot_ip:=', robot_ip,
' ', 'joint_limit_params:=', joint_limit_params,
' ', 'kinematics_params:=', kinematics_params,
' ', 'physical_params:=', physical_params,
' ', 'visual_params:=', visual_params,
' ', 'safety_limits:=true',
' ', 'safety_pos_margin:=0.15',
' ', 'safety_k_position:=20',
' ', 'name:=ur', ' ', 'ur_type:=', ur_type, ' ', 'prefix:=''',
]
)
robot_description = {'robot_description': robot_description_content}
return robot_description
def get_robot_description_semantic():
# MoveIt Configuration
robot_description_semantic_content = Command(
[
PathJoinSubstitution([FindExecutable(name='xacro')]), ' ',
PathJoinSubstitution([FindPackageShare('ur_moveit_config'), 'srdf', 'ur.srdf.xacro']),
' ', 'name:=ur', ' ', 'prefix:=""',
]
)
robot_description_semantic = {
'robot_description_semantic': robot_description_semantic_content
}
return robot_description_semantic
def generate_launch_description():
launch_args = [
DeclareLaunchArgument(
'ur_type',
description='Type/series of used UR robot.',
choices=['ur3', 'ur3e', 'ur5', 'ur5e', 'ur10', 'ur10e', 'ur16e', 'ur20'],
default_value='ur5e',
),
DeclareLaunchArgument(
'robot_ip',
description='IP address of the robot',
default_value='192.56.1.2',
),
]
moveit_kinematics_params = PathJoinSubstitution(
[FindPackageShare('ur_moveit_config'), 'config', 'default_kinematics.yaml']
)
robot_description = get_robot_description()
robot_description_semantic = get_robot_description_semantic()
isaac_ros_moveit_goal_setter = Node(
package='isaac_ros_moveit_goal_setter',
executable='isaac_ros_moveit_goal_setter',
name='isaac_ros_moveit_goal_setter',
output='screen',
parameters=[
robot_description,
robot_description_semantic,
moveit_kinematics_params
],
)
return launch.LaunchDescription(launch_args + [isaac_ros_moveit_goal_setter])
| 4,135 |
Python
| 36.6 | 99 | 0.643047 |
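Since the launch file above declares `ur_type` and `robot_ip` arguments, it can be composed into a larger bringup with `IncludeLaunchDescription`. A minimal, hedged sketch is shown below; it assumes the package installs its launch directory under `share/`, as ROS 2 packages conventionally do, and the argument values are illustrative.

```python
import os

from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource


def generate_launch_description():
    goal_setter_launch = os.path.join(
        get_package_share_directory('isaac_ros_moveit_goal_setter'),
        'launch', 'isaac_ros_goal_setter.launch.py')

    # Forward the declared arguments; values here are placeholders.
    goal_setter = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(goal_setter_launch),
        launch_arguments={'ur_type': 'ur5e', 'robot_ip': '192.56.1.2'}.items(),
    )
    return LaunchDescription([goal_setter])
```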
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_moveit_goal_setter/src/goal_setter_node.cpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#include "geometry_msgs/msg/pose_stamped.hpp"
#include "isaac_ros_moveit_goal_setter/goal_setter_node.hpp"
namespace nvidia
{
namespace isaac_ros
{
namespace manipulation
{
GoalSetterNode::GoalSetterNode(std::string name, const rclcpp::NodeOptions & options)
: node_{std::make_shared<rclcpp::Node>(name, options)},
planner_group_name_(node_->declare_parameter<std::string>(
"planner_group_name", "ur_manipulator")),
planner_id_(node_->declare_parameter<std::string>("planner_id", "cuMotion")),
end_effector_link_(node_->declare_parameter<std::string>("end_effector_link", "wrist_3_link")),
move_group_interface_{moveit::planning_interface::MoveGroupInterface(node_, planner_group_name_)}
{
set_target_pose_service_ =
node_->create_service<isaac_ros_goal_setter_interfaces::srv::SetTargetPose>(
"set_target_pose", std::bind(
&GoalSetterNode::SetTargetPoseCallback, this, std::placeholders::_1, std::placeholders::_2));
ConfigureMoveit();
}
void GoalSetterNode::ConfigureMoveit()
{
// Initialize the move group interface
move_group_interface_.setPlannerId(planner_id_);
RCLCPP_INFO(node_->get_logger(), "Planner ID : %s", move_group_interface_.getPlannerId().c_str());
move_group_interface_.setEndEffectorLink(end_effector_link_);
RCLCPP_INFO(node_->get_logger(), "End Effector Link : %s", end_effector_link_.c_str());
}
void GoalSetterNode::SetTargetPoseCallback(
const std::shared_ptr<isaac_ros_goal_setter_interfaces::srv::SetTargetPose_Request> req,
std::shared_ptr<isaac_ros_goal_setter_interfaces::srv::SetTargetPose_Response> res)
{
res->success = false;
RCLCPP_DEBUG(node_->get_logger(), "Triggered SetTargetPoseCallback");
RCLCPP_DEBUG(
node_->get_logger(), "Pose : x=%f, y=%f, z=%f, qx=%f, qy=%f, qz=%f, qw=%f",
req->pose.pose.position.x, req->pose.pose.position.y, req->pose.pose.position.z,
req->pose.pose.orientation.x, req->pose.pose.orientation.y, req->pose.pose.orientation.z,
req->pose.pose.orientation.w);
auto success = move_group_interface_.setPoseTarget(req->pose, end_effector_link_);
if (!success) {
RCLCPP_ERROR(node_->get_logger(), "Failed to set target pose!");
return;
}
auto const [status, plan] = [this] {
moveit::planning_interface::MoveGroupInterface::Plan msg;
auto const ok = static_cast<bool>(move_group_interface_.plan(msg));
return std::make_pair(ok, msg);
}();
// Execute the plan
if (status) {
RCLCPP_ERROR(node_->get_logger(), "Executing!");
move_group_interface_.execute(plan);
res->success = true;
} else {
RCLCPP_ERROR(node_->get_logger(), "Planning failed!");
}
}
} // namespace manipulation
} // namespace isaac_ros
} // namespace nvidia
int main(int argc, char * argv[])
{
rclcpp::init(argc, argv);
auto goal_setter_node = std::make_shared<nvidia::isaac_ros::manipulation::GoalSetterNode>(
"moveit_goal_setter",
rclcpp::NodeOptions().automatically_declare_parameters_from_overrides(true));
rclcpp::executors::MultiThreadedExecutor executor;
executor.add_node(goal_setter_node->GetNode());
executor.spin();
rclcpp::shutdown();
return 0;
}
| 3,877 |
C++
| 35.933333 | 100 | 0.708538 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_moveit_goal_setter/scripts/pose_to_pose.py
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES',
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import time
from geometry_msgs.msg import Pose, PoseStamped
from isaac_ros_goal_setter_interfaces.srv import SetTargetPose
import rclpy
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from rclpy.executors import MultiThreadedExecutor
from rclpy.node import Node
from tf2_ros import TransformException
from tf2_ros.buffer import Buffer
from tf2_ros.transform_listener import TransformListener
class PoseToPoseNode(Node):
def __init__(self):
super().__init__('pose_to_pose_node')
self._world_frame = self.declare_parameter(
'world_frame', 'base_link').get_parameter_value().string_value
self._target_frames = self.declare_parameter(
'target_frames', ['target1_frame']).get_parameter_value().string_array_value
self._target_frame_idx = 0
self._plan_timer_period = self.declare_parameter(
'plan_timer_period', 0.01).get_parameter_value().double_value
self._tf_buffer = Buffer(cache_time=rclpy.duration.Duration(seconds=60.0))
self._tf_listener = TransformListener(self._tf_buffer, self)
self._goal_service_cb_group = MutuallyExclusiveCallbackGroup()
self._goal_client = self.create_client(
SetTargetPose, 'set_target_pose', callback_group=self._goal_service_cb_group)
while not self._goal_client.wait_for_service(timeout_sec=1.0):
self.get_logger().info('Service set_target_pose not available! Waiting...')
self._goal_req = SetTargetPose.Request()
self.timer = self.create_timer(self._plan_timer_period, self.on_timer)
def _transform_msg_to_pose_msg(self, tf_msg):
pose = Pose()
pose.position.x = tf_msg.translation.x
pose.position.y = tf_msg.translation.y
pose.position.z = tf_msg.translation.z
pose.orientation.x = tf_msg.rotation.x
pose.orientation.y = tf_msg.rotation.y
pose.orientation.z = tf_msg.rotation.z
pose.orientation.w = tf_msg.rotation.w
return pose
def send_goal(self, pose):
self.get_logger().debug('Sending pose target to planner.')
self._goal_req.pose = pose
self.future = self._goal_client.call_async(self._goal_req)
while not self.future.done():
time.sleep(0.001)
return self.future.result()
def on_timer(self):
# Check if there is a valid transform between world and target frame
try:
world_frame_pose_target_frame = self._tf_buffer.lookup_transform(
self._world_frame, self._target_frames[self._target_frame_idx],
self.get_clock().now(), rclpy.duration.Duration(seconds=10.0)
)
except TransformException as ex:
self.get_logger().warning(f'Waiting for target_frame pose transform to be available \
in TF, between {self._world_frame} and \
{self._target_frames[self._target_frame_idx]}. If \
warning persists, check if the transform is \
published to tf. Message from TF: {ex}')
return
output_msg = PoseStamped()
output_msg.header.stamp = self.get_clock().now().to_msg()
output_msg.header.frame_id = self._world_frame
output_msg.pose = self._transform_msg_to_pose_msg(world_frame_pose_target_frame.transform)
response = self.send_goal(output_msg)
self.get_logger().debug(f'Goal set with response: {response}')
if response.success:
self._target_frame_idx = (self._target_frame_idx + 1) % len(self._target_frames)
else:
self.get_logger().warning('target pose was not reachable by planner, trying again \
on the next iteration')
def main(args=None):
rclpy.init(args=args)
pose_to_pose_node = PoseToPoseNode()
executor = MultiThreadedExecutor()
executor.add_node(pose_to_pose_node)
try:
executor.spin()
except KeyboardInterrupt:
pose_to_pose_node.get_logger().info(
'KeyboardInterrupt, shutting down.\n'
)
pose_to_pose_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| 5,036 |
Python
| 37.450381 | 98 | 0.645751 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_moveit_goal_setter/include/isaac_ros_moveit_goal_setter/goal_setter_node.hpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#ifndef ISAAC_ROS_MOVEIT_GOAL_SETTER__GOAL_SETTER_NODE_HPP_
#define ISAAC_ROS_MOVEIT_GOAL_SETTER__GOAL_SETTER_NODE_HPP_
#include <memory>
#include "isaac_ros_common/qos.hpp"
#include "isaac_ros_goal_setter_interfaces/srv/set_target_pose.hpp"
#include <moveit/move_group_interface/move_group_interface.h>
#include <rclcpp/rclcpp.hpp>
namespace nvidia
{
namespace isaac_ros
{
namespace manipulation
{
class GoalSetterNode
{
public:
GoalSetterNode(std::string name, const rclcpp::NodeOptions & options);
~GoalSetterNode() = default;
std::shared_ptr<rclcpp::Node> GetNode() const {return node_;}
void ConfigureMoveit();
private:
void SetTargetPoseCallback(
const std::shared_ptr<isaac_ros_goal_setter_interfaces::srv::SetTargetPose_Request> req,
std::shared_ptr<isaac_ros_goal_setter_interfaces::srv::SetTargetPose_Response> res);
const std::shared_ptr<rclcpp::Node> node_;
std::string planner_group_name_;
std::string planner_id_;
std::string end_effector_link_;
moveit::planning_interface::MoveGroupInterface move_group_interface_;
rclcpp::Service<isaac_ros_goal_setter_interfaces::srv::SetTargetPose>::SharedPtr
set_target_pose_service_;
};
} // namespace manipulation
} // namespace isaac_ros
} // namespace nvidia
#endif // ISAAC_ROS_MOVEIT_GOAL_SETTER__GOAL_SETTER_NODE_HPP_
| 2,060 |
C++
| 29.761194 | 92 | 0.748058 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion/launch/isaac_ros_cumotion.launch.py
|
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES',
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
from ament_index_python.packages import get_package_share_directory
import launch
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
import yaml
def read_params(pkg_name, params_dir, params_file_name):
params_file = os.path.join(
get_package_share_directory(pkg_name), params_dir, params_file_name)
return yaml.safe_load(open(params_file, 'r'))
def launch_args_from_params(pkg_name, params_dir, params_file_name, prefix: str = None):
launch_args = []
launch_configs = {}
params = read_params(pkg_name, params_dir, params_file_name)
for param, value in params['/**']['ros__parameters'].items():
if value is not None:
arg_name = param if prefix is None else f'{prefix}.{param}'
launch_args.append(DeclareLaunchArgument(name=arg_name, default_value=str(value)))
launch_configs[param] = LaunchConfiguration(arg_name)
return launch_args, launch_configs
def generate_launch_description():
"""Launch file to bring up cumotion planner node."""
launch_args, launch_configs = launch_args_from_params(
'isaac_ros_cumotion', 'params', 'isaac_ros_cumotion_params.yaml', 'cumotion_planner')
cumotion_planner_node = Node(
name='cumotion_planner',
package='isaac_ros_cumotion',
namespace='',
executable='cumotion_planner_node',
parameters=[
launch_configs
],
output='screen',
)
return launch.LaunchDescription(launch_args + [cumotion_planner_node])
| 2,336 |
Python
| 34.953846 | 94 | 0.707192 |
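`launch_args_from_params` above expects the parameter file to use the wildcard `/**` namespace with a `ros__parameters` block, and turns every non-null entry into a `DeclareLaunchArgument` prefixed with `cumotion_planner.`. The sketch below mirrors that expected structure; the parameter names and defaults are taken from the declarations in `cumotion_planner.py` later in this dataset, and the actual `params/isaac_ros_cumotion_params.yaml` may differ.

```python
import yaml

# Hypothetical contents mirroring the structure that read_params() expects.
PARAMS_YAML = """
/**:
  ros__parameters:
    robot: ur5e.yml
    time_dilation_factor: 0.5
    voxel_size: 0.05
"""

params = yaml.safe_load(PARAMS_YAML)
prefix = 'cumotion_planner'
for name, value in params['/**']['ros__parameters'].items():
    if value is not None:
        # Matches the naming used by launch_args_from_params(..., prefix='cumotion_planner').
        print(f'{prefix}.{name} (default: {value})')
```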
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion/isaac_ros_cumotion/util.py
|
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
from curobo.geom.types import Sphere
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
def get_spheres_marker(
robot_spheres, base_frame: str, time, rgb=[0.1, 0.1, 0.1, 0.5], start_idx: int = 0):
m_arr = MarkerArray()
for i in range(len(robot_spheres)):
r_s = Sphere(
name='sphere',
radius=robot_spheres[i, -1],
pose=robot_spheres[i, :3].tolist() + [1, 0, 0, 0],
)
# print(rgb[i])
m = get_marker_sphere(r_s, base_frame, time, start_idx + i, rgb)
m_arr.markers.append(m)
return m_arr
def get_marker_sphere(sphere: Sphere, base_frame: str, time, idx=0, rgb=[0.4, 0.4, 0.8, 1.0]):
marker = Marker()
marker.header.frame_id = base_frame
marker.header.stamp = time
marker.id = idx
marker.type = Marker.SPHERE
marker.action = Marker.ADD
marker.scale.x = sphere.radius * 2
marker.scale.y = sphere.radius * 2
marker.scale.z = sphere.radius * 2
marker.color.r = rgb[0]
marker.color.g = rgb[1]
marker.color.b = rgb[2]
marker.color.a = rgb[3]
# pose:
marker.pose.position.x = sphere.position[0]
marker.pose.position.y = sphere.position[1]
marker.pose.position.z = sphere.position[2]
marker.pose.orientation.w = 1.0
return marker
| 1,803 |
Python
| 33.692307 | 94 | 0.667221 |
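`get_spheres_marker` above takes an array of sphere centers and radii (shape N x 4, radius in the last column) plus a frame id and timestamp, and returns a `MarkerArray` ready for RViz. Below is a minimal, hedged usage sketch inside an `rclpy` node; the topic name and sphere values are placeholders.

```python
import numpy as np
import rclpy
from rclpy.node import Node
from visualization_msgs.msg import MarkerArray

from isaac_ros_cumotion.util import get_spheres_marker


class SphereMarkerPublisher(Node):
    def __init__(self):
        super().__init__('sphere_marker_publisher')
        self._pub = self.create_publisher(MarkerArray, 'robot_spheres', 10)
        self._timer = self.create_timer(0.5, self._on_timer)
        # Each row is [x, y, z, radius]; values are illustrative only.
        self._spheres = np.array([
            [0.0, 0.0, 0.3, 0.05],
            [0.0, 0.0, 0.6, 0.07],
        ])

    def _on_timer(self):
        markers = get_spheres_marker(
            self._spheres, 'base_link', self.get_clock().now().to_msg())
        self._pub.publish(markers)


def main():
    rclpy.init()
    node = SphereMarkerPublisher()
    rclpy.spin(node)


if __name__ == '__main__':
    main()
```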
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion/isaac_ros_cumotion/cumotion_planner.py
|
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
from os import path
import time
from ament_index_python.packages import get_package_share_directory
from curobo.geom.sdf.world import CollisionCheckerType
from curobo.geom.types import Cuboid
from curobo.geom.types import Cylinder
from curobo.geom.types import Mesh
from curobo.geom.types import Sphere
from curobo.geom.types import VoxelGrid as CuVoxelGrid
from curobo.geom.types import WorldConfig
from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose
from curobo.types.state import JointState as CuJointState
from curobo.util.logger import setup_curobo_logger
from curobo.util_file import get_robot_configs_path
from curobo.util_file import join_path
from curobo.util_file import load_yaml
from curobo.wrap.reacher.motion_gen import MotionGen
from curobo.wrap.reacher.motion_gen import MotionGenConfig
from curobo.wrap.reacher.motion_gen import MotionGenPlanConfig
from curobo.wrap.reacher.motion_gen import MotionGenStatus
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
from isaac_ros_cumotion.xrdf_utils import convert_xrdf_to_curobo
from moveit_msgs.action import MoveGroup
from moveit_msgs.msg import CollisionObject
from moveit_msgs.msg import MoveItErrorCodes
from moveit_msgs.msg import RobotTrajectory
import numpy as np
from nvblox_msgs.srv import EsdfAndGradients
import rclpy
from rclpy.action import ActionServer
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from rclpy.executors import MultiThreadedExecutor
from rclpy.node import Node
from sensor_msgs.msg import JointState
from shape_msgs.msg import SolidPrimitive
from std_msgs.msg import ColorRGBA
import torch
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from visualization_msgs.msg import Marker
class CumotionActionServer(Node):
def __init__(self):
super().__init__('cumotion_action_server')
self.tensor_args = TensorDeviceType()
self.declare_parameter('robot', 'ur5e.yml')
self.declare_parameter('time_dilation_factor', 0.5)
self.declare_parameter('collision_cache_mesh', 20)
self.declare_parameter('collision_cache_cuboid', 20)
self.declare_parameter('interpolation_dt', 0.02)
self.declare_parameter('voxel_dims', [2.0, 2.0, 2.0])
self.declare_parameter('voxel_size', 0.05)
self.declare_parameter('read_esdf_world', False)
self.declare_parameter('publish_curobo_world_as_voxels', False)
self.declare_parameter('add_ground_plane', False)
self.declare_parameter('publish_voxel_size', 0.05)
self.declare_parameter('max_publish_voxels', 50000)
self.declare_parameter('joint_states_topic', '/joint_states')
self.declare_parameter('tool_frame', rclpy.Parameter.Type.STRING)
self.declare_parameter('grid_position', [0.0, 0.0, 0.0])
self.declare_parameter('esdf_service_name', '/nvblox_node/get_esdf_and_gradient')
self.declare_parameter('urdf_path', rclpy.Parameter.Type.STRING)
self.declare_parameter('enable_curobo_debug_mode', False)
self.declare_parameter('override_moveit_scaling_factors', False)
debug_mode = (
self.get_parameter('enable_curobo_debug_mode').get_parameter_value().bool_value
)
if debug_mode:
setup_curobo_logger('info')
else:
setup_curobo_logger('warning')
self.__voxel_pub = self.create_publisher(Marker, '/curobo/voxels', 10)
self._action_server = ActionServer(
self, MoveGroup, 'cumotion/move_group', self.execute_callback
)
try:
self.__urdf_path = self.get_parameter('urdf_path')
self.__urdf_path = self.__urdf_path.get_parameter_value().string_value
if self.__urdf_path == '':
self.__urdf_path = None
except rclpy.exceptions.ParameterUninitializedException:
self.__urdf_path = None
try:
self.__tool_frame = self.get_parameter('tool_frame')
self.__tool_frame = self.__tool_frame.get_parameter_value().string_value
if self.__tool_frame == '':
self.__tool_frame = None
except rclpy.exceptions.ParameterUninitializedException:
self.__tool_frame = None
self.__xrdf_path = path.join(
get_package_share_directory('isaac_ros_cumotion_robot_description'), 'xrdf'
)
self.__joint_states_topic = (
self.get_parameter('joint_states_topic').get_parameter_value().string_value
)
self.__add_ground_plane = (
self.get_parameter('add_ground_plane').get_parameter_value().bool_value
)
self.__override_moveit_scaling_factors = (
self.get_parameter('override_moveit_scaling_factors').get_parameter_value().bool_value
)
# ESDF service
self.__read_esdf_grid = (
self.get_parameter('read_esdf_world').get_parameter_value().bool_value
)
self.__publish_curobo_world_as_voxels = (
self.get_parameter('publish_curobo_world_as_voxels').get_parameter_value().bool_value
)
self.__grid_position = (
self.get_parameter('grid_position').get_parameter_value().double_array_value
)
self.__max_publish_voxels = (
self.get_parameter('max_publish_voxels').get_parameter_value().integer_value
)
self.__voxel_dims = (
self.get_parameter('voxel_dims').get_parameter_value().double_array_value
)
self.__publish_voxel_size = (
self.get_parameter('publish_voxel_size').get_parameter_value().double_value
)
self.__voxel_size = self.get_parameter('voxel_size').get_parameter_value().double_value
self.__esdf_client = None
self.__esdf_req = None
if self.__read_esdf_grid:
esdf_service_name = (
self.get_parameter('esdf_service_name').get_parameter_value().string_value
)
esdf_service_cb_group = MutuallyExclusiveCallbackGroup()
self.__esdf_client = self.create_client(
EsdfAndGradients, esdf_service_name, callback_group=esdf_service_cb_group
)
while not self.__esdf_client.wait_for_service(timeout_sec=1.0):
self.get_logger().info(
f'Service({esdf_service_name}) not available, waiting again...'
)
self.__esdf_req = EsdfAndGradients.Request()
self.warmup()
self.__query_count = 0
self.__tensor_args = self.motion_gen.tensor_args
self.subscription = self.create_subscription(
JointState, self.__joint_states_topic, self.js_callback, 10
)
self.__js_buffer = None
def js_callback(self, msg):
self.__js_buffer = {
'joint_names': msg.name,
'position': msg.position,
'velocity': msg.velocity,
}
def warmup(self):
robot_file = self.get_parameter('robot').get_parameter_value().string_value
if robot_file == '':
self.get_logger().fatal('Received empty robot file')
raise SystemExit
collision_cache_cuboid = (
self.get_parameter('collision_cache_cuboid').get_parameter_value().integer_value
)
collision_cache_mesh = (
self.get_parameter('collision_cache_mesh').get_parameter_value().integer_value
)
interpolation_dt = (
self.get_parameter('interpolation_dt').get_parameter_value().double_value
)
self.get_logger().info('Loaded robot file name: ' + robot_file)
self.get_logger().info('warming up cuMotion, wait until ready')
tensor_args = self.tensor_args
world_file = WorldConfig.from_dict(
{
'cuboid': {
'table': {
'pose': [0, 0, -0.05, 1, 0, 0, 0], # x, y, z, qw, qx, qy, qz
'dims': [2.0, 2.0, 0.1],
}
},
'voxel': {
'world_voxel': {
'dims': self.__voxel_dims,
'pose': [0, 0, 0, 1, 0, 0, 0], # x, y, z, qw, qx, qy, qz
'voxel_size': self.__voxel_size,
'feature_dtype': torch.bfloat16,
},
},
}
)
if robot_file.lower().endswith('.xrdf'):
if self.__urdf_path is None:
self.get_logger().fatal('urdf_path is required to load robot from .xrdf')
raise SystemExit
robot_dict = load_yaml(join_path(self.__xrdf_path, robot_file))
robot_dict = convert_xrdf_to_curobo(self.__urdf_path, robot_dict, self.get_logger())
else:
robot_dict = load_yaml(join_path(get_robot_configs_path(), robot_file))
if self.__urdf_path is not None:
robot_dict['robot_cfg']['kinematics']['urdf_path'] = self.__urdf_path
robot_dict = robot_dict['robot_cfg']
motion_gen_config = MotionGenConfig.load_from_robot_config(
robot_dict,
world_file,
tensor_args,
interpolation_dt=interpolation_dt,
collision_cache={
'obb': collision_cache_cuboid,
'mesh': collision_cache_mesh,
},
collision_checker_type=CollisionCheckerType.VOXEL,
ee_link_name=self.__tool_frame,
)
motion_gen = MotionGen(motion_gen_config)
self.__robot_base_frame = motion_gen.kinematics.base_link
motion_gen.warmup(enable_graph=True)
self.__world_collision = motion_gen.world_coll_checker
if not self.__add_ground_plane:
motion_gen.clear_world_cache()
self.motion_gen = motion_gen
self.get_logger().info('cuMotion is ready for planning queries!')
def update_voxel_grid(self):
self.get_logger().info('Calling ESDF service')
# The AABB minimum corner sits at minus half of the x, y, and z dims
aabb_min = Point()
aabb_min.x = -1 * self.__voxel_dims[0] / 2
aabb_min.y = -1 * self.__voxel_dims[1] / 2
aabb_min.z = -1 * self.__voxel_dims[2] / 2
# Full AABB size in meters (the grid dimensions, not the per-voxel size).
voxel_dims = Vector3()
voxel_dims.x = self.__voxel_dims[0]
voxel_dims.y = self.__voxel_dims[1]
voxel_dims.z = self.__voxel_dims[2]
esdf_future = self.send_request(aabb_min, voxel_dims)
while not esdf_future.done():
time.sleep(0.001)
response = esdf_future.result()
esdf_grid = self.get_esdf_voxel_grid(response)
if torch.max(esdf_grid.feature_tensor) <= (-1000.0 + 0.5 * self.__voxel_size + 1e-5):
self.get_logger().error('ESDF data is empty, try again after a few seconds.')
return False
self.__world_collision.update_voxel_data(esdf_grid)
self.get_logger().info('Updated ESDF grid')
return True
def send_request(self, aabb_min_m, aabb_size_m):
self.__esdf_req.aabb_min_m = aabb_min_m
self.__esdf_req.aabb_size_m = aabb_size_m
self.get_logger().info(
f'ESDF req = {self.__esdf_req.aabb_min_m}, {self.__esdf_req.aabb_size_m}'
)
esdf_future = self.__esdf_client.call_async(self.__esdf_req)
return esdf_future
def get_esdf_voxel_grid(self, esdf_data):
esdf_array = esdf_data.esdf_and_gradients
array_shape = [
esdf_array.layout.dim[0].size,
esdf_array.layout.dim[1].size,
esdf_array.layout.dim[2].size,
]
array_data = np.array(esdf_array.data)
array_data = self.__tensor_args.to_device(array_data)
# Array data is reshaped to x y z channels
array_data = array_data.view(array_shape[0], array_shape[1], array_shape[2]).contiguous()
# Flatten to a single column of per-voxel features (N x 1)
array_data = array_data.reshape(-1, 1)
# nvblox uses negative distance inside obstacles, cuRobo needs the opposite:
array_data = -1 * array_data
# nvblox assigns a value of -1000.0 to unobserved voxels; after the sign flip above they show up as +1000.0, so map them back to -1000.0 here
array_data[array_data >= 1000.0] = -1000.0
# nvblox distances are defined at the origin of each voxel; cuRobo's ESDF expects them at the voxel faces
array_data = array_data + 0.5 * self.__voxel_size
esdf_grid = CuVoxelGrid(
name='world_voxel',
dims=self.__voxel_dims,
pose=[
self.__grid_position[0],
self.__grid_position[1],
self.__grid_position[2],
1,
0.0,
0.0,
0,
], # x, y, z, qw, qx, qy, qz
voxel_size=self.__voxel_size,
feature_dtype=torch.float32,
feature_tensor=array_data,
)
return esdf_grid
def get_cumotion_collision_object(self, mv_object: CollisionObject):
objs = []
pose = mv_object.pose
world_pose = [
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.w,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
]
world_pose = Pose.from_list(world_pose)
supported_objects = True
if len(mv_object.primitives) > 0:
for k in range(len(mv_object.primitives)):
pose = mv_object.primitive_poses[k]
primitive_pose = [
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.w,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
]
object_pose = world_pose.multiply(Pose.from_list(primitive_pose)).tolist()
if mv_object.primitives[k].type == SolidPrimitive.BOX:
# cuboid:
dims = mv_object.primitives[k].dimensions
obj = Cuboid(
name=str(mv_object.id) + '_' + str(k) + '_cuboid',
pose=object_pose,
dims=dims,
)
objs.append(obj)
elif mv_object.primitives[k].type == SolidPrimitive.SPHERE:
# sphere:
radius = mv_object.primitives[k].dimensions[
mv_object.primitives[k].SPHERE_RADIUS
]
obj = Sphere(
name=str(mv_object.id) + '_' + str(k) + '_sphere',
pose=object_pose,
radius=radius,
)
objs.append(obj)
elif mv_object.primitives[k].type == SolidPrimitive.CYLINDER:
# cylinder:
cyl_height = mv_object.primitives[k].dimensions[
mv_object.primitives[k].CYLINDER_HEIGHT
]
cyl_radius = mv_object.primitives[k].dimensions[
mv_object.primitives[k].CYLINDER_RADIUS
]
obj = Cylinder(
name=str(mv_object.id) + '_' + str(k) + '_cylinder',
pose=object_pose,
height=cyl_height,
radius=cyl_radius,
)
objs.append(obj)
elif mv_object.primitives[k].type == SolidPrimitive.CONE:
self.get_logger().error('Cone primitive is not supported')
supported_objects = False
else:
self.get_logger().error('Unknown primitive type')
supported_objects = False
if len(mv_object.meshes) > 0:
for k in range(len(mv_object.meshes)):
pose = mv_object.mesh_poses[k]
mesh_pose = [
pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.w,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
]
object_pose = world_pose.multiply(Pose.from_list(mesh_pose)).tolist()
verts = mv_object.meshes[k].vertices
verts = [[v.x, v.y, v.z] for v in verts]
tris = [
[v.vertex_indices[0], v.vertex_indices[1], v.vertex_indices[2]]
for v in mv_object.meshes[k].triangles
]
obj = Mesh(
name=str(mv_object.id) + '_' + str(len(objs)) + '_mesh',
pose=object_pose,
vertices=verts,
faces=tris,
)
objs.append(obj)
return objs, supported_objects
def get_joint_trajectory(self, js: CuJointState, dt: float):
traj = RobotTrajectory()
cmd_traj = JointTrajectory()
q_traj = js.position.cpu().view(-1, js.position.shape[-1]).numpy()
vel = js.velocity.cpu().view(-1, js.position.shape[-1]).numpy()
acc = js.acceleration.view(-1, js.position.shape[-1]).cpu().numpy()
for i in range(len(q_traj)):
traj_pt = JointTrajectoryPoint()
traj_pt.positions = q_traj[i].tolist()
if js is not None and i < len(vel):
traj_pt.velocities = vel[i].tolist()
if js is not None and i < len(acc):
traj_pt.accelerations = acc[i].tolist()
time_d = rclpy.time.Duration(seconds=i * dt).to_msg()
traj_pt.time_from_start = time_d
cmd_traj.points.append(traj_pt)
cmd_traj.joint_names = js.joint_names
cmd_traj.header.stamp = self.get_clock().now().to_msg()
traj.joint_trajectory = cmd_traj
return traj
def update_world_objects(self, moveit_objects):
world_update_status = True
if len(moveit_objects) > 0:
cuboid_list = []
sphere_list = []
cylinder_list = []
mesh_list = []
for i, obj in enumerate(moveit_objects):
cumotion_objects, world_update_status = self.get_cumotion_collision_object(obj)
for cumotion_object in cumotion_objects:
if isinstance(cumotion_object, Cuboid):
cuboid_list.append(cumotion_object)
elif isinstance(cumotion_object, Cylinder):
cylinder_list.append(cumotion_object)
elif isinstance(cumotion_object, Sphere):
sphere_list.append(cumotion_object)
elif isinstance(cumotion_object, Mesh):
mesh_list.append(cumotion_object)
world_model = WorldConfig(
cuboid=cuboid_list,
cylinder=cylinder_list,
sphere=sphere_list,
mesh=mesh_list,
).get_collision_check_world()
self.motion_gen.update_world(world_model)
if self.__read_esdf_grid:
world_update_status = self.update_voxel_grid()
if self.__publish_curobo_world_as_voxels:
voxels = self.__world_collision.get_esdf_in_bounding_box(
Cuboid(
name='test',
pose=[0.0, 0.0, 0.0, 1, 0, 0, 0], # x, y, z, qw, qx, qy, qz
dims=self.__voxel_dims,
),
voxel_size=self.__publish_voxel_size,
)
xyzr_tensor = voxels.xyzr_tensor.clone()
xyzr_tensor[..., 3] = voxels.feature_tensor
self.publish_voxels(xyzr_tensor)
return world_update_status
def execute_callback(self, goal_handle):
self.get_logger().info('Executing goal...')
# check moveit scaling factors:
min_scaling_factor = min(goal_handle.request.request.max_velocity_scaling_factor,
goal_handle.request.request.max_acceleration_scaling_factor)
time_dilation_factor = min(1.0, min_scaling_factor)
if time_dilation_factor <= 0.0 or self.__override_moveit_scaling_factors:
time_dilation_factor = self.get_parameter(
'time_dilation_factor').get_parameter_value().double_value
self.get_logger().info('Planning with time_dilation_factor: ' +
str(time_dilation_factor))
plan_req = goal_handle.request.request
goal_handle.succeed()
scene = goal_handle.request.planning_options.planning_scene_diff
world_objects = scene.world.collision_objects
world_update_status = self.update_world_objects(world_objects)
result = MoveGroup.Result()
if not world_update_status:
result.error_code.val = MoveItErrorCodes.COLLISION_CHECKING_UNAVAILABLE
self.get_logger().error('World update failed.')
return result
start_state = None
if len(plan_req.start_state.joint_state.position) > 0:
start_state = self.motion_gen.get_active_js(
CuJointState.from_position(
position=self.tensor_args.to_device(
plan_req.start_state.joint_state.position
).unsqueeze(0),
joint_names=plan_req.start_state.joint_state.name,
)
)
else:
self.get_logger().info(
'PlanRequest start state was empty, reading current joint state'
)
if start_state is None or plan_req.start_state.is_diff:
if self.__js_buffer is None:
self.get_logger().error(
'joint_state was not received from ' + self.__joint_states_topic
)
return result
# read joint state:
state = CuJointState.from_position(
position=self.tensor_args.to_device(self.__js_buffer['position']).unsqueeze(0),
joint_names=self.__js_buffer['joint_names'],
)
state.velocity = self.tensor_args.to_device(self.__js_buffer['velocity']).unsqueeze(0)
current_joint_state = self.motion_gen.get_active_js(state)
if start_state is not None and plan_req.start_state.is_diff:
start_state.position += current_joint_state.position
start_state.velocity += current_joint_state.velocity
else:
start_state = current_joint_state
if len(plan_req.goal_constraints[0].joint_constraints) > 0:
self.get_logger().info('Calculating goal pose from Joint target')
goal_config = [
plan_req.goal_constraints[0].joint_constraints[x].position
for x in range(len(plan_req.goal_constraints[0].joint_constraints))
]
goal_jnames = [
plan_req.goal_constraints[0].joint_constraints[x].joint_name
for x in range(len(plan_req.goal_constraints[0].joint_constraints))
]
goal_state = self.motion_gen.get_active_js(
CuJointState.from_position(
position=self.tensor_args.to_device(goal_config).view(1, -1),
joint_names=goal_jnames,
)
)
goal_pose = self.motion_gen.compute_kinematics(goal_state).ee_pose.clone()
elif (
len(plan_req.goal_constraints[0].position_constraints) > 0
and len(plan_req.goal_constraints[0].orientation_constraints) > 0
):
self.get_logger().info('Using goal from Pose')
position = (
plan_req.goal_constraints[0]
.position_constraints[0]
.constraint_region.primitive_poses[0]
.position
)
position = [position.x, position.y, position.z]
orientation = plan_req.goal_constraints[0].orientation_constraints[0].orientation
orientation = [orientation.w, orientation.x, orientation.y, orientation.z]
pose_list = position + orientation
goal_pose = Pose.from_list(pose_list, tensor_args=self.tensor_args)
# Check if link names match:
position_link_name = plan_req.goal_constraints[0].position_constraints[0].link_name
orientation_link_name = (
plan_req.goal_constraints[0].orientation_constraints[0].link_name
)
plan_link_name = self.motion_gen.kinematics.ee_link
if position_link_name != orientation_link_name:
self.get_logger().error(
'Link name for Target Position "'
+ position_link_name
+ '" and Target Orientation "'
+ orientation_link_name
+ '" do not match'
)
result.error_code.val = MoveItErrorCodes.INVALID_LINK_NAME
return result
if position_link_name != plan_link_name:
self.get_logger().error(
'Link name for Target Pose "'
+ position_link_name
+ '" and Planning frame "'
+ plan_link_name
+ '" do not match, relaunch node with tool_frame = '
+ position_link_name
)
result.error_code.val = MoveItErrorCodes.INVALID_LINK_NAME
return result
        else:
            self.get_logger().error('Goal constraints not supported')
            result.error_code.val = MoveItErrorCodes.INVALID_GOAL_CONSTRAINTS
            return result
self.motion_gen.reset(reset_seed=False)
motion_gen_result = self.motion_gen.plan_single(
start_state,
goal_pose,
MotionGenPlanConfig(max_attempts=5, enable_graph_attempt=1,
time_dilation_factor=time_dilation_factor),
)
result = MoveGroup.Result()
if motion_gen_result.success.item():
result.error_code.val = MoveItErrorCodes.SUCCESS
result.trajectory_start = plan_req.start_state
traj = self.get_joint_trajectory(
motion_gen_result.optimized_plan, motion_gen_result.optimized_dt.item()
)
result.planning_time = motion_gen_result.total_time
result.planned_trajectory = traj
elif not motion_gen_result.valid_query:
self.get_logger().error(
f'Invalid planning query: {motion_gen_result.status}'
)
if motion_gen_result.status == MotionGenStatus.INVALID_START_STATE_JOINT_LIMITS:
result.error_code.val = MoveItErrorCodes.START_STATE_INVALID
if motion_gen_result.status in [
MotionGenStatus.INVALID_START_STATE_WORLD_COLLISION,
MotionGenStatus.INVALID_START_STATE_SELF_COLLISION,
]:
result.error_code.val = MoveItErrorCodes.START_STATE_IN_COLLISION
else:
self.get_logger().error(
                f'Motion planning failed with status: {motion_gen_result.status}'
)
if motion_gen_result.status == MotionGenStatus.IK_FAIL:
result.error_code.val = MoveItErrorCodes.NO_IK_SOLUTION
self.get_logger().info(
'returned planning result (query, success, failure_status): '
+ str(self.__query_count)
+ ' '
+ str(motion_gen_result.success.item())
+ ' '
+ str(motion_gen_result.status)
)
self.__query_count += 1
return result
def publish_voxels(self, voxels):
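        # Publish occupied ESDF voxels (feature value >= 0) as a CUBE_LIST Marker,
        # capped at max_publish_voxels and colored by the stored distance value.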
vox_size = self.__publish_voxel_size
# create marker:
marker = Marker()
marker.header.frame_id = self.__robot_base_frame
marker.id = 0
marker.type = 6 # cube list
marker.ns = 'curobo_world'
marker.action = 0
marker.pose.orientation.w = 1.0
marker.lifetime = rclpy.duration.Duration(seconds=1000.0).to_msg()
marker.frame_locked = False
marker.scale.x = vox_size
marker.scale.y = vox_size
marker.scale.z = vox_size
# get only voxels that are inside surfaces:
voxels = voxels[voxels[:, 3] >= 0.0]
vox = voxels.view(-1, 4).cpu().numpy()
marker.points = []
for i in range(min(len(vox), self.__max_publish_voxels)):
pt = Point()
pt.x = float(vox[i, 0])
pt.y = float(vox[i, 1])
pt.z = float(vox[i, 2])
color = ColorRGBA()
d = vox[i, 3]
rgba = [min(1.0, 1.0 - float(d)), 0.0, 0.0, 1.0]
color.r = rgba[0]
color.g = rgba[1]
color.b = rgba[2]
color.a = rgba[3]
marker.colors.append(color)
marker.points.append(pt)
# publish voxels:
marker.header.stamp = self.get_clock().now().to_msg()
self.__voxel_pub.publish(marker)
def main(args=None):
rclpy.init(args=args)
cumotion_action_server = CumotionActionServer()
executor = MultiThreadedExecutor()
executor.add_node(cumotion_action_server)
try:
executor.spin()
except KeyboardInterrupt:
cumotion_action_server.get_logger().info('KeyboardInterrupt, shutting down.\n')
cumotion_action_server.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| 30,274 |
Python
| 40.816298 | 98 | 0.562397 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion/isaac_ros_cumotion/robot_segmenter.py
|
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
from copy import deepcopy
import threading
import time
from curobo.types.base import TensorDeviceType
from curobo.types.camera import CameraObservation
from curobo.types.math import Pose as CuPose
from curobo.types.state import JointState as CuJointState
from curobo.util_file import get_robot_configs_path
from curobo.util_file import join_path
from curobo.util_file import load_yaml
from curobo.wrap.model.robot_segmenter import RobotSegmenter
from cv_bridge import CvBridge
from isaac_ros_cumotion.util import get_spheres_marker
from isaac_ros_cumotion.xrdf_utils import convert_xrdf_to_curobo
from message_filters import ApproximateTimeSynchronizer
from message_filters import Subscriber
import numpy as np
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image
from sensor_msgs.msg import JointState
from tf2_ros import TransformException
from tf2_ros.buffer import Buffer
from tf2_ros.transform_listener import TransformListener
import torch
from visualization_msgs.msg import MarkerArray
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
class CumotionRobotSegmenter(Node):
"""This node filters out depth pixels assosiated with a robot body using a mask."""
def __init__(self):
super().__init__('cumotion_robot_segmentation')
self.declare_parameter('robot', 'ur5e.yml')
self.declare_parameter('cuda_device', 0)
self.declare_parameter('distance_threshold', 0.1)
self.declare_parameter('time_sync_slop', 0.1)
self.declare_parameter('tf_lookup_duration', 5.0)
self.declare_parameter('joint_states_topic', '/joint_states')
self.declare_parameter('debug_robot_topic', '/cumotion/robot_segmenter/robot_spheres')
self.declare_parameter('depth_image_topics', ['/cumotion/depth_1/image_raw'])
self.declare_parameter('depth_camera_infos', ['/cumotion/depth_1/camera_info'])
self.declare_parameter('robot_mask_publish_topics', ['/cumotion/depth_1/robot_mask'])
self.declare_parameter('world_depth_publish_topics', ['/cumotion/depth_1/world_depth'])
self.declare_parameter('log_debug', False)
self.declare_parameter('urdf_path', rclpy.Parameter.Type.STRING)
try:
self.__urdf_path = self.get_parameter('urdf_path')
self.__urdf_path = self.__urdf_path.get_parameter_value().string_value
except rclpy.exceptions.ParameterUninitializedException:
self.__urdf_path = None
distance_threshold = (
self.get_parameter('distance_threshold').get_parameter_value().double_value)
time_sync_slop = self.get_parameter('time_sync_slop').get_parameter_value().double_value
self._tf_lookup_duration = (
self.get_parameter('tf_lookup_duration').get_parameter_value().double_value
)
joint_states_topic = (
self.get_parameter('joint_states_topic').get_parameter_value().string_value)
debug_robot_topic = (
self.get_parameter('debug_robot_topic').get_parameter_value().string_value)
depth_image_topics = (
self.get_parameter('depth_image_topics').get_parameter_value().string_array_value)
depth_camera_infos = (
self.get_parameter('depth_camera_infos').get_parameter_value().string_array_value)
publish_mask_topics = (
self.get_parameter(
'robot_mask_publish_topics').get_parameter_value().string_array_value)
world_depth_topics = (
self.get_parameter(
'world_depth_publish_topics').get_parameter_value().string_array_value)
self._log_debug = self.get_parameter('log_debug').get_parameter_value().bool_value
num_cameras = len(depth_image_topics)
self._num_cameras = num_cameras
if len(depth_camera_infos) != num_cameras:
self.get_logger().error(
'Number of topics in depth_camera_infos does not match depth_image_topics')
if len(publish_mask_topics) != num_cameras:
self.get_logger().error(
'Number of topics in publish_mask_topics does not match depth_image_topics')
if len(world_depth_topics) != num_cameras:
self.get_logger().error(
'Number of topics in world_depth_topics does not match depth_image_topics')
cuda_device_id = self.get_parameter('cuda_device').get_parameter_value().integer_value
self._tensor_args = TensorDeviceType(device=torch.device('cuda', cuda_device_id))
# Create subscribers:
subscribers = [Subscriber(self, Image, topic) for topic in depth_image_topics]
subscribers.append(Subscriber(self, JointState, joint_states_topic))
# Subscribe to topics with sync:
self.approx_time_sync = ApproximateTimeSynchronizer(
tuple(subscribers), queue_size=100, slop=time_sync_slop)
self.approx_time_sync.registerCallback(self.process_depth_and_joint_state)
self.info_subscribers = []
for idx in range(num_cameras):
self.info_subscribers.append(
self.create_subscription(
CameraInfo, depth_camera_infos[idx],
lambda msg, index=idx: self.camera_info_cb(msg, index), 10)
)
self.mask_publishers = [
self.create_publisher(Image, topic, 10) for topic in publish_mask_topics]
self.segmented_publishers = [
self.create_publisher(Image, topic, 10) for topic in world_depth_topics]
self.debug_robot_publisher = self.create_publisher(MarkerArray, debug_robot_topic, 10)
self.tf_buffer = Buffer(cache_time=rclpy.duration.Duration(seconds=60.0))
self.tf_listener = TransformListener(self.tf_buffer, self)
        # Load the robot config name and create the CV bridge:
robot_file = self.get_parameter('robot').get_parameter_value().string_value
self.br = CvBridge()
# Create buffers to store data:
self._depth_buffers = None
self._depth_intrinsics = [None for x in range(num_cameras)]
self._robot_pose_camera = [None for x in range(num_cameras)]
self._depth_encoding = None
self._js_buffer = None
self._timestamp = None
self._camera_headers = []
self.lock = threading.Lock()
self.timer = self.create_timer(0.01, self.on_timer)
robot_dict = load_yaml(join_path(get_robot_configs_path(), robot_file))
if robot_file.lower().endswith('.xrdf'):
if self.__urdf_path is None:
self.get_logger().fatal('urdf_path is required to load robot from .xrdf')
raise SystemExit
robot_dict = convert_xrdf_to_curobo(self.__urdf_path, robot_dict, self.get_logger())
if self.__urdf_path is not None:
robot_dict['robot_cfg']['kinematics']['urdf_path'] = self.__urdf_path
self._cumotion_segmenter = RobotSegmenter.from_robot_file(
robot_dict, distance_threshold=distance_threshold)
self._cumotion_base_frame = self._cumotion_segmenter.base_link
self._robot_pose_cameras = None
self.get_logger().info(f'Node initialized with {self._num_cameras} cameras')
def process_depth_and_joint_state(self, *msgs):
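        # Time-synchronized callback: buffer the latest depth images (converting 32FC1
        # meters to millimeters) along with their headers and the current joint state.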
self._depth_buffers = []
self._depth_encoding = []
self._camera_headers = []
for msg in msgs:
if (isinstance(msg, Image)):
img = self.br.imgmsg_to_cv2(msg)
if msg.encoding == '32FC1':
img = 1000.0 * img
self._depth_buffers.append(img)
self._camera_headers.append(msg.header)
self._depth_encoding.append(msg.encoding)
if (isinstance(msg, JointState)):
self._js_buffer = {'joint_names': msg.name, 'position': msg.position}
self._timestamp = msg.header.stamp
def camera_info_cb(self, msg, idx):
self._depth_intrinsics[idx] = msg.k
def publish_robot_spheres(self, traj: CuJointState):
kin_state = self._cumotion_segmenter.robot_world.get_kinematics(traj.position)
spheres = kin_state.link_spheres_tensor.cpu().numpy()
current_time = self.get_clock().now().to_msg()
m_arr = get_spheres_marker(
spheres[0],
self._cumotion_base_frame,
current_time,
rgb=[0.0, 1.0, 0.0, 1.0],
)
self.debug_robot_publisher.publish(m_arr)
def is_subscribed(self) -> bool:
count_mask = max(
[mask_pub.get_subscription_count() for mask_pub in self.mask_publishers]
+ [seg_pub.get_subscription_count() for seg_pub in self.segmented_publishers]
)
if count_mask > 0:
return True
return False
def publish_images(self, depth_mask, segmented_depth, camera_header, idx: int):
if self.mask_publishers[idx].get_subscription_count() > 0:
depth_mask = depth_mask[idx]
msg = self.br.cv2_to_imgmsg(depth_mask, 'mono8')
msg.header = camera_header[idx]
self.mask_publishers[idx].publish(msg)
if self.segmented_publishers[idx].get_subscription_count() > 0:
segmented_depth = segmented_depth[idx]
if self._depth_encoding[idx] == '16UC1':
segmented_depth = segmented_depth.astype(np.uint16)
elif self._depth_encoding[idx] == '32FC1':
segmented_depth = segmented_depth / 1000.0
msg = self.br.cv2_to_imgmsg(segmented_depth, self._depth_encoding[idx])
msg.header = camera_header[idx]
self.segmented_publishers[idx].publish(msg)
def on_timer(self):
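        # Periodic worker: once camera intrinsics, TFs, and a joint state are available,
        # run the cuRobo robot segmenter on the buffered depth images and publish the
        # robot masks, robot-free depth images, and optional debug spheres.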
computation_time = -1.0
node_time = -1.0
if not self.is_subscribed():
return
if ((not all(isinstance(intrinsic, np.ndarray) for intrinsic in self._depth_intrinsics))
or (len(self._camera_headers) == 0) or (self._timestamp is None)):
return
timestamp = self._timestamp
# Read camera transforms
if self._robot_pose_cameras is None:
self.get_logger().info('Reading TF from cameras')
with self.lock:
camera_headers = deepcopy(self._camera_headers)
for i in range(self._num_cameras):
if self._robot_pose_camera[i] is None:
try:
t = self.tf_buffer.lookup_transform(
self._cumotion_base_frame,
camera_headers[i].frame_id,
timestamp,
rclpy.duration.Duration(seconds=self._tf_lookup_duration),
)
self._robot_pose_camera[i] = CuPose.from_list(
[
t.transform.translation.x,
t.transform.translation.y,
t.transform.translation.z,
t.transform.rotation.w,
t.transform.rotation.x,
t.transform.rotation.y,
t.transform.rotation.z,
]
)
except TransformException as ex:
                        self.get_logger().debug(
                            f'Could not transform {camera_headers[i].frame_id} to '
                            f'{self._cumotion_base_frame}: {ex}'
                        )
continue
if None not in self._robot_pose_camera:
self._robot_pose_cameras = CuPose.cat(self._robot_pose_camera)
self.get_logger().info('Received TF from cameras to robot')
# Check if all camera transforms have been received
if self._robot_pose_cameras is None:
return
with self.lock:
timestamp = self._timestamp
depth_image = np.copy(np.stack((self._depth_buffers)))
intrinsics = np.copy(np.stack(self._depth_intrinsics))
js = np.copy(self._js_buffer['position'])
j_names = deepcopy(self._js_buffer['joint_names'])
camera_headers = deepcopy(self._camera_headers)
self._timestamp = None
self._camera_headers = []
start_node_time = time.time()
depth_image = self._tensor_args.to_device(depth_image.astype(np.float32))
depth_image = depth_image.view(
self._num_cameras, depth_image.shape[-2], depth_image.shape[-1])
if not self._cumotion_segmenter.ready:
intrinsics = self._tensor_args.to_device(intrinsics).view(self._num_cameras, 3, 3)
cam_obs = CameraObservation(depth_image=depth_image, intrinsics=intrinsics)
self._cumotion_segmenter.update_camera_projection(cam_obs)
self.get_logger().info('Updated Projection Matrices')
cam_obs = CameraObservation(depth_image=depth_image, pose=self._robot_pose_cameras)
q = CuJointState.from_numpy(
position=js, joint_names=j_names, tensor_args=self._tensor_args).unsqueeze(0)
q = self._cumotion_segmenter.robot_world.get_active_js(q)
start_segmentation_time = time.time()
depth_mask, segmented_depth = self._cumotion_segmenter.get_robot_mask_from_active_js(
cam_obs, q)
if self._log_debug:
torch.cuda.synchronize()
computation_time = time.time() - start_segmentation_time
depth_mask = depth_mask.cpu().numpy().astype(np.uint8) * 255
segmented_depth = segmented_depth.cpu().numpy()
for x in range(depth_mask.shape[0]):
self.publish_images(depth_mask, segmented_depth, camera_headers, x)
if self.debug_robot_publisher.get_subscription_count() > 0:
self.publish_robot_spheres(q)
if self._log_debug:
node_time = time.time() - start_node_time
            self.get_logger().info(
                f'Node Time(ms), Computation Time(ms): '
                f'{node_time * 1000.0}, {computation_time * 1000.0}'
            )
def main(args=None):
# Initialize the rclpy library
rclpy.init(args=args)
# Create the node
cumotion_segmenter = CumotionRobotSegmenter()
# Spin the node so the callback function is called.
rclpy.spin(cumotion_segmenter)
# Destroy the node explicitly
cumotion_segmenter.destroy_node()
# Shutdown the ROS client library for Python
rclpy.shutdown()
if __name__ == '__main__':
main()
| 15,118 |
Python
| 42.445402 | 100 | 0.618071 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion/isaac_ros_cumotion/xrdf_utils.py
|
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
from typing import Any, Dict, Union
from curobo.cuda_robot_model.urdf_kinematics_parser import UrdfKinematicsParser
from curobo.util_file import load_yaml
def return_value_if_exists(input_dict: Dict, key: str, logger, suffix: str = 'xrdf') -> Any:
if key not in input_dict:
logger.error(key + ' key not found in ' + suffix)
raise ValueError(key + ' key not found in ' + suffix)
return input_dict[key]
def convert_xrdf_to_curobo(urdf_string: str, input_dict: Union[str, Dict], logger) -> Dict:
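    # Translate an XRDF description (collision spheres, self-collision settings, tool
    # frames, cspace limits, and modifiers) into a cuRobo robot_cfg dictionary.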
# urdf_string needs to be a path to a urdf file.
if isinstance(input_dict, str):
input_dict = load_yaml(input_dict)
if return_value_if_exists(input_dict, 'format', logger) != 'xrdf':
logger.error('format is not xrdf')
raise ValueError('format is not xrdf')
if return_value_if_exists(input_dict, 'format_version', logger) > 1.0:
logger.warn('format_version is greater than 1.0')
# Also get base link as root of urdf
kinematics_parser = UrdfKinematicsParser(urdf_string, build_scene_graph=True)
joint_names = kinematics_parser.get_controlled_joint_names()
base_link = kinematics_parser.root_link
output_dict = {}
if 'collision' in input_dict:
coll_name = return_value_if_exists(input_dict['collision'], 'geometry', logger)
if 'spheres' not in input_dict['geometry'][coll_name]:
logger.error('spheres key not found in xrdf')
raise ValueError('spheres key not found in xrdf')
coll_spheres = return_value_if_exists(input_dict['geometry'][coll_name], 'spheres', logger)
output_dict['collision_spheres'] = coll_spheres
buffer_distance = return_value_if_exists(
input_dict['collision'], 'buffer_distance', logger
)
output_dict['collision_sphere_buffer'] = buffer_distance
output_dict['collision_link_names'] = list(coll_spheres.keys())
if 'self_collision' in input_dict:
if input_dict['self_collision']['geometry'] != input_dict['collision']['geometry']:
logger.error('self_collision geometry does not match collision geometry')
raise ValueError('self_collision geometry does not match collision geometry')
self_collision_ignore = return_value_if_exists(
input_dict['self_collision'],
'ignore',
logger,
)
self_collision_buffer = return_value_if_exists(
input_dict['self_collision'],
'buffer_distance',
logger,
)
output_dict['self_collision_ignore'] = self_collision_ignore
output_dict['self_collision_buffer'] = self_collision_buffer
else:
logger.error('self_collision key not found in xrdf')
raise ValueError('self_collision key not found in xrdf')
else:
logger.warn('collision key not found in xrdf, collision avoidance is disabled')
tool_frames = return_value_if_exists(input_dict, 'tool_frames', logger)
output_dict['ee_link'] = tool_frames[0]
output_dict['link_names'] = None
if len(tool_frames) > 1:
output_dict['link_names'] = input_dict['tool_frames']
# cspace:
cspace_dict = return_value_if_exists(input_dict, 'cspace', logger)
active_joints = return_value_if_exists(cspace_dict, 'joint_names', logger)
default_joint_positions = return_value_if_exists(input_dict, 'default_joint_positions', logger)
active_config = []
locked_joints = {}
for j in joint_names:
if j in active_joints:
if j in default_joint_positions:
active_config.append(default_joint_positions[j])
else:
active_config.append(0.0)
else:
locked_joints[j] = 0.0
if j in default_joint_positions:
locked_joints[j] = default_joint_positions[j]
acceleration_limits = return_value_if_exists(cspace_dict, 'acceleration_limits', logger)
jerk_limits = return_value_if_exists(cspace_dict, 'jerk_limits', logger)
max_acc = max(acceleration_limits)
max_jerk = max(jerk_limits)
output_dict['lock_joints'] = locked_joints
all_joint_names = active_joints + list(locked_joints.keys())
output_cspace = {
'joint_names': all_joint_names,
'retract_config': active_config + list(locked_joints.values()),
'null_space_weight': [1.0 for _ in range(len(all_joint_names))],
'cspace_distance_weight': [1.0 for _ in range(len(all_joint_names))],
'max_acceleration': acceleration_limits
+ [max_acc for _ in range(len(all_joint_names) - len(active_joints))],
'max_jerk': jerk_limits
+ [max_jerk for _ in range(len(all_joint_names) - len(active_joints))],
}
output_dict['cspace'] = output_cspace
extra_links = {}
if 'modifiers' in input_dict:
for k in range(len(input_dict['modifiers'])):
mod_list = list(input_dict['modifiers'][k].keys())
if len(mod_list) > 1:
logger.error('Each modifier should have only one key')
raise ValueError('Each modifier should have only one key')
mod_type = mod_list[0]
if mod_type == 'set_base_frame':
base_link = input_dict['modifiers'][k]['set_base_frame']
elif mod_type == 'add_frame':
frame_data = input_dict['modifiers'][k]['add_frame']
extra_links[frame_data['frame_name']] = {
'parent_link_name': frame_data['parent_frame_name'],
'link_name': frame_data['frame_name'],
'joint_name': frame_data['joint_name'],
'joint_type': frame_data['joint_type'],
'fixed_transform': frame_data['fixed_transform']['position']
+ [frame_data['fixed_transform']['orientation']['w']]
+ frame_data['fixed_transform']['orientation']['xyz'],
}
else:
logger.warn('XRDF modifier "' + mod_type + '" not recognized')
output_dict['extra_links'] = extra_links
output_dict['base_link'] = base_link
output_dict['urdf_path'] = urdf_string
output_dict = {'robot_cfg': {'kinematics': output_dict}}
return output_dict
| 6,828 |
Python
| 41.949685 | 99 | 0.619801 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion/params/robot_segmentation_params.yaml
|
/**:
ros__parameters:
robot: ur5e.yml
depth_image_topics: [/depth_image]
depth_camera_infos: [/rgb/camera_info]
robot_mask_publish_topics: [/cumotion/camera_1/robot_mask]
world_depth_publish_topics: [/cumotion/camera_1/world_depth]
joint_states_topic: /joint_states
debug_robot_topic: /cumotion/robot_segmenter/robot_spheres
distance_threshold: 0.2
time_sync_slop: 0.1
tf_lookup_duration: 5.0
cuda_device: 0
log_debug: False
urdf_path: null
| 496 |
YAML
| 30.062498 | 64 | 0.679435 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion/params/isaac_ros_cumotion_params.yaml
|
/**:
ros__parameters:
robot: ""
time_dilation_factor: 0.5
collision_cache_mesh: 20
collision_cache_cuboid: 20
interpolation_dt: 0.02
voxel_dims: [2.0, 2.0, 2.0]
voxel_size: 0.05
read_esdf_world: False
publish_curobo_world_as_voxels: False
add_ground_plane: False
publish_voxel_size: 0.05
max_publish_voxels: 50000
tool_frame: ""
grid_position: [0.0, 0.0, 0.0]
esdf_service_name: "/nvblox_node/get_esdf_and_gradient"
urdf_path: ""
enable_curobo_debug_mode: False
override_moveit_scaling_factors: False
| 574 |
YAML
| 27.749999 | 59 | 0.644599 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/cumotion_planner_plugin_description.xml
|
<library path="libisaac_ros_cumotion_moveit">
<class name="isaac_ros_cumotion_moveit/CumotionPlanner" type="nvidia::isaac::manipulation::CumotionPlannerManager" base_class_type="planning_interface::PlannerManager">
<description>
The cuMotion planner generates collision-free trajectories leveraging CUDA compute.
</description>
</class>
</library>
| 366 |
XML
| 44.874994 | 170 | 0.770492 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/src/cumotion_interface.cpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#include "isaac_ros_cumotion_moveit/cumotion_interface.hpp"
#include <chrono>
#include <memory>
#include "moveit/planning_interface/planning_interface.h"
#include "moveit/planning_scene/planning_scene.h"
#include "moveit/robot_state/conversions.h"
#include "rclcpp/rclcpp.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
namespace
{
constexpr unsigned kSleepIntervalInMs = 5;
constexpr unsigned kTimeoutIntervalInSeconds = 5;
} // namespace
bool CumotionInterface::solve(
const planning_scene::PlanningSceneConstPtr & planning_scene,
const planning_interface::MotionPlanRequest & request,
planning_interface::MotionPlanDetailedResponse & response)
{
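  // Send the MotionPlanRequest to the cuMotion move_group action server, then poll
  // for the result until it is ready or kTimeoutIntervalInSeconds elapses.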
RCLCPP_INFO(node_->get_logger(), "Planning trajectory");
if (!planner_busy) {
action_client_->updateGoal(planning_scene, request);
action_client_->sendGoal();
planner_busy = true;
}
rclcpp::Time start_time = node_->now();
while (
!action_client_->result_ready &&
node_->now().seconds() - start_time.seconds() < kTimeoutIntervalInSeconds)
{
action_client_->getGoal();
std::this_thread::sleep_for(std::chrono::milliseconds(kSleepIntervalInMs));
}
if (!action_client_->result_ready) {
RCLCPP_ERROR(node_->get_logger(), "Timed out!");
planner_busy = false;
return false;
}
RCLCPP_INFO(node_->get_logger(), "Received trajectory result");
if (!action_client_->success) {
RCLCPP_ERROR(node_->get_logger(), "No trajectory");
response.error_code_.val = moveit_msgs::msg::MoveItErrorCodes::PLANNING_FAILED;
planner_busy = false;
return false;
}
RCLCPP_INFO(node_->get_logger(), "Trajectory success!");
response.error_code_ = action_client_->plan_response.error_code;
response.description_ = action_client_->plan_response.description;
auto result_traj = std::make_shared<robot_trajectory::RobotTrajectory>(
planning_scene->getRobotModel(), request.group_name);
moveit::core::RobotState robot_state(planning_scene->getRobotModel());
moveit::core::robotStateMsgToRobotState(
action_client_->plan_response.trajectory_start,
robot_state);
result_traj->setRobotTrajectoryMsg(
robot_state,
action_client_->plan_response.trajectory[0]);
response.trajectory_.clear();
response.trajectory_.push_back(result_traj);
response.processing_time_ = action_client_->plan_response.processing_time;
planner_busy = false;
return true;
}
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
| 3,225 |
C++
| 30.627451 | 83 | 0.726822 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/src/cumotion_move_group_client.cpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#include "isaac_ros_cumotion_moveit/cumotion_move_group_client.hpp"
#include <chrono>
#include <future>
#include <memory>
#include <string>
#include "moveit_msgs/action/move_group.hpp"
#include "moveit_msgs/msg/planning_options.hpp"
#include "rclcpp/rclcpp.hpp"
#include "rclcpp_action/rclcpp_action.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
namespace
{
constexpr unsigned kGetGoalWaitIntervalInMs = 10;
} // namespace
CumotionMoveGroupClient::CumotionMoveGroupClient(const rclcpp::Node::SharedPtr & node)
: result_ready(false),
success(false),
get_goal_handle_(false),
get_result_handle_(false),
node_(node),
client_cb_group_(node->create_callback_group(rclcpp::CallbackGroupType::MutuallyExclusive))
{
client_ = rclcpp_action::create_client<moveit_msgs::action::MoveGroup>(
node_,
"cumotion/move_group",
client_cb_group_);
send_goal_options_ = rclcpp_action::Client<moveit_msgs::action::MoveGroup>::SendGoalOptions();
send_goal_options_.goal_response_callback = std::bind(
&CumotionMoveGroupClient::goalResponseCallback, this, std::placeholders::_1);
send_goal_options_.feedback_callback = std::bind(
&CumotionMoveGroupClient::feedbackCallback, this, std::placeholders::_1, std::placeholders::_2);
send_goal_options_.result_callback = std::bind(
&CumotionMoveGroupClient::resultCallback, this, std::placeholders::_1);
}
void CumotionMoveGroupClient::updateGoal(
const planning_scene::PlanningSceneConstPtr & planning_scene,
const planning_interface::MotionPlanRequest & req)
{
planning_request_ = req;
planning_scene->getPlanningSceneMsg(planning_scene_);
}
bool CumotionMoveGroupClient::sendGoal()
{
result_ready = false;
success = false;
moveit_msgs::msg::PlanningOptions plan_options;
plan_options.planning_scene_diff = planning_scene_;
if (!client_->wait_for_action_server()) {
RCLCPP_ERROR(node_->get_logger(), "Action server not available after waiting");
rclcpp::shutdown();
}
auto goal_msg = moveit_msgs::action::MoveGroup::Goal();
goal_msg.planning_options = plan_options;
goal_msg.request = planning_request_;
RCLCPP_INFO(node_->get_logger(), "Sending goal");
auto goal_handle_future = client_->async_send_goal(goal_msg, send_goal_options_);
goal_h_ = goal_handle_future;
get_result_handle_ = true;
get_goal_handle_ = true;
return true;
}
void CumotionMoveGroupClient::getGoal()
{
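  // Non-blocking polling: wait briefly on the goal-handle future first, then on the
  // result future, so the caller's spin loop is never blocked for long.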
using namespace std::chrono_literals;
if (get_goal_handle_) {
if (goal_h_.wait_for(std::chrono::milliseconds(kGetGoalWaitIntervalInMs)) !=
std::future_status::ready)
{
return;
}
GoalHandle::SharedPtr goal_handle = goal_h_.get();
if (!goal_handle) {
RCLCPP_ERROR(node_->get_logger(), "Goal was rejected by server");
return;
}
auto result_future = client_->async_get_result(goal_handle);
result_future_ = result_future;
get_goal_handle_ = false;
}
if (get_result_handle_) {
if (result_future_.wait_for(std::chrono::milliseconds(kGetGoalWaitIntervalInMs)) !=
std::future_status::ready)
{
return;
}
auto res = result_future_.get();
RCLCPP_INFO(node_->get_logger(), "Checking results");
if (res.code == rclcpp_action::ResultCode::SUCCEEDED) {
RCLCPP_INFO(node_->get_logger(), "Success");
result_ready = true;
success = false;
plan_response.error_code = res.result->error_code;
if (plan_response.error_code.val == 1) {
success = true;
plan_response.trajectory_start = res.result->trajectory_start;
plan_response.group_name = planning_request_.group_name;
plan_response.trajectory.resize(1);
plan_response.trajectory[0] = res.result->planned_trajectory;
plan_response.processing_time = {res.result->planning_time};
}
} else {
RCLCPP_INFO(node_->get_logger(), "Failed");
}
get_result_handle_ = false;
}
}
void CumotionMoveGroupClient::goalResponseCallback(const GoalHandle::SharedPtr & future)
{
auto goal_handle = future.get();
if (!goal_handle) {
RCLCPP_ERROR(node_->get_logger(), "Goal was rejected by server");
result_ready = true;
success = false;
} else {
RCLCPP_INFO(node_->get_logger(), "Goal accepted by server, waiting for result");
}
}
void CumotionMoveGroupClient::feedbackCallback(
GoalHandle::SharedPtr,
const std::shared_ptr<const moveit_msgs::action::MoveGroup::Feedback> feedback)
{
std::string status = feedback->state;
RCLCPP_INFO(node_->get_logger(), "Checking status");
  RCLCPP_INFO(node_->get_logger(), "%s", status.c_str());
}
void CumotionMoveGroupClient::resultCallback(const GoalHandle::WrappedResult & result)
{
RCLCPP_INFO(node_->get_logger(), "Received result");
result_ready = true;
success = false;
switch (result.code) {
case rclcpp_action::ResultCode::SUCCEEDED:
break;
case rclcpp_action::ResultCode::ABORTED:
RCLCPP_ERROR(node_->get_logger(), "Goal was aborted");
return;
case rclcpp_action::ResultCode::CANCELED:
RCLCPP_ERROR(node_->get_logger(), "Goal was canceled");
return;
default:
RCLCPP_ERROR(node_->get_logger(), "Unknown result code");
return;
}
plan_response.error_code = result.result->error_code;
if (plan_response.error_code.val == 1) {
success = true;
plan_response.trajectory_start = result.result->trajectory_start;
plan_response.group_name = planning_request_.group_name;
plan_response.trajectory = {result.result->planned_trajectory};
plan_response.processing_time = {result.result->planning_time};
}
}
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
| 6,424 |
C++
| 29.595238 | 100 | 0.696762 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/src/cumotion_planning_context.cpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#include "isaac_ros_cumotion_moveit/cumotion_planning_context.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
bool CumotionPlanningContext::solve(planning_interface::MotionPlanDetailedResponse & res)
{
return cumotion_interface_->solve(planning_scene_, request_, res);
}
bool CumotionPlanningContext::solve(planning_interface::MotionPlanResponse & res)
{
planning_interface::MotionPlanDetailedResponse res_detailed;
bool planning_success = solve(res_detailed);
res.error_code_ = res_detailed.error_code_;
if (planning_success) {
res.trajectory_ = res_detailed.trajectory_[0];
res.planning_time_ = res_detailed.processing_time_[0];
}
return planning_success;
}
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
| 1,514 |
C++
| 29.299999 | 89 | 0.752972 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/src/cumotion_planner_manager.cpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#include "isaac_ros_cumotion_moveit/cumotion_planner_manager.hpp"
#include "moveit/planning_interface/planning_interface.h"
#include "moveit/planning_scene/planning_scene.h"
#include "pluginlib/class_list_macros.hpp"
#include "isaac_ros_cumotion_moveit/cumotion_planning_context.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
bool CumotionPlannerManager::initialize(
const moveit::core::RobotModelConstPtr & model,
const rclcpp::Node::SharedPtr & node,
const std::string & parameter_namespace)
{
node_ = node;
for (const std::string & group_name : model->getJointModelGroupNames()) {
planning_contexts_[group_name] =
std::make_shared<CumotionPlanningContext>("cumotion_planning_context", group_name, node);
}
  static_cast<void>(parameter_namespace);  // Suppress "unused" warning.
return true;
}
std::string CumotionPlannerManager::getDescription() const
{
return "Generate minimum-jerk trajectories using NVIDIA Isaac ROS cuMotion";
}
void CumotionPlannerManager::getPlanningAlgorithms(std::vector<std::string> & algs) const
{
algs.clear();
algs.push_back(kCumotionPlannerId);
}
planning_interface::PlanningContextPtr CumotionPlannerManager::getPlanningContext(
const planning_scene::PlanningSceneConstPtr & planning_scene,
const planning_interface::MotionPlanRequest & req,
moveit_msgs::msg::MoveItErrorCodes & error_code) const
{
error_code.val = moveit_msgs::msg::MoveItErrorCodes::SUCCESS;
if (!planning_scene) {
RCLCPP_ERROR(node_->get_logger(), "No planning scene supplied as input");
error_code.val = moveit_msgs::msg::MoveItErrorCodes::FAILURE;
return planning_interface::PlanningContextPtr();
}
if (req.group_name.empty()) {
RCLCPP_ERROR(node_->get_logger(), "No group specified to plan for");
error_code.val = moveit_msgs::msg::MoveItErrorCodes::INVALID_GROUP_NAME;
return planning_interface::PlanningContextPtr();
}
// Retrieve and configure existing context.
const std::shared_ptr<CumotionPlanningContext> & context = planning_contexts_.at(req.group_name);
context->setPlanningScene(planning_scene);
context->setMotionPlanRequest(req);
error_code.val = moveit_msgs::msg::MoveItErrorCodes::SUCCESS;
return context;
}
void CumotionPlannerManager::setPlannerConfigurations(
const planning_interface::PlannerConfigurationMap & pcs)
{
planner_configs_ = pcs;
}
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
// Register the `CumotionPlannerManager` class as a plugin.
PLUGINLIB_EXPORT_CLASS(
nvidia::isaac::manipulation::CumotionPlannerManager,
planning_interface::PlannerManager)
| 3,434 |
C++
| 32.349514 | 99 | 0.753058 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/include/isaac_ros_cumotion_moveit/cumotion_planner_manager.hpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#ifndef ISAAC_ROS_CUMOTION_PLANNER_MANAGER_H
#define ISAAC_ROS_CUMOTION_PLANNER_MANAGER_H
#include <map>
#include <string>
#include <vector>
#include "moveit/planning_interface/planning_interface.h"
#include "moveit/planning_scene/planning_scene.h"
#include "isaac_ros_cumotion_moveit/cumotion_planning_context.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
class CumotionPlannerManager : public planning_interface::PlannerManager
{
inline static constexpr char kCumotionPlannerId[] = "cuMotion";
public:
CumotionPlannerManager()
{
}
bool initialize(
const moveit::core::RobotModelConstPtr & model,
const rclcpp::Node::SharedPtr & node,
const std::string & parameter_namespace) override;
bool canServiceRequest(const planning_interface::MotionPlanRequest & req) const override
{
return req.planner_id == kCumotionPlannerId;
}
std::string getDescription() const override;
void getPlanningAlgorithms(std::vector<std::string> & algs) const override;
planning_interface::PlanningContextPtr getPlanningContext(
const planning_scene::PlanningSceneConstPtr & planning_scene,
const planning_interface::MotionPlanRequest & req,
moveit_msgs::msg::MoveItErrorCodes & error_code) const override;
void setPlannerConfigurations(const planning_interface::PlannerConfigurationMap & pcs) override;
private:
std::shared_ptr<rclcpp::Node> node_;
std::map<std::string, std::shared_ptr<CumotionPlanningContext>> planning_contexts_;
planning_interface::PlannerConfigurationMap planner_configs_;
};
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
#endif // ISAAC_ROS_CUMOTION_PLANNER_MANAGER_H
| 2,423 |
C++
| 30.076923 | 98 | 0.761865 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/include/isaac_ros_cumotion_moveit/cumotion_move_group_client.hpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#ifndef ISAAC_ROS_CUMOTION_MOVE_GROUP_CLIENT_H
#define ISAAC_ROS_CUMOTION_MOVE_GROUP_CLIENT_H
#include <future>
#include <memory>
#include "moveit/planning_interface/planning_interface.h"
#include "moveit/planning_scene/planning_scene.h"
#include "moveit_msgs/action/move_group.hpp"
#include "rclcpp/rclcpp.hpp"
#include "rclcpp_action/rclcpp_action.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
class CumotionMoveGroupClient
{
using GoalHandle = rclcpp_action::ClientGoalHandle<moveit_msgs::action::MoveGroup>;
public:
CumotionMoveGroupClient(const rclcpp::Node::SharedPtr & node);
bool sendGoal();
void updateGoal(
const planning_scene::PlanningSceneConstPtr & planning_scene,
const planning_interface::MotionPlanRequest & req);
void getGoal();
bool result_ready;
bool success;
moveit_msgs::msg::MotionPlanDetailedResponse plan_response;
private:
void goalResponseCallback(const GoalHandle::SharedPtr & future);
void feedbackCallback(
GoalHandle::SharedPtr,
const std::shared_ptr<const moveit_msgs::action::MoveGroup::Feedback> feedback);
void resultCallback(const GoalHandle::WrappedResult & result);
bool get_goal_handle_;
bool get_result_handle_;
std::shared_ptr<rclcpp::Node> node_;
rclcpp::CallbackGroup::SharedPtr client_cb_group_;
rclcpp_action::Client<moveit_msgs::action::MoveGroup>::SharedPtr client_;
rclcpp_action::Client<moveit_msgs::action::MoveGroup>::SendGoalOptions send_goal_options_;
std::shared_future<GoalHandle::SharedPtr> goal_h_;
std::shared_future<GoalHandle::WrappedResult> result_future_;
moveit_msgs::msg::PlanningScene planning_scene_;
planning_interface::MotionPlanRequest planning_request_;
};
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
#endif // ISAAC_ROS_CUMOTION_MOVE_GROUP_CLIENT_H
| 2,577 |
C++
| 30.439024 | 92 | 0.759022 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/include/isaac_ros_cumotion_moveit/cumotion_interface.hpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#ifndef ISAAC_ROS_CUMOTION_INTERFACE_H
#define ISAAC_ROS_CUMOTION_INTERFACE_H
#include <memory>
#include "moveit/planning_interface/planning_interface.h"
#include "rclcpp/rclcpp.hpp"
#include "isaac_ros_cumotion_moveit/cumotion_move_group_client.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
class CumotionInterface
{
public:
CumotionInterface(const rclcpp::Node::SharedPtr & node)
: node_(node),
action_client_(std::make_shared<CumotionMoveGroupClient>(node))
{
}
bool solve(
const planning_scene::PlanningSceneConstPtr & planning_scene,
const planning_interface::MotionPlanRequest & request,
planning_interface::MotionPlanDetailedResponse & response);
bool planner_busy = false;
private:
std::shared_ptr<rclcpp::Node> node_;
std::shared_ptr<CumotionMoveGroupClient> action_client_;
};
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
#endif // ISAAC_ROS_CUMOTION_INTERFACE_H
| 1,698 |
C++
| 26.852459 | 75 | 0.750883 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/include/isaac_ros_cumotion_moveit/cumotion_planning_context.hpp
|
// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
// Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
#ifndef ISAAC_ROS_CUMOTION_PLANNING_CONTEXT_H
#define ISAAC_ROS_CUMOTION_PLANNING_CONTEXT_H
#include <memory>
#include <string>
#include "moveit/planning_interface/planning_interface.h"
#include "isaac_ros_cumotion_moveit/cumotion_interface.hpp"
namespace nvidia
{
namespace isaac
{
namespace manipulation
{
class CumotionPlanningContext : public planning_interface::PlanningContext
{
public:
CumotionPlanningContext(
const std::string & context_name,
const std::string & group_name,
const rclcpp::Node::SharedPtr & node)
: planning_interface::PlanningContext(context_name, group_name),
cumotion_interface_(std::make_shared<CumotionInterface>(node))
{
}
~CumotionPlanningContext() override
{
}
bool solve(planning_interface::MotionPlanResponse & res) override;
bool solve(planning_interface::MotionPlanDetailedResponse & res) override;
bool terminate() override
{
return true;
}
void clear() override
{
}
private:
std::shared_ptr<CumotionInterface> cumotion_interface_;
};
} // namespace manipulation
} // namespace isaac
} // namespace nvidia
#endif // ISAAC_ROS_CUMOTION_PLANNING_CONTEXT_H
| 1,893 |
C++
| 24.945205 | 76 | 0.745378 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_moveit/config/isaac_ros_cumotion_planning.yaml
|
planning_plugin: isaac_ros_cumotion_moveit/CumotionPlanner
request_adapters: >-
default_planner_request_adapters/FixWorkspaceBounds
default_planner_request_adapters/FixStartStateBounds
default_planner_request_adapters/FixStartStateCollision
default_planner_request_adapters/FixStartStatePathConstraints
start_state_max_bounds_error: 0.1
num_steps: 32
| 359 |
YAML
| 38.999996 | 63 | 0.849582 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_examples/launch/franka.launch.py
|
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# To avoid code duplication, we patch and then execute the Franka demo launch file provided by
# the moveit2_tutorials package.
from os import path
from ament_index_python.packages import get_package_share_directory
import yaml
def augment_moveit_config(moveit_config):
"""Add cuMotion and its config to the planning_pipelines dict of a MoveItConfigs object."""
config_file_path = path.join(
get_package_share_directory('isaac_ros_cumotion_moveit'),
'config',
'isaac_ros_cumotion_planning.yaml'
)
with open(config_file_path) as config_file:
config = yaml.safe_load(config_file)
moveit_config.planning_pipelines['planning_pipelines'].append('isaac_ros_cumotion')
moveit_config.planning_pipelines['isaac_ros_cumotion'] = config
def generate_launch_description():
franka_demo_launch_file = path.join(
get_package_share_directory('moveit2_tutorials'),
'launch',
'demo.launch.py'
)
    with open(franka_demo_launch_file) as launch_file:
        lf = launch_file.read()
# Rename generate_launch_description() in base launch file.
lf = lf.replace('generate_launch_description', 'generate_base_launch_description')
# The standard way to make isaac_ros_cumotion_planning.yaml available to MoveIt would be to
# copy the file into the config/ directory within a given robot's moveit_config package.
# It would then suffice to add "isaac_ros_cumotion" to the list of planning_pipelines in the
# MoveItConfigsBuilder, e.g., via the following substitution.
#
# lf = lf.replace('"ompl"', '"isaac_ros_cumotion", "ompl"')
#
# Here we avoid adding the file to the moveit_resources_panda package by loading the file
# manually and augmenting the MoveItConfigs object after it's built.
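    # For reference, a minimal sketch of that standard approach (illustrative only,
    # assuming the YAML has been copied into the robot's moveit_config package):
    #
    #     moveit_config = (
    #         MoveItConfigsBuilder('moveit_resources_panda')
    #         .planning_pipelines(pipelines=['isaac_ros_cumotion', 'ompl'])
    #         .to_moveit_configs()
    #     )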
lf = lf.replace(
'run_move_group_node =',
'augment_moveit_config(moveit_config)\n run_move_group_node ='
)
# Execute modified launch file.
exec(lf, globals())
return generate_base_launch_description() # noqa: F821
| 2,742 |
Python
| 38.185714 | 96 | 0.71663 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_examples/launch/franka_isaac_sim.launch.py
|
# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This launch file was originally derived from
# https://github.com/ros-planning/moveit2_tutorials/blob/efef1d3/doc/how_to_guides/isaac_panda/launch/isaac_demo.launch.py # noqa
#
# BSD 3-Clause License
#
# Copyright (c) 2008-2013, Willow Garage, Inc.
# Copyright (c) 2015-2023, PickNik, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from moveit_configs_utils import MoveItConfigsBuilder
import yaml
def generate_launch_description():
# Command-line arguments
ros2_control_hardware_type = DeclareLaunchArgument(
'ros2_control_hardware_type',
default_value='isaac',
description=(
'ROS2 control hardware interface type to use for the launch file -- '
'possible values: [mock_components, isaac]'
)
)
moveit_config = (
MoveItConfigsBuilder('moveit_resources_panda')
.robot_description(
file_path='config/panda.urdf.xacro',
mappings={
'ros2_control_hardware_type': LaunchConfiguration(
'ros2_control_hardware_type'
)
},
)
.robot_description_semantic(file_path='config/panda.srdf')
.trajectory_execution(file_path='config/gripper_moveit_controllers.yaml')
.planning_pipelines(pipelines=['ompl', 'pilz_industrial_motion_planner'])
.to_moveit_configs()
)
# Add cuMotion to list of planning pipelines.
cumotion_config_file_path = os.path.join(
get_package_share_directory('isaac_ros_cumotion_moveit'),
'config',
'isaac_ros_cumotion_planning.yaml'
)
with open(cumotion_config_file_path) as cumotion_config_file:
cumotion_config = yaml.safe_load(cumotion_config_file)
moveit_config.planning_pipelines['planning_pipelines'].append('isaac_ros_cumotion')
moveit_config.planning_pipelines['isaac_ros_cumotion'] = cumotion_config
# The current Franka asset in Isaac Sim 2023.1.1 tends to drift slightly from commanded joint
# positions, which prevents trajectory execution if the drift exceeds `allowed_start_tolerance`
# for any joint; the default tolerance is 0.01 radians. This is more likely to occur if the
# robot hasn't fully settled when the trajectory is computed or if significant time has
# elapsed between trajectory computation and execution. For this simulation use case,
# there's little harm in disabling this check by setting `allowed_start_tolerance` to 0.
moveit_config.trajectory_execution['trajectory_execution']['allowed_start_tolerance'] = 0.0
# Start the actual move_group node/action server
move_group_node = Node(
package='moveit_ros_move_group',
executable='move_group',
output='screen',
parameters=[moveit_config.to_dict()],
arguments=['--ros-args', '--log-level', 'info'],
)
# RViz
rviz_config_file = os.path.join(
get_package_share_directory('isaac_ros_cumotion_examples'),
'rviz',
'franka_moveit_config.rviz',
)
rviz_node = Node(
package='rviz2',
executable='rviz2',
name='rviz2',
output='log',
arguments=['-d', rviz_config_file],
parameters=[
moveit_config.robot_description,
moveit_config.robot_description_semantic,
moveit_config.robot_description_kinematics,
moveit_config.planning_pipelines,
moveit_config.joint_limits,
],
)
# Static TF
world2robot_tf_node = Node(
package='tf2_ros',
executable='static_transform_publisher',
name='static_transform_publisher',
output='log',
arguments=['--frame-id', 'world', '--child-frame-id', 'panda_link0'],
)
hand2camera_tf_node = Node(
package='tf2_ros',
executable='static_transform_publisher',
name='static_transform_publisher',
output='log',
arguments=[
'0.04',
'0.0',
'0.04',
'0.0',
'0.0',
'0.0',
'panda_hand',
'sim_camera',
],
)
# Publish TF
robot_state_publisher = Node(
package='robot_state_publisher',
executable='robot_state_publisher',
name='robot_state_publisher',
output='both',
parameters=[moveit_config.robot_description],
)
# ros2_control using FakeSystem as hardware
ros2_controllers_path = os.path.join(
get_package_share_directory('moveit_resources_panda_moveit_config'),
'config',
'ros2_controllers.yaml',
)
ros2_control_node = Node(
package='controller_manager',
executable='ros2_control_node',
parameters=[ros2_controllers_path],
remappings=[
('/controller_manager/robot_description', '/robot_description'),
],
output='screen',
)
joint_state_broadcaster_spawner = Node(
package='controller_manager',
executable='spawner',
arguments=[
'joint_state_broadcaster',
'--controller-manager',
'/controller_manager',
],
)
panda_arm_controller_spawner = Node(
package='controller_manager',
executable='spawner',
arguments=['panda_arm_controller', '-c', '/controller_manager'],
)
panda_hand_controller_spawner = Node(
package='controller_manager',
executable='spawner',
arguments=['panda_hand_controller', '-c', '/controller_manager'],
)
return LaunchDescription(
[
ros2_control_hardware_type,
rviz_node,
world2robot_tf_node,
hand2camera_tf_node,
robot_state_publisher,
move_group_node,
ros2_control_node,
joint_state_broadcaster_spawner,
panda_arm_controller_spawner,
panda_hand_controller_spawner,
]
)
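# Note: the 'ros2_control_hardware_type' argument declared above is forwarded to the xacro
# mapping, so it can be overridden at launch time (e.g. ros2_control_hardware_type:=mock_components)
# to swap the hardware interface without editing the robot description.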
| 8,349 |
Python
| 35.946902 | 130 | 0.662714 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_examples/launch/ur.launch.py
|
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# To avoid code duplication, we patch and then execute the launch file provided by the
# ur_moveit_config package.
from os import path
from ament_index_python.packages import get_package_share_directory
import yaml
def cumotion_params():
# The standard way to make isaac_ros_cumotion_planning.yaml available to MoveIt would be to
# copy the file into the config/ directory within a given robot's moveit_config package.
# It would then suffice to add "isaac_ros_cumotion" to the list of planning_pipelines.
# Here we avoid adding the file to the ur_moveit_config package by loading the file manually
# and adding its contents to the parameter list.
config_file_path = path.join(
get_package_share_directory('isaac_ros_cumotion_moveit'),
'config',
'isaac_ros_cumotion_planning.yaml'
)
with open(config_file_path) as config_file:
config = yaml.safe_load(config_file)
return (
{'planning_pipelines': ['ompl', 'isaac_ros_cumotion']},
{'isaac_ros_cumotion': config}
)
def generate_launch_description():
ur_moveit_launch_file = path.join(
get_package_share_directory('ur_moveit_config'),
'launch',
'ur_moveit.launch.py'
)
lf = open(ur_moveit_launch_file).read()
# Rename generate_launch_description() in base launch file.
lf = lf.replace('generate_launch_description', 'generate_base_launch_description')
# Add required parameters to the move_group node. This substitution relies on the fact that
# the string "moveit_controllers," appears only once in the base launch file.
lf = lf.replace(
'moveit_controllers,',
'moveit_controllers, *cumotion_params(),'
)
# Execute modified launch file.
exec(lf, globals())
return generate_base_launch_description() # noqa: F821
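# Note: after the substitution above, the move_group node in the base launch file is effectively
# constructed with parameters=[..., moveit_controllers, *cumotion_params(), ...], which registers
# 'isaac_ros_cumotion' as an additional planning pipeline alongside OMPL.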
| 2,565 |
Python
| 35.657142 | 96 | 0.710331 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_cumotion_examples/isaac_sim_scripts/start_isaac_sim_franka.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Portions contributed by PickNik, LLC under BSD 3-Clause License
#
# Copyright (c) 2023, PickNik, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This Isaac Sim example is derived from
# https://github.com/ros-planning/moveit2_tutorials/blob/efef1d3/doc/how_to_guides/isaac_panda/launch/isaac_moveit.py
# which in turn was derived from an example provided with Isaac Sim 2022.2.1, found at
# standalone_examples/api/omni.isaac.ros2_bridge/moveit.py
#
# flake8: noqa
import sys
import re
import os
import carb
import numpy as np
from omni.isaac.kit import SimulationApp
FRANKA_STAGE_PATH = "/Franka"
FRANKA_USD_PATH = "/Isaac/Robots/Franka/franka_alt_fingers.usd"
CAMERA_PRIM_PATH = f"{FRANKA_STAGE_PATH}/panda_hand/geometry/realsense/realsense_camera"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Room/simple_room.usd"
GRAPH_PATH = "/ActionGraph"
REALSENSE_VIEWPORT_NAME = "realsense_viewport"
CONFIG = {"renderer": "RayTracedLighting", "headless": False}
# Example ROS2 bridge sample demonstrating the manual loading of stages
# and creation of ROS components
simulation_app = SimulationApp(CONFIG)
# More imports that need to come after we create the app
from omni.isaac.core import SimulationContext # noqa E402
from omni.isaac.core.utils.prims import set_targets
from omni.isaac.core.utils import ( # noqa E402
extensions,
nucleus,
prims,
rotations,
stage,
viewports,
)
from pxr import Gf, UsdGeom # noqa E402
import omni.graph.core as og # noqa E402
import omni
# enable ROS2 bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")
simulation_context = SimulationContext(stage_units_in_meters=1.0)
# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
simulation_app.close()
sys.exit()
# Preparing stage
viewports.set_camera_view(eye=np.array([1.2, 1.2, 0.8]), target=np.array([0, 0, 0.5]))
# Loading the simple_room environment
stage.add_reference_to_stage(
assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH
)
# Loading the franka robot USD
prims.create_prim(
FRANKA_STAGE_PATH,
"Xform",
position=np.array([0, -0.64, 0]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(0, 0, 1), 90)),
usd_path=assets_root_path + FRANKA_USD_PATH,
)
# add some objects, spread evenly along the X axis
# with a fixed offset from the robot in the Y and Z
prims.create_prim(
"/cracker_box",
"Xform",
position=np.array([-0.2, -0.25, 0.15]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(1, 0, 0), -90)),
usd_path=assets_root_path
+ "/Isaac/Props/YCB/Axis_Aligned_Physics/003_cracker_box.usd",
)
prims.create_prim(
"/sugar_box",
"Xform",
position=np.array([-0.07, -0.25, 0.1]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(0, 1, 0), -90)),
usd_path=assets_root_path
+ "/Isaac/Props/YCB/Axis_Aligned_Physics/004_sugar_box.usd",
)
prims.create_prim(
"/soup_can",
"Xform",
position=np.array([0.1, -0.25, 0.10]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(1, 0, 0), -90)),
usd_path=assets_root_path
+ "/Isaac/Props/YCB/Axis_Aligned_Physics/005_tomato_soup_can.usd",
)
prims.create_prim(
"/mustard_bottle",
"Xform",
position=np.array([0.0, 0.15, 0.12]),
orientation=rotations.gf_rotation_to_np_array(Gf.Rotation(Gf.Vec3d(1, 0, 0), -90)),
usd_path=assets_root_path
+ "/Isaac/Props/YCB/Axis_Aligned_Physics/006_mustard_bottle.usd",
)
simulation_app.update()
try:
ros_domain_id = int(os.environ["ROS_DOMAIN_ID"])
print("Using ROS_DOMAIN_ID: ", ros_domain_id)
except ValueError:
print("Invalid ROS_DOMAIN_ID integer value. Setting value to 0")
ros_domain_id = 0
except KeyError:
print("ROS_DOMAIN_ID environment variable is not set. Setting value to 0")
ros_domain_id = 0
# Creating an action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": GRAPH_PATH, "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("Context", "omni.isaac.ros2_bridge.ROS2Context"),
("PublishJointState", "omni.isaac.ros2_bridge.ROS2PublishJointState"),
(
"SubscribeJointState",
"omni.isaac.ros2_bridge.ROS2SubscribeJointState",
),
(
"ArticulationController",
"omni.isaac.core_nodes.IsaacArticulationController",
),
("PublishClock", "omni.isaac.ros2_bridge.ROS2PublishClock"),
("OnTick", "omni.graph.action.OnTick"),
("createViewport", "omni.isaac.core_nodes.IsaacCreateViewport"),
(
"getRenderProduct",
"omni.isaac.core_nodes.IsaacGetViewportRenderProduct",
),
("setCamera", "omni.isaac.core_nodes.IsaacSetCameraOnRenderProduct"),
("cameraHelperRgb", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperInfo", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
("cameraHelperDepth", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
],
og.Controller.Keys.CONNECT: [
("OnImpulseEvent.outputs:execOut", "PublishJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "SubscribeJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "PublishClock.inputs:execIn"),
(
"OnImpulseEvent.outputs:execOut",
"ArticulationController.inputs:execIn",
),
("Context.outputs:context", "PublishJointState.inputs:context"),
("Context.outputs:context", "SubscribeJointState.inputs:context"),
("Context.outputs:context", "PublishClock.inputs:context"),
(
"ReadSimTime.outputs:simulationTime",
"PublishJointState.inputs:timeStamp",
),
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
(
"SubscribeJointState.outputs:jointNames",
"ArticulationController.inputs:jointNames",
),
(
"SubscribeJointState.outputs:positionCommand",
"ArticulationController.inputs:positionCommand",
),
(
"SubscribeJointState.outputs:velocityCommand",
"ArticulationController.inputs:velocityCommand",
),
(
"SubscribeJointState.outputs:effortCommand",
"ArticulationController.inputs:effortCommand",
),
("OnTick.outputs:tick", "createViewport.inputs:execIn"),
("createViewport.outputs:execOut", "getRenderProduct.inputs:execIn"),
("createViewport.outputs:viewport", "getRenderProduct.inputs:viewport"),
("getRenderProduct.outputs:execOut", "setCamera.inputs:execIn"),
(
"getRenderProduct.outputs:renderProductPath",
"setCamera.inputs:renderProductPath",
),
("setCamera.outputs:execOut", "cameraHelperRgb.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperInfo.inputs:execIn"),
("setCamera.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
("Context.outputs:context", "cameraHelperRgb.inputs:context"),
("Context.outputs:context", "cameraHelperInfo.inputs:context"),
("Context.outputs:context", "cameraHelperDepth.inputs:context"),
(
"getRenderProduct.outputs:renderProductPath",
"cameraHelperRgb.inputs:renderProductPath",
),
(
"getRenderProduct.outputs:renderProductPath",
"cameraHelperInfo.inputs:renderProductPath",
),
(
"getRenderProduct.outputs:renderProductPath",
"cameraHelperDepth.inputs:renderProductPath",
),
],
og.Controller.Keys.SET_VALUES: [
("Context.inputs:domain_id", ros_domain_id),
# Setting the /Franka target prim to Articulation Controller node
("ArticulationController.inputs:usePath", True),
("ArticulationController.inputs:robotPath", FRANKA_STAGE_PATH),
("PublishJointState.inputs:topicName", "isaac_joint_states"),
("SubscribeJointState.inputs:topicName", "isaac_joint_commands"),
("createViewport.inputs:name", REALSENSE_VIEWPORT_NAME),
("createViewport.inputs:viewportId", 1),
("cameraHelperRgb.inputs:frameId", "sim_camera"),
("cameraHelperRgb.inputs:topicName", "rgb"),
("cameraHelperRgb.inputs:type", "rgb"),
("cameraHelperInfo.inputs:frameId", "sim_camera"),
("cameraHelperInfo.inputs:topicName", "camera_info"),
("cameraHelperInfo.inputs:type", "camera_info"),
("cameraHelperDepth.inputs:frameId", "sim_camera"),
("cameraHelperDepth.inputs:topicName", "depth"),
("cameraHelperDepth.inputs:type", "depth"),
],
},
)
except Exception as e:
print(e)
# Setting the /Franka target prim to Publish JointState node
set_targets(
prim=stage.get_current_stage().GetPrimAtPath("/ActionGraph/PublishJointState"),
attribute="inputs:targetPrim",
target_prim_paths=[FRANKA_STAGE_PATH],
)
# Fix camera settings since the defaults in the realsense model are inaccurate
realsense_prim = camera_prim = UsdGeom.Camera(
stage.get_current_stage().GetPrimAtPath(CAMERA_PRIM_PATH)
)
realsense_prim.GetHorizontalApertureAttr().Set(20.955)
realsense_prim.GetVerticalApertureAttr().Set(15.7)
realsense_prim.GetFocalLengthAttr().Set(18.8)
realsense_prim.GetFocusDistanceAttr().Set(400)
set_targets(
prim=stage.get_current_stage().GetPrimAtPath(GRAPH_PATH + "/setCamera"),
attribute="inputs:cameraPrim",
target_prim_paths=[CAMERA_PRIM_PATH],
)
simulation_app.update()
# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()
# Dock the second camera window
viewport = omni.ui.Workspace.get_window("Viewport")
rs_viewport = omni.ui.Workspace.get_window(REALSENSE_VIEWPORT_NAME)
rs_viewport.dock_in(viewport, omni.ui.DockPosition.RIGHT)
while simulation_app.is_running():
# Run with a fixed step size
simulation_context.step(render=True)
# Tick the Publish/Subscribe JointState, Publish TF and Publish Clock nodes each frame
og.Controller.set(
og.Controller.attribute("/ActionGraph/OnImpulseEvent.state:enableImpulse"), True
)
simulation_context.stop()
simulation_app.close()
| 13,417 |
Python
| 40.670807 | 117 | 0.656853 |
NVIDIA-ISAAC-ROS/isaac_ros_cumotion/isaac_ros_esdf_visualizer/isaac_ros_esdf_visualizer/esdf_visualizer.py
|
# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from curobo.geom.sdf.world import CollisionCheckerType
from curobo.geom.sdf.world import WorldCollisionConfig
from curobo.geom.sdf.world_voxel import WorldVoxelCollision
from curobo.geom.types import Cuboid as CuCuboid
from curobo.geom.types import VoxelGrid as CuVoxelGrid
from curobo.geom.types import WorldConfig
from curobo.types.base import TensorDeviceType
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
import numpy as np
from nvblox_msgs.srv import EsdfAndGradients
import rclpy
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from rclpy.node import Node
from std_msgs.msg import ColorRGBA
import torch
from visualization_msgs.msg import Marker
class ESDFVisualizer(Node):
def __init__(self):
super().__init__('esdf_visualizer')
self.declare_parameter('voxel_dims', [1.25, 1.8, 1.8])
self.declare_parameter('grid_position', [0.0, 0.0, 0.0])
self.declare_parameter('voxel_size', 0.05)
self.declare_parameter('publish_voxel_size', 0.025)
self.declare_parameter('max_publish_voxels', 50000)
self.declare_parameter('esdf_service_name', '/nvblox_node/get_esdf_and_gradient')
self.declare_parameter('robot_base_frame', 'base_link')
self.__esdf_future = None
# Voxel publisher
self.__voxel_pub = self.create_publisher(Marker, '/curobo/voxels', 10)
# ESDF service
esdf_service_name = (
self.get_parameter('esdf_service_name').get_parameter_value().string_value
)
esdf_service_cb_group = MutuallyExclusiveCallbackGroup()
self.__esdf_client = self.create_client(
EsdfAndGradients, esdf_service_name, callback_group=esdf_service_cb_group
)
while not self.__esdf_client.wait_for_service(timeout_sec=1.0):
self.get_logger().info(f'Service({esdf_service_name}) not available, waiting again...')
self.__esdf_req = EsdfAndGradients.Request()
# Timer for calling the ESDF service
timer_period = 0.01
timer_cb_group = MutuallyExclusiveCallbackGroup()
self.timer = self.create_timer(
timer_period, self.timer_callback, callback_group=timer_cb_group
)
self.__voxel_dims = (
self.get_parameter('voxel_dims').get_parameter_value().double_array_value
)
self.__grid_position = (
self.get_parameter('grid_position').get_parameter_value().double_array_value
)
self.__voxel_size = self.get_parameter('voxel_size').get_parameter_value().double_value
self.__publish_voxel_size = (
self.get_parameter('publish_voxel_size').get_parameter_value().double_value
)
self.__max_publish_voxels = (
self.get_parameter('max_publish_voxels').get_parameter_value().integer_value
)
# Init WorldVoxelCollision
world_cfg = WorldConfig.from_dict(
{
'voxel': {
'world_voxel': {
'dims': self.__voxel_dims,
'pose': [
self.__grid_position[0],
self.__grid_position[1],
self.__grid_position[2],
1,
0,
0,
0,
], # x, y, z, qw, qx, qy, qz
'voxel_size': self.__voxel_size,
'feature_dtype': torch.float32,
},
},
},
)
tensor_args = TensorDeviceType()
world_collision_config = WorldCollisionConfig.load_from_dict(
{
'checker_type': CollisionCheckerType.VOXEL,
'max_distance': 10.0,
'n_envs': 1,
},
world_cfg,
tensor_args,
)
self.__world_collision = WorldVoxelCollision(world_collision_config)
self.__robot_base_frame = (
self.get_parameter('robot_base_frame').get_parameter_value().string_value
)
self.__tensor_args = tensor_args
esdf_grid = CuVoxelGrid(
name='world_voxel',
dims=self.__voxel_dims,
pose=[
self.__grid_position[0],
self.__grid_position[1],
self.__grid_position[2],
1,
0,
0,
0,
],
voxel_size=self.__voxel_size,
feature_dtype=torch.float32,
)
self.__grid_shape, _, _ = esdf_grid.get_grid_shape()
def timer_callback(self):
if self.__esdf_future is None:
self.get_logger().info('Calling ESDF service')
            # The AABB minimum corner: the grid position minus half the grid dims in x, y, and z
aabb_min = Point()
aabb_min.x = (-0.5 * self.__voxel_dims[0]) + self.__grid_position[0]
aabb_min.y = (-0.5 * self.__voxel_dims[1]) + self.__grid_position[1]
aabb_min.z = (-0.5 * self.__voxel_dims[2]) + self.__grid_position[2]
            # This is the full AABB size (the grid dimensions), not the size of a single voxel.
voxel_dims = Vector3()
voxel_dims.x = self.__voxel_dims[0]
voxel_dims.y = self.__voxel_dims[1]
voxel_dims.z = self.__voxel_dims[2]
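            # Worked example with the default parameters declared above
            # (voxel_dims = [1.25, 1.8, 1.8], grid_position = [0, 0, 0]):
            # aabb_min = (-0.625, -0.9, -0.9) and the requested AABB size is the
            # full grid, (1.25, 1.8, 1.8).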
self.__esdf_future = self.send_request(aabb_min, voxel_dims)
if self.__esdf_future is not None and self.__esdf_future.done():
response = self.__esdf_future.result()
self.fill_marker(response)
self.__esdf_future = None
def send_request(self, aabb_min_m, aabb_size_m):
self.__esdf_req.aabb_min_m = aabb_min_m
self.__esdf_req.aabb_size_m = aabb_size_m
self.get_logger().info(
f'ESDF req = {self.__esdf_req.aabb_min_m}, {self.__esdf_req.aabb_size_m}'
)
esdf_future = self.__esdf_client.call_async(self.__esdf_req)
return esdf_future
def get_esdf_voxel_grid(self, esdf_data):
esdf_array = esdf_data.esdf_and_gradients
array_shape = [
esdf_array.layout.dim[0].size,
esdf_array.layout.dim[1].size,
esdf_array.layout.dim[2].size,
]
array_data = np.array(esdf_array.data)
array_data = self.__tensor_args.to_device(array_data)
# Array data is reshaped to x y z channels
array_data = array_data.view(array_shape[0], array_shape[1], array_shape[2]).contiguous()
        # Array is flattened into a single feature column (N x 1)
array_data = array_data.reshape(-1, 1)
        # nvblox uses negative distances inside obstacles; cuRobo needs the opposite sign:
array_data = -1 * array_data
        # nvblox assigns a value of -1000.0 to unobserved voxels; after the sign flip these
        # become +1000.0, so map them back to -1000.0 (treated as occupied by cuRobo)
array_data[array_data >= 1000.0] = -1000.0
        # nvblox distances are measured at the origin of each voxel; cuRobo's ESDF expects them at the voxel faces
array_data = array_data + 0.5 * self.__voxel_size
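        # Illustrative numbers: with voxel_size = 0.05, an nvblox value of -0.20 (0.2 m inside an
        # obstacle) becomes +0.20 after the sign flip and +0.225 after the half-voxel offset; an
        # unobserved voxel (-1000.0) flips to +1000.0 and is mapped back to a large negative value
        # above, so cuRobo treats it as occupied.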
esdf_grid = CuVoxelGrid(
name='world_voxel',
dims=self.__voxel_dims,
pose=[
self.__grid_position[0],
self.__grid_position[1],
self.__grid_position[2],
1,
0.0,
0.0,
0,
], # x, y, z, qw, qx, qy, qz
voxel_size=self.__voxel_size,
feature_dtype=torch.float32,
feature_tensor=array_data,
)
return esdf_grid
def fill_marker(self, esdf_data):
esdf_grid = self.get_esdf_voxel_grid(esdf_data)
self.__world_collision.update_voxel_data(esdf_grid)
vox_size = self.__publish_voxel_size
voxels = self.__world_collision.get_esdf_in_bounding_box(
CuCuboid(
name='test',
pose=[0.0, 0.0, 0.0, 1, 0, 0, 0], # x, y, z, qw, qx, qy, qz
dims=self.__voxel_dims,
),
voxel_size=vox_size,
)
xyzr_tensor = voxels.xyzr_tensor.clone()
xyzr_tensor[..., 3] = voxels.feature_tensor
self.publish_voxels(xyzr_tensor)
def publish_voxels(self, voxels):
vox_size = 0.25 * self.__publish_voxel_size
# create marker:
marker = Marker()
marker.header.frame_id = self.__robot_base_frame
marker.id = 0
marker.type = 6 # cube list
marker.ns = 'curobo_world'
marker.action = 0
marker.pose.orientation.w = 1.0
marker.lifetime = rclpy.duration.Duration(seconds=1000.0).to_msg()
marker.frame_locked = False
marker.scale.x = vox_size
marker.scale.y = vox_size
marker.scale.z = vox_size
# get only voxels that are inside surfaces:
voxels = voxels[voxels[:, 3] >= 0.0]
vox = voxels.view(-1, 4).cpu().numpy()
marker.points = []
for i in range(min(len(vox), self.__max_publish_voxels)):
pt = Point()
pt.x = float(vox[i, 0])
pt.y = float(vox[i, 1])
pt.z = float(vox[i, 2])
color = ColorRGBA()
d = vox[i, 3]
rgba = [min(1.0, 1.0 - float(d)), 0.0, 0.0, 1.0]
color.r = rgba[0]
color.g = rgba[1]
color.b = rgba[2]
color.a = rgba[3]
marker.colors.append(color)
marker.points.append(pt)
# publish voxels:
marker.header.stamp = self.get_clock().now().to_msg()
self.__voxel_pub.publish(marker)
def main(args=None):
# Initialize the rclpy library
rclpy.init(args=args)
# Create the node
esdf_client = ESDFVisualizer()
# Spin the node so the callback function is called.
try:
esdf_client.get_logger().info('Starting ESDFVisualizer node')
rclpy.spin(esdf_client)
except KeyboardInterrupt:
esdf_client.get_logger().info('Destroying ESDFVisualizer node')
# Destroy the node explicitly
esdf_client.destroy_node()
# Shutdown the ROS client library for Python
rclpy.shutdown()
if __name__ == '__main__':
main()
| 10,873 |
Python
| 35.006622 | 99 | 0.566265 |
Tbarkin121/GuardDog/README.md
|
# GuardDog
Making a hypoallergenic quadruped robot pet from scratch
| 69 |
Markdown
| 22.333326 | 57 | 0.84058 |
Tbarkin121/GuardDog/OmniIsaacGymEnvs/omniisaacgymenvs/robots/articulations/guarddog.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import PhysxSchema
class Guarddog(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "Guarddog",
usd_path: Optional[str] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
self._usd_path = usd_path
self._name = name
self._usd_path = r"C:\Users\Plutonium\MyProjects\GuardDog\isaac\IsaacGymEnvs\assets\urdf\QuadCoordFix\urdf\QuadCoordFix\QuadCoordFix.usd"
# self._usd_path = r"C:\Users\Plutonium\MyProjects\GuardDog\isaac\IsaacGymEnvs\assets\urdf\Quad_Foot\urdf\Quad_Foot\Quad_Foot.usd"
add_reference_to_stage(self._usd_path, prim_path)
super().__init__(
prim_path=prim_path,
name=name,
translation=translation,
orientation=orientation,
articulation_controller=None,
)
def set_guarddog_properties(self, stage, prim):
for link_prim in prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
rb.GetDisableGravityAttr().Set(False)
rb.GetRetainAccelerationsAttr().Set(False)
rb.GetLinearDampingAttr().Set(0.0)
rb.GetMaxLinearVelocityAttr().Set(1000.0)
rb.GetAngularDampingAttr().Set(0.0)
                rb.GetMaxAngularVelocityAttr().Set(64 / np.pi * 180)  # 64 rad/s converted to deg/s
def prepare_contacts(self, stage, prim):
for link_prim in prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
if "_Hip" not in str(link_prim.GetPrimPath()):
print('!!')
print(link_prim.GetPrimPath())
rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
rb.CreateSleepThresholdAttr().Set(0)
cr_api = PhysxSchema.PhysxContactReportAPI.Apply(link_prim)
cr_api.CreateThresholdAttr().Set(0)
| 3,911 |
Python
| 43.965517 | 146 | 0.685502 |
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/test_onnx.py
|
import numpy as np
import onnx
import onnxruntime as ort
onnx_model = onnx.load("pendulum.onnx")
# Check that the model is well formed
onnx.checker.check_model(onnx_model)
ort_model = ort.InferenceSession("pendulum.onnx")
outputs = ort_model.run(
None,
{"obs": np.zeros((1, 2)).astype(np.float32)},
)
print(outputs)
| 327 |
Python
| 19.499999 | 49 | 0.718654 |
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/quadwalker.py
|
# Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os
import torch
from isaacgym import gymutil, gymtorch, gymapi
from .base.vec_task import VecTask
from .keyboard import Keyboard
from isaacgymenvs.utils.torch_jit_utils import to_torch, get_axis_params, torch_rand_float, quat_rotate, quat_rotate_inverse
class QuadWalker(VecTask):
def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):
self.cfg = cfg
# normalization
self.lin_vel_scale = self.cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self.cfg["env"]["learn"]["angularVelocityScale"]
self.dof_vel_scale = 1/self.cfg["env"]["control"]["maxVelocity"]
self.max_dof_effort = self.cfg["env"]['control']["maxEffort"]
self.max_dof_velocity = self.cfg["env"]['control']["maxVelocity"]
self.dof_friction = self.cfg["env"]['control']["friction"]
# reward scales
self.rew_scales = {}
self.rew_scales["lin_vel_xy"] = self.cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["ang_vel_z"] = self.cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["torque"] = self.cfg["env"]["learn"]["torqueRewardScale"]
self.reset_dist = self.cfg["env"]["resetDist"]
# randomization
self.randomization_params = self.cfg["task"]["randomization_params"]
self.randomize = self.cfg["task"]["randomize"]
# command ranges
self.command_x_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self.cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self.cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# plane params
self.plane_static_friction = self.cfg["env"]["plane"]["staticFriction"]
self.plane_dynamic_friction = self.cfg["env"]["plane"]["dynamicFriction"]
self.plane_restitution = self.cfg["env"]["plane"]["restitution"]
# base init state
pos = self.cfg["env"]["baseInitState"]["pos"]
rot = self.cfg["env"]["baseInitState"]["rot"]
v_lin = self.cfg["env"]["baseInitState"]["vLinear"]
v_ang = self.cfg["env"]["baseInitState"]["vAngular"]
state = pos + rot + v_lin + v_ang
self.base_init_state = state
# default joint positions
self.named_default_joint_angles = self.cfg["env"]["defaultJointAngles"]
self.cfg["env"]["numObservations"] = 60
self.cfg["env"]["numActions"] = 12
self.Kp = self.cfg["env"]["control"]["stiffness"]
self.Kd = self.cfg["env"]["control"]["damping"]
super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render)
# other
self.dt = self.sim_params.dt
self.max_episode_length_s = self.cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
if self.viewer != None:
p = self.cfg["env"]["viewer"]["pos"]
lookat = self.cfg["env"]["viewer"]["lookat"]
cam_pos = gymapi.Vec3(p[0], p[1], p[2])
cam_target = gymapi.Vec3(lookat[0], lookat[1], lookat[2])
self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# cam_pos = gymapi.Vec3(10.0, 9.95, 0.5)
# cam_target = gymapi.Vec3(10.0, -20.0, 0.5)
# self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)
# get gym state tensors
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
net_contact_forces = self.gym.acquire_net_contact_force_tensor(self.sim)
torques = self.gym.acquire_dof_force_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
# create some wrapper tensors for different slices
self.root_states = gymtorch.wrap_tensor(actor_root_state)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
self.contact_forces = gymtorch.wrap_tensor(net_contact_forces).view(self.num_envs, -1, 3) # shape: num_envs, num_bodies, xyz axis
self.torques = gymtorch.wrap_tensor(torques).view(self.num_envs, self.num_dof)
self.up_axis_idx = 1 # index of up axis: Y=1, Z=2
self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.commands = torch.zeros(self.num_envs, 3, dtype=torch.float, device=self.device, requires_grad=False)
self.commands_x = self.commands.view(self.num_envs, 3)[..., 0]
self.commands_y = self.commands.view(self.num_envs, 3)[..., 1]
self.commands_yaw = self.commands.view(self.num_envs, 3)[..., 2]
self.default_dof_pos = torch.zeros_like(self.dof_pos, dtype=torch.float, device=self.device, requires_grad=False)
for i in range(self.cfg["env"]["numActions"]):
name = self.dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
self.initial_root_states = self.root_states.clone()
self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)
self.gravity_vec = to_torch(get_axis_params(-1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.actions = torch.zeros(self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False)
self.keys = Keyboard(3)
self.reset_idx(torch.arange(self.num_envs, device=self.device))
def create_sim(self):
# set the up axis to be z-up given that assets are y-up by default
self.up_axis = self.cfg["sim"]["up_axis"]
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
if self.randomize:
self.apply_randomizations(self.randomization_params)
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
# set the normal force to be z dimension
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) if self.up_axis == 'z' else gymapi.Vec3(0.0, 1.0, 0.0)
plane_params.static_friction = self.plane_static_friction
plane_params.dynamic_friction = self.plane_dynamic_friction
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
# define plane on which environments are initialized
lower = gymapi.Vec3(-spacing, -spacing, 0.0) if self.up_axis == 'z' else gymapi.Vec3(0.5 * -spacing, 0.0, -spacing)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
asset_file = "urdf/QuadCoordFix/urdf/QuadCoordFix.urdf"
if "asset" in self.cfg["env"]:
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
asset_options.collapse_fixed_joints = True
asset_options.replace_cylinder_with_capsule = True
asset_options.fix_base_link = False
asset_options.angular_damping = 0.0
asset_options.linear_damping = 0.0
asset_options.max_angular_velocity = 10000
asset_options.armature = 0.0
asset_options.thickness = 0.01
asset_options.disable_gravity = False
quadwalker_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(quadwalker_asset)
self.body_names = self.gym.get_asset_rigid_body_names(quadwalker_asset)
self.dof_names = self.gym.get_asset_dof_names(quadwalker_asset)
print('self.num_dof')
print(self.num_dof)
print('self.body_names')
print(self.body_names)
print('self.dof_names')
print(self.dof_names)
hip_names = [s for s in self.body_names if "Hip" in s]
thigh_names = [s for s in self.body_names if "Thigh" in s]
shin_names = [s for s in self.body_names if "Shin" in s]
foot_names = [s for s in self.body_names if "Foot" in s]
self.hip_indices = torch.zeros(len(hip_names), dtype=torch.long, device=self.device, requires_grad=False)
self.thigh_indices = torch.zeros(len(thigh_names), dtype=torch.long, device=self.device, requires_grad=False)
self.shin_indices = torch.zeros(len(shin_names), dtype=torch.long, device=self.device, requires_grad=False)
self.foot_indices = torch.zeros(len(foot_names), dtype=torch.long, device=self.device, requires_grad=False)
pose = gymapi.Transform()
pose.p = gymapi.Vec3(*self.base_init_state[:3])
self.quadwalker_handles = []
self.envs = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
quadwalker_handle = self.gym.create_actor(env_ptr, quadwalker_asset, pose, "quadwalker", i, 1, 0)
rand_color = torch.rand((3), device=self.device)
self.gym.set_rigid_body_color(env_ptr, quadwalker_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2]))
rand_color = torch.rand((3), device=self.device)
self.gym.set_rigid_body_color(env_ptr, quadwalker_handle, 1, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2]))
dof_props = self.gym.get_actor_dof_properties(env_ptr, quadwalker_handle)
dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT
dof_props['stiffness'][:] = self.Kp
dof_props['damping'][:] = self.Kd
dof_props['velocity'][:] = self.max_dof_velocity
dof_props['effort'].fill(0.0)
dof_props['friction'][:] = self.dof_friction
self.gym.set_actor_dof_properties(env_ptr, quadwalker_handle, dof_props)
self.gym.enable_actor_dof_force_sensors(env_ptr, quadwalker_handle)
self.envs.append(env_ptr)
self.quadwalker_handles.append(quadwalker_handle)
for i in range(len(hip_names)):
self.hip_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], hip_names[i])
for i in range(len(thigh_names)):
self.thigh_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], thigh_names[i])
for i in range(len(shin_names)):
self.shin_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], shin_names[i])
for i in range(len(foot_names)):
self.foot_indices[i] = self.gym.find_actor_rigid_body_handle(self.envs[0], self.quadwalker_handles[0], foot_names[i])
print(self.hip_indices)
print(self.thigh_indices)
print(self.shin_indices)
print(self.foot_indices)
def compute_reward(self):
base_quat = self.root_states[:, 3:7]
base_lin_vel = quat_rotate_inverse(base_quat, self.root_states[:, 7:10])
base_ang_vel = quat_rotate_inverse(base_quat, self.root_states[:, 10:13])
# velocity tracking reward
# lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - base_lin_vel[:, :2]), dim=1)
# ang_vel_error = torch.square(self.commands[:, 2] - base_ang_vel[:, 2])
# print(base_lin_vel[0, :2])
# print('!!!')
# print(self.commands[0, :2])
# print(base_lin_vel[0, [0,2]])
# print(self.progress_buf)
self.rew_buf[:], self.reset_buf[:] = compute_quadwalker_reward(self.root_states,
self.commands,
self.torques,
self.dof_vel,
self.contact_forces,
self.reset_buf,
self.progress_buf,
self.hip_indices,
self.thigh_indices,
self.shin_indices,
self.rew_scales,
self.reset_dist,
self.max_episode_length)
# print(self.rew_buf[0])
# print(self.reset_buf)
def compute_observations(self, env_ids=None):
if env_ids is None:
env_ids = np.arange(self.num_envs)
self.gym.refresh_dof_state_tensor(self.sim) # done in step
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_net_contact_force_tensor(self.sim)
self.gym.refresh_dof_force_tensor(self.sim)
self.obs_buf[:] = compute_quadwalker_observations( # tensors
self.root_states,
self.commands,
self.dof_pos,
self.default_dof_pos,
self.dof_vel,
self.gravity_vec,
self.actions_tensor,
# scales
self.lin_vel_scale,
self.ang_vel_scale,
self.dof_vel_scale)
# obs = torch.cat((sin_encode,
# cos_encode,
# dof_vel * dof_vel_scale,
# base_lin_vel * lin_vel_scale,
# base_ang_vel * ang_vel_scale,
# projected_gravity,
# commands_scaled,
# actions
# ), dim=-1)
# print(self.obs_buf[0, 3*self.num_dof:3*self.num_dof+9])
return self.obs_buf
def reset_idx(self, env_ids):
# Randomization can happen only at reset time, since it can reset actor positions on GPU
if self.randomize:
self.apply_randomizations(self.randomization_params)
positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
# positions_offset = torch.ones((len(env_ids), self.num_dof), device=self.device)
# velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
velocities = torch.zeros((len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset[:]
self.dof_vel[env_ids, :] = velocities[:]
env_ids_int32 = env_ids.to(dtype=torch.int32)
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.initial_root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.reset_commands(env_ids)
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
def reset_commands(self, env_ids):
self.commands_x[env_ids] = torch_rand_float(self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.commands_y[env_ids] = torch_rand_float(self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device).squeeze()
self.commands_yaw[env_ids] = torch_rand_float(self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device).squeeze()
def pre_physics_step(self, actions):
self.actions_tensor = torch.zeros( [self.num_envs, self.num_dof], device=self.device, dtype=torch.float)
self.actions_tensor[:, 0:self.num_dof] = actions.to(self.device) * self.max_dof_effort
# a = self.keys.get_keys()
# scale = torch.tensor([10, self.max_dof_effort, self.max_dof_effort])
# self.actions_tensor[0,0:3] = a*scale
forces = gymtorch.unwrap_tensor(self.actions_tensor)
self.gym.set_dof_actuation_force_tensor(self.sim, forces)
# print(actions_tensor[0])
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
env_ids = torch.where(self.progress_buf % 100 == 0, torch.ones_like(self.progress_buf), torch.zeros_like(self.progress_buf)).nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_commands(env_ids)
self.compute_observations()
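        # Obs indices 45:48 hold the scaled command vector (see compute_quadwalker_observations);
        # for env 0 they are overridden below with keyboard input so that robot can be driven
        # interactively.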
a = self.keys.get_keys()
scale = torch.tensor([5., 1., 0.5])
self.obs_buf[0, 45:48] = a*scale
# print(self.obs_buf[0,45:48])
self.compute_reward()
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def convert_angle(angle):
# Apply sine and cosine functions
sin_component = torch.sin(angle)
cos_component = torch.cos(angle)
# Normalize angle to [-pi, pi]
normalized_angle = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi
# Apply offset
# normalized_angle += np.pi
# Normalize again if needed
# normalized_angle = torch.remainder(normalized_angle + np.pi, 2 * np.pi) - np.pi
# Normalize angle to [-1, 1]
normalized_angle /= torch.pi
return sin_component, cos_component, normalized_angle
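# Quick sanity check for convert_angle (illustrative values, not part of the task):
#   angle = torch.tensor([3.5 * np.pi])   # wraps to -0.5*pi
#   s, c, n = convert_angle(angle)        # s = -1.0, c ~= 0.0, n = -0.5 (normalized to [-1, 1])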
@torch.jit.script
def compute_quadwalker_reward(
# tensors
root_states,
commands,
torques,
dof_vel,
contact_forces,
reset_buf,
progress_buf,
hip_idx,
thigh_idx,
shin_idx,
# Dict
rew_scales,
# other
reset_dist,
max_episode_length):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Dict[str, float], float, float) -> Tuple[Tensor, Tensor]
# prepare quantities (TODO: return from obs ?)
base_quat = root_states[:, 3:7]
base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10])
base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13])
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(commands[:, :2] - base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(commands[:, 2] - base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error/0.25) * rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error/0.25) * rew_scales["ang_vel_z"]
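    # Illustrative numbers: a combined XY tracking error of 0.5 (m/s)^2 gives
    # exp(-0.5 / 0.25) ~= 0.135 of the full lin_vel_xy reward scale.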
# torque penalty
rew_torque = torch.sum(torch.square(torques), dim=1) * rew_scales["torque"]
# joint speed penalty
# rew_joint_speed = torch.sum(torch.square(dof_vel), dim=1) * rew_scales["torque"]/12
total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_torque
total_reward = torch.clip(total_reward, 0., None)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)
    # This is a hacky fix: the contact forces sometimes don't update when an environment resets, causing a double reset.
    # Waiting 10 environment steps before factoring in contact forces works around this.
check_forces = torch.where(progress_buf >= 10, torch.ones_like(reset_buf), reset_buf)
reset = reset | ((torch.norm(contact_forces[:, 0, :], dim=1) > 1.) & check_forces) # Body Collision
reset = reset | ((torch.any(torch.norm(contact_forces[:, hip_idx, :], dim=2) > 1., dim=1)) & check_forces)
reset = reset | ((torch.any(torch.norm(contact_forces[:, thigh_idx, :], dim=2) > 1., dim=1)) & check_forces)
# reset = reset | (torch.any(torch.norm(contact_forces[:, shin_idx, :], dim=2) > 1., dim=1))
return total_reward.detach(), reset
@torch.jit.script
def compute_quadwalker_observations(root_states,
commands,
dof_pos,
default_dof_pos,
dof_vel,
gravity_vec,
actions,
lin_vel_scale,
ang_vel_scale,
dof_vel_scale
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float, float) -> Tensor
base_quat = root_states[:, 3:7]
base_lin_vel = quat_rotate_inverse(base_quat, root_states[:, 7:10])
base_ang_vel = quat_rotate_inverse(base_quat, root_states[:, 10:13])
projected_gravity = quat_rotate(base_quat, gravity_vec)
commands_scaled = commands*torch.tensor([lin_vel_scale, lin_vel_scale, ang_vel_scale], requires_grad=False, device=commands.device)
sin_encode, cos_encode, motor_angle = convert_angle(dof_pos.squeeze())
obs = torch.cat((sin_encode, #12 (0:12)
cos_encode, #12 (12:24)
dof_vel * dof_vel_scale, #12 (24:36)
base_lin_vel * lin_vel_scale, #3 (36:39)
base_ang_vel * ang_vel_scale, #3 (39:42)
projected_gravity, #3 (42:45)
commands_scaled, #3 (45:48)
actions #12 (48:60)
), dim=-1)
return obs
| 25,614 |
Python
| 47.330189 | 217 | 0.569454 |
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/stm32_comms.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 4 12:52:12 2024
@author: tylerbarkin
"""
import serial
import struct
import time
import numpy as np
import platform
class MCU_Comms():
def __init__(self, enabled=1):
self.enabled = enabled
if(self.enabled):
self.act_data = np.zeros(12)
self.obs_data = np.zeros(48)
if platform.system() == 'Windows':
self.port = 'COM6'
else:
self.port = '/dev/ttyACM1'
print('Using Port : {}'.format(self.port))
self.open_port()
def open_port(self):
# Configure the serial connection
if(self.enabled):
try:
self.ser = serial.Serial(
port=self.port, # Serial port
baudrate=460800, # Baud rate, should match STM32 setting
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1 # Read timeout in seconds
)
except:
print("No Comms Object Found")
def close_port(self):
if(self.enabled):
if self.ser.is_open:
self.ser.close()
print("Serial port closed")
def read_data(self):
if(self.enabled):
try:
                # Read 48 bytes from the serial port (size of 12 floats)
data = self.ser.read(12 * 4)
# Check if we received 48 bytes
if len(data) == 48:
# Unpack the bytes to four floats
float_values = struct.unpack('12f', data)
self.act_data = np.array(float_values)
# print(f"Received floats: {float_values}")
else:
print("Incomplete data received")
except KeyboardInterrupt:
print("Exiting...")
else:
self.act_data = np.zeros(12)
def write_data(self):
if(self.enabled):
# Pack the floats into bytes
data_to_send = struct.pack('48f', *self.obs_data)
try:
# Send the packed bytes over the serial connection
self.ser.write(data_to_send)
# print("Data sent")
except Exception as e:
print(f"Error: {e}")
def __del__(self):
# Destructor: close the serial port
self.close_port()
# comm_obj = MCU_Comms()
# comm_obj.obs_data = np.zeros((48))
# for i in range(48):
# comm_obj.obs_data[i] = i
# for _ in range(1):
# start_time = time.perf_counter()
# comm_obj.write_data()
# comm_obj.read_data()
# elapsed_time = time.perf_counter() - start_time
# print('Total Time = {}'.format(elapsed_time))
# comm_obj.close_port()
| 3,192 |
Python
| 28.027272 | 108 | 0.45614 |
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/tasks/keyboard.py
|
import pygame
import numpy as np
import torch
class Keyboard():
def __init__(self, num_actions=1):
pygame.quit()
# Initialize Pygame
pygame.init()
# Set up the screen
screen = pygame.display.set_mode((400, 300))
self.num_actions = num_actions
def refresh(self):
pygame.event.pump()
def get_keys(self):
pygame.event.pump()
# Check for arrow key presses
a = torch.zeros(self.num_actions)
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
a[0] = 1.0
if keys[pygame.K_DOWN]:
a[0] = -1.0
if keys[pygame.K_LEFT]:
a[1] = 1.0
if keys[pygame.K_RIGHT]:
a[1] = -1.0
if keys[pygame.K_a]:
a[2] = 1.0
if keys[pygame.K_d]:
a[2] = -1.0
return a
| 884 |
Python
| 21.692307 | 52 | 0.49095 |
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/cfg/task/QuadWalker.yaml
|
# used to create the object
name: QuadWalker
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:512,${...num_envs}}
envSpacing: 5.0
resetDist: 3.0
maxEpisodeLen: 20
clipObservations: 5.0
clipActions: 1.0
plane:
staticFriction: 1.0 # [-]
dynamicFriction: 1.0 # [-]
restitution: 0. # [-]
baseInitState:
pos: [0.0, 0.0, 0.5] # x,y,z [m]
rot: [0.0, 0.0, 0.0, 1.0] # x,y,z,w [quat]
vLinear: [0.0, 0.0, 0.0] # x,y,z [m/s]
vAngular: [0.0, 0.0, 0.0] # x,y,z [rad/s]
asset:
assetRoot: "../../assets"
assetFileName: "urdf/QuadCoordFix/urdf/QuadCoordFix.urdf"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
randomCommandVelocityRanges:
linear_x: [-3., 3.] # min max [m/s]
linear_y: [-1., 1.] # min max [m/s]
yaw: [-0.5, 0.5] # min max [rad/s]
control:
# PD Drive parameters:
stiffness: 0.0 # [N*m/rad]
damping: 0.0 # [N*m*s/rad]
maxEffort: 3.0 # [N*m]
maxVelocity: 25.0 #[rad/s]
friction: 0.01
controlFrequencyInv: 1 # 60 Hz
defaultJointAngles: # = target angles when action = 0.0
FR_J1: 0.0 # [rad]
FL_J1: 0.0 # [rad]
BR_J1: 0.0 # [rad]
BL_J1: 0.0 # [rad]
FR_J2: 0.5 # [rad]
FL_J2: -0.5 # [rad]
BR_J2: -0.5 # [rad]
BL_J2: 0.5 # [rad]
FR_J3: -2.0 # [rad]
FL_J3: 2.0 # [rad]
BR_J3: 2.0 # [rad]
BL_J3: -2.0 # [rad]
learn:
# rewards
linearVelocityXYRewardScale: 1.0
angularVelocityZRewardScale: 0.5
torqueRewardScale: -0.000025
# normalization
linearVelocityScale: 2.0
angularVelocityScale: 0.25
dofPositionScale: 1.0
dofVelocityScale: 0.05
# episode length in seconds
episodeLength_s: 10
# viewer cam:
viewer:
refEnv: 0
pos: [0, 0, 4] # [m]
lookat: [1., 1, 3.3] # [m]
sim:
dt: 0.01 # 1/100 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 2
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.0
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 100.0
default_buffer_size_multiplier: 5.0
# max_gpu_contact_pairs: 1048576 # 1024*1024
max_gpu_contact_pairs: 8388608 # 8*1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 2 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
# Note: the domain randomizer does not appear to work as expected.
# With randomize: true and the actor listed under actor_params, the STL colors are randomized,
# but color: False is not respected and the friction values never appear to change.
task:
randomize: False
randomization_params:
frequency: 10 # Define how many environment steps between generating new randomizations
# observations:
# range: [0, .002] # range for the white noise
# operation: "additive"
# distribution: "gaussian"
# actions:
# range: [0., .02]
# operation: "additive"
# distribution: "gaussian"
# sim_params:
# gravity:
# range: [0, 0.4]
# operation: "additive"
# distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 3000
actor_params:
quadwalker:
# color: False
dof_properties:
friction:
range: [0.0, 1.0]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 500
| 4,063 |
YAML
| 28.23741 | 171 | 0.591927 |
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/isaacgymenvs/cfg/task/TorquePole.yaml
|
# used to create the object
name: TorquePole
physics_engine: ${..physics_engine}
# if given, will override the device setting in gym.
env:
numEnvs: ${resolve_default:512,${...num_envs}}
envSpacing: 0.5
resetDist: 3.0
maxEffort: 0.15
maxEpisodeLen: 1000
clipObservations: 5.0
clipActions: 1.0
asset:
assetRoot: "../../assets"
assetFileName: "urdf/TorquePole/urdf/TorquePole.urdf"
# set to True if you use camera sensors in the environment
enableCameraSensors: False
sim:
dt: 0.01 # 1/100 s
substeps: 2
up_axis: "z"
use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
gravity: [0.0, 0.0, -9.81]
physx:
num_threads: ${....num_threads}
solver_type: ${....solver_type}
use_gpu: ${contains:"cuda",${....sim_device}} # set to False to run on CPU
num_position_iterations: 4
num_velocity_iterations: 0
contact_offset: 0.02
rest_offset: 0.001
bounce_threshold_velocity: 0.2
max_depenetration_velocity: 100.0
default_buffer_size_multiplier: 2.0
max_gpu_contact_pairs: 1048576 # 1024*1024
num_subscenes: ${....num_subscenes}
contact_collection: 0 # 0: CC_NEVER (don't collect contact info), 1: CC_LAST_SUBSTEP (collect only contacts on last substep), 2: CC_ALL_SUBSTEPS (broken - do not use!)
# Note: this domain randomization block does not appear to work reliably.
# With randomize: true and the actor listed under actor_params it recolors the STL meshes,
# but color: False is not respected, and the friction values never appear to change.
task:
randomize: False
randomization_params:
frequency: 10 # Define how many environment steps between generating new randomizations
# observations:
# range: [0, .002] # range for the white noise
# operation: "additive"
# distribution: "gaussian"
# actions:
# range: [0., .02]
# operation: "additive"
# distribution: "gaussian"
# sim_params:
# gravity:
# range: [0, 0.4]
# operation: "additive"
# distribution: "gaussian"
# schedule: "linear" # "linear" will linearly interpolate between no rand and max rand
# schedule_steps: 3000
actor_params:
torquepole:
# color: False
dof_properties:
friction:
range: [0.0, 1.0]
operation: "scaling"
distribution: "uniform"
schedule: "linear" # "linear" will scale the current random sample by `min(current num steps, schedule_steps) / schedule_steps`
schedule_steps: 500
| 2,548 |
YAML
| 32.539473 | 171 | 0.641287 |
Tbarkin121/GuardDog/isaac/IsaacGymEnvs/assets/urdf/Quad/config/joint_names_Quad.yaml
|
controller_joint_names: ['', 'FR_J1', 'FR_J2', 'FR_J3', 'FL_J1', 'FL_J2', 'FL_J3', 'BR_J1', 'BR_J2', 'BR_J3', 'BL_J1', 'BL_J2', 'BL_J3', ]
| 139 |
YAML
| 68.999966 | 138 | 0.489209 |
Tbarkin121/GuardDog/isaac/python/examples/torquepole_tester.py
|
"""
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
DOF control methods example
---------------------------
An example that demonstrates various DOF control methods:
- Load the TorquePole asset from a URDF
- Get/set DOF properties
- Set DOF position and velocity targets
- Get DOF positions
- Apply DOF efforts
"""
import math
from isaacgym import gymutil, gymtorch, gymapi
import time
import numpy as np
from joystick import Joystick
from keyboard import Keyboard
import stm32_comms
import torch
import onnx
import onnxruntime as ort
comm_obj = stm32_comms.MCU_Comms(0)
max_push_effort = 0.1
def convert_angle(angle):
# Apply sine and cosine functions
sin_component = torch.sin(angle)
cos_component = torch.cos(angle)
# Normalize angle to [-pi, pi]
normalized_angle = torch.remainder(angle + np.pi, 2 * np.pi) - np.pi
# Apply offset
normalized_angle += np.pi
# Normalize again if needed
normalized_angle = torch.remainder(normalized_angle + np.pi, 2 * np.pi) - np.pi
# Normalize angle to [-1, 1]
normalized_angle /= torch.pi
return sin_component, cos_component, normalized_angle
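# Editor's note (worked examples of the mapping above): theta = pi normalizes to 0.0,
# theta = 0 to -1.0 and theta = pi/2 to -0.5; sin/cos are returned for the raw angle.
# Quick check: print(convert_angle(torch.tensor(np.pi))[2], convert_angle(torch.tensor(0.0))[2])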
# initialize gym
gym = gymapi.acquire_gym()
# parse arguments
args = gymutil.parse_arguments(description="Joint control Methods Example")
# create a simulator
sim_params = gymapi.SimParams()
sim_params.substeps = 2
sim_params.dt = 1.0 / 100.0
sim_params.physx.solver_type = 1
sim_params.physx.num_position_iterations = 4 #8
sim_params.physx.num_velocity_iterations = 0 #2
sim_params.physx.num_threads = args.num_threads
sim_params.physx.use_gpu = args.use_gpu
sim_params.use_gpu_pipeline = False
if args.use_gpu_pipeline:
print("WARNING: Forcing CPU pipeline.")
sim_params.up_axis = gymapi.UP_AXIS_Z
sim_params.gravity = gymapi.Vec3(0.0, 0.0, -9.81)
sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, args.physics_engine, sim_params)
if sim is None:
print("*** Failed to create sim")
quit()
# add ground plane
plane_params = gymapi.PlaneParams()
# set the normal force to be z dimension
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
gym.add_ground(sim, plane_params)
# set up the env grid
num_envs = 1
spacing = 1.5
env_lower = gymapi.Vec3(-spacing, 0.0, -spacing)
env_upper = gymapi.Vec3(spacing, 0.0, spacing)
# add TorquePole URDF asset
asset_root = "../../IsaacGymEnvs/assets"
asset_file = "urdf/TorquePole/urdf/TorquePole.urdf"
# asset_file = "urdf/WalkBot_3DOF_330/urdf/WalkBot_3DOF.urdf"
# Load asset with default control type of position for all joints
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = True
asset_options.angular_damping = 0.0
asset_options.max_angular_velocity = 10000
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_EFFORT
print("Loading asset '%s' from '%s'" % (asset_file, asset_root))
cubebot_asset = gym.load_asset(sim, asset_root, asset_file, asset_options)
num_dof = gym.get_asset_dof_count(cubebot_asset)
# initial root pose for the TorquePole actor
initial_pose = gymapi.Transform()
initial_pose.p = gymapi.Vec3(0.0, 0.0, 1.0)
initial_pose.r = gymapi.Quat.from_euler_zyx(1.5708, 0.0, 0.0)
# Create environment 0: a single TorquePole actor driven in DOF effort (torque) mode
env0 = gym.create_env(sim, env_lower, env_upper, 2)
cubebot0 = gym.create_actor(env0, cubebot_asset, initial_pose, 'CubeBot', 0, 0)
# Configure DOF properties
props = gym.get_actor_dof_properties(env0, cubebot0)
props["driveMode"].fill(gymapi.DOF_MODE_EFFORT)
props["stiffness"].fill(0.0)
props['damping'].fill(0.0)
props['velocity'].fill(100.0)
props['effort'].fill(0.0)
props['friction'].fill(0.01)
gym.set_actor_dof_properties(env0, cubebot0, props)
# Set DOF drive targets
dof_dict = gym.get_actor_dof_dict(env0, cubebot0)
joint_dict = gym.get_actor_joint_dict(env0, cubebot0)
dof_keys = list(dof_dict.keys())
actor_root_state = gym.acquire_actor_root_state_tensor(sim)
root_states = gymtorch.wrap_tensor(actor_root_state)
# targets = torch.tensor([1000, 0, 0, 0, 0, 0])
# gym.set_dof_velocity_target_tensor(env0, gymtorch.unwrap_tensor(targets))
# create viewer using the default camera properties
viewer = gym.create_viewer(sim, gymapi.CameraProperties())
if viewer is None:
raise ValueError('*** Failed to create viewer')
print('Tester')
# Look at the first env
cam_pos = gymapi.Vec3(2, 1, 1)
cam_target = initial_pose.p
gym.viewer_camera_look_at(viewer, None, cam_pos, cam_target)
# Simulate
joint_idx = 0
control_idx = 0
loop_counter = 1
max_loops = 250
# joy = Joystick()
key = Keyboard()
dof_state_tensor = gym.acquire_dof_state_tensor(sim)
dof_state = gymtorch.wrap_tensor(dof_state_tensor)
print(dof_state)
dof_pos = dof_state.view(num_envs, num_dof, 2)[..., 0]
dof_vel = dof_state.view(num_envs, num_dof, 2)[..., 1]
positions = 6.0 * (torch.rand((1)) - 0.5) - np.pi
velocities = 2.0 * (torch.rand((1)) - 0.5)
dof_pos[0, :] = positions[:]
dof_vel[0, :] = velocities[:]
env_ids = torch.tensor([0])
env_ids_int32 = env_ids.to(dtype=torch.int32)
gym.set_dof_state_tensor_indexed(sim,
gymtorch.unwrap_tensor(dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
onnx_model = onnx.load("pendulum.onnx")
ort_model = ort.InferenceSession("pendulum.onnx")
while not gym.query_viewer_has_closed(viewer):
gym.refresh_actor_root_state_tensor(sim)
# print(root_states)
# step the physics
gym.simulate(sim)
gym.fetch_results(sim, True)
# update the viewer
gym.step_graphics(sim)
gym.draw_viewer(viewer, sim, True)
# a = joy.get_axis()
a = key.get_keys()
enc_sin, enc_cos, pole_pos = convert_angle(dof_pos)
pole_vel = dof_vel/20.0
# print('~~~~~~~~~~~~~~~~~~~')
# print(pole_pos)
# print(pole_vel)
comm_obj.out_data = np.array([enc_sin, enc_cos, pole_vel, 0.0])
if(0): # set to 1 to run inference on the STM32 over serial instead of the local ONNX policy
comm_obj.write_data()
comm_obj.read_data()
action = comm_obj.in_data[0] * max_push_effort
else:
outputs = ort_model.run(
None,
{"obs": comm_obj.out_data[0:3].reshape(1,-1).astype(np.float32)},
)
# print(outputs[0])
action=outputs[0]* max_push_effort
print(comm_obj.out_data)
# print(action)
# gym.apply_dof_effort(env0, joint_idx, a[0]/20.0)
# Keyboard input, when active, overrides the policy action
if(a[0]):
action = a[0]* max_push_effort
gym.apply_dof_effort(env0, joint_idx, action)
# Wait for dt to elapse in real time.
# This synchronizes the physics simulation with the rendering rate.
gym.sync_frame_time(sim)
print('Done')
gym.destroy_viewer(viewer)
gym.destroy_sim(sim)
| 7,040 |
Python
| 29.218884 | 102 | 0.694034 |
Tbarkin121/GuardDog/isaac/python/examples/joystick.py
|
import pygame
import numpy as np
class Joystick():
def __init__(self):
pygame.joystick.quit()
pygame.quit()
pygame.display.init()
pygame.joystick.init()
joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
self.joystick = joysticks[0]
self.num_axis = self.joystick.get_numaxes()
self.num_buttons = self.joystick.get_numbuttons()
self.num_hats = self.joystick.get_numhats()
self.joystick.rumble(1, 1, 1)
self.zero_vals = np.zeros(self.num_axis)
self.zero()
def zero(self):
pygame.event.pump()
for i in range(self.num_axis):
self.zero_vals[i] = self.joystick.get_axis(i)
def refresh(self):
pygame.event.pump()
def get_axis(self):
pygame.event.pump()
a = np.zeros(self.num_axis)
for i in range(self.num_axis):
a[i] = self.joystick.get_axis(i) - self.zero_vals[i]
return a
def get_button(self):
pygame.event.pump()
b = np.zeros(self.num_buttons)
for i in range(self.num_buttons):
b[i] = self.joystick.get_button(i)
return b
def get_dpad(self):
pygame.event.pump()
x,y = self.joystick.get_hat(0)
d = [x,y]
return d
| 1,324 |
Python
| 29.113636 | 93 | 0.570997 |
Tbarkin121/GuardDog/python/SanityCheck.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 30 02:54:59 2023
@author: PandorasBox
"""
# https://studywolf.wordpress.com/2013/09/02/robot-control-jacobians-velocity-and-force/
import torch
theta1 = torch.tensor(torch.pi/4)
theta2 = torch.tensor(3*torch.pi/8)*5
t1 = -torch.sin(theta1) - torch.sin(theta1 + theta2)
t2 = torch.cos(theta1) + torch.cos(theta1 + theta2)
t3 = -torch.sin(theta1+theta2)
t4 = torch.cos(theta1+theta2)
J = torch.tensor([[t1,t2],[t3,t4]])
F = torch.tensor([[1.3],[1.0]])
t = torch.matmul(J,F)
print(t)
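# Editor's check (a sketch, assuming unit link lengths as in the hand-written terms above):
# the matrix J built here is the transpose of the kinematic Jacobian, so torch.matmul(J, F)
# is the joint-torque map tau = J_kin^T @ F. Recomputing J_kin with autograd should
# reproduce the same torques.
fk = lambda q: torch.stack([torch.cos(q[0]) + torch.cos(q[0] + q[1]), torch.sin(q[0]) + torch.sin(q[0] + q[1])])
q = torch.stack([theta1, theta2])
J_kin = torch.autograd.functional.jacobian(fk, q)
print(torch.matmul(J_kin.T, F)) # expected to match t printed above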
| 559 |
Python
| 20.538461 | 88 | 0.67263 |
Tbarkin121/GuardDog/python/pendulum_dynamic_jacobian.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 01:37:05 2023
@author: Plutonium
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 09:58:15 2023
@author: Plutonium
"""
import torch
import matplotlib.pyplot as plt
import numpy as np
import time
from torchviz import make_dot
torch.set_default_device('cuda')
# torch.autograd.set_detect_anomaly(True)
#%%
class Pendulum:
def __init__(self):
print('Init Arm')
self.num_segments = 10
self.angle_offset = 3.1415/2 # So gravity is down
self.joint_angles = torch.zeros(self.num_segments, requires_grad=True)
self.joint_velocity = torch.ones(self.num_segments, requires_grad=True)
self.joint_acceleration = torch.zeros(self.num_segments, requires_grad=True)
self.link_lengths = torch.ones(self.num_segments, requires_grad=False)/self.num_segments
self.link_mass = torch.ones(self.num_segments, requires_grad=False)
self.xs = torch.zeros(self.num_segments+1, requires_grad=False)
self.ys = torch.zeros(self.num_segments+1, requires_grad=False)
self.x_targ=torch.tensor(-0.33, requires_grad=False)
self.y_targ=torch.tensor(0.44, requires_grad=False)
self.I = (1/3)*self.link_mass*self.link_lengths
plt.close('all')
xp = torch.cat((torch.tensor([0.0]), self.xs)).detach().cpu().numpy()
yp = torch.cat((torch.tensor([0.0]), self.ys)).detach().cpu().numpy()
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.grid(True)
self.ax.set_xlim([-2, 2])
self.ax.set_ylim([-2, 2])
self.line1, = self.ax.plot(xp, yp, 'r-') # Returns a tuple of line objects, thus the comma
self.line2, = self.ax.plot(self.x_targ.detach().cpu().numpy(),self.y_targ.detach().cpu().numpy(), 'o') # Returns a tuple of line objects, thus the comma
def forward_kinematics(self):
self.xs = torch.zeros(self.num_segments+1, requires_grad=False)
self.ys = torch.zeros(self.num_segments+1, requires_grad=False)
for s in range(1, self.num_segments+1):
self.xs[s] = self.xs[s-1] + self.link_lengths[s-1]*torch.cos(torch.sum(self.joint_angles[0:s]))
self.ys[s] = self.ys[s-1] + self.link_lengths[s-1]*torch.sin(torch.sum(self.joint_angles[0:s]))
def ComputeLagrange(self):
# Reset energies to zero
self.T = 0.0
self.U = 0.0
# Cumulative angle for kinematics
cumulative_angle = 0.0
# Cumulative velocity for translational kinetic energy
cumulative_velocity = 0.0 # Cumulative velocity for translational kinetic energy
cumulative_x = 0.0
cumulative_y = 0.0
for i in range(self.num_segments):
# Update the cumulative angle
cumulative_angle = cumulative_angle + self.joint_angles[i]
cumulative_velocity = cumulative_velocity + self.joint_velocity[i]
# Position of the COM of the current segment
com_x = cumulative_x + self.link_lengths[i] / 2 * torch.cos(cumulative_angle)
com_y = cumulative_y + self.link_lengths[i] / 2 * torch.sin(cumulative_angle)
# Update cumulative_x and cumulative_y for the next segment
cumulative_x = cumulative_x + self.link_lengths[i] * torch.cos(cumulative_angle)
cumulative_y = cumulative_y + self.link_lengths[i] * torch.sin(cumulative_angle)
# Translational velocity of the segment's center of mass
# This requires calculating the derivative of com_x and com_y with respect to time
# For simplicity, we'll assume constant velocity for this example
# translational_velocity = torch.sqrt(com_x**2 + com_y**2) * cumulative_velocity
# Translational kinetic energy
# self.T += 0.5 * self.link_mass[i] * translational_velocity**2
# Rotational Kinetic energy
self.T += 0.5 * self.I[i] * self.joint_velocity[i]**2
# Height of the segment's center of mass
# h = (self.link_lengths[i] / 2) * (1 - torch.cos(cumulative_angle + self.angle_offset))
h = com_y-1 # Height of the center of mass
# Potential energy
self.U += self.link_mass[i] * 9.81 * h
self.F = self.joint_velocity * 0.01 # Friction Energy Lost
# I think I can just add the friction in here?
self.L = self.T-self.U # Legrangeian
def EulerLagrange(self, delta_t): #(d/dt)(dL/dthetadot)-(dL/dtheta) = tau
# self.tau = self.I*self.joint_acceleration + torch.sin(self.joint_angles)*self.link_mass*9.81*self.link_lengths/2
# Compute current Lagrangian
self.ComputeLagrange()
# Save current values of dL/dthetadot
# current_dL_dthetadot = self.I * self.joint_velocity
self.ClearGrads()
self.L.backward()
current_dL_dthetadot = self.joint_velocity.grad.clone()
manual_dL_dthetadot = self.I*self.joint_velocity
# print("Automatic differentiation:", current_dL_dthetadot)
# print("Manual calculation:", manual_dL_dthetadot)
# Update state to t + delta_t
self.UpdateState(delta_t)
# Compute new Lagrangian at t + delta_t
self.ComputeLagrange()
# New value of dL/dthetadot at t + delta_t
self.ClearGrads()
self.L.backward()
# new_dL_dthetadot = self.I * self.joint_velocity
new_dL_dthetadot = self.joint_velocity.grad.clone()
# Numerical approximation of time derivative
dL_dthetadot_dt = (new_dL_dthetadot - current_dL_dthetadot) / delta_t
manual_dL_dthetadot_dt = self.I*self.joint_acceleration
# print("Automatic differentiation:", dL_dthetadot_dt)
# print("Manual calculation:", manual_dL_dthetadot_dt)
# dL/dtheta
dL_dtheta = self.joint_angles.grad
manual_dL_dtheta = -self.link_mass*9.81*self.link_lengths/2*torch.sin(self.joint_angles)
# print("Automatic differentiation:", dL_dtheta)
# print("Manual calculation:", manual_dL_dtheta)
# Euler-Lagrange equation
self.tau = dL_dthetadot_dt - dL_dtheta
friction_torque = self.joint_velocity * 0.1
# input_tau = self.tau + friction_torque
input_tau = 0
total_tau = input_tau-self.tau-friction_torque
self.joint_acceleration = total_tau/self.I
def UpdateState(self, delta_t):
# Update joint angles and velocities using a simple Euler integration
with torch.no_grad():
# Update joint angles and velocities using a simple Euler integration
self.joint_angles += self.joint_velocity * delta_t
self.joint_velocity += self.joint_acceleration * delta_t
def ClearGrads(self):
if self.joint_angles.grad is not None:
self.joint_angles.grad = None
if self.joint_velocity.grad is not None:
self.joint_velocity.grad = None
if self.joint_acceleration.grad is not None:
self.joint_acceleration.grad = None
def Plot(self):
self.forward_kinematics()
xp = torch.cat((torch.tensor([0.0]), self.xs)).detach().cpu().numpy()
yp = torch.cat((torch.tensor([0.0]), self.ys)).detach().cpu().numpy()
self.line1.set_xdata(xp)
self.line1.set_ydata(yp)
self.line2.set_xdata(self.x_targ.detach().cpu().numpy())
self.line2.set_ydata(self.y_targ.detach().cpu().numpy())
self.fig.canvas.draw()
self.fig.canvas.flush_events()
# plt.connect('motion_notify_event', self.mouse_move)
def mouse_move(self, event):
x, y = event.xdata, event.ydata
if(x and y):
self.x_targ = torch.tensor(x, dtype=torch.float, requires_grad=False)
self.y_targ = torch.tensor(y, dtype=torch.float, requires_grad=False)
#%%
env = Pendulum()
env.Plot()
#%%
for _ in range(10000):
t1 = time.perf_counter()
env.EulerLagrange(0.01)
if ( (_ % 10) == 0):
env.Plot()
t2 = time.perf_counter()
print(t2-t1)
# print(env.T)
# print(env.U)
# print(env.L)
# print(env.tau)
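# Editor's note on the scheme above (description only, no new behaviour): EulerLagrange()
# estimates d/dt(dL/dtheta_dot) by finite-differencing two autograd passes of L taken
# delta_t apart, forms tau from the Euler-Lagrange equation, subtracts a viscous friction
# torque, and converts the net torque to joint accelerations via the per-link inertia I;
# the UpdateState() call between the two passes doubles as the explicit Euler integration
# step for the joint angles and velocities.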
| 8,733 |
Python
| 38.342342 | 160 | 0.582503 |
Tbarkin121/GuardDog/python/kinematic_jacobian_test.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 01:37:05 2023
@author: Plutonium
"""
import torch
import matplotlib.pyplot as plt
import numpy as np
import time
import torchviz
torch.set_default_device('cpu')
#%%
class PlanarArm:
def __init__(self, num_segments):
print('Init Arm')
self.num_segments = num_segments
self.joint_angles = torch.zeros(num_segments, requires_grad=True)
with torch.no_grad():
self.joint_angles[0] = torch.pi/4
self.joint_angles[1] = torch.pi/4
self.joint_lengths = torch.ones(num_segments, requires_grad=False)*1.0
with torch.no_grad():
self.joint_lengths[1] = self.joint_lengths[1]/2
self.xs = torch.zeros(num_segments+1, requires_grad=False)
self.ys = torch.zeros(num_segments+1, requires_grad=False)
self.x_targ=torch.tensor(-0.33, requires_grad=False)
self.y_targ=torch.tensor(0.44, requires_grad=False)
self.weights = torch.ones([num_segments,1])
self.weights[0] = 0
plt.close('all')
xp = torch.cat((torch.tensor([0.0]), self.xs)).detach().cpu().numpy()
yp = torch.cat((torch.tensor([0.0]), self.ys)).detach().cpu().numpy()
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.grid(True)
self.ax.set_xlim([-2, 2])
self.ax.set_ylim([-2, 2])
self.line1, = self.ax.plot(xp, yp, 'r-') # Returns a tuple of line objects, thus the comma
self.line2, = self.ax.plot(self.x_targ.detach().cpu().numpy(),self.y_targ.detach().cpu().numpy(), 'o') # Returns a tuple of line objects, thus the comma
self.line3, = self.ax.plot([0,0],[0,0], 'm-') # Returns a tuple of line objects, thus the comma
def forward_kinematics(self):
self.xs = torch.zeros(self.num_segments+1, requires_grad=False)
self.ys = torch.zeros(self.num_segments+1, requires_grad=False)
for s in range(1, self.num_segments+1):
self.xs[s] = self.xs[s-1] + self.joint_lengths[s-1]*torch.cos(torch.sum(self.joint_angles[0:s]))
self.ys[s] = self.ys[s-1] + self.joint_lengths[s-1]*torch.sin(torch.sum(self.joint_angles[0:s]))
def get_residual(self):
self.dx = self.xs[-1] - self.x_targ
self.dy = self.ys[-1] - self.y_targ
# error = torch.sqrt(dx**2 + dy**2)
def compute_jacobian(self):
# Compute forward kinematics
self.forward_kinematics()
self.get_residual()
if self.joint_angles.grad is not None:
self.joint_angles.grad = None
self.dx.backward()
self.jacobian_x = self.joint_angles.grad.clone()
# Zero out the gradients before computing the next one
# self.joint_angles.grad = None
if self.joint_angles.grad is not None:
self.joint_angles.grad = None
self.dy.backward()
self.jacobian_y = self.joint_angles.grad.clone()
self.J = torch.stack((self.jacobian_x, self.jacobian_y)) # use self, not the global env instance
# Manual 2 segment jacobian calc (Checked out vs torch, it matches)
# self.test_J = torch.zeros(2,2)
# self.test_J[0,0]= - self.joint_lengths[0]*torch.sin(self.joint_angles[0]) - self.joint_lengths[1]*torch.sin(self.joint_angles[0] + self.joint_angles[1])
# self.test_J[0,1]= - self.joint_lengths[1]*torch.sin(self.joint_angles[0] + self.joint_angles[1])
# self.test_J[1,0]= self.joint_lengths[0]*torch.cos(self.joint_angles[0]) + self.joint_lengths[1]*torch.cos(self.joint_angles[0] + self.joint_angles[1])
# self.test_J[1,1]= self.joint_lengths[1]*torch.cos(self.joint_angles[0] + self.joint_angles[1])
def update_angles(self, dtheta):
with torch.no_grad():
# self.joint_angles -= dtheta.view(-1)
self.joint_angles[1] -= dtheta[1][0]
def plot(self):
# self.forward_kinematics()
xp = torch.cat((torch.tensor([0.0]), self.xs)).detach().cpu().numpy()
yp = torch.cat((torch.tensor([0.0]), self.ys)).detach().cpu().numpy()
self.line1.set_xdata(xp)
self.line1.set_ydata(yp)
self.line2.set_xdata(self.x_targ.detach().cpu().numpy())
self.line2.set_ydata(self.y_targ.detach().cpu().numpy())
self.line3.set_xdata([xp[-1], xp[-1] + 0.1*self.EndEffector_F[0].detach().cpu().numpy()[0]])
self.line3.set_ydata([yp[-1], yp[-1] + 0.1*self.EndEffector_F[1].detach().cpu().numpy()[0]])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
plt.connect('motion_notify_event', self.mouse_move)
def control(self):
m = 2
n = self.num_segments
gamma = .5
self.compute_jacobian()
# self.J_inv = torch.linalg.pinv(self.J)
# self.delta_theta = torch.matmul(self.J_inv, torch.tensor([self.dx, self.dy]))
# self.update_angles(self.delta_theta)
JJT = torch.matmul(self.J + torch.eye(self.J.shape[0])*0.00001, self.J.permute([1,0]) + torch.eye(self.J.shape[0])*0.00001)
Im = torch.eye(m)
R = torch.stack((self.dx, self.dy)).view(-1,1) # use self, not the global env instance
M1 = torch.linalg.solve(JJT, self.J)
M2 = torch.linalg.solve(JJT+gamma**2*Im, R)
In = torch.eye(n)
Zp = In - torch.matmul(self.J.permute([1,0]), M1)
DeltaThetaPrimary = torch.matmul(self.J.permute([1,0]), M2)
DeltaThetaSecondary = torch.matmul(Zp, self.joint_angles.view(-1,1) * self.weights)
DeltaTheta = DeltaThetaPrimary + DeltaThetaSecondary
self.update_angles(DeltaTheta)
def mouse_move(self, event):
x, y = event.xdata, event.ydata
if(x and y):
self.x_targ = torch.tensor(x, dtype=torch.float, requires_grad=False)
self.y_targ = torch.tensor(y, dtype=torch.float, requires_grad=False)
def endeffector_forces(self):
with torch.no_grad():
self.J_inv = torch.linalg.pinv(self.J.T + torch.eye(self.J.shape[0])*0.00001)
# self.J_inv = torch.linalg.pinv(self.J.T)
# Matches the numbers from :
# https://studywolf.wordpress.com/2013/09/02/robot-control-jacobians-velocity-and-force/
end_effector_force = torch.tensor([[1.0],[1.0]])
joint_torques = torch.matmul(self.J.T, end_effector_force)
recalc_force = torch.matmul(self.J_inv, joint_torques)
print(joint_torques)
print(recalc_force)
demand_force = torch.tensor([[1.0],[0.0]])
demand_torques = torch.matmul(self.J.T, demand_force)
# demand_torques = torch.tensor([[3.0],[0.0]])
calc_forces = torch.matmul(self.J_inv, demand_torques)
# print(self.J)
# print(self.J_inv)
print('----')
print(demand_torques)
print(calc_forces)
self.EndEffector_F = calc_forces
pass
#%%
env = PlanarArm(2)
#%%
for i in range(10000):
ang = torch.tensor(i*torch.pi/180)
# env.control(-2.0, -1.5)
start = time.perf_counter()
env.control()
env.endeffector_forces()
end = time.perf_counter()
dt = end-start
# print(f"Control Time : {dt}")
# print(f"end effector pos : ({env.xs[-1]},{env.ys[-1]})")
env.plot()
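# Editor's note on control() above (a standard reading of the algebra, not author commentary):
# DeltaThetaPrimary is a damped least-squares step, J^T (J J^T + gamma^2 I)^-1 R, that drives
# the end-effector residual R = (dx, dy) toward zero, while DeltaThetaSecondary pushes the
# weighted joint angles through the nullspace projector Zp = I - J^T (J J^T)^-1 J, so the
# secondary joint-centering motion does not disturb the end-effector task.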
| 7,612 |
Python
| 36.502463 | 167 | 0.565423 |
Tbarkin121/GuardDog/urdf/Biped_Longfoot/config/joint_names_Biped_Longfoot.yaml
|
controller_joint_names: ['', 'L_J1', 'L_J2', 'L_J3', 'R_J1', 'R_J2', 'R_J3', 'Wheel_X', 'Wheel_Y', ]
| 101 |
YAML
| 49.999975 | 100 | 0.49505 |
Tbarkin121/GuardDog/stm32/onnx_test.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 5 18:38:17 2024
@author: tylerbarkin
"""
import numpy as np
import onnx
import onnxruntime as ort
onnx_model = onnx.load("pendulum.onnx")
# Check that the model is well formed
onnx.checker.check_model(onnx_model)
ort_model = ort.InferenceSession("pendulum.onnx")
in_data = np.zeros((1,2))
in_data[0,0] = 0.52
in_data[0,1] = -0.32
outputs = ort_model.run(
None,
{"obs": in_data.astype(np.float32)},
)
print(outputs)
#Results from STM32 with 0.52 and -0.32 as inputs
# 1.80677, -0.67291, 0.82512
# Results from python with 0.52 and -0.32 as inputs
# 1.80461, -0.67291, 0.82535
# Minor differences are likely due to quantization/compression; the heavier the compression, the worse the inference accuracy.
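# Editor's sketch: quantify the STM32-vs-desktop mismatch quoted above.
stm32_out = np.array([1.80677, -0.67291, 0.82512])
python_out = np.array([1.80461, -0.67291, 0.82535])
print(np.abs(stm32_out - python_out)) # largest error is ~2.2e-3, on the first output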
| 748 |
Python
| 20.399999 | 107 | 0.697861 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/system_stm32g4xx.c
|
/**
******************************************************************************
* @file system_stm32g4xx.c
* @author MCD Application Team
* @brief CMSIS Cortex-M4 Device Peripheral Access Layer System Source File
*
* This file provides two functions and one global variable to be called from
* user application:
* - SystemInit(): This function is called at startup just after reset and
* before branch to main program. This call is made inside
* the "startup_stm32g4xx.s" file.
*
* - SystemCoreClock variable: Contains the core clock (HCLK), it can be used
* by the user application to setup the SysTick
* timer or configure other parameters.
*
* - SystemCoreClockUpdate(): Updates the variable SystemCoreClock and must
* be called whenever the core clock is changed
* during program execution.
*
* After each device reset the HSI (16 MHz) is used as system clock source.
* Then SystemInit() function is called, in "startup_stm32g4xx.s" file, to
* configure the system clock before to branch to main program.
*
* This file configures the system clock as follows:
*=============================================================================
*-----------------------------------------------------------------------------
* System Clock source | HSI
*-----------------------------------------------------------------------------
* SYSCLK(Hz) | 16000000
*-----------------------------------------------------------------------------
* HCLK(Hz) | 16000000
*-----------------------------------------------------------------------------
* AHB Prescaler | 1
*-----------------------------------------------------------------------------
* APB1 Prescaler | 1
*-----------------------------------------------------------------------------
* APB2 Prescaler | 1
*-----------------------------------------------------------------------------
* PLL_M | 1
*-----------------------------------------------------------------------------
* PLL_N | 16
*-----------------------------------------------------------------------------
* PLL_P | 7
*-----------------------------------------------------------------------------
* PLL_Q | 2
*-----------------------------------------------------------------------------
* PLL_R | 2
*-----------------------------------------------------------------------------
* Require 48MHz for RNG | Disabled
*-----------------------------------------------------------------------------
*=============================================================================
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/** @addtogroup CMSIS
* @{
*/
/** @addtogroup stm32g4xx_system
* @{
*/
/** @addtogroup STM32G4xx_System_Private_Includes
* @{
*/
#include "stm32g4xx.h"
#if !defined (HSE_VALUE)
#define HSE_VALUE 24000000U /*!< Value of the External oscillator in Hz */
#endif /* HSE_VALUE */
#if !defined (HSI_VALUE)
#define HSI_VALUE 16000000U /*!< Value of the Internal oscillator in Hz*/
#endif /* HSI_VALUE */
/**
* @}
*/
/** @addtogroup STM32G4xx_System_Private_TypesDefinitions
* @{
*/
/**
* @}
*/
/** @addtogroup STM32G4xx_System_Private_Defines
* @{
*/
/************************* Miscellaneous Configuration ************************/
/* Note: Following vector table addresses must be defined in line with linker
configuration. */
/*!< Uncomment the following line if you need to relocate the vector table
anywhere in Flash or Sram, else the vector table is kept at the automatic
remap of boot address selected */
/* #define USER_VECT_TAB_ADDRESS */
#if defined(USER_VECT_TAB_ADDRESS)
/*!< Uncomment the following line if you need to relocate your vector Table
in Sram else user remap will be done in Flash. */
/* #define VECT_TAB_SRAM */
#if defined(VECT_TAB_SRAM)
#define VECT_TAB_BASE_ADDRESS SRAM_BASE /*!< Vector Table base address field.
This value must be a multiple of 0x200. */
#define VECT_TAB_OFFSET 0x00000000U /*!< Vector Table base offset field.
This value must be a multiple of 0x200. */
#else
#define VECT_TAB_BASE_ADDRESS FLASH_BASE /*!< Vector Table base address field.
This value must be a multiple of 0x200. */
#define VECT_TAB_OFFSET 0x00000000U /*!< Vector Table base offset field.
This value must be a multiple of 0x200. */
#endif /* VECT_TAB_SRAM */
#endif /* USER_VECT_TAB_ADDRESS */
/******************************************************************************/
/**
* @}
*/
/** @addtogroup STM32G4xx_System_Private_Macros
* @{
*/
/**
* @}
*/
/** @addtogroup STM32G4xx_System_Private_Variables
* @{
*/
/* The SystemCoreClock variable is updated in three ways:
1) by calling CMSIS function SystemCoreClockUpdate()
2) by calling HAL API function HAL_RCC_GetHCLKFreq()
3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency
Note: If you use this function to configure the system clock; then there
is no need to call the 2 first functions listed above, since SystemCoreClock
variable is updated automatically.
*/
uint32_t SystemCoreClock = HSI_VALUE;
const uint8_t AHBPrescTable[16] = {0U, 0U, 0U, 0U, 0U, 0U, 0U, 0U, 1U, 2U, 3U, 4U, 6U, 7U, 8U, 9U};
const uint8_t APBPrescTable[8] = {0U, 0U, 0U, 0U, 1U, 2U, 3U, 4U};
/**
* @}
*/
/** @addtogroup STM32G4xx_System_Private_FunctionPrototypes
* @{
*/
/**
* @}
*/
/** @addtogroup STM32G4xx_System_Private_Functions
* @{
*/
/**
* @brief Setup the microcontroller system.
* @param None
* @retval None
*/
void SystemInit(void)
{
/* FPU settings ------------------------------------------------------------*/
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
SCB->CPACR |= ((3UL << (10*2))|(3UL << (11*2))); /* set CP10 and CP11 Full Access */
#endif
/* Configure the Vector Table location add offset address ------------------*/
#if defined(USER_VECT_TAB_ADDRESS)
SCB->VTOR = VECT_TAB_BASE_ADDRESS | VECT_TAB_OFFSET; /* Vector Table Relocation in Internal SRAM */
#endif /* USER_VECT_TAB_ADDRESS */
}
/**
* @brief Update SystemCoreClock variable according to Clock Register Values.
* The SystemCoreClock variable contains the core clock (HCLK), it can
* be used by the user application to setup the SysTick timer or configure
* other parameters.
*
* @note Each time the core clock (HCLK) changes, this function must be called
* to update SystemCoreClock variable value. Otherwise, any configuration
* based on this variable will be incorrect.
*
* @note - The system frequency computed by this function is not the real
* frequency in the chip. It is calculated based on the predefined
* constant and the selected clock source:
*
* - If SYSCLK source is HSI, SystemCoreClock will contain the HSI_VALUE(**)
*
* - If SYSCLK source is HSE, SystemCoreClock will contain the HSE_VALUE(***)
*
* - If SYSCLK source is PLL, SystemCoreClock will contain the HSE_VALUE(***)
* or HSI_VALUE(*) multiplied/divided by the PLL factors.
*
* (**) HSI_VALUE is a constant defined in stm32g4xx_hal.h file (default value
* 16 MHz) but the real value may vary depending on the variations
* in voltage and temperature.
*
* (***) HSE_VALUE is a constant defined in stm32g4xx_hal.h file (default value
* 24 MHz), user has to ensure that HSE_VALUE is same as the real
* frequency of the crystal used. Otherwise, this function may
* have wrong result.
*
* - The result of this function could be not correct when using fractional
* value for HSE crystal.
*
* @param None
* @retval None
*/
void SystemCoreClockUpdate(void)
{
uint32_t tmp, pllvco, pllr, pllsource, pllm;
/* Get SYSCLK source -------------------------------------------------------*/
switch (RCC->CFGR & RCC_CFGR_SWS)
{
case 0x04: /* HSI used as system clock source */
SystemCoreClock = HSI_VALUE;
break;
case 0x08: /* HSE used as system clock source */
SystemCoreClock = HSE_VALUE;
break;
case 0x0C: /* PLL used as system clock source */
/* PLL_VCO = (HSE_VALUE or HSI_VALUE / PLLM) * PLLN
SYSCLK = PLL_VCO / PLLR
*/
pllsource = (RCC->PLLCFGR & RCC_PLLCFGR_PLLSRC);
pllm = ((RCC->PLLCFGR & RCC_PLLCFGR_PLLM) >> 4) + 1U ;
if (pllsource == 0x02UL) /* HSI used as PLL clock source */
{
pllvco = (HSI_VALUE / pllm);
}
else /* HSE used as PLL clock source */
{
pllvco = (HSE_VALUE / pllm);
}
pllvco = pllvco * ((RCC->PLLCFGR & RCC_PLLCFGR_PLLN) >> 8);
pllr = (((RCC->PLLCFGR & RCC_PLLCFGR_PLLR) >> 25) + 1U) * 2U;
SystemCoreClock = pllvco/pllr;
break;
default:
break;
}
/* Compute HCLK clock frequency --------------------------------------------*/
/* Get HCLK prescaler */
tmp = AHBPrescTable[((RCC->CFGR & RCC_CFGR_HPRE) >> 4)];
/* HCLK clock frequency */
SystemCoreClock >>= tmp;
}
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
| 10,571 |
C
| 35.965035 | 101 | 0.4781 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/stm32g4xx_hal_msp.c
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file stm32g4xx_hal_msp.c
* @brief This file provides code for the MSP Initialization
* and de-Initialization codes.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Includes ------------------------------------------------------------------*/
#include "main.h"
/* USER CODE BEGIN Includes */
/* USER CODE END Includes */
/* Private typedef -----------------------------------------------------------*/
/* USER CODE BEGIN TD */
/* USER CODE END TD */
/* Private define ------------------------------------------------------------*/
/* USER CODE BEGIN Define */
/* USER CODE END Define */
/* Private macro -------------------------------------------------------------*/
/* USER CODE BEGIN Macro */
/* USER CODE END Macro */
/* Private variables ---------------------------------------------------------*/
/* USER CODE BEGIN PV */
/* USER CODE END PV */
/* Private function prototypes -----------------------------------------------*/
/* USER CODE BEGIN PFP */
/* USER CODE END PFP */
/* External functions --------------------------------------------------------*/
/* USER CODE BEGIN ExternalFunctions */
/* USER CODE END ExternalFunctions */
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
/**
* Initializes the Global MSP.
*/
void HAL_MspInit(void)
{
/* USER CODE BEGIN MspInit 0 */
/* USER CODE END MspInit 0 */
__HAL_RCC_SYSCFG_CLK_ENABLE();
__HAL_RCC_PWR_CLK_ENABLE();
/* System interrupt init*/
/** Disable the internal Pull-Up in Dead Battery pins of UCPD peripheral
*/
HAL_PWREx_DisableUCPDDeadBattery();
/* USER CODE BEGIN MspInit 1 */
/* USER CODE END MspInit 1 */
}
/* USER CODE BEGIN 1 */
/* USER CODE END 1 */
| 2,268 |
C
| 25.080459 | 80 | 0.454145 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/sysmem.c
|
/**
******************************************************************************
* @file sysmem.c
* @author Generated by STM32CubeIDE
* @brief STM32CubeIDE System Memory calls file
*
* For more information about which C functions
* need which of these lowlevel functions
* please consult the newlib libc manual
******************************************************************************
* @attention
*
* Copyright (c) 2023 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* Includes */
#include <errno.h>
#include <stdint.h>
/**
* Pointer to the current high watermark of the heap usage
*/
static uint8_t *__sbrk_heap_end = NULL;
/**
* @brief _sbrk() allocates memory to the newlib heap and is used by malloc
* and others from the C library
*
* @verbatim
* ############################################################################
* # .data # .bss # newlib heap # MSP stack #
* # # # # Reserved by _Min_Stack_Size #
* ############################################################################
* ^-- RAM start ^-- _end _estack, RAM end --^
* @endverbatim
*
* This implementation starts allocating at the '_end' linker symbol
* The '_Min_Stack_Size' linker symbol reserves a memory for the MSP stack
* The implementation considers '_estack' linker symbol to be RAM end
* NOTE: If the MSP stack, at any point during execution, grows larger than the
* reserved size, please increase the '_Min_Stack_Size'.
*
* @param incr Memory size
* @return Pointer to allocated memory
*/
void *_sbrk(ptrdiff_t incr)
{
extern uint8_t _end; /* Symbol defined in the linker script */
extern uint8_t _estack; /* Symbol defined in the linker script */
extern uint32_t _Min_Stack_Size; /* Symbol defined in the linker script */
const uint32_t stack_limit = (uint32_t)&_estack - (uint32_t)&_Min_Stack_Size;
const uint8_t *max_heap = (uint8_t *)stack_limit;
uint8_t *prev_heap_end;
/* Initialize heap end at first call */
if (NULL == __sbrk_heap_end)
{
__sbrk_heap_end = &_end;
}
/* Protect heap from growing into the reserved MSP stack */
if (__sbrk_heap_end + incr > max_heap)
{
errno = ENOMEM;
return (void *)-1;
}
prev_heap_end = __sbrk_heap_end;
__sbrk_heap_end += incr;
return (void *)prev_heap_end;
}
| 2,726 |
C
| 33.0875 | 79 | 0.537784 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/tim.c
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file tim.c
* @brief This file provides code for the configuration
* of the TIM instances.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Includes ------------------------------------------------------------------*/
#include "tim.h"
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
TIM_HandleTypeDef htim2;
/* TIM2 init function */
void MX_TIM2_Init(void)
{
/* USER CODE BEGIN TIM2_Init 0 */
/* USER CODE END TIM2_Init 0 */
TIM_ClockConfigTypeDef sClockSourceConfig = {0};
TIM_MasterConfigTypeDef sMasterConfig = {0};
/* USER CODE BEGIN TIM2_Init 1 */
/* USER CODE END TIM2_Init 1 */
htim2.Instance = TIM2;
htim2.Init.Prescaler = 170 - 1; /* TIM2 clock is 170 MHz and PSC divides by (PSC + 1), so 169 gives the 1 MHz tick assumed in main.c */
htim2.Init.CounterMode = TIM_COUNTERMODE_UP;
htim2.Init.Period = 4294967295; /* free-running 32-bit counter */
htim2.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1;
htim2.Init.AutoReloadPreload = TIM_AUTORELOAD_PRELOAD_DISABLE;
if (HAL_TIM_Base_Init(&htim2) != HAL_OK)
{
Error_Handler();
}
sClockSourceConfig.ClockSource = TIM_CLOCKSOURCE_INTERNAL;
if (HAL_TIM_ConfigClockSource(&htim2, &sClockSourceConfig) != HAL_OK)
{
Error_Handler();
}
sMasterConfig.MasterOutputTrigger = TIM_TRGO_RESET;
sMasterConfig.MasterSlaveMode = TIM_MASTERSLAVEMODE_DISABLE;
if (HAL_TIMEx_MasterConfigSynchronization(&htim2, &sMasterConfig) != HAL_OK)
{
Error_Handler();
}
/* USER CODE BEGIN TIM2_Init 2 */
/* USER CODE END TIM2_Init 2 */
}
void HAL_TIM_Base_MspInit(TIM_HandleTypeDef* tim_baseHandle)
{
if(tim_baseHandle->Instance==TIM2)
{
/* USER CODE BEGIN TIM2_MspInit 0 */
/* USER CODE END TIM2_MspInit 0 */
/* TIM2 clock enable */
__HAL_RCC_TIM2_CLK_ENABLE();
/* USER CODE BEGIN TIM2_MspInit 1 */
/* USER CODE END TIM2_MspInit 1 */
}
}
void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef* tim_baseHandle)
{
if(tim_baseHandle->Instance==TIM2)
{
/* USER CODE BEGIN TIM2_MspDeInit 0 */
/* USER CODE END TIM2_MspDeInit 0 */
/* Peripheral clock disable */
__HAL_RCC_TIM2_CLK_DISABLE();
/* USER CODE BEGIN TIM2_MspDeInit 1 */
/* USER CODE END TIM2_MspDeInit 1 */
}
}
/* USER CODE BEGIN 1 */
/* USER CODE END 1 */
| 2,698 |
C
| 24.704762 | 80 | 0.59192 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/stm32g4xx_it.c
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file stm32g4xx_it.c
* @brief Interrupt Service Routines.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Includes ------------------------------------------------------------------*/
#include "main.h"
#include "stm32g4xx_it.h"
/* Private includes ----------------------------------------------------------*/
/* USER CODE BEGIN Includes */
/* USER CODE END Includes */
/* Private typedef -----------------------------------------------------------*/
/* USER CODE BEGIN TD */
/* USER CODE END TD */
/* Private define ------------------------------------------------------------*/
/* USER CODE BEGIN PD */
/* USER CODE END PD */
/* Private macro -------------------------------------------------------------*/
/* USER CODE BEGIN PM */
/* USER CODE END PM */
/* Private variables ---------------------------------------------------------*/
/* USER CODE BEGIN PV */
/* USER CODE END PV */
/* Private function prototypes -----------------------------------------------*/
/* USER CODE BEGIN PFP */
/* USER CODE END PFP */
/* Private user code ---------------------------------------------------------*/
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
/* External variables --------------------------------------------------------*/
extern UART_HandleTypeDef huart2;
/* USER CODE BEGIN EV */
/* USER CODE END EV */
/******************************************************************************/
/* Cortex-M4 Processor Interruption and Exception Handlers */
/******************************************************************************/
/**
* @brief This function handles Non maskable interrupt.
*/
void NMI_Handler(void)
{
/* USER CODE BEGIN NonMaskableInt_IRQn 0 */
/* USER CODE END NonMaskableInt_IRQn 0 */
/* USER CODE BEGIN NonMaskableInt_IRQn 1 */
while (1)
{
}
/* USER CODE END NonMaskableInt_IRQn 1 */
}
/**
* @brief This function handles Hard fault interrupt.
*/
void HardFault_Handler(void)
{
/* USER CODE BEGIN HardFault_IRQn 0 */
/* USER CODE END HardFault_IRQn 0 */
while (1)
{
/* USER CODE BEGIN W1_HardFault_IRQn 0 */
/* USER CODE END W1_HardFault_IRQn 0 */
}
}
/**
* @brief This function handles Memory management fault.
*/
void MemManage_Handler(void)
{
/* USER CODE BEGIN MemoryManagement_IRQn 0 */
/* USER CODE END MemoryManagement_IRQn 0 */
while (1)
{
/* USER CODE BEGIN W1_MemoryManagement_IRQn 0 */
/* USER CODE END W1_MemoryManagement_IRQn 0 */
}
}
/**
* @brief This function handles Prefetch fault, memory access fault.
*/
void BusFault_Handler(void)
{
/* USER CODE BEGIN BusFault_IRQn 0 */
/* USER CODE END BusFault_IRQn 0 */
while (1)
{
/* USER CODE BEGIN W1_BusFault_IRQn 0 */
/* USER CODE END W1_BusFault_IRQn 0 */
}
}
/**
* @brief This function handles Undefined instruction or illegal state.
*/
void UsageFault_Handler(void)
{
/* USER CODE BEGIN UsageFault_IRQn 0 */
/* USER CODE END UsageFault_IRQn 0 */
while (1)
{
/* USER CODE BEGIN W1_UsageFault_IRQn 0 */
/* USER CODE END W1_UsageFault_IRQn 0 */
}
}
/**
* @brief This function handles System service call via SWI instruction.
*/
void SVC_Handler(void)
{
/* USER CODE BEGIN SVCall_IRQn 0 */
/* USER CODE END SVCall_IRQn 0 */
/* USER CODE BEGIN SVCall_IRQn 1 */
/* USER CODE END SVCall_IRQn 1 */
}
/**
* @brief This function handles Debug monitor.
*/
void DebugMon_Handler(void)
{
/* USER CODE BEGIN DebugMonitor_IRQn 0 */
/* USER CODE END DebugMonitor_IRQn 0 */
/* USER CODE BEGIN DebugMonitor_IRQn 1 */
/* USER CODE END DebugMonitor_IRQn 1 */
}
/**
* @brief This function handles Pendable request for system service.
*/
void PendSV_Handler(void)
{
/* USER CODE BEGIN PendSV_IRQn 0 */
/* USER CODE END PendSV_IRQn 0 */
/* USER CODE BEGIN PendSV_IRQn 1 */
/* USER CODE END PendSV_IRQn 1 */
}
/**
* @brief This function handles System tick timer.
*/
void SysTick_Handler(void)
{
/* USER CODE BEGIN SysTick_IRQn 0 */
/* USER CODE END SysTick_IRQn 0 */
HAL_IncTick();
/* USER CODE BEGIN SysTick_IRQn 1 */
/* USER CODE END SysTick_IRQn 1 */
}
/******************************************************************************/
/* STM32G4xx Peripheral Interrupt Handlers */
/* Add here the Interrupt Handlers for the used peripherals. */
/* For the available peripheral interrupt handler names, */
/* please refer to the startup file (startup_stm32g4xx.s). */
/******************************************************************************/
/**
* @brief This function handles USART2 global interrupt / USART2 wake-up interrupt through EXTI line 26.
*/
void USART2_IRQHandler(void)
{
/* USER CODE BEGIN USART2_IRQn 0 */
/* USER CODE END USART2_IRQn 0 */
HAL_UART_IRQHandler(&huart2);
/* USER CODE BEGIN USART2_IRQn 1 */
/* USER CODE END USART2_IRQn 1 */
}
/* USER CODE BEGIN 1 */
/* USER CODE END 1 */
| 5,569 |
C
| 24.550459 | 105 | 0.510864 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/usart.c
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file usart.c
* @brief This file provides code for the configuration
* of the USART instances.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Includes ------------------------------------------------------------------*/
#include "usart.h"
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
UART_HandleTypeDef huart2;
/* USART2 init function */
void MX_USART2_UART_Init(void)
{
/* USER CODE BEGIN USART2_Init 0 */
/* USER CODE END USART2_Init 0 */
/* USER CODE BEGIN USART2_Init 1 */
/* USER CODE END USART2_Init 1 */
huart2.Instance = USART2;
huart2.Init.BaudRate = 115200;
huart2.Init.WordLength = UART_WORDLENGTH_8B;
huart2.Init.StopBits = UART_STOPBITS_1;
huart2.Init.Parity = UART_PARITY_NONE;
huart2.Init.Mode = UART_MODE_TX_RX;
huart2.Init.HwFlowCtl = UART_HWCONTROL_NONE;
huart2.Init.OverSampling = UART_OVERSAMPLING_16;
huart2.Init.OneBitSampling = UART_ONE_BIT_SAMPLE_DISABLE;
huart2.Init.ClockPrescaler = UART_PRESCALER_DIV1;
huart2.AdvancedInit.AdvFeatureInit = UART_ADVFEATURE_NO_INIT;
if (HAL_UART_Init(&huart2) != HAL_OK)
{
Error_Handler();
}
if (HAL_UARTEx_SetTxFifoThreshold(&huart2, UART_TXFIFO_THRESHOLD_1_8) != HAL_OK)
{
Error_Handler();
}
if (HAL_UARTEx_SetRxFifoThreshold(&huart2, UART_RXFIFO_THRESHOLD_1_8) != HAL_OK)
{
Error_Handler();
}
if (HAL_UARTEx_DisableFifoMode(&huart2) != HAL_OK)
{
Error_Handler();
}
/* USER CODE BEGIN USART2_Init 2 */
/* USER CODE END USART2_Init 2 */
}
void HAL_UART_MspInit(UART_HandleTypeDef* uartHandle)
{
GPIO_InitTypeDef GPIO_InitStruct = {0};
RCC_PeriphCLKInitTypeDef PeriphClkInit = {0};
if(uartHandle->Instance==USART2)
{
/* USER CODE BEGIN USART2_MspInit 0 */
/* USER CODE END USART2_MspInit 0 */
/** Initializes the peripherals clocks
*/
PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_USART2;
PeriphClkInit.Usart2ClockSelection = RCC_USART2CLKSOURCE_PCLK1;
if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK)
{
Error_Handler();
}
/* USART2 clock enable */
__HAL_RCC_USART2_CLK_ENABLE();
__HAL_RCC_GPIOA_CLK_ENABLE();
/**USART2 GPIO Configuration
PA2 ------> USART2_TX
PA3 ------> USART2_RX
*/
GPIO_InitStruct.Pin = USART2_TX_Pin|USART2_RX_Pin;
GPIO_InitStruct.Mode = GPIO_MODE_AF_PP;
GPIO_InitStruct.Pull = GPIO_NOPULL;
GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
GPIO_InitStruct.Alternate = GPIO_AF7_USART2;
HAL_GPIO_Init(GPIOA, &GPIO_InitStruct);
/* USART2 interrupt Init */
HAL_NVIC_SetPriority(USART2_IRQn, 0, 0);
HAL_NVIC_EnableIRQ(USART2_IRQn);
/* USER CODE BEGIN USART2_MspInit 1 */
/* USER CODE END USART2_MspInit 1 */
}
}
void HAL_UART_MspDeInit(UART_HandleTypeDef* uartHandle)
{
if(uartHandle->Instance==USART2)
{
/* USER CODE BEGIN USART2_MspDeInit 0 */
/* USER CODE END USART2_MspDeInit 0 */
/* Peripheral clock disable */
__HAL_RCC_USART2_CLK_DISABLE();
/**USART2 GPIO Configuration
PA2 ------> USART2_TX
PA3 ------> USART2_RX
*/
HAL_GPIO_DeInit(GPIOA, USART2_TX_Pin|USART2_RX_Pin);
/* USART2 interrupt Deinit */
HAL_NVIC_DisableIRQ(USART2_IRQn);
/* USER CODE BEGIN USART2_MspDeInit 1 */
/* USER CODE END USART2_MspDeInit 1 */
}
}
/* USER CODE BEGIN 1 */
/* USER CODE END 1 */
| 3,922 |
C
| 25.869863 | 82 | 0.608108 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/gpio.c
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file gpio.c
* @brief This file provides code for the configuration
* of all used GPIO pins.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Includes ------------------------------------------------------------------*/
#include "gpio.h"
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
/*----------------------------------------------------------------------------*/
/* Configure GPIO */
/*----------------------------------------------------------------------------*/
/* USER CODE BEGIN 1 */
/* USER CODE END 1 */
/** Configure pins as
* Analog
* Input
* Output
* EVENT_OUT
* EXTI
*/
void MX_GPIO_Init(void)
{
GPIO_InitTypeDef GPIO_InitStruct = {0};
/* GPIO Ports Clock Enable */
__HAL_RCC_GPIOA_CLK_ENABLE();
__HAL_RCC_GPIOB_CLK_ENABLE();
/*Configure GPIO pin Output Level */
HAL_GPIO_WritePin(LD2_GPIO_Port, LD2_Pin, GPIO_PIN_RESET);
/*Configure GPIO pin : PtPin */
GPIO_InitStruct.Pin = LD2_Pin;
GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP;
GPIO_InitStruct.Pull = GPIO_NOPULL;
GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW;
HAL_GPIO_Init(LD2_GPIO_Port, &GPIO_InitStruct);
}
/* USER CODE BEGIN 2 */
/* USER CODE END 2 */
| 1,840 |
C
| 26.893939 | 80 | 0.451087 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/main.c
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file : main.c
* @brief : Main program body
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Includes ------------------------------------------------------------------*/
#include "main.h"
#include "crc.h"
#include "tim.h"
#include "usart.h"
#include "gpio.h"
#include "app_x-cube-ai.h"
/* Private includes ----------------------------------------------------------*/
/* USER CODE BEGIN Includes */
#include <stdint.h>
#include "network.h"
/* USER CODE END Includes */
/* Private typedef -----------------------------------------------------------*/
/* USER CODE BEGIN PTD */
/* USER CODE END PTD */
/* Private define ------------------------------------------------------------*/
/* USER CODE BEGIN PD */
/* USER CODE END PD */
/* Private macro -------------------------------------------------------------*/
/* USER CODE BEGIN PM */
/* USER CODE END PM */
/* Private variables ---------------------------------------------------------*/
/* USER CODE BEGIN PV */
union floatUnion {
float floatValue[4];
uint8_t bytes[4 * sizeof(float)];
};
union floatUnion data;
ai_float in_data1[AI_NETWORK_IN_1_SIZE];
ai_float out_data1[AI_NETWORK_OUT_1_SIZE];
ai_float out_data2[AI_NETWORK_OUT_2_SIZE];
ai_float out_data3[AI_NETWORK_OUT_3_SIZE];
uint8_t data_flag;
/* USER CODE END PV */
/* Private function prototypes -----------------------------------------------*/
void SystemClock_Config(void);
/* USER CODE BEGIN PFP */
/* USER CODE END PFP */
/* Private user code ---------------------------------------------------------*/
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
/**
* @brief The application entry point.
* @retval int
*/
int main(void)
{
/* USER CODE BEGIN 1 */
data_flag=0;
/* USER CODE END 1 */
/* MCU Configuration--------------------------------------------------------*/
/* Reset of all peripherals, Initializes the Flash interface and the Systick. */
HAL_Init();
/* USER CODE BEGIN Init */
/* USER CODE END Init */
/* Configure the system clock */
SystemClock_Config();
/* USER CODE BEGIN SysInit */
/* USER CODE END SysInit */
/* Initialize all configured peripherals */
MX_GPIO_Init();
MX_USART2_UART_Init();
MX_CRC_Init();
MX_TIM2_Init();
MX_X_CUBE_AI_Init();
/* USER CODE BEGIN 2 */
HAL_UART_Receive_IT(&huart2, data.bytes, sizeof(data.bytes));
/* USER CODE END 2 */
/* Infinite loop */
/* USER CODE BEGIN WHILE */
while (1)
{
// HAL_UART_Transmit(&huart2, data.bytes, sizeof(data.bytes), 100);
// Read 16 bytes into data.bytes
// HAL_UART_Receive(&huart2, data.bytes, sizeof(data.bytes), 1000);
if(data_flag)
{
TIM2->CNT = 0;
HAL_TIM_Base_Start(&htim2);
in_data1[0] = data.floatValue[0]; // Sin Encoding
in_data1[1] = data.floatValue[1]; // Cosine Encoding
in_data1[2] = data.floatValue[2]; // Velocity
MX_X_CUBE_AI_Process();
HAL_TIM_Base_Stop(&htim2);
data.floatValue[0] = out_data1[0]; // Value
data.floatValue[1] = out_data2[0]; // Mu
data.floatValue[2] = out_data3[0]; // STD
data.floatValue[3] = (float)TIM2->CNT/(1000000); // Execution Time
data_flag = 0;
HAL_UART_Transmit(&huart2, data.bytes, sizeof(data.bytes), 100);
HAL_UART_Receive_IT(&huart2, data.bytes, sizeof(data.bytes));
}
/* USER CODE END WHILE */
/* USER CODE BEGIN 3 */
}
/* USER CODE END 3 */
}
/**
* @brief System Clock Configuration
* @retval None
*/
void SystemClock_Config(void)
{
RCC_OscInitTypeDef RCC_OscInitStruct = {0};
RCC_ClkInitTypeDef RCC_ClkInitStruct = {0};
/** Configure the main internal regulator output voltage
*/
HAL_PWREx_ControlVoltageScaling(PWR_REGULATOR_VOLTAGE_SCALE1_BOOST);
/** Initializes the RCC Oscillators according to the specified parameters
* in the RCC_OscInitTypeDef structure.
*/
RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI;
RCC_OscInitStruct.HSIState = RCC_HSI_ON;
RCC_OscInitStruct.HSICalibrationValue = RCC_HSICALIBRATION_DEFAULT;
RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON;
RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSI;
RCC_OscInitStruct.PLL.PLLM = RCC_PLLM_DIV4;
RCC_OscInitStruct.PLL.PLLN = 85;
RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2;
RCC_OscInitStruct.PLL.PLLQ = RCC_PLLQ_DIV2;
RCC_OscInitStruct.PLL.PLLR = RCC_PLLR_DIV2;
if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK)
{
Error_Handler();
}
/** Initializes the CPU, AHB and APB buses clocks
*/
RCC_ClkInitStruct.ClockType = RCC_CLOCKTYPE_HCLK|RCC_CLOCKTYPE_SYSCLK
|RCC_CLOCKTYPE_PCLK1|RCC_CLOCKTYPE_PCLK2;
RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK;
RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV1;
RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1;
if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_4) != HAL_OK)
{
Error_Handler();
}
}
/* USER CODE BEGIN 4 */
void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart)
{
// Set the data flag here; the inference and UART reply are handled in the main loop
data_flag = 1;
}
void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart)
{
}
/* USER CODE END 4 */
/**
* @brief This function is executed in case of error occurrence.
* @retval None
*/
void Error_Handler(void)
{
/* USER CODE BEGIN Error_Handler_Debug */
/* User can add his own implementation to report the HAL error return state */
__disable_irq();
while (1)
{
}
/* USER CODE END Error_Handler_Debug */
}
#ifdef USE_FULL_ASSERT
/**
* @brief Reports the name of the source file and the source line number
* where the assert_param error has occurred.
* @param file: pointer to the source file name
* @param line: assert_param error line source number
* @retval None
*/
void assert_failed(uint8_t *file, uint32_t line)
{
/* USER CODE BEGIN 6 */
/* User can add his own implementation to report the file name and line number,
ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
/* USER CODE END 6 */
}
#endif /* USE_FULL_ASSERT */
| 6,694 |
C
| 27.248945 | 82 | 0.582761 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Src/crc.c
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file crc.c
* @brief This file provides code for the configuration
* of the CRC instances.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Includes ------------------------------------------------------------------*/
#include "crc.h"
/* USER CODE BEGIN 0 */
/* USER CODE END 0 */
CRC_HandleTypeDef hcrc;
/* CRC init function */
void MX_CRC_Init(void)
{
/* USER CODE BEGIN CRC_Init 0 */
/* USER CODE END CRC_Init 0 */
/* USER CODE BEGIN CRC_Init 1 */
/* USER CODE END CRC_Init 1 */
hcrc.Instance = CRC;
hcrc.Init.DefaultPolynomialUse = DEFAULT_POLYNOMIAL_ENABLE;
hcrc.Init.DefaultInitValueUse = DEFAULT_INIT_VALUE_ENABLE;
hcrc.Init.InputDataInversionMode = CRC_INPUTDATA_INVERSION_NONE;
hcrc.Init.OutputDataInversionMode = CRC_OUTPUTDATA_INVERSION_DISABLE;
hcrc.InputDataFormat = CRC_INPUTDATA_FORMAT_BYTES;
if (HAL_CRC_Init(&hcrc) != HAL_OK)
{
Error_Handler();
}
/* USER CODE BEGIN CRC_Init 2 */
/* USER CODE END CRC_Init 2 */
}
void HAL_CRC_MspInit(CRC_HandleTypeDef* crcHandle)
{
if(crcHandle->Instance==CRC)
{
/* USER CODE BEGIN CRC_MspInit 0 */
/* USER CODE END CRC_MspInit 0 */
/* CRC clock enable */
__HAL_RCC_CRC_CLK_ENABLE();
/* USER CODE BEGIN CRC_MspInit 1 */
/* USER CODE END CRC_MspInit 1 */
}
}
void HAL_CRC_MspDeInit(CRC_HandleTypeDef* crcHandle)
{
if(crcHandle->Instance==CRC)
{
/* USER CODE BEGIN CRC_MspDeInit 0 */
/* USER CODE END CRC_MspDeInit 0 */
/* Peripheral clock disable */
__HAL_RCC_CRC_CLK_DISABLE();
/* USER CODE BEGIN CRC_MspDeInit 1 */
/* USER CODE END CRC_MspDeInit 1 */
}
}
/* USER CODE BEGIN 1 */
/* USER CODE END 1 */
| 2,235 |
C
| 23.571428 | 80 | 0.564206 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Inc/stm32g4xx_it.h
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file stm32g4xx_it.h
* @brief This file contains the headers of the interrupt handlers.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32G4xx_IT_H
#define __STM32G4xx_IT_H
#ifdef __cplusplus
extern "C" {
#endif
/* Private includes ----------------------------------------------------------*/
/* USER CODE BEGIN Includes */
/* USER CODE END Includes */
/* Exported types ------------------------------------------------------------*/
/* USER CODE BEGIN ET */
/* USER CODE END ET */
/* Exported constants --------------------------------------------------------*/
/* USER CODE BEGIN EC */
/* USER CODE END EC */
/* Exported macro ------------------------------------------------------------*/
/* USER CODE BEGIN EM */
/* USER CODE END EM */
/* Exported functions prototypes ---------------------------------------------*/
void NMI_Handler(void);
void HardFault_Handler(void);
void MemManage_Handler(void);
void BusFault_Handler(void);
void UsageFault_Handler(void);
void SVC_Handler(void);
void DebugMon_Handler(void);
void PendSV_Handler(void);
void SysTick_Handler(void);
void USART2_IRQHandler(void);
/* USER CODE BEGIN EFP */
/* USER CODE END EFP */
#ifdef __cplusplus
}
#endif
#endif /* __STM32G4xx_IT_H */
| 1,890 |
C
| 26.808823 | 80 | 0.481481 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Inc/gpio.h
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file gpio.h
* @brief This file contains all the function prototypes for
* the gpio.c file
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __GPIO_H__
#define __GPIO_H__
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "main.h"
/* USER CODE BEGIN Includes */
/* USER CODE END Includes */
/* USER CODE BEGIN Private defines */
/* USER CODE END Private defines */
void MX_GPIO_Init(void);
/* USER CODE BEGIN Prototypes */
/* USER CODE END Prototypes */
#ifdef __cplusplus
}
#endif
#endif /*__ GPIO_H__ */
| 1,264 |
C
| 24.3 | 80 | 0.476266 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Inc/stm32g4xx_hal_conf.h
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file stm32g4xx_hal_conf.h
* @author MCD Application Team
* @brief HAL configuration file
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef STM32G4xx_HAL_CONF_H
#define STM32G4xx_HAL_CONF_H
#ifdef __cplusplus
extern "C" {
#endif
/* Exported types ------------------------------------------------------------*/
/* Exported constants --------------------------------------------------------*/
/* ########################## Module Selection ############################## */
/**
* @brief This is the list of modules to be used in the HAL driver
*/
#define HAL_MODULE_ENABLED
/*#define HAL_ADC_MODULE_ENABLED */
/*#define HAL_COMP_MODULE_ENABLED */
/*#define HAL_CORDIC_MODULE_ENABLED */
#define HAL_CRC_MODULE_ENABLED
/*#define HAL_CRYP_MODULE_ENABLED */
/*#define HAL_DAC_MODULE_ENABLED */
/*#define HAL_FDCAN_MODULE_ENABLED */
/*#define HAL_FMAC_MODULE_ENABLED */
/*#define HAL_HRTIM_MODULE_ENABLED */
/*#define HAL_IRDA_MODULE_ENABLED */
/*#define HAL_IWDG_MODULE_ENABLED */
/*#define HAL_I2C_MODULE_ENABLED */
/*#define HAL_I2S_MODULE_ENABLED */
/*#define HAL_LPTIM_MODULE_ENABLED */
/*#define HAL_NAND_MODULE_ENABLED */
/*#define HAL_NOR_MODULE_ENABLED */
/*#define HAL_OPAMP_MODULE_ENABLED */
/*#define HAL_PCD_MODULE_ENABLED */
/*#define HAL_QSPI_MODULE_ENABLED */
/*#define HAL_RNG_MODULE_ENABLED */
/*#define HAL_RTC_MODULE_ENABLED */
/*#define HAL_SAI_MODULE_ENABLED */
/*#define HAL_SMARTCARD_MODULE_ENABLED */
/*#define HAL_SMBUS_MODULE_ENABLED */
/*#define HAL_SPI_MODULE_ENABLED */
/*#define HAL_SRAM_MODULE_ENABLED */
#define HAL_TIM_MODULE_ENABLED
#define HAL_UART_MODULE_ENABLED
/*#define HAL_USART_MODULE_ENABLED */
/*#define HAL_WWDG_MODULE_ENABLED */
#define HAL_GPIO_MODULE_ENABLED
#define HAL_EXTI_MODULE_ENABLED
#define HAL_DMA_MODULE_ENABLED
#define HAL_RCC_MODULE_ENABLED
#define HAL_FLASH_MODULE_ENABLED
#define HAL_PWR_MODULE_ENABLED
#define HAL_CORTEX_MODULE_ENABLED
/* ########################## Register Callbacks selection ############################## */
/**
* @brief This is the list of modules where register callback can be used
*/
#define USE_HAL_ADC_REGISTER_CALLBACKS 0U
#define USE_HAL_COMP_REGISTER_CALLBACKS 0U
#define USE_HAL_CORDIC_REGISTER_CALLBACKS 0U
#define USE_HAL_CRYP_REGISTER_CALLBACKS 0U
#define USE_HAL_DAC_REGISTER_CALLBACKS 0U
#define USE_HAL_EXTI_REGISTER_CALLBACKS 0U
#define USE_HAL_FDCAN_REGISTER_CALLBACKS 0U
#define USE_HAL_FMAC_REGISTER_CALLBACKS 0U
#define USE_HAL_HRTIM_REGISTER_CALLBACKS 0U
#define USE_HAL_I2C_REGISTER_CALLBACKS 0U
#define USE_HAL_I2S_REGISTER_CALLBACKS 0U
#define USE_HAL_IRDA_REGISTER_CALLBACKS 0U
#define USE_HAL_LPTIM_REGISTER_CALLBACKS 0U
#define USE_HAL_NAND_REGISTER_CALLBACKS 0U
#define USE_HAL_NOR_REGISTER_CALLBACKS 0U
#define USE_HAL_OPAMP_REGISTER_CALLBACKS 0U
#define USE_HAL_PCD_REGISTER_CALLBACKS 0U
#define USE_HAL_QSPI_REGISTER_CALLBACKS 0U
#define USE_HAL_RNG_REGISTER_CALLBACKS 0U
#define USE_HAL_RTC_REGISTER_CALLBACKS 0U
#define USE_HAL_SAI_REGISTER_CALLBACKS 0U
#define USE_HAL_SMARTCARD_REGISTER_CALLBACKS 0U
#define USE_HAL_SMBUS_REGISTER_CALLBACKS 0U
#define USE_HAL_SPI_REGISTER_CALLBACKS 0U
#define USE_HAL_SRAM_REGISTER_CALLBACKS 0U
#define USE_HAL_TIM_REGISTER_CALLBACKS 0U
#define USE_HAL_UART_REGISTER_CALLBACKS 0U
#define USE_HAL_USART_REGISTER_CALLBACKS 0U
#define USE_HAL_WWDG_REGISTER_CALLBACKS 0U
/* ########################## Oscillator Values adaptation ####################*/
/**
* @brief Adjust the value of External High Speed oscillator (HSE) used in your application.
* This value is used by the RCC HAL module to compute the system frequency
* (when HSE is used as system clock source, directly or through the PLL).
*/
#if !defined (HSE_VALUE)
#define HSE_VALUE (8000000UL) /*!< Value of the External oscillator in Hz */
#endif /* HSE_VALUE */
#if !defined (HSE_STARTUP_TIMEOUT)
#define HSE_STARTUP_TIMEOUT (100UL) /*!< Time out for HSE start up, in ms */
#endif /* HSE_STARTUP_TIMEOUT */
/**
* @brief Internal High Speed oscillator (HSI) value.
* This value is used by the RCC HAL module to compute the system frequency
* (when HSI is used as system clock source, directly or through the PLL).
*/
#if !defined (HSI_VALUE)
#define HSI_VALUE (16000000UL) /*!< Value of the Internal oscillator in Hz*/
#endif /* HSI_VALUE */
/**
* @brief Internal High Speed oscillator (HSI48) value for USB FS and RNG.
* This internal oscillator is mainly dedicated to provide a high precision clock to
* the USB peripheral by means of a special Clock Recovery System (CRS) circuitry.
*        When the CRS is not used, the HSI48 RC oscillator runs at its default frequency
* which is subject to manufacturing process variations.
*/
#if !defined (HSI48_VALUE)
#define HSI48_VALUE (48000000UL) /*!< Value of the Internal High Speed oscillator for USB FS/RNG in Hz.
The real value may vary depending on manufacturing process variations.*/
#endif /* HSI48_VALUE */
/**
* @brief Internal Low Speed oscillator (LSI) value.
*/
#if !defined (LSI_VALUE)
/*!< Value of the Internal Low Speed oscillator in Hz
The real value may vary depending on the variations in voltage and temperature.*/
#define LSI_VALUE (32000UL) /*!< LSI Typical Value in Hz*/
#endif /* LSI_VALUE */
/**
* @brief External Low Speed oscillator (LSE) value.
* This value is used by the UART, RTC HAL module to compute the system frequency
*/
#if !defined (LSE_VALUE)
#define LSE_VALUE (32768UL) /*!< Value of the External Low Speed oscillator in Hz */
#endif /* LSE_VALUE */
#if !defined (LSE_STARTUP_TIMEOUT)
#define LSE_STARTUP_TIMEOUT (5000UL) /*!< Time out for LSE start up, in ms */
#endif /* LSE_STARTUP_TIMEOUT */
/**
* @brief External clock source for I2S and SAI peripherals
* This value is used by the I2S and SAI HAL modules to compute the I2S and SAI clock source
* frequency, this source is inserted directly through I2S_CKIN pad.
*/
#if !defined (EXTERNAL_CLOCK_VALUE)
#define EXTERNAL_CLOCK_VALUE (12288000UL) /*!< Value of the External oscillator in Hz*/
#endif /* EXTERNAL_CLOCK_VALUE */
/* Tip: To avoid modifying this file each time you need to use different HSE,
=== you can define the HSE value in your toolchain compiler preprocessor. */
/* ########################### System Configuration ######################### */
/**
* @brief This is the HAL system configuration section
*/
#define VDD_VALUE (3300UL) /*!< Value of VDD in mv */
#define TICK_INT_PRIORITY (0UL) /*!< tick interrupt priority (lowest by default) */
#define USE_RTOS 0U
#define PREFETCH_ENABLE 0U
#define INSTRUCTION_CACHE_ENABLE 1U
#define DATA_CACHE_ENABLE 1U
/* ########################## Assert Selection ############################## */
/**
* @brief Uncomment the line below to expanse the "assert_param" macro in the
* HAL drivers code
*/
/* #define USE_FULL_ASSERT 1U */
/* ################## SPI peripheral configuration ########################## */
/* CRC FEATURE: Use to activate CRC feature inside HAL SPI Driver
* Activated: CRC code is present inside driver
* Deactivated: CRC code cleaned from driver
*/
#define USE_SPI_CRC 0U
/* Includes ------------------------------------------------------------------*/
/**
* @brief Include module's header file
*/
#ifdef HAL_RCC_MODULE_ENABLED
#include "stm32g4xx_hal_rcc.h"
#endif /* HAL_RCC_MODULE_ENABLED */
#ifdef HAL_GPIO_MODULE_ENABLED
#include "stm32g4xx_hal_gpio.h"
#endif /* HAL_GPIO_MODULE_ENABLED */
#ifdef HAL_DMA_MODULE_ENABLED
#include "stm32g4xx_hal_dma.h"
#endif /* HAL_DMA_MODULE_ENABLED */
#ifdef HAL_CORTEX_MODULE_ENABLED
#include "stm32g4xx_hal_cortex.h"
#endif /* HAL_CORTEX_MODULE_ENABLED */
#ifdef HAL_ADC_MODULE_ENABLED
#include "stm32g4xx_hal_adc.h"
#endif /* HAL_ADC_MODULE_ENABLED */
#ifdef HAL_COMP_MODULE_ENABLED
#include "stm32g4xx_hal_comp.h"
#endif /* HAL_COMP_MODULE_ENABLED */
#ifdef HAL_CORDIC_MODULE_ENABLED
#include "stm32g4xx_hal_cordic.h"
#endif /* HAL_CORDIC_MODULE_ENABLED */
#ifdef HAL_CRC_MODULE_ENABLED
#include "stm32g4xx_hal_crc.h"
#endif /* HAL_CRC_MODULE_ENABLED */
#ifdef HAL_CRYP_MODULE_ENABLED
#include "stm32g4xx_hal_cryp.h"
#endif /* HAL_CRYP_MODULE_ENABLED */
#ifdef HAL_DAC_MODULE_ENABLED
#include "stm32g4xx_hal_dac.h"
#endif /* HAL_DAC_MODULE_ENABLED */
#ifdef HAL_EXTI_MODULE_ENABLED
#include "stm32g4xx_hal_exti.h"
#endif /* HAL_EXTI_MODULE_ENABLED */
#ifdef HAL_FDCAN_MODULE_ENABLED
#include "stm32g4xx_hal_fdcan.h"
#endif /* HAL_FDCAN_MODULE_ENABLED */
#ifdef HAL_FLASH_MODULE_ENABLED
#include "stm32g4xx_hal_flash.h"
#endif /* HAL_FLASH_MODULE_ENABLED */
#ifdef HAL_FMAC_MODULE_ENABLED
#include "stm32g4xx_hal_fmac.h"
#endif /* HAL_FMAC_MODULE_ENABLED */
#ifdef HAL_HRTIM_MODULE_ENABLED
#include "stm32g4xx_hal_hrtim.h"
#endif /* HAL_HRTIM_MODULE_ENABLED */
#ifdef HAL_IRDA_MODULE_ENABLED
#include "stm32g4xx_hal_irda.h"
#endif /* HAL_IRDA_MODULE_ENABLED */
#ifdef HAL_IWDG_MODULE_ENABLED
#include "stm32g4xx_hal_iwdg.h"
#endif /* HAL_IWDG_MODULE_ENABLED */
#ifdef HAL_I2C_MODULE_ENABLED
#include "stm32g4xx_hal_i2c.h"
#endif /* HAL_I2C_MODULE_ENABLED */
#ifdef HAL_I2S_MODULE_ENABLED
#include "stm32g4xx_hal_i2s.h"
#endif /* HAL_I2S_MODULE_ENABLED */
#ifdef HAL_LPTIM_MODULE_ENABLED
#include "stm32g4xx_hal_lptim.h"
#endif /* HAL_LPTIM_MODULE_ENABLED */
#ifdef HAL_NAND_MODULE_ENABLED
#include "stm32g4xx_hal_nand.h"
#endif /* HAL_NAND_MODULE_ENABLED */
#ifdef HAL_NOR_MODULE_ENABLED
#include "stm32g4xx_hal_nor.h"
#endif /* HAL_NOR_MODULE_ENABLED */
#ifdef HAL_OPAMP_MODULE_ENABLED
#include "stm32g4xx_hal_opamp.h"
#endif /* HAL_OPAMP_MODULE_ENABLED */
#ifdef HAL_PCD_MODULE_ENABLED
#include "stm32g4xx_hal_pcd.h"
#endif /* HAL_PCD_MODULE_ENABLED */
#ifdef HAL_PWR_MODULE_ENABLED
#include "stm32g4xx_hal_pwr.h"
#endif /* HAL_PWR_MODULE_ENABLED */
#ifdef HAL_QSPI_MODULE_ENABLED
#include "stm32g4xx_hal_qspi.h"
#endif /* HAL_QSPI_MODULE_ENABLED */
#ifdef HAL_RNG_MODULE_ENABLED
#include "stm32g4xx_hal_rng.h"
#endif /* HAL_RNG_MODULE_ENABLED */
#ifdef HAL_RTC_MODULE_ENABLED
#include "stm32g4xx_hal_rtc.h"
#endif /* HAL_RTC_MODULE_ENABLED */
#ifdef HAL_SAI_MODULE_ENABLED
#include "stm32g4xx_hal_sai.h"
#endif /* HAL_SAI_MODULE_ENABLED */
#ifdef HAL_SMARTCARD_MODULE_ENABLED
#include "stm32g4xx_hal_smartcard.h"
#endif /* HAL_SMARTCARD_MODULE_ENABLED */
#ifdef HAL_SMBUS_MODULE_ENABLED
#include "stm32g4xx_hal_smbus.h"
#endif /* HAL_SMBUS_MODULE_ENABLED */
#ifdef HAL_SPI_MODULE_ENABLED
#include "stm32g4xx_hal_spi.h"
#endif /* HAL_SPI_MODULE_ENABLED */
#ifdef HAL_SRAM_MODULE_ENABLED
#include "stm32g4xx_hal_sram.h"
#endif /* HAL_SRAM_MODULE_ENABLED */
#ifdef HAL_TIM_MODULE_ENABLED
#include "stm32g4xx_hal_tim.h"
#endif /* HAL_TIM_MODULE_ENABLED */
#ifdef HAL_UART_MODULE_ENABLED
#include "stm32g4xx_hal_uart.h"
#endif /* HAL_UART_MODULE_ENABLED */
#ifdef HAL_USART_MODULE_ENABLED
#include "stm32g4xx_hal_usart.h"
#endif /* HAL_USART_MODULE_ENABLED */
#ifdef HAL_WWDG_MODULE_ENABLED
#include "stm32g4xx_hal_wwdg.h"
#endif /* HAL_WWDG_MODULE_ENABLED */
/* Exported macro ------------------------------------------------------------*/
#ifdef USE_FULL_ASSERT
/**
* @brief The assert_param macro is used for function's parameters check.
* @param expr: If expr is false, it calls assert_failed function
* which reports the name of the source file and the source
* line number of the call that failed.
* If expr is true, it returns no value.
* @retval None
*/
#define assert_param(expr) ((expr) ? (void)0U : assert_failed((uint8_t *)__FILE__, __LINE__))
/* Exported functions ------------------------------------------------------- */
void assert_failed(uint8_t *file, uint32_t line);
#else
#define assert_param(expr) ((void)0U)
#endif /* USE_FULL_ASSERT */
#ifdef __cplusplus
}
#endif
#endif /* STM32G4xx_HAL_CONF_H */
| 12,935 |
C
| 32.952756 | 118 | 0.647313 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Inc/RTE_Components.h
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file
* @author MCD Application Team
* @version V2.0.0
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __RTE_COMPONENTS_H__
#define __RTE_COMPONENTS_H__
/* Defines ------------------------------------------------------------------*/
/* STMicroelectronics.X-CUBE-AI.8.1.0 */
#define AI_ApplicationTemplate
#endif /* __RTE_COMPONENTS_H__ */
| 1,003 |
C
| 33.620688 | 82 | 0.440678 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Core/Inc/main.h
|
/* USER CODE BEGIN Header */
/**
******************************************************************************
* @file : main.h
* @brief : Header for main.c file.
* This file contains the common defines of the application.
******************************************************************************
* @attention
*
* Copyright (c) 2024 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
/* USER CODE END Header */
/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __MAIN_H
#define __MAIN_H
#ifdef __cplusplus
extern "C" {
#endif
/* Includes ------------------------------------------------------------------*/
#include "stm32g4xx_hal.h"
/* Private includes ----------------------------------------------------------*/
/* USER CODE BEGIN Includes */
/* USER CODE END Includes */
/* Exported types ------------------------------------------------------------*/
/* USER CODE BEGIN ET */
/* USER CODE END ET */
/* Exported constants --------------------------------------------------------*/
/* USER CODE BEGIN EC */
/* USER CODE END EC */
/* Exported macro ------------------------------------------------------------*/
/* USER CODE BEGIN EM */
/* USER CODE END EM */
/* Exported functions prototypes ---------------------------------------------*/
void Error_Handler(void);
/* USER CODE BEGIN EFP */
/* USER CODE END EFP */
/* Private defines -----------------------------------------------------------*/
#define USART2_TX_Pin GPIO_PIN_2
#define USART2_TX_GPIO_Port GPIOA
#define USART2_RX_Pin GPIO_PIN_3
#define USART2_RX_GPIO_Port GPIOA
#define T_SWDIO_Pin GPIO_PIN_13
#define T_SWDIO_GPIO_Port GPIOA
#define T_SWCLK_Pin GPIO_PIN_14
#define T_SWCLK_GPIO_Port GPIOA
#define T_SWO_Pin GPIO_PIN_3
#define T_SWO_GPIO_Port GPIOB
#define LD2_Pin GPIO_PIN_8
#define LD2_GPIO_Port GPIOB
/* USER CODE BEGIN Private defines */
/* USER CODE END Private defines */
#ifdef __cplusplus
}
#endif
#endif /* __MAIN_H */
| 2,310 |
C
| 27.182926 | 80 | 0.467532 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/formats_list.h
|
/**
******************************************************************************
* @file format_list.h
* @author AST Embedded Analytics Research Platform
* @brief Definitions of AI platform public APIs types
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
/* FMT_ENTRY( exp_(0/1 only), name_, type_id_,
* sign_bit_, float_bit_, pmask_, bits_, fbits_, ldiv_bits_)
* Specifications (in order of the bit fields, little endian):
- name_ : it is the enum used to define both the ai_array_format and
ai_buffer_format.
- exp_ (1bit) : it is a boolean flag (0 or 1) indicating whether the format
is available as a public API ai_buffer format. In this case the field
exp_name_ indicates the enum name of the ai_buffer format
- (7 bits): reserved for flags
- sign_bit_ (1bit) : codes whether or not the format is of a signed type
- float_bit_ (1bit) : codes if the format is float
- ldiv_bits (2 bits) : right shift value for computing the byte size of the
format
- type_id_ (4bits) : it is used to define the "family" of the format:
see @ref AI_FMT_Q as an example. Currently supported types are:
AI_FMT_Q (fixed point types), AI_FMT_FLOAT (floating point values),
AI_FMT_LUT4 or AI_FMT_LUT8 (compressed formats)
- pmask_ (3bits) : padding mask bits for the format
- bits_ (7bits) : size in bits of the format (NB: integer+fractional bits)
- fbits_ (7bits) : number of fractional bits for the format
(for AI_FMT_Q only)
*/
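/* Worked example: the Q7 entry below,
* FMT_ENTRY(1, Q7, AI_FMT_Q, 1, 0, 0x0, 8, 7, 0)
* reads as: exposed as a public ai_buffer format (exp_ = 1), fixed-point
* family (AI_FMT_Q), signed, not float, 8 total bits of which 7 are
* fractional, i.e. a resolution of 1/128 over roughly [-1, 1), and
* ldiv_bits_ = 0 so no extra right shift is applied when computing its
* byte size.
*/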
/* Format none entry */
FMT_ENTRY(1, NONE, AI_FMT_NONE, 0, 0, 0x0, 0, 0, 0)
/* Floating point formats */
FMT_ENTRY(1, FLOAT, AI_FMT_FLOAT, 1, 1, 0x0, 32, 0, 0)
FMT_ENTRY(0, FLOAT64, AI_FMT_FLOAT, 1, 1, 0x0, 64, 0, 0)
FMT_ENTRY(0, FLOAT16, AI_FMT_FLOAT, 1, 1, 0x0, 16, 0, 0)
/* Integer formats (i.e. fractional bits = 0!) */
FMT_ENTRY(1, U8, AI_FMT_Q, 0, 0, 0x0, 8, 0, 0)
FMT_ENTRY(1, U16, AI_FMT_Q, 0, 0, 0x0, 16, 0, 0)
FMT_ENTRY(1, U32, AI_FMT_Q, 0, 0, 0x0, 32, 0, 0)
FMT_ENTRY(0, U64, AI_FMT_Q, 0, 0, 0x0, 64, 0, 0)
FMT_ENTRY(1, U1, AI_FMT_Q, 0, 0, 0x0, 1, 0, 0)
FMT_ENTRY(0, U4, AI_FMT_Q, 0, 0, 0x0, 4, 0, 0)
FMT_ENTRY(1, S8, AI_FMT_Q, 1, 0, 0x0, 8, 0, 0)
FMT_ENTRY(1, S16, AI_FMT_Q, 1, 0, 0x0, 16, 0, 0)
FMT_ENTRY(1, S32, AI_FMT_Q, 1, 0, 0x0, 32, 0, 0)
FMT_ENTRY(0, S64, AI_FMT_Q, 1, 0, 0x0, 64, 0, 0)
FMT_ENTRY(1, S1, AI_FMT_Q, 1, 0, 0x0, 1, 0, 0)
FMT_ENTRY(0, S4, AI_FMT_Q, 1, 0, 0x0, 4, 0, 0)
/* Fixed-point formats including ARM CMSIS Q7, Q15, Q31 ones */
FMT_ENTRY(1, Q, AI_FMT_Q, 1, 0, 0x0, 0, 0, 0)
FMT_ENTRY(1, Q7, AI_FMT_Q, 1, 0, 0x0, 8, 7, 0)
FMT_ENTRY(1, Q15, AI_FMT_Q, 1, 0, 0x0, 16, 15, 0)
FMT_ENTRY(0, Q31, AI_FMT_Q, 1, 0, 0x0, 32, 31, 0)
FMT_ENTRY(1, UQ, AI_FMT_Q, 0, 0, 0x0, 0, 0, 0)
FMT_ENTRY(1, UQ7, AI_FMT_Q, 0, 0, 0x0, 8, 7, 0)
FMT_ENTRY(1, UQ15, AI_FMT_Q, 0, 0, 0x0, 16, 15, 0)
FMT_ENTRY(0, UQ31, AI_FMT_Q, 0, 0, 0x0, 32, 31, 0)
/* Compressed formats */
FMT_ENTRY(0, LUT4_FLOAT, AI_FMT_LUT4, 1, 1, 0x0, 32, 0, 3)
FMT_ENTRY(0, LUT8_FLOAT, AI_FMT_LUT8, 1, 1, 0x0, 32, 0, 2)
FMT_ENTRY(0, LUT4_Q15, AI_FMT_LUT4, 1, 0, 0x0, 16, 15, 2)
FMT_ENTRY(0, LUT8_Q15, AI_FMT_LUT8, 1, 0, 0x0, 16, 15, 1)
FMT_ENTRY(0, LUT4_UQ15, AI_FMT_LUT4, 0, 0, 0x0, 16, 15, 2)
FMT_ENTRY(0, LUT8_UQ15, AI_FMT_LUT8, 0, 0, 0x0, 16, 15, 1)
/* Boolean format */
FMT_ENTRY(1, BOOL, AI_FMT_BOOL, 0, 0, 0x0, 8, 0, 0)
#undef FMT_ENTRY
| 3,930 |
C
| 41.72826 | 80 | 0.564885 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/core_convert.h
|
/**
******************************************************************************
* @file core_convert.h
* @author AST Embedded Analytics Research Platform
* @brief header file of core utils routines
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef CORE_CONVERT_H
#define CORE_CONVERT_H
#pragma once
#include "ai_platform.h"
#include "ai_platform_interface.h"
#include "core_common.h"
AI_API_DECLARE_BEGIN
/*!
* @defgroup core_convert Core Convert Routines
* @brief Implementation of core node format conversion routines
* (Q7 to float, ... etc.)
*/
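/* Naming convention used by the converters below (as documented per function):
* the token after "i" describes the input and the token after "o" the output,
* with s1 = binary (signed 1-bit), s8/s16 = signed 8/16-bit, u16 = unsigned
* 16-bit and f32 = 32-bit float. For instance node_convert_if32os16 converts
* a float32 tensor to a signed int 16 bit one.
*/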
/*!
* @brief Convert tensors from float to quantized or viceversa
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert(ai_node *pNode);
/*!
* @brief Convert integer tensors between QM.N formats (8/16 bits)
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_fixed(ai_node *pNode);
/*!
* @brief Convert integer tensors between signed and unsigned (int8/uint8) formats
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_integer(ai_node *pNode);
/*!
* @brief Convert float tensor to binary
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_if32os1(ai_node *pNode);
/*!
* @brief Convert signed int 8 bit tensor to binary
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_is8os1(ai_node *pNode);
/*!
* @brief Convert binary tensor to signed int 8 bit
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_is1os8(ai_node *pNode);
/*!
* @brief Convert binary tensor to signed int 16 bit
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_is1os16(ai_node *pNode);
/*!
* @brief Convert binary tensor to float
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_is1of32(ai_node *pNode);
/*!
* @brief Convert signed int 16 bit tensor to float
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_is16of32(ai_node *pNode);
/*!
* @brief Convert unsigned int 16 bit tensor to float
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_iu16of32(ai_node *pNode);
/*!
* @brief Convert float tensor to signed int 16 bit
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_if32os16(ai_node *pNode);
/*!
* @brief Convert float tensor to unsigned int 16 bit
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_if32ou16(ai_node *pNode);
/*!
* @brief Convert signed int 16 bit tensor to unsigned int 16 bit
* @ingroup core_convert
* @param[in] pNode in a handler to node (layer or operator)
*/
AI_INTERNAL_API
void node_convert_is16ou16(ai_node *pNode);
/*!
* @brief Convert a shape struct into a stride struct
* @ingroup core_convert
* @param[in] in a pointer to a shape to convert
* @return a converted stride data struct
*/
AI_INTERNAL_API
void core_shape_to_stride(ai_stride* out, const ai_shape* in);
#endif /*CORE_CONVERT_H*/
| 4,123 |
C
| 23.993939 | 81 | 0.65171 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_conv2d.h
|
/**
******************************************************************************
* @file layers_conv2d.h
* @author AST Embedded Analytics Research Platform
* @brief header file of AI platform conv2d layers datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_CONV2D_H
#define LAYERS_CONV2D_H
#pragma once
#include "layers_nl.h"
#include "layers_pool.h"
#define AI_LAYER_CONV2D_FIELDS_DECLARE \
AI_LAYER_COMMON_FIELDS_DECLARE \
ai_u32 groups; /*!< groups for separable convolution */ \
AI_CONST ai_array* nl_params; /*!< array pointer to non linear parameters */ \
func_nl nl_func; /*!< function pointer to non linear transform */ \
ai_shape_2d filter_stride; /*!< filter stride, how much the filter moves */ \
ai_shape_2d dilation; /*!< dilation value along axis of the filter */ \
ai_shape filter_pad; /*!< filter pad 4d */ \
ai_layer_format_type in_ch_format; /*!< Input format (Channel 1st vs Channel last) */ \
ai_layer_format_type out_ch_format; /*!< Output format (Channel 1st vs Channel last) */
/*!
* @defgroup layers_conv2d Convolutive Layers Definitions
* @brief definition
*
*/
AI_API_DECLARE_BEGIN
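/* Kernel naming convention used below (as documented per function):
* if32/of32/wf32 = float32 input/output/weights, is8/os8/ws8 = int8
* input/output/weights; SSSA, SSUA and UAUA denote the integer quantization
* schemes, applied per layer or, with the "_ch" suffix, per channel.
*/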
/*!
* @struct ai_layer_dense
* @ingroup layers_conv2d
* @brief Dense (fully connected) layer
*/
typedef ai_layer_base ai_layer_dense;
/*!
* @struct ai_layer_gemm
* @ingroup layers_conv2d
* @brief layer for General Matrix Multiplication
*
* Layer for General Matrix Multiplication (GEMM):
* \f{equation}{ Y = \alpha A \cdot B + \beta C \f}
* \f$\alpha\f$ and \f$\beta\f$ are parameters, A and B are matrices,
* C is a matrix or an array. Size checks for A, B, C, and Y are performed and
* broadcast is applied on C if necessary.
* This is a sequential layer (see @ref ai_layer).
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gemm_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_float alpha; /*!< alpha coefficient */
ai_float beta; /*!< beta coefficient */
ai_u8 tA; /*!< transpose A flag */
ai_u8 tB; /*!< transpose B flag */
} ai_layer_gemm;
/*!
* @struct ai_layer_conv2d
* @ingroup layers_conv2d
* @brief 2D convolutional layer with strides and pads
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_ {
AI_LAYER_CONV2D_FIELDS_DECLARE
} ai_layer_conv2d;
/*!
* @struct ai_layer_conv2d_nl_pool
* @ingroup layers_conv2d
* @brief 2D convolutional layer + nl + pooling with strides and pads
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_nl_pool_ {
AI_LAYER_CONV2D_FIELDS_DECLARE
ai_shape_2d pool_size; /*!< pooling size */
ai_shape_2d pool_stride; /*!< pooling stride */
ai_shape pool_pad; /*!< pooling pad */
ai_handle pool_func; /*!< function pointer to pooling transform */
} ai_layer_conv2d_nl_pool;
AI_INTERNAL_API
void ai_dict8_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut,
const ai_float* data1, const ai_size data_size);
AI_INTERNAL_API
void ai_dict4_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut,
const ai_float* data1, const ai_size data_size);
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Computes the activations of a floating point 32 2D convolutional layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_if32of32wf32(ai_layer* layer);
/*!
* @brief Computes the activations of a floating point 32 2D dw layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_if32of32wf32(ai_layer* layer);
/*!
* @brief Computes the activations of a floating point 32 2D convolutional group layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_if32of32wf32_group(ai_layer* layer);
/*!
* @brief Computes the activations of a 2D floating point 32 pool fused convolutional layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_if32of32wf32_pool(ai_layer* layer);
/*!
* @brief Computes the activations of a 2D floating point 32 pool fused dw layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_if32of32wf32_pool(ai_layer* layer);
/*!
* @brief Computes the activations of a 2D floating point 32 pool fused convolutional group layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_if32of32wf32_group_pool(ai_layer* layer);
/*!
* @brief Computes the activations of a GEMM layer.
* @ingroup layers
* @param layer the layer including output and input tensors
*/
AI_INTERNAL_API
void forward_gemm(ai_layer* layer);
/*!
* @brief Computes matmul layer, intended as numpy.matmul(A,B).
* @ingroup layers
* @param layer the layer including output and input tensors
*/
AI_INTERNAL_API
void forward_matmul(ai_layer* layer);
/*!
* @brief Computes the activations of a dense (fully connected) layer.
* @ingroup layers_conv2d
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense(ai_layer* layer);
/*!
* @brief Computes the activations of a fixed point 2D convolutional layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_fixed(ai_layer *pLayer);
/*!
* @brief Computes the activations of a fixed point @ref ai_layer_conv2d_nl_pool
* layer.
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_fixed(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer quantized 2D convolutional layer.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_integer(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer quantized 2D convolutional layer
* for SSSA per layer quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_integer_SSSA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer quantized 2D convolutional layer
* for SSSA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is8os8ws8_sssa_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme, with 3x3 kernels
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_3x3_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme, with 3x3 kernels and input are
* channel first
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_3x3_ch1st_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme with depth multiplier > 1
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_dm_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of int8 quantized DW layers.
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_all_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized PW layer
* for SSSA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_pw_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized dilated Conv2d layer
* for SSSA per channel quantized scheme (valid padding)
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_dilated_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 non dilated Conv2d layer
* for SSSA per channel quantized scheme (valid padding)
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_deep_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 non dilated Conv2d layer
* for SSSA per channel quantized scheme (valid padding)
* number of output channel is greater than 8
* Kernels shall be 3x3 and stride is (1,1)
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_deep_3x3_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 non dilated Conv2d layer
* for SSSA per channel quantized scheme (valid or same padding)
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized Conv2d layer
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_all_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized RGB Conv2d layer
* for SSSA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_rgb_sssa8_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme with pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme, with 3x3 kernels,
* with pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_3x3_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme, with 3x3 kernels,
* with pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_3x3_ch1st_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized DW layer
* for SSSA per channel quantized scheme with depth multiplier > 1
* with pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_dm_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of int8 quantized DW layers, with pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_all_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized PW layer,
* with pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_pw_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized dilated Conv2d layer
* for SSSA per channel quantized scheme (valid padding) and pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_dilated_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized non dilated Conv2d layer
* for SSSA per channel quantized scheme (valid padding) and pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_deep_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 non dilated Conv2d layer
* for SSSA per channel quantized scheme (valid padding) and pooling fused
* number of output channel is greater than 8
* Kernels shall be 3x3 and stride is (1,1)
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_deep_3x3_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized non dilated Conv2d layer
* for SSSA per channel quantized scheme (valid or same padding) and pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a int8 quantized Conv2d layer and pooling fused
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_all_sssa8_ch_nl_pool(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer quantized 2D convolutional layer
* for SSUA per layer quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_integer_SSUA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer quantized 2D convolutional layer
* for SSUA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_integer_SSUA_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer quantized 2D convolutional layer
* for UAUA per layer quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_integer_UAUA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer quantized 2D convolutional layer
* for UAUA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_integer_UAUA_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer.
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_integer(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
* for SSSA per layer quantized scheme
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_integer_SSSA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
* for SSSA per channel quantized scheme
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_integer_SSSA_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
* for SSUA per layer quantized scheme
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_integer_SSUA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
* for SSUA per channel quantized scheme
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_integer_SSUA_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
* for UAUA per layer quantized scheme
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_integer_UAUA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
* for UAUA per channel quantized scheme
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
* layer + optional pooling / nonlinearity (average, max)
* @ingroup layers_conv2d
* @param layer see @ai_layer_conv2d_nl_pool
*/
AI_INTERNAL_API
void forward_conv2d_nl_pool_integer_UAUA_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer dense (fully connected) layer.
* @ingroup layers_dense
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense_integer(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer dense (fully connected) layer
* for SSSA per layer quantized scheme
* @ingroup layers_dense
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense_integer_SSSA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer dense (fully connected) layer
* for SSSA per channel quantized scheme
* @ingroup layers_dense
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense_integer_SSSA_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer dense (fully connected) layer
* for SSUA per layer quantized scheme
* @ingroup layers_dense
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense_integer_SSUA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer dense (fully connected) layer
* for SSUA per channel quantized scheme
* @ingroup layers_dense
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense_integer_SSUA_ch(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer dense (fully connected) layer
* for UAUA per layer quantized scheme
* @ingroup layers_dense
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense_integer_UAUA(ai_layer *pLayer);
/*!
* @brief Computes the activations of a integer dense (fully connected) layer
* for UAUA per channel quantized scheme
* @ingroup layers_dense
* @param layer the dense layer
*/
AI_INTERNAL_API
void forward_dense_integer_UAUA_ch(ai_layer *pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_CONV2D_H*/
| 19,921 |
C
| 30.8752 | 98 | 0.69359 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/ai_lite_math_helpers.h
|
#ifndef AI_LITE_MATH_HELPERS_H
#define AI_LITE_MATH_HELPERS_H
#include <math.h>
#include "ai_platform.h"
#include "ai_platform_interface.h"
#include "ai_datatypes_defines.h"
#define AI_FLOAT_TOLERANCE (6.19209290e-5F) /* Used for small calculation
noise issues */
#define AI_FLOAT_EPSILON (1.19209290e-7F)
#define AI_I8_EPSILON (0.00787401F) /* 1/(2^7 - 1) */
#define AI_I16_EPSILON (3.051851e-5F) /* 1/(2^15 - 1) */
#define AI_FLT_MAX (3.40282346638528859812e+38f)
#define AI_MIN(x,y) ( ((x)<(y)) ? (x) : (y) )
#define AI_MAX(x,y) ( ((x)>(y)) ? (x) : (y) )
#define AI_SIGN(x) (((x)>0) ? 1 : -1)
#define AI_CLAMP(x, min, max) AI_MIN(AI_MAX(x,min), max)
#define AI_ABS(x) fabsf(x)
#define AI_ABS_DIFF(x, y) ( ((x)>(y)) ? ((x)-(y)) : ((y)-(x)) )
#define AI_NEG(x) ( -1 * (x) )
#define AI_NOT(x) ( ((x)==true) ? false : true)
#define AI_RECIPROCAL(x) ( 1.0f / (x) )
#define AI_CEIL(x) ceilf(x)
#define AI_FLOOR(x) floorf(x)
#define AI_FLOOR_DIV(x, y) AI_FLOOR((x)/(y)) /* floor division: x // y */
#define AI_FLOOR_MOD(x, y) fmodf(x, y)
#define AI_ROUND(x) roundf(x)
#define AI_POW(x,y) powf(x, y)
#define AI_SQUARED_DIFF(x, y) (((x)-(y)) * ((x)-(y)))
#define AI_FLOAT_NEGATIVE_HALF (-0.5f + AI_FLOAT_EPSILON)
#define AI_FLOAT_POSITIVE_HALF (0.5f)
#define AI_MATH_ACOS(x) acosf(x)
#define AI_MATH_ACOSH(x) acoshf(x)
#define AI_MATH_ASIN(x) asinf(x)
#define AI_MATH_ASINH(x) asinhf(x)
#define AI_MATH_ATAN(x) atanf(x)
#define AI_MATH_ATANH(x) atanhf(x)
#define AI_MATH_COS(x) cosf(x)
#define AI_MATH_COSH(x) coshf(x)
#define AI_MATH_ERF(x) erff(x)
#define AI_MATH_EXP(x) expf(x)
#define AI_MATH_LOG(x) logf(x)
#define AI_MATH_POW(x, e) powf((x), (e))
#define AI_MATH_RSQRT(x) (1.0f / AI_MATH_SQRT(x))
#define AI_MATH_SIN(x) sinf(x)
#define AI_MATH_SINH(x) sinhf(x)
#define AI_MATH_SQRT(x) ai_math_sqrt(x)
#define AI_MATH_TAN(x) tanf(x)
#define AI_MATH_TANH(x) tanhf(x)
#define AI_MATH_SQUARE(x) AI_MATH_POW(x, 2.0f)
#define AI_MATH_RELU_TEST(x, thr, min, max) \
(((x)<=(thr)) ? (min) : (max))
#define AI_MATH_CLIP_LINEAR_REMAP(x, alpha, beta) \
(AI_MAX(0, AI_MIN(1, ((x) * (alpha) + (beta)))))
#define AI_MATH_RELU_GENERIC(x, thr, alpha, max) \
AI_MATH_RELU_TEST(x, max, AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha), max)
#define AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha) \
AI_MATH_RELU_TEST(x, thr, ((alpha)*((x)-(thr))), x)
#define AI_MATH_RELU_THRESHOLDED(x, thr) \
AI_MATH_RELU_TEST(x, thr, 0, (x))
#define AI_MATH_LEAKY_RELU(x, neg_slope, pos_slope) \
AI_MATH_RELU_TEST(x, 0, (x)*(neg_slope), (x)*(pos_slope))
// ( ((x)>0) ? (x)*(pos_slope) : (x)*(neg_slope) )
#define AI_MATH_PRELU(x, slope) \
AI_MATH_RELU_TEST(x, 0, (x)*(slope), (x))
// AI_MATH_LEAKY_RELU(x, slope, 1)
#define AI_MATH_RELU(x) \
AI_MATH_RELU_TEST(x, 0, 0, x)
// AI_MAX(x, 0)
#define AI_MATH_ELU(x, alpha) \
(AI_MAX(0.0f, (x)) + AI_MIN(0.0f, (alpha) * (AI_MATH_EXP(x)-1.0f)))
#define AI_MATH_SELU(x, alpha, scale) \
((scale)*AI_MATH_ELU(x, alpha))
#define AI_MATH_SCALED_TANH(x, alpha, beta) \
((alpha)*AI_MATH_TANH((beta)*(x)))
#define AI_MATH_SIGMOID(x) \
(1.0f / (1.0f + AI_MATH_EXP(-(x))))
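/* AI_MATH_LOGISTIC is a numerically safer sigmoid: it only ever evaluates
* expf(-|x|), so the argument of expf never becomes large and positive, and
* it recovers the x < 0 branch via the identity sigmoid(x) = 1 - sigmoid(-x). */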
#define AI_MATH_LOGISTIC(x)\
(((x) < 0) ? (1.0f -(1.0f / (1.0f + AI_MATH_EXP(-AI_ABS(x))))) :\
(1.0f / (1.0f + AI_MATH_EXP(-AI_ABS(x)))))
#define AI_MATH_HARD_SIGMOID(x, alpha, beta) \
AI_MATH_CLIP_LINEAR_REMAP(x, alpha, beta)
/* Formula with higher accuracy */
#define AI_MATH_SWISH(x) \
((x) * AI_MATH_SIGMOID(x))
#define AI_MATH_HARD_SWISH(x) \
((x) * AI_MATH_CLIP_LINEAR_REMAP(x, 1.0f/6, 0.5f))
#define AI_MATH_SOFT_PLUS(x) \
AI_MATH_LOG(1.0f + AI_MATH_EXP(x))
#define AI_MATH_SOFT_SIGN(x) \
((x) / (1.0f + AI_ABS(x)))
AI_API_DECLARE_BEGIN
/*!
* @brief platform optimized square root on a float value
* @ingroup math_helpers
* @param x input value
* @return square root of the value
*/
AI_INTERFACE_ENTRY ai_float ai_math_sqrt(const ai_float x);
AI_API_DECLARE_END
#endif /* AI_LITE_MATH_HELPERS_H */
| 5,197 |
C
| 33.197368 | 79 | 0.556667 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_pad_dqnn.h
|
/**
******************************************************************************
* @file lite_pad_dqnn.h
* @author AIS
* @brief header file of AI platform lite padding kernel datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LITE_PADDING_DQNN_H
#define LITE_PADDING_DQNN_H
#pragma once
#include "ai_lite_interface.h"
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Handles padding with binary input and binary output - Lite I/F
* @ingroup lite_padding_dqnn
*/
LITE_API_ENTRY
void forward_lite_pad_is1os1(const ai_u32 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_i32 width_in,
const ai_i32 width_out,
const ai_i32 height_in,
const ai_i32 height_out,
const ai_u32 n_channel_out,
const ai_i32 mode,
const ai_u16 pads_x,
const ai_u16 pads_y,
const ai_u16 pads_x_r,
const ai_u16 pads_y_b,
const ai_u32 pad_value);
#endif /*LITE_PADDING_DQNN_H*/
| 2,016 |
C
| 37.788461 | 80 | 0.381448 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_conv2d_dqnn.h
|
/**
******************************************************************************
* @file lite_conv2d_dqnn.h
* @author AIS
* @brief header file of AI platform lite conv kernel datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LITE_CONV2D_DQNN_H
#define LITE_CONV2D_DQNN_H
#pragma once
#include "ai_lite_interface.h"
# define AI_16_OVERFLOW_CHECK(val_) (val_ <= 32767)
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
AI_API_DECLARE_BEGIN
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad0(const ai_u32 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* - Optimized thanks to Optim0 assumptions
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad0_optim0(const ai_u32 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os8ws1_bn_pad0(const ai_u32 *pDataIn_init,
ai_i8 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale,
const ai_float *pOffset);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad1(const ai_u32 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold,
const ai_i32 pad_value);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* - Optimized thanks to Optim2 assumptions
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad1_optim2(const ai_u32 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold,
const ai_i32 pad_value);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os8ws1_bn_pad1(const ai_u32 *pDataIn_init,
ai_i8 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale,
const ai_float *pOffset,
const ai_i32 pad_value);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* - Optimized thanks to Optim1 assumptions
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os8ws1_bn_pad1_optim1(const ai_u32 *pDataIn_init,
ai_i8 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale,
const ai_float *pOffset,
const ai_i32 pad_value);
/**
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os16ws1_bn_pad0_fxp(const ai_u32 *pDataIn_init,
ai_i16 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale_init,
const ai_float *pOffset_init);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
*
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os16ws1_bn_pad1_fxp(const ai_u32 *pDataIn_init,
ai_i16 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale_init,
const ai_float *pOffset_init,
const ai_i32 pad_value);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* - Optimized thanks to Optim1 assumptions
*
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1os16ws1_bn_pad1_optim1_fxp(const ai_u32 *pDataIn_init,
ai_i16 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale_init,
const ai_float *pOffset_init,
const ai_i32 pad_value);
/**
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
*
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1ou16ws1_bn_pad1_fxp(const ai_u32 *pDataIn_init,
ai_u16 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale_init,
const ai_float *pOffset_init,
const ai_i32 pad_value);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
*
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1ou16ws1_bn_pad0_fxp(const ai_u32 *pDataIn_init,
ai_u16 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale_init,
const ai_float *pOffset_init);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F.
* - Optimized thanks to Optim1 assumptions
*
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is1ou16ws1_bn_pad1_optim1_fxp(const ai_u32 *pDataIn_init,
ai_u16 *pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_float *pScale_init,
const ai_float *pOffset_init,
const ai_i32 pad_value);
/*!
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
* binary output - Lite I/F
* @ingroup lite_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
LITE_API_ENTRY
void forward_lite_conv2d_is8os1ws8(const ai_i8 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_i8 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold,
const ai_i8 in_zeropoint);
/*!
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
* binary output - Lite I/F - Optimized thanks to Optim2 assumptions
* @ingroup lite_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
LITE_API_ENTRY
void forward_lite_conv2d_is8os1ws8_optim2(const ai_i8 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_i8 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold,
const ai_i8 in_zeropoint);
/*!
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
* binary output - quantized with DoReFa SotA quantizer, lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_dorefa_is8os1ws8(const ai_i8 *pDataIn_init,
ai_u32 *pDataOut_init,
const ai_u8 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold,
const ai_i8 in_zeropoint);
/*!
* @brief Handles 2D convolution with 8-bits quantized input, output and weights
* - quantized with a different quantization per channel
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is8os8ws8_sssa_ch(const ai_i8 *pData_in,
ai_i8 *pData_out,
const ai_i8 *pWeights,
const ai_i32 *pBias,
ai_u16 *pBuffer_a,
const ai_size width_in,
const ai_size height_in,
const ai_size width_out,
const ai_size height_out,
const ai_u16 n_channel_in,
const ai_u16 n_channel_out,
const ai_size filt_width,
const ai_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_float in_scale,
const ai_float out_scale,
const ai_float *pWt_scale,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_i32 scratch_size);
/*!
* @brief Handles 2D convolution with 16-bits quantized inputs, binary outputs and binary weights - Lite I/F.
* Vanilla version.
* @ingroup lite_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
LITE_API_ENTRY
void forward_lite_conv2d_is16os1ws1_bn_fxp(const ai_i16 *pIn,
ai_u32 *pOut_32,
const ai_u32 *pWeights,
const ai_i32 *pThreshold,
ai_i8 *pBufferA,
const ai_i32 dim_kernel,
const ai_i16 dim_im_in_x,
const ai_i16 dim_im_in_y,
const ai_i16 dim_im_out_x,
const ai_i16 dim_im_out_y,
const ai_i16 ch_im_in,
const ai_i16 ch_im_out,
const ai_i16 dim_kernel_x,
const ai_i16 dim_kernel_y,
const ai_i16 padding_x,
const ai_i16 padding_y,
const ai_i16 stride_x,
const ai_i16 stride_y,
const ai_i16 dilation_x,
const ai_i16 dilation_y,
const ai_i16 in_zeropoint);
/**
* @brief Handles 2D convolution with 16-bits quantized inputs, 16-bits quantized outputs and binary weights - Lite I/F
*
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_conv2d_is16os16ws1_fxp(const ai_i16 *pIn,
ai_i16 *pOut,
const ai_u32 *pWeights,
ai_i8 *pBufferA,
const ai_i16 dim_im_in_x,
const ai_i16 dim_im_in_y,
const ai_i16 dim_im_out_x,
const ai_i16 dim_im_out_y,
const ai_i16 ch_im_in,
const ai_i16 ch_im_out,
const ai_u32 dim_kernel,
const ai_i16 dim_kernel_x,
const ai_i16 dim_kernel_y,
const ai_i16 padding_x,
const ai_i16 padding_y,
const ai_i16 stride_x,
const ai_i16 stride_y,
const ai_i16 dilation_x,
const ai_i16 dilation_y,
const ai_i16 in_zeropoint);
AI_API_DECLARE_END
#endif /*LITE_CONV2D_DQNN_H*/
| 30,870 |
C
| 55.333942 | 119 | 0.351085 |
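The conv kernels above all share the same geometry arguments. The short sketch below relates them with the standard strided-convolution output-size formula and mirrors the 32767 bound of AI_16_OVERFLOW_CHECK; both the symmetric-padding formula and its applicability to these kernels are illustrative assumptions, not definitions taken from the header.
/* Illustrative geometry check for the conv kernels above (assumptions noted). */
#include <stdint.h>
#include <stdio.h>
/* Standard output size with symmetric padding: an assumption, not taken from the header. */
static int32_t conv_out_dim(int32_t in, int32_t filt, int32_t pad, int32_t stride) {
  return (in + 2 * pad - filt) / stride + 1;
}
static int fits_i16(int32_t v) { return v <= 32767; }  /* mirrors AI_16_OVERFLOW_CHECK */
int main(void) {
  int32_t width_out  = conv_out_dim(32, 3, 1, 1);   /* expect 32 */
  int32_t height_out = conv_out_dim(32, 3, 1, 2);   /* expect 16 */
  printf("width_out=%ld height_out=%ld fits_i16=%d\n",
         (long)width_out, (long)height_out, fits_i16(width_out * height_out));
  return 0;
}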
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/core_private.h
|
/**
******************************************************************************
* @file core_private.h
* @author AST Embedded Analytics Research Platform
* @brief private header file of common private core module defines
******************************************************************************
* @attention
*
* Copyright (c) 2019 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef CORE_PRIVATE_H
#define CORE_PRIVATE_H
#pragma once
#include "ai_math_helpers.h"
#include "ai_datatypes_internal.h"
#include "core_log.h"
/*!
* @defgroup core_private Core Library Private macros and datatypes
* @brief Common macros, datatypes and routines used privately by the core library
* @details This module contains the definitions and implementations of some
* internal routines and datatypes that are not supposed to be exposed as
* public headers, so this file should usually be included only in .c files or
* headers that are private as well
*/
/*** Foreground Colors ****************************************************/
#define CORE_COLOR_BLACK "\x1b[30m"
#define CORE_COLOR_RED "\x1b[31m"
#define CORE_COLOR_GREEN "\x1b[32m"
#define CORE_COLOR_YELLOW "\x1b[33m"
#define CORE_COLOR_BLUE "\x1b[94m"
#define CORE_COLOR_MAGENTA "\x1b[35m"
#define CORE_COLOR_CYAN "\x1b[36m"
#define CORE_COLOR_WHYTE "\x1b[37m"
#define CORE_COLOR_DEFAULT "\x1b[39m"
#define CORE_COLOR_LGRAY "\x1b[90m"
#define CORE_COLOR_LRED "\x1b[91m"
#define CORE_COLOR_LGREEN "\x1b[92m"
#define CORE_COLOR_LYELLOW "\x1b[93m"
#define CORE_COLOR_LBLUE "\x1b[94m"
#define CORE_COLOR_LMAGENTA "\x1b[95m"
#define CORE_COLOR_LCYAN "\x1b[96m"
#define CORE_COLOR_LWHITE "\x1b[97m"
/*** Text Attributes Colors *********************************************/
#define CORE_COLOR_OFF "\x1b[0m"
#define CORE_COLOR_BOLD "\x1b[1m"
#define CORE_COLOR_UNDERLINE "\x1b[4m"
#define CORE_COLOR_BLINK "\x1b[5m"
#define CORE_COLOR_BOLD_OFF "\x1b[21m"
#define CORE_COLOR_UNDERLINE_OFF "\x1b[24m"
#define CORE_COLOR_BLINK_OFF "\x1b[25m"
/*** Background Colors ****************************************************/
#define CORE_COLOR_BG_BLACK "\x1b[40m"
#define CORE_COLOR_BG_RED "\x1b[41m"
#define CORE_COLOR_BG_GREEN "\x1b[42m"
#define CORE_COLOR_BG_YELLOW "\x1b[43m"
#define CORE_COLOR_BG_BLUE "\x1b[44m"
#define CORE_COLOR_BG_MAGENTA "\x1b[45m"
#define CORE_COLOR_BG_CYAN "\x1b[46m"
#define CORE_COLOR_BG_WHITE "\x1b[47m"
#define CORE_COLOR_BG_DEFAULT "\x1b[49m"
#define CORE_COLOR_BG_LGRAY "\x1b[100m"
#define CORE_COLOR_BG_LRED "\x1b[101m"
#define CORE_COLOR_BG_LGREEN "\x1b[102m"
#define CORE_COLOR_BG_LYELLOW "\x1b[103m"
#define CORE_COLOR_BG_LBLUE "\x1b[104m"
#define CORE_COLOR_BG_LMAGENTA "\x1b[105m"
#define CORE_COLOR_BG_LCYAN "\x1b[106m"
#define CORE_COLOR_BG_LWHITE "\x1b[107m"
/*****************************************************************************/
#define CORE_ADDRESS_RANGE_INIT(start_, end_) \
core_address_range_init(start_, end_)
#define CORE_GET_BUFFER_META_INFO(meta_info_, tensor_ptr_) \
core_get_buffer_meta_info(meta_info_, tensor_ptr_)
#define CORE_ADDRESS_RANGE_END(range_) \
( (ai_ptr)(((range_)->start)+((range_)->size)) )
#define CORE_ADDRESS_RANGE_OVERLAP(overlap_) \
( ((overlap_)->start) && (((overlap_)->size)>0) )
#define CORE_ADDRESS_RANGE_OVERLAP_PARTIAL(overlap_, ref_) \
( ((overlap_)->start) && (((overlap_)->size)<((ref_)->size)) )
#define CORE_MEMORY_OVERLAP_INIT(partial_, range_, chain_id_, tensor_id_) { \
.partial = (partial_), .range = AI_PACK(range_), \
.chain_id = (chain_id_), .tensor_id = (tensor_id_) \
}
#define CORE_OFFSET(offset_, max_) \
((ai_i32)(((offset_)<0) ? AI_MAX((max_) - (offset_), 0) : AI_MIN(offset_, max_)))
/*****************************************************************************/
/** Network Context Handlers **/
/*****************************************************************************/
/*****************************************************************************/
/** Network Tensors Handlers **/
/*****************************************************************************/
#define AI_TENSOR_HAS_INTQ_INFO \
AI_BUFFER_META_HAS_INTQ_INFO
#define CORE_TENSOR_GET_SHAPE_SIZE(tensor_) \
ai_shape_get_size(AI_TENSOR_SHAPE(tensor_))
#define CORE_ASSERT_SHAPE_MATCH(x, y) \
do { \
AI_ASSERT(AI_SHAPE_H(y) == 1 || AI_SHAPE_H(x)==1 || AI_SHAPE_H(y)==AI_SHAPE_H(x)) \
AI_ASSERT(AI_SHAPE_W(y) == 1 || AI_SHAPE_W(x)==1 || AI_SHAPE_W(y)==AI_SHAPE_W(x)) \
AI_ASSERT(AI_SHAPE_D(y) == 1 || AI_SHAPE_D(x)==1 || AI_SHAPE_D(y)==AI_SHAPE_D(x)) \
AI_ASSERT(AI_SHAPE_E(y) == 1 || AI_SHAPE_E(x)==1 || AI_SHAPE_E(y)==AI_SHAPE_E(x)) \
AI_ASSERT(AI_SHAPE_CH(y) == 1 || AI_SHAPE_CH(x)==1|| AI_SHAPE_CH(y)==AI_SHAPE_CH(x)) \
AI_ASSERT(AI_SHAPE_IN_CH(y) == 1 || AI_SHAPE_IN_CH(x)==1|| AI_SHAPE_IN_CH(y)==AI_SHAPE_IN_CH(x)) \
} while(0);
#define AI_TENSOR_ARRAY_BYTE_SIZE(t_) \
AI_ARRAY_OBJ_BYTE_SIZE(AI_ARRAY_OBJ(t_->data))
#define AI_TENSOR_ARRAY_GET_DATA_ADDR(t_) \
AI_HANDLE_PTR(AI_ARRAY_OBJ_DATA_START(t_->data, void))
#define AI_TENSOR_ARRAY_UPDATE_DATA_ADDR(t_, addr_) \
{ ai_array *arr_ = AI_ARRAY_OBJ(t_->data); \
const uintptr_t off_ = (uintptr_t)arr_->data - (uintptr_t)arr_->data_start; \
arr_->data_start = AI_PTR(addr_); \
arr_->data = AI_PTR((uintptr_t)addr_ + off_); \
}
#define AI_TENSOR_INTEGER_GET_SIZE(t_) \
((t_->klass) ? (AI_KLASS_GET_INTQ_INFO_LIST(t_))->size : 0)
#define AI_TENSOR_INTEGER_GET_SCALE(t_, idx_) \
AI_INTQ_INFO_LIST_SCALE(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_float, idx_)
#define AI_TENSOR_INTEGER_GET_ZEROPOINT_I8(t_, idx_) \
AI_INTQ_INFO_LIST_ZEROPOINT(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_i8, idx_)
#define AI_TENSOR_INTEGER_GET_ZEROPOINT_U8(t_, idx_) \
AI_INTQ_INFO_LIST_ZEROPOINT(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_u8, idx_)
#define AI_TENSOR_FMT_GET_SIGN(t_) \
AI_BUFFER_FMT_GET_SIGN(AI_ARRAY_OBJ(t_->data)->format)
#define AI_TENSOR_FMT_GET_BITS(t_) \
AI_BUFFER_FMT_GET_BITS(AI_ARRAY_OBJ(t_->data)->format)
#define AI_TENSOR_FMT_GET_FBITS(t_) \
AI_BUFFER_FMT_GET_FBITS(AI_ARRAY_OBJ(t_->data)->format)
#define AI_TENSOR_FMT_GET_TYPE(t_) \
AI_BUFFER_FMT_GET_TYPE(AI_ARRAY_OBJ(t_->data)->format)
#define AI_TENSOR_GET_FMT(t_) \
(AI_ARRAY_OBJ(t_->data)->format)
/*****************************************************************************/
/** Network Buffers Handlers **/
/*****************************************************************************/
#define AI_FOR_EACH_BUFFER_ARRAY_ITEM(buffer_ptr_, buffer_array_ptr_, start_pos_, end_pos_) \
ai_buffer* buffer_ptr_ = AI_BUFFER_ARRAY_ITEM(buffer_array_ptr_, \
CORE_OFFSET(end_pos_, AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_))); \
for ( ; buffer_ptr_ && AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_) && \
(buffer_ptr_>=AI_BUFFER_ARRAY_ITEM(buffer_array_ptr_, \
CORE_OFFSET(start_pos_, AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_)))); buffer_ptr_--)
/*****************************************************************************/
/** Network Arrays Handlers **/
/*****************************************************************************/
#define AI_ARRAY_OBJ_FMT(array_) \
AI_CAST(ai_array_format, AI_ARRAY_OBJ(array_)->format)
#define AI_ARRAY_OBJ_SIZE(array_) \
(AI_ARRAY_OBJ(array_)->size)
#define AI_ARRAY_OBJ_BYTE_SIZE(array_) \
AI_SIZE(AI_ARRAY_GET_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \
AI_ARRAY_OBJ_SIZE(array_)))
#define AI_ARRAY_OBJ_DATA_SIZE(array_) \
AI_ARRAY_GET_DATA_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \
AI_ARRAY_OBJ_SIZE(array_))
#define AI_ARRAY_OBJ_DATA(array_, type_) \
AI_CAST(type_*, AI_ARRAY_OBJ(array_)->data)
#define AI_ARRAY_OBJ_DATA_START(array_, type_) \
AI_CAST(type_*, AI_ARRAY_OBJ(array_)->data_start)
#define AI_ARRAY_OBJ_ELEM(array_, type_, pos_) \
AI_ARRAY_OBJ_DATA(array_, type_)[(pos_)]
/*****************************************************************************/
/** Network Tensors Chains / Lists Handlers **/
/*****************************************************************************/
#define SET_TENSOR_IN(chain_, pos_) \
(GET_TENSOR_LIST_IN(chain_)->tensor[(pos_)])
#define SET_TENSOR_OUT(chain_, pos_) \
(GET_TENSOR_LIST_OUT(chain_)->tensor[(pos_)])
#define AI_NODE_IO_GET(node_, in_, out_) \
ASSERT_NODE_SANITY(node_) \
ai_tensor* in_ = GET_TENSOR_IN((node_)->tensors, 0); \
ai_tensor* out_ = GET_TENSOR_OUT((node_)->tensors, 0); \
ASSERT_TENSOR_SANITY(in_) \
ASSERT_TENSOR_SANITY(out_)
/*****************************************************************************/
#define AI_BITS_TO_BYTES(bits_) \
(((bits_)+0x7) >> 3)
#define AI_BYTES_TO_BITS(bytes_) \
((bytes_) << 3)
/*****************************************************************************/
/** Network Nodes Handlers **/
/*****************************************************************************/
#define AI_NODE_IS_FIRST(node) \
(AI_NODE_OBJ(node)==AI_NODE_OBJ(AI_NODE_OBJ(node)->network->input_node))
#define AI_NODE_IS_LAST(node_) \
((AI_NODE_OBJ(node_)==AI_NODE_OBJ(node_)->next) || \
(AI_NODE_OBJ(node_)->next==NULL))
#define AI_FOR_EACH_NODE_DO(node_, nodes_) \
for (ai_node* node_ = AI_NODE_OBJ(nodes_); (node_); \
node_ = ((AI_NODE_IS_LAST(node_)) ? NULL : (node_)->next))
/*****************************************************************************/
typedef struct {
ai_ptr start;
ai_size size;
} ai_address_range;
typedef struct {
ai_address_range range;
ai_u16 chain_id;
ai_u16 tensor_id;
ai_bool partial;
} ai_memory_overlap;
/*****************************************************************************/
AI_DECLARE_STATIC
ai_address_range core_address_range_init(
const ai_handle start, const ai_handle end)
{
ai_address_range r;
r.start = (start<end) ? start : end;
r.size = (ai_size) ((start<end)
? ((ai_uptr)end-(ai_uptr)start) : ((ai_uptr)start-(ai_uptr)end));
return r;
}
AI_DECLARE_STATIC
ai_buffer_meta_info* core_get_buffer_meta_info(
ai_buffer_meta_info* meta,
const ai_tensor* t)
{
if (!meta) return NULL;
AI_ASSERT(t && t->data)
ai_bool ok;
meta->flags = 0x0;
meta->intq_info = AI_KLASS_GET_INTQ_INFO_LIST(t);
ok = (meta->intq_info && (meta->intq_info->size>0));
meta->flags |= (ok) ? AI_BUFFER_META_HAS_INTQ_INFO : 0x0;
return (ok) ? meta : NULL;
}
#if 0
#include <stdio.h>
#include <stdarg.h>
AI_DECLARE_STATIC
void _dump_file_print(
const char* fname, const char* fmt, ...)
{
static FILE* fp = NULL;
if (fname) {
if (!fp) {
fp = fopen(fname, "a");
}
}
if (fp) {
va_list args;
va_start(args, fmt);
vfprintf(fp, fmt, args);
va_end(args);
fflush(fp);
}
}
AI_DECLARE_STATIC
void _dump_bytearray(
const char* fname,
const ai_handle src, const ai_size src_size, const ai_u8 src_id,
const char* name)
{
static FILE* fp = NULL;
if (fname && src && (src_size>0)) {
if (!fp) {
fp = fopen(fname, "a");
}
}
if (fp) {
switch (src_id) {
case 1:
{
const ai_float* src_value = (const ai_float*)src;
fprintf(fp, "ai_float %s[%u] = {%f", name, src_size, src_value[0]);
for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %f", src_value[i]); }
} break;
case 2:
{
const ai_i8* src_value = (const ai_i8*)src;
fprintf(fp, "ai_i8 %s[%u] = {%d", name, src_size, src_value[0]);
for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %d", src_value[i]); }
} break;
case 3:
{
const ai_u8* src_value = (const ai_u8*)src;
fprintf(fp, "ai_u8 %s[%u] = {%u", name, src_size, src_value[0]);
for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %u", src_value[i]); }
} break;
default:
fprintf(fp, "format not supported: %u {", src_id);
break;
}
fprintf(fp, "};\n");
fflush(fp);
}
}
#endif
#endif /* CORE_PRIVATE_H */
| 13,110 |
C
| 34.822404 | 114 | 0.510297 |
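A standalone sketch exercising two of the small helpers defined in core_private.h above: the AI_BITS_TO_BYTES rounding and the pointer-ordering logic of core_address_range_init. The bodies below copy the logic shown in the header onto plain C types so the snippet builds on its own; it is an illustration, not the library code.
/* Plain-C restatement of AI_BITS_TO_BYTES and core_address_range_init (see above). */
#include <stdint.h>
#include <stdio.h>
#define BITS_TO_BYTES(bits_) (((bits_) + 0x7) >> 3)  /* same rounding as AI_BITS_TO_BYTES */
typedef struct {
  const uint8_t *start;
  size_t         size;
} address_range;
/* Mirrors core_address_range_init: order the two ends and take their distance. */
static address_range address_range_init(const void *a, const void *b) {
  const uint8_t *pa = (const uint8_t *)a;
  const uint8_t *pb = (const uint8_t *)b;
  address_range r;
  r.start = (pa < pb) ? pa : pb;
  r.size  = (size_t)((pa < pb) ? (pb - pa) : (pa - pb));
  return r;
}
int main(void) {
  uint8_t buf[64];
  address_range r = address_range_init(&buf[48], &buf[0]);  /* ends may come in either order */
  printf("range size: %zu bytes; 7 bits -> %d byte(s)\n", r.size, BITS_TO_BYTES(7));
  return 0;
}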
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_operators.h
|
#ifndef LITE_OPERATORS_H
#define LITE_OPERATORS_H
#pragma once
#include "lite_bn_f32.h"
#include "lite_bn_integer.h"
#include "lite_conv2d.h"
#include "lite_conv2d_dqnn.h"
#include "lite_convert_dqnn.h"
#include "lite_dense_if32.h"
#include "lite_dense_is1.h"
#include "lite_dense_is1ws1.h"
#include "lite_dense_ws1.h"
#include "lite_gru_f32.h"
#include "lite_dw_dqnn.h"
#include "lite_pw_dqnn.h"
#include "lite_dense_is8os8ws8.h"
#include "lite_generic_float.h"
#include "lite_pool_f32.h"
#include "lite_maxpool_dqnn.h"
#include "lite_nl_generic_float.h"
#include "lite_nl_generic_integer.h"
#include "lite_pad_generic.h"
#include "lite_pad_dqnn.h"
#include "lite_upsample_generic.h"
#endif /* LITE_OPERATORS_H */
| 718 |
C
| 23.793103 | 36 | 0.727019 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_norm.h
|
/**
******************************************************************************
* @file layers_norm.h
* @author AST Embedded Analytics Research Platform
* @brief header file of AI platform normalization layers datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_NORM_H
#define LAYERS_NORM_H
#pragma once
#include "layers_common.h"
/*!
* @defgroup layers_norm Normalization Layers Definitions
* @brief Definitions of normalization layer datatypes and forward functions
*
*/
AI_API_DECLARE_BEGIN
/*!
* @struct ai_layer_bn
* @ingroup layers_norm
* @brief Batch normalization (scale with bias) layer
*/
typedef ai_layer_base ai_layer_bn;
/*!
* @struct ai_layer_lrn
* @ingroup layers_norm
* @brief Local Response Normalization layer
*
* Divides each element by a scale factor computed
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lrn_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_u32 local_size; /*!< size of the normalization window */
ai_float k; /*!< bias term */
ai_float alpha; /*!< input scale */
ai_float beta; /*!< scale exponent */
} ai_layer_lrn;
/*!
* @enum ai_norm_type_e
* @ingroup layers_norm
* @brief store the type of normalization algorithm to apply
*/
typedef enum ai_norm_type_ {
NONE = 0,
L1 = 1,
L2 = 2,
MAX = 3,
} ai_norm_type_e;
/*!
* @struct ai_layer_norm
* @ingroup layers_norm
* @brief Lp Normalization layer
*
* Normalizes the tensor along the 'axis' direction using the Lp norm.
* Optionally divides the result by the number of elements.
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_norm_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_shape_idx axis; /*! normalization axis */
ai_float exponent; /*!< normalization exponent p */
ai_bool scale; /*!< multiplies by the pth root of the number of elements */
ai_norm_type_e norm_type;
} ai_layer_norm;
/*!
* @brief Local response normalization computed on a float array
* @ingroup layers_norm
* @param out opaque handler to float output channel
* @param in opaque handler to float input channel
* @param pad amount of padding for the channels
*/
AI_INTERNAL_API
void func_lrn_array_f32(ai_handle out, const ai_handle in,
const ai_size in_size, const ai_size channel_size,
const ai_i32 pad, const ai_float k,
const ai_float alpha, const ai_float beta);
/*!
* @brief Lp normalization computed on a float array
* @ingroup layers_norm
* @param out opaque handler to float output channel
* @param in opaque handler to float input channel
* @param exponent p exponent for the Lp normalization
* @param axis_stride stride (in array elements) of the normalization axis
* @param axis_size size of the normalization axis
* @param outer_size number of tensor slices (including the normalization axis)
* on which to compute the normalization
*/
AI_INTERNAL_API
void func_norm_array_f32(ai_handle out, const ai_handle in,
const ai_float exponent,
const ai_float norm,
const ai_size axis_stride,
const ai_size axis_size,
const ai_size outer_size);
/*!
* @brief Max normalization computed on float array
* @ingroup layers_norm
* @param out opaque handler to float output channel
* @param in opaque handler to float input channel
* @param axis_stride stride (in array elements) of the normalization axis
* @param axis_size size of the normalization axis
* @param outer_size number of tensor slices (including the normalization axis)
*/
AI_INTERNAL_API
void func_norm_max_array_f32(ai_handle out, const ai_handle in,
const ai_float norm,
const ai_size axis_size,
const ai_size n_el);
/*!
* @brief Fast L2 normalization computed on a float array
* @ingroup layers_norm
* @param out opaque handler to float output channel
* @param in opaque handler to float input channel
* @param axis_size size of the normalization axis
* @param n_el total number of elements in the tensor
*/
AI_INTERNAL_API
void func_norm_l2_fast_array_f32(ai_handle out, const ai_handle in,
const ai_float norm,
const ai_size axis_size,
const ai_size outer_size);
/*!
* @brief Fast L1 normalization computed on a float array
* @ingroup layers_norm
* @param out opaque handler to float output channel
* @param in opaque handler to float input channel
* @param axis_size size of the normalization axis
* @param n_el total number of elements in the tensor
*/
AI_INTERNAL_API
void func_norm_l1_fast_array_f32(ai_handle out, const ai_handle in,
const ai_float norm,
const ai_size axis_size,
const ai_size n_el);
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Computes the activations of a batchnorm (scale + bias) layer.
* @ingroup layers_norm
* @param layer the batch normalization (bn) layer
*/
AI_INTERNAL_API
void forward_bn(ai_layer* layer);
/*!
* @brief Computes the activations of a batchnorm (scale + bias) layer with
* integer format
* @ingroup layers_norm
* @param layer the batch normalization (bn) layer
*/
AI_INTERNAL_API
void forward_bn_integer(ai_layer* layer);
/*!
* @brief Computes the activations of a Local Response Normalization Layer.
* @ingroup layers_norm
* @param layer the local response normalization (lrn) layer
*/
AI_INTERNAL_API
void forward_lrn(ai_layer* layer);
/*!
* @brief Computes the activations of a normalization layer.
* @ingroup layers_norm
* @param layer the normalization (norm) layer
*/
AI_INTERNAL_API
void forward_norm(ai_layer* layer);
/*!
* @brief Batch Normalization with 16-bit input, 16-bit threshold and binary output.
* It is implemented using a threshold, and this is possible because the output is binary.
* @param layer the batch normalization layer
*/
AI_INTERNAL_API
void forward_bn_is16os1ws16(ai_layer *pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_NORM_H*/
| 6,910 |
C
| 31.909524 | 97 | 0.612156 |
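To make the Lp normalization described for ai_layer_norm concrete, here is a plain-C restatement of the textbook operation over a single 1-D axis: divide each element by the Lp norm of that axis. It deliberately omits the optional 'scale' behaviour and illustrates the math only; it is not the func_norm_array_f32 implementation.
/* Textbook Lp normalization over one axis (illustration only, not the library kernel). */
#include <math.h>
#include <stdio.h>
#include <stddef.h>
static void lp_normalize(float *data, size_t n, float p) {
  float acc = 0.0f;
  for (size_t i = 0; i < n; ++i) acc += powf(fabsf(data[i]), p);
  float norm = powf(acc, 1.0f / p);
  if (norm == 0.0f) norm = 1.0f;               /* guard against an all-zero axis */
  for (size_t i = 0; i < n; ++i) data[i] /= norm;
}
int main(void) {
  float v[4] = { 3.0f, -4.0f, 0.0f, 0.0f };
  lp_normalize(v, 4, 2.0f);                    /* L2: expect 0.60 -0.80 0.00 0.00 */
  printf("%.2f %.2f %.2f %.2f\n", v[0], v[1], v[2], v[3]);
  return 0;
}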
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_conv2d_dqnn.h
|
/**
******************************************************************************
* @file layers_conv2d_dqnn.h
* @author AIS
* @brief header file of AI platform DQNN conv datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_CONV2D_DQNN_H
#define LAYERS_CONV2D_DQNN_H
#pragma once
#include "layers_common.h"
#include "layers_conv2d.h"
/*!
* @defgroup layers_conv2d_dqnn Layers Definitions
* @brief Definitions of DQNN conv2d layer datatypes and forward functions
*
*/
AI_API_DECLARE_BEGIN
#define AI_DQNN_PAD_1_KEY (1)
#define AI_DQNN_PAD_M1_KEY (-1)
#define AI_DQNN_PAD_0_KEY (0)
#define AI_DQNN_PAD_1_VALUE (0x0)
#define AI_DQNN_PAD_M1_VALUE (0xFFFFFFFF)
#define AI_DQNN_PAD_0_VALUE (0x2)
/*!
* @struct ai_layer_conv2d_dqnn
* @ingroup layers_conv2d_dqnn
* @brief conv2d_dqnn layer
*
* @ref forward_conv2d_is1os1ws1
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_dqnn_ {
AI_LAYER_CONV2D_FIELDS_DECLARE
ai_i32 pad_value;
} ai_layer_conv2d_dqnn;
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Handles point wise convolution with binary input, binary output and
* binary weights
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_pw_is1os1ws1_bn(ai_layer *pLayer);
/*!
* @brief Handles point wise convolution with binary input, binary output and
* binary weights - Optimized thanks to Optim2 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_pw_is1os1ws1_bn_optim2(ai_layer *pLayer);
/*!
* @brief Handles point wise convolution with binary input, 8-bits output and
* binary weights
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_pw_is1os8ws1_bn(ai_layer *pLayer);
/*!
* @brief Handles point wise convolution with binary input, 8-bits output and
* binary weights - Optimized thanks to Optim1 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_pw_is1os8ws1_bn_optim1(ai_layer *pLayer);
/*!
* @brief Handles point-wise convolution with binary input, float32 output
* and binary weights
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_pw_is1of32ws1_bn(ai_layer *pLayer);
/*!
* @brief Handles point-wise convolution with binary input, float32 output
* and binary weights - Optimized thanks to Optim1 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_pw_is1of32ws1_bn_optim1(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os1ws1_bn(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - Optimized thanks to Optim2 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os1ws1_bn_optim2(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os8ws1_bn(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights - Optimized thanks to Optim1 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os8ws1_bn_optim1(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like)
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os1ws1_bn_pad0(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like) - Optimized thanks to
* Optim0 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os1ws1_bn_pad0_optim0(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights - with 0 padding (QKeras like)
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os8ws1_bn_pad0(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like)
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os1ws1_bn_pad1(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like) - Optimized thanks
* to Optim2 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os1ws1_bn_pad1_optim2(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights - with +1/-1 padding (Larq like)
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os8ws1_bn_pad1(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, 8-bits output and
* binary weights - with +1/-1 padding (Larq like) - Optimized thanks
* to Optim1 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os8ws1_bn_pad1_optim1(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
* binary output
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is8os1ws8(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
* binary output - Optimized thanks to Optim2 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is8os1ws8_optim2(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
* binary output - quantized with DoReFa SotA quantizer
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_dorefa_is8os1ws8(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with 16-bits quantized input, binary weights
* and binary output
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is16os1ws1_bn_fxp(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with 16-bits quantized input, binary weights
* and 16-bits quantized output
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is16os16ws1_fxp(ai_layer *pLayer);
/*!
* @brief Handles depth-wise convolution with binary input, binary output and
* binary weights
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_dw_is1os1ws1_bn(ai_layer *pLayer);
/*!
* @brief Handles depth-wise convolution with binary input, binary output and
* binary weights - Optimized thanks to Optim3 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_dw_is1os1ws1_bn_optim3(ai_layer *pLayer);
/*!
* @brief Handles depth-wise convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like)
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_dw_is1os1ws1_bn_pad0(ai_layer *pLayer);
/*!
* @brief Handles depth-wise convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like) - Optimized thanks to
* Optim3 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_dw_is1os1ws1_bn_pad0_optim3(ai_layer *pLayer);
/*!
* @brief Handles depth-wise convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like)
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_dw_is1os1ws1_bn_pad1(ai_layer *pLayer);
/*!
* @brief Handles depth-wise convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like) - Optimized thanks to
* Optim3 assumptions
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_dw_is1os1ws1_bn_pad1_optim3(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with 8-bits quantized Input and output and
* binary weights
* @ingroup layers_conv2d_dqnn
* @param layer conv2d_dqnn layer
*/
AI_INTERNAL_API
void forward_conv2d_is8os8ws1(ai_layer *pLayer);
/**
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os16ws1_bn_pad0_fxp(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os16ws1_bn_pad1_fxp(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* - Optimized thanks to Optim1 assumptions
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is1os16ws1_bn_pad1_optim1_fxp(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is1ou16ws1_bn_pad0_fxp(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* @ingroup lite_conv2d_dqnn
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is1ou16ws1_bn_pad1_fxp(ai_layer *pLayer);
/*!
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* - Optimized thanks to Optim1 assumptions
* @ingroup lite_conv2d_dqnn
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is1ou16ws1_bn_pad1_optim1_fxp(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer quantized 2D convolutional layer
* for SSSA per channel quantized RGB scheme using n_channel_in = 3
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_conv2d_is8os8ws8_sssa_ch_rgb(const ai_i8 *pData_in,
ai_i8 *pData_out,
const ai_i8 *pWeights,
const ai_i32 *pBias,
ai_u16 *pBuffer_a,
const ai_size width_in,
const ai_size height_in,
const ai_size width_out,
const ai_size height_out,
const ai_u16 n_channel_in,
const ai_u16 n_channel_out,
const ai_size filt_width,
const ai_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_float in_scale,
const ai_float out_scale,
const ai_float *pWt_scale,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_bool out_ch_format,
ai_i16 *p_out_r_shift,
ai_i32 *p_out_factor);
/*!
* @brief Computes the activations of a point-wise integer quantized convolution
* for SSSA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_pw_is8os8ws8_sssa_ch(const ai_i8 *pData_in,
ai_i8 *pData_out,
const ai_i8 *pWeights,
const ai_i32 *pBias,
ai_u16 *pBuffer_a,
const ai_size width_in,
const ai_size height_in,
const ai_size width_out,
const ai_size height_out,
const ai_u16 n_channel_in,
const ai_u16 n_channel_out,
const ai_size filt_width,
const ai_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_float in_scale,
const ai_float out_scale,
const ai_float *pWt_scale,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
ai_i16 *p_out_r_shift,
ai_i32 *p_out_factor,
ai_i32 AI_PWOverlay,
ai_i16 *bufferA,
ai_i32 scratch_size);
// st_nn_context_t context);
/*!
* @brief Computes the activations of a depth-wise integer quantized convolution
* for SSSA per channel quantized scheme
* @ingroup layers_conv2d
* @param layer the convolutional (conv) layer
*/
AI_INTERNAL_API
void forward_dw_is8os8ws8_sssa_ch(const ai_i8 *pData_in,
ai_i8 *pData_out,
const ai_i8 *pWeights,
const ai_i32 *pBias,
ai_u16 *pBuffer_a,
const ai_size width_in,
const ai_size height_in,
const ai_size width_out,
const ai_size height_out,
const ai_u16 n_channel_in,
const ai_u16 n_channel_out,
const ai_size filt_width,
const ai_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_float in_scale,
const ai_float out_scale,
const ai_float *pWt_scale,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
ai_i16 *p_out_r_shift,
ai_i32 *p_out_factor);
AI_API_DECLARE_END
#endif /*LAYERS_CONV2D_DQNN_H*/
| 17,573 |
C
| 34.647059 | 91 | 0.579981 |
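A short sketch of how the AI_DQNN_PAD_*_KEY / *_VALUE constants defined above can be paired when filling the pad_value field of ai_layer_conv2d_dqnn. The numeric constants are copied from the header; treating the key as the user-facing padding mode (+1, -1 or 0) that selects the encoded value is an assumption about intended use.
/* Map an assumed user-facing DQNN padding key (+1, -1, 0) to the encoded pad_value.   */
/* The constants repeat AI_DQNN_PAD_*_KEY / AI_DQNN_PAD_*_VALUE from the header above. */
#include <stdint.h>
#include <stdio.h>
#define DQNN_PAD_1_KEY    (1)
#define DQNN_PAD_M1_KEY   (-1)
#define DQNN_PAD_0_KEY    (0)
#define DQNN_PAD_1_VALUE  (0x0)
#define DQNN_PAD_M1_VALUE (0xFFFFFFFF)
#define DQNN_PAD_0_VALUE  (0x2)
static uint32_t dqnn_pad_value_from_key(int key) {
  switch (key) {
    case DQNN_PAD_1_KEY:  return DQNN_PAD_1_VALUE;
    case DQNN_PAD_M1_KEY: return DQNN_PAD_M1_VALUE;
    default:              return DQNN_PAD_0_VALUE;  /* DQNN_PAD_0_KEY */
  }
}
int main(void) {
  printf("pad +1 -> 0x%08lX, pad -1 -> 0x%08lX, pad 0 -> 0x%08lX\n",
         (unsigned long)dqnn_pad_value_from_key(DQNN_PAD_1_KEY),
         (unsigned long)dqnn_pad_value_from_key(DQNN_PAD_M1_KEY),
         (unsigned long)dqnn_pad_value_from_key(DQNN_PAD_0_KEY));
  return 0;
}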
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_upsample_generic.h
|
/**
******************************************************************************
* @file layers_upsample_generic.h
* @author Cyril Enault
* @brief header file of AI platform upsample generic datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2022 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_UPSAMPLE_H
#define LAYERS_UPSAMPLE_H
#pragma once
#include "layers_generic.h"
/*!
* @defgroup layers_upsample_generic Layers Definitions
* @brief Definitions of the upsample layer forward functions
*
*/
AI_API_DECLARE_BEGIN
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Handles generic upsampling in nearest mode
* @ingroup layers_generic
* @param layer upsample layer
*/
AI_INTERNAL_API
void forward_upsample_nearest(ai_layer *pLayer);
/*!
* @brief Handles generic upsampling in zeros mode
* @ingroup layers_generic
* @param layer upsample layer
*/
AI_INTERNAL_API
void forward_upsample_zeros(ai_layer *pLayer);
/*!
* @brief Handles generic upsampling in bilinear mode
* @ingroup layers_generic
* @param layer upsample layer
*/
AI_INTERNAL_API
void forward_upsample_bilinear(ai_layer *pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_UPSAMPLE_H*/
| 1,845 |
C
| 26.552238 | 80 | 0.509485 |
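To make the 'nearest mode' above concrete, the snippet below performs textbook nearest-neighbour upsampling of one float channel by integer factors. It illustrates the operation only; it does not reflect the internals of forward_upsample_nearest, whose layer plumbing is not visible here.
/* Textbook nearest-neighbour upsampling for one float channel (illustration only). */
#include <stdio.h>
#include <stddef.h>
static void upsample_nearest(const float *in, float *out,
                             size_t w_in, size_t h_in,
                             size_t sx, size_t sy) {
  const size_t w_out = w_in * sx;
  for (size_t y = 0; y < h_in * sy; ++y)
    for (size_t x = 0; x < w_out; ++x)
      out[y * w_out + x] = in[(y / sy) * w_in + (x / sx)];
}
int main(void) {
  const float in[4] = { 1.f, 2.f,
                        3.f, 4.f };            /* 2x2 input */
  float out[16];                               /* 4x4 output for 2x scaling */
  upsample_nearest(in, out, 2, 2, 2, 2);
  for (size_t y = 0; y < 4; ++y) {
    for (size_t x = 0; x < 4; ++x) printf("%.0f ", out[y * 4 + x]);
    printf("\n");
  }
  return 0;
}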
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_bn_f32.h
|
#ifndef LITE_BN_F32_H
#define LITE_BN_F32_H
#pragma once
#include "ai_lite_interface.h"
/*!
* @brief Forward function for a batch normalization (BN) layer with
* signed float input, signed float output, and float parameters.
* @ingroup lite_bn_f32
* @param output The pointer to output buffer.
* @param input The pointer to input buffer.
* @param scale The pointer to BN scale param.
* @param bias The pointer to bias.
* @param n_elements The number of elements in the input tensor.
* @param n_channel_in The number of channel in the input tensor.
*/
LITE_API_ENTRY
void forward_lite_bn_if32of32wf32(
ai_float* output, const ai_float* input,
const ai_float* scale, const ai_float* bias,
const ai_u32 n_elements, const ai_u32 n_channel_in);
#endif /* LITE_BN_F32_H */
| 791 |
C
| 29.461537 | 69 | 0.718078 |
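A hedged reference model of what forward_lite_bn_if32of32wf32 is described to do: scale and shift each element by its channel's BN parameters. Both the channel-last layout (channel = element index modulo n_channel_in) and the out = in * scale + bias formula are assumptions inferred from the parameter descriptions, and the typedefs stand in for ai_float/ai_u32 so the snippet builds without the ST headers.
/* Reference behaviour assumed for forward_lite_bn_if32of32wf32:             */
/* out[i] = in[i] * scale[c] + bias[c], with c the channel of element i.     */
/* The channel-last layout (c = i % n_channel_in) is an assumption, not a    */
/* fact taken from the header.                                               */
#include <stdio.h>
typedef float    ai_float_t;   /* stand-in for ai_float */
typedef unsigned ai_u32_t;     /* stand-in for ai_u32   */
static void bn_f32_reference(ai_float_t *output, const ai_float_t *input,
                             const ai_float_t *scale, const ai_float_t *bias,
                             ai_u32_t n_elements, ai_u32_t n_channel_in) {
  for (ai_u32_t i = 0; i < n_elements; ++i) {
    const ai_u32_t c = i % n_channel_in;
    output[i] = input[i] * scale[c] + bias[c];
  }
}
int main(void) {
  const ai_float_t in[6]    = { 1, 2, 3, 4, 5, 6 };   /* 3 pixels x 2 channels */
  const ai_float_t scale[2] = { 0.5f, 2.0f };
  const ai_float_t bias[2]  = { 1.0f, -1.0f };
  ai_float_t out[6];
  bn_f32_reference(out, in, scale, bias, 6, 2);
  for (int i = 0; i < 6; ++i) printf("%.1f ", out[i]);
  printf("\n");
  return 0;
}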
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_dense_is1ws1.h
|
#ifndef _LITE_DENSE_IS1WS1_H
#define _LITE_DENSE_IS1WS1_H
#pragma once
#include "ai_lite_interface.h"
/*!
* @brief Forward function for a dense layer with signed binary input,
* signed binary output, and signed binary weights.
* @ingroup lite_dense_is1ws1
* @param output The pointer to output buffer.
* @param input The pointer to input buffer.
* @param weights The pointer to weights.
* @param bias The pointer to bias (NULL if not available).
* @param scratch The pointer to the scratch buffer.
* @param n_channel_in The number of channels of the input.
* @param n_channel_out The number of channels of the output, i.e.,
* the number of dense hidden neurons.
*/
LITE_API_ENTRY
void forward_lite_dense_is1os1ws1(
ai_pbits *output, const ai_pbits *input, const ai_pbits *weights,
const ai_pbits *bias, ai_i32 *scratch,
const ai_u32 n_channel_in, const ai_u32 n_channel_out
);
/*!
* @brief Forward function for a dense layer with signed binary input,
* signed binary output, and signed binary weights.
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
* weights are those of the dense layer, scale is that of the BN, and the offset
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
* agree with such convention, the behavior is undefined.
* @ingroup lite_dense_is1ws1
* @param output The pointer to output buffer.
* @param input The pointer to input buffer.
* @param weights The pointer to weights.
* @param scale The pointer to scale.
* @param offset The pointer to offset.
* @param scratch The pointer to the scratch buffer.
* @param n_channel_in The number of channels of the input.
* @param n_channel_out The number of channels of the output, i.e.,
* the number of dense hidden neurons.
*/
LITE_API_ENTRY
void forward_lite_dense_is1os1ws1_bn(
ai_pbits *output, const ai_pbits *input, const ai_pbits *weights,
const ai_float *scale, const ai_float *offset, ai_i32 *scratch,
const ai_u32 n_channel_in, const ai_u32 n_channel_out
);
/*!
* @brief Forward function for a dense layer with signed binary input,
* signed binary output, and signed 16bit weights.
* @ingroup lite_dense_is1ws1
* @param output The pointer to output buffer.
* @param input The pointer to input buffer.
* @param weights The pointer to weights.
* @param bias The pointer to bias (NULL if not available).
* @param scratch The pointer to the scratch buffer (signed 32bit).
* @param n_channel_in The number of channels of the input.
* @param n_channel_out The number of channels of the output, i.e.,
* the number of dense hidden neurons.
*/
LITE_API_ENTRY
void forward_lite_dense_is1os16ws1(
ai_i16 *output, const ai_pbits *input, const ai_pbits *weights,
const ai_pbits *bias, ai_i32 *scratch,
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
/*!
* @brief Forward function for a dense layer with signed binary input,
* signed binary output, and signed 16bit weights.
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
* weights are those of the dense layer, scale is that of the BN, and the offset
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
* agree with such convention, the behavior is undefined.
* @ingroup lite_dense_is1ws1
* @param output The pointer to output buffer.
* @param input The pointer to input buffer.
* @param weights The pointer to weights.
* @param bias The pointer to bias (NULL if not available).
* @param scratch The pointer to the scratch buffer (signed 32bit).
* @param n_channel_in The number of channels of the input.
* @param n_channel_out The number of channels of the output, i.e.,
* the number of dense hidden neurons.
*/
LITE_API_ENTRY
void forward_lite_dense_is1os16ws1_bn(
ai_i16 *output, const ai_pbits *input, const ai_pbits *weights,
const ai_float *scale, const ai_float *offset, ai_i32 *scratch,
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
/*!
* @brief Forward function for a dense layer with signed binary input,
* signed float output, and signed binary weights.
* @ingroup lite_dense_is1ws1
* @param output The pointer to output buffer.
* @param input The pointer to input buffer.
* @param weights The pointer to weights.
* @param bias The pointer to bias (NULL if not available).
* @param scratch The pointer to the scratch buffer (unused).
* @param n_channel_in The number of channels of the input.
* @param n_channel_out The number of channels of the output, i.e.,
* the number of dense hidden neurons.
*/
LITE_API_ENTRY
void forward_lite_dense_is1of32ws1(
ai_float *output, const ai_pbits *input, const ai_pbits *weights,
const ai_pbits *bias, ai_i32 *scratch,
const ai_u32 n_channel_in, const ai_u32 n_channel_out
);
/*!
* @brief Forward function for a dense layer with signed binary input,
* signed float output, and signed binary weights.
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
* weights are those of the dense layer, scale is that of the BN, and the offset
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
* agree with such convention, the behavior is undefined.
* @ingroup lite_dense_is1ws1
* @param output The pointer to output buffer.
* @param input The pointer to input buffer.
* @param weights The pointer to weights.
* @param scale The pointer to scale.
* @param offset The pointer to offset.
* @param scratch The pointer to the scratch buffer (unused).
* @param n_channel_in The number of channels of the input.
* @param n_channel_out The number of channels of the output, i.e.,
* the number of dense hidden neurons.
*/
LITE_API_ENTRY
void forward_lite_dense_is1of32ws1_bn(
ai_float *output, const ai_pbits *input, const ai_pbits *weights,
const ai_float *scale, const ai_float *offset, ai_i32 *scratch,
const ai_u32 n_channel_in, const ai_u32 n_channel_out
);
#endif /*_LITE_DENSE_IS1WS1_H*/
| 5,999 |
C
| 40.666666 | 80 | 0.724287 |
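The fused-BN comments above fix a convention: the 'offset' handed to the *_bn dense kernels must equal dense bias * BN scale + BN offset, with 'scale' being the BN scale. Below is a brief sketch of that precomputation step; only the formula comes from the comments, while the surrounding types and loop are illustrative.
/* Precompute fused offsets for the *_bn dense kernels, per the convention    */
/* documented above: offset[k] = dense_bias[k] * bn_scale[k] + bn_offset[k].  */
#include <stdio.h>
static void fuse_bn_offsets(float *fused_offset,
                            const float *dense_bias,
                            const float *bn_scale,
                            const float *bn_offset,
                            unsigned n_channel_out) {
  for (unsigned k = 0; k < n_channel_out; ++k)
    fused_offset[k] = dense_bias[k] * bn_scale[k] + bn_offset[k];
}
int main(void) {
  const float dense_bias[3] = { 0.1f, -0.2f, 0.0f };
  const float bn_scale[3]   = { 2.0f,  1.0f, 0.5f };
  const float bn_offset[3]  = { 0.0f,  0.3f, -0.1f };
  float fused[3];
  fuse_bn_offsets(fused, dense_bias, bn_scale, bn_offset, 3);
  printf("%.2f %.2f %.2f\n", fused[0], fused[1], fused[2]);
  return 0;
}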
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_custom.h
|
/**
******************************************************************************
* @file layers_custom.h
* @author Marco Lattuada
* @brief header file of AI platform custom layers datatype
******************************************************************************
* @attention
*
* Copyright (c) 2020 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_CUSTOM_H
#define LAYERS_CUSTOM_H
#pragma once
#include "layers_common.h"
/*!
* @defgroup layers_custom Custom layer definitions
* @brief Definitions of custom layer structures
*/
AI_API_DECLARE_BEGIN
/*!
* @struct ai_layer_custom
* @ingroup layers_custom
* @brief Custom layer wrapper
*
* The custom layer wrapper
*/
typedef ai_layer_stateful ai_layer_custom;
AI_API_DECLARE_END
#endif /*LAYERS_CUSTOM_H*/
| 1,217 |
C
| 24.914893 | 80 | 0.518488 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_conv2d.h
|
/**
******************************************************************************
* @file lite_conv2d.h
* @author AIS
* @brief header file of AI platform lite conv2d kernel datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LITE_CONV2D_H
#define LITE_CONV2D_H
#pragma once
#include "ai_lite_interface.h"
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Handles 2D convolution with float input, float output and
* float weights
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void forward_lite_conv2d_if32of32wf32(const ai_float *pDataIn_init,
ai_float *pDataOut_init,
const ai_ptr_const pWeights_init,
const ai_ptr_const pBias_init,
const ai_size n_channel_in,
const ai_size n_channel_out,
const ai_size width_in,
const ai_size height_in,
const ai_size width_out,
const ai_size height_out,
const ai_size filt_width,
const ai_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_size filt_height_dilated,
const ai_size filt_width_dilated,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_size n_groups);
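/*
 * Editorial note, not part of the ST API: a minimal sketch of how the dilated
 * filter sizes and output sizes passed to the kernel above are commonly
 * derived, assuming the usual explicit-padding/stride relations. The helper
 * name and the formulas are assumptions, not taken from this header.
 */
#if 0
static void conv2d_derive_dims(const ai_size filt_width, const ai_size filt_height,
                               const ai_u16 dilation_x, const ai_u16 dilation_y,
                               const ai_size width_in, const ai_size height_in,
                               const ai_u16 filt_pad_x, const ai_u16 filt_pad_y,
                               const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
                               ai_size *filt_width_dilated, ai_size *filt_height_dilated,
                               ai_size *width_out, ai_size *height_out)
{
  /* Effective kernel extent once dilation is applied. */
  *filt_width_dilated  = (ai_size)((filt_width  - 1) * dilation_x + 1);
  *filt_height_dilated = (ai_size)((filt_height - 1) * dilation_y + 1);
  /* Output extent with explicit padding and stride (floor division). */
  *width_out  = (ai_size)((width_in  + 2u * filt_pad_x - *filt_width_dilated)  / filt_stride_x + 1u);
  *height_out = (ai_size)((height_in + 2u * filt_pad_y - *filt_height_dilated) / filt_stride_y + 1u);
}
#endif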
/*!
* @brief Handles 2D depthwise convolution with float input, float output and
* float weights
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void forward_lite_dw_if32of32wf32(const ai_float *pDataIn_init,
ai_float *pDataOut_init,
const ai_ptr_const pWeights_init,
const ai_ptr_const pBias_init,
const ai_size n_channel_in,
const ai_size n_channel_out,
const ai_size width_in,
const ai_size height_in,
const ai_size width_out,
const ai_size height_out,
const ai_size filt_width,
const ai_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_size filt_height_dilated,
const ai_size filt_width_dilated,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_size n_groups);
/*!
* @brief Handles 2D grouped convolution with float input, float output and
* float weights
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void forward_lite_conv2d_if32of32wf32_group(const ai_float *pDataIn_init,
ai_float *pDataOut_init,
const ai_ptr_const pWeights_init,
const ai_ptr_const pBias_init,
const ai_size n_channel_in,
const ai_size n_channel_out,
const ai_size width_in,
const ai_size height_in,
const ai_size width_out,
const ai_size height_out,
const ai_size filt_width,
const ai_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_size filt_height_dilated,
const ai_size filt_width_dilated,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_size n_groups);
/*!
* @brief Handles dilated conv2d convolutions (valid padding)
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void
forward_lite_conv2d_dilated_sssa8_ch(const ai_i8 *pData_in,
const ai_u16 dim_im_in_x,
const ai_u16 dim_im_in_y,
const ai_u16 n_channel_in,
const ai_i8 *pWeights,
const ai_u16 n_channel_out,
const ai_u16 dim_kernel_x,
const ai_u16 dim_kernel_y,
const ai_u16 stride_x,
const ai_u16 stride_y,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_i32 *pBias,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_layer_format_type out_ch_format,
ai_i8 *pData_out,
const ai_u16 dim_im_out_x,
const ai_u16 dim_im_out_y,
ai_u32 height_loop_cnt,
const ai_u16 weights_prefetch_enabled,
ai_i32 scratch_size,
ai_i16 *pBuffer_a);
/*!
* @brief Handles conv2d convolutions (valid padding) with number of channels >= 8
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void
forward_lite_conv2d_deep_sssa8_ch(const ai_i8 *pData_in,
const ai_u16 dim_im_in_x,
const ai_u16 dim_im_in_y,
const ai_u16 n_channel_in,
const ai_i8 *pWeights,
const ai_u16 n_channel_out,
const ai_u16 dim_kernel_x,
const ai_u16 dim_kernel_y,
const ai_u16 stride_x,
const ai_u16 stride_y,
const ai_i32 *pBias,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_layer_format_type out_ch_format,
ai_i8 *pData_out,
const ai_u16 dim_im_out_x,
const ai_u16 dim_im_out_y,
ai_u32 height_loop_cnt,
const ai_u16 weights_prefetch_enabled,
ai_i32 scratch_size,
ai_i16 *pBuffer_a);
/*!
* @brief Handles conv2d convolutions (valid padding) with number of channels >= 8
* Special forward function for 3x3 kernels and Stride = 1
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void
forward_lite_conv2d_deep_3x3_sssa8_ch(const ai_i8 *pData_in,
const ai_u16 dim_im_in_x,
const ai_u16 dim_im_in_y,
const ai_u16 n_channel_in,
const ai_i8 *pWeights,
const ai_u16 n_channel_out,
const ai_i32 *pBias,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_layer_format_type out_ch_format,
ai_i8 *pData_out,
const ai_u16 dim_im_out_x,
const ai_u16 dim_im_out_y,
ai_u32 height_loop_cnt,
ai_i32 scratch_size,
ai_i16 *pBuffer_a);
/*!
* @brief Handles conv2d convolutions with same padding or with number of channels < 8
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void
forward_lite_conv2d_sssa8_ch(const ai_i8 *pData_in,
const ai_u16 dim_im_in_x,
const ai_u16 dim_im_in_y,
const ai_u16 n_channel_in,
const ai_i8 *pWeights,
const ai_u16 n_channel_out,
const ai_u16 dim_kernel_x,
const ai_u16 dim_kernel_y,
const ai_u16 stride_x,
const ai_u16 stride_y,
const ai_u16 padding_x,
const ai_u16 padding_y,
const ai_i32 *pBias,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_layer_format_type out_ch_format,
ai_i8 *pData_out,
const ai_u16 dim_im_out_x,
const ai_u16 dim_im_out_y,
const ai_u16 weights_prefetch_enabled,
ai_i32 scratch_size,
ai_i16 *pBuffer_a);
/*!
* @brief Handles rgb conv2d convolutions
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void
forward_lite_conv2d_rgb_sssa8_ch(const ai_i8 *pData_in,
const ai_u16 dim_im_in,
const ai_i8 *pWeights,
const ai_u16 n_channel_out,
const ai_u16 dim_kernel,
const ai_u16 padding,
const ai_u16 stride,
const ai_i32 *pBias,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_layer_format_type out_ch_format,
ai_i8 *pData_out,
const ai_u16 dim_im_out,
ai_i32 scratch_size,
ai_i16 *pBuffer_a);
/*!
* @brief Handles 2D convolution with float input, float output and
* float weights with pool fused
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void forward_lite_conv2d_if32of32wf32_pool(const ai_float *pDataIn_init,
ai_float *pDataOut_init,
const ai_float * pWeights_init,
const ai_float *pBias_init,
ai_float *pScratch_init,
const ai_short_size n_channel_in,
const ai_short_size n_channel_out,
const ai_short_size width_in,
const ai_short_size height_in,
const ai_short_size width_out,
const ai_short_size height_out,
const ai_short_size filt_width,
const ai_short_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_short_size filt_height_dilated,
const ai_short_size filt_width_dilated,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_short_size n_groups,
const ai_short_size width_conv_out,
const ai_short_size height_conv_out,
ai_handle pool_func,
const ai_short_size pool_width,
const ai_short_size pool_height,
const ai_short_size pool_stride_x,
const ai_short_size pool_stride_y,
const ai_short_size pool_pad_x,
const ai_short_size pool_pad_y);
/*!
* @brief Handles 2D depthwise convolution with float input, float output and
* float weights with pool fused
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void forward_lite_dw_if32of32wf32_pool(const ai_float *pDataIn_init,
ai_float *pDataOut_init,
const ai_float *pWeights_init,
const ai_float *pBias_init,
ai_float *pScratch_init,
const ai_short_size n_channel_in,
const ai_short_size n_channel_out,
const ai_short_size width_in,
const ai_short_size height_in,
const ai_short_size width_out,
const ai_short_size height_out,
const ai_short_size filt_width,
const ai_short_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_short_size filt_height_dilated,
const ai_short_size filt_width_dilated,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_short_size n_groups,
const ai_short_size width_conv_out,
const ai_short_size height_conv_out,
ai_handle pool_func,
const ai_short_size pool_width,
const ai_short_size pool_height,
const ai_short_size pool_stride_x,
const ai_short_size pool_stride_y,
const ai_short_size pool_pad_x,
const ai_short_size pool_pad_y);
/*!
* @brief Handles 2D grouped convolution with float input, float output and
* float weights with pool fused
* @ingroup lite_conv2d
*/
LITE_API_ENTRY
void forward_lite_conv2d_if32of32wf32_group_pool(const ai_float *pDataIn_init,
ai_float *pDataOut_init,
const ai_float *pWeights_init,
const ai_float *pBias_init,
ai_float *pScratch_init,
const ai_short_size n_channel_in,
const ai_short_size n_channel_out,
const ai_short_size width_in,
const ai_short_size height_in,
const ai_short_size width_out,
const ai_short_size height_out,
const ai_short_size filt_width,
const ai_short_size filt_height,
const ai_u16 filt_pad_x,
const ai_u16 filt_pad_y,
const ai_u16 filt_stride_x,
const ai_u16 filt_stride_y,
const ai_short_size filt_height_dilated,
const ai_short_size filt_width_dilated,
const ai_u16 dilation_x,
const ai_u16 dilation_y,
const ai_short_size n_groups,
const ai_short_size width_conv_out,
const ai_short_size height_conv_out,
ai_handle pool_func,
const ai_short_size pool_width,
const ai_short_size pool_height,
const ai_short_size pool_stride_x,
const ai_short_size pool_stride_y,
const ai_short_size pool_pad_x,
const ai_short_size pool_pad_y);
#endif /*LITE_CONV2D_H*/
| 18,200 |
C
| 49.418282 | 86 | 0.398407 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_formats_converters.h
|
/**
******************************************************************************
* @file layers_formats_converters.h
* @author AST Embedded Analytics Research Platform
* @brief header file of formats converters layers
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_FORMATS_CONVERTERS_H
#define LAYERS_FORMATS_CONVERTERS_H
#pragma once
#include "layers_common.h"
/*!
* @defgroup layers_formats_converters Formats Converters Layers Definition
* @brief this group implements formats converter layers (cast, etc.)
*
*/
AI_API_DECLARE_BEGIN
/*!
* @struct ai_layer_cast
* @ingroup layers_formats_converters
* @brief C Implementation of cast layer
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_cast_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_array_format to_format; /*!< cast output format */
} ai_layer_cast;
/*****************************************************************************/
/* Forward Functions Section */
/*****************************************************************************/
/*!
* @brief forward function for cast layer.
 * @ingroup layers_formats_converters
* @param layer template layer as an opaque pointer
*/
AI_INTERNAL_API
void forward_cast(ai_layer* layer);
AI_API_DECLARE_END
#endif /*LAYERS_FORMATS_CONVERTERS_H*/
| 1,862 |
C
| 29.048387 | 80 | 0.50913 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_pool.h
|
/**
******************************************************************************
* @file layers_pool.h
* @author AST Embedded Analytics Research Platform
* @brief header file of AI platform pooling layers datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_POOL_H
#define LAYERS_POOL_H
#pragma once
#include "layers_common.h"
#include "lite_maxpool_dqnn.h"
#include "lite_pool_f32.h"
/*!
* @defgroup layers_pool Pooling Layers Definitions
* @brief definition
*
*/
AI_API_DECLARE_BEGIN
/*!
* @struct ai_layer_pool
* @ingroup layers_pool
* @brief Pooling layer
*
* The type of pooling function is handled by the specific forward function
* @ref forward_pool
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pool_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_shape_2d pool_size; /*!< pooling size */
ai_shape_2d pool_stride; /*!< pooling stride */
ai_shape pool_pad; /*!< pooling pad, y,x border sizes */
ai_u8 count_include_pad; /*!< include pad flag */
} ai_layer_pool;
/*!
 * @brief Max Pooling on an 8/16-bit fixed point data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to output data
*/
AI_INTERNAL_API
void pool_func_mp_array_fixed(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
/*!
 * @brief Max Pooling on an 8-bit integer quantized data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to output data
*/
AI_INTERNAL_API
void pool_func_mp_array_integer(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
/*!
 * @brief Max Pooling on a signed 8-bit integer quantized data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to output data
*/
AI_INTERNAL_API
void pool_func_mp_array_integer_INT8(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
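/*
 * Editorial note, not part of the ST API: a purely illustrative call of the
 * signed 8-bit max-pooling helper declared above (these AI_INTERNAL_API
 * helpers are normally driven by the generated forward functions). Buffer
 * sizes, data layout and the output-dimension relation used here are
 * assumptions for the example only.
 */
#if 0
static void example_maxpool_int8(void)
{
  static ai_i8 feat_in[16 * 8 * 8];   /* 16 channels, 8x8 input (example layout) */
  static ai_i8 feat_out[16 * 4 * 4];  /* 2x2 kernel, stride 2, no padding -> 4x4 */

  const ai_u16 dim_im_out_x = (8 - 2) / 2 + 1;  /* = 4 */
  const ai_u16 dim_im_out_y = (8 - 2) / 2 + 1;  /* = 4 */

  pool_func_mp_array_integer_INT8((ai_handle)feat_in,
                                  8, 8, 16,    /* input width, height, channels */
                                  2, 2,        /* kernel width, height          */
                                  0, 0,        /* padding x, y                  */
                                  2, 2,        /* stride x, y                   */
                                  dim_im_out_x, dim_im_out_y,
                                  (ai_handle)feat_out);
}
#endif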
/*!
 * @brief Max Pooling on an unsigned 8-bit integer quantized data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to output data
*/
AI_INTERNAL_API
void pool_func_mp_array_integer_UINT8(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
/*!
 * @brief Average Pooling on an 8/16-bit fixed point data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to scratch memory
*/
AI_INTERNAL_API
void pool_func_ap_array_fixed(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
/*!
 * @brief Average Pooling on an 8-bit integer quantized data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to scratch memory
*/
AI_INTERNAL_API
void pool_func_ap_array_integer(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
/*!
 * @brief Average Pooling on a signed 8-bit integer quantized data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to scratch memory
*/
AI_INTERNAL_API
void pool_func_ap_array_integer_INT8(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
/*!
 * @brief Average Pooling on an unsigned 8-bit integer quantized data array
* @ingroup layers_pool
* @param in opaque handler to input data to process
* @param dim_im_in_x input feature map width
* @param dim_im_in_y input feature map height
* @param ch_im_in number of input channels
* @param dim_kernel_x kernel width
* @param dim_kernel_y kernel height
* @param padding_x right padding value
* @param padding_y top padding value
* @param stride_x stride value on x dimension
* @param stride_y stride value on y dimension
* @param dim_im_out_x output feature map width
* @param dim_im_out_y output feature map height
* @param out opaque handler to scratch memory
*/
AI_INTERNAL_API
void pool_func_ap_array_integer_UINT8(ai_handle in,
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
const ai_u16 ch_im_in,
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
const ai_u16 padding_x, const ai_u16 padding_y,
const ai_u16 stride_x, const ai_u16 stride_y,
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
ai_handle out);
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Computes the activations of a max pooling layer.
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_mp(ai_layer* layer);
/*!
* @brief Computes the activations of a fixed point max pooling layer.
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_mp_fixed(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized max pooling layer.
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_mp_integer(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized max pooling layer
* with int8 I/O
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_mp_integer_INT8(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized max pooling layer
* with uint8 I/O
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_mp_integer_UINT8(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized max pooling layer
* with int16 I/O
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_mp_integer_INT16(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized max pooling layer
* with uint16 I/O
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_mp_integer_UINT16(ai_layer *pLayer);
/*!
* @brief Computes the activations of an average pooling layer.
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_ap(ai_layer* layer);
/*!
* @brief Computes the activations of a fixed point average pooling layer.
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_ap_fixed(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized average pooling layer.
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_ap_integer(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized average pooling layer
* with int8 I/O
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_ap_integer_INT8(ai_layer *pLayer);
/*!
* @brief Computes the activations of an integer-quantized average pooling layer
* with uint8 I/O
* @ingroup layers_pool
* @param layer the pooling (pool) layer
*/
AI_INTERNAL_API
void forward_ap_integer_UINT8(ai_layer *pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_POOL_H*/
| 14,171 |
C
| 36.294737 | 81 | 0.624656 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_pad_generic.h
|
/**
******************************************************************************
* @file layers_pad_generic.h
* @author Marco Forleo
* @brief header file of AI platform padding generic datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2022 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_PADDING_DQNN_H
#define LAYERS_PADDING_DQNN_H
#pragma once
#include "layers_generic.h"
/*!
 * @defgroup layers_pad_generic Generic Padding Layers Definitions
 * @brief Definitions of the generic padding forward functions
*
*/
AI_API_DECLARE_BEGIN
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Handles generic padding in constant mode
* @ingroup layers_generic_dqnn
* @param layer pad layer
*/
AI_INTERNAL_API
void forward_pad_constant(ai_layer *pLayer);
/*!
* @brief Handles generic padding in edge mode
* @ingroup layers_generic_dqnn
* @param layer pad layer
*/
AI_INTERNAL_API
void forward_pad_edge(ai_layer *pLayer);
/*!
* @brief Handles generic padding in reflect mode
* @ingroup layers_generic_dqnn
* @param layer pad layer
*/
AI_INTERNAL_API
void forward_pad_reflect(ai_layer *pLayer);
/*!
* @brief Handles generic padding in constant mode Channel 1st 8bit
* @ingroup layers_generic_dqnn
* @param layer pad layer
*/
AI_INTERNAL_API
void forward_pad_8bit_ch1st_3x3_constant(ai_layer* pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_PAD_GENERIC_H*/
| 2,034 |
C
| 25.776315 | 80 | 0.525074 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_ml_treeensembleclassifier.h
|
/**
******************************************************************************
* @file layers_ml_treeensembleclassifier.h
* @author AIS
* @brief header file of AI platform TreeEnsembleClassifier datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021-2022 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_TREE_ENSEMBLE_CLASSIFIER_H
#define LAYERS_TREE_ENSEMBLE_CLASSIFIER_H
#pragma once
#include "layers_common.h"
#include "layers_nl.h"
/*!
 * @defgroup layers_ml_treensembleclassifier TreeEnsembleClassifier Layers Definitions
 * @brief Definitions of the TreeEnsembleClassifier layer datatypes and forward functions
*
*/
AI_API_DECLARE_BEGIN
/* Error return codes */
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_NO 0
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_WRONG_IDX_FMT -1
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNFOUND_LEAF -2
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNSUPPORTED_BRANCH -3
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNSUPPORTED_FEATURE -4
#define AI_TREE_ENSEMBLE_CLASSIFIER_DEPTH_MAX 10000
/* Type of condition in the TreeEnsembleClassifier*/
typedef enum
{
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_LT_IDX = 0,
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_LEQ_IDX,
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_EQ_IDX,
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_END,
} ai_tree_ensenble_classifier_branch_e;
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tree_ensemble_classifier_ {
AI_LAYER_COMMON_FIELDS_DECLARE
func_nl nl_func;
uint8_t all_weights_are_positive;
ai_float nodes_values_scale;
ai_float nodes_values_offset;
ai_float class_weights_scale;
ai_float class_weights_offset;
} ai_layer_tree_ensemble_classifier;
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Decodes the TreeEnsembleClassifier ML operator.
 * @ingroup layers_ml_treensembleclassifier
* @param layer tree ensemble classifier layer
*/
AI_INTERNAL_API
void forward_tree_ensemble_classifier(ai_layer *pLayer);
AI_INTERNAL_API
ai_i32 decodeEstimator_LEQ_8Bits(const ai_float *pDataIn,
ai_float *pOutDataScores,
const ai_u8 *pFeatureIdxForEstimator,
const ai_float *pValuesForEstimator,
const ai_u8 *pTrueIdxForEstimator,
const ai_u8 *pFalseIdxForEstimator,
const ai_handle pClassWeightsForEstimator,
const ai_array_format classWeightsFormat,
const ai_u8 *pClassNodeIdsForEstimator,
const ai_u16 nbClassWithCurrentEstimator,
const ai_u8 *pClassIdsForEstimator);
AI_INTERNAL_API
ai_i32 decodeEstimator_LEQ_16Bits(const ai_float *pDataIn,
ai_float *pOutDataScores,
const ai_u8 *pFeatureIdxForEstimator,
const ai_float *pValuesForEstimator,
const ai_u16 *pTrueIdxForEstimator,
const ai_u16 *pFalseIdxForEstimator,
ai_handle pClassWeightsForEstimator,
const ai_array_format classWeightsFormat,
const ai_u16 *pClassNodeIdsForEstimator,
const ai_u16 nbClassWithCurrentEstimator,
const ai_u16 *pClassIdsForEstimator);
AI_API_DECLARE_END
#endif /*LAYERS_TREE_ENSEMBLE_CLASSIFIER_H*/
| 4,238 |
C
| 36.513274 | 80 | 0.542237 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_dense_is8os8ws8.h
|
/**
******************************************************************************
* @file lite_dense_is8os8ws8.h
* @author Marco Forleo
* @brief header file of AI platform lite dense kernel datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2022 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LITE_DENSE_IS8OS8WS8_H
#define LITE_DENSE_IS8OS8WS8_H
#pragma once
#include "ai_lite_interface.h"
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Forward function for a dense layer with signed input,
* signed output and signed weights all at 8 bits.
* @ingroup lite_dense_is8os8ws8
* @param input The pointer to input buffer.
* @param output The pointer to output buffer.
* @param weights The pointer to weights.
* @param bias The pointer to bias (NULL if not available).
* @param in_zeropoint The value of the zero point of the input.
 * @param out_zeropoint The value of the zero point of the output.
 * @param n_channel_in The number of channels of the input.
 * @param n_channel_out The number of channels of the output, i.e.,
 *        the number of dense hidden neurons.
 * @param n_pixels Total number of pixels.
 * @param in_scale The scale of the input.
 * @param out_scale The scale of the output.
 * @param Wt_scale The scale of the weights.
 * @param pBuffer_a The pointer to the scratch buffer.
*/
LITE_API_ENTRY
void forward_lite_dense_is8os8ws8(ai_i8 * pDataOut,
const ai_i8 *pDataIn,
const ai_i8 *pWeights,
const ai_i32 *pBias,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_u16 n_channel_in,
const ai_u16 n_channel_out,
const ai_size n_pixels,
const ai_float in_scale,
const ai_float out_scale,
const ai_float Wt_scale,
ai_i16 *pBuffer_a);
void forward_lite_dense_is8os8ws8_ch(ai_i8 * pDataOut,
const ai_i8 *pDataIn,
const ai_i8 *pWeights,
const ai_i32 *pBias,
const ai_i8 in_zeropoint,
const ai_i8 out_zeropoint,
const ai_u16 n_channel_in,
const ai_u16 n_channel_out,
const ai_size n_pixels,
const ai_float in_scale,
const ai_float out_scale,
const ai_float *pWt_scale,
ai_i16 *pBuffer_a);
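/*
 * Editorial note, not part of the ST API: an illustrative call of the
 * per-tensor quantized dense kernel declared above, using the usual affine
 * convention real = scale * (q - zeropoint). The buffer and scratch sizes and
 * the quantization parameters chosen here are assumptions for the example,
 * not requirements stated by this header.
 */
#if 0
static void example_dense_s8(void)
{
  enum { N_IN = 32, N_OUT = 10, N_PIX = 1 };

  static const ai_i8  weights[N_OUT * N_IN] = { 0 };  /* placeholder weights */
  static const ai_i32 bias[N_OUT]           = { 0 };  /* placeholder bias    */
  static ai_i8  data_in[N_IN * N_PIX];
  static ai_i8  data_out[N_OUT * N_PIX];
  static ai_i16 scratch[N_IN];                        /* scratch size is an assumption */

  forward_lite_dense_is8os8ws8(data_out, data_in, weights, bias,
                               /* in_zeropoint  */ 0,
                               /* out_zeropoint */ 0,
                               N_IN, N_OUT, N_PIX,
                               /* in_scale  */ 0.05f,
                               /* out_scale */ 0.10f,
                               /* Wt_scale  */ 0.02f,
                               scratch);
}
#endif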
#endif /*LITE_DENSE_IS8OS8WS8_H*/
| 3,465 |
C
| 44.605263 | 80 | 0.43088 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/ai_platform_interface.h
|
/**
******************************************************************************
* @file ai_platform_interface.h
* @author AST Embedded Analytics Research Platform
* @brief Definitions of AI platform interface APIs types
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef AI_PLATFORM_INTERFACE_H
#define AI_PLATFORM_INTERFACE_H
#pragma once
#include "ai_platform.h"
#include "datatypes_network.h"
#include "ai_datatypes.h"
#include "ai_datatypes_format.h"
/*!
* @defgroup datatypes_interface Interface Datatypes
* @brief Data structures and defines used to implement neural networks
*/
/******************************************************************************/
#define AI_ERROR_TRAP(net_, type_, code_) \
ai_platform_network_set_error((net_), AI_CONCAT(AI_ERROR_,type_), \
AI_CONCAT(AI_ERROR_CODE_,code_))
/*! AI_PTR HANDLERS SECTION ************************************/
#define AI_PTR(ptr_) AI_CAST(ai_ptr, ptr_)
#define AI_PTR_CONST(ptr_) AI_CAST(ai_ptr_const, ptr_)
/*! STATIC ARRAYS ALLOCATOR SECTION ************************************/
#define AI_PACK_STORAGE_ARRAY(type_, dim_, ...) \
(type_[dim_]) { AI_PACK(__VA_ARGS__) }
/*! AI_STORAGE_KLASS SECTION ************************************/
#define AI_STORAGE_KLASS_PACK(type_, dim_, ...) \
AI_PACK_STORAGE_ARRAY(type_, dim_, __VA_ARGS__)
#define AI_STORAGE_KLASS_INIT(type_, size_, data_) \
{ \
.type = (type_), \
.size = (size_), \
.data = (ai_handle)(data_), \
}
/*!
* @enum ai_storage_klass_type
* @ingroup ai_platform_interface
* @brief @ref ai_storage_class types enum
*/
typedef enum {
AI_STORAGE_KLASS_NONE = 0x00,
AI_STORAGE_KLASS_SHAPE = 0x01,
AI_STORAGE_KLASS_STRIDE = 0x02,
} ai_storage_klass_type;
/*!
* @struct ai_storage_klass
* @ingroup ai_platform_interface
 * @brief Generic "template" klass for generic storage array containers;
 * from this klass several typed containers are derived (see e.g. @ref ai_shape)
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_storage_klass_s {
ai_u32 type : 8;
ai_u32 size : 24;
ai_handle data;
} ai_storage_klass;
AI_PACKED_STRUCT_END
/*! AI_SHAPES SECTION ************************************/
#define AI_SHAPE_MAX_DIMENSION (6)
#define AI_SHAPE_2D_INIT(w_, h_) \
{ .data = { (w_), (h_) } }
#define AI_SHAPE_INIT(dim_, ...) \
AI_STORAGE_KLASS_INIT( \
AI_STORAGE_KLASS_SHAPE, \
dim_, \
AI_STORAGE_KLASS_PACK(ai_shape_dimension, dim_, ## __VA_ARGS__))
#define AI_SHAPE_INIT_FROM_BUFFER(dim_, buffer_) \
AI_STORAGE_KLASS_INIT( \
AI_STORAGE_KLASS_SHAPE, \
dim_, \
buffer_)
#define AI_SHAPE_ALLOCATE_STATIC(num_dim_) \
AI_SHAPE_INIT((num_dim_), 0)
typedef ai_u8 ai_shape_idx;
/*!
* @struct ai_shape
* @ingroup ai_platform_interface
* @brief Dimensions for generic 4D tensors
*/
typedef ai_storage_klass ai_shape;
/*! AI_STRIDES HANDLERS SECTION ************************************/
#define AI_STRIDE_INIT(dim_, ...) \
AI_STORAGE_KLASS_INIT( \
AI_STORAGE_KLASS_STRIDE, \
dim_, \
AI_STORAGE_KLASS_PACK(ai_stride_dimension, dim_, ## __VA_ARGS__))
#define AI_STRIDE_INIT_FROM_BUFFER(dim_, buffer_) \
AI_STORAGE_KLASS_INIT( \
AI_STORAGE_KLASS_STRIDE, \
dim_, \
buffer_)
#define AI_STRIDE_ALLOCATE_STATIC(num_dims_) \
AI_STRIDE_INIT((num_dims_), 0)
/*!
* @struct ai_stride
* @ingroup ai_platform_interface
* @brief Stride dimensions for generic 4D tensors (in number of elements)
*/
typedef ai_storage_klass ai_stride;
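/*
 * Editorial note, not part of the ST API: the static initializer macros above
 * can be used as sketched below; the variable names and dimension values are
 * arbitrary examples.
 */
#if 0
/* A 4D shape backed by a statically allocated ai_shape_dimension array. */
static ai_shape example_shape = AI_SHAPE_INIT(4, 1, 16, 8, 8);

/* The same pattern applies to strides. */
static ai_stride example_stride = AI_STRIDE_INIT(4, 1, 1, 16, 128);
#endif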
/*! BASIC_TYPES HANDLERS SECTION ************************************/
#define AI_SIZE(value_) \
AI_CAST(ai_size, value_)
/*! AI_KLASS_OBJ HANDLERS SECTION ************************************/
#define AI_KLASS_OBJ(obj_) \
AI_CAST(ai_klass_obj, obj_)
/*! GENERIC HANDLERS SECTION ************************************/
#define AI_OBJ_DATA(obj_, type_) \
AI_CAST(type_, (obj_)->data)
/*! AI_BUFFER HANDLERS SECTION ************************************/
#define AI_BUFFER_OBJ(ptr_) \
AI_CAST(ai_buffer*, ptr_)
/*! AI_ARRAY HANDLERS SECTION ************************************/
#define AI_ARRAY_OBJ(ptr_) \
AI_CAST(ai_array*, ptr_)
#define AI_ARRAY_OBJ_INIT_STATIC(type_, format_, size_, ...) { \
.format = AI_FMT_OBJ(format_), \
.size = (ai_array_size)(size_), \
.data = (ai_ptr)((type_[]){ __VA_ARGS__ }), \
.data_start = AI_PTR(0), \
}
#define AI_ARRAY_OBJ_INIT(format_, data_, data_start_, size_) { \
.format = AI_FMT_OBJ(format_), \
.size = AI_CAST(ai_array_size, size_), \
.data = AI_PTR(data_), \
.data_start = AI_PTR(data_start_) }
#define AI_ARRAY_OBJ_DECLARE_STATIC(name_, type_, format_, attr_, size_, ...) \
AI_ALIGNED(4) \
attr_ ai_array name_ = AI_ARRAY_OBJ_INIT_STATIC(type_, format_, size_, __VA_ARGS__);
#define AI_ARRAY_OBJ_DECLARE(name_, format_, data_, data_start_, size_, attr_) \
AI_ALIGNED(4) \
attr_ ai_array name_ = AI_ARRAY_OBJ_INIT(format_, data_, data_start_, size_);
/********************************* ai_array macros ***************************/
#define AI_PACK_ARRAYS(...) \
(ai_array[]) { AI_PACK(__VA_ARGS__) }
#define AI_ARRAY_LIST_OBJ_INIT(arrays_ptr_) \
((ai_array*)(arrays_ptr_))
#define AI_ARRAY_LIST_FLAGS(list_) \
((list_) ? (list_)->flags : 0x0)
#define AI_ARRAY_LIST_SIZE(list_) \
((list_) ? (list_)->size : 0)
#define AI_ARRAY_LIST_DATA(list_, pos_) \
((list_) ? &((list_)->data[pos_]) : NULL)
/********************************* ai_tensor macros **************************/
#define AI_TENSOR_OBJ(obj_) \
AI_CAST(ai_tensor*, obj_)
#define AI_TENSOR_INFO_OBJ_INIT(id_, flags_, data_size_) { \
.id = (id_), \
.flags = (flags_), \
.data_size = (data_size_) \
}
#define AI_TENSOR_OBJ_INIT(id_, flags_, shape_, stride_, arrays_size_, arrays_ptr_, klass_obj_) { \
.klass = (ai_klass_obj)(klass_obj_), \
.info = AI_TENSOR_INFO_OBJ_INIT(id_, flags_, arrays_size_), \
.shape = shape_, \
.stride = stride_, \
.data = AI_ARRAY_LIST_OBJ_INIT(AI_PACK(arrays_ptr_)), \
}
#define AI_TENSOR_OBJ_DECLARE(name_, attr_, id_, flags_, shape_, stride_, \
arrays_size_, arrays_ptr_, klass_obj_) \
AI_ALIGNED(4) \
attr_ ai_tensor name_ = AI_TENSOR_OBJ_INIT(id_, flags_, AI_PACK(shape_), AI_PACK(stride_), \
arrays_size_, AI_PACK(arrays_ptr_), AI_PACK(klass_obj_));
/********************************* TENSOR STATE MACROS ***********************/
#define AI_TENSOR_STATE_OBJ_INIT(end_ptr_ , curr_ptr_, stride_, size_) \
{ (end_ptr_), (curr_ptr_), (stride_), (size_) }
/********************************* TENSOR LIST MACROS ************************/
#if (AI_TOOLS_API_VERSION <= AI_TOOLS_API_VERSION_1_3)
#pragma message ("Including deprecated AI_TENSOR_LIST_ENTRY, AI_TENSOR_LIST_EMPTY, AI_TENSOR_LIST_IO_ENTRY")
AI_DEPRECATED
#define AI_TENSOR_LIST_EMPTY \
AI_TENSOR_LIST_OBJ_EMPTY
AI_DEPRECATED
#define AI_TENSOR_LIST_ENTRY(...) \
AI_TENSOR_LIST_OBJ_INIT(AI_FLAG_NONE, AI_NUMARGS(__VA_ARGS__), __VA_ARGS__)
AI_DEPRECATED
#define AI_TENSOR_LIST_IO_ENTRY(flags_, size_, ...) \
AI_TENSOR_LIST_IO_OBJ_INIT(flags_, size_, __VA_ARGS__)
#endif /* AI_TOOLS_API_VERSION_1_3 */
#define AI_TENSOR_LIST_OBJ_INIT(flags_, size_, ...) \
{ .size = (size_), .flags = (flags_), \
.tensor = (ai_tensor*[]) { __VA_ARGS__ }, .info = NULL \
}
#define AI_TENSOR_LIST_OBJ_EMPTY \
{ .size = 0, .flags = AI_FLAG_NONE, \
.tensor = (ai_tensor*[]) { NULL }, .info = NULL \
}
#define AI_TENSOR_LIST_OBJ_DECLARE(name_, attr_, flags_, size_, ...) \
AI_ALIGNED(4) \
attr_ ai_tensor_list name_ = AI_TENSOR_LIST_OBJ_INIT( \
flags_, size_, __VA_ARGS__);
/********************************* TENSOR LIST I/O MACROS ********************/
#define AI_TENSOR_LIST_IO_OBJ_INIT(flags_, size_, ...) \
{ .size = (size_), .flags = (flags_), \
.tensor = (ai_tensor*[]) { __VA_ARGS__ }, \
.info = (ai_tensor_list_info[1]) { { \
.buffer = (ai_buffer[size_]){AI_STRUCT_INIT}, \
.state = (ai_tensor_state[size_]){AI_STRUCT_INIT}, \
.meta = (ai_buffer_meta_info[size_]){AI_STRUCT_INIT} \
} } \
}
/********************************* TENSOR CHAIN MACROS ***********************/
#define AI_TENSOR_CHAIN_OBJ_INIT(flags_, size_, ...) \
{ .size = (size_), .flags = (flags_), \
.chain = (ai_tensor_list[]){ __VA_ARGS__ } }
#define AI_TENSOR_CHAIN_OBJ_DECLARE(name_, attr_, size_, ...) \
AI_ALIGNED(4) \
attr_ ai_tensor_chain name_ = \
AI_TENSOR_CHAIN_OBJ_INIT(AI_FLAG_NONE, size_, __VA_ARGS__);
/********************************* TENSOR CHAIN I/O MACROS *******************/
#define AI_TENSOR_CHAIN_IO_OBJ_INIT(flags_, in_tensor_list_, out_tensor_list_) \
{ .chain = (ai_tensor_list[]){ in_tensor_list_, out_tensor_list_ }, \
.size = 2, .flags = (flags_) }
#define AI_TENSOR_CHAIN_IO_OBJ_DECLARE( \
name_, attr_, flags_, in_tensor_list_, out_tensor_list_) \
AI_ALIGNED(4) \
attr_ ai_tensor_chain_io name_ = \
AI_TENSOR_CHAIN_IO_OBJ_INIT(flags_, in_tensor_list_, out_tensor_list_);
/******************************* NETWORK SECTION ****************************/
#define AI_NETWORK_OBJ(obj_) \
((ai_network*)(obj_))
#if (AI_TOOLS_API_VERSION < AI_TOOLS_API_VERSION_1_5)
AI_DEPRECATED
#define AI_NETWORK_OBJ_INIT( \
weights_buffer_, activations_buffer_, \
in_tensor_list_ptr_, out_tensor_list_ptr_, \
in_node_ptr_, signature_, klass_obj_) { \
.magic = 0x0, \
.signature = signature_, \
.klass = AI_KLASS_OBJ(klass_obj_), \
.flags = AI_FLAG_NONE, \
.error = AI_ERROR_INIT(NONE, NONE), \
.n_batches = 0, \
.batch_id = 0, \
.buffers = AI_NETWORK_BUFFERS_INIT( \
AI_BUFFER_ARRAY_OBJ_INIT_STATIC(AI_FLAG_NONE, 1, AI_PACK(weights_buffer_)), \
AI_BUFFER_ARRAY_OBJ_INIT_STATIC(AI_FLAG_NONE, 1, AI_PACK(activations_buffer_))), \
.tensors = AI_TENSOR_CHAIN_IO_OBJ_INIT(AI_FLAG_NONE, \
AI_PACK(in_tensor_list_ptr_), \
AI_PACK(out_tensor_list_ptr_)), \
.input_node = AI_NODE_OBJ(in_node_ptr_), \
.current_node = AI_NODE_OBJ(NULL), \
.on_node_exec = NULL, \
.data_exec = NULL, \
.lite_cb = NULL, \
}
#else
#define AI_NETWORK_OBJ_INIT( \
weights_buffer_, activations_buffer_, \
in_tensor_list_ptr_, out_tensor_list_ptr_, \
in_node_ptr_, signature_, klass_obj_) { \
.magic = 0x0, \
.signature = signature_, \
.klass = AI_KLASS_OBJ(klass_obj_), \
.flags = AI_FLAG_NONE, \
.error = AI_ERROR_INIT(NONE, NONE), \
.n_batches = 0, \
.batch_id = 0, \
.buffers = AI_NETWORK_BUFFERS_INIT(AI_PACK(weights_buffer_), \
AI_PACK(activations_buffer_)), \
.tensors = AI_TENSOR_CHAIN_IO_OBJ_INIT(AI_FLAG_NONE, \
AI_PACK(in_tensor_list_ptr_), \
AI_PACK(out_tensor_list_ptr_)), \
.input_node = AI_NODE_OBJ(in_node_ptr_), \
.current_node = AI_NODE_OBJ(NULL), \
.on_node_exec = NULL, \
.data_exec = NULL, \
.lite_cb = NULL, \
}
#endif // AI_TOOLS_API_VERSION
#define AI_NETWORK_OBJ_DECLARE( \
name_, attr_, \
weights_buffer_, activations_buffer_, \
in_tensor_list_ptr_, out_tensor_list_ptr_, \
in_node_ptr_, signature_, klass_obj_) \
AI_ALIGNED(4) \
attr_ ai_network name_ = AI_NETWORK_OBJ_INIT( \
AI_PACK(weights_buffer_), \
AI_PACK(activations_buffer_), \
AI_PACK(in_tensor_list_ptr_), \
AI_PACK(out_tensor_list_ptr_), \
(in_node_ptr_), (signature_), (klass_obj_));
#define AI_NETWORK_ACQUIRE_CTX(handle_) \
AI_NETWORK_OBJ(ai_platform_context_acquire(handle_))
/******************************************************************************/
AI_API_DECLARE_BEGIN
/*!
* @typedef ai_version
* @ingroup ai_platform_interface
* @brief Packed representation for @ref ai_platform_version
*/
typedef uint32_t ai_version;
/*!
* @typedef ai_klass_obj
* @ingroup ai_platform_interface
* @brief handler to (private) generic subclass derivatives implementation
*/
typedef void* ai_klass_obj;
/*!
* @typedef ai_ptr
* @ingroup ai_platform_interface
* @brief Byte pointer data addressing
*/
typedef uint8_t* ai_ptr;
/*!
* @typedef ai_ptr_const
* @ingroup ai_platform_interface
* @brief Constant byte pointer data addressing
*/
typedef const uint8_t* ai_ptr_const;
/*!
* @typedef ai_ptr_offset
* @ingroup ai_platform_interface
* @brief byte offset for computing strides
*/
typedef int32_t ai_ptr_offset;
/*!
* @typedef ai_magic
* @ingroup ai_platform_interface
 * @brief magic field to mark internal datastructures
*/
typedef uint32_t ai_magic;
/*!
* @typedef ai_any_ptr
* @ingroup ai_platform_interface
* @brief union for defining any pointer
*/
typedef union {
ai_handle handle;
ai_ptr ptr;
ai_float* float32;
ai_double* float64;
ai_u8* u8;
ai_i8* s8;
ai_u16* u16;
ai_i16* s16;
ai_u32* u32;
ai_i32* s32;
ai_u64* u64;
ai_i64* s64;
} ai_any_ptr;
#define AI_ANY_PTR_INIT(ptr_) \
{ .handle = (ai_handle)(ptr_) }
#define AI_CONTEXT_FIELDS \
ai_magic magic; /*!< magic word to mark valid contexts datastructs*/ \
ai_signature signature; /*!< 32bit signature for network consistency checks */
#define AI_CONTEXT_OBJ(obj) ((ai_context*)(obj))
/*!
* @typedef ai_context
* @ingroup ai_platform_interface
* @brief Abstract internal context header exposed to codegen interface
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_context_ {
AI_CONTEXT_FIELDS
} ai_context;
AI_PACKED_STRUCT_END
/*!
* @enum ai_shape_2d_type
* @ingroup ai_platform_interface
* @brief Codes for the 2D tensor dimensions
*/
typedef enum {
AI_SHAPE_2D_MAX_DIMENSION = 0x2,
AI_SHAPE_2D_HEIGHT = 0x1,
AI_SHAPE_2D_WIDTH = 0x0,
} ai_shape_2d_type;
/*!
* @struct ai_shape_2d
* @ingroup ai_platform_interface
* @brief Dimensions for generic 2D tensors
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_shape_2d_s {
ai_shape_dimension data[AI_SHAPE_2D_MAX_DIMENSION]; /*!< 2D tensor dimensions */
} ai_shape_2d;
AI_PACKED_STRUCT_END
/*!
* @struct ai_array
* @ingroup ai_platform_interface
* @brief Generic flattened array with size
* and (byte) stride of each item
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_array_s {
ai_array_format format; /*!< array format (see @ref ai_array_format) */
  ai_array_size size; /*!< number of elements in the array (NOT number
                           of bytes!). The byte size of the array can be
                           determined using the @ref AI_ARRAY_GET_BYTE_SIZE
                           macro */
ai_ptr data; /*!< pointer to data */
ai_ptr data_start; /*!< pointer to parent's data start address */
} ai_array;
AI_PACKED_STRUCT_END
/*!
* @struct ai_tensor_info
* @ingroup ai_platform_interface
* @brief ai_tensor_info info structure for storing size of the array list,
* tensor dimensionality, etc.
*
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_info_s {
ai_u16 id;
ai_u8 flags;
ai_u8 data_size;
} ai_tensor_info;
AI_PACKED_STRUCT_END
/*!
* @struct ai_tensor
* @ingroup ai_platform_interface
* @brief Generic tensor structure for storing parameters and activations
*
* The data is stored in a flattened array with an implicit order given by the
* reverse order in @ref ai_shape_dimension:
* in_channels, channels, width, height.
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_s {
ai_klass_obj klass; /*!< opaque pointer to klass context */
ai_tensor_info info; /*!< tensor info metadata see @ref ai_tensor_info)*/
ai_shape shape; /*!< tensor shape see @ref ai_shape */
ai_stride stride; /*!< tensor stride see @ref ai_stride */
ai_array* data; /*!< flattened array pointer to tensor data */
} ai_tensor;
AI_PACKED_STRUCT_END
/*!
* @struct ai_tensor_state
* @ingroup ai_platform_interface
* @brief state context for tensor management (used for I/O network tensors)
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_state_s {
ai_ptr end_ptr; /*!< end address of the I/O tensor data buffer */
ai_ptr curr_ptr; /*!< current address of the I/O tensor data buffer (for batching) */
ai_ptr_offset stride; /*!< single batch buffer size (in bytes) */
ai_size size; /*!< total size in bytes of the I/O tensor buffer */
} ai_tensor_state;
AI_PACKED_STRUCT_END
/*!
* @struct ai_tensor_list_info
* @ingroup ai_platform_interface
* @brief info metadata for tensor list management (used for I/O network tensors)
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_list_info_s {
ai_tensor_state* state; /*!< I/O buffer internal pointers state */
ai_buffer* buffer; /*!< I/O buffer pointer */
ai_buffer_meta_info* meta; /*!< I/O buffer meta informations */
} ai_tensor_list_info;
AI_PACKED_STRUCT_END
/********************************* INTEGER QUANTIZATION DATATYPES ************/
#define AI_INTQ_INFO_OBJ_INIT(flags_, scale_ , zeropoint_) { \
.scale = (scale_), \
.zeropoint = (ai_handle)(zeropoint_), \
.flags = (flags_), \
}
#define AI_PACK_INTQ_INFO_LIST(...) \
(ai_intq_info_list[]) { AI_PACK(__VA_ARGS__) }
#define AI_PACK_INTQ_INFO(scale_, zp_) \
(INTQ_CONST ai_intq_info[1]) { { \
.scale = (INTQ_CONST ai_float*) AI_PACK(scale_), \
.zeropoint = (ai_handle) AI_PACK(zp_) \
} }
#define AI_PACK_INTQ_SCALE(...) \
(INTQ_CONST ai_float[]) { AI_PACK(__VA_ARGS__) }
#define AI_PACK_INTQ_ZP(...) \
(INTQ_CONST ai_i8[]) { AI_PACK(__VA_ARGS__) }
#define AI_PACK_UINTQ_ZP(...) \
(INTQ_CONST ai_u8[]) { AI_PACK(__VA_ARGS__) }
#define AI_PACK_INTQ_ZP16(...) \
(INTQ_CONST ai_i16[]) { AI_PACK(__VA_ARGS__) }
#define AI_PACK_UINTQ_ZP16(...) \
(INTQ_CONST ai_u16[]) { AI_PACK(__VA_ARGS__) }
#define AI_INTQ_INFO_LIST_OBJ_INIT(flags_, size_, info_) \
{ \
.flags = (flags_), \
.size = (size_), \
.info = (info_), \
}
#define AI_INTQ_INFO_LIST_OBJ_EMPTY { 0 }
#define AI_INTQ_INFO_LIST_OBJ_DECLARE(name_, attr_, flags_, size_, info_) \
AI_ALIGNED(4) \
attr_ ai_intq_info_list name_ = \
AI_INTQ_INFO_LIST_OBJ_INIT(flags_, size_, AI_PACK(info_));
#define AI_INTQ_INFO_LIST_OBJ_DECLARE_EMPTY(name_, attr_) \
AI_ALIGNED(4) \
attr_ ai_intq_info_list name_ = AI_INTQ_INFO_LIST_OBJ_EMPTY;
/********************************* TENSOR CHAINS DATATYPES *******************/
/*!
* @enum ai_tensor_chain_type
* @ingroup ai_platform_interface
* @brief Enum for the different tensor chains supported in the library
*/
typedef enum {
AI_TENSOR_CHAIN_INPUT = 0x0,
AI_TENSOR_CHAIN_OUTPUT = 0x1,
AI_TENSOR_CHAIN_WEIGHTS = 0x2,
AI_TENSOR_CHAIN_SCRATCH = 0x3,
AI_TENSOR_CHAIN_SIZE
} ai_tensor_chain_type;
/*!
* @struct ai_tensor_list
* @ingroup ai_platform_interface
* @brief list (in form of arrays) of internal nodes tensor pointers
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_list_s {
  ai_u16 size; /*!< number of elements in the tensor list */
  ai_u16 flags; /*!< optional flags to store tensor list attributes */
  ai_tensor** tensor; /*!< array of linked tensor pointers */
  ai_tensor_list_info* info; /*!< pointer to an array of meta info associated with the tensors */
} ai_tensor_list;
AI_PACKED_STRUCT_END
/*!
* @struct ai_tensor_chain
* @ingroup ai_platform_interface
* @brief tensor chain datastruct for internal network nodes
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_chain_s {
ai_u16 size;
ai_u16 flags;
  ai_tensor_list* chain; /*!< pointer to a 4-element array, see @ref ai_tensor_chain_type */
} ai_tensor_chain;
AI_PACKED_STRUCT_END
/************************************** LAYER DATATYPES *******************/
/*!
* @struct ai_layer
* @ingroup ai_platform_interface
* @brief Structure encoding a generic opaque layer in the network
*
*/
typedef void ai_layer;
/************************************** OBSERVER DATATYPES *******************/
/* forward function */
struct ai_node_s;
/*!
* @struct ai_observer_node
* @ingroup ai_observer_interface
* @brief observer node data struct for internal network nodes
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_observer_node_s {
ai_u16 c_idx; /*!< node index (position in the execution list) */
ai_u16 type; /*!< node type info @see ai_node datastruct */
ai_u16 id; /*!< node id assigned by codegen tool to identify the model layer*/
ai_u16 unused; /*!< unused field for alignment */
const ai_tensor_chain* inner_tensors; /*!< pointer to the inner tensor if available */
  const ai_tensor_chain* tensors; /*!< pointer to a 4-element array, see @ref ai_tensor_chain_type */
} ai_observer_node;
AI_PACKED_STRUCT_END
#define AI_OBSERVER_NONE_EVT (0) /*!< No event */
#define AI_OBSERVER_INIT_EVT (1 << 0) /*!< called at the end of the init function */
#define AI_OBSERVER_PRE_EVT (1 << 1) /*!< before c-node execution */
#define AI_OBSERVER_POST_EVT (1 << 2) /*!< after c-node execution */
#define AI_OBSERVER_FIRST_EVT (1 << 8) /*!< indicate the first c-node */
#define AI_OBSERVER_LAST_EVT (1 << 9) /*!< indicate the last c-node */
#define AI_OBSERVER_REGISTERED (1 << 24) /*!< internal flag */
#define AI_OBSERVER_MASK_EVT (0xFF) /*!< mask for requested user event */
/* Client callback definition */
typedef ai_u32 (*ai_observer_node_cb)(
const ai_handle cookie,
const ai_u32 flags,
const ai_observer_node *node);
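/*
 * Editorial note, not part of the ST API: a minimal observer callback matching
 * the ai_observer_node_cb type above. The meaning of the return value is not
 * documented in this header, so returning 0 here is an assumption.
 */
#if 0
static ai_u32 example_node_observer(const ai_handle cookie,
                                    const ai_u32 flags,
                                    const ai_observer_node *node)
{
  (void)cookie;
  (void)node;
  if (flags & AI_OBSERVER_PRE_EVT) {
    /* about to execute c-node node->c_idx (model layer id: node->id) */
  }
  if (flags & AI_OBSERVER_POST_EVT) {
    /* c-node node->c_idx has just completed */
  }
  return 0;
}
#endif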
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_observer_exec_ctx_s {
ai_observer_node_cb on_node; /*!< registered user observer call-back function */
ai_handle cookie; /*!< reference of the user context */
ai_u32 flags; /*!< flags definition */
ai_u16 c_idx; /*!< store/indicate the index of the current c_node */
ai_u16 n_nodes; /*!< total number of c_node */
struct ai_node_s *cur; /*!< pointer of the current node (pre or post) */
} ai_observer_exec_ctx;
AI_PACKED_STRUCT_END
typedef enum {
AI_NODE_EXEC_INIT = 0x0,
AI_NODE_EXEC_START = 0x1,
AI_NODE_EXEC_PRE = 0x2,
AI_NODE_EXEC_POST = 0x3,
} ai_node_exec_state;
/* Internal/private definition of node execution callback */
typedef ai_u32 (*ai_node_exec_cb)(
const ai_node_exec_state state,
struct ai_node_s *cur,
const ai_handle ctx);
/********************************* NETWORK DATATYPES *************************/
/*!
* @struct ai_network
* @ingroup layers
* @brief Structure encoding a sequential neural network
*/
AI_PACKED_STRUCT_START
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_network_s {
AI_CONTEXT_FIELDS
ai_klass_obj klass; /*!< opaque handler to specific network implementations */
ai_flags flags; /*!< bitflags mask to track some network state info */
ai_error error; /*!< track 1st error code in the network */
ai_u16 n_batches; /*!< number of batches to process */
  ai_u16 batch_id; /*!< current batch to process, in [0, n_batches) */
  // New 6.1 context explicitly storing network buffers. This also allows management of the network persistent state.
ai_network_buffers buffers; /*!< network buffers datastruct */
ai_tensor_chain tensors; /*!< I/O tensor chain list see @ref ai_tensor_list */
struct ai_node_s* input_node; /*!< first node to execute */
struct ai_node_s* current_node; /*!< current node to execute */
ai_node_exec_cb on_node_exec; /*!< registered call-back function called when
a node/operator is scheduled */
ai_handle data_exec; /*!< private reference for the runtime context */
ai_handle lite_cb; /*!< registered opaque call-back handler for lite APIs */
ai_version tool_api_version; /*! Tools Codegen API version */
} ai_network;
AI_PACKED_STRUCT_END
/*!
* @brief Get platform runtime lib revision version as string.
* @ingroup ai_platform_interface
* @return a string containing the revision of the runtime library
*/
AI_INTERFACE_TYPE
const char* ai_platform_runtime_get_revision(void);
/*!
* @brief Get platform runtime lib version as datastruct.
* @ingroup ai_platform_interface
* @return a datastruct containing the version of the runtime library
*/
AI_INTERFACE_TYPE
ai_platform_version ai_platform_runtime_get_version(void);
/*!
* @brief Get platform public APIs version as datastruct.
* @ingroup ai_platform_interface
* @return a datastruct containing the version of the public APIs
*/
AI_INTERFACE_TYPE
ai_platform_version ai_platform_api_get_version(void);
/*!
* @brief Get platform interface private APIs version as datastruct.
* @ingroup ai_platform_interface
* @return a datastruct containing the version of the interface private APIs
*/
AI_INTERFACE_TYPE
ai_platform_version ai_platform_interface_api_get_version(void);
/****************************************************************************
** Context APIs
****************************************************************************/
/*!
* @brief Get platform context.
* @ingroup ai_platform_interface
* @return a valid context handle or NULL otherwise
*/
AI_INTERFACE_TYPE
ai_context* ai_platform_context_acquire(const ai_handle handle);
/*!
* @brief Release platform context.
* @ingroup ai_platform_interface
* @return an opaque handle to the released object
*/
AI_INTERFACE_TYPE
ai_handle ai_platform_context_release(ai_context* ctx);
/****************************************************************************
** Platform Network Params APIs
****************************************************************************/
/*!
* @brief get the weights map from user provided network params info
* @ingroup ai_platform_interface
* @param params a pointer to ai_network_params struct
* @param map table pointer to the table map to initialize
* @param map_size the number of entries of the table to initialize
* @return true if initialization succeeded, false otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_get_weights_map(
ai_ptr* map, const ai_size map_size, const ai_network_params* params);
/*!
* @brief get the activations map from user provided network params info
* @ingroup ai_platform_interface
* @param params a pointer to ai_network_params struct
* @param map table pointer to the table map to initialize
* @param map_size the number of entries of the table to initialize
* @return true if initialization succeeded, false otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_get_activations_map(
ai_ptr* map, const ai_size map_size, const ai_network_params* params);
/*!
 * @brief bind code-generated weights and activations map arrays to ai_network_params
 * @ingroup ai_platform_interface
 * @param[out] params the network params struct reporting the bound params
 * @param[in] map_weights pointer to the code-generated weights map array to be bound
 * @param[in] map_activations pointer to the code-generated activations map array to be bound
 * @return true if network parameters binding succeeded, false otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_bind_network_params(
ai_network_params* params,
const ai_buffer_array* map_weights, const ai_buffer_array* map_activations);
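/*
 * Usage sketch (illustrative only): binding the code-generated weights and
 * activations buffer arrays to a params struct before network init.
 * `g_weights_array` and `g_activations_array` are hypothetical names for the
 * ai_buffer_array objects emitted by the code generator.
 *
 *   ai_network_params params;
 *   if (!ai_platform_bind_network_params(&params,
 *                                        &g_weights_array,
 *                                        &g_activations_array)) {
 *     // binding failed: params must not be used
 *   }
 */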
/****************************************************************************
** Platform Network APIs
****************************************************************************/
/*!
* @brief get **first** error tracked when using the network
* @ingroup ai_platform_interface
* @param network an opaque handler to the network context
* @return ai_error the FIRST error generated during network processing
*/
AI_INTERFACE_TYPE
ai_error ai_platform_network_get_error(ai_handle network);
/*!
 * @brief Set a specific error code for the network. If an error is already
 *        present, keep it.
* @ingroup ai_platform_interface
* @param net_ctx a pointer to the network context
* @param type error type as defined in @ref ai_error_type
* @param code error code as defined in @ref ai_error_code
 * @return true if no previous error was recorded, false if a previous error
 *         is present or the context is invalid
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_network_set_error(
ai_network* net_ctx, const ai_error_type type, const ai_error_code code);
/*!
* @brief Finalize network report datastruct with I/O buffer infos
* @ingroup ai_platform_interface
 * @return true if the report has been finalized correctly, false otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_api_get_network_report(
ai_handle network, ai_network_report* r);
/*!
* @brief Get network inputs array pointer as a ai_buffer array pointer.
* @ingroup network
* @param network an opaque handler to the network context
* @param n_buffer optional parameter to return the number of inputs
 * @return an ai_buffer pointer to the inputs array
*/
AI_INTERFACE_TYPE
ai_buffer* ai_platform_inputs_get(ai_handle network, ai_u16 *n_buffer);
/*!
* @brief Get network outputs array pointer as a ai_buffer array pointer.
* @ingroup network
* @param network an opaque handler to the network context
* @param n_buffer optional parameter to return the number of outputs
 * @return an ai_buffer pointer to the outputs array
*/
AI_INTERFACE_TYPE
ai_buffer* ai_platform_outputs_get(ai_handle network, ai_u16 *n_buffer);
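/*
 * Usage sketch (illustrative only): enumerating the network I/O buffers after
 * initialization. `net` is a hypothetical, already initialized network handle.
 *
 *   ai_u16 n_in = 0, n_out = 0;
 *   ai_buffer* inputs  = ai_platform_inputs_get(net, &n_in);
 *   ai_buffer* outputs = ai_platform_outputs_get(net, &n_out);
 *   for (ai_u16 i = 0; i < n_in; i++) {
 *     // inspect inputs[i] (shape / format / data) as needed
 *   }
 */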
/*!
* @brief create a network context with some error check
* @ingroup ai_platform_interface
 * @param network a pointer to an opaque handle of the network context
 * @param network_config an (optional) pointer to the network config buffer info
* @param net_ctx a pointer to the network context structure to initialize
* @param tool_major major version id of the tool used to generate the network
* @param tool_minor minor version id of the tool used to generate the network
* @param tool_micro micro version id of the tool used to generate the network
* @return the error during network creation or error none if ok
*/
AI_INTERFACE_TYPE
ai_error ai_platform_network_create(
ai_handle* network, const ai_buffer* network_config,
ai_network* net_ctx,
const ai_u8 tool_major, const ai_u8 tool_minor, const ai_u8 tool_micro);
/*!
* @brief destroy a network context
* @ingroup ai_platform_interface
* @param network a pointer to an opaque handle of the network context
* @return AI_HANDLE_NULL if deallocation OK, same network handle if failed
*/
AI_INTERFACE_TYPE
ai_handle ai_platform_network_destroy(ai_handle network);
/*!
* @brief initialize the network context
* @ingroup ai_platform_interface
* @param network a pointer to an opaque handle of the network context
* @return a valid network context, NULL if initialization failed
*/
AI_INTERFACE_TYPE
ai_network* ai_platform_network_init(
ai_handle network, const ai_network_params* params);
/*!
 * @brief post-initialization of the network context.
 * @ingroup ai_platform_interface
 * @param network a pointer to an opaque handle of the network context
 * @return true if post-initialization succeeded, false otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_network_post_init(ai_handle network);
/*!
* @brief main platform runtime execute of a network
* @ingroup ai_platform_interface
* @param network an opaque handler to the network context
* @param input a pointer to the input buffer data to process
* @param output a pointer to the output buffer
* @return the number of batches processed from the input. A result <=0 in case
* of error
*/
AI_INTERFACE_TYPE
ai_i32 ai_platform_network_process(
ai_handle network, const ai_buffer* input, ai_buffer* output);
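/*
 * Usage sketch (illustrative only): running one inference and checking the
 * first recorded error on failure. `net`, `in_bufs` and `out_bufs` are
 * hypothetical handles/arrays prepared by the caller (see the I/O getters
 * above).
 *
 *   const ai_i32 n_batches = ai_platform_network_process(net, in_bufs, out_bufs);
 *   if (n_batches <= 0) {
 *     const ai_error err = ai_platform_network_get_error(net);
 *     // err describes the first failure (type and code)
 *   }
 */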
/****************************************************************************
** Observer APIs
****************************************************************************/
/*!
* @brief Return the info of a requested c-node (defined by the
* c_idx field). Should be called after the initialization phase.
* @ingroup ai_platform_observer
* @param network a pointer to an opaque handle of the network context
* @param node_info a pointer to a reference of the node description
* @return true if the node_info->c_idx designates a valid index else
* false (network error is updated).
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_observer_node_info(
ai_handle network, ai_observer_node *node_info);
/*!
 * @brief Register an observer context. Allows registering a client callback which
 *        will be called before and/or after the execution of a c-node with
 *        the references of the used tensors (see @ref ai_observer_node).
* @ingroup ai_platform_observer
* @param network a pointer to an opaque handle of the network context
* @param cb reference of the user callback function
* @param cookie reference of a user object/ctx
* @param flags indicate expected events (see AI_OBSERVER_XX_EVT flag definition)
 * @return false if the registration has failed (network error is updated), true otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_observer_register(
ai_handle network,
ai_observer_node_cb cb,
ai_handle cookie,
ai_u32 flags);
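/*
 * Usage sketch (illustrative only): registering a per-node observer callback.
 * `net` and `my_profiler` are hypothetical objects owned by the caller, and
 * AI_OBSERVER_PRE_EVT / AI_OBSERVER_POST_EVT are assumed to be among the
 * AI_OBSERVER_XX_EVT flags referenced above (not shown in this excerpt). The
 * meaning of the callback return value is also an assumption here.
 *
 *   static ai_u32 on_node(const ai_handle cookie, const ai_u32 flags,
 *                         const ai_observer_node* node) {
 *     if (flags & AI_OBSERVER_POST_EVT) {
 *       // e.g. collect per-node stats keyed by node->c_idx
 *     }
 *     return 0;  // assumption: returning zero simply continues execution
 *   }
 *   ...
 *   ai_platform_observer_register(net, on_node, &my_profiler,
 *                                 AI_OBSERVER_PRE_EVT | AI_OBSERVER_POST_EVT);
 */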
AI_INTERFACE_TYPE
ai_bool ai_platform_observer_register_s(ai_handle network,
ai_observer_exec_ctx *ctx);
/*!
* @brief un-register the observer context.
* @ingroup ai_platform_observer
* @param network a pointer to an opaque handle of the network context
* @param ctx a pointer to a reference of the registered platform observer context
* @param cb reference of the registered user callback function
* @param cookie reference of the registered user object/ctx
 * @return false if the un-registration has failed (network error is updated), true otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_platform_observer_unregister(ai_handle network,
ai_observer_node_cb cb, ai_handle cookie);
AI_INTERFACE_TYPE
ai_bool ai_platform_observer_unregister_s(ai_handle network,
ai_observer_exec_ctx *ctx);
AI_API_DECLARE_END
#endif /*AI_PLATFORM_INTERFACE_H*/
| 35,318 |
C
| 33.091699 | 115 | 0.615012 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/core_common.h
|
/**
******************************************************************************
* @file core_common.h
* @author AST Embedded Analytics Research Platform
* @brief header file of common core datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef CORE_COMMON_H
#define CORE_COMMON_H
#pragma once
#include "ai_platform.h"
#include "ai_platform_interface.h"
#include "core_datatypes.h"
// #include "core_log.h"
/*!
* @defgroup core_common Common Core Library Routines
* @brief Common macros, datatypes and routines of core common module
 * @details This module contains the definitions and handling of the @ref ai_node
 * datastructures. An ai_node is a generic abstraction for a network node that
 * could be either a fixed-function layer or an operator. Ideally the platform
 * interface defined in the api module should handle and process generic nodes in
 * the network, without relying on the fact that they are layer or operator
 * datastructs. Specific implementation details should be kept inside the layers
 * and operators modules. The core module additionally implements common routines
 * used in the layers and operators modules.
*/
/******************************************************************************/
#ifdef HAS_AI_ASSERT
#define ASSERT_ARRAY_SANITY(a_) \
AI_ASSERT((a_) && (a_)->size>0)
#define ASSERT_ARRAY_DATA_SANITY(a_) \
ASSERT_ARRAY_SANITY(a_) \
AI_ASSERT((a_)->data && (a_)->data_start)
#define ASSERT_TENSOR_SANITY(t_) \
AI_ASSERT((t_) && (t_)->data) \
AI_ASSERT(CORE_TENSOR_GET_SHAPE_SIZE(t_)>0) \
ASSERT_ARRAY_SANITY((t_)->data)
#define ASSERT_TENSOR_LIST_SANITY(tlist_) \
  AI_ASSERT((tlist_) && (GET_TENSOR_LIST_SIZE(tlist_)>0))
#define ASSERT_TENSOR_DATA_SANITY(t_) \
ASSERT_TENSOR_SANITY(t_) \
ASSERT_ARRAY_DATA_SANITY((t_)->data)
#define ASSERT_NODE_SANITY(node_) \
do { \
AI_ASSERT(AI_NODE_OBJ(node_)->tensors && AI_NODE_OBJ(node_)->tensors->chain) \
ASSERT_TENSOR_SANITY(GET_TENSOR_IN(AI_NODE_OBJ(node_)->tensors, 0)) \
ASSERT_TENSOR_SANITY(GET_TENSOR_OUT(AI_NODE_OBJ(node_)->tensors, 0)) \
} while (0);
#else
#define ASSERT_ARRAY_SANITY(a_) /* ASSERT_ARRAY_SANITY */
#define ASSERT_ARRAY_DATA_SANITY(a_) /* ASSERT_ARRAY_DATA_SANITY */
#define ASSERT_TENSOR_SANITY(t_) /* ASSERT_TENSOR_SANITY */
#define ASSERT_TENSOR_LIST_SANITY(tlist_) /* ASSERT_TENSOR_LIST_SANITY */
#define ASSERT_TENSOR_DATA_SANITY(t_) /* ASSERT_TENSOR_DATA_SANITY */
#define ASSERT_NODE_SANITY(node_) /* ASSERT_NODE_SANITY */
#endif /*HAS_AI_ASSERT*/
#if defined(__GNUC__) || defined(__clang__)
/* Suppress unused function warnings */
#define AI_UNUSED_FUNCTION __attribute__((unused))
/* Manage false positives in address sanitizer */
#define AI_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
#else
#define AI_UNUSED_FUNCTION /* AI_UNUSED_FUNCTION */
#define AI_NO_SANITIZE_ADDRESS /* AI_NO_SANITIZE_ADDRESS */
#endif
/******************************************************************************/
#define AI_NODE_TYPE(type_) \
((ai_node_type)((ai_u32)(type_)&0xFFFF))
#define AI_NODE_OBJ(obj_) \
((ai_node*)(obj_))
#define AI_NODE_FUNC(func_) \
((node_func)(func_))
#define AI_NODE_COMMON_FIELDS_DECLARE \
ai_node_type type; /*!< node type id (see @ref ai_node_type) */ \
ai_id_obj id; /*!< node object instance id (see @ref ai_id_obj) */ \
ai_flags flags; /*!< node object flags */ \
ai_klass_obj klass; /*!< opaque handler to specific layer implementations */ \
struct ai_network_s* network; /*!< handle to global network context */ \
struct ai_node_s* next; /*!< the next node object in the sequence */ \
node_func forward; /*!< forward function for the node */ \
AI_CONST ai_tensor_chain* tensors; /*!< pointer to node tensor chain */
#define AI_NODE_STATEFUL_FIELDS_DECLARE \
AI_NODE_COMMON_FIELDS_DECLARE \
ai_handle state; \
node_func init; \
node_func update; \
node_func destroy;
#define AI_NODE_COMMON_INIT(type_, id_, flags_, klass_, network_, next_, forward_) \
.type = AI_NODE_TYPE(type_), \
.id = AI_ID_OBJ(id_), \
.flags = (flags_), \
.klass = AI_KLASS_OBJ(klass_), \
.network = AI_NETWORK_OBJ(network_), \
.next = AI_NODE_OBJ(next_), \
.forward = AI_NODE_FUNC(forward_)
/*****************************************************************************/
/** Network Tensors Chains / Lists Handlers **/
/*****************************************************************************/
#define AI_FOR_EACH_TENSOR_CHAIN_DO(tlist_ptr_, chain_) \
ai_tensor_list* tlist_ptr_ = (chain_)->chain; \
for (; tlist_ptr_<(((chain_)->chain)+((chain_)->size)); tlist_ptr_++)
#define AI_FOR_EACH_TENSOR_LIST_DO(idx_, t_ptr_, tlist_ptr_) \
ai_tensor* t_ptr_ = NULL; \
for (ai_size idx_ = 0; (idx_ < GET_TENSOR_LIST_SIZE(tlist_ptr_)) && \
((t_ptr_ = GET_TENSOR_LIST_ITEM(tlist_ptr_, idx_)) != NULL); ++idx_)
#define GET_TENSOR_LIST_INFO(list_) \
((list_)->info)
#define GET_TENSOR_LIST_META(list_, pos_) \
(&(GET_TENSOR_LIST_INFO(list_)->meta[pos_]))
#define GET_TENSOR_LIST_STATE(list_, pos_) \
(&(GET_TENSOR_LIST_INFO(list_)->state[pos_]))
#define GET_TENSOR_LIST_BUFFER(list_, pos_) \
(&(GET_TENSOR_LIST_INFO(list_)->buffer[pos_]))
#define GET_TENSOR_LIST_ITEM(list_, pos_) \
((NULL!=GET_TENSOR_LIST_ITEMS(list_)) \
? GET_TENSOR_LIST_ITEMS(list_)[(pos_)] : NULL)
#define GET_TENSOR_LIST_ITEMS(list_) \
((list_)->tensor)
#define GET_TENSOR_LIST_SIZE(list_) \
((NULL!=(list_)) ? (list_)->size : 0)
#define GET_TENSOR_CHAIN_SIZE(chain_) \
((NULL!=(chain_)) ? (chain_)->size : 0)
#define GET_TENSOR_LIST(chain_, type_) \
((AI_CONCAT(AI_TENSOR_CHAIN_, type_)<(chain_)->size) \
? &(chain_)->chain[AI_CONCAT(AI_TENSOR_CHAIN_, type_)] : NULL)
#define GET_TENSOR_LIST_IN(chain_) \
(GET_TENSOR_LIST(chain_, INPUT))
#define GET_TENSOR_LIST_OUT(chain_) \
(GET_TENSOR_LIST(chain_, OUTPUT))
#define GET_TENSOR_LIST_WEIGTHS(chain_) \
(GET_TENSOR_LIST(chain_, WEIGHTS))
#define GET_TENSOR_LIST_SCRATCH(chain_) \
(GET_TENSOR_LIST(chain_, SCRATCH))
#define GET_TENSOR_IN(chain_, pos_) \
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_IN(chain_), (pos_)))
#define GET_TENSOR_OUT(chain_, pos_) \
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_OUT(chain_), (pos_)))
#define GET_TENSOR_WEIGHTS(chain_, pos_) \
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_WEIGTHS(chain_), (pos_)))
#define GET_TENSOR_SCRATCH(chain_, pos_) \
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_SCRATCH(chain_), (pos_)))
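/*
 * Usage sketch (illustrative only): a minimal example of how a forward function
 * typically pulls its tensors out of the node tensor chain. `node` is a
 * hypothetical ai_node* with a populated tensor chain.
 *
 *   ai_tensor* t_in  = GET_TENSOR_IN(node->tensors, 0);
 *   ai_tensor* t_out = GET_TENSOR_OUT(node->tensors, 0);
 *   AI_FOR_EACH_TENSOR_LIST_DO(idx, t, GET_TENSOR_LIST_WEIGTHS(node->tensors)) {
 *     // t points to the idx-th weight tensor (if any)
 *   }
 */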
/******************************************************************************/
#if 1
#define SECTION_SERIAL(expr) expr
#define SECTION_PARALLEL(expr)
#else
#define SECTION_SERIAL(expr)
#define SECTION_PARALLEL(expr) expr
#endif
AI_API_DECLARE_BEGIN
/*!
* @struct ai_node_type
* @ingroup core_common
* @brief generic network node numeric type ID
*
*/
typedef uint16_t ai_node_type;
/*!
* @typedef void (*node_func)(struct ai_node_s* node)
* @ingroup core_common
* @brief Callback signatures for all forward functions
*/
typedef void (*node_func)(struct ai_node_s* node);
/*!
* @typedef ai_float (*func_nl_el)(const ai_float x)
* @ingroup core_common
 * @brief Function pointer for generic elementwise transforms
*
* This function pointer abstracts a generic nonlinear function applied to a
* single element. See @ref ai_math_sqrt in @ref math_helpers as examples.
*/
typedef ai_float (*func_nl_el)(const ai_float x);
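/*
 * Usage sketch (illustrative only): any scalar transform with this signature
 * can be plugged in as an elementwise nonlinearity. `square_el` is a
 * hypothetical example function, not part of the library.
 *
 *   static ai_float square_el(const ai_float x) { return x * x; }
 *   ...
 *   const func_nl_el f = square_el;
 *   const ai_float y = f(3.0f);   // y == 9.0f
 */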
/*!
* @struct ai_node
* @ingroup core_common
* @brief Structure encoding a generic node of the network
*
 * The node struct includes information about the network it belongs to, the
* next node in a sequential network and the forward function. The forward
* functions are implemented in the @ref layers module.
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_node_s {
AI_NODE_COMMON_FIELDS_DECLARE
} ai_node;
/*!
* @struct ai_node_stateful
* @ingroup core_common
* @brief Structure encoding a stateful node of the network
*
 * The node struct includes information about the network it belongs to, the
* next node in a sequential network and the init, update and forward functions.
* The node functions are implemented in the @ref layers module.
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_node_stateful_s {
AI_NODE_STATEFUL_FIELDS_DECLARE
} ai_node_stateful;
/*!
* @brief initialize core module
* @ingroup core_common
 * @return false if initialization fails, true otherwise
*/
AI_INTERNAL_API
ai_bool core_init(void);
/*!
* @brief get 1st error raised during processing
* @ingroup core_common
* @param[out] error the @ref ai_error recorded during processing
 * @return the 1st error generated during processing; AI_ERROR_NONE if no error occurred
*/
AI_INTERNAL_API
ai_error core_get_error(ai_error* error);
/*!
* @brief set error recorded during processing
* @ingroup core_common
* @param[out] error the @ref ai_error to set
* @param[in] type the specific error type to set
* @param[in] code the specific error code to set
 * @return true if the error is set, false if a previous error was already recorded
*/
AI_INTERNAL_API
ai_bool core_set_error(
ai_error* error, const ai_error_type type, const ai_error_code code);
AI_API_DECLARE_END
#endif /*CORE_COMMON_H*/
| 9,995 |
C
| 33.588235 | 92 | 0.615408 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers.h
|
/**
******************************************************************************
* @file layers.h
* @author AST Embedded Analytics Research Platform
* @brief header file of AI platform layers datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_H
#define LAYERS_H
#pragma once
#include "layers_common.h"
#include "layers_conv2d.h"
#include "layers_custom.h"
#include "layers_dense.h"
#include "layers_formats_converters.h"
#include "layers_generic.h"
#include "layers_lite_graph.h"
#include "layers_nl.h"
#include "layers_norm.h"
#include "layers_pad_dqnn.h"
#include "layers_pad_generic.h"
#include "layers_pool.h"
#include "layers_rnn.h"
#include "layers_upsample_generic.h"
#include "layers_sm.h"
#include "layers_ml.h"
#include "layers_ml_iforest.h"
#include "layers_ml_svc.h"
#include "layers_ml.h"
#include "layers_ml_linearclassifier.h"
#include "layers_ml_treeensembleclassifier.h"
#include "layers_ml_treeensembleregressor.h"
#include "layers_ml_svmregressor.h"
#include "layers_conv2d_dqnn.h"
#include "layers_dense_dqnn.h"
#include "layers_pool_dqnn.h"
#include "layers_generic_dqnn.h"
#include "layers_upsample_generic.h"
// #include "layers_template.h"
AI_API_DECLARE_BEGIN
/*!
* @struct ai_any_layer_ptr
* @ingroup layers
* @brief Generic union for typed layers pointers
*/
typedef struct {
ai_layer_type type; /*!< layer type id (see @ref ai_layer_type) */
union {
#define LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_) \
AI_CONCAT(ai_layer_, struct_)* struct_;
#include "layers_list.h"
};
} ai_any_layer_ptr;
AI_API_DECLARE_END
#endif /*LAYERS_H*/
| 2,190 |
C
| 27.089743 | 84 | 0.603653 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/core_net_inspect_interface.h
|
/**
******************************************************************************
* @file core_net_inspect_interface.h
* @author AST Embedded Analytics Research Platform
* @brief header file of core network inspection interface APIs
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef __CORE_NET_INSPECT_INTERFACE_H_
#define __CORE_NET_INSPECT_INTERFACE_H_
#pragma once
#include "ai_platform.h"
AI_API_DECLARE_BEGIN
/*!
* @defgroup core_validation Validation Core
* @brief Implementation of the validation network interface headers
*/
/*!
* @struct ai_inspect_node_info
* @brief network node inspection context: there is one of this datastruct
* for each node of the network
*/
typedef struct ai_inspect_node_info_s {
ai_u16 type; /*!< node type info @see ai_node datastruct */
ai_u16 id; /*!< node id assigned by codegen tool to identify
the specific node instance */
ai_u16 batch_id; /*!< current node batch processed */
ai_u16 n_batches; /*!< total number of node batches to process */
  ai_float elapsed_ms;        /*!< node performance analysis: time in
milliseconds to execute the node forward
function */
ai_u16 in_size; /*!< number of node's input activation buffers */
ai_u16 out_size; /*!< number of node's output activation buffers */
ai_buffer* in; /*!< input node activation buffer see @ref ai_buffer */
ai_buffer* out; /*!< output node activation buffer see @ref ai_buffer */
} ai_inspect_node_info;
/*!
* @struct ai_inspect_net_report
* @brief network inspection report context
*/
typedef struct ai_inspect_net_report_s {
ai_u32 id; /*!< id of the report */
ai_signature signature; /*!< network identification checksum */
ai_u32 num_inferences; /*!< total number of inferences processed
during the inspection */
ai_u32 n_nodes; /*!< number of nodes in the network */
ai_float elapsed_ms; /*!< network total time (in ms) for processing
num_inferences inferences */
ai_inspect_node_info* node; /*!< pointer to the array of size n_nodes where
a single node report is reported. see @ref
ai_inspect_node_info datastruct */
} ai_inspect_net_report;
/*!
* @enum net inspector inspection mode
* @brief configuration flags to set net inspection mode
*/
typedef enum {
VALIDATION_INSPECT = (0x1<<0), /**< Network validation inspection mode */
STORE_ALL_IO_ACTIVATIONS = (0x1<<7), /**< Store all I/O activations on snapshot datastruct */
} ai_inspect_mode;
typedef enum {
AI_NODE_EXEC_PRE_FORWARD_STAGE = 0x0,
AI_NODE_EXEC_POST_FORWARD_STAGE = 0x1,
} ai_node_exec_stage;
/*!
* @brief function pointer to callback report
*/
typedef void (*ai_inspect_report_cb_func)(
const ai_handle cookie,
const ai_inspect_net_report* report);
/*!
* @brief function pointer to node execute
*/
typedef void (*ai_inspect_exec_node_cb_func)(
const ai_handle cookie,
const ai_inspect_node_info* node_info,
const ai_node_exec_stage stage);
/*!
* @struct ai_inspect_config
* @brief inspection config datastruct
*/
typedef struct ai_inspect_config_s {
ai_u8 validation_mode; /*!< validation mode flags
see @ref ai_inspect_mode */
ai_u8 log_level; /*!< log class level see @ref LOG_SUDO */
ai_bool log_quiet; /*!< log class quiet mode */
ai_inspect_report_cb_func on_report_destroy; /*!< callback function
called when a report datastruct
is released from memory */
ai_inspect_exec_node_cb_func on_exec_node; /*!< callback function
called when a node is executed (pre & post) */
ai_handle cookie;
} ai_inspect_config;
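/*
 * Usage sketch (illustrative only): filling an inspection config with a user
 * report callback. `my_ctx` is a hypothetical user context object and the
 * log_level value is a placeholder.
 *
 *   static void on_report(const ai_handle cookie,
 *                         const ai_inspect_net_report* report) {
 *     // e.g. log report->n_nodes and report->elapsed_ms
 *   }
 *   ...
 *   ai_inspect_config cfg = {
 *     .validation_mode   = VALIDATION_INSPECT,
 *     .log_level         = 0,
 *     .log_quiet         = true,
 *     .on_report_destroy = on_report,
 *     .on_exec_node      = NULL,
 *     .cookie            = (ai_handle)&my_ctx,
 *   };
 */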
AI_API_DECLARE_END
#endif /*__CORE_NET_INSPECT_INTERFACE_H_*/
| 4,734 |
C
| 37.495935 | 98 | 0.562315 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_rnn.h
|
/**
******************************************************************************
* @file layers_rnn.h
* @author AST Embedded Analytics Research Platform
* @brief header file of RNN layers
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_RNN_H
#define LAYERS_RNN_H
#pragma once
#include "layers_common.h"
#include "layers_nl.h"
AI_API_DECLARE_BEGIN
/*!
* @struct ai_layer_lstm
* @ingroup layers
* @brief LSTM layer with generic nonlinearities and peephole connections
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lstm_ {
AI_LAYER_STATEFUL_FIELDS_DECLARE
ai_size n_units; /**< size of the hidden RNN state */
func_nl activation_nl; /**< activation nonlinearity (input to cell) */
func_nl recurrent_nl; /**< recurrent nonlinearity (hidden to cell) */
func_nl out_nl; /**< output nonlinearity (cell to hidden) */
ai_bool go_backwards; /**< process reversed input */
ai_bool return_state; /**< return state */
ai_bool reverse_seq; /**< reverse output sequence */
ai_float cell_clip; /**< cell clip value */
} ai_layer_lstm;
/*!
* @struct ai_layer_gru
* @ingroup layers
* @brief Gated Recurrent Unit (GRU) layer with generic nonlinearities
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gru_ {
AI_LAYER_STATEFUL_FIELDS_DECLARE
ai_size n_units; /**< size of the hidden RNN state */
func_nl activation_nl; /**< activation nonlinearity (input to cell) */
func_nl recurrent_nl; /**< recurrent nonlinearity (hidden to cell) */
ai_bool reset_after;
ai_bool return_state;
ai_bool go_backwards; /**< process reversed input */
ai_bool reverse_seq; /**< reverse output sequence */
} ai_layer_gru;
/*!
* @struct ai_layer_rnn
* @ingroup layers
* @brief Simple Recurrent Neural Network (RNN) layer
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_rnn_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_size n_units; /**< size of the hidden RNN state */
func_nl activation_nl; /**< activation nonlinearity (input to hidden) */
ai_bool go_backwards; /**< process reversed input */
ai_bool reverse_seq; /**< reverse output sequence */
ai_bool return_state;
} ai_layer_rnn;
/*!
 * @brief Initialize a Long Short-Term Memory (LSTM) layer.
* @ingroup layers
*
* Function used to initialize lstm internal state
*/
AI_INTERNAL_API
void init_lstm(ai_layer * layer);
/*!
 * @brief Destroy a Long Short-Term Memory (LSTM) layer state.
* @ingroup layers
*
* Function used to destroy lstm internal state
*/
AI_INTERNAL_API
void destroy_lstm(ai_layer * layer);
/*!
 * @brief Computes the activations of a Long Short-Term Memory (LSTM) layer.
* @ingroup layers
*
 * Implements a Long Short-Term Memory layer with peephole connections:
* \f{eqnarray*}{
* i_t &=& \sigma_a(x_t W_{xi} + h_{t-1} W_{hi}
* + w_{ci} \odot c_{t-1} + b_i)\\
* f_t &=& \sigma_a(x_t W_{xf} + h_{t-1} W_{hf}
* + w_{cf} \odot c_{t-1} + b_f)\\
* c_t &=& f_t \odot c_{t - 1}
* + i_t \odot \sigma_r(x_t W_{xc} + h_{t-1} W_{hc} + b_c)\\
* o_t &=& \sigma_a(x_t W_{xo} + h_{t-1} W_{ho} + w_{co} \odot c_t + b_o)\\
* h_t &=& o_t \odot \sigma_o(c_t)
* \f}
* where \f$\sigma_a\f$ is the activation nonlinearity, \f$\sigma_r\f$ is the
* recurrent nonlinearity and \f$\sigma_o\f$ is the out nonlinearity. The
* \f$W_x\f$, \f$W_h\f$ and \f$W_c\f$ weights are sliced from the kernel,
* recurrent and peephole weights.
*
* @param layer the LSTM layer
*/
AI_INTERNAL_API
void forward_lstm(ai_layer * layer);
/*!
* @brief Initialize a Gated Recurrent Unit (GRU) layer.
* @ingroup layers
*
* Function used to initialize gru internal state
*/
AI_INTERNAL_API
void init_gru(ai_layer * layer);
/*!
* @brief Destroy a Gated Recurrent Unit (GRU) layer state.
* @ingroup layers
*
* Function used to destroy gru internal state
*/
AI_INTERNAL_API
void destroy_gru(ai_layer * layer);
/*!
* @brief Computes the activations of a Gated Recurrent Unit (GRU) layer.
* @ingroup layers
*
* Implements a Gated Recurrent Unit with the formula:
* \f{eqnarray*}{
* r_t &=& \sigma_a(x_t W_{xr} + h_{t - 1} W_{hr} + b_r) \\
* z_t &=& \sigma_a(x_t W_{xz} + h_{t - 1} W_{hz} + b_z) \\
* c_t &=& \sigma_r(x_t W_{xc} + r_t \odot (h_{t - 1} W_{hc} + b_{hc}) + b_c)
* \qquad \textnormal{when reset after is true} \\
* c_t &=& \sigma_r(x_t W_{xc} + (r_t \odot h_{t - 1}) W_{hc} + b_{hc} + b_c)
* \qquad \textnormal{when reset after is false (default)} \\
* h_t &=& (1 - z_t) \odot h_{t - 1} + z_t \odot c_t
* \f}
* where \f$\sigma_a\f$ is the activation nonlinearity and \f$\sigma_r\f$ is
* the recurrent nonlinearity. The weights are sliced from the kernel and
* recurrent weights.
*
* @param layer the GRU layer
*/
AI_INTERNAL_API
void forward_gru(ai_layer * layer);
/*!
* @brief Computes the activations of a Recurrent Neural Network (RNN) layer.
* @ingroup layers
*
* Implements a recurrent layer with the formula:
* \f{eqnarray*}{
* h_t &=& \sigma_a(x_t W_{xr} + h_{t - 1} W_{hr} + b_r)
* \f}
* where \f$\sigma_a\f$ is the activation nonlinearity. The weights are sliced
* from the kernel and recurrent weights.
*
* @param layer the RNN layer
*/
AI_INTERNAL_API
void forward_rnn(ai_layer * layer);
AI_API_DECLARE_END
#endif /* LAYERS_RNN_H */
| 5,866 |
C
| 30.713513 | 80 | 0.596147 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_lite_graph.h
|
/**
******************************************************************************
* @file layers_lite_graph.h
* @author AST Embedded Analytics Research Platform
* @brief header file of AI platform lite graph layers wrapper interface
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_LITE_GRAPH_H
#define LAYERS_LITE_GRAPH_H
#pragma once
#include "core_common.h"
/*!
* @defgroup layers_lite_graph Lite Graph Wrapper Definitions
* @brief definition
*
*/
AI_API_DECLARE_BEGIN
/*!
* @struct ai_layer_lite_graph
* @ingroup layers_lite_graph
* @brief Generic Lite Graph Layer Wrapper
*
* The type of lite graph is handled by the specific forward lite graph function.
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lite_graph_ {
AI_NODE_COMMON_FIELDS_DECLARE
ai_handle* activations_map; /*!< array of pointers to shared activations memory pools */
ai_handle* weights_map; /*!< array of pointers to shared weights memory pools */
} ai_layer_lite_graph;
AI_API_DECLARE_END
#endif /*LAYERS_LITE_GRAPH_H*/
| 1,598 |
C
| 29.169811 | 98 | 0.558824 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/core_datatypes.h
|
/**
******************************************************************************
* @file core_datatypes.h
* @author AST Embedded Analytics Research Platform
* @brief header file of core module private defines and datatypes
 *          not exposed to the public APIs nor to the codegen tool
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef AI_CORE_DATATYPES_H
#define AI_CORE_DATATYPES_H
#pragma once
#include <stdint.h>
/*!
* @defgroup Core Module Datatypes
* @brief Data structures and defines used by core module
*/
/*!
* @brief platform runtime core library version
*/
#define AI_PLATFORM_RUNTIME_MAJOR 8
#define AI_PLATFORM_RUNTIME_MINOR 1
#define AI_PLATFORM_RUNTIME_MICRO 0
#define AI_PLATFORM_RUNTIME_BUILD A1-SNAPSHOT
#define AI_MAGIC_CONTEXT_TOKEN (0xA1C00100) /*!< AI Cool! Magic Token */
#define AI_MAGIC_INSPECTOR_TOKEN (0xA1C00101) /*!< AI Cool! Magic Token */
#define AI_ID_OBJ(id) \
((ai_id_obj)(id))
#define AI_C_ARRAY_COUNT(array_) \
( sizeof(array_) / sizeof((array_)[0]) )
#define AI_C_ARRAY_BYTE_SIZE(array_) \
( sizeof(array_) )
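/*
 * Usage sketch (illustrative only): counting the elements and the byte size of
 * a plain C array with the macros above. `coeffs` is a hypothetical array.
 *
 *   static const int16_t coeffs[] = { 1, -2, 3, -4 };
 *   // AI_C_ARRAY_COUNT(coeffs)     == 4
 *   // AI_C_ARRAY_BYTE_SIZE(coeffs) == 4 * sizeof(int16_t) == 8
 */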
/*!
* @typedef ai_id_obj
* @ingroup core_datatypes
 * @brief numeric identifier for generic object instances (e.g. layers,
 * operators, etc.). It is used by the codegen tool to keep track of the
 * specific instances created
*/
typedef uint16_t ai_id_obj;
#endif /*AI_CORE_DATATYPES_H*/
| 1,901 |
C
| 27.818181 | 80 | 0.570752 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_ml_svmregressor.h
|
/**
******************************************************************************
* @file layers_svmregressor.h
* @author AIS
* @brief header file of AI platform SVM Regressor datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_SVMREGRESSOR_H
#define LAYERS_SVMREGRESSOR_H
#pragma once
#include "layers_common.h"
/*!
* @defgroup layers_svmreg Layers Definitions
* @brief definition
*
*/
AI_API_DECLARE_BEGIN
/* SVM regressor kernel types */
typedef enum ai_svm_kernel_e_ {
AI_SVMREG_KERNEL_LINEAR = 0,
AI_SVMREG_KERNEL_POLYNOMIAL,
AI_SVMREG_KERNEL_RBF,
AI_SVMREG_KERNEL_SIGMOID,
AI_SVMREG_KERNEL_UNSUPPORTED,
} ai_svm_kernel_e;
/*!
* @struct ai_layer_svmreg
* @ingroup layers_svmreg
* @brief SVM Regressor layer
*
* The type of svmreg function is handled by the specific forward function
* @ref forward_svm_regressor
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_svmreg_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_float intercept; /*!< constant used in the decision function */
ai_float gamma; /*!< kernel coefficient for rbf, polynomial and sigmoid functions */
ai_float coef0; /*!< term in polynomial and sigmoid functions */
ai_u32 degree; /*!< polynomial function degree */
ai_svm_kernel_e kernel_type; /*!< kernel type : see ai_svm_kernel_e */
} ai_layer_svmreg;
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Decodes the SVM Regressor ML operator.
* @ingroup layers_svmreg
* @param layer svm regressor layer
*/
AI_INTERNAL_API
void forward_svm_regressor(ai_layer *pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_SVMREGRESSOR_H*/
| 2,397 |
C
| 27.891566 | 96 | 0.53567 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_bn_integer.h
|
#ifndef LITE_BN_INTEGER_H
#define LITE_BN_INTEGER_H
#pragma once
#include "ai_lite_interface.h"
/**
* @brief Batch Normalization with 16-bit input, 16-bit threshold and binary output.
* It is implemented using a threshold, and this is possible because the output is binary.
*
* @param[in] pIn Input data pointer
* @param[out] pOut_32 Output data pointer
* @param[in] pThreshold Thresholds pointer (one per channel)
* @param[in] dim_x X dimension
* @param[in] dim_y Y dimension
* @param[in] channels_num Channels number
*/
LITE_API_ENTRY
void forward_lite_bn_is16os1ws16(const ai_i16 *pIn,
ai_u32 *pOut_32,
const ai_i16 *pThreshold,
const ai_i16 dim_x,
const ai_i16 dim_y,
const ai_i16 channels_num);
#endif /* LITE_BN_INTEGER_H */
| 913 |
C
| 34.153845 | 90 | 0.591457 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/ai_datatypes_internal.h
|
/**
******************************************************************************
* @file ai_datatypes_internal.h
* @author AST Embedded Analytics Research Platform
* @brief Definitions of AI platform private APIs types
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef AI_DATATYPES_INTERNAL_H
#define AI_DATATYPES_INTERNAL_H
#pragma once
#include "ai_datatypes.h"
#include "ai_datatypes_defines.h"
/*!
* @defgroup datatypes_internal Internal Datatypes
* @brief Data structures used internally to implement neural networks
*
* The layers are defined as structs; a generic layer type defines the basic
* layer parameters and type-specific parameters are handled by specializations
 * implemented as a C union. The layers also keep a pointer to the parent
 * network and the next layer in the network.
 * The input, output and parameters are tensors with a hard-coded maximum
 * dimension of 4. Tensors are floating-point arrays with a notion of size.
* The network is a linked list of layers, and thus it stores only the pointer
* to the first layer.
*/
/*!
* @section Offsets
* @ingroup datatypes_internal
 * Macros to handle (byte) stride addressing on tensors. The `AI_PTR` macro
 * is used to always cast a pointer to a byte array. The `AI_OFFSET_X` macros
 * are used to compute the (byte) offsets of, respectively, adjacent row
 * elements, column elements, channel elements and `channel_in` elements.
* @{
*/
/*! AI_STORAGE_KLASS SECTION ************************************/
#define AI_STORAGE_KLASS_TYPE(s_) \
( (s_)->type )
#define AI_STORAGE_KLASS_SIZE(s_) \
( (s_)->size )
#define AI_STORAGE_KLASS_DATA(s_, type_) \
( (type_*)((s_)->data) )
#define AI_STORAGE_KLASS_COPY(dst_, dst_type_, src_, src_type_) \
{ \
AI_ASSERT(AI_STORAGE_KLASS_SIZE(src_)>=AI_STORAGE_KLASS_SIZE(dst_)) \
AI_STORAGE_KLASS_SIZE(dst_) = AI_STORAGE_KLASS_SIZE(src_); \
for (ai_size i=0; i<AI_STORAGE_KLASS_SIZE(dst_); i++ ) { \
AI_STORAGE_KLASS_DATA(dst_, dst_type_)[i] = \
AI_STORAGE_KLASS_DATA(src_, src_type_)[i]; \
} \
}
#define AI_STORAGE_KLASS_DUMP(s_, pfx_, post_, fmt_, type_) \
{ \
AI_ASSERT(s_) \
AI_DEBUG_PRINT(pfx_, AI_STORAGE_KLASS_SIZE(s_)) \
for ( ai_u32 i=0; i<AI_STORAGE_KLASS_SIZE(s_); i++ ) { \
if ( (i % 8)==0 ) { AI_DEBUG_PRINT("\n ") } \
AI_DEBUG_PRINT(fmt_, AI_STORAGE_KLASS_DATA(s_, type_)[i]) \
} \
AI_DEBUG_PRINT(post_) \
}
/*! AI_SHAPES SECTION ************************************/
#define AI_SHAPE_2D_H(shape_) \
AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_HEIGHT)
#define AI_SHAPE_2D_W(shape_) \
AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_WIDTH)
#define AI_SHAPE_ELEM(shape_, pos_) \
AI_STORAGE_KLASS_DATA(shape_, ai_shape_dimension)[pos_]
#define AI_SHAPE_GET_ELEM(shape_, pos_) \
(((pos_) < AI_SHAPE_SIZE(shape_)) ? AI_SHAPE_ELEM(shape_, pos_) : 1)
#define AI_SHAPE_SET_ELEM(shape_, pos_, val_) \
if ((pos_) < AI_SHAPE_SIZE(shape_)) { AI_SHAPE_ELEM(shape_, pos_) = (val_); }
#define AI_SHAPE_TYPE(shape_) \
AI_STORAGE_KLASS_TYPE(shape_)
#define AI_SHAPE_SIZE(shape_) \
AI_STORAGE_KLASS_SIZE(shape_)
#define AI_SHAPE_CLONE(dst_, src_) \
AI_STORAGE_KLASS_COPY(dst_, ai_shape_dimension, src_, ai_shape_dimension)
#define AI_SHAPE_BCAST_CLONE(dst_, src_) \
{ \
for (ai_size i = 0; i < AI_SHAPE_SIZE(dst_); i++) { \
AI_SHAPE_SET_ELEM(dst_, i, AI_SHAPE_GET_ELEM(src_, i)); \
} \
}
//#define AI_SHAPE_BATCH(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_BATCH_CHANNEL)
#define AI_SHAPE_H(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_HEIGHT)
#define AI_SHAPE_W(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_WIDTH)
#define AI_SHAPE_CH(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_CHANNEL)
#define AI_SHAPE_IN_CH(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_IN_CHANNEL)
#define AI_SHAPE_D(shape_) ((AI_SHAPE_SIZE((shape_)) > AI_SHAPE_DEPTH) \
? AI_SHAPE_ELEM((shape_), AI_SHAPE_DEPTH) : 1)
#define AI_SHAPE_E(shape_) ((AI_SHAPE_SIZE((shape_)) > AI_SHAPE_EXTENSION) \
? AI_SHAPE_ELEM((shape_), AI_SHAPE_EXTENSION) : 1)
#define AI_SHAPE_T(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_TIME)
#define AI_CONV_SHAPE_H AI_SHAPE_W
#define AI_CONV_SHAPE_W AI_SHAPE_CH
#define AI_CONV_SHAPE_CH AI_SHAPE_H
#define AI_CONV_SHAPE_IN_CH AI_SHAPE_IN_CH
/*! AI_STRIDES SECTION ***********************************/
#define AI_STRIDE_2D_H(stride_) \
AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_HEIGHT)
#define AI_STRIDE_2D_W(stride_) \
AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_WIDTH)
#define AI_STRIDE_ELEM(stride_, pos_) \
AI_STORAGE_KLASS_DATA(stride_, ai_stride_dimension)[pos_]
#define AI_STRIDE_GET_ELEM(stride_, pos_) \
(((pos_) < AI_STRIDE_SIZE(stride_)) ? AI_STRIDE_ELEM(stride_, pos_) : 0)
#define AI_STRIDE_SET_ELEM(stride_, pos_, val_) \
if ((pos_) < AI_STRIDE_SIZE(stride_)) AI_STRIDE_ELEM(stride_, pos_) = (val_);
#define AI_STRIDE_TYPE(stride_) \
AI_STORAGE_KLASS_TYPE(stride_)
#define AI_STRIDE_SIZE(stride_) \
AI_STORAGE_KLASS_SIZE(stride_)
#define AI_STRIDE_CLONE(dst_, src_) \
AI_STORAGE_KLASS_COPY(dst_, ai_stride_dimension, src_, ai_stride_dimension)
#define AI_STRIDE_BCAST_CLONE(dst_, src_) \
{ \
for (ai_size i=0; i<AI_STRIDE_SIZE(dst_); i++) { \
AI_STRIDE_SET_ELEM(dst_, i, AI_STRIDE_GET_ELEM(src_, i)); \
} \
}
//#define AI_STRIDE_BATCH(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_BATCH_CHANNEL)
#define AI_STRIDE_H(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_HEIGHT)
#define AI_STRIDE_W(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_WIDTH)
#define AI_STRIDE_CH(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_CHANNEL)
#define AI_STRIDE_IN_CH(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_IN_CHANNEL)
#define AI_STRIDE_D(stride) ((AI_STRIDE_SIZE((stride)) >= 5) ? AI_STRIDE_ELEM((stride), AI_SHAPE_DEPTH) : 0)
#define AI_STRIDE_E(stride) ((AI_STRIDE_SIZE((stride)) == 6) ? AI_STRIDE_ELEM((stride), AI_SHAPE_EXTENSION) : 0)
#define AI_STRIDE_T(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_TIME)
#define AI_STRIDE_SET_H(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_HEIGHT, val)
#define AI_STRIDE_SET_W(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_WIDTH, val)
#define AI_STRIDE_SET_CH(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_CHANNEL, val)
#define AI_STRIDE_SET_IN_CH(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_IN_CHANNEL, val)
#define AI_STRIDE_SET_D(stride, val) if (AI_STRIDE_SIZE((stride)) >= 5) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_DEPTH, val)
#define AI_STRIDE_SET_E(stride, val) if (AI_STRIDE_SIZE((stride)) == 6) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_EXTENSION, val)
/*! AI_TENSORS SECTION ***********************************/
#define AI_TENSOR_KLASS(tensor_) \
((tensor_) ? (tensor_)->klass : NULL)
#define AI_TENSOR_SHAPE(tensor_) \
(&((tensor_)->shape))
#define AI_TENSOR_STRIDE(tensor_) \
(&((tensor_)->stride))
#define AI_TENSOR_INFO(tensor_) \
(&((tensor_)->info))
#define AI_TENSOR_ARRAY(tensor_) \
((tensor_) ? (tensor_)->data : NULL)
#define AI_TENSOR_ID(tensor_) \
((tensor_) ? AI_TENSOR_INFO(tensor_)->id : 0)
#define AI_TENSOR_FLAGS(tensor_) \
((tensor_) ? AI_TENSOR_INFO(tensor_)->flags : 0)
#define AI_TENSOR_DATA_SIZE(tensor_) \
((tensor_) ? AI_TENSOR_INFO(tensor_)->data_size : 0)
/*! AI_OFFSETS SECTION ***********************************/
//#define AI_OFFSET_BATCH(b, stride) ((ai_ptr_offset)(b) * AI_STRIDE_BATCH(stride))
#define AI_OFFSET_H(y, stride) ((ai_ptr_offset)(y) * AI_STRIDE_H(stride))
#define AI_OFFSET_W(x, stride) ((ai_ptr_offset)(x) * AI_STRIDE_W(stride))
#define AI_OFFSET_CH(ch, stride) ((ai_ptr_offset)(ch) * AI_STRIDE_CH(stride))
#define AI_OFFSET_IN_CH(in_ch, stride) ((ai_ptr_offset)(in_ch) * \
AI_STRIDE_IN_CH(stride))
#define AI_OFFSET_D(d, stride) ((ai_ptr_offset)(d) * AI_STRIDE_D(stride))
#define AI_OFFSET_E(e, stride) ((ai_ptr_offset)(e) * AI_STRIDE_E(stride))
#define AI_OFFSET_5D(y, x, d, e, ch, stride) ( \
AI_OFFSET_H((y), (stride)) + AI_OFFSET_W((x), (stride)) + \
AI_OFFSET_D((d), (stride)) + AI_OFFSET_E((e), (stride)) + \
AI_OFFSET_CH((ch), (stride)) )
#define AI_OFFSET(y, x, ch, z, stride) ( \
AI_OFFSET_H((y), (stride)) + AI_OFFSET_W((x), (stride)) + \
AI_OFFSET_CH((ch), (stride)) + \
((AI_STRIDE_SIZE((stride)) == 4) ? AI_OFFSET_IN_CH((z), (stride)) : AI_OFFSET_D((z), (stride))) )
/*! @} */
#define AI_GET_CONV_OUT_SIZE(in_size, filt_size, pad_l, pad_r, filt_stride) \
((((in_size) - (filt_size) + (pad_l) + (pad_r)) / (filt_stride)) + 1)
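/*
 * Usage sketch (illustrative only): a 32-wide row filtered by a 3-tap kernel
 * with 1+1 padding and stride 1 keeps its width.
 *
 *   const ai_size w_out = AI_GET_CONV_OUT_SIZE(32, 3, 1, 1, 1);  // == 32
 */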
/** Tensors datatypes defines handlers ****************************************/
#define AI_TENSOR_SIZE(tensor_) \
get_tensor_size(tensor_, true)
#define AI_TENSOR_SIZE_UNPAD(tensor_) \
get_tensor_size(tensor_, false)
#define AI_TENSOR_BYTE_SIZE(tensor_) \
get_tensor_byte_size(tensor_)
/******************************************************************************/
#define AI_PLATFORM_VERSION_INIT(major_, minor_, micro_) \
{ .major = (major_), .minor = (minor_), .micro = (micro_), .reserved = 0x0 }
/** Integer tensor info extraction ********************************************/
#define AI_INTQ_INFO_LIST_SCALE_ARRAY(list_, type_) \
( ((list_) && (list_)->info) \
? ((type_*)((list_)->info->scale)) : NULL )
#define AI_INTQ_INFO_LIST_ZEROPOINT_ARRAY(list_, type_) \
( ((list_) && (list_)->info) \
? ((type_*)((list_)->info->zeropoint)) : NULL )
#define AI_KLASS_GET_INTQ_INFO_LIST(tensor_) \
((ai_intq_info_list*)((tensor_)->klass))
AI_API_DECLARE_BEGIN
/*!
* @brief Check whether 2 shapes have identical dimensions.
* @ingroup datatypes_internal
* @param shape0 the 1st tensor shape to compare
* @param shape1 the 2nd tensor shape to compare
* @return true if shape0 and shape1 have same dimensions. false otherwise
*/
AI_DECLARE_STATIC
ai_bool ai_shape_is_same(
const ai_shape* shape0, const ai_shape* shape1)
{
AI_ASSERT(shape0 && shape1)
if (AI_SHAPE_SIZE(shape0) != AI_SHAPE_SIZE(shape1))
return false;
ai_size dim = AI_SHAPE_SIZE(shape0);
while ( dim>0 ) {
dim--;
if ( AI_SHAPE_ELEM(shape0, dim)!=AI_SHAPE_ELEM(shape1, dim) )
return false;
}
return true;
}
/*!
 * @brief Check whether the shape is 1*1*1..., i.e. it holds a scalar value.
 * @ingroup datatypes_internal
 * @param shape0 the tensor shape to evaluate
 * @return true if shape0 is scalar, false otherwise
*/
AI_DECLARE_STATIC
ai_bool ai_shape_is_scalar(
const ai_shape* shape0)
{
ai_size dim = AI_SHAPE_SIZE(shape0);
while (dim>0) {
dim--;
if (AI_SHAPE_ELEM(shape0, dim) != 1)
return false;
}
return true;
}
/*!
* @brief Check if shape0 is a subshape of shape1
* @ingroup datatypes_internal
* @param shape0 the 1st tensor shape to compare
* @param shape1 the 2nd tensor shape to compare
 * @return true if shape0 is a subshape of shape1 (all shape0 dimensions are
 * smaller than or equal to the shape1 ones), false otherwise
*/
AI_DECLARE_STATIC
ai_bool ai_shape_is_subshape(
const ai_shape* shape0, const ai_shape* shape1)
{
AI_ASSERT(shape0 && shape1)
AI_ASSERT(AI_SHAPE_SIZE(shape0)==AI_SHAPE_SIZE(shape1))
ai_size dim = AI_SHAPE_SIZE(shape0);
while (dim) {
dim--;
if ( AI_SHAPE_ELEM(shape0, dim)>AI_SHAPE_ELEM(shape1, dim) )
return false;
}
return true;
}
/*!
* @brief Computes the total size of a tensor given its dimensions.
* @ingroup datatypes_internal
* @param shape the tensor shape
*/
AI_DECLARE_STATIC
ai_size ai_shape_get_size(const ai_shape* shape)
{
AI_ASSERT(shape)
ai_size dim = AI_SHAPE_SIZE(shape);
ai_size size = 1;
while (dim>0) {
dim--;
size *= AI_SHAPE_ELEM(shape, dim);
}
return size;
}
/*!
* @brief Computes the size of the input image discarding the channels.
* @ingroup datatypes_internal
* @param shape the tensor shape
*/
AI_DECLARE_STATIC
ai_size ai_shape_get_npixels(const ai_shape* shape)
{
AI_ASSERT(shape)
const ai_size npixels = AI_SHAPE_W(shape) * AI_SHAPE_H(shape);
return npixels;
}
/** APIs Section *************************************************************/
/*!
 * @brief Get packed version from major, minor, micro representation.
* @ingroup datatypes_internal
* @param major major version value
* @param minor minor version value
* @param micro micro version value
* @return a packed version info obtained serializing input values
*/
AI_INTERNAL_API
ai_version ai_version_get(const ai_u8 major, const ai_u8 minor, const ai_u8 micro);
/*!
 * @brief Get un-packed version from packed version representation.
 * @ingroup datatypes_internal
 * @param version a packed version info
* @return struct with de-serialized major, minor, micro values
*/
AI_INTERNAL_API
ai_platform_version ai_platform_version_get(const ai_version version);
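/*
 * Usage sketch (illustrative only): packing a version triplet and unpacking it
 * back into its components.
 *
 *   const ai_version packed = ai_version_get(8, 1, 0);
 *   const ai_platform_version v = ai_platform_version_get(packed);
 *   // v.major == 8, v.minor == 1, v.micro == 0
 */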
/*!
* @brief Map from ai_buffer data struct to ai_array data struct.
* @ingroup datatypes_internal
* @param buf a pointer to the ai_buffer to be mapped to ai_array
* @return an initialized @ref ai_array struct representing same data
*/
AI_INTERNAL_API
ai_array ai_from_buffer_to_array(const ai_buffer* buf);
/*!
* @brief Map from ai_array data struct to ai_buffer data struct.
* @ingroup datatypes_internal
* @param array a pointer to the ai_array to be mapped to ai_buffer
* @return an initialized @ref ai_buffer struct representing same data
*/
AI_INTERNAL_API
ai_buffer ai_from_array_to_buffer(const ai_array* array);
/*!
* @brief get the total number of elements of a n-dimensional tensor.
* @ingroup datatypes_internal
* @param t a pointer to an @ref ai_tensor
* @param with_padding when true it considers also padded elements
* @return the number of elements of the tensor (with/without padded ones)
*/
AI_INTERNAL_API
ai_size get_tensor_size(const ai_tensor* t, const ai_bool with_padding);
/*!
* @brief get the total size in bytes of elements of a n-dimensional tensor (excluding padded ones).
* @ingroup datatypes_internal
* @param t a pointer to an @ref ai_tensor
* @return the total size in bytes of elements of the tensor (excluding padded ones)
*/
AI_INTERNAL_API
ai_size get_tensor_byte_size(const ai_tensor* t);
AI_API_DECLARE_END
#endif /*AI_DATATYPES_INTERNAL_H*/
| 14,911 |
C
| 34.336493 | 133 | 0.62303 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_dw_dqnn.h
|
/**
******************************************************************************
* @file lite_dw_dqnn.h
* @author AIS
* @brief header file of AI platform lite dw kernel datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LITE_DW_DQNN_H
#define LITE_DW_DQNN_H
#pragma once
#include "ai_lite_interface.h"
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Handles 2D DW convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_dw_is1os1ws1_bn_pad0(const ai_u32 *pDataIn_init,
ai_u32 * pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold);
/*!
* @brief Handles 2D DW convolution with binary input, binary output and
* binary weights - with 0 padding (QKeras like) - Lite I/F
* - Optimized thanks to Optim3 assumptions
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_dw_is1os1ws1_bn_pad0_optim3(const ai_u32 *pDataIn_init,
ai_u32 * pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold);
/*!
 * @brief Handles 2D DW convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_dw_is1os1ws1_bn_pad1(const ai_u32 *pDataIn_init,
ai_u32 * pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold,
const ai_i32 pad_value);
/*!
 * @brief Handles 2D DW convolution with binary input, binary output and
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
* - Optimized thanks to Optim3 assumptions
* @ingroup lite_conv2d_dqnn
*/
LITE_API_ENTRY
void forward_lite_dw_is1os1ws1_bn_pad1_optim3(const ai_u32 *pDataIn_init,
ai_u32 * pDataOut_init,
const ai_u32 *pWeights_init,
ai_float *pScratch_32,
const ai_u32 n_channel_in,
const ai_u32 n_channel_out,
const ai_i32 width_in,
const ai_i32 height_in,
const ai_i32 width_out,
const ai_i32 height_out,
const ai_i32 filt_width,
const ai_i32 filt_height,
const ai_i32 filt_pad_x,
const ai_i32 filt_pad_y,
const ai_i32 filt_stride_x,
const ai_i32 filt_stride_y,
const ai_i32 *pThreshold,
const ai_i32 pad_value);
#endif /*LITE_DW_DQNN_H*/
| 6,834 |
C
| 49.629629 | 80 | 0.362891 |
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/lite_upsample_generic.h
|
/**
******************************************************************************
 * @file    lite_upsample_generic.h
 * @author  AIS
 * @brief   header file of AI platform lite upsample kernel datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LITE_UPSAMPLE_GENERIC_H
#define LITE_UPSAMPLE_GENERIC_H
#pragma once
#include "ai_lite_interface.h"
void forward_lite_upsample_generic_nearest(const ai_u8* in_data,
ai_u8* out_data,
const ai_size width_in,
const ai_size width_out,
const ai_float width_scale,
const ai_size height_out,
const ai_float height_scale,
const ai_u32 output_tensor_w_stride,
const ai_float offset_round_coeff);
void forward_lite_upsample_nearest(ai_ptr in_data,
ai_ptr out_data,
const ai_size width_in,
const ai_float width_scale,
const ai_float height_scale,
const ai_size width_out,
const ai_size height_out,
const ai_ptr_offset stride_w,
const ai_float offset_round_coeff);
void forward_lite_upsample_zeros( ai_ptr in_data,
ai_ptr out_data,
const ai_size width_in,
const ai_size height_in,
const ai_float width_scale,
const ai_float height_scale,
const ai_size width_out,
const ai_size height_out,
const ai_ptr_offset stride_ch,
const ai_ptr_offset stride_w,
const ai_handle p_zero_value);
#endif /*LITE_UPSAMPLE_GENERIC_H*/
| 2,756 |
C
| 44.196721 | 80 | 0.394049 |
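A minimal call sketch for the generic nearest-neighbour upsample declared above, assuming a 2x upscale of an 8x8 single-channel ai_u8 map. Only the signature comes from the header; the in/out direction of the scale factors, the units of the output stride and the rounding offset are assumptions.

#include "lite_upsample_generic.h"

#define W_IN   8
#define H_IN   8
#define W_OUT  16
#define H_OUT  16
#define CH     1

static ai_u8 in_map [H_IN  * W_IN  * CH];
static ai_u8 out_map[H_OUT * W_OUT * CH];

void run_upsample_nearest_example(void)
{
  forward_lite_upsample_generic_nearest(in_map, out_map,
                                        W_IN, W_OUT,
                                        (ai_float)W_IN / (ai_float)W_OUT,  /* width scale (assumed in/out ratio)  */
                                        H_OUT,
                                        (ai_float)H_IN / (ai_float)H_OUT,  /* height scale (assumed in/out ratio) */
                                        W_OUT * CH,                        /* output row stride (assumed, in elements) */
                                        0.5f);                             /* rounding offset (assumed) */
}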
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/ai_lite_interface.h
|
/**
******************************************************************************
* @file ai_lite_interface.h
* @author AST Embedded Analytics Research Platform
* @brief Definitions and implementations of runtime-lite codegen APIs
******************************************************************************
* @attention
*
* Copyright (c) 2022 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef AI_LITE_INTERFACE_H
#define AI_LITE_INTERFACE_H
#pragma once
#include "ai_platform.h"
#include "ai_lite.h"
/*****************************************************************************/
/* Generic Codegen Section */
// #ifdef HAS_LOG
#if 0
#include "core_log.h"
#define LITE_GRAPH_START(_graph_name) \
AI_LOG_DEBUG("[LITE GRAPH START] : " _graph_name)
#define LITE_GRAPH_END(_graph_name) \
AI_LOG_DEBUG("[LITE GRAPH END] : " _graph_name)
#else
#define LITE_GRAPH_START(_graph_name) \
/* LITE_GRAPH_START() */
#define LITE_GRAPH_END(_graph_name) \
/* LITE_GRAPH_END() */
#endif /* HAS_LOG */
#ifdef HAS_AI_ASSERT
#include <assert.h>
#define LITE_ASSERT(_cond) \
{ assert(_cond); }
#else
#define LITE_ASSERT(_cond) \
do { /* LITE_ASSERT() */ } while (0);
#endif /*HAS_AI_ASSERT*/
/*****************************************************************************/
#if defined(_MSC_VER)
#define LITE_DECLARE_STATIC static __inline
#define LITE_HINT_INLINE static __inline
#define LITE_FORCE_INLINE static __inline
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__)
#define LITE_DECLARE_STATIC static inline
#define LITE_HINT_INLINE static inline
#define LITE_FORCE_INLINE static inline
#elif defined(__GNUC__)
#define LITE_DECLARE_STATIC static __inline
#define LITE_HINT_INLINE static __inline
#define LITE_FORCE_INLINE static __inline
#else
#define LITE_DECLARE_STATIC static __inline
#define LITE_HINT_INLINE static __inline
#define LITE_FORCE_INLINE static __inline
#endif /* _MSC_VER */
#define LITE_API_ENTRY /* LITE_API_ENTRY */
#define LITE_PACK(...) \
__VA_ARGS__
#define LITE_UNUSED(_elem) \
((void)(_elem));
#define LITE_KERNEL_SECTION(_code_block) \
{ LITE_PACK(_code_block) }
/*****************************************************************************/
/* Arrays Section */
#define LITE_ARRAY_VALUES(...) \
{ LITE_PACK(__VA_ARGS__) }
#define LITE_ARRAY_DATA(_array, _type) \
((_type*)(_array)->data)
#define LITE_ARRAY_DATA_START(_array, _type) \
((_type*)(_array)->data_start)
/*****************************************************************************/
/* Tensors Section */
#define LITE_TENSOR_ARRAY(_tensor, _pos) \
(((_tensor)->data) + (_pos))
/*****************************************************************************/
/* Tensors List Section */
#define LITE_TENSOR_LIST(_chain, _pos) \
(&(_chain)->chain[_pos])
#define LITE_TENSOR_IN(_chain, _pos) \
(LITE_TENSOR_LIST(_chain, 0)->tensor[_pos])
#define LITE_TENSOR_OUT(_chain, _pos) \
(LITE_TENSOR_LIST(_chain, 1)->tensor[_pos])
#define LITE_TENSOR_WEIGHTS(_chain, _pos) \
(LITE_TENSOR_LIST(_chain, 2)->tensor[_pos])
#define LITE_TENSOR_SCRATCHS(_chain, _pos) \
(LITE_TENSOR_LIST(_chain, 3)->tensor[_pos])
/*****************************************************************************/
#define LITE_LAYER_ACQUIRE(name_, cast_type_, ptr_) \
LITE_ASSERT(ptr_) \
AI_CONCAT(ai_layer_, cast_type_)* name_ = \
(AI_CONCAT(ai_layer_, cast_type_)*)(ptr_);
#define LITE_LAYER_RELEASE(name_, cast_type_) \
/* LITE_LAYER_RELEASE() */
/*****************************************************************************/
AI_API_DECLARE_BEGIN
AI_API_DECLARE_END
#endif /* AI_LITE_INTERFACE_H */
| 4,226 |
C
| 28.767605 | 80 | 0.503076 |
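The macros above are meant to be pasted into hand-written or generated lite kernels. Below is a minimal sketch, not taken from the library, showing how LITE_ASSERT, LITE_HINT_INLINE and the graph markers typically compose; the kernel itself (a float ReLU) is purely illustrative.

#include "ai_lite_interface.h"

/* illustrative inlined helper */
LITE_HINT_INLINE ai_float relu_f32(const ai_float x)
{
  return (x > 0.0f) ? x : 0.0f;
}

LITE_API_ENTRY
void forward_lite_relu_example(const ai_float *in, ai_float *out, const ai_u32 n)
{
  LITE_ASSERT(in && out)
  LITE_GRAPH_START("relu_example")
  for (ai_u32 i = 0u; i < n; i++) {
    out[i] = relu_f32(in[i]);
  }
  LITE_GRAPH_END("relu_example")
}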
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_sm.h
|
/**
******************************************************************************
* @file layers_sm.h
* @author AST Embedded Analytics Research Platform
* @brief header file of AI platform non softmax layer datatype
******************************************************************************
* @attention
*
* Copyright (c) 2018 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_SM_H
#define LAYERS_SM_H
#pragma once
#include "layers_common.h"
/*!
* @defgroup layers SoftMax Layer Definitions
 * @brief Softmax layer definitions
*
*/
AI_API_DECLARE_BEGIN
/*!
* @brief Softmax normalization computed on an array of fixed point channels
* @ingroup layers_sm
* @param out opaque handler to output channel array
* @param in opaque handler to input channel array
* @param in_size total size (number of elements) to process on the input
* @param channel_size number of elements of the input channel
* @param in_channel_step number of elements to move to next input element
* @param out_channel_step number of elements to move to next output element
*/
AI_INTERNAL_API
void sm_func_sm_array_fixed(ai_handle out, const ai_handle in,
const ai_size in_size,
const ai_size channel_size,
const ai_size in_channel_step,
const ai_size out_channel_step);
/*!
* @brief Computes the activations of a fixed point softmax nonlinear layer.
* @ingroup layers_sm
* @param layer the softmax (sm) layer
*/
AI_INTERNAL_API
void forward_sm_fixed(ai_layer *pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_SM_H*/
| 2,051 |
C
| 30.56923 | 80 | 0.570453 |
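As an illustration only, the sketch below calls the fixed-point softmax primitive declared above on a hypothetical frame-major buffer of 8-bit samples. sm_func_sm_array_fixed is an internal API, and the Q-format, buffer layout and step values used here are assumptions, not documented behaviour.

#include "layers_sm.h"

#define SM_CHANNELS 10
#define SM_FRAMES   4

static ai_i8 sm_in [SM_FRAMES * SM_CHANNELS];   /* assumed Q7 inputs  */
static ai_i8 sm_out[SM_FRAMES * SM_CHANNELS];   /* assumed Q7 outputs */

void run_fixed_softmax_example(void)
{
  sm_func_sm_array_fixed((ai_handle)sm_out, (ai_handle)sm_in,
                         SM_FRAMES * SM_CHANNELS,  /* in_size          */
                         SM_CHANNELS,              /* channel_size     */
                         1,                        /* in_channel_step  */
                         1);                       /* out_channel_step */
}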
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/datatypes_network.h
|
/**
******************************************************************************
* @file datatypes_network.h
* @author AST Embedded Analytics Research Platform
* @brief Definitions of code generated network types
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef DATATYPES_NETWORK_H
#define DATATYPES_NETWORK_H
#pragma once
/*
* Header to be overriden by the generated version
* by including with <> the include directories are searched in the order
* specified in the compiler
* To enable the override, put the generated path before the API path
*/
#include "ai_platform.h"
AI_API_DECLARE_BEGIN
#ifdef AI_OVERRIDE_CUSTOM_TYPES
#warning "Warning: Custom Types have been already defined!\n"
#endif
#define AI_CUSTOM_TYPES_COUNT (3)
#define AI_CUSTOM_TYPES_SIGNATURE_DECLARE(name) \
const ai_custom_type_signature name[AI_CUSTOM_TYPES_COUNT+1] = { \
AI_CUSTOM_TYPES_COUNT, \
AI_CUSTOM_SIZE(ai_shape_dimension), \
AI_CUSTOM_SIZE(ai_stride_dimension), \
AI_CUSTOM_SIZE(ai_array_size), \
};
typedef ai_i32 ai_stride_dimension;
typedef ai_u32 ai_array_size;
AI_API_DECLARE_END
#endif /*DATATYPES_NETWORK_H*/
| 1,694 |
C
| 27.728813 | 80 | 0.579103 |
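For context, the signature macro above would typically be instantiated once in the generated network sources, roughly as sketched below; the variable name is hypothetical, and ai_custom_type_signature / AI_CUSTOM_SIZE are expected to come from ai_platform.h.

#include "datatypes_network.h"

/* hypothetical instantiation, as generated network code might emit it */
AI_CUSTOM_TYPES_SIGNATURE_DECLARE(g_example_custom_types_signature)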
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/ai_layer_custom_interface.h
|
/**
******************************************************************************
* @file ai_layer_custom_interface.h
* @author AST Embedded Analytics Research Platform
* @brief Definitions of AI platform custom layers interface APIs
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef AI_LAYER_CUSTOM_INTERFACE_H
#define AI_LAYER_CUSTOM_INTERFACE_H
#pragma once
#include "ai_platform.h"
#include "ai_platform_interface.h"
#include "layers_custom.h"
#define INTQ_SCALE_FLOAT (AI_BUFFER_META_FLAG_SCALE_FLOAT)
#define INTQ_ZEROPOINT_U8 (AI_BUFFER_META_FLAG_ZEROPOINT_U8)
#define INTQ_ZEROPOINT_S8 (AI_BUFFER_META_FLAG_ZEROPOINT_S8)
#define INTQ_ZEROPOINT_U16 (AI_BUFFER_META_FLAG_ZEROPOINT_U16)
#define INTQ_ZEROPOINT_S16 (AI_BUFFER_META_FLAG_ZEROPOINT_S16)
#define AI_TENSOR_HEIGHT (3)
#define AI_TENSOR_WIDTH (2)
#define AI_TENSOR_CHANNEL (1)
#define AI_TENSOR_IN_CHANNEL (0)
AI_API_DECLARE_BEGIN
typedef enum {
TYPE_NONE = 0x0,
TYPE_FLOAT,
TYPE_BOOL,
TYPE_INTEGER,
TYPE_SIGNED,
TYPE_UNSIGNED,
} ai_tensor_type;
typedef struct {
ai_tensor_type type;
ai_i8 bits;
ai_i8 fbits;
} ai_tensor_format;
typedef struct {
ai_u16 flags; /*!< optional flags to store intq info attributes */
  ai_u16 size; /*!< number of elements in the intq_info list */
ai_float* scale; /*!< array of scales factors */
union {
ai_u8* zeropoint_u8; /*!< array of zeropoints as unsigned */
ai_i8* zeropoint_s8; /*!< array of zeropoints as signed */
};
} ai_tensor_intq_info;
/****************************************************************************
** Layer Custom Interface APIs
****************************************************************************/
/*!
* @brief acquire the custom layer from its handle
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the custom layer
* @return a pointer to ai_layer_custom if found and valid, else NULL
*/
AI_INTERFACE_TYPE
ai_layer_custom* ai_layer_custom_get(
ai_layer* layer);
/*!
* @brief release the custom layer provided its handle
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the custom layer to release
*/
AI_INTERFACE_TYPE
void ai_layer_custom_release(
ai_layer* layer);
/*!
 * @brief get the number of input tensors of a custom layer
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the custom layer
* @return the number of input tensors of the layer. 0 if no input tensors or error
*/
AI_INTERFACE_TYPE
ai_size ai_layer_get_tensor_in_size(
const ai_layer* layer);
/*!
 * @brief get the number of output tensors of a custom layer
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the custom layer
 * @return the number of output tensors of the layer. 0 if no output tensors or error
*/
AI_INTERFACE_TYPE
ai_size ai_layer_get_tensor_out_size(
const ai_layer* layer);
/*!
* @brief get the number of weights tensors of a custom layer
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the custom layer
* @return the number of weights tensors of the layer. 0 if no weights tensors or error
*/
AI_INTERFACE_TYPE
ai_size ai_layer_get_tensor_weights_size(
const ai_layer* layer);
/*!
* @brief get the n-th (at index pos) input tensor pointer from a layer
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the layer
* @param pos the index position in the tensor list
 * @return a pointer to the tensor if found; NULL if the index is invalid or out of range
*/
AI_INTERFACE_TYPE
ai_tensor* ai_layer_get_tensor_in(
const ai_layer* layer, const ai_u16 pos);
/*!
* @brief get the n-th (at index pos) output tensor pointer from a layer
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the layer
* @param pos the index position in the tensor list
 * @return a pointer to the tensor if found; NULL if the index is invalid or out of range
*/
AI_INTERFACE_TYPE
ai_tensor* ai_layer_get_tensor_out(
const ai_layer* layer, const ai_u16 pos);
/*!
* @brief get the n-th (at index pos) weight tensor pointer from a layer
* @ingroup ai_layer_custom_interface
* @param layer an opaque handler to the layer
* @param pos the index position in the tensor list
 * @return a pointer to the tensor if found; NULL if the index is invalid or out of range
*/
AI_INTERFACE_TYPE
ai_tensor* ai_layer_get_tensor_weights(
const ai_layer* layer, const ai_u16 pos);
/**** Layer Tensors APIs ***************************************************/
/*!
 * @brief check if the tensor has integer quantization information @ref ai_tensor_intq_info
 * @ingroup ai_layer_custom_interface
 * @param tensor a pointer to the tensor
 * @return true if the tensor has integer quantization information, false otherwise
*/
AI_INTERFACE_TYPE
ai_bool ai_tensor_has_intq(
const ai_tensor* t);
/*!
 * @brief get the tensor integer quantization information @ref ai_tensor_intq_info
 * @ingroup ai_layer_custom_interface
 * @param tensor a pointer to the tensor
 * @return the integer quantization information as a struct @ref ai_tensor_intq_info
*/
AI_INTERFACE_TYPE
ai_tensor_intq_info ai_tensor_get_intq(
const ai_tensor* t);
/*!
* @brief get the format of the tensor see @ref ai_tensor_format
* @ingroup ai_layer_custom_interface
* @param tensor a pointer to the tensor
* @return the tensor format
*/
AI_INTERFACE_TYPE
ai_tensor_format ai_tensor_get_format(
const ai_tensor* t);
/**** Shapes Getters ****/
/*!
* @brief get the dimensionality of the tensor shapes
* @ingroup ai_layer_custom_interface
* @param tensor a pointer to the tensor
* @return the dimensionality of the tensor shape
*/
AI_INTERFACE_TYPE
ai_size ai_tensor_get_shape_size(
const ai_tensor* t);
/*!
 * @brief get the value of the shape dimension at index pos
 * @ingroup ai_layer_custom_interface
 * @param tensor a pointer to the tensor
 * @param pos the index of the shape dimension
 * @return the value of the shape dimension at index pos of the tensor
*/
AI_INTERFACE_TYPE
ai_shape_dimension ai_tensor_get_shape(
const ai_tensor* t, const ai_u16 pos);
/**** Strides Getters ****/
/*!
* @brief get the dimensionality of the tensor strides
* @ingroup ai_layer_custom_interface
* @param tensor a pointer to the tensor
* @return the dimensionality of the tensor strides @ref ai_stride
*/
AI_INTERFACE_TYPE
ai_size ai_tensor_get_stride_size(
const ai_tensor* t);
/*!
 * @brief get the value of the stride dimension at index pos
 * @ingroup ai_layer_custom_interface
 * @param tensor a pointer to the tensor
 * @param pos the index of the stride dimension
 * @return the value of the stride dimension at index pos of the tensor
*/
AI_INTERFACE_TYPE
ai_stride_dimension ai_tensor_get_stride(
const ai_tensor* t, const ai_u16 pos);
/**** Data Storage Getters ****/
/*!
* @brief get tensor storage data buffer pointer
* @ingroup ai_layer_custom_interface
* @param tensor a pointer to the tensor
* @return a pointer to the tensor data buffer, set to NULL if error
*/
AI_INTERFACE_TYPE
ai_any_ptr ai_tensor_get_data(
const ai_tensor* t);
/*!
* @brief get number of tensor elements
* @ingroup ai_layer_custom_interface
* @param tensor a pointer to the tensor
* @return the number of tensor elements or 0 if error
*/
AI_INTERFACE_TYPE
ai_size ai_tensor_get_data_size(
const ai_tensor* t);
/*!
* @brief get the size in bytes of the tensor data buffer
* @ingroup ai_layer_custom_interface
* @param tensor a pointer to the tensor
* @return the size in bytes of the tensor data buffer. 0 if error
*/
AI_INTERFACE_TYPE
ai_size ai_tensor_get_data_byte_size(
const ai_tensor* t);
AI_API_DECLARE_END
#endif /*AI_LAYER_CUSTOM_INTERFACE_H*/
| 8,272 |
C
| 29.985019 | 92 | 0.66308 |
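Putting a few of these getters together, the sketch below is a hypothetical forward callback for a pass-through custom layer: it looks up the first input and output tensors and copies the smaller byte span between them. The callback name and the use of the ai_any_ptr handle member are assumptions; only the getter signatures come from the header.

#include <string.h>
#include "ai_layer_custom_interface.h"

/* hypothetical pass-through forward callback for a custom layer */
void forward_custom_passthrough(ai_layer *layer)
{
  ai_tensor *t_in  = ai_layer_get_tensor_in(layer, 0);
  ai_tensor *t_out = ai_layer_get_tensor_out(layer, 0);
  if (!t_in || !t_out)
    return;

  const ai_size in_bytes  = ai_tensor_get_data_byte_size(t_in);
  const ai_size out_bytes = ai_tensor_get_data_byte_size(t_out);
  const ai_size n_bytes   = (in_bytes < out_bytes) ? in_bytes : out_bytes;

  /* ai_any_ptr is assumed to expose a generic handle member (see ai_platform.h) */
  memcpy((void *)ai_tensor_get_data(t_out).handle,
         (const void *)ai_tensor_get_data(t_in).handle,
         (size_t)n_bytes);
}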
Tbarkin121/GuardDog/stm32/TorquePoleNet/Middlewares/ST/AI/Inc/layers_pool_dqnn.h
|
/**
******************************************************************************
 * @file   layers_pool_dqnn.h
* @author AIS
* @brief header file of AI platform DQNN pool datatypes
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
@endverbatim
******************************************************************************
*/
#ifndef LAYERS_POOL_DQNN_H
#define LAYERS_POOL_DQNN_H
#pragma once
#include "layers_common.h"
#include "layers_pool.h"
/*!
* @defgroup layers_pool_dqnn Layers Definitions
 * @brief DQNN pooling layer definitions
*
*/
AI_API_DECLARE_BEGIN
/*!
* @struct ai_layer_pool_dqnn
* @ingroup layers_pool_dqnn
* @brief pool_dqnn layer
*
* @ref forward_maxpool_is1os1
*/
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pool_dqnn_ {
AI_LAYER_COMMON_FIELDS_DECLARE
ai_shape_2d pool_size; /*!< pooling size */
ai_shape_2d pool_stride; /*!< pooling stride */
ai_shape pool_pad; /*!< pooling pad, y,x border sizes */
// ai_u32 pad_value; /*!< pooling pad value */
} ai_layer_pool_dqnn;
/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/
/*!
* @brief Handles max pooling with binary input and binary output
* @ingroup layers_pool_dqnn
* @param layer conv2d_pool layer
*/
AI_INTERNAL_API
void forward_maxpool_is1os1(ai_layer *pLayer);
AI_API_DECLARE_END
#endif /*LAYERS_POOL_DQNN_H*/
| 1,996 |
C
| 26.356164 | 80 | 0.482966 |