| file_path (string, 20-202 chars) | content (string, 9-3.85M chars) | size (int64, 9-3.85M) | lang (9 classes) | avg_line_length (float64, 3.33-100) | max_line_length (int64, 8-993) | alphanum_fraction (float64, 0.26-0.93) |
|---|---|---|---|---|---|---|
| elharirymatteo/RANS/omniisaacgymenvs/ros/ros_utills.py |
import numpy as np
import os
from typing import List, Tuple
#from geometry_msgs.msg import Pose
#import rospy
def enable_ros_extension(env_var:str = "ROS_DISTRO"):
"""
Enable the ROS extension.
Args:
env_var (str): The environment variable that contains the ROS distro."""
import omni.ext
ROS_DISTRO: str = os.environ.get(env_var, "noetic")
assert ROS_DISTRO in [
"noetic",
"foxy",
"humble",
], f"${env_var} must be one of [noetic, foxy, humble]"
# Get the extension manager and list of available extensions
extension_manager = omni.kit.app.get_app().get_extension_manager()
extensions = extension_manager.get_extensions()
# Determine the selected ROS extension id
if ROS_DISTRO == "noetic":
ros_extension = [ext for ext in extensions if "ros_bridge" in ext["id"]][0]
    elif ROS_DISTRO == "humble":
ros_extension = [
ext
for ext in extensions
if "ros2_bridge" in ext["id"] and "humble" in ext["id"]
][0]
elif ROS_DISTRO == "foxy":
ros_extension = [ext for ext in extensions if "ros2_bridge" in ext["id"]][0]
# Load the ROS extension if it is not already loaded
if not extension_manager.is_extension_enabled(ros_extension["id"]):
extension_manager.set_extension_enabled_immediate(ros_extension["id"], True)
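# Usage sketch (added comment, not in the original file): enable_ros_extension()
# is meant to be called after the Isaac Sim application has been created, so that
# the `omni.kit` runtime is importable. Assuming a standalone script:
#
#   from omni.isaac.kit import SimulationApp
#   simulation_app = SimulationApp({"headless": True})
#   enable_ros_extension()  # picks ros_bridge / ros2_bridge based on $ROS_DISTRO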
def angular_velocities(q:np.ndarray, dt:np.ndarray, N:int=1) -> np.ndarray:
    """
    Calculate the angular velocities from the quaternions.
    Args:
        q (np.ndarray): The quaternions, stored as (w, x, y, z).
        dt (np.ndarray): The time difference between each quaternion.
        N (int): Keep only every N-th quaternion before differencing.
    Returns:
        np.ndarray: The angular velocities."""
q = q[0::N]
return (2 / dt) * np.array([
q[:-1,0]*q[1:,1] - q[:-1,1]*q[1:,0] - q[:-1,2]*q[1:,3] + q[:-1,3]*q[1:,2],
q[:-1,0]*q[1:,2] + q[:-1,1]*q[1:,3] - q[:-1,2]*q[1:,0] - q[:-1,3]*q[1:,1],
q[:-1,0]*q[1:,3] - q[:-1,1]*q[1:,2] + q[:-1,2]*q[1:,1] - q[:-1,3]*q[1:,0]])
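# Sanity-check sketch (assumed example, not in the original file): the three rows
# above are the vector part of 2 * conj(q_k) * q_{k+1} / dt, so a constant 1 rad/s
# rotation about z sampled at 10 Hz should recover wz ~= 1:
#
#   t = np.linspace(0.0, 1.0, 11)
#   q = np.stack([np.cos(t / 2), 0 * t, 0 * t, np.sin(t / 2)], axis=-1)
#   w = angular_velocities(q, dt=np.full(10, 0.1))  # w[2] ~= 1.0 everywhere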
def derive_velocities(time_buffer:list, pose_buffer: list) -> Tuple[np.ndarray, np.ndarray]:
"""
Derive the velocities from the pose and time buffers.
Args:
time_buffer (List[rospy.Time]): The time buffer.
pose_buffer (List[Pose]): The pose buffer.
Returns:
Tuple(np.ndarray, np.ndarray): The linear and angular velocities."""
dt = (time_buffer[-1] - time_buffer[0]).to_sec() # Time difference between first and last pose
# Calculate linear velocities
linear_positions = np.array([[pose.pose.position.x, pose.pose.position.y, pose.pose.position.z] for pose in pose_buffer])
    linear_velocities = np.diff(linear_positions, axis=0) / (dt / (len(time_buffer) - 1))  # dt spans len - 1 intervals
average_linear_velocity = np.mean(linear_velocities, axis=0)
# Calculate angular velocities
angular_orientations = np.array([[pose.pose.orientation.w, pose.pose.orientation.x, pose.pose.orientation.y, pose.pose.orientation.z] for pose in pose_buffer])
dt_buff = np.ones((angular_orientations.shape[0] - 1)) * dt / (angular_orientations.shape[0] - 1)
ang_vel = angular_velocities(angular_orientations, dt_buff)
average_angular_velocity = np.mean(ang_vel, axis=1)
return average_linear_velocity, average_angular_velocity
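# Hedged usage sketch: `time_buffer` is expected to hold rospy.Time stamps and
# `pose_buffer` geometry_msgs PoseStamped-like messages (fields are accessed as
# pose.pose.position / pose.pose.orientation), matching the commented imports above.
#
#   lin_vel, ang_vel = derive_velocities(time_buffer, pose_buffer)
#   # lin_vel: mean (vx, vy, vz); ang_vel: mean (wx, wy, wz) over the buffer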
| 3,312 | Python | 37.97647 | 163 | 0.626208 |
| elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP2D_Virtual_Dock_RGBD.py |
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.MFP2D_thrusters import (
ModularFloatingPlatform,
)
from omniisaacgymenvs.robots.sensors.exteroceptive.camera import (
camera_factory,
)
from omniisaacgymenvs.robots.articulations.views.MFP2D_view import (
ModularFloatingPlatformView,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_thruster_generator import (
VirtualPlatform,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_factory import (
task_factory,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
EnvironmentPenalties,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
Disturbances,
)
from omniisaacgymenvs.robots.articulations.utils.MFP_utils import *
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from typing import Dict, List, Tuple
from gym import spaces
import numpy as np
import wandb
import omni
import time
import math
import torch
from torchvision.utils import make_grid
from torchvision.transforms.functional import to_pil_image as ToPILImage
import os
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class MFP2DVirtual_Dock_RGBD(RLTask):
"""
The main class used to run tasks on the floating platform.
    Unlike other classes in this repo, this class can be used to run different tasks.
    The idea is to extend it to multitask RL in the future."""
def __init__(
self,
name: str, # name of the Task
sim_config, # SimConfig instance for parsing cfg
env, # env instance of VecEnvBase or inherited class
offset=None, # transform offset in World
) -> None:
# parse configurations, set task-specific members
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._platform_cfg = self._task_cfg["env"]["platform"]
self._dock_cfg = self._task_cfg["env"]["dock"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self._discrete_actions = self._task_cfg["env"]["action_mode"]
self._device = self._cfg["sim_device"]
self.step = 0
self.iteration = 0
self.save_image_counter = 0
# Split the maximum amount of thrust across all thrusters.
self.split_thrust = self._task_cfg["env"]["split_thrust"]
# Collects the platform parameters
self.dt = self._task_cfg["sim"]["dt"]
# Collects the task parameters
task_cfg = self._task_cfg["env"]["task_parameters"]
reward_cfg = self._task_cfg["env"]["reward_parameters"]
penalty_cfg = self._task_cfg["env"]["penalties_parameters"]
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Instantiate the task, reward and platform
self.task = task_factory.get(task_cfg, reward_cfg, self._num_envs, self._device)
self._penalties = EnvironmentPenalties(**penalty_cfg)
self.virtual_platform = VirtualPlatform(
self._num_envs, self._platform_cfg, self._device
)
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
self._num_observations = self.task._num_observations
self._max_actions = self.virtual_platform._max_thrusters
self._num_actions = self.virtual_platform._max_thrusters
RLTask.__init__(self, name, env)
# Instantiate the action and observations spaces
self.set_action_and_observation_spaces()
# Sets the initial positions of the target and platform
self._fp_position = torch.tensor([0, 0.0, 0.5])
self._default_marker_position = torch.tensor([0, 0, 0.45])
self._dock_view = None
# Preallocate tensors
self.actions = torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float32,
)
self.heading = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self.all_indices = torch.arange(
self._num_envs, dtype=torch.int32, device=self._device
)
# Extra info
self.extras = {}
self.extras_wandb = {}
# Episode statistics
self.episode_sums = self.task.create_stats({})
self.add_stats(self._penalties.get_stats_name())
self.add_stats(["normed_linear_vel", "normed_angular_vel", "actions_sum"])
return
def set_action_and_observation_spaces(self) -> None:
"""
Sets the action and observation spaces."""
# Defines the observation space
self.observation_space = spaces.Dict(
{
"state": spaces.Box(
np.ones(self._num_observations) * -np.Inf,
np.ones(self._num_observations) * np.Inf,
),
"transforms": spaces.Box(low=-1, high=1, shape=(self._max_actions, 5)),
"masks": spaces.Box(low=0, high=1, shape=(self._max_actions,)),
"masses": spaces.Box(low=-np.inf, high=np.inf, shape=(3,)),
"rgb": spaces.Box(
np.ones(
(
3,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* -np.Inf,
np.ones(
(
3,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* np.Inf,
),
"depth": spaces.Box(
np.ones(
(
1,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* -np.Inf,
np.ones(
(
1,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
)
)
* np.Inf,
),
}
)
# Defines the action space
if self._discrete_actions == "MultiDiscrete":
# RLGames implementation of MultiDiscrete action space requires a tuple of Discrete spaces
self.action_space = spaces.Tuple([spaces.Discrete(2)] * self._max_actions)
elif self._discrete_actions == "Continuous":
pass
elif self._discrete_actions == "Discrete":
raise NotImplementedError("The Discrete control mode is not supported.")
else:
raise NotImplementedError(
"The requested discrete action type is not supported."
)
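    # Shape sketch (added comment; assumes the RLCamera "resolution" entry is
    # [width, height], as indexed above):
    #   obs["rgb"]: (3, H, W)   obs["depth"]: (1, H, W)
    #   obs["transforms"]: (max_actions, 5)   obs["masks"]: (max_actions,)
    #   obs["masses"]: (3,) -- presumably mass plus the planar CoM offset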
def add_stats(self, names: List[str]) -> None:
"""
Adds training statistics to be recorded during training.
Args:
names (List[str]): list of names of the statistics to be recorded."""
for name in names:
torch_zeros = lambda: torch.zeros(
self._num_envs,
dtype=torch.float,
device=self._device,
requires_grad=False,
)
            if name not in self.episode_sums:
self.episode_sums[name] = torch_zeros()
def cleanup(self) -> None:
"""
Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = {
"state": torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float,
),
"transforms": torch.zeros(
(self._num_envs, self._max_actions, 5),
device=self._device,
dtype=torch.float,
),
"masks": torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float,
),
"masses": torch.zeros(
(self._num_envs, 3),
device=self._device,
dtype=torch.float,
),
"rgb": torch.zeros(
(
self._num_envs,
3,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
),
device=self._device,
dtype=torch.float,
),
"depth": torch.zeros(
(
self._num_envs,
1,
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][-1],
self._task_cfg["env"]["sensors"]["camera"]["RLCamera"][
"resolution"
][0],
),
device=self._device,
dtype=torch.float,
),
}
self.states_buf = torch.zeros(
(self._num_envs, self._num_states), device=self._device, dtype=torch.float
)
self.rew_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.float
)
self.reset_buf = torch.ones(
self._num_envs, device=self._device, dtype=torch.long
)
self.progress_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.long
)
self.extras = {}
self.extras_wandb = {}
def set_up_scene(self, scene) -> None:
"""
Sets up the USD scene inside Omniverse for the task.
Args:
scene (Usd.Stage): the USD scene to be set up."""
# Add the floating platform, and the marker
self.get_floating_platform()
self.get_target()
if self._task_cfg["sim"].get("add_lab", False):
self.get_zero_g_lab()
RLTask.set_up_scene(self, scene, replicate_physics=False)
# Collects the interactive elements in the scene
root_path = "/World/envs/.*/Modular_floating_platform"
self._platforms = ModularFloatingPlatformView(
prim_paths_expr=root_path,
name="modular_floating_platform_view",
track_contact_force=True,
)
# Add views to scene
scene.add(self._platforms)
scene.add(self._platforms.base)
scene.add(self._platforms.thrusters)
# Add rigidprim view of docking station to the scene
scene, self._dock_view = self.task.add_dock_to_scene(scene)
# Link replicator to existing onboard cameras
self.collect_camera()
return
def get_floating_platform(self):
"""
Adds the floating platform to the scene."""
fp = ModularFloatingPlatform(
prim_path=self.default_zero_env_path + "/Modular_floating_platform",
name="modular_floating_platform",
translation=self._fp_position,
cfg=self._platform_cfg,
)
self._sim_config.apply_articulation_settings(
"modular_floating_platform",
get_prim_at_path(fp.prim_path),
self._sim_config.parse_actor_config("modular_floating_platform"),
)
def get_target(self) -> None:
"""
Adds the visualization target to the scene."""
self.task.generate_target(
self.default_zero_env_path,
self._default_marker_position,
self._dock_cfg,
)
def get_zero_g_lab(self) -> None:
"""
Adds the Zero-G-lab to the scene."""
usd_path = os.path.join(os.getcwd(), self._task_cfg["lab_usd_path"])
prim = add_reference_to_stage(usd_path, self._task_cfg["lab_path"])
applyCollider(prim, True)
def collect_camera(self) -> None:
"""
Collect active cameras to generate synthetic images in batch."""
active_sensors = []
active_camera_source_path = self._task_cfg["env"]["sensors"]["RLCamera"][
"prim_path"
]
for i in range(self._num_envs):
# swap env_0 to env_i
sensor_path = active_camera_source_path.split("/")
sensor_path[3] = f"env_{i}"
self._task_cfg["env"]["sensors"]["RLCamera"]["prim_path"] = "/".join(
sensor_path
)
rl_sensor = camera_factory.get("RLCamera")(
self._task_cfg["env"]["sensors"]["RLCamera"],
self.rep,
)
active_sensors.append(rl_sensor)
self.active_sensors = active_sensors
def update_state(self) -> None:
"""
Updates the state of the system."""
# Collects the position and orientation of the platform
self.root_pos, self.root_quats = self._platforms.base.get_world_poses(
clone=True
)
# Remove the offset from the different environments
root_positions = self.root_pos - self._env_pos
# Collects the velocity of the platform
self.root_velocities = self._platforms.base.get_velocities(clone=True)
root_velocities = self.root_velocities.clone()
# Cast quaternion to Yaw
siny_cosp = 2 * (
self.root_quats[:, 0] * self.root_quats[:, 3]
+ self.root_quats[:, 1] * self.root_quats[:, 2]
)
cosy_cosp = 1 - 2 * (
self.root_quats[:, 2] * self.root_quats[:, 2]
+ self.root_quats[:, 3] * self.root_quats[:, 3]
)
orient_z = torch.arctan2(siny_cosp, cosy_cosp)
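        # (Added note: this is the standard quaternion-to-yaw extraction,
        #  yaw = atan2(2(w*z + x*y), 1 - 2(y^2 + z^2)), quaternions as (w, x, y, z).)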
# Add noise on obs
root_positions = self.DR.noisy_observations.add_noise_on_pos(
root_positions, step=self.step
)
root_velocities = self.DR.noisy_observations.add_noise_on_vel(
root_velocities, step=self.step
)
orient_z = self.DR.noisy_observations.add_noise_on_heading(
orient_z, step=self.step
)
# Compute the heading
self.heading[:, 0] = torch.cos(orient_z)
self.heading[:, 1] = torch.sin(orient_z)
# Update goal pose
self.update_goal_state()
# Update FP contact state
net_contact_forces = self.compute_contact_forces()
# Dump to state
self.current_state = {
"position": root_positions[:, :2],
"orientation": self.heading,
"linear_velocity": root_velocities[:, :2],
"angular_velocity": root_velocities[:, -1],
"net_contact_forces": net_contact_forces,
}
def update_goal_state(self) -> None:
"""
Updates the goal state of the task."""
target_positions, target_orientations = self._dock_view.base.get_world_poses(
clone=True
)
self.task.set_goals(
self.all_indices.long(),
target_positions - self._env_pos,
target_orientations,
self.step,
)
def compute_contact_forces(self) -> torch.Tensor:
"""
Get the contact forces of the platform.
Returns:
net_contact_forces_norm (torch.Tensor): the norm of the net contact forces.
"""
net_contact_forces = self._platforms.base.get_net_contact_forces(clone=False)
return torch.norm(net_contact_forces, dim=-1)
def get_observations(self) -> Dict[str, torch.Tensor]:
"""
Gets the observations of the task to be passed to the policy.
Returns:
observations: a dictionary containing the observations of the task."""
# implement logic to retrieve observation states
self.update_state()
# Get the state
self.obs_buf["state"] = self.task.get_state_observations(self.current_state)
# Get thruster transforms
self.obs_buf["transforms"] = self.virtual_platform.current_transforms
# Get the action masks
self.obs_buf["masks"] = self.virtual_platform.action_masks
self.obs_buf["masses"] = self.DR.mass_disturbances.get_masses_and_com()
# Get the camera data
rgb_obs, depth_obs = self.get_rgbd_data()
self.obs_buf["rgb"] = rgb_obs
self.obs_buf["depth"] = depth_obs
if (
self._task_cfg["env"]["sensors"]["save_to_log"]
and self._cfg["wandb_activate"]
):
if (
self.save_image_counter
% self._task_cfg["env"]["sensors"]["save_frequency"]
== 0
):
rgb_grid = ToPILImage(make_grid(rgb_obs, nrow=5))
depth_grid = ToPILImage(make_grid(depth_obs, nrow=5))
wandb.log(
{
"rgb": wandb.Image(rgb_grid, caption="rgb"),
"depth": wandb.Image(depth_grid, caption="depth"),
}
)
self.save_image_counter += 1
observations = {self._platforms.name: {"obs_buf": self.obs_buf}}
return observations
def get_rgbd_data(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""
        Returns batched RGB-D data.
Returns:
rgb (torch.Tensor): batched rgb data
depth (torch.Tensor): batched depth data
"""
rs_obs = [sensor.get_observation() for sensor in self.active_sensors]
rgb = torch.stack([ob["rgb"] for ob in rs_obs]).to(self._device)
depth = torch.stack([ob["depth"] for ob in rs_obs]).to(self._device)
rgb = self.DR.noisy_rgb_images.add_noise_on_image(rgb, step=self.step)
depth = self.DR.noisy_depth_images.add_noise_on_image(depth, step=self.step)
return rgb, depth
def pre_physics_step(self, actions: torch.Tensor) -> None:
"""
This function implements the logic to be performed before physics steps.
Args:
actions (torch.Tensor): the actions to be applied to the platform."""
# If is not playing skip
if not self._env._world.is_playing():
return
# Check which environment need to be reset
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
# Reset the environments (Robots)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# Collect actions
actions = actions.clone().to(self._device)
self.actions = actions
# Remap actions to the correct values
if self._discrete_actions == "MultiDiscrete":
# If actions are multidiscrete [0, 1]
thrust_cmds = self.actions.float()
elif self._discrete_actions == "Continuous":
# Transform continuous actions to [0, 1] discrete actions.
thrust_cmds = torch.clamp((self.actions + 1) / 2, min=0.0, max=1.0)
else:
            raise NotImplementedError("Only MultiDiscrete and Continuous action modes are supported.")
# Applies the thrust multiplier
thrusts = self.virtual_platform.thruster_cfg.thrust_force * thrust_cmds
# Adds random noise on the actions
thrusts = self.DR.noisy_actions.add_noise_on_act(thrusts, step=self.step)
# clear actions for reset envs
thrusts[reset_env_ids] = 0
# If split thrust, equally shares the maximum amount of thrust across thrusters.
if self.split_thrust:
factor = torch.max(
torch.sum(self.actions, -1),
torch.ones((self._num_envs), dtype=torch.float32, device=self._device),
)
self.positions, self.forces = self.virtual_platform.project_forces(
thrusts / factor.view(self._num_envs, 1)
)
else:
self.positions, self.forces = self.virtual_platform.project_forces(thrusts)
return
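    # Split-thrust sketch (added comment): with split_thrust enabled and, say,
    # 4 thrusters commanded on, each force is divided by max(sum(actions), 1) = 4,
    # so the summed commanded thrust stays bounded by a single thrust_force budget.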
def apply_forces(self) -> None:
"""
Applies all the forces to the platform and its thrusters."""
# Applies actions from the thrusters
self._platforms.thrusters.apply_forces_and_torques_at_pos(
forces=self.forces, positions=self.positions, is_global=False
)
# Applies the domain randomization
floor_forces = self.DR.force_disturbances.get_force_disturbance(self.root_pos)
torque_disturbance = self.DR.torque_disturbances.get_torque_disturbance(
self.root_pos
)
self._platforms.base.apply_forces_and_torques_at_pos(
forces=floor_forces,
torques=torque_disturbance,
positions=self.root_pos,
is_global=True,
)
def post_reset(self):
"""
This function implements the logic to be performed after a reset."""
# implement any logic required for simulation on-start here
self.root_pos, self.root_rot = self._platforms.get_world_poses()
self.root_velocities = self._platforms.get_velocities()
self._platforms.get_CoM_indices()
self._platforms.get_plane_lock_indices()
self._dock_view.get_plane_lock_indices()
self.initial_root_pos, self.initial_root_rot = (
self.root_pos.clone(),
self.root_rot.clone(),
)
self.initial_pin_pos = self._env_pos
self.initial_pin_rot = torch.zeros(
(self._num_envs, 4), dtype=torch.float32, device=self._device
)
self.initial_pin_rot[:, 0] = 1
# control parameters
self.thrusts = torch.zeros(
(self._num_envs, self._max_actions, 3),
dtype=torch.float32,
device=self._device,
)
self.set_targets(self.all_indices)
def set_targets(self, env_ids: torch.Tensor):
"""
Sets the targets for the task.
Args:
env_ids (torch.Tensor): the indices of the environments for which to set the targets.
"""
num_resets = len(env_ids)
env_long = env_ids.long()
# Randomizes the position and orientation of the dock on the x y axis
target_positions, target_orientation = self.task.get_goals(
env_long,
self.step,
)
siny_cosp = 2 * target_orientation[:, 0] * target_orientation[:, 3]
cosy_cosp = 1 - 2 * (target_orientation[:, 3] * target_orientation[:, 3])
h = torch.arctan2(siny_cosp, cosy_cosp)
# apply resets
dof_pos = torch.zeros(
(num_resets, self._dock_view.num_dof), device=self._device
)
dof_pos[:, self._dock_view.lock_indices[0]] = target_positions[:, 0]
dof_pos[:, self._dock_view.lock_indices[1]] = target_positions[:, 1]
dof_pos[:, self._dock_view.lock_indices[2]] = h
self._dock_view.set_joint_positions(dof_pos, indices=env_ids)
dof_vel = torch.zeros(
(num_resets, self._dock_view.num_dof), device=self._device
)
dof_vel[:, self._dock_view.lock_indices[0]] = 0.0
dof_vel[:, self._dock_view.lock_indices[1]] = 0.0
dof_vel[:, self._dock_view.lock_indices[2]] = 0.0
self._dock_view.set_joint_velocities(dof_vel, indices=env_ids)
def reset_idx(self, env_ids: torch.Tensor) -> None:
"""
Resets the environments with the given indices.
Args:
env_ids (torch.Tensor): the indices of the environments to be reset."""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.task.reset(env_ids)
self.set_targets(env_ids)
self.virtual_platform.randomize_thruster_state(env_ids, num_resets)
self.DR.force_disturbances.generate_forces(env_ids, num_resets, step=self.step)
self.DR.torque_disturbances.generate_torques(
env_ids, num_resets, step=self.step
)
self.DR.mass_disturbances.randomize_masses(env_ids, step=self.step)
CoM_shift = self.DR.mass_disturbances.get_CoM(env_ids)
random_mass = self.DR.mass_disturbances.get_masses(env_ids)
# Randomizes the starting position of the platform
pos, quat, vel = self.task.get_initial_conditions(env_ids, step=self.step)
siny_cosp = 2 * quat[:, 0] * quat[:, 3]
cosy_cosp = 1 - 2 * (quat[:, 3] * quat[:, 3])
h = torch.arctan2(siny_cosp, cosy_cosp)
# Randomizes mass of the dock
if hasattr(self.task._task_parameters, "spawn_dock_mass_curriculum"):
mass = self.task.get_dock_masses(env_ids, step=self.step)
self._dock_view.base.set_masses(mass, indices=env_ids)
# apply joint resets
dof_pos = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
# self._platforms.CoM.set_masses(random_mass, indices=env_ids)
dof_pos[:, self._platforms.lock_indices[0]] = pos[:, 0]
dof_pos[:, self._platforms.lock_indices[1]] = pos[:, 1]
dof_pos[:, self._platforms.lock_indices[2]] = h
dof_pos[:, self._platforms.CoM_shifter_indices[0]] = CoM_shift[:, 0]
dof_pos[:, self._platforms.CoM_shifter_indices[1]] = CoM_shift[:, 1]
self._platforms.set_joint_positions(dof_pos, indices=env_ids)
dof_vel = torch.zeros(
(num_resets, self._platforms.num_dof), device=self._device
)
dof_vel[:, self._platforms.lock_indices[0]] = vel[:, 0]
dof_vel[:, self._platforms.lock_indices[1]] = vel[:, 1]
dof_vel[:, self._platforms.lock_indices[2]] = vel[:, 5]
self._platforms.set_joint_velocities(dof_vel, indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
# fill `extras`
self.extras["episode"] = {}
self.extras_wandb = {}
for key in self.episode_sums.keys():
value = (
torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
)
if key in self._penalties.get_stats_name():
self.extras_wandb[key] = value
elif key in self.task.log_with_wandb:
self.extras_wandb[key] = value
else:
self.extras["episode"][key] = value
self.episode_sums[key][env_ids] = 0.0
def update_state_statistics(self) -> None:
"""
Updates the statistics of the state of the training."""
self.episode_sums["normed_linear_vel"] += torch.norm(
self.current_state["linear_velocity"], dim=-1
)
self.episode_sums["normed_angular_vel"] += torch.abs(
self.current_state["angular_velocity"]
)
self.episode_sums["actions_sum"] += torch.sum(self.actions, dim=-1)
def calculate_metrics(self) -> None:
"""
Calculates the metrics of the training.
        That is the rewards, penalties, and other performance statistics."""
reward = self.task.compute_reward(self.current_state, self.actions)
self.iteration += 1
self.step += 1 / self._task_cfg["env"]["horizon_length"]
penalties = self._penalties.compute_penalty(
self.current_state, self.actions, self.step
)
self.rew_buf[:] = reward - penalties
self.episode_sums = self.task.update_statistics(self.episode_sums)
self.episode_sums = self._penalties.update_statistics(self.episode_sums)
        if self.iteration % self._task_cfg["env"]["horizon_length"] == 0:
self.extras_wandb["wandb_step"] = int(self.step)
for key, value in self._penalties.get_logs().items():
self.extras_wandb[key] = value
for key, value in self.task.get_logs(self.step).items():
self.extras_wandb[key] = value
for key, value in self.DR.get_logs(self.step).items():
self.extras_wandb[key] = value
if self._cfg["wandb_activate"]:
wandb.log(self.extras_wandb)
self.extras_wandb = {}
self.update_state_statistics()
def is_done(self) -> None:
"""
Checks if the episode is done."""
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = self.task.update_kills()
# resets due to episode length
self.reset_buf[:] = torch.where(
self.progress_buf >= self._max_episode_length - 1, ones, die
)
| 30,179 | Python | 37.791774 | 102 | 0.549322 |
| elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP3D_Virtual.py |
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.MFP3D_thrusters import (
ModularFloatingPlatform,
)
from omniisaacgymenvs.robots.articulations.views.MFP3D_view import (
ModularFloatingPlatformView,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_thruster_generator import (
VirtualPlatform,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_factory import (
task_factory,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_penalties import (
EnvironmentPenalties,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_disturbances import (
Disturbances,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
quat_to_mat,
)
from omniisaacgymenvs.tasks.MFP2D_Virtual import MFP2DVirtual
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from typing import Dict, List, Tuple
from gym import spaces
import numpy as np
import wandb
import torch
import omni
import time
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class MFP3DVirtual(MFP2DVirtual):
"""
The main class used to run tasks on the floating platform.
    Unlike other classes in this repo, this class can be used to run different tasks.
    The idea is to extend it to multitask RL in the future."""
def __init__(
self,
name: str, # name of the Task
sim_config, # SimConfig instance for parsing cfg
env, # env instance of VecEnvBase or inherited class
offset=None, # transform offset in World
) -> None:
# parse configurations, set task-specific members
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._platform_cfg = self._task_cfg["env"]["platform"]
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self._discrete_actions = self._task_cfg["env"]["action_mode"]
self._device = self._cfg["sim_device"]
self.iteration = 0
self.step = 0
# Split the maximum amount of thrust across all thrusters.
self.split_thrust = self._task_cfg["env"]["split_thrust"]
# Collects the platform parameters
self.dt = self._task_cfg["sim"]["dt"]
# Collects the task parameters
task_cfg = self._task_cfg["env"]["task_parameters"]
reward_cfg = self._task_cfg["env"]["reward_parameters"]
penalty_cfg = self._task_cfg["env"]["penalties_parameters"]
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Instantiate the task, reward and platform
self.task = task_factory.get(task_cfg, reward_cfg, self._num_envs, self._device)
self._penalties = EnvironmentPenalties(**penalty_cfg)
self.virtual_platform = VirtualPlatform(
self._num_envs, self._platform_cfg, self._device
)
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
self._num_observations = self.task._num_observations
self._max_actions = self.virtual_platform._max_thrusters
self._num_actions = self.virtual_platform._max_thrusters
RLTask.__init__(self, name, env)
# Instantiate the action and observations spaces
self.set_action_and_observation_spaces()
# Sets the initial positions of the target and platform
self._fp_position = torch.tensor([0.0, 0.0, 0.5])
self._default_marker_position = torch.tensor([0.0, 0.0, 0.0])
self._marker = None
# Preallocate tensors
self.actions = torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float32,
)
self.heading = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self.all_indices = torch.arange(
self._num_envs, dtype=torch.int32, device=self._device
)
# Extra info
self.extras = {}
self.extras_wandb = {}
# Episode statistics
self.episode_sums = self.task.create_stats({})
self.add_stats(self._penalties.get_stats_name())
self.add_stats(["normed_linear_vel", "normed_angular_vel", "actions_sum"])
return
def set_action_and_observation_spaces(self) -> None:
"""
Sets the action and observation spaces.
"""
# Defines the observation space
self.observation_space = spaces.Dict(
{
"state": spaces.Box(
np.ones(self._num_observations) * -np.Inf,
np.ones(self._num_observations) * np.Inf,
),
"transforms": spaces.Box(low=-1, high=1, shape=(self._max_actions, 10)),
"masks": spaces.Box(low=0, high=1, shape=(self._max_actions,)),
"masses": spaces.Box(low=-np.inf, high=np.inf, shape=(4,)),
}
)
# Defines the action space
if self._discrete_actions == "MultiDiscrete":
# RLGames implementation of MultiDiscrete action space requires a tuple of Discrete spaces
self.action_space = spaces.Tuple([spaces.Discrete(2)] * self._max_actions)
elif self._discrete_actions == "Continuous":
pass
elif self._discrete_actions == "Discrete":
raise NotImplementedError("The Discrete control mode is not supported.")
else:
raise NotImplementedError(
"The requested discrete action type is not supported."
)
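    # (Added note: compared to the 2D task, thruster transforms are 10-dimensional
    #  here -- presumably a flattened 3D rotation plus translation -- and "masses"
    #  is (4,): mass plus a 3D center-of-mass offset.)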
def cleanup(self) -> None:
"""
Prepares torch buffers for RL data collection.
"""
# prepare tensors
self.obs_buf = {
"state": torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float,
),
"transforms": torch.zeros(
(self._num_envs, self._max_actions, 10),
device=self._device,
dtype=torch.float,
),
"masks": torch.zeros(
(self._num_envs, self._max_actions),
device=self._device,
dtype=torch.float,
),
"masses": torch.zeros(
(self._num_envs, 4),
device=self._device,
dtype=torch.float,
),
}
self.states_buf = torch.zeros(
(self._num_envs, self._num_states), device=self._device, dtype=torch.float
)
self.rew_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.float
)
self.reset_buf = torch.ones(
self._num_envs, device=self._device, dtype=torch.long
)
self.progress_buf = torch.zeros(
self._num_envs, device=self._device, dtype=torch.long
)
self.extras = {}
def set_up_scene(self, scene) -> None:
"""
Sets up the USD scene inside Omniverse for the task.
Args:
            scene (Usd.Stage): The USD stage to set up.
"""
# Add the floating platform, and the marker
self.get_floating_platform()
self.get_target()
RLTask.set_up_scene(self, scene)
# Collects the interactive elements in the scene
root_path = "/World/envs/.*/Modular_floating_platform"
self._platforms = ModularFloatingPlatformView(
prim_paths_expr=root_path, name="modular_floating_platform_view"
)
# Add views to scene
scene.add(self._platforms)
scene.add(self._platforms.base)
scene.add(self._platforms.thrusters)
# Add arrows to scene if task is go to pose
scene, self._marker = self.task.add_visual_marker_to_scene(scene)
return
def get_floating_platform(self):
"""
Adds the floating platform to the scene.
"""
self._fp = ModularFloatingPlatform(
prim_path=self.default_zero_env_path + "/Modular_floating_platform",
name="modular_floating_platform",
translation=self._fp_position,
cfg=self._platform_cfg,
)
self._sim_config.apply_articulation_settings(
"modular_floating_platform",
get_prim_at_path(self._fp.prim_path),
self._sim_config.parse_actor_config("modular_floating_platform"),
)
def update_state(self) -> None:
"""
Updates the state of the system.
"""
# Collects the position and orientation of the platform
self.root_pos, self.root_quats = self._platforms.get_world_poses(clone=True)
# Remove the offset from the different environments
root_positions = self.root_pos - self._env_pos
# Collects the velocity of the platform
self.root_velocities = self._platforms.get_velocities(clone=True)
root_velocities = self.root_velocities.clone()
# Add noise on obs
root_positions = self.DR.noisy_observations.add_noise_on_pos(
root_positions, step=self.step
)
root_velocities = self.DR.noisy_observations.add_noise_on_vel(
root_velocities, step=self.step
)
# Compute the heading
heading = quat_to_mat(self.root_quats)
# Dump to state
self.current_state = {
"position": root_positions,
"orientation": heading,
"linear_velocity": root_velocities[:, :3],
"angular_velocity": root_velocities[:, 3:],
}
def set_targets(self, env_ids: torch.Tensor) -> None:
"""
Sets the targets for the task.
Args:
env_ids: The indices of the environments to set the targets for."""
env_long = env_ids.long()
# Randomizes the position of the ball on the x y z axes
target_positions, target_orientation = self.task.get_goals(
env_long,
self.initial_pin_pos.clone(),
self.initial_pin_rot.clone(),
step=self.step,
)
# Apply the new goals
if self._marker:
self._marker.set_world_poses(
target_positions[env_long],
target_orientation[env_long],
indices=env_long,
)
def update_state_statistics(self) -> None:
"""
Updates the statistics of the state of the training."""
self.episode_sums["normed_linear_vel"] += torch.norm(
self.current_state["linear_velocity"], dim=-1
)
self.episode_sums["normed_angular_vel"] += torch.norm(
self.current_state["angular_velocity"], dim=-1
)
self.episode_sums["actions_sum"] += torch.sum(self.actions, dim=-1)
| 11,320 | Python | 35.519355 | 102 | 0.593286 |
| elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_go_through_xy_seq.py |
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoThroughXYSequenceReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoThroughXYSequenceParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.pin import VisualPin
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import colorsys
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoThroughXYSequenceTask(Core):
"""
    Implements the GoThroughXYSequence task. The robot has to reach a sequence of points in the
    2D plane at a given velocity, and it must do so while looking at the target. Unlike the
    GoThroughXY task, which has a single target, this task chains several waypoints.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoThroughXYSequence task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(GoThroughXYSequenceTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = GoThroughXYSequenceParameters(**task_param)
self._reward_parameters = GoThroughXYSequenceReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._all = torch.arange(self._num_envs, device=self._device)
self._trajectory_completed = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, self._task_parameters.num_points, 2),
device=self._device,
dtype=torch.float32,
)
self._target_index = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.long
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._target_velocities = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._delta_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._previous_position_dist = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 5
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
        Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "progress_reward" in stats.keys():
stats["progress_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "linear_velocity_reward" in stats.keys():
stats["linear_velocity_reward"] = torch_zeros()
if not "linear_velocity_error" in stats.keys():
stats["linear_velocity_error"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
            if name not in stats:
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = (
self._target_positions[self._all, self._target_index]
- current_state["position"]
).squeeze()
# linear velocity error (normed velocity)
self.linear_velocity_err = self._target_velocities - torch.norm(
current_state["linear_velocity"], dim=-1
)
# heading distance
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
# Compute target heading as the angle required to be looking at the target
self._target_headings = torch.arctan2(
self._position_error[:, 1], self._position_error[:, 0]
)
self._heading_error = torch.arctan2(
torch.sin(self._target_headings - heading),
torch.cos(self._target_headings - heading),
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
self._task_data[:, 4] = self.linear_velocity_err
# position of the other points in the sequence
for i in range(self._task_parameters.num_points - 1):
overflowing = self._target_index + i + 1 >= self._task_parameters.num_points
indices = self._target_index + (i + 1) * (1 - overflowing.int())
self._task_data[:, 5 + 2 * i : 5 + 2 * i + 2] = (
self._target_positions[self._all, indices] - current_state["position"]
) * (1 - overflowing.int()).view(-1, 1)
return self.update_observation_tensor(current_state)
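        # Observation layout sketch (added comment, derived from the code above):
        #   task_data[:, 0:2] = (dx, dy) to the active waypoint
        #   task_data[:, 2:4] = (cos, sin) of the heading error
        #   task_data[:, 4]   = normed linear-velocity error
        #   task_data[:, 5:]  = (dx, dy) to each remaining waypoint, zeroed once
        #                       the sequence index would overflow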
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# Compute progress and normalize by the target velocity
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.abs(self.linear_velocity_err)
position_progress = (
self._previous_position_dist - self.position_dist
) / torch.abs(self._target_velocities)
was_killed = (self._previous_position_dist == 0).float()
position_progress = position_progress * (1 - was_killed)
# Heading
self.heading_dist = torch.abs(self._heading_error)
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
goal_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
reached_ids = goal_reached.nonzero(as_tuple=False).squeeze(-1)
# if the goal is reached, the target index is updated
self._target_index = self._target_index + goal_reached
self._trajectory_completed = (
self._target_index >= self._task_parameters.num_points
).int()
# rewards
(
self.progress_reward,
self.heading_reward,
self.linear_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
position_progress,
self.heading_dist,
self.linear_velocity_dist,
)
self._previous_position_dist = self.position_dist.clone()
# If goal is reached make next progress null
self._previous_position_dist[reached_ids] = 0
return (
self.progress_reward
+ self.heading_reward
+ self.linear_velocity_reward
- self.boundary_penalty
- self._reward_parameters.time_penalty
+ self._trajectory_completed * self._reward_parameters.terminal_reward
)
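        # Worked example (added comment, assumed values): closing 0.05 m on the
        # waypoint in one step with a 0.25 m/s target speed gives
        # position_progress = 0.05 / 0.25 = 0.2; freshly reset envs (previous
        # distance == 0) are masked out so they add no spurious progress.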
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._trajectory_completed, dtype=torch.long)
ones = torch.ones_like(self._trajectory_completed, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(self._trajectory_completed > 0, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
stats["progress_reward"] += self.progress_reward
stats["heading_reward"] += self.heading_reward
stats["linear_velocity_reward"] += self.linear_velocity_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["linear_velocity_error"] += self.linear_velocity_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._trajectory_completed[env_ids] = 0
self._target_index[env_ids] = 0
self._previous_position_dist[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
for i in range(self._task_parameters.num_points):
if i == 0:
self._target_positions[env_ids, i] = (
torch.rand((num_goals, 2), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
else:
r = self._spawn_position_sampler.sample(
num_goals, step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
point = torch.zeros((num_goals, 2), device=self._device)
point[:, 0] = r * torch.cos(theta)
point[:, 1] = r * torch.sin(theta)
self._target_positions[env_ids, i] = (
self._target_positions[env_ids, i - 1] + point
)
# Randomize heading
self._delta_headings[env_ids] = self._spawn_heading_sampler.sample(
num_goals, step, device=self._device
)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
self._target_velocities[env_ids] = r
# Creates tensors to save position and orientation
p = torch.zeros(
(num_goals, self._task_parameters.num_points, 3), device=self._device
)
q = torch.zeros(
(num_goals, self._task_parameters.num_points, 4),
device=self._device,
dtype=torch.float32,
)
q[:, :, 0] = 1
p[:, :, :2] = self._target_positions[env_ids]
p[:, :, 2] = 2
return p, q
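        # Goal-generation sketch (added comment): waypoint 0 is sampled uniformly
        # inside goal_random_position; each subsequent waypoint is offset from its
        # predecessor by a curriculum-sampled radius r at a uniform angle theta.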
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) + self._target_positions[env_ids, 0, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) + self._target_positions[env_ids, 0, 1]
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
target_position_local = (
self._target_positions[env_ids, 0, :2] - initial_position[:, :2]
)
target_heading = torch.arctan2(
target_position_local[:, 1], target_position_local[:, 0]
)
theta = target_heading + self._delta_headings[env_ids]
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
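        # Spawn sketch (added comment): the platform starts on a ring of
        # curriculum-sampled radius around the first waypoint and is yawed to face
        # it plus a sampled offset; the yaw quaternion is (cos(t/2), 0, 0, sin(t/2)).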
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
An arrow is generated to represent the 3DoF pose to be reached by the agent.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the arrow.
"""
for i in range(self._task_parameters.num_points):
color = torch.tensor(
colorsys.hsv_to_rgb(i / self._task_parameters.num_points, 1, 1)
)
ball_radius = 0.2
poll_radius = 0.025
poll_length = 2
VisualPin(
prim_path=path + "/pin_" + str(i),
translation=position,
name="target_" + str(i),
ball_radius=ball_radius,
poll_radius=poll_radius,
poll_length=poll_length,
color=color,
)
def add_visual_marker_to_scene(
self, scene: Usd.Stage
) -> Tuple[Usd.Stage, XFormPrimView]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, XFormPrimView]: The scene and the visual marker.
"""
pins = XFormPrimView(prim_paths_expr="/World/envs/.*/pin_[0-5]")
scene.add(pins)
return scene, pins
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Resets the counter of steps for which the goal was reached
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step=step, device=self._device
)
r = r.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8), sharey=True)
ax.hist(r, bins=32)
ax.set_title("Target normed linear velocity")
ax.set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax.set_xlabel("vel (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
| 23,063 | Python | 35.32126 | 106 | 0.580454 |
| elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_task_rewards.py |
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import torch
from dataclasses import dataclass
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class GoToXYZReward:
""" "
Reward function and parameters for the GoToXY task."""
name: str = "GoToXYZ"
reward_mode: str = "linear"
exponential_reward_coeff: float = 0.25
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
position_error: torch.Tensor,
) -> torch.Tensor:
"""
        Defines the function used to compute the reward for the GoToXYZ task."""
if self.reward_mode.lower() == "linear":
position_reward = 1.0 / (1.0 + position_error)
elif self.reward_mode.lower() == "square":
position_reward = 1.0 / (1.0 + position_error * position_error)
elif self.reward_mode.lower() == "exponential":
position_reward = torch.exp(-position_error / self.exponential_reward_coeff)
else:
raise ValueError("Unknown reward type.")
return position_reward
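    # Worked example (added comment, assumed position_error = 1.0, coeff = 0.25):
    #   linear      -> 1 / (1 + 1.0)    = 0.5
    #   square      -> 1 / (1 + 1.0**2) = 0.5
    #   exponential -> exp(-1.0 / 0.25) ~= 0.018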
@dataclass
class GoToPoseReward:
"""
Reward function and parameters for the GoToPose task."""
name: str = "GoToPose"
position_reward_mode: str = "linear"
heading_reward_mode: str = "linear"
position_exponential_reward_coeff: float = 0.25
heading_exponential_reward_coeff: float = 0.25
position_scale: float = 1.0
heading_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.position_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.heading_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state,
actions: torch.Tensor,
position_error: torch.Tensor,
heading_error: torch.Tensor,
) -> None:
"""
Defines the function used to compute the reward for the GoToPose task."""
if self.position_reward_mode.lower() == "linear":
position_reward = 1.0 / (1.0 + position_error) * self.position_scale
        elif self.position_reward_mode.lower() == "square":
            position_reward = (
                1.0 / (1.0 + position_error * position_error) * self.position_scale
            )
elif self.position_reward_mode.lower() == "exponential":
position_reward = (
torch.exp(-position_error / self.position_exponential_reward_coeff)
* self.position_scale
)
else:
raise ValueError("Unknown reward type.")
if self.heading_reward_mode.lower() == "linear":
heading_reward = 1.0 / (1.0 + heading_error) * self.heading_scale
        elif self.heading_reward_mode.lower() == "square":
            heading_reward = (
                1.0 / (1.0 + heading_error * heading_error) * self.heading_scale
            )
elif self.heading_reward_mode.lower() == "exponential":
heading_reward = (
torch.exp(-heading_error / self.heading_exponential_reward_coeff)
* self.heading_scale
)
else:
raise ValueError("Unknown reward type.")
return position_reward, heading_reward
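# Illustrative usage sketch (added for clarity, not part of the original
# module): the tasks using GoToPoseReward typically sum the two returned
# terms. The scales and error values below are arbitrary examples.
def _demo_goto_pose_reward() -> None:
    reward_fn = GoToPoseReward(position_scale=1.0, heading_scale=0.5)
    position_error = torch.tensor([0.1, 1.0])
    heading_error = torch.tensor([0.05, 3.0])
    position_reward, heading_reward = reward_fn.compute_reward(
        None, None, position_error, heading_error
    )
    print(position_reward + heading_reward)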
@dataclass
class TrackXYZVelocityReward:
"""
    Reward function and parameters for the TrackXYZVelocity task."""
name: str = "TrackXYZVelocity"
reward_mode: str = "linear"
exponential_reward_coeff: float = 0.25
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
velocity_error: torch.Tensor,
    ) -> torch.Tensor:
"""
        Defines the function used to compute the reward for the TrackXYZVelocity task."""
if self.reward_mode.lower() == "linear":
velocity_reward = 1.0 / (1.0 + velocity_error)
elif self.reward_mode.lower() == "square":
velocity_reward = 1.0 / (1.0 + velocity_error * velocity_error)
elif self.reward_mode.lower() == "exponential":
velocity_reward = torch.exp(-velocity_error / self.exponential_reward_coeff)
else:
raise ValueError("Unknown reward type.")
return velocity_reward
@dataclass
class Track6DoFVelocityReward:
"""
    Reward function and parameters for the Track6DoFVelocity task."""
name: str = "Track6DVelocity"
linear_reward_mode: str = "linear"
angular_reward_mode: str = "linear"
linear_exponential_reward_coeff: float = 0.25
angular_exponential_reward_coeff: float = 0.25
linear_scale: float = 1.0
angular_scale: float = 1.0
def __post_init__(self) -> None:
"""
Checks that the reward parameters are valid."""
assert self.linear_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
assert self.angular_reward_mode.lower() in [
"linear",
"square",
"exponential",
], "Linear, Square and Exponential are the only currently supported mode."
def compute_reward(
self,
current_state,
actions: torch.Tensor,
linear_velocity_error: torch.Tensor,
angular_velocity_error: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""
        Defines the function used to compute the reward for the Track6DoFVelocity task.
"""
if self.linear_reward_mode.lower() == "linear":
linear_reward = 1.0 / (1.0 + linear_velocity_error) * self.linear_scale
        elif self.linear_reward_mode.lower() == "square":
            linear_reward = (
                1.0
                / (1.0 + linear_velocity_error * linear_velocity_error)
                * self.linear_scale
            )
elif self.linear_reward_mode.lower() == "exponential":
linear_reward = (
torch.exp(-linear_velocity_error / self.linear_exponential_reward_coeff)
* self.linear_scale
)
else:
raise ValueError("Unknown reward type.")
if self.angular_reward_mode.lower() == "linear":
angular_reward = 1.0 / (1.0 + angular_velocity_error) * self.angular_scale
        elif self.angular_reward_mode.lower() == "square":
            angular_reward = (
                1.0
                / (1.0 + angular_velocity_error * angular_velocity_error)
                * self.angular_scale
            )
elif self.angular_reward_mode.lower() == "exponential":
angular_reward = (
torch.exp(
-angular_velocity_error / self.angular_exponential_reward_coeff
)
* self.angular_scale
)
else:
raise ValueError("Unknown reward type.")
return linear_reward, angular_reward
| 7,793 | Python | 33.794643 | 88 | 0.589632 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_disturbances_parameters.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumParameters,
)
@dataclass
class MassDistributionDisturbanceParameters:
"""
This class provides an interface to adjust the hyperparameters of the mass distribution disturbance.
"""
mass_curriculum: CurriculumParameters = field(default_factory=dict)
com_curriculum: CurriculumParameters = field(default_factory=dict)
enable: bool = False
def __post_init__(self):
self.mass_curriculum = CurriculumParameters(**self.mass_curriculum)
self.com_curriculum = CurriculumParameters(**self.com_curriculum)
@dataclass
class ForceDisturbanceParameters:
"""
This class provides an interface to adjust the hyperparameters of the force disturbance.
"""
force_curriculum: CurriculumParameters = field(default_factory=dict)
use_sinusoidal_patterns: bool = False
min_freq: float = 0.1
max_freq: float = 5.0
min_offset: float = 0.0
max_offset: float = 1.0
enable: bool = False
def __post_init__(self):
self.force_curriculum = CurriculumParameters(**self.force_curriculum)
assert self.min_freq > 0, "The minimum frequency must be positive."
assert self.max_freq > 0, "The maximum frequency must be positive."
assert (
self.max_freq > self.min_freq
), "The maximum frequency must be larger than the minimum frequency."
@dataclass
class TorqueDisturbanceParameters:
"""
    This class provides an interface to adjust the hyperparameters of the torque disturbance.
"""
torque_curriculum: CurriculumParameters = field(default_factory=dict)
use_sinusoidal_patterns: bool = False
min_freq: float = 0.1
max_freq: float = 5.0
min_offset: float = 0.0
max_offset: float = 1.0
enable: bool = False
def __post_init__(self):
self.torque_curriculum = CurriculumParameters(**self.torque_curriculum)
assert self.min_freq > 0, "The minimum frequency must be positive."
assert self.max_freq > 0, "The maximum frequency must be positive."
assert (
self.max_freq > self.min_freq
), "The maximum frequency must be larger than the minimum frequency."
@dataclass
class NoisyObservationsParameters:
"""
This class provides an interface to adjust the hyperparameters of the observation noise.
"""
position_curriculum: CurriculumParameters = field(default_factory=dict)
velocity_curriculum: CurriculumParameters = field(default_factory=dict)
orientation_curriculum: CurriculumParameters = field(default_factory=dict)
enable_position_noise: bool = False
enable_velocity_noise: bool = False
enable_orientation_noise: bool = False
def __post_init__(self):
self.position_curriculum = CurriculumParameters(**self.position_curriculum)
self.velocity_curriculum = CurriculumParameters(**self.velocity_curriculum)
self.orientation_curriculum = CurriculumParameters(
**self.orientation_curriculum
)
@dataclass
class NoisyActionsParameters:
"""
This class provides an interface to adjust the hyperparameters of the action noise.
"""
action_curriculum: CurriculumParameters = field(default_factory=dict)
enable: bool = False
def __post_init__(self):
self.action_curriculum = CurriculumParameters(**self.action_curriculum)
@dataclass
class NoisyImagesParameters:
"""
    This class provides an interface to adjust the hyperparameters of the image noise.
"""
image_curriculum: CurriculumParameters = field(default_factory=dict)
enable: bool = False
    modality: str = "rgb"
def __post_init__(self):
self.image_curriculum = CurriculumParameters(**self.image_curriculum)
@dataclass
class DisturbancesParameters:
"""
Collection of disturbances.
"""
mass_disturbance: MassDistributionDisturbanceParameters = field(
default_factory=dict
)
force_disturbance: ForceDisturbanceParameters = field(default_factory=dict)
torque_disturbance: TorqueDisturbanceParameters = field(default_factory=dict)
observations_disturbance: NoisyObservationsParameters = field(default_factory=dict)
actions_disturbance: NoisyActionsParameters = field(default_factory=dict)
rgb_disturbance: NoisyImagesParameters = field(default_factory=dict)
depth_disturbance: NoisyImagesParameters = field(default_factory=dict)
def __post_init__(self):
self.mass_disturbance = MassDistributionDisturbanceParameters(
**self.mass_disturbance
)
self.force_disturbance = ForceDisturbanceParameters(**self.force_disturbance)
self.torque_disturbance = TorqueDisturbanceParameters(**self.torque_disturbance)
self.observations_disturbance = NoisyObservationsParameters(
**self.observations_disturbance
)
self.actions_disturbance = NoisyActionsParameters(**self.actions_disturbance)
self.rgb_disturbance = NoisyImagesParameters(**self.rgb_disturbance)
self.depth_disturbance = NoisyImagesParameters(**self.depth_disturbance)
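# Illustrative sketch (added for clarity, not part of the original module) of
# the nested construction pattern used above: each dataclass consumes a plain
# dict, e.g. parsed from YAML, and promotes its sub-dicts to typed dataclasses
# in __post_init__. `_Inner` and `_Outer` are hypothetical stand-ins.
@dataclass
class _Inner:
    gain: float = 1.0
@dataclass
class _Outer:
    inner: _Inner = field(default_factory=dict)
    def __post_init__(self):
        self.inner = _Inner(**self.inner)
assert _Outer(**{"inner": {"gain": 2.0}}).inner.gain == 2.0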
| 5,477 | Python | 35.039473 | 104 | 0.710973 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_disturbances.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances_parameters import (
DisturbancesParameters,
MassDistributionDisturbanceParameters,
ForceDisturbanceParameters,
TorqueDisturbanceParameters,
NoisyObservationsParameters,
NoisyActionsParameters,
NoisyImagesParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
import omni
class MassDistributionDisturbances:
"""
Creates disturbances on the platform by simulating a mass distribution on the
platform.
"""
def __init__(
self,
parameters: MassDistributionDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (MassDistributionDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.mass_sampler = CurriculumSampler(parameters.mass_curriculum)
self.CoM_sampler = CurriculumSampler(parameters.com_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
self.instantiate_buffers()
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the mass disturbances.
"""
self.platforms_mass = (
torch.ones((self._num_envs, 1), device=self._device, dtype=torch.float32)
* self.mass_sampler.get_min()
)
self.platforms_CoM = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
def randomize_masses(self, env_ids: torch.Tensor, step: int = 0) -> None:
"""
Randomizes the masses of the platforms.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
step (int): The current step of the learning process.
"""
if self.parameters.enable:
num_resets = len(env_ids)
self.platforms_mass[env_ids, 0] = self.mass_sampler.sample(
num_resets, step, device=self._device
)
r = self.CoM_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
self.platforms_CoM[env_ids, 0] = torch.cos(theta) * r
self.platforms_CoM[env_ids, 1] = torch.sin(theta) * r
def get_masses(
self,
env_ids: torch.Tensor,
) -> torch.Tensor:
"""
        Returns the masses of the platforms.
        Args:
            env_ids (torch.Tensor): The ids of the environments to reset.
        Returns:
            torch.Tensor: The masses of the platforms.
"""
return self.platforms_mass[:, 0]
def get_masses_and_com(
self,
    ) -> torch.Tensor:
        """
        Returns the masses and CoM of the platforms concatenated into one tensor.
        Returns:
            torch.Tensor: The masses and CoM of the platforms, shape (num_envs, 3).
"""
        return torch.cat((self.platforms_mass, self.platforms_CoM), dim=1)
def get_CoM(self, env_ids: torch.Tensor) -> torch.Tensor:
"""
Returns the CoM of the platforms.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
Returns:
torch.Tensor: The CoM of the platforms.
"""
return self.platforms_CoM[env_ids]
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
mass = self.platforms_mass.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(mass, bins=32)
ax.set_title("Mass disturbance")
ax.set_xlim(
self.mass_sampler.get_min_bound(), self.mass_sampler.get_max_bound()
)
ax.set_xlabel("mass (Kg)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/mass_disturbance"] = wandb.Image(data)
if self.parameters.enable:
            com = torch.norm(self.platforms_CoM.cpu(), dim=-1).numpy().flatten()
            fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(com, bins=32)
ax.set_title("CoM disturbance")
ax.set_xlim(
self.CoM_sampler.get_min_bound(), self.CoM_sampler.get_max_bound()
)
ax.set_xlabel("Displacement (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/CoM_disturbance"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/mass_disturbance_rate"] = self.mass_sampler.get_rate(step)
dict["disturbance/CoM_disturbance_rate"] = self.CoM_sampler.get_rate(step)
return dict
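# Minimal standalone sketch (added for clarity, not part of the original
# module) of the polar sampling used in randomize_masses above: a radius and a
# uniform angle give an isotropic CoM offset. The 0.1 m cap is an arbitrary
# example value.
def _demo_polar_com_sampling(num: int = 4) -> torch.Tensor:
    r = torch.rand(num) * 0.1
    theta = torch.rand(num) * 2 * math.pi
    return torch.stack((torch.cos(theta) * r, torch.sin(theta) * r), dim=-1)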
class ForceDisturbance:
"""
Creates disturbances by applying random forces.
"""
def __init__(
self,
parameters: ForceDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (ForceDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.parameters = parameters
self.force_sampler = CurriculumSampler(self.parameters.force_curriculum)
self._num_envs = num_envs
self._device = device
self.instantiate_buffers()
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the force disturbances.
"""
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_x_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_forces = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.forces = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_forces(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the forces using a sinusoidal pattern or not.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_y_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_x_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._floor_y_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_forces[env_ids] = self.force_sampler.sample(
num_resets, step, device=self._device
)
else:
r = self.force_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
self.forces[env_ids, 0] = torch.cos(theta) * r
self.forces[env_ids, 1] = torch.sin(theta) * r
def get_force_disturbance(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the forces given the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The floor forces.
"""
if self.parameters.use_sinusoidal_patterns:
self.forces[:, 0] = (
torch.sin(root_pos[:, 0] * self._floor_x_freq + self._floor_x_offset)
* self._max_forces
)
self.forces[:, 1] = (
torch.sin(root_pos[:, 1] * self._floor_y_freq + self._floor_y_offset)
* self._max_forces
)
return self.forces
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
force = self.force_sampler.sample(self._num_envs, step, device=self._device)
force = force.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(force, bins=32)
ax.set_title("Force disturbance")
ax.set_xlim(0, self.force_sampler.get_max_bound())
ax.set_xlabel("force (N)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/force_disturbance"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/force_disturbance_rate"] = self.force_sampler.get_rate(
step
)
return dict
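# Minimal standalone sketch (added for clarity, not part of the original
# module) of the sinusoidal disturbance field used above: the force along each
# axis depends only on the platform position along that axis. The frequency,
# offset and 0.5 N amplitude are illustrative values.
def _demo_sinusoidal_force(root_pos: torch.Tensor) -> torch.Tensor:
    freq, offset, max_force = 1.0, 0.3, 0.5
    forces = torch.zeros((root_pos.shape[0], 3))
    forces[:, 0] = torch.sin(root_pos[:, 0] * freq + offset) * max_force
    forces[:, 1] = torch.sin(root_pos[:, 1] * freq + offset) * max_force
    return forces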
class TorqueDisturbance:
"""
Creates disturbances by applying a torque to its center.
"""
def __init__(
self,
parameters: TorqueDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (TorqueDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.parameters = parameters
self.torque_sampler = CurriculumSampler(self.parameters.torque_curriculum)
self._num_envs = num_envs
self._device = device
self.instantiate_buffers()
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the torque disturbances.
"""
if self.parameters.use_sinusoidal_patterns:
self._torque_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_torques = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.torques = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_torques(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the torque disturbance.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current step of the training. Default to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
                # reuse the floor (force) disturbance's min/max frequencies and offsets
self._torque_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_torques[env_ids] = self.torque_sampler.sample(
num_resets, step, device=self._device
)
else:
self.torques[env_ids, 2] = self.torque_sampler.sample(
num_resets, step, device=self._device
)
def get_torque_disturbance(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the torques given the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The torque disturbance."""
if self.parameters.use_sinusoidal_patterns:
self.torques[:, 2] = (
torch.sin(root_pos * self._torque_freq + self._torque_offset)
* self._max_torques
)
return self.torques
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
torque = self.torque_sampler.sample(
self._num_envs, step, device=self._device
)
torque = torque.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(torque, bins=32)
ax.set_title("Torque disturbance")
ax.set_xlim(
self.torque_sampler.get_min_bound(), self.torque_sampler.get_max_bound()
)
ax.set_xlabel("torque (Nm)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/torque_disturbance"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/torque_disturbance_rate"] = self.torque_sampler.get_rate(
step
)
return dict
class NoisyObservations:
"""
Adds noise to the observations of the robot.
"""
def __init__(
self,
parameters: NoisyObservationsParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
            parameters (NoisyObservationsParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.position_sampler = CurriculumSampler(parameters.position_curriculum)
self.velocity_sampler = CurriculumSampler(parameters.velocity_curriculum)
self.orientation_sampler = CurriculumSampler(parameters.orientation_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
def add_noise_on_pos(self, pos: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the position of the robot.
Args:
pos (torch.Tensor): The position of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The position of the robot with noise.
"""
if self.parameters.enable_position_noise:
self.pos_shape = pos.shape
pos += self.position_sampler.sample(
self._num_envs * pos.shape[1], step, device=self._device
).reshape(-1, self.pos_shape[1])
return pos
def add_noise_on_vel(self, vel: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the velocity of the robot.
Args:
vel (torch.Tensor): The velocity of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The velocity of the robot with noise.
"""
if self.parameters.enable_velocity_noise:
self.vel_shape = vel.shape
vel += self.velocity_sampler.sample(
self._num_envs * vel.shape[1], step, device=self._device
).reshape(-1, self.vel_shape[1])
return vel
def add_noise_on_heading(
self, heading: torch.Tensor, step: int = 0
) -> torch.Tensor:
"""
Adds noise to the heading of the robot.
Args:
heading (torch.Tensor): The heading of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The heading of the robot with noise.
"""
if self.parameters.enable_orientation_noise:
heading += self.orientation_sampler.sample(
self._num_envs, step, device=self._device
)
return heading
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable_position_noise:
position = self.position_sampler.sample(
self._num_envs * self.pos_shape[1], step, device=self._device
)
position = position.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(position, bins=32)
ax.set_title("Position noise")
ax.set_xlim(
self.position_sampler.get_min_bound(),
self.position_sampler.get_max_bound(),
)
ax.set_xlabel("noise (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/position_noise"] = wandb.Image(data)
if self.parameters.enable_velocity_noise:
velocity = self.velocity_sampler.sample(
self._num_envs * self.vel_shape[1], step, device=self._device
)
velocity = velocity.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(velocity, bins=32)
ax.set_title("Velocity noise")
ax.set_xlim(
self.velocity_sampler.get_min_bound(),
                self.velocity_sampler.get_max_bound(),
)
ax.set_xlabel("noise (m/s)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/velocity_noise"] = wandb.Image(data)
if self.parameters.enable_orientation_noise:
orientation = self.orientation_sampler.sample(
self._num_envs, step, device=self._device
)
orientation = orientation.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(orientation, bins=32)
ax.set_title("Orientation noise")
ax.set_xlim(
self.orientation_sampler.get_min_bound(),
self.orientation_sampler.get_max_bound(),
)
ax.set_xlabel("noise (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/orientation_noise"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable_position_noise:
dict["disturbance/position_disturbance_rate"] = (
self.position_sampler.get_rate(step)
)
if self.parameters.enable_velocity_noise:
dict["disturbance/velocity_disturbance_rate"] = (
self.velocity_sampler.get_rate(step)
)
if self.parameters.enable_orientation_noise:
dict["disturbance/orientation_disturbance_rate"] = (
self.orientation_sampler.get_rate(step)
)
return dict
class NoisyActions:
"""
Adds noise to the actions of the robot."""
def __init__(
self,
parameters: NoisyActionsParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
            parameters (NoisyActionsParameters): The task configuration.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.action_sampler = CurriculumSampler(parameters.action_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
def add_noise_on_act(self, act: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the actions of the robot.
Args:
act (torch.Tensor): The actions of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The actions of the robot with noise.
"""
if self.parameters.enable:
self.shape = act.shape
act += self.action_sampler.sample(
self._num_envs * act.shape[1], step, device=self._device
).reshape(-1, self.shape[1])
return act
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
action = self.action_sampler.sample(
self._num_envs * self.shape[1], step, device=self._device
).reshape(-1, self.shape[1])
action = action.cpu().numpy().flatten()
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.hist(action, bins=32)
ax.set_title("Action noise")
ax.set_xlim(
self.action_sampler.get_min_bound(), self.action_sampler.get_max_bound()
)
ax.set_xlabel("noise (N)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["disturbance/action_noise"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict["disturbance/action_disturbance_rate"] = self.action_sampler.get_rate(
step
)
return dict
class NoisyImages:
"""
    Adds noise to the image observations of the robot."""
def __init__(
self,
parameters: NoisyImagesParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
            parameters (NoisyImagesParameters): The task configuration.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self.image_sampler = CurriculumSampler(parameters.image_curriculum)
self.parameters = parameters
self._num_envs = num_envs
self._device = device
def add_noise_on_image(self, image: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
        Adds noise to the image observations of the robot.
Args:
image (torch.Tensor): The image observation of the robot. Shape is (num_envs, channel, height, width).
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The image observation of the robot with noise.
"""
if self.parameters.enable:
self.shape = image.shape
image += self.image_sampler.sample(
self._num_envs * self.shape[1] * self.shape[2] * self.shape[3], step, device=self._device
).reshape(-1, self.shape[1], self.shape[2], self.shape[3])
return image
def get_image_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
image = self.image_sampler.sample(
self._num_envs * self.shape[1] * self.shape[2] * self.shape[3], step, device=self._device
).reshape(-1, self.shape[1], self.shape[2], self.shape[3])
image = image.squeeze().cpu().numpy()[0]
fig, ax = plt.subplots(1, 1, dpi=100, figsize=(8, 8), sharey=True)
ax.imshow(image)
ax.set_title("Action noise")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict[f"disturbance/{self.parameters.modality}_noise"] = wandb.Image(data)
return dict
def get_scalar_logs(self, step: int) -> dict:
"""
Logs the current state of the disturbances.
Args:
step (int): The current step of the learning process.
Returns:
dict: The logged data.
"""
dict = {}
if self.parameters.enable:
dict[f"disturbance/{self.parameters.modality}_disturbance_rate"] = self.image_sampler.get_rate(
step
)
return dict
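# Minimal standalone sketch (added for clarity, not part of the original
# module) of the per-pixel noise injection above: one scalar is sampled per
# pixel and reshaped back to (num_envs, C, H, W). Gaussian noise stands in for
# the configured curriculum sampler here.
def _demo_image_noise(image: torch.Tensor) -> torch.Tensor:
    n, c, h, w = image.shape
    noise = torch.randn(n * c * h * w, device=image.device).reshape(n, c, h, w)
    return image + noise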
class Disturbances:
"""
Class to create disturbances on the platform.
"""
def __init__(
self,
parameters: dict,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
self.parameters = DisturbancesParameters(**parameters)
self.mass_disturbances = MassDistributionDisturbances(
self.parameters.mass_disturbance,
num_envs,
device,
)
self.force_disturbances = ForceDisturbance(
self.parameters.force_disturbance,
num_envs,
device,
)
self.torque_disturbances = TorqueDisturbance(
self.parameters.torque_disturbance,
num_envs,
device,
)
self.noisy_observations = NoisyObservations(
self.parameters.observations_disturbance,
num_envs,
device,
)
self.noisy_actions = NoisyActions(
self.parameters.actions_disturbance,
num_envs,
device,
)
self.noisy_rgb_images = NoisyImages(
self.parameters.rgb_disturbance,
num_envs,
device
)
self.noisy_depth_images = NoisyImages(
self.parameters.depth_disturbance,
num_envs,
device
)
def get_logs(self, step: int) -> dict:
"""
Collects logs for all the disturbances.
Args:
step (int): The current training step.
Returns:
dict: The logs for all used disturbances.
"""
dict = {}
dict = {**dict, **self.mass_disturbances.get_scalar_logs(step)}
dict = {**dict, **self.force_disturbances.get_scalar_logs(step)}
dict = {**dict, **self.torque_disturbances.get_scalar_logs(step)}
dict = {**dict, **self.noisy_observations.get_scalar_logs(step)}
dict = {**dict, **self.noisy_actions.get_scalar_logs(step)}
if step % 50 == 0:
dict = {**dict, **self.mass_disturbances.get_image_logs(step)}
dict = {**dict, **self.force_disturbances.get_image_logs(step)}
dict = {**dict, **self.torque_disturbances.get_image_logs(step)}
dict = {**dict, **self.noisy_observations.get_image_logs(step)}
dict = {**dict, **self.noisy_actions.get_image_logs(step)}
return dict
| 32,339 | Python | 32.757829 | 114 | 0.551192 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_task_parameters.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
BoundaryPenalty,
)
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class GoToXYZParameters:
"""
    Parameters for the GoToXYZ task.
"""
name: str = "GoToXYZ"
position_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoToPoseParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToPose"
position_tolerance: float = 0.01
orientation_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.orientation_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class TrackXYZVelocityParameters:
"""
    Parameters for the TrackXYZVelocity task.
"""
name: str = "TrackXYZVelocity"
lin_vel_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_velocity: float = 0.75
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_velocity >= 0, "Goal random velocity must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class Track6DoFVelocityParameters:
"""
    Parameters for the Track6DoFVelocity task.
"""
name: str = "Track6DoFVelocity"
lin_vel_tolerance: float = 0.01
ang_vel_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_linear_velocity: float = 0.75
    goal_random_angular_velocity: float = 1.0
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
target_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.ang_vel_tolerance > 0
), "Angular velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert (
self.goal_random_linear_velocity >= 0
), "Goal random linear velocity must be positive."
assert (
self.goal_random_angular_velocity >= 0
), "Goal random angular velocity must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.target_angular_velocity_curriculum = CurriculumParameters(
**self.target_angular_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
| 7,201 | Python | 35.744898 | 88 | 0.669768 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_close_proximity_dock.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoToPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
CloseProximityDockParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.dock import Dock, DockView
from omni.isaac.core.articulations import ArticulationView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class CloseProximityDockTask(Core):
"""
Implements the CloseProximityDock task. The robot has to reach a target position and heading.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
super(CloseProximityDockTask, self).__init__(num_envs, device)
# Observation buffers
        self._dim_task_data = 4  # data to be used to fulfill the task (floats) [6:10]
self._num_observations = 10
self._obs_buffer = torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float32,
)
self._task_label = torch.ones(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_data = torch.zeros(
(self._num_envs, self._dim_task_data),
device=self._device,
dtype=torch.float32,
)
# Task and reward parameters
self._task_parameters = CloseProximityDockParameters(**task_param)
self._reward_parameters = GoToPoseReward(**reward_param)
# Curriculum samplers
self._fp_footprint_diameter_sampler = CurriculumSampler(
self._task_parameters.fp_footprint_diameter_curriculum
)
self._spawn_dock_mass_sampler = CurriculumSampler(
self._task_parameters.spawn_dock_mass_curriculum
)
self._spawn_dock_space_sampler = CurriculumSampler(
self._task_parameters.spawn_dock_space_curriculum
)
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self.spawn_relative_angle_sampler = CurriculumSampler(
self._task_parameters.spawn_relative_angle_curriculum
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._anchor_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_positions = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_orientations = torch.zeros(
(self._num_envs, 4), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.relative_angle = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._goal_headings = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 6
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._obs_buffer[:, 0:2] = current_state["orientation"]
self._obs_buffer[:, 2:4] = current_state["linear_velocity"]
self._obs_buffer[:, 4] = current_state["angular_velocity"]
self._obs_buffer[:, 5] = self._task_label
self._obs_buffer[:, 6:10] = self._task_data
return self._obs_buffer
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "position_reward" in stats.keys():
stats["position_reward"] = torch_zeros()
if not "position_error" in stats.keys():
stats["position_error"] = torch_zeros()
if not "heading_reward" in stats.keys():
stats["heading_reward"] = torch_zeros()
if not "heading_error" in stats.keys():
stats["heading_error"] = torch_zeros()
if not "boundary_dist" in stats.keys():
stats["boundary_dist"] = torch_zeros()
self.log_with_wandb = []
self.log_with_wandb += self._task_parameters.boundary_penalty.get_stats_name()
self.log_with_wandb += self._task_parameters.relative_angle_penalty.get_stats_name()
self.log_with_wandb += self._task_parameters.contact_penalty.get_stats_name()
for name in self._task_parameters.boundary_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
for name in self._task_parameters.relative_angle_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
for name in self._task_parameters.contact_penalty.get_stats_name():
if not name in stats.keys():
stats[name] = torch_zeros()
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = self._target_positions - current_state["position"]
# heading distance
self._anchor_positions = self._target_positions.clone()
self._anchor_positions[:, 0] += self._task_parameters.goal_to_penalty_anchor_dist * torch.cos(self._target_headings)
self._anchor_positions[:, 1] += self._task_parameters.goal_to_penalty_anchor_dist * torch.sin(self._target_headings)
self._goal_headings = torch.atan2(
(self._anchor_positions - current_state["position"])[:, 1],
(self._anchor_positions - current_state["position"])[:, 0]
)
heading = torch.arctan2(
current_state["orientation"][:, 1], current_state["orientation"][:, 0]
)
# relaxed heading error
self._heading_error = torch.abs(
torch.arctan2(
torch.sin(self._goal_headings - heading),
torch.cos(self._goal_headings - heading),
)
)
# Encode task data
self._task_data[:, :2] = self._position_error
self._task_data[:, 2] = torch.cos(self._heading_error)
self._task_data[:, 3] = torch.sin(self._heading_error)
return self.update_observation_tensor(current_state)
def compute_relative_angle(self, fp_position:torch.Tensor):
"""
Compute relative angle between FP and anchor point of cone-shape penalty.
Args:
fp_position: position of the FP in env coordinate.
Returns:
relative_angle: relative angle between FP and anchor point.
"""
self._anchor_positions = self._target_positions.clone()
self._anchor_positions[:, 0] += self._task_parameters.goal_to_penalty_anchor_dist * torch.cos(self._target_headings)
self._anchor_positions[:, 1] += self._task_parameters.goal_to_penalty_anchor_dist * torch.sin(self._target_headings)
relative_angle = torch.atan2((fp_position - self._anchor_positions)[:, 1], (fp_position - self._anchor_positions)[:, 0]) - self._target_headings
relative_angle = torch.atan2(torch.sin(relative_angle), torch.cos(relative_angle)) # normalize angle within (-pi, pi)
return relative_angle
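    # Note (illustrative, added for clarity): torch.atan2(torch.sin(a), torch.cos(a))
    # wraps any angle a back into (-pi, pi] without branching, e.g. 3*pi/2 -> -pi/2.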
def compute_reward(
self,
current_state: dict,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
        This method differs from the GoToPose reward because the objective here is docking.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# compute reward mask
self.relative_angle = self.compute_relative_angle(current_state["position"])
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
# heading error
self.heading_dist = self._heading_error
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# cone shape penalty on fp-dock relative angle
relative_angle_penalty = self._task_parameters.relative_angle_penalty.compute_penalty(
self.relative_angle, step
)
# contact penalty
contact_penalty, self._contact_kills = (
self._task_parameters.contact_penalty.compute_penalty(
current_state["net_contact_forces"], step
)
)
# Checks if the goal is reached
position_goal_is_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
heading_goal_is_reached = (
self.heading_dist < self._task_parameters.heading_tolerance
).int()
goal_is_reached = position_goal_is_reached * heading_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# rewards
(
self.position_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state, actions, self.position_dist, self.heading_dist
)
return (
self.position_reward
+ self.heading_reward
- boundary_penalty
- relative_angle_penalty
- contact_penalty
)
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
die = torch.where(self._contact_kills, ones, die)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
        Returns:
            dict: The updated training statistics.
"""
stats["position_reward"] += self.position_reward
stats["heading_reward"] += self.heading_reward
stats["position_error"] += self.position_dist
stats["heading_error"] += self.heading_dist
stats["boundary_dist"] += self.boundary_dist
stats = self._task_parameters.boundary_penalty.update_statistics(stats)
stats = self._task_parameters.relative_angle_penalty.update_statistics(stats)
stats = self._task_parameters.contact_penalty.update_statistics(stats)
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task."""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations in env coordinate.
"""
num_goals = len(env_ids)
target_positions = torch.zeros(
(num_goals, 3), device=self._device, dtype=torch.float32
)
target_orientations = torch.zeros(
(num_goals, 4), device=self._device, dtype=torch.float32
)
# Randomizes the target position (completely random)
        dock_space = self._spawn_dock_space_sampler.sample(num_goals, step, device=self._device)  # free space between the dock face nearest the wall and the wall surface
self._target_positions[env_ids, 0] = \
2*(self._task_parameters.env_x/2 - self._task_parameters.dock_footprint_diameter - dock_space) * torch.rand((num_goals,), device=self._device) \
- (self._task_parameters.env_x/2 - self._task_parameters.dock_footprint_diameter - dock_space)
self._target_positions[env_ids, 1] = \
2*(self._task_parameters.env_y/2 - self._task_parameters.dock_footprint_diameter - dock_space) * torch.rand((num_goals,), device=self._device) \
- (self._task_parameters.env_y/2 - self._task_parameters.dock_footprint_diameter - dock_space)
# Randomizes the target heading
        # First, make the dock face the center of the environment.
self._target_headings[env_ids] = torch.atan2(self._target_positions[env_ids, 1], self._target_positions[env_ids, 0]) + math.pi # facing center
self._target_orientations[env_ids, 0] = torch.cos(
self._target_headings[env_ids] * 0.5
)
self._target_orientations[env_ids, 3] = torch.sin(
self._target_headings[env_ids] * 0.5
)
# Retrieve the target positions and orientations at batch index = env_ids
target_positions[:, :2] = self._target_positions[env_ids]
target_positions[:, 2] = torch.ones(num_goals, device=self._device) * 0.45
target_orientations[:] = self._target_orientations[env_ids]
# Add offset to the local target position
fp_foot_print_diameter = self._fp_footprint_diameter_sampler.sample(num_goals, step, device=self._device)
self._target_positions[env_ids, 0] += (fp_foot_print_diameter / 2) * torch.cos(self._target_headings[env_ids])
self._target_positions[env_ids, 1] += (fp_foot_print_diameter / 2) * torch.sin(self._target_headings[env_ids])
return target_positions, target_orientations
def set_goals(self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
step:int = 0) -> None:
"""
Update goal attribute of task class.
Args:
env_ids: The environment ids for which the goal is set.
target_positions: The target positions for the robots in env coordinate (world position - env_position).
target_orientations: The target orientations for the robots."""
self._target_positions[env_ids] = target_positions[:, :2]
        siny_cosp = 2 * target_orientations[:, 0] * target_orientations[:, 3]
        cosy_cosp = 1 - 2 * (target_orientations[:, 3] * target_orientations[:, 3])
self._target_headings[env_ids] = torch.arctan2(siny_cosp, cosy_cosp)
# Add offset to the local target position
fp_foot_print_diameter = self._fp_footprint_diameter_sampler.sample(len(env_ids), step, device=self._device)
self._target_positions[env_ids, 0] += (fp_foot_print_diameter / 2) * torch.cos(self._target_headings[env_ids])
self._target_positions[env_ids, 1] += (fp_foot_print_diameter / 2) * torch.sin(self._target_headings[env_ids])
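    # Note (illustrative, added for clarity): for a planar quaternion
    # (w, 0, 0, z) with w = cos(yaw/2) and z = sin(yaw/2), the expression
    # arctan2(2*w*z, 1 - 2*z*z) above recovers the yaw angle.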
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> list:
"""
Generates spawning positions for the robots following a curriculum.
        [Warmup] Randomize only position; the FP always faces the dock.
[In curriculum] Randomize position and orientation.
[After curriculum] Max position and orientation.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step."""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the initial position and orientation
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
relative_angle = self.spawn_relative_angle_sampler.sample(num_resets, step, device=self._device)
initial_position[:, 0] = self._target_positions[env_ids, 0] + r * torch.cos(self._target_headings[env_ids] + relative_angle)
initial_position[:, 1] = self._target_positions[env_ids, 1] + r * torch.sin(self._target_headings[env_ids] + relative_angle)
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
heading_noise = self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
heading_angle = self._target_headings[env_ids] + relative_angle + math.pi + heading_noise
initial_orientation[:, 0] = torch.cos(heading_angle * 0.5)
initial_orientation[:, 3] = torch.sin(heading_angle * 0.5)
### Randomize linear and angular velocity ###
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def get_dock_masses(self, env_ids: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Generates a random mass for the dock.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
torch.Tensor: The mass of the dock.
"""
mass = self._spawn_dock_mass_sampler.sample(len(env_ids), step, device=self._device)
return mass
def generate_target(self, path, position: torch.Tensor, dock_param: dict = None):
"""
Generate a docking station where the FP will dock to.
Args:
path (str): path to the prim
position (torch.Tensor): position of the docking station
dock_param (dict, optional): dictionary of DockParameters. Defaults to None.
"""
Dock(
prim_path=path+"/dock",
name="dock",
position=position,
dock_params=dock_param,
)
def add_dock_to_scene(
self, scene: Usd.Stage
)->Tuple[Usd.Stage, ArticulationView]:
"""
        Adds the articulation view and rigid prim view of the docking station to the scene.
Args:
scene (Usd.Stage): The scene to add the docking station to."""
dock = DockView(prim_paths_expr="/World/envs/.*/dock")
scene.add(dock)
scene.add(dock.base)
return scene, dock
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
delta_angle = self.spawn_relative_angle_sampler.sample(num_resets, step, device=self._device)
heading_noise = self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
dock_space = self._spawn_dock_space_sampler.sample(num_resets, step, device=self._device)
dock_mass = self._spawn_dock_mass_sampler.sample(num_resets, step, device=self._device)
r = r.cpu().numpy()
delta_angle = delta_angle.cpu().numpy()
heading_noise = heading_noise.cpu().numpy()
dock_space = dock_space.cpu().numpy()
dock_mass = dock_mass.cpu().numpy()
### Plot spawn mass ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(dock_mass, bins=32)
ax.set_title("Dock mass")
ax.set_xlim(
self._spawn_dock_mass_sampler.get_min_bound(),
self._spawn_dock_mass_sampler.get_max_bound(),
)
ax.set_xlabel("mass (kg)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/dock_mass"] = wandb.Image(data)
### Plot spawn position ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
### Plot spawn relative heading ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(delta_angle, bins=32)
ax.set_title("Initial relative heading")
ax.set_xlim(
self.spawn_relative_angle_sampler.get_min_bound(),
self.spawn_relative_angle_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_relative_heading"] = wandb.Image(data)
### Plot spawn heading noise ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading_noise, bins=32)
ax.set_title("Initial heading noise")
        ax.set_xlim(
            self._spawn_heading_sampler.get_min_bound(),
            self._spawn_heading_sampler.get_max_bound(),
        )
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading_noise"] = wandb.Image(data)
### Plot dock space ###
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(dock_space, bins=32)
ax.set_title("Dock space")
ax.set_xlim(
self._spawn_dock_space_sampler.get_min_bound(),
self._spawn_dock_space_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/dock_space"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
return {}
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = self._task_parameters.boundary_penalty.get_logs()
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
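    # Logging cadence note (added for clarity): the boundary penalty scalars
    # are logged on every call, while the histogram images produced by
    # log_spawn_data/log_target_data are only regenerated every 50 steps.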
| 26,482 |
Python
| 38.409226 | 166 | 0.594668 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/spacecraft_definition.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from typing import List
import math
@dataclass
class CoreParameters:
shape: str = "sphere"
radius: float = 0.31
height: float = 0.5
mass: float = 5.32
CoM: tuple = (0, 0, 0)
refinement: int = 2
usd_asset_path: str = "/None"
def __post_init__(self):
assert self.shape in [
"cylinder",
"sphere",
"asset",
], "The shape must be 'cylinder', 'sphere' or 'asset'."
assert self.radius > 0, "The radius must be larger than 0."
assert self.height > 0, "The height must be larger than 0."
assert self.mass > 0, "The mass must be larger than 0."
assert len(self.CoM) == 3, "The length of the CoM coordinates must be 3."
assert self.refinement > 0, "The refinement level must be larger than 0."
self.refinement = int(self.refinement)
@dataclass
class ThrusterParameters:
"""
The definition of a basic thruster.
"""
max_force: float = 1.0
position: tuple = (0, 0, 0)
orientation: tuple = (0, 0, 0)
delay: float = 0.0
response_order: int = 0
tau: float = 1.0
def __post_init__(self):
assert self.tau > 0, "The response time of the system must be larger than 0"
assert self.response_order in [
0,
1,
], "The response order of the system must be 0 or 1."
        assert (
            self.delay >= 0
        ), "The delay in system response must be larger than or equal to 0."
@dataclass
class ReactionWheelParameters:
"""
The definition of a basic reaction wheel.
"""
mass: float = 0.250
inertia: float = 0.3
position: tuple = (0, 0, 0)
orientation: tuple = (0, 0, 0)
max_speed: float = 5000
delay: float = 0.0
    response_order: int = 1
tau: float = 1.0
def __post_init__(self):
assert self.tau > 0, "The response time of the system must be larger than 0"
assert self.response_order in [
0,
1,
], "The response order of the system must be 0 or 1."
        assert (
            self.delay >= 0
        ), "The delay in system response must be larger than or equal to 0."
assert (
self.max_speed > 0
), "The maximum speed of the reaction wheel must be larger than 0."
@dataclass
class FloatingPlatformParameters:
"""
Thruster configuration parameters.
"""
use_four_configurations: bool = False
num_anchors: int = 4
offset: float = math.pi / 4
thrust_force: float = 1.0
visualize: bool = False
save_path: str = "thruster_configuration.png"
thruster_model: ThrusterParameters = field(default_factory=dict)
reaction_wheel_model: ReactionWheelParameters = field(default_factory=dict)
def __post_init__(self):
        assert self.num_anchors > 1, "num_anchors must be greater than or equal to 2."
    def generate_anchors_2D(self, radius):
        # Completion of the original stub (the (x, y) layout is an assumption):
        # evenly distributes the anchor points on a circle of the given radius.
        angles = [math.pi * 2 * i / self.num_anchors for i in range(self.num_anchors)]
        return [(radius * math.cos(a), radius * math.sin(a)) for a in angles]
    def generate_anchors_3D(self, radius):
        # Not implemented in the original source.
        pass
@dataclass
class SpaceCraftDefinition:
"""
The definition of the spacecraft / floating platform.
"""
    use_floating_platform_generation: bool = True
core: CoreParameters = field(default_factory=dict)
floating_platform: FloatingPlatformParameters = field(default_factory=dict)
thrusters: List[ThrusterParameters] = field(default_factory=list)
reaction_wheels: List[ReactionWheelParameters] = field(default_factory=list)
def __post_init__(self):
self.core = CoreParameters(**self.core)
        if not self.use_floating_platform_generation:
raise NotImplementedError
@dataclass
class PlatformRandomization:
"""
Platform randomization parameters.
"""
random_permutation: bool = False
random_offset: bool = False
randomize_thruster_position: bool = False
min_random_radius: float = 0.125
max_random_radius: float = 0.25
random_theta: float = 0.125
randomize_thrust_force: bool = False
min_thrust_force: float = 0.5
max_thrust_force: float = 1.0
kill_thrusters: bool = False
max_thruster_kill: int = 1
def compute_actions(cfg_param: FloatingPlatformParameters):
"""
Computes the number of actions for the thruster configuration.
"""
if cfg_param.use_four_configurations:
return 10
else:
return cfg_param.num_anchors * 4
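# Minimal usage sketch (illustrative; added, not part of the original module):
# with the defaults above (4 anchors, 4 thrusters per anchor), the platform
# exposes 16 actions.
if __name__ == "__main__":
    cfg = FloatingPlatformParameters()
    print(compute_actions(cfg))  # -> 16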
| 4,767 |
Python
| 27.722891 | 84 | 0.624921 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_thruster_generator.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
euler_angles_to_matrix,
)
from typing import List, Tuple
from dataclasses import dataclass, field
import torch
import math
@dataclass
class ConfigurationParameters:
"""
Thruster configuration parameters."""
use_four_configurations: bool = False
num_anchors: int = 4
offset: float = math.pi / 4
thrust_force: float = 1.0
visualize: bool = False
save_path: str = "thruster_configuration.png"
def __post_init__(self):
        assert self.num_anchors > 1, "num_anchors must be greater than or equal to 2."
@dataclass
class PlatformParameters:
"""
Platform physical parameters."""
shape: str = "sphere"
radius: float = 0.31
height: float = 0.5
mass: float = 5.32
CoM: tuple = (0, 0, 0)
refinement: int = 2
usd_asset_path: str = "/None"
enable_collision: bool = False
@dataclass
class PlatformRandomization:
"""
Platform randomization parameters."""
random_permutation: bool = False
random_offset: bool = False
randomize_thruster_position: bool = False
min_random_radius: float = 0.125
max_random_radius: float = 0.25
random_theta: float = 0.125
randomize_thrust_force: bool = False
min_thrust_force: float = 0.5
max_thrust_force: float = 1.0
kill_thrusters: bool = False
max_thruster_kill: int = 1
def compute_actions(cfg_param: ConfigurationParameters):
"""
Computes the number of actions for the thruster configuration."""
if cfg_param.use_four_configurations:
return 10
else:
return cfg_param.num_anchors * 4
class VirtualPlatform:
"""
Generates a virtual floating platform with thrusters."""
def __init__(self, num_envs: int, platform_cfg: dict, device: str) -> None:
self._num_envs = num_envs
self._device = device
# Generates dataclasses from the configuration file
self.core_cfg = PlatformParameters(**platform_cfg["core"])
self.rand_cfg = PlatformRandomization(**platform_cfg["randomization"])
self.thruster_cfg = ConfigurationParameters(**platform_cfg["configuration"])
# Computes the number of actions
self._max_thrusters = compute_actions(self.thruster_cfg)
# Sets the empty buffers
self.transforms3D = torch.zeros(
(num_envs, self._max_thrusters, 4, 4),
device=self._device,
dtype=torch.float32,
)
self.current_transforms = torch.zeros(
(num_envs, self._max_thrusters, 10),
device=self._device,
dtype=torch.float32,
)
self.action_masks = torch.zeros(
(num_envs, self._max_thrusters), device=self._device, dtype=torch.long
)
self.thrust_force = torch.zeros(
(num_envs, self._max_thrusters), device=self._device, dtype=torch.float32
)
# Creates a unit vector to project the forces
self.create_unit_vector()
        # Generates the base thruster configuration
        self.generate_base_platforms(self._num_envs, torch.arange(self._num_envs))
        # Generates a visualization file for the provided thruster configuration
        if self.thruster_cfg.visualize:
            self.visualize(self.thruster_cfg.save_path)
def create_unit_vector(self) -> None:
"""
Creates a unit vector to project the forces.
        The unit vector points along each thruster's local x-axis; here it is a 3D vector."""
tmp_x = torch.ones(
(self._num_envs, self._max_thrusters, 1),
device=self._device,
dtype=torch.float32,
)
tmp_y = torch.zeros(
(self._num_envs, self._max_thrusters, 2),
device=self._device,
dtype=torch.float32,
)
self.unit_vector = torch.cat([tmp_x, tmp_y], dim=-1)
    def project_forces(self, forces: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Projects the forces on the platform.
        Args:
            forces (torch.Tensor): The commanded forces, one per thruster.
        Returns:
            Tuple[torch.Tensor, torch.Tensor]: The thruster positions and the projected forces."""
# Applies force scaling, applies action masking
rand_forces = forces * self.thrust_force * (1 - self.action_masks)
# Split transforms into translation and rotation
R = self.transforms3D[:, :, :3, :3].reshape(-1, 3, 3)
T = self.transforms3D[:, :, 3, :3].reshape(-1, 3)
        # The translation components are the force application points
        positions = T
# Project forces
force_vector = self.unit_vector * rand_forces.view(
self._num_envs, self._max_thrusters, 1
)
projected_forces = torch.matmul(R, force_vector.view(-1, 3, 1))
return positions, projected_forces[:, :, 0]
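    # Shape note (derived from the code above, added for clarity): with E
    # environments and T thrusters, `positions` is (E * T, 3) and the returned
    # projected forces are (E * T, 3), i.e. one 3D application point and one
    # 3D force vector per thruster, flattened across environments.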
def randomize_thruster_state(self, env_ids: torch.Tensor, num_resets: int) -> None:
"""
Randomizes the spatial configuration of the thruster."""
self.generate_base_platforms(num_resets, env_ids)
def generate_base_platforms(self, num_envs: int, env_ids: torch.Tensor) -> None:
"""
Generates the spatial configuration of the thruster."""
# ====================
# Basic thruster positioning
# ====================
# Generates a fixed offset between the heading and the first generated thruster
random_offset = (
torch.ones((self._num_envs), device=self._device)
.view(-1, 1)
.expand(self._num_envs, self._max_thrusters)
* math.pi
/ self.thruster_cfg.num_anchors
)
# Adds a random offset to each simulated platform between the heading and the first generated thruster
if self.rand_cfg.random_offset:
random_offset += (
torch.rand((self._num_envs), device=self._device)
.view(-1, 1)
.expand(self._num_envs, self._max_thrusters)
* math.pi
* 2
)
        # Generates a 180 degree offset between two consecutive thrusters (+/- 90 degrees).
thrust_90_x = torch.zeros(
(self._num_envs, self._max_thrusters), device=self._device
)
thrust_90_y = (
(
torch.concat(
[
torch.ones(2, device=self._device) / 2.0,
torch.arange(2, device=self._device),
]
)
.repeat(self._max_thrusters // 4)
.expand(self._num_envs, self._max_thrusters)
* 2
- 1
)
* math.pi
/ 2
)
thrust_90_z = (
(
torch.concat(
[
torch.arange(2, device=self._device),
torch.ones(2, device=self._device) / 2.0,
]
)
.repeat(self._max_thrusters // 4)
.expand(self._num_envs, self._max_thrusters)
* 2
- 1
)
* math.pi
/ 2
)
# Generates N, four by four thruster
thrust_offset = (
torch.arange(self.thruster_cfg.num_anchors, device=self._device)
.repeat_interleave(4)
.expand(self._num_envs, self._max_thrusters)
/ self.thruster_cfg.num_anchors
* math.pi
* 2
)
# Generates a mask indicating if the thrusters are usable or not. Used by the transformer to mask the sequence.
mask = torch.ones((self._num_envs, self._max_thrusters), device=self._device)
# ====================
# Random thruster killing
# ====================
# Kill thrusters:
if self.rand_cfg.kill_thrusters:
# Generates 0 and 1 to decide how many thrusters will be killed
weights = torch.ones((self._num_envs, 2), device=self._device)
kills = torch.multinomial(
weights, num_samples=self.rand_cfg.max_thruster_kill, replacement=True
)
# Selects L indices to set to N+1
weights = torch.ones(self._max_thrusters, device=self._device).expand(
self._num_envs, -1
)
kill_ids = torch.multinomial(
weights, num_samples=self.rand_cfg.max_thruster_kill, replacement=False
)
# Multiplies kill or not kill with the ids.
# If no kill, then the value is set to max_thrusters + 1, such that it can be filtered out later
final_kill_ids = kills * kill_ids + (1 - kills) * self._max_thrusters
# Creates a mask from the kills:
kill_mask = torch.sum(
torch.nn.functional.one_hot(final_kill_ids, self._max_thrusters + 1),
dim=1,
)
# Removes the duplicates
kill_mask = 1 - kill_mask[:, : self._max_thrusters]
if self.thruster_cfg.use_four_configurations:
mask[self._num_envs // 4 :] = (
mask[self._num_envs // 4 :] * kill_mask[self._num_envs // 4 :]
)
else:
mask = mask * kill_mask
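            # Worked example (added for clarity): with max_thruster_kill=1,
            # kills=[[1]] and kill_ids=[[2]] give final_kill_ids=[[2]], whose
            # one-hot sum yields kill_mask=[1, 1, 0, 1, ...], disabling
            # thruster 2; kills=[[0]] maps the id to max_thrusters, which
            # falls in the extra one-hot bin that is cropped away.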
# Generates the transforms and masks
transforms3D = torch.zeros_like(self.transforms3D) # Used to project the forces
action_masks = torch.zeros_like(self.action_masks) # Used to mask actions
current_transforms = torch.zeros_like(
self.current_transforms
) # Used to feed to the transformer
# ====================
# Randomizes the thruster poses and characteristics.
# ====================
# Randomizes the thrust force:
if self.rand_cfg.randomize_thrust_force:
thrust_force = (
torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.max_thrust_force - self.rand_cfg.min_thrust_force)
+ self.rand_cfg.min_thrust_force
)
else:
thrust_force = torch.ones(
(self._num_envs, self._max_thrusters), device=self._device
)
# Thruster angular position with regards to the center of mass.
theta2 = random_offset + thrust_offset
# Randomizes thruster poses if requested:
if self.rand_cfg.randomize_thruster_position:
radius = self.core_cfg.radius * (
1
+ torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.max_random_radius + self.rand_cfg.min_random_radius)
- self.rand_cfg.min_random_radius
)
theta2 += (
torch.rand((self._num_envs, self._max_thrusters), device=self._device)
* (self.rand_cfg.random_theta * 2)
- self.rand_cfg.random_theta
)
else:
radius = self.core_cfg.radius
# Thruster angle:
thrust_90_z = theta2 + thrust_90_z
# ====================
# Computes the 3D transforms of the thruster locations.
# ====================
euler = torch.concatenate(
[
thrust_90_x.view(thrust_90_x.shape + (1,)),
thrust_90_y.view(thrust_90_x.shape + (1,)),
thrust_90_z.view(thrust_90_x.shape + (1,)),
],
axis=-1,
)
# 3D transforms defining the thruster locations.
transforms3D[:, :, :3, :3] = euler_angles_to_matrix(euler, "XYZ")
transforms3D[:, :, 3, 0] = torch.cos(theta2) * radius
transforms3D[:, :, 3, 1] = torch.sin(theta2) * radius
transforms3D[:, :, 3, 2] = 0
transforms3D[:, :, 3, 3] = 1
transforms3D = transforms3D * mask.view(
mask.shape
+ (
1,
1,
)
)
# Actions masks to define which thrusters can be used.
action_masks[:, :] = 1 - mask.long()
# Transforms to feed to the transformer.
current_transforms[:, :, :6] = transforms3D[:, :, :2, :3].reshape(
self._num_envs, self._max_thrusters, 6
)
current_transforms[:, :, 6:9] = transforms3D[:, :, 3, :3]
current_transforms[:, :, 9] = thrust_force
current_transforms = current_transforms * mask.view(mask.shape + (1,))
# Applies random permutations to the thrusters while keeping the non-used thrusters at the end of the sequence.
if self.rand_cfg.random_permutation:
weights = torch.ones(self._max_thrusters, device=self._device).expand(
self._num_envs, -1
)
selected_thrusters = torch.multinomial(
weights, num_samples=self._max_thrusters, replacement=False
)
mask = torch.gather(1 - mask, 1, selected_thrusters)
_, sorted_idx = mask.sort(1)
selected_thrusters = torch.gather(selected_thrusters, 1, sorted_idx)
transforms3D = torch.gather(
transforms3D,
1,
selected_thrusters.view(
self._num_envs, self._max_thrusters, 1, 1
).expand(self._num_envs, self._max_thrusters, 4, 4),
)
current_transforms = torch.gather(
current_transforms,
1,
selected_thrusters.view(self._num_envs, self._max_thrusters, 1).expand(
self._num_envs, self._max_thrusters, 10
),
)
action_masks = torch.gather(action_masks, 1, selected_thrusters)
thrust_force = torch.gather(thrust_force, 1, selected_thrusters)
# Updates the proper indices
self.thrust_force[env_ids] = thrust_force[env_ids]
self.action_masks[env_ids] = action_masks[env_ids]
self.current_transforms[env_ids] = current_transforms[env_ids]
self.transforms3D[env_ids] = transforms3D[env_ids]
def visualize(self, save_path: str = None):
"""
Visualizes the thruster configuration."""
from matplotlib import pyplot as plt
from matplotlib import cm
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the "3d" projection)
import numpy as np
# Creates a list of color
cmap = cm.get_cmap("hsv")
colors = []
for i in range(self._max_thrusters):
colors.append(cmap(i / self._max_thrusters))
# Split into 1/4th of the envs, so that we can visualize all the configs in use_four_configuration mode.
env_ids = [
0,
1,
2,
3,
self._num_envs // 4,
self._num_envs // 4 + 1,
self._num_envs // 4 + 2,
self._num_envs // 4 + 3,
2 * self._num_envs // 4,
2 * self._num_envs // 4 + 1,
2 * self._num_envs // 4 + 2,
2 * self._num_envs // 4 + 3,
3 * self._num_envs // 4,
3 * self._num_envs // 4 + 1,
3 * self._num_envs // 4 + 2,
3 * self._num_envs // 4 + 3,
]
# Generates a thrust on all the thrusters
forces = torch.ones(
(self._num_envs, self._max_thrusters),
device=self._device,
dtype=torch.float32,
)
# Project
p, f = self.project_forces(forces)
# Reshape and get only the 2D values for plot.
p = p.reshape(self._num_envs, self._max_thrusters, 3)
f = f.reshape(self._num_envs, self._max_thrusters, 3)
p = np.array(p.cpu())
f = np.array(f.cpu())
def repeatForEach(elements, times):
return [e for e in elements for _ in range(times)]
def renderColorsForQuiver3d(colors):
colors = list(filter(lambda x: x != (0.0, 0.0, 0.0), colors))
return colors + repeatForEach(colors, 2)
fig = plt.figure()
fig.set_size_inches(20, 20)
for i in range(4):
for j in range(4):
idx = env_ids[i * 4 + j]
ax = fig.add_subplot(4, 4, i * 4 + (j + 1), projection="3d")
ax.quiver(
p[idx, :, 0],
p[idx, :, 1],
p[idx, :, 2],
f[idx, :, 0],
f[idx, :, 1],
f[idx, :, 2],
color=renderColorsForQuiver3d(colors),
length=0.2,
normalize=True,
)
ax.set_xlim([-0.4, 0.4])
ax.set_ylim([-0.4, 0.4])
ax.set_zlim([-0.4, 0.4])
fig.tight_layout()
fig.savefig(save_path, dpi=300)
plt.close()
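# Minimal construction sketch (illustrative; added, not part of the original
# module). Empty sub-configs fall back to the dataclass defaults above, and
# `visualize` stays False so no figure is written.
if __name__ == "__main__":
    cfg = {"core": {}, "randomization": {}, "configuration": {}}
    platform = VirtualPlatform(num_envs=4, platform_cfg=cfg, device="cpu")
    forces = torch.ones((4, platform._max_thrusters))
    positions, projected = platform.project_forces(forces)
    print(positions.shape, projected.shape)  # -> torch.Size([64, 3]) twice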
| 17,104 |
Python
| 35.784946 | 119 | 0.534846 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_go_to_pose.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
Core,
quat_to_mat,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_rewards import (
GoToPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_parameters import (
GoToPoseParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_pose import (
GoToPoseTask as GoToPoseTask2D,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.arrow3D import VisualArrow3D
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoToPoseTask(GoToPoseTask2D, Core):
"""
Implements the GoToPose task. The robot has to reach a target position and heading.
"""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoToPose task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The reward parameters of the task.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
Core.__init__(self, num_envs, device)
# Task and reward parameters
self._task_parameters = GoToPoseParameters(**task_param)
self._reward_parameters = GoToPoseReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_heading_sampler = CurriculumSampler(
self._task_parameters.spawn_heading_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._target_headings = torch.zeros(
(self._num_envs, 3, 3), device=self._device, dtype=torch.float32
)
self._target_quat = torch.zeros(
(self._num_envs, 4), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
return Core.update_observation_tensor(self, current_state)
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
# position distance
self._position_error = self._target_positions - current_state["position"]
        # heading distance: relative rotation R_err = R_current^T @ R_target
self._heading_error = torch.bmm(
torch.transpose(current_state["orientation"], -2, -1), self._target_headings
)
# Encode task data
self._task_data[:, :3] = self._position_error
self._task_data[:, 3:] = self._heading_error[:, :2, :].reshape(
self._num_envs, 6
)
return self.update_observation_tensor(current_state)
def compute_reward(
self,
current_state: torch.Tensor,
actions: torch.Tensor,
step: int = 0,
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
            step (int, optional): The current training step. Defaults to 0.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
trace = (
self._heading_error[:, 0, 0]
+ self._heading_error[:, 1, 1]
+ self._heading_error[:, 2, 2]
)
        self.heading_dist = torch.arccos(
            torch.clamp((trace - 1) / 2, -1 + EPS, 1 - EPS)
        )
# boundary penalty
self.boundary_dist = torch.abs(
self._task_parameters.kill_dist - self.position_dist
)
self.boundary_penalty = self._task_parameters.boundary_penalty.compute_penalty(
self.boundary_dist, step
)
# Checks if the goal is reached
position_goal_is_reached = (
self.position_dist < self._task_parameters.position_tolerance
).int()
heading_goal_is_reached = (
self.heading_dist < self._task_parameters.orientation_tolerance
).int()
goal_is_reached = position_goal_is_reached * heading_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# rewards
(
self.position_reward,
self.heading_reward,
) = self._reward_parameters.compute_reward(
current_state, actions, self.position_dist, self.heading_dist
)
return self.position_reward + self.heading_reward - self.boundary_penalty
def get_goals(
self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
target_positions (torch.Tensor): The target positions of the environments.
target_orientations (torch.Tensor): The target orientations of the environments.
step (int, optional): The current training step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomize position
self._target_positions[env_ids] = (
torch.rand((num_goals, 3), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
target_positions[env_ids, :3] += self._target_positions[env_ids]
        # Randomizes the orientation with a uniformly distributed random quaternion
uvw = torch.rand((num_goals, 3), device=self._device)
quat = torch.zeros((num_goals, 4), device=self._device)
quat[:, 0] = torch.sqrt(uvw[:, 0]) * torch.cos(uvw[:, 2] * 2 * math.pi)
quat[:, 1] = torch.sqrt(1 - uvw[:, 0]) * torch.sin(uvw[:, 1] * 2 * math.pi)
quat[:, 2] = torch.sqrt(1 - uvw[:, 0]) * torch.cos(uvw[:, 1] * 2 * math.pi)
quat[:, 3] = torch.sqrt(uvw[:, 0]) * torch.sin(uvw[:, 2] * 2 * math.pi)
target_orientations[env_ids] = quat
# cast quaternions to rotation matrix
self._target_quat[env_ids] = quat
self._target_headings[env_ids] = quat_to_mat(quat)
return target_positions, target_orientations
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates spawning positions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current training step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial positions, orientations, and velocities.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self._goal_reached[env_ids] = 0
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_position[:, 0] = (
r * torch.cos(theta) * torch.sin(phi) + self._target_positions[env_ids, 0]
)
initial_position[:, 1] = (
r * torch.sin(theta) * torch.sin(phi) + self._target_positions[env_ids, 1]
)
initial_position[:, 2] = r * torch.cos(phi) + self._target_positions[env_ids, 2]
# Randomizes the orientation of the platform
# We want to sample something that's not too far from the original orientation
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
        r = self._spawn_heading_sampler.sample(num_resets, step, device=self._device)
# Projects the angular distance on a sphere in the RPY space
u = (
torch.rand(num_resets, device=self._device, dtype=torch.float32)
* math.pi
* 2
)
v = torch.rand(num_resets, device=self._device, dtype=torch.float32) * math.pi
roll = r * torch.cos(u) * torch.sin(v)
pitch = r * torch.sin(u) * torch.sin(v)
yaw = r * torch.cos(v)
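        # Displacement note (added for clarity): (u, v) sample a direction on
        # a sphere in roll-pitch-yaw space, so the spawn orientation lies at
        # an angular distance r from the target orientation.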
        # Casts the RPY displacement into quaternion space
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
w0 = cr * cp * cy + sr * sp * sy
x0 = sr * cp * cy - cr * sp * sy
y0 = cr * sp * cy + sr * cp * sy
z0 = cr * cp * sy - sr * sp * cy
w1 = self._target_quat[env_ids, 0]
x1 = self._target_quat[env_ids, 1]
y1 = self._target_quat[env_ids, 2]
z1 = self._target_quat[env_ids, 3]
# Quaternion multiplication with the target orientation
initial_orientation[:, 0] = w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1
initial_orientation[:, 1] = w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1
initial_orientation[:, 2] = w0 * y1 - x0 * z1 + y0 * w1 + z0 * x1
initial_orientation[:, 3] = w0 * z1 + x0 * y1 - y0 * x1 + z0 * w1
initial_orientation /= torch.norm(
initial_orientation + EPS, dim=-1, keepdim=True
)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 2] = linear_velocity * torch.cos(phi)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 3] = angular_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 4] = angular_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 5] = angular_velocity * torch.cos(phi)
return initial_position, initial_orientation, initial_velocity
def generate_target(self, path, position):
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
        An arrow is generated to represent the 3D pose to be reached by the agent."""
color = torch.tensor([1, 0, 0])
body_radius = 0.025
body_length = 1.5
head_radius = 0.075
head_length = 0.5
VisualArrow3D(
prim_path=path + "/arrow",
translation=position,
name="target_0",
body_radius=body_radius,
body_length=body_length,
head_radius=head_radius,
head_length=head_length,
color=color,
)
def add_visual_marker_to_scene(self, scene):
"""
Adds the visual marker to the scene."""
arrows = XFormPrimView(prim_paths_expr="/World/envs/.*/arrow")
scene.add(arrows)
return scene, arrows
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
        # Samples the spawn position distribution
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the heading of the platform
heading = self._spawn_heading_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the linear velocity of the platform
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
heading = heading.cpu().numpy()
linear_velocities = linear_velocity.cpu().numpy()
angular_velocities = angular_velocity.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Initial position")
ax.set_xlim(
self._spawn_position_sampler.get_min_bound(),
self._spawn_position_sampler.get_max_bound(),
)
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_position"] = wandb.Image(data)
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(heading, bins=32)
ax.set_title("Initial heading")
ax.set_xlim(
self._spawn_heading_sampler.get_min_bound(),
self._spawn_heading_sampler.get_max_bound(),
)
ax.set_xlabel("angular distance (rad)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_heading"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
| 16,881 |
Python
| 36.683036 | 113 | 0.590072 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_track_6DoF_velocity.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_rewards import (
Track6DoFVelocityReward,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_parameters import (
Track6DoFVelocityParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xyo_velocity import (
TrackXYOVelocityTask as TrackXYOVelocityTask2D,
)
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class TrackXYOVelocityTask(TrackXYOVelocityTask2D, Core):
"""
    Implements the Track6DoFVelocity task. The robot has to track a target linear and angular velocity.
"""
def __init__(
self, task_param: dict, reward_param: dict, num_envs: int, device: str
) -> None:
"""
        Initializes the Track6DoFVelocity task.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The parameters of the reward.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
Core.__init__(self, num_envs, device)
# Task and reward parameters
self._task_parameters = Track6DoFVelocityParameters(**task_param)
self._reward_parameters = Track6DoFVelocityReward(**reward_param)
# Curriculum
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._target_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.target_angular_velocity_curriculum,
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum,
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum,
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_linear_velocities = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._target_angular_velocities = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 3
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
return Core.update_observation_tensor(self, current_state)
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._linear_velocity_error = (
self._target_linear_velocities - current_state["linear_velocity"]
)
self._angular_velocity_error = (
self._target_angular_velocities - current_state["angular_velocity"]
)
self._position_error = current_state["position"]
self._task_data[:, :3] = self._linear_velocity_error
self._task_data[:, 3:6] = self._angular_velocity_error
return self.update_observation_tensor(current_state)
def compute_reward(
self, current_state: torch.Tensor, actions: torch.Tensor
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.sqrt(
torch.square(self._linear_velocity_error).sum(-1)
)
self.angular_velocity_dist = torch.sqrt(
torch.square(self._angular_velocity_error).sum(-1)
)
# Checks if the goal is reached
lin_goal_is_reached = (
self.linear_velocity_dist < self._task_parameters.lin_vel_tolerance
).int()
ang_goal_is_reached = (
self.angular_velocity_dist < self._task_parameters.ang_vel_tolerance
).int()
goal_is_reached = lin_goal_is_reached * ang_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
# Rewards
(
self.linear_velocity_reward,
self.angular_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
self.linear_velocity_dist,
self.angular_velocity_dist,
)
return self.linear_velocity_reward + self.angular_velocity_reward
def get_goals(
self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
step: int = 0,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
target_positions (torch.Tensor): The target positions.
target_orientations (torch.Tensor): The target orientations.
step (int, optional): The current step. Defaults to 0.
Returns:
            Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
phi = torch.rand((num_goals,), device=self._device) * math.pi
self._target_linear_velocities[env_ids, 0] = (
r * torch.cos(theta) * torch.sin(phi)
)
self._target_linear_velocities[env_ids, 1] = (
r * torch.sin(theta) * torch.sin(phi)
)
self._target_linear_velocities[env_ids, 2] = r * torch.cos(phi)
# Randomizes the target angular velocity
r = self._target_angular_velocity_sampler.sample(
num_goals, step=step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
phi = torch.rand((num_goals,), device=self._device) * math.pi
self._target_angular_velocities[env_ids, 0] = (
r * torch.cos(theta) * torch.sin(phi)
)
self._target_angular_velocities[env_ids, 1] = (
r * torch.sin(theta) * torch.sin(phi)
)
self._target_angular_velocities[env_ids, 2] = r * torch.cos(phi)
        # The positions and orientations are returned unchanged: velocity tracking has no positional goal
return target_positions, target_orientations
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
uvw = torch.rand((num_resets, 3), device=self._device)
initial_orientation[:, 0] = torch.sqrt(uvw[:, 0]) * torch.cos(
uvw[:, 2] * 2 * math.pi
)
initial_orientation[:, 1] = torch.sqrt(1 - uvw[:, 0]) * torch.sin(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 2] = torch.sqrt(1 - uvw[:, 0]) * torch.cos(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 3] = torch.sqrt(uvw[:, 0]) * torch.sin(
uvw[:, 2] * 2 * math.pi
)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 2] = linear_velocity * torch.cos(phi)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 3] = angular_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 4] = angular_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 5] = angular_velocity * torch.cos(phi)
return (
initial_position,
initial_orientation,
initial_velocity,
)
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the target angular velocity
d = self._target_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
d = d.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(r, bins=32)
ax[0].set_title("Target normed linear velocity")
ax[0].set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(d, bins=32)
ax[1].set_title("Target normed angular velocity")
ax[1].set_xlim(
self._target_angular_velocity_sampler.get_min_bound(),
self._target_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/target_velocities"] = wandb.Image(data)
return dict
| 14,089 |
Python
| 34.943877 | 87 | 0.595003 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_task_factory.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_go_to_xyz import GoToXYZTask
from omniisaacgymenvs.tasks.MFP.MFP3D_go_to_pose import (
GoToPoseTask,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_track_xyz_velocity import (
TrackXYZVelocityTask,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_track_6DoF_velocity import (
TrackXYOVelocityTask,
)
class TaskFactory:
"""
Factory class to create tasks."""
def __init__(self):
self.creators = {}
def register(self, name: str, task):
"""
Registers a new task."""
self.creators[name] = task
def get(
self, task_dict: dict, reward_dict: dict, num_envs: int, device: str
) -> object:
"""
Returns a task."""
assert (
task_dict["name"] == reward_dict["name"]
), "The mode of both the task and the reward must match."
mode = task_dict["name"]
assert task_dict["name"] in self.creators.keys(), "Unknown task mode."
return self.creators[mode](task_dict, reward_dict, num_envs, device)
task_factory = TaskFactory()
task_factory.register("GoToXYZ", GoToXYZTask)
task_factory.register("GoToPose", GoToPoseTask)
task_factory.register("TrackXYZVelocity", TrackXYZVelocityTask)
task_factory.register("Track6DoFVelocity", TrackXYOVelocityTask)
# task_factory.register("TrackXYVelocityHeading", TrackXYVelocityHeadingTask)
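# Usage sketch (illustrative; the dictionary contents come from the task and
# reward sections of the training configuration and are not shown here):
#
#   task = task_factory.get(
#       task_dict={"name": "GoToPose", ...},
#       reward_dict={"name": "GoToPose", ...},
#       num_envs=1024,
#       device="cuda:0",
#   )
#
# Both dictionaries must carry the same "name" so that a task is always
# paired with its matching reward definition.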
| 1,673 |
Python
| 29.999999 | 82 | 0.673042 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_go_to_xyz.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP3D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_rewards import (
GoToXYZReward,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_task_parameters import (
GoToXYZParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_xy import (
GoToXYTask as GoToXYTask2D,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omniisaacgymenvs.utils.pin3D import VisualPin3D
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class GoToXYZTask(GoToXYTask2D, Core):
"""
    Implements the GoToXYZ task. The robot has to reach a target 3D position."""
def __init__(
self,
task_param: dict,
reward_param: dict,
num_envs: int,
device: str,
) -> None:
"""
Initializes the GoToXYZ task.
Args:
task_param (dict): Dictionary containing the task parameters.
reward_param (dict): Dictionary containing the reward parameters.
num_envs (int): Number of environments.
device (str): Device to run the task on.
"""
Core.__init__(self, num_envs, device)
# Task and reward parameters
self._task_parameters = GoToXYZParameters(**task_param)
self._reward_parameters = GoToXYZReward(**reward_param)
# Curriculum samplers
self._spawn_position_sampler = CurriculumSampler(
self._task_parameters.spawn_position_curriculum
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_positions = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 1
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
return Core.update_observation_tensor(self, current_state)
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._position_error = self._target_positions - current_state["position"]
self._task_data[:, :3] = self._position_error
return self.update_observation_tensor(current_state)
def get_goals(
self,
env_ids: torch.Tensor,
targets_position: torch.Tensor,
targets_orientation: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
            targets_position (torch.Tensor): The target positions.
            targets_orientation (torch.Tensor): The target orientations.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
self._target_positions[env_ids] = (
torch.rand((num_goals, 3), device=self._device)
* self._task_parameters.goal_random_position
* 2
- self._task_parameters.goal_random_position
)
targets_position[env_ids, :3] += self._target_positions[env_ids]
return targets_position, targets_orientation
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates spawning positions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self._goal_reached[env_ids] = 0
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
        initial_position[:, 0] = r * torch.cos(theta) * torch.sin(phi)
        initial_position[:, 1] = r * torch.sin(theta) * torch.sin(phi)
        initial_position[:, 2] = r * torch.cos(phi)
# Randomizes the orientation of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
uvw = torch.rand((num_resets, 3), device=self._device)
initial_orientation[:, 0] = torch.sqrt(uvw[:, 0]) * torch.cos(
uvw[:, 2] * 2 * math.pi
)
initial_orientation[:, 1] = torch.sqrt(1 - uvw[:, 0]) * torch.sin(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 2] = torch.sqrt(1 - uvw[:, 0]) * torch.cos(
uvw[:, 1] * 2 * math.pi
)
initial_orientation[:, 3] = torch.sqrt(uvw[:, 0]) * torch.sin(
uvw[:, 2] * 2 * math.pi
)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 2] = linear_velocity * torch.cos(phi)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
phi = torch.rand((num_resets,), device=self._device) * math.pi
initial_velocity[:, 3] = angular_velocity * torch.cos(theta) * torch.sin(phi)
initial_velocity[:, 4] = angular_velocity * torch.sin(theta) * torch.sin(phi)
initial_velocity[:, 5] = angular_velocity * torch.cos(phi)
return initial_position, initial_orientation, initial_velocity
def generate_target(self, path, position):
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
A pin is generated to represent the 3D position to be reached by the agent."""
color = torch.tensor([1, 0, 0])
ball_radius = 0.05
poll_radius = 0.025
poll_length = 2
VisualPin3D(
prim_path=path + "/pin",
translation=position,
name="target_0",
ball_radius=ball_radius,
poll_radius=poll_radius,
poll_length=poll_length,
color=color,
)
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
        # Samples the spawn position distribution
r = self._spawn_position_sampler.sample(num_resets, step, device=self._device)
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(dpi=100, figsize=(8, 8))
ax.hist(r, bins=32)
ax.set_title("Spawn radius")
ax.set_xlabel("spawn distance (m)")
ax.set_ylabel("count")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/spawn_position"] = wandb.Image(data)
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
        ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
        ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
| 10,907 |
Python
| 35.481605 | 93 | 0.602182 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_core.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import torch
from dataclasses import dataclass
from omniisaacgymenvs.tasks.MFP.MFP2D_core import Core as Core2D
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
def quat_to_mat(quat: torch.Tensor) -> torch.Tensor:
"""
Converts a batch of quaternions to a batch of rotation matrices.
Args:
quat (torch.Tensor): Batch of quaternions.
Returns:
torch.Tensor: The batch of rotation matrices.
"""
w, x, y, z = torch.unbind(quat, -1)
two_s = 2.0 / ((quat * quat).sum(-1) + EPS)
R = torch.stack(
(
1 - two_s * (y * y + z * z),
two_s * (x * y - z * w),
two_s * (x * z + y * w),
two_s * (x * y + z * w),
1 - two_s * (x * x + z * z),
two_s * (y * z - x * w),
two_s * (x * z - y * w),
two_s * (y * z + x * w),
1 - two_s * (x * x + y * y),
),
-1,
)
return R.reshape(quat.shape[:-1] + (3, 3))
def mat_to_quat(mat: torch.Tensor) -> torch.Tensor:
"""
Converts a batch of rotation matrices to a batch of quaternions.
Args:
mat (torch.Tensor): Batch of rotation matrices.
Returns:
        torch.Tensor: The batch of quaternions, ordered as q = [w, x, y, z].
"""
quat = torch.zeros((mat.shape[0], 4), dtype=mat.dtype, device=mat.device)
t = mat[..., 0, 0] + mat[..., 1, 1] + mat[..., 2, 2]
r = torch.sqrt(1 + t) + EPS
s = 0.5 / r
quat[:, 0] = 0.5 * r
    quat[:, 1] = (mat[..., 2, 1] - mat[..., 1, 2]) * s
    quat[:, 2] = (mat[..., 0, 2] - mat[..., 2, 0]) * s
    quat[:, 3] = (mat[..., 1, 0] - mat[..., 0, 1]) * s
return quat
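# Illustrative sanity check (not part of the task API): quat_to_mat followed
# by mat_to_quat should approximately recover an identity quaternion. The
# tolerance is loose because both functions add EPS for numerical stability.
def _demo_quat_mat_roundtrip() -> None:
    q = torch.zeros((4, 4))
    q[:, 0] = 1.0  # batch of identity quaternions, [w, x, y, z]
    q_back = mat_to_quat(quat_to_mat(q))
    assert torch.allclose(q, q_back, atol=1e-3)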
def axis_angle_rotation(angle: torch.Tensor, axis: str) -> torch.Tensor:
"""
Returns the rotation matrix for a given angle and axis.
Args:
angle (torch.Tensor): The angle of rotation.
axis (str): The axis of rotation.
Returns:
torch.Tensor: The rotation matrix.
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
elif axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
elif axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
else:
        raise ValueError("axis must be either X, Y or Z.")
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
def euler_angles_to_quat(euler_angles: torch.Tensor) -> torch.Tensor:
"""
Converts a batch of euler angles to a batch of quaternions.
Args:
        euler_angles (torch.Tensor): Batch of euler angles ordered as [roll, pitch, yaw].
Returns:
torch.Tensor: The batch of quaternions.
"""
roll, pitch, yaw = torch.unbind(euler_angles, -1)
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
quat = torch.zeros(
(euler_angles.shape[0], 4), dtype=euler_angles.dtype, device=euler_angles.device
)
quat[:, 0] = cr * cp * cy + sr * sp * sy
quat[:, 1] = sr * cp * cy - cr * sp * sy
quat[:, 2] = cr * sp * cy + sr * cp * sy
quat[:, 3] = cr * cp * sy - sr * sp * cy
return quat
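# Minimal usage sketch (illustrative only): a pure yaw of pi/2 should map to
# the quaternion [cos(pi/4), 0, 0, sin(pi/4)] under the [roll, pitch, yaw]
# ordering unbound above.
def _demo_euler_angles_to_quat() -> None:
    import math
    euler = torch.tensor([[0.0, 0.0, math.pi / 2]])
    expected = torch.tensor([[math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)]])
    assert torch.allclose(euler_angles_to_quat(euler), expected, atol=1e-6)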
def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
"""
Converts a batch of euler angles to a batch of rotation matrices.
Args:
euler_angles (torch.Tensor): Batch of euler angles.
convention (str): The convention to use for the conversion.
Returns:
torch.Tensor: The batch of rotation matrices.
"""
matrices = [
axis_angle_rotation(e, c)
for c, e in zip(convention, torch.unbind(euler_angles, -1))
]
return torch.matmul(torch.matmul(matrices[2], matrices[1]), matrices[0])
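# Illustrative check of the convention handling above: with convention "XYZ",
# a batch whose first two angles are zero reduces to a pure Z rotation, since
# the X and Y factors are identity matrices.
def _demo_euler_angles_to_matrix() -> None:
    theta = torch.tensor([0.7])
    euler = torch.stack([torch.zeros(1), torch.zeros(1), theta], dim=-1)
    R = euler_angles_to_matrix(euler, "XYZ")
    assert torch.allclose(R, axis_angle_rotation(theta, "Z"), atol=1e-6)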
class Core(Core2D):
"""
The base class that implements the core of the task.
"""
def __init__(self, num_envs: int, device: str) -> None:
"""
Initializes the core of the task.
Args:
num_envs (int): Number of environments.
device (str): Device to run the code on.
"""
self._num_envs = num_envs
self._device = device
# Dimensions of the observation tensors
        self._dim_orientation = 6  # first two rows of the rotation matrix in the world frame [0:6]
        self._dim_velocity = 3  # linear velocity in the world frame (x_dot, y_dot, z_dot) [6:9]
        self._dim_omega = 3  # angular velocity (omega_x, omega_y, omega_z) [9:12]
        self._dim_task_label = 1  # label of the task to be executed (int) [12]
        self._dim_task_data = 9  # data to be used to fulfill the task (floats) [13:22]
# Observation buffers
self._num_observations = 22
self._obs_buffer = torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float32,
)
self._task_label = torch.ones(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_data = torch.zeros(
(self._num_envs, 9), device=self._device, dtype=torch.float32
)
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._obs_buffer[:, 0:6] = current_state["orientation"][:, :2, :].reshape(
self._num_envs, 6
)
self._obs_buffer[:, 6:9] = current_state["linear_velocity"]
self._obs_buffer[:, 9:12] = current_state["angular_velocity"]
self._obs_buffer[:, 12] = self._task_label
self._obs_buffer[:, 13:] = self._task_data
return self._obs_buffer
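# Hedged usage sketch: the state layout (one 3x3 rotation matrix per
# environment plus 3D linear and angular velocities) is inferred from
# update_observation_tensor above; the task label defaults to ones.
def _demo_core_observation() -> None:
    core = Core(num_envs=2, device="cpu")
    state = {
        "orientation": torch.eye(3).repeat(2, 1, 1),
        "linear_velocity": torch.zeros(2, 3),
        "angular_velocity": torch.zeros(2, 3),
    }
    obs = core.update_observation_tensor(state)
    assert obs.shape == (2, 22)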
class TaskDict:
"""
A class to store the task dictionary. It is used to pass the task data to the task class.
"""
def __init__(self) -> None:
self.gotoxy = 0
self.gotopose = 1
self.trackxyvel = 2
self.trackxyovel = 3
self.trackxyvelheading = 4
| 6,690 |
Python
| 29.004484 | 93 | 0.554858 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/__init__.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
| 293 |
Python
| 28.399997 | 82 | 0.648464 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_track_xyo_velocity.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_core import (
Core,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
TrackXYOVelocityReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
TrackXYOVelocityParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
from omni.isaac.core.prims import XFormPrimView
from pxr import Usd
from matplotlib import pyplot as plt
from typing import Tuple
import numpy as np
import wandb
import torch
import math
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class TrackXYOVelocityTask(Core):
"""
Implements the GoToPose task. The robot has to reach a target position and heading.
"""
def __init__(
self, task_param: dict, reward_param: dict, num_envs: int, device: str
) -> None:
"""
Initializes the GoToPoseTask.
Args:
task_param (dict): The parameters of the task.
reward_param (dict): The parameters of the reward.
num_envs (int): The number of environments.
device (str): The device to run the task on.
"""
super(TrackXYOVelocityTask, self).__init__(num_envs, device)
# Task and reward parameters
self._task_parameters = TrackXYOVelocityParameters(**task_param)
self._reward_parameters = TrackXYOVelocityReward(**reward_param)
# Curriculum
self._target_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.target_linear_velocity_curriculum,
)
self._target_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.target_angular_velocity_curriculum,
)
self._spawn_linear_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_linear_velocity_curriculum,
)
self._spawn_angular_velocity_sampler = CurriculumSampler(
self._task_parameters.spawn_angular_velocity_curriculum,
)
# Buffers
self._goal_reached = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.int32
)
self._target_linear_velocities = torch.zeros(
(self._num_envs, 2), device=self._device, dtype=torch.float32
)
self._target_angular_velocities = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_label = self._task_label * 3
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
Returns:
dict: The dictionary containing the statistics.
"""
torch_zeros = lambda: torch.zeros(
self._num_envs, dtype=torch.float, device=self._device, requires_grad=False
)
if not "linear_velocity_reward" in stats.keys():
stats["linear_velocity_reward"] = torch_zeros()
if not "linear_velocity_error" in stats.keys():
stats["linear_velocity_error"] = torch_zeros()
if not "angular_velocity_reward" in stats.keys():
stats["angular_velocity_reward"] = torch_zeros()
if not "angular_velocity_error" in stats.keys():
stats["angular_velocity_error"] = torch_zeros()
self.log_with_wandb = []
return stats
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._linear_velocity_error = (
self._target_linear_velocities - current_state["linear_velocity"]
)
self._angular_velocity_error = (
self._target_angular_velocities - current_state["angular_velocity"]
)
self._position_error = current_state["position"]
self._task_data[:, :2] = self._linear_velocity_error
self._task_data[:, 2] = self._angular_velocity_error
return self.update_observation_tensor(current_state)
def compute_reward(
self, current_state: torch.Tensor, actions: torch.Tensor
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
# position error
self.position_dist = torch.sqrt(torch.square(self._position_error).sum(-1))
self.linear_velocity_dist = torch.sqrt(
torch.square(self._linear_velocity_error).sum(-1)
)
self.angular_velocity_dist = torch.abs(self._angular_velocity_error)
# Checks if the goal is reached
lin_goal_is_reached = (
self.linear_velocity_dist < self._task_parameters.lin_vel_tolerance
).int()
ang_goal_is_reached = (
self.angular_velocity_dist < self._task_parameters.ang_vel_tolerance
).int()
goal_is_reached = lin_goal_is_reached * ang_goal_is_reached
self._goal_reached *= goal_is_reached # if not set the value to 0
self._goal_reached += goal_is_reached # if it is add 1
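        # Note on the two lines above (illustrative trace, not executed):
        # multiplying by the per-step flag zeroes the streak whenever the
        # agent leaves the tolerance band, so _goal_reached counts
        # *consecutive* steps in tolerance:
        #   streak = 0
        #   for flag in (1, 1, 0, 1):
        #       streak = streak * flag + flag  # -> 1, 2, 0, 1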
# Rewards
(
self.linear_velocity_reward,
self.angular_velocity_reward,
) = self._reward_parameters.compute_reward(
current_state,
actions,
self.linear_velocity_dist,
self.angular_velocity_dist,
)
return self.linear_velocity_reward + self.angular_velocity_reward
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
die = torch.zeros_like(self._goal_reached, dtype=torch.long)
ones = torch.ones_like(self._goal_reached, dtype=torch.long)
die = torch.where(
self.position_dist > self._task_parameters.kill_dist, ones, die
)
die = torch.where(
self._goal_reached > self._task_parameters.kill_after_n_steps_in_tolerance,
ones,
die,
)
return die
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
stats["linear_velocity_reward"] += self.linear_velocity_reward
stats["linear_velocity_error"] += self.linear_velocity_dist
stats["angular_velocity_reward"] += self.angular_velocity_reward
stats["angular_velocity_error"] += self.angular_velocity_dist
return stats
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
self._goal_reached[env_ids] = 0
def get_goals(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> list:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor]: The target positions and orientations.
"""
num_goals = len(env_ids)
# Randomizes the target linear velocity
r = self._target_linear_velocity_sampler.sample(
num_goals, step, device=self._device
)
theta = torch.rand((num_goals,), device=self._device) * 2 * math.pi
self._target_linear_velocities[env_ids, 0] = r * torch.cos(theta)
self._target_linear_velocities[env_ids, 1] = r * torch.sin(theta)
# Randomizes the target angular velocity
omega = self._target_angular_velocity_sampler.sample(
num_goals, step, device=self._device
)
self._target_angular_velocities[env_ids] = omega
p = torch.zeros((num_goals, 3), dtype=torch.float32, device=self._device)
p[:, 2] = 2
q = torch.zeros((num_goals, 4), dtype=torch.float32, device=self._device)
q[:, 0] = 1
return p, q
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
num_resets = len(env_ids)
# Resets the counter of steps for which the goal was reached
self.reset(env_ids)
# Randomizes the starting position of the platform
initial_position = torch.zeros(
(num_resets, 3), device=self._device, dtype=torch.float32
)
# Randomizes the heading of the platform
initial_orientation = torch.zeros(
(num_resets, 4), device=self._device, dtype=torch.float32
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_orientation[:, 0] = torch.cos(theta * 0.5)
initial_orientation[:, 3] = torch.sin(theta * 0.5)
# Randomizes the linear velocity of the platform
initial_velocity = torch.zeros(
(num_resets, 6), device=self._device, dtype=torch.float32
)
linear_velocity = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
theta = torch.rand((num_resets,), device=self._device) * 2 * math.pi
initial_velocity[:, 0] = linear_velocity * torch.cos(theta)
initial_velocity[:, 1] = linear_velocity * torch.sin(theta)
# Randomizes the angular velocity of the platform
angular_velocity = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
initial_velocity[:, 5] = angular_velocity
return (
initial_position,
initial_orientation,
initial_velocity,
)
def generate_target(self, path: str, position: torch.Tensor) -> None:
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the target.
"""
pass
def add_visual_marker_to_scene(self, scene: Usd.Stage) -> Tuple[Usd.Stage, None]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, None]: The scene and the visual marker.
"""
return scene, None
def log_spawn_data(self, step: int) -> dict:
"""
Logs the spawn data to wandb.
Args:
step (int): The current step.
Returns:
dict: The spawn data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the linear velocity of the platform
linear_velocities = self._spawn_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the angular velocity of the platform
angular_velocities = self._spawn_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
linear_velocities = linear_velocities.cpu().numpy()
angular_velocities = angular_velocities.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(linear_velocities, bins=32)
ax[0].set_title("Initial normed linear velocity")
ax[0].set_xlim(
self._spawn_linear_velocity_sampler.get_min_bound(),
self._spawn_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(angular_velocities, bins=32)
ax[1].set_title("Initial normed angular velocity")
ax[1].set_xlim(
self._spawn_angular_velocity_sampler.get_min_bound(),
self._spawn_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def log_target_data(self, step: int) -> dict:
"""
Logs the target data to wandb.
Args:
step (int): The current step.
Returns:
dict: The target data.
"""
dict = {}
num_resets = self._num_envs
# Randomizes the target linear velocity of the platform
r = self._target_linear_velocity_sampler.sample(
num_resets, step, device=self._device
)
# Randomizes the target angular velocity
d = self._target_angular_velocity_sampler.sample(
num_resets, step, device=self._device
)
r = r.cpu().numpy()
d = d.cpu().numpy()
fig, ax = plt.subplots(1, 2, dpi=100, figsize=(8, 8), sharey=True)
ax[0].hist(r, bins=32)
ax[0].set_title("Target normed linear velocity")
ax[0].set_xlim(
self._target_linear_velocity_sampler.get_min_bound(),
self._target_linear_velocity_sampler.get_max_bound(),
)
ax[0].set_xlabel("vel (m/s)")
ax[0].set_ylabel("count")
ax[1].hist(d, bins=32)
ax[1].set_title("Target normed angular velocity")
ax[1].set_xlim(
self._target_angular_velocity_sampler.get_min_bound(),
self._target_angular_velocity_sampler.get_max_bound(),
)
ax[1].set_xlabel("vel (rad/s)")
fig.tight_layout()
fig.canvas.draw()
data = np.array(fig.canvas.renderer.buffer_rgba())
plt.close(fig)
dict["curriculum/initial_velocities"] = wandb.Image(data)
return dict
def get_logs(self, step: int) -> dict:
"""
Logs the task data to wandb.
Args:
step (int): The current step.
Returns:
dict: The task data.
"""
dict = {}
if step % 50 == 0:
dict = {**dict, **self.log_spawn_data(step)}
dict = {**dict, **self.log_target_data(step)}
return dict
| 15,555 |
Python
| 33.114035 | 93 | 0.592093 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_core.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Tuple
from pxr import Usd
import torch
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
def quat_addition(q1, q2):
    """Composes (multiplies) two batches of quaternions in [w, x, y, z] order and returns the normalized result."""
q3 = torch.zeros_like(q1)
q3[:, 0] = (
q1[:, 0] * q2[:, 0]
- q1[:, 1] * q2[:, 1]
- q1[:, 2] * q2[:, 2]
- q1[:, 3] * q2[:, 3]
)
q3[:, 1] = (
q1[:, 0] * q2[:, 1]
+ q1[:, 1] * q2[:, 0]
+ q1[:, 2] * q2[:, 3]
- q1[:, 3] * q2[:, 2]
)
q3[:, 2] = (
q1[:, 0] * q2[:, 2]
- q1[:, 1] * q2[:, 3]
+ q1[:, 2] * q2[:, 0]
+ q1[:, 3] * q2[:, 1]
)
q3[:, 3] = (
q1[:, 0] * q2[:, 3]
+ q1[:, 1] * q2[:, 2]
- q1[:, 2] * q2[:, 1]
+ q1[:, 3] * q2[:, 0]
)
    q3 /= torch.norm(q3 + EPS, dim=-1, keepdim=True)
    return q3
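# Illustrative example (not used by the tasks): composing two 90-degree yaw
# quaternions should land close to a 180-degree yaw, i.e. [0, 0, 0, 1].
def _demo_quat_addition() -> None:
    half = 0.5 ** 0.5
    q90 = torch.tensor([[half, 0.0, 0.0, half]])  # 90 deg about z, [w, x, y, z]
    q180 = quat_addition(q90, q90)
    assert torch.allclose(q180, torch.tensor([[0.0, 0.0, 0.0, 1.0]]), atol=1e-5)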
class Core:
"""
The base class that implements the core of the task.
"""
def __init__(self, num_envs: int, device: str) -> None:
"""
The base class for the different subtasks.
Args:
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
# Dimensions of the observation tensors
self._dim_orientation = (
2 # theta heading in the world frame (cos(theta), sin(theta)) [0:2]
)
self._dim_velocity = 2 # velocity in the world (x_dot, y_dot) [2:4]
self._dim_omega = 1 # rotation velocity (theta_dot) [4]
self._dim_task_label = 1 # label of the task to be executed (int) [5]
        self._dim_task_data = 22  # data to be used to fulfill the task (floats) [6:28]
# Observation buffers
self._num_observations = 28
self._obs_buffer = torch.zeros(
(self._num_envs, self._num_observations),
device=self._device,
dtype=torch.float32,
)
self._task_label = torch.ones(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._task_data = torch.zeros(
(self._num_envs, self._dim_task_data),
device=self._device,
dtype=torch.float32,
)
def update_observation_tensor(self, current_state: dict) -> torch.Tensor:
"""
Updates the observation tensor with the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
self._obs_buffer[:, 0:2] = current_state["orientation"]
self._obs_buffer[:, 2:4] = current_state["linear_velocity"]
self._obs_buffer[:, 4] = current_state["angular_velocity"]
self._obs_buffer[:, 5] = self._task_label
self._obs_buffer[:, 6:28] = self._task_data
return self._obs_buffer
def create_stats(self, stats: dict) -> dict:
"""
Creates a dictionary to store the training statistics for the task.
Args:
stats (dict): The dictionary to store the statistics.
Returns:
dict: The dictionary containing the statistics.
"""
raise NotImplementedError
def get_state_observations(self, current_state: dict) -> torch.Tensor:
"""
Computes the observation tensor from the current state of the robot.
Args:
current_state (dict): The current state of the robot.
Returns:
torch.Tensor: The observation tensor.
"""
raise NotImplementedError
def compute_reward(
self, current_state: torch.Tensor, actions: torch.Tensor
) -> torch.Tensor:
"""
Computes the reward for the current state of the robot.
Args:
current_state (torch.Tensor): The current state of the robot.
actions (torch.Tensor): The actions taken by the robot.
Returns:
torch.Tensor: The reward for the current state of the robot.
"""
raise NotImplementedError
def update_kills(self) -> torch.Tensor:
"""
Updates if the platforms should be killed or not.
Returns:
            torch.Tensor: Whether the platforms should be killed or not.
"""
raise NotImplementedError
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
            stats (dict): The new statistics to be logged.
Returns:
dict: The statistics of the training
"""
raise NotImplementedError
def reset(self, env_ids: torch.Tensor) -> None:
"""
Resets the goal_reached_flag when an agent manages to solve its task.
Args:
env_ids (torch.Tensor): The ids of the environments.
"""
raise NotImplementedError
def get_goals(
self,
env_ids: torch.Tensor,
target_positions: torch.Tensor,
target_orientations: torch.Tensor,
) -> list:
"""
Generates a random goal for the task.
Args:
env_ids (torch.Tensor): The ids of the environments.
target_positions (torch.Tensor): The target positions.
target_orientations (torch.Tensor): The target orientations.
Returns:
list: The target positions and orientations.
"""
raise NotImplementedError
def get_initial_conditions(
self,
env_ids: torch.Tensor,
step: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Generates the initial conditions for the robots following a curriculum.
Args:
env_ids (torch.Tensor): The ids of the environments.
step (int, optional): The current step. Defaults to 0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The initial position,
orientation and velocity of the robot.
"""
raise NotImplementedError
def generate_target(self, path, position):
"""
Generates a visual marker to help visualize the performance of the agent from the UI.
Args:
path (str): The path where the pin is to be generated.
position (torch.Tensor): The position of the target.
"""
raise NotImplementedError
def add_visual_marker_to_scene(self, scene: Usd.Stage) -> Tuple[Usd.Stage, None]:
"""
Adds the visual marker to the scene.
Args:
scene (Usd.Stage): The scene to add the visual marker to.
Returns:
Tuple[Usd.Stage, None]: The scene and the visual marker.
"""
raise NotImplementedError
class TaskDict:
"""
A class to store the task dictionary. It is used to pass the task data to the task class.
"""
def __init__(self) -> None:
self.gotoxy = 0
self.gotopose = 1
self.trackxyvel = 2
self.trackxyovel = 3
self.trackxyvelheading = 4
| 7,370 |
Python
| 27.792969 | 93 | 0.555631 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_penalties.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
BasePenalty,
EnergyPenalty,
LinearVelocityPenalty,
scaling_functions,
BoundaryPenalty,
)
from dataclasses import dataclass, field
from typing import Dict
import torch
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class AngularVelocityPenalty(BasePenalty):
"""
This class has access to the angular velocity and applies a penalty based on its norm.
"""
weight: float = 0.1
scaling_function: str = "linear"
scaling_parameter: float = 1.0
min_value: float = 0
max_value: float = float("inf")
def __post_init__(self):
super().__post_init__()
assert self.weight > 0, "Weight must be positive"
assert self.scaling_function in scaling_functions, "Scaling function not found"
assert (
self.min_value < self.max_value
), "Min value must be smaller than max value"
self.scaling_function = scaling_functions[self.scaling_function]
def compute_penalty(
        self, state: Dict[str, torch.Tensor], actions: torch.Tensor, step: int
):
"""
Computes the penalty based on the norm of the angular velocity.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
if self.enable:
self.last_rate = self.get_rate(step)
# compute the norm of the angular velocity
norm = torch.norm(state["angular_velocity"], dim=-1) - self.min_value
            # clamp the shifted norm into [0, max_value - min_value]
norm[norm < 0] = 0
norm[norm > (self.max_value - self.min_value)] = (
self.max_value - self.min_value
)
# apply scaling function
norm = self.scaling_function(norm, p=self.scaling_parameter)
self.last_penalties = norm
return norm * self.last_rate * self.weight
else:
return torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
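# Hedged usage sketch: every field (including those inherited from
# BasePenalty, whose defaults are assumed valid here) has a default, so the
# dataclass can be built directly. Whether the penalty is enabled depends on
# the BasePenalty defaults; in both branches the output is one value per env.
def _demo_angular_velocity_penalty() -> None:
    penalty = AngularVelocityPenalty(weight=0.1, scaling_function="linear")
    state = {"angular_velocity": torch.ones(4, 3)}
    actions = torch.zeros(4, 8)
    assert penalty.compute_penalty(state, actions, step=0).shape == (4,)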
penalty_classes = {
"energy_penalty": EnergyPenalty,
"linear_velocity_penalty": LinearVelocityPenalty,
"angular_velocity_penalty": AngularVelocityPenalty,
}
@dataclass
class EnvironmentPenalties:
energy_penalty: EnergyPenalty = field(default_factory=dict)
linear_velocity_penalty: LinearVelocityPenalty = field(default_factory=dict)
angular_velocity_penalty: AngularVelocityPenalty = field(default_factory=dict)
def __post_init__(self):
self.penalties = []
self.energy_penalty = EnergyPenalty(**self.energy_penalty)
if self.energy_penalty.enable:
self.penalties.append(self.energy_penalty)
self.linear_velocity_penalty = LinearVelocityPenalty(
**self.linear_velocity_penalty
)
if self.linear_velocity_penalty.enable:
self.penalties.append(self.linear_velocity_penalty)
self.angular_velocity_penalty = AngularVelocityPenalty(
**self.angular_velocity_penalty
)
if self.angular_velocity_penalty.enable:
self.penalties.append(self.angular_velocity_penalty)
def compute_penalty(
self, state: Dict[str, torch.Tensor], actions: torch.Tensor, step: int
) -> torch.Tensor:
"""
Computes the penalties.
Args:
state (Dict[str, torch.Tensor]): State of the system.
actions (torch.Tensor): Actions taken.
step (int): Current step.
Returns:
torch.Tensor: Penalty.
"""
penalties = torch.zeros(
[actions.shape[0]], dtype=torch.float32, device=actions.device
)
for penalty in self.penalties:
penalties += penalty.compute_penalty(state, actions, step)
return penalties
def get_stats_name(self) -> list:
"""
Returns the names of the statistics to be computed.
Returns:
list: Names of the statistics to be tracked.
"""
names = []
for penalty in self.penalties:
names.append("penalties/" + penalty.name)
return names
def update_statistics(self, stats: dict) -> dict:
"""
Updates the training statistics.
Args:
stats (dict): Current statistics.
Returns:
dict: Updated statistics.
"""
for penalty in self.penalties:
stats["penalties/" + penalty.name] += penalty.get_unweigthed_penalties()
return stats
def get_logs(self) -> dict:
"""
Logs the penalty.
Returns:
dict: Dictionary containing the penalty.
"""
dict = {}
for penalty in self.penalties:
dict["penalties/" + penalty.name + "_weight"] = (
penalty.get_last_rate() * penalty.weight
)
return dict
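# Hedged usage sketch: EnvironmentPenalties is normally built from a config
# dictionary; empty sub-dicts fall back to the dataclass defaults above
# (assumed to leave every penalty disabled, so the total is zero).
def _demo_environment_penalties() -> None:
    penalties = EnvironmentPenalties(
        energy_penalty={},
        linear_velocity_penalty={},
        angular_velocity_penalty={},
    )
    state = {
        "linear_velocity": torch.zeros(2, 3),
        "angular_velocity": torch.zeros(2, 3),
    }
    actions = torch.zeros(2, 8)
    assert penalties.compute_penalty(state, actions, step=0).shape == (2,)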
| 5,361 |
Python
| 29.64 | 90 | 0.602873 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP3D_disturbances.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances_parameters import (
DisturbancesParameters,
MassDistributionDisturbanceParameters,
ForceDisturbanceParameters,
TorqueDisturbanceParameters,
NoisyObservationsParameters,
NoisyActionsParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
ForceDisturbance as ForceDisturbance2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
TorqueDisturbance as TorqueDisturbance2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
NoisyActions as NoisyActions2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
NoisyObservations as NoisyObservations2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
MassDistributionDisturbances as MassDistributionDisturbances2D,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances import (
Disturbances as Disturbances2D,
)
from typing import Tuple
import torch
import math
import omni
class MassDistributionDisturbances(MassDistributionDisturbances2D):
"""
Creates disturbances on the platform by simulating a mass distribution on the
platform.
"""
def __init__(
self,
parameters: MassDistributionDisturbanceParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (MassDistributionDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(MassDistributionDisturbances, self).__init__(parameters, num_envs, device)
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the mass disturbances.
"""
super().instantiate_buffers()
self.platforms_CoM = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def randomize_masses(self, env_ids: torch.Tensor, step: int = 0) -> None:
"""
Randomizes the masses of the platforms.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
step (int): The current step of the learning process.
"""
num_resets = len(env_ids)
self.platforms_mass[env_ids, 0] = self.mass_sampler.sample(
num_resets, step, device=self._device
)
r = self.CoM_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
phi = (
torch.rand((num_resets), dtype=torch.float32, device=self._device) * math.pi
)
self.platforms_CoM[env_ids, 0] = torch.cos(theta) * torch.sin(phi) * r
self.platforms_CoM[env_ids, 1] = torch.sin(theta) * torch.sin(phi) * r
self.platforms_CoM[env_ids, 2] = torch.cos(phi) * r
def set_coms(
self,
body: omni.isaac.core.prims.XFormPrimView,
env_ids: torch.Tensor,
        joints_idx: Tuple[int, int, int],
) -> None:
"""
Sets the CoM of the platforms.
Args:
body (omni.isaac.core.XFormPrimView): The rigid bodies containing the prismatic joints controlling the position of the CoMs.
env_ids (torch.Tensor): The ids of the environments to reset.
            joints_idx (Tuple[int, int, int]): The ids of the x, y and z joints respectively.
"""
joints_position = torch.zeros(
(len(env_ids), 3), device=self._device, dtype=torch.float32
)
joints_position[:, joints_idx[0]] = self.platforms_CoM[env_ids, 0]
joints_position[:, joints_idx[1]] = self.platforms_CoM[env_ids, 1]
joints_position[:, joints_idx[2]] = self.platforms_CoM[env_ids, 2]
if self.parameters.enable:
body.set_joint_positions(joints_position, indices=env_ids)
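# Standalone sketch of the spherical sampling used in randomize_masses above:
# a radius r and two uniform angles (theta, phi) are mapped to a 3D offset.
def _demo_spherical_offset(r: torch.Tensor) -> torch.Tensor:
    theta = torch.rand_like(r) * math.pi * 2
    phi = torch.rand_like(r) * math.pi
    return torch.stack(
        (
            torch.cos(theta) * torch.sin(phi) * r,
            torch.sin(theta) * torch.sin(phi) * r,
            torch.cos(phi) * r,
        ),
        dim=-1,
    )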
class ForceDisturbance(ForceDisturbance2D):
"""
Creates disturbances on the platform by simulating an uneven floor.
"""
def __init__(
self, parameters: ForceDisturbanceParameters, num_envs: int, device: str
) -> None:
"""
Args:
parameters (ForceDisturbanceParameters): The task configuration.
num_envs (int): The number of environments to create.
device (str): The device to use for the computation.
"""
super(ForceDisturbance, self).__init__(parameters, num_envs, device)
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the uneven floor disturbances.
"""
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_z_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_x_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_y_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._floor_z_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_forces = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.forces = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_floor(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the uneven floor.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_y_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_z_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._floor_x_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._floor_y_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._floor_z_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_forces[env_ids] = self.force_sampler.sample(
num_resets, step, device=self._device
)
else:
r = self.force_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
phi = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
)
self.forces[env_ids, 0] = torch.cos(theta) * torch.sin(phi) * r
self.forces[env_ids, 1] = torch.sin(theta) * torch.sin(phi) * r
self.forces[env_ids, 2] = torch.cos(phi) * r
def get_floor_forces(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the floor forces for the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The floor forces to apply to the robot.
"""
if self.parameters.use_sinusoidal_patterns:
self.forces[:, 0] = (
torch.sin(root_pos[:, 0] * self._floor_x_freq + self._floor_x_offset)
* self._max_forces
)
self.forces[:, 1] = (
torch.sin(root_pos[:, 1] * self._floor_y_freq + self._floor_y_offset)
* self._max_forces
)
self.forces[:, 2] = (
torch.sin(root_pos[:, 2] * self._floor_z_freq + self._floor_z_offset)
* self._max_forces
)
return self.forces
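# Standalone sketch of the sinusoidal pattern used above: each force
# component is a position-dependent sine wave, giving a smooth, spatially
# varying "uneven floor". Shown for a single axis; the method applies it per
# axis with per-environment frequencies and offsets.
def _demo_sinusoidal_force(
    pos: torch.Tensor, freq: float, offset: float, max_force: float
) -> torch.Tensor:
    return torch.sin(pos * freq + offset) * max_force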
class TorqueDisturbance(TorqueDisturbance2D):
"""
Creates disturbances on the platform by simulating a torque applied to its center.
"""
def __init__(
self, parameters: TorqueDisturbanceParameters, num_envs: int, device: str
) -> None:
"""
Args:
parameters (TorqueDisturbanceParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(TorqueDisturbance, self).__init__(parameters, num_envs, device)
def instantiate_buffers(self) -> None:
"""
Instantiates the buffers used to store the uneven torque disturbances."""
if self.parameters.use_sinusoidal_patterns:
self._torque_x_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_y_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_z_freq = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_x_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_y_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._torque_z_offset = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self._max_torques = torch.zeros(
(self._num_envs), device=self._device, dtype=torch.float32
)
self.torques = torch.zeros(
(self._num_envs, 3), device=self._device, dtype=torch.float32
)
def generate_torque(
self, env_ids: torch.Tensor, num_resets: int, step: int = 0
) -> None:
"""
Generates the torque disturbance.
Args:
env_ids (torch.Tensor): The ids of the environments to reset.
num_resets (int): The number of resets to perform.
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
                # use the same min/max frequencies and offsets as the floor disturbance
self._torque_x_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_y_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_z_freq[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_freq - self.parameters.min_freq)
+ self.parameters.min_freq
)
self._torque_x_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._torque_y_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._torque_z_offset[env_ids] = (
torch.rand(num_resets, dtype=torch.float32, device=self._device)
* (self.parameters.max_offset - self.parameters.min_offset)
+ self.parameters.min_offset
)
self._max_torques[env_ids] = self.torque_sampler.sample(
num_resets, step, device=self._device
)
else:
r = self.torque_sampler.sample(num_resets, step, device=self._device)
theta = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
* 2
)
phi = (
torch.rand((num_resets), dtype=torch.float32, device=self._device)
* math.pi
)
self.torques[env_ids, 0] = torch.cos(theta) * torch.sin(phi) * r
self.torques[env_ids, 1] = torch.sin(theta) * torch.sin(phi) * r
self.torques[env_ids, 2] = torch.cos(phi) * r
def get_torque_disturbance(self, root_pos: torch.Tensor) -> torch.Tensor:
"""
Computes the torque forces for the current state of the robot.
Args:
root_pos (torch.Tensor): The position of the root of the robot.
Returns:
torch.Tensor: The torque forces to apply to the robot.
"""
if self.parameters.use_sinusoidal_patterns:
self.torques[:, 0] = (
torch.sin(root_pos[:, 0] * self._torque_x_freq + self._torque_x_offset)
* self._max_torques
)
self.torques[:, 1] = (
torch.sin(root_pos[:, 1] * self._torque_y_freq + self._torque_y_offset)
* self._max_torques
)
self.torques[:, 2] = (
torch.sin(root_pos[:, 2] * self._torque_z_freq + self._torque_z_offset)
* self._max_torques
)
return self.torques
class NoisyObservations(NoisyObservations2D):
"""
Adds noise to the observations of the robot.
"""
def __init__(
self, parameters: NoisyObservationsParameters, num_envs: int, device: str
) -> None:
"""
Args:
            parameters (NoisyObservationsParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(NoisyObservations, self).__init__(parameters, num_envs, device)
class NoisyActions(NoisyActions2D):
"""
Adds noise to the actions of the robot.
"""
def __init__(
self, parameters: NoisyActionsParameters, num_envs: int, device: str
) -> None:
"""
Args:
            parameters (NoisyActionsParameters): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
super(NoisyActions, self).__init__(parameters, num_envs, device)
class Disturbances(Disturbances2D):
"""
Class to create disturbances on the platform.
"""
def __init__(
self,
parameters: dict,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
self.parameters = DisturbancesParameters(**parameters)
self.mass_disturbances = MassDistributionDisturbances(
self.parameters.mass_disturbance,
num_envs,
device,
)
self.force_disturbances = ForceDisturbance(
self.parameters.force_disturbance,
num_envs,
device,
)
self.torque_disturbances = TorqueDisturbance(
self.parameters.torque_disturbance,
num_envs,
device,
)
self.noisy_observations = NoisyObservations(
self.parameters.observations_disturbance,
num_envs,
device,
)
self.noisy_actions = NoisyActions(
self.parameters.actions_disturbance,
num_envs,
device,
)
| 18,366 |
Python
| 36.560327 | 136 | 0.557661 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_task_parameters.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from dataclasses import dataclass, field
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumParameters,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_penalties import (
BoundaryPenalty,
ConeShapePenalty,
ContactPenalty,
)
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
@dataclass
class GoToXYParameters:
"""
Parameters for the GoToXY task.
"""
name: str = "GoToXY"
position_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
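# Hedged usage sketch: task parameters are normally instantiated from a
# YAML-derived dict; omitted sub-dicts rely on BoundaryPenalty and
# CurriculumParameters accepting no arguments (assumed here).
def _demo_gotoxy_parameters() -> GoToXYParameters:
    return GoToXYParameters(
        position_tolerance=0.05,
        kill_after_n_steps_in_tolerance=100,
        kill_dist=8.0,
    )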
@dataclass
class GoToPoseParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToPose"
position_tolerance: float = 0.01
heading_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 10.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughXYParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToThroughXY"
position_tolerance: float = 0.1
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughXYSequenceParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToThroughXYSequence"
position_tolerance: float = 0.1
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
num_points: int = 5
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert self.num_points > 0, "Number of points must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughPoseParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToThroughPose"
position_tolerance: float = 0.1
heading_tolerance: float = 0.05
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughPoseSequenceParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToThroughPoseSequence"
position_tolerance: float = 0.1
heading_tolerance: float = 0.05
linear_velocity_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 1
goal_random_position: float = 0.0
kill_dist: float = 10.0
num_points: int = 5
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.linear_velocity_tolerance > 0
), "Velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert self.num_points > 0, "Number of points must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughGateParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToThroughGate"
goal_random_position: float = 0.0
kill_dist: float = 10.0
gate_width: float = 1.5
gate_thickness: float = 0.2
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
contact_penalty: ContactPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.gate_width > 0, "Gate width must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.contact_penalty = ContactPenalty(**self.contact_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class GoThroughGateSequenceParameters:
"""
Parameters for the GoToPose task.
"""
name: str = "GoToThroughGate"
goal_random_position: float = 0.0
kill_dist: float = 10.0
gate_width: float = 1.5
gate_thickness: float = 0.2
num_points: int = 5
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
contact_penalty: ContactPenalty = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_gate_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_gate_heading_curriculum: CurriculumParameters = field(default_factory=dict)
def __post_init__(self) -> None:
assert self.gate_width > 0, "Gate width must be positive."
assert self.goal_random_position >= 0, "Goal random position must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert self.num_points > 0, "Number of points must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.contact_penalty = ContactPenalty(**self.contact_penalty)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
self.spawn_gate_position_curriculum = CurriculumParameters(
**self.spawn_gate_position_curriculum
)
self.spawn_gate_heading_curriculum = CurriculumParameters(
**self.spawn_gate_heading_curriculum
)
@dataclass
class TrackXYVelocityParameters:
"""
Parameters for the TrackXYVelocity task.
"""
name: str = "TrackXYVelocity"
lin_vel_tolerance: float = 0.01
kill_after_n_steps_in_tolerance: int = 50
goal_random_velocity: float = 0.75
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
        assert self.goal_random_velocity >= 0, "Goal random velocity must be non-negative."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
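# Illustrative hydration sketch (values are assumptions, not repo defaults): these
# parameter dataclasses are typically built from a Hydra/YAML config dict, and
# __post_init__ promotes the nested dicts into CurriculumParameters objects.
#
#   cfg = {
#       "name": "TrackXYVelocity",
#       "lin_vel_tolerance": 0.02,
#       "target_linear_velocity_curriculum": {
#           "rate_parameters": {"function": "linear", "start": 0, "end": 1000},
#           "sampling_parameters": {"distribution": "uniform",
#                                   "start_max_value": 0.5, "end_max_value": 2.0},
#       },
#   }
#   params = TrackXYVelocityParameters(**cfg)
#   type(params.target_linear_velocity_curriculum)  # -> CurriculumParameters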
@dataclass
class TrackXYOVelocityParameters:
"""
Parameters for the TrackXYOVelocity task.
"""
name: str = "TrackXYOVelocity"
lin_vel_tolerance: float = 0.01
ang_vel_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_linear_velocity: float = 0.75
    goal_random_angular_velocity: float = 1.0
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
target_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.lin_vel_tolerance > 0, "Linear velocity tolerance must be positive."
assert (
self.ang_vel_tolerance > 0
), "Angular velocity tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert (
self.goal_random_linear_velocity >= 0
), "Goal random linear velocity must be positive."
assert (
self.goal_random_angular_velocity >= 0
), "Goal random angular velocity must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.target_angular_velocity_curriculum = CurriculumParameters(
**self.target_angular_velocity_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class TrackXYVelocityHeadingParameters:
"""
Parameters for the TrackXYVelocityHeading task.
"""
name: str = "TrackXYVelocityHeading"
velocity_tolerance: float = 0.01
heading_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
goal_random_position: float = 0.0
kill_dist: float = 500.0
target_linear_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.velocity_tolerance > 0, "Velocity tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
        assert self.goal_random_position >= 0, "Goal random position must be non-negative."
assert self.kill_dist > 0, "Kill distance must be positive."
self.target_linear_velocity_curriculum = CurriculumParameters(
**self.target_linear_velocity_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
@dataclass
class CloseProximityDockParameters:
"""
Parameters for the GoToPose task."""
name: str = "CloseProximityDock"
position_tolerance: float = 0.01
heading_tolerance: float = 0.025
kill_after_n_steps_in_tolerance: int = 50
kill_dist: float = 10.0
dock_footprint_diameter: float = 0.8
goal_to_penalty_anchor_dist: float = 2.0
env_x: float = 3.0
env_y: float = 5.0
boundary_penalty: BoundaryPenalty = field(default_factory=dict)
relative_angle_penalty: ConeShapePenalty = field(default_factory=dict)
contact_penalty: ContactPenalty = field(default_factory=dict)
fp_footprint_diameter_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_dock_mass_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_dock_space_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_position_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_relative_angle_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_heading_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_linear_velocity_curriculum: CurriculumParameters = field(default_factory=dict)
spawn_angular_velocity_curriculum: CurriculumParameters = field(
default_factory=dict
)
def __post_init__(self) -> None:
assert self.position_tolerance > 0, "Position tolerance must be positive."
assert self.heading_tolerance > 0, "Heading tolerance must be positive."
assert (
self.kill_after_n_steps_in_tolerance > 0
), "Kill after n steps in tolerance must be positive."
assert self.kill_dist > 0, "Kill distance must be positive."
assert (
self.dock_footprint_diameter > 0
), "Dock footprint diameter must be positive."
assert self.env_x > 0, "Environment x dimension must be positive."
assert self.env_y > 0, "Environment y dimension must be positive."
self.boundary_penalty = BoundaryPenalty(**self.boundary_penalty)
self.relative_angle_penalty = ConeShapePenalty(**self.relative_angle_penalty)
self.contact_penalty = ContactPenalty(**self.contact_penalty)
self.fp_footprint_diameter_curriculum = CurriculumParameters(
**self.fp_footprint_diameter_curriculum
)
self.spawn_dock_mass_curriculum = CurriculumParameters(
**self.spawn_dock_mass_curriculum
)
self.spawn_dock_space_curriculum = CurriculumParameters(
**self.spawn_dock_space_curriculum
)
self.spawn_position_curriculum = CurriculumParameters(
**self.spawn_position_curriculum
)
self.spawn_relative_angle_curriculum = CurriculumParameters(
**self.spawn_relative_angle_curriculum
)
self.spawn_heading_curriculum = CurriculumParameters(
**self.spawn_heading_curriculum
)
self.spawn_linear_velocity_curriculum = CurriculumParameters(
**self.spawn_linear_velocity_curriculum
)
self.spawn_angular_velocity_curriculum = CurriculumParameters(
**self.spawn_angular_velocity_curriculum
)
| 25,090 |
Python
| 38.327586 | 88 | 0.676006 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/curriculum_helpers.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from inspect import isfunction
import dataclasses
import torch
import math
####################################################################################################
# Curriculum growth functions
####################################################################################################
def curriculum_linear_growth(
step: int = 0, start: int = 0, end: int = 1000, **kwargs
) -> float:
"""
Generates a curriculum with a linear growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = current / (relative_end)
return rate
def curriculum_sigmoid_growth(
step: int = 0, start: int = 100, end: int = 1000, extent: float = 3, **kwargs
) -> float:
"""
Generates a curriculum with a sigmoid growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
extent (float, optional): Extent of the sigmoid function.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = (
math.tanh(((extent * 2 * current / relative_end) - extent) / 2)
- math.tanh(-extent / 2)
) / (math.tanh(extent / 2) - math.tanh(-extent / 2))
return rate
def curriculum_pow_growth(
step: int = 0, start: int = 0, end: int = 1000, alpha: float = 2.0, **kwargs
) -> float:
"""
Generates a curriculum with a power growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
alpha (float, optional): Exponent of the power function.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = (current / relative_end) ** alpha
return rate
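# Illustrative comparison (not part of the library API): all three profiles are 0.0
# before `start`, 1.0 after `end`, and differ in between. Halfway through a
# 0-1000 step schedule:
#
#   curriculum_linear_growth(step=500, start=0, end=1000)             # 0.5
#   curriculum_pow_growth(step=500, start=0, end=1000, alpha=2.0)     # 0.25
#   curriculum_sigmoid_growth(step=500, start=0, end=1000, extent=3)  # 0.5 (S-curve midpoint)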
####################################################################################################
# Curriculum sampling functions
####################################################################################################
def norm_cdf(x: float) -> float:
"""
Computes standard normal cumulative distribution function
Args:
x (float): Input value.
Returns:
float: Value of the standard normal cumulative distribution function
"""
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
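# Sanity check (illustrative): norm_cdf(0.0) == 0.5 by symmetry of the standard
# normal distribution; the function saturates towards 0.0 and 1.0 in the tails.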
def truncated_normal(
n: int = 1,
mean: float = 0.0,
std: float = 0.5,
min_value: float = 0.0,
max_value: float = 1.0,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Method based on https://github.com/pytorch/pytorch/blob/a40812de534b42fcf0eb57a5cecbfdc7a70100cf/torch/nn/init.py#L22
Values are generated by using a truncated uniform distribution and
then using the inverse CDF for the normal distribution.
Args:
n (int, optional): Number of samples to generate.
mean (float, optional): Mean of the normal distribution.
std (float, optional): Standard deviation of the normal distribution.
min_value (float, optional): Minimum value of the truncated distribution.
max_value (float, optional): Maximum value of the truncated distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a truncated normal distribution.
"""
tensor = torch.zeros((n), dtype=torch.float32, device=device)
# Get upper and lower cdf values
l = norm_cdf((min_value - mean) / std)
u = norm_cdf((max_value - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=min_value, max=max_value)
return tensor
def normal(
n: int = 1,
mean: float = 0.0,
std: float = 0.5,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Generates a tensor with values from a normal distribution.
Args:
n (int, optional): Number of samples to generate.
mean (float, optional): Mean of the normal distribution.
std (float, optional): Standard deviation of the normal distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a normal distribution.
"""
return torch.normal(mean, std, (n,), device=device)
def uniform(
n: int = 1,
min_value: float = 0.0,
max_value: float = 1.1,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Generates a tensor with values from a uniform distribution.
Args:
n (int, optional): Number of samples to generate.
min_value (float, optional): Minimum value of the uniform distribution.
max_value (float, optional): Maximum value of the uniform distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a uniform distribution.
"""
return torch.rand((n), device=device) * (max_value - min_value) + min_value
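# Illustrative draws (shapes and ranges only; values are random):
#
#   uniform(n=4, min_value=-1.0, max_value=1.0)        # 4 values in [-1.0, 1.0)
#   normal(n=4, mean=0.0, std=0.1)                     # 4 unbounded Gaussian draws
#   truncated_normal(n=4, mean=0.0, std=0.5,
#                    min_value=-0.25, max_value=0.25)  # Gaussian truncated to [-0.25, 0.25]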
####################################################################################################
# Function dictionaries
####################################################################################################
RateFunctionDict = {
"none": lambda step, start, end, **kwargs: 1.0,
"linear": curriculum_linear_growth,
"sigmoid": curriculum_sigmoid_growth,
"pow": curriculum_pow_growth,
}
SampleFunctionDict = {
"uniform": uniform,
"normal": normal,
"truncated_normal": truncated_normal,
}
@dataclasses.dataclass
class CurriculumRateParameters:
start: int = 50
end: int = 1000
function: str = "none"
extent: float = 3
alpha: float = 2.0
def __post_init__(self):
assert self.start >= 0, "Start must be greater than 0"
assert self.end > 0, "End must be greater than 0"
assert self.start < self.end, "Start must be smaller than end"
assert self.function in [
"none",
"linear",
"sigmoid",
"pow",
], "Function must be linear, sigmoid or pow"
assert self.extent > 0, "Extent must be greater than 0"
assert self.alpha > 0, "Alpha must be greater than 0"
self.function = RateFunctionDict[self.function]
self.kwargs = {
key: value for key, value in self.__dict__.items() if not isfunction(value)
}
def get(self, step: int) -> float:
"""
Gets the difficulty for the given step.
Args:
step (int): Current step.
Returns:
float: Current difficulty.
"""
return self.function(
step=step,
**self.kwargs,
)
@dataclasses.dataclass
class CurriculumSamplingParameters:
distribution: str = "uniform"
start_min_value: float = 0.0 # uniform only
start_max_value: float = 0.0 # uniform only
end_min_value: float = 0.0 # uniform only
end_max_value: float = 0.0 # uniform only
start_mean: float = 0.0 # normal and truncated_normal only
start_std: float = 0.0 # normal and truncated_normal only
end_mean: float = 0.0 # normal and truncated_normal only
end_std: float = 0.0 # normal and truncated_normal only
min_value: float = 0.0 # truncated_normal only
max_value: float = 0.0 # truncated_normal only
def __post_init__(self):
        assert (
            self.min_value <= self.max_value
        ), "min_value must not exceed max_value"
        assert (
            self.start_min_value <= self.start_max_value
        ), "start_min_value must not exceed start_max_value"
        assert (
            self.end_min_value <= self.end_max_value
        ), "end_min_value must not exceed end_max_value"
        assert self.start_std >= 0, "Standard deviation must be non-negative"
        assert self.end_std >= 0, "Standard deviation must be non-negative"
assert self.distribution in [
"uniform",
"normal",
"truncated_normal",
], "Distribution must be uniform, normal or truncated_normal"
self.function = SampleFunctionDict[self.distribution]
@dataclasses.dataclass
class CurriculumParameters:
rate_parameters: CurriculumRateParameters = dataclasses.field(default_factory=dict)
sampling_parameters: CurriculumSamplingParameters = dataclasses.field(
default_factory=dict
)
def __post_init__(self):
self.rate_parameters = CurriculumRateParameters(**self.rate_parameters)
self.sampling_parameters = CurriculumSamplingParameters(
**self.sampling_parameters
)
class CurriculumSampler:
def __init__(
self,
curriculum_parameters: CurriculumParameters,
):
self.rp = curriculum_parameters.rate_parameters
self.sp = curriculum_parameters.sampling_parameters
def get_rate(self, step: int) -> float:
"""
Gets the difficulty for the given step.
Args:
step (int): Current step.
Returns:
float: Current difficulty.
"""
return self.rp.get(step)
def get_min(self) -> float:
"""
Gets the minimum value for the current step.
Returns:
float: Minimum value.
"""
if self.sp.distribution == "truncated_normal":
return self.sp.start_mean
elif self.sp.distribution == "normal":
return self.sp.start_mean
else:
return self.sp.start_min_value
def get_max(self) -> float:
"""
Gets the maximum value for the current step.
Returns:
float: Maximum value.
"""
if self.sp.distribution == "truncated_normal":
return self.sp.end_mean
elif self.sp.distribution == "normal":
return self.sp.end_mean
else:
return self.sp.end_max_value
def get_min_bound(self) -> float:
if self.sp.distribution == "truncated_normal":
return self.sp.min_value
elif self.sp.distribution == "normal":
return max(
[
self.sp.end_mean - 2 * self.sp.end_std,
self.sp.start_mean - 2 * self.sp.end_std,
]
)
else:
return max([self.sp.end_min_value, self.sp.start_min_value])
def get_max_bound(self) -> float:
if self.sp.distribution == "truncated_normal":
return self.sp.max_value
elif self.sp.distribution == "normal":
return max(
[
self.sp.end_mean + 2 * self.sp.end_std,
self.sp.start_mean + 2 * self.sp.end_std,
]
)
else:
return max([self.sp.end_max_value, self.sp.start_max_value])
def sample(self, n: int, step: int, device: str = "cpu") -> torch.Tensor:
"""
Samples values from the curriculum distribution.
Args:
n (int): Number of samples to generate.
step (int): Current step.
device (str): Device to use for the tensor.
Returns:
torch.Tensor: Tensor with values from the curriculum distribution.
"""
# Get the difficulty for the current step
rate = self.get_rate(step)
# Sample values from the curriculum distribution
if self.sp.distribution == "truncated_normal":
mean = self.sp.start_mean + (self.sp.end_mean - self.sp.start_mean) * rate
std = self.sp.start_std + (self.sp.end_std - self.sp.start_std) * rate
return self.sp.function(
n=n,
mean=mean,
std=std,
min_value=self.sp.min_value,
max_value=self.sp.max_value,
device=device,
)
elif self.sp.distribution == "normal":
mean = self.sp.start_mean + (self.sp.end_mean - self.sp.start_mean) * rate
std = self.sp.start_std + (self.sp.end_std - self.sp.start_std) * rate
return self.sp.function(n=n, mean=mean, std=std, device=device)
else:
min = (
self.sp.start_min_value
+ (self.sp.end_min_value - self.sp.start_min_value) * rate
)
max = (
self.sp.start_max_value
+ (self.sp.end_max_value - self.sp.start_max_value) * rate
)
return self.sp.function(n=n, min_value=min, max_value=max, device=device)
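# Minimal end-to-end sketch (assumed usage; dict values are illustrative). A linear
# schedule widens a uniform spawn range from [0.0, 0.5] at step 0 to [0.0, 2.0] at
# step 1000, so at step 500 samples land in [0.0, 1.25).
if __name__ == "__main__":
    params = CurriculumParameters(
        rate_parameters={"function": "linear", "start": 0, "end": 1000},
        sampling_parameters={
            "distribution": "uniform",
            "start_min_value": 0.0,
            "start_max_value": 0.5,
            "end_min_value": 0.0,
            "end_max_value": 2.0,
        },
    )
    sampler = CurriculumSampler(params)
    print(sampler.get_rate(500))          # 0.5
    print(sampler.sample(n=8, step=500))  # 8 values in [0.0, 1.25)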
| 13,775 |
Python
| 29.276923 | 121 | 0.561742 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/MFP2D_task_factory.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_xy import GoToXYTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_pose import (
GoToPoseTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xy_velocity import (
TrackXYVelocityTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xyo_velocity import (
TrackXYOVelocityTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_track_xy_velocity_heading import (
TrackXYVelocityHeadingTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_close_proximity_dock import (
CloseProximityDockTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_xy import GoThroughXYTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_xy_seq import GoThroughXYSequenceTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_pose import GoThroughPoseTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_pose_seq import (
GoThroughPoseSequenceTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_gate import GoThroughGateTask
from omniisaacgymenvs.tasks.MFP.MFP2D_go_through_gate_seq import (
GoThroughGateSequenceTask,
)
class TaskFactory:
"""
Factory class to create tasks."""
def __init__(self):
self.creators = {}
def register(self, name: str, task):
"""
Registers a new task."""
self.creators[name] = task
def get(
self, task_dict: dict, reward_dict: dict, num_envs: int, device: str
) -> object:
"""
Returns a task."""
assert (
task_dict["name"] == reward_dict["name"]
), "The mode of both the task and the reward must match."
mode = task_dict["name"]
assert task_dict["name"] in self.creators.keys(), "Unknown task mode."
return self.creators[mode](task_dict, reward_dict, num_envs, device)
task_factory = TaskFactory()
task_factory.register("GoToXY", GoToXYTask)
task_factory.register("GoToPose", GoToPoseTask)
task_factory.register("TrackXYVelocity", TrackXYVelocityTask)
task_factory.register("TrackXYOVelocity", TrackXYOVelocityTask)
task_factory.register("TrackXYVelocityHeading", TrackXYVelocityHeadingTask)
task_factory.register("CloseProximityDock", CloseProximityDockTask)
task_factory.register("GoThroughXY", GoThroughXYTask)
task_factory.register("GoThroughXYSequence", GoThroughXYSequenceTask)
task_factory.register("GoThroughPose", GoThroughPoseTask)
task_factory.register("GoThroughPoseSequence", GoThroughPoseSequenceTask)
task_factory.register("GoThroughGate", GoThroughGateTask)
task_factory.register("GoThroughGateSequence", GoThroughGateSequenceTask)
| 2,842 |
Python
| 35.922077 | 86 | 0.743842 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/unit_tests/test_curriculum_helpers.py
|
import numpy as np
import unittest
import torch
import math
import omniisaacgymenvs.tasks.MFP.curriculum_helpers as ch
sigmoid_dict_0 = {"function": "sigmoid", "start": 0, "end": 1000, "extent": 1.5}
sigmoid_dict_1 = {"function": "sigmoid", "start": 100, "end": 1200, "extent": 3.0}
sigmoid_dict_2 = {"function": "sigmoid", "start": 200, "end": 1400, "extent": 4.5}
sigmoid_dict_3 = {"function": "sigmoid", "start": 400, "end": 1800, "extent": 6.0}
none_dict_0 = {"function": "none", "start": 0, "end": 1000}
none_dict_1 = {"function": "none", "start": 100, "end": 1200}
none_dict_2 = {"function": "none", "start": 200, "end": 1400}
none_dict_3 = {"function": "none", "start": 400, "end": 1800}
lin_dict_0 = {"function": "linear", "start": 0, "end": 1000}
lin_dict_1 = {"function": "linear", "start": 100, "end": 1200}
lin_dict_2 = {"function": "linear", "start": 200, "end": 1400}
lin_dict_3 = {"function": "linear", "start": 400, "end": 1800}
pow_dict_0 = {"function": "pow", "start": 0, "end": 1000, "alpha": 0.5}
pow_dict_1 = {"function": "pow", "start": 100, "end": 1200, "alpha": 0.75}
pow_dict_2 = {"function": "pow", "start": 200, "end": 1400, "alpha": 1.5}
pow_dict_3 = {"function": "pow", "start": 400, "end": 1800, "alpha": 3.0}
rate_list = [
sigmoid_dict_0,
sigmoid_dict_1,
sigmoid_dict_2,
sigmoid_dict_3,
none_dict_0,
none_dict_1,
none_dict_2,
none_dict_3,
lin_dict_0,
lin_dict_1,
lin_dict_2,
lin_dict_3,
pow_dict_0,
pow_dict_1,
pow_dict_2,
pow_dict_3,
]
trunc_norm_dict_0 = {
"distribution": "truncated_normal",
"start_mean": -0.5,
"start_std": 0.5,
"end_mean": 5.0,
"end_std": 0.5,
"min_value": -0.5,
"max_value": 0.5,
}
trunc_norm_dict_1 = {
"distribution": "truncated_normal",
"start_mean": 0.0,
"start_std": 0.01,
"end_mean": 4.0,
"end_std": 0.01,
"min_value": 0.25,
"max_value": 6.0,
}
trunc_norm_dict_2 = {
"distribution": "truncated_normal",
"start_mean": 0.25,
"start_std": 0.5,
"end_mean": 3.0,
"end_std": 2.0,
"min_value": 0.25,
"max_value": 3.0,
}
trunc_norm_dict_3 = {
"distribution": "truncated_normal",
"start_mean": 0.5,
"start_std": 0.5,
"end_mean": 2.0,
"end_std": 1.0,
"min_value": 0.25,
"max_value": 4.0,
}
norm_dict_0 = {
"distribution": "normal",
"start_mean": -0.5,
"start_std": 0.5,
"end_mean": 5.0,
"end_std": 0.5,
}
norm_dict_1 = {
"distribution": "normal",
"start_mean": 0.0,
"start_std": 0.01,
"end_mean": 4.0,
"end_std": 0.01,
}
norm_dict_2 = {
"distribution": "normal",
"start_mean": 0.25,
"start_std": 0.5,
"end_mean": 3.0,
"end_std": 2.0,
}
norm_dict_3 = {
"distribution": "normal",
"start_mean": 0.5,
"start_std": 0.5,
"end_mean": 2.0,
"end_std": 1.0,
}
uniform_dict_0 = {
"distribution": "uniform",
"start_min_value": -0.5,
"start_max_value": 0.5,
"end_min_value": 5.0,
"end_max_value": 5.0,
}
uniform_dict_1 = {
"distribution": "uniform",
"start_min_value": 0.0,
"start_max_value": 0.0,
"end_min_value": 1.0,
"end_max_value": 4.0,
}
uniform_dict_2 = {
"distribution": "uniform",
"start_min_value": 0.2,
"start_max_value": 0.3,
"end_min_value": 2.0,
"end_max_value": 3.0,
}
uniform_dict_3 = {
"distribution": "uniform",
"start_min_value": 0.5,
"start_max_value": 0.5,
"end_min_value": -2.0,
"end_max_value": 2.0,
}
dist_list = [
trunc_norm_dict_0,
trunc_norm_dict_1,
trunc_norm_dict_2,
trunc_norm_dict_3,
norm_dict_0,
norm_dict_1,
norm_dict_2,
norm_dict_3,
uniform_dict_0,
uniform_dict_1,
uniform_dict_2,
uniform_dict_3,
]
class TestCurriculumLoaders(unittest.TestCase):
def test_loading_all_rate_loaders(self):
success = False
try:
for rate in rate_list:
ch.CurriculumRateParameters(**rate)
success = True
except:
pass
self.assertTrue(success)
def test_all_sampler_loaders(self):
success = False
try:
for dist in dist_list:
ch.CurriculumSamplingParameters(**dist)
success = True
except:
pass
self.assertTrue(success)
def test_sigmoid_rate_loader(self):
rate = ch.CurriculumRateParameters(**sigmoid_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["sigmoid"])
self.assertEqual(rate.start, 0)
self.assertEqual(rate.end, 1000)
self.assertEqual(rate.extent, 1.5)
def test_none_rate_loader(self):
rate = ch.CurriculumRateParameters(**none_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["none"])
def test_linear_rate_loader(self):
rate = ch.CurriculumRateParameters(**lin_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["linear"])
self.assertEqual(rate.start, 0)
self.assertEqual(rate.end, 1000)
def test_pow_rate_loader(self):
rate = ch.CurriculumRateParameters(**pow_dict_0)
self.assertEqual(rate.function, ch.RateFunctionDict["pow"])
self.assertEqual(rate.start, 0)
self.assertEqual(rate.end, 1000)
self.assertEqual(rate.alpha, 0.5)
def test_error_handling_rate_loader(self):
success = 1
try:
rate = ch.CurriculumRateParameters(
**{"function": "none", "start": 0, "end": -1000}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "none", "start": -100, "end": 1000}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "sigmoid", "start": 100, "end": 1000, "extent": -1}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "sigmoid", "start": 100, "end": 1000, "extent": 0}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "linear", "start": 100, "end": -1000}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "linear", "start": -1000, "end": -100}
)
success *= 0
except:
pass
try:
rate = ch.CurriculumRateParameters(
**{"function": "pow", "start": 100, "end": 1000, "alpha": -1}
)
success *= 0
except:
pass
self.assertTrue(success == 1)
def test_load_empty_rate_loader(self):
success = False
try:
rate = ch.CurriculumRateParameters(**{})
success = True
except:
pass
self.assertTrue(success)
def test_load_empty_sampler_loader(self):
success = False
try:
dist = ch.CurriculumSamplingParameters(**{})
success = True
except:
pass
self.assertTrue(success)
def test_load_trunc_norm_sampler_loader(self):
dist = ch.CurriculumSamplingParameters(**trunc_norm_dict_0)
self.assertEqual(dist.function, ch.SampleFunctionDict["truncated_normal"])
self.assertEqual(dist.start_mean, -0.5)
self.assertEqual(dist.start_std, 0.5)
self.assertEqual(dist.end_mean, 5.0)
self.assertEqual(dist.end_std, 0.5)
self.assertEqual(dist.min_value, -0.5)
self.assertEqual(dist.max_value, 0.5)
def test_load_norm_sampler_loader(self):
dist = ch.CurriculumSamplingParameters(**norm_dict_0)
self.assertEqual(dist.function, ch.SampleFunctionDict["normal"])
self.assertEqual(dist.start_mean, -0.5)
self.assertEqual(dist.start_std, 0.5)
self.assertEqual(dist.end_mean, 5.0)
self.assertEqual(dist.end_std, 0.5)
def test_load_uniform_sampler_loader(self):
dist = ch.CurriculumSamplingParameters(**uniform_dict_0)
self.assertEqual(dist.function, ch.SampleFunctionDict["uniform"])
self.assertEqual(dist.start_min_value, -0.5)
self.assertEqual(dist.start_max_value, 0.5)
self.assertEqual(dist.end_min_value, 5.0)
self.assertEqual(dist.end_max_value, 5.0)
if __name__ == "__main__":
unittest.main()
| 8,633 |
Python
| 27.401316 | 82 | 0.553226 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/unit_tests/run_tasks_tests.py
|
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp()
import unittest
testmodules = [
"omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_curriculum_helpers",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_disturbances",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_parameters",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_rewards",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_core",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_go_to_xy",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_go_to_pose",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_track_xy_vel",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP2D_track_xyo_vel",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_disturbances",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_parameters",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_rewards",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_core",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_go_to_xyz",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_go_to_pose",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_track_xyz_vel",
# "omniisaacgymenvs.tasks.virtual_floating_platform.unit_tests.test_MFP3D_track_6d_vel",
]
suite = unittest.TestSuite()
for t in testmodules:
try:
# If the module defines a suite() function, call it to get the suite.
mod = __import__(t, globals(), locals(), ["suite"])
suitefn = getattr(mod, "suite")
suite.addTest(suitefn())
except (ImportError, AttributeError):
# else, just load all the test cases from the module.
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
unittest.TextTestRunner(verbosity=2).run(suite)
simulation_app.close()
| 2,172 |
Python
| 50.738094 | 93 | 0.752762 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/MFP/unit_tests/test_MFP2D_go_to_pose.py
|
from omniisaacgymenvs.tasks.MFP.MFP2D_go_to_pose import (
GoToPoseTask,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_rewards import (
GoToPoseReward,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_task_parameters import (
GoToPoseParameters,
)
import numpy as np
import unittest
import torch
import math
# =============================================================================
# Default parameters
# =============================================================================
default_params = GoToPoseParameters(
position_tolerance=0.01,
heading_tolerance=0.025,
kill_after_n_steps_in_tolerance=5,
goal_random_position=0.0,
max_spawn_dist=6.0,
min_spawn_dist=3.0,
kill_dist=8.0,
spawn_curriculum=False,
spawn_curriculum_min_dist=0.5,
spawn_curriculum_max_dist=2.5,
spawn_curriculum_mode="linear",
spawn_curriculum_warmup=250,
spawn_curriculum_end=750,
)
default_rewards = GoToPoseReward(
position_reward_mode="linear",
heading_reward_mode="linear",
position_exponential_reward_coeff=0.25,
heading_exponential_reward_coeff=0.25,
position_scale=1.0,
heading_scale=1.0,
)
default_num_envs = 4
default_device = "cuda:0"
# =============================================================================
# create_stats & update_statistics
# =============================================================================
class TestCreateStats(unittest.TestCase):
def setUp(self) -> None:
torch_zeros = lambda: torch.zeros(
default_num_envs,
dtype=torch.float,
device=default_device,
requires_grad=False,
)
self.stats = {
"position_reward": torch_zeros(),
"heading_reward": torch_zeros(),
"position_error": torch_zeros(),
"heading_error": torch_zeros(),
}
self.obj = GoToPoseTask({}, {}, default_num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._reward_parameters = default_rewards
self.position = 1.0
self.heading = 1.0
self.position_error = 1.0
self.heading_error = 1.0
self.new_stats = {
"position_reward": torch_zeros() + self.position,
"heading_reward": torch_zeros() + self.heading,
"position_error": torch_zeros() + self.position_error,
"heading_error": torch_zeros() + self.heading_error,
}
def test_create_stats(self):
stats = self.obj.create_stats({})
self.assertEqual(stats.keys(), self.stats.keys())
def test_update_statistics(self):
stats = self.obj.create_stats({})
self.obj.position_reward = self.stats["position_reward"]
self.obj.heading_reward = self.stats["heading_reward"]
self.obj.position_dist = self.stats["position_error"]
self.obj.heading_dist = self.stats["heading_error"]
stats = self.obj.update_statistics(self.new_stats)
self.assertTrue(
torch.all(stats["position_reward"] == self.new_stats["position_reward"])
)
self.assertTrue(
torch.all(stats["heading_reward"] == self.new_stats["heading_reward"])
)
self.assertTrue(
torch.all(stats["position_error"] == self.new_stats["position_error"])
)
self.assertTrue(
torch.all(stats["heading_error"] == self.new_stats["heading_error"])
)
# =============================================================================
# get_state_observations
# =============================================================================
class TestGetStateObservation(unittest.TestCase):
def setUp(self):
# Current state of the robots
self.positions = torch.tensor(
[[0, 0], [1, 1], [2, 2], [-1, -1]], dtype=torch.float, device=default_device
)
self.headings = torch.tensor(
[[0], [np.pi / 2], [np.pi], [-np.pi / 2]],
dtype=torch.float,
device=default_device,
)
self.orientations = torch.tensor(
[
[torch.cos(self.headings[0]), torch.sin(self.headings[0])],
[torch.cos(self.headings[1]), torch.sin(self.headings[1])],
[torch.cos(self.headings[2]), torch.sin(self.headings[2])],
[torch.cos(self.headings[3]), torch.sin(self.headings[3])],
],
dtype=torch.float,
device=default_device,
)
# Targets state of the robots
self.target_headings = torch.tensor(
[np.pi * 2, np.pi, np.pi / 2, np.pi / 4],
dtype=torch.float,
device=default_device,
)
self.target_positions = torch.tensor(
[[0, 0], [-1, -1], [-2, 2], [-1, -1]],
dtype=torch.float,
device=default_device,
)
# Expected state observations
self.expected_position = torch.tensor(
[[0, 0], [-2, -2], [-4, 0], [0, 0]],
dtype=torch.float,
device=default_device,
)
self.expected_heading = torch.tensor(
[0, np.pi / 2, -np.pi / 2, np.pi * 3 / 4],
dtype=torch.float,
device=default_device,
)
# Recreate the state dict sent to the task
self.current_state = {
"position": torch.tensor(
self.positions, dtype=torch.float, device=default_device
),
"orientation": torch.tensor(
self.orientations, dtype=torch.float, device=default_device
),
"linear_velocity": torch.zeros(
(default_num_envs, 2), dtype=torch.float, device=default_device
),
"angular_velocity": torch.zeros(
(default_num_envs), dtype=torch.float, device=default_device
),
}
# Generate the task
self.obj = GoToPoseTask({}, {}, default_num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._reward_parameters = default_rewards
# Overriding the target positions and headings
self.obj._target_headings = self.target_headings
self.obj._target_positions = self.target_positions
def test_get_state_position(self):
# Generate the state observation to be passed to the agent
state_observation = self.obj.get_state_observations(self.current_state)
# Position error in the world frame
gen_position = state_observation[:, 6:8]
self.assertTrue(torch.allclose(gen_position, self.expected_position))
def test_get_state_orientation(self):
# Generate the state observation to be passed to the agent
state_observation = self.obj.get_state_observations(self.current_state)
# Heading error in the world frame (cos(theta), sin(theta))
gen_heading = torch.arctan2(state_observation[:, 9], state_observation[:, 8])
self.assertTrue(
torch.allclose(gen_heading, self.expected_heading, rtol=1e-3, atol=1e-4)
)
# =============================================================================
# compute_reward & update_kills
# =============================================================================
class TestComputeReward(unittest.TestCase):
def setUp(self):
# Current state of the robots
self.positions = torch.tensor(
[[0, 0], [1, 1], [2, 2], [-1, -1]], dtype=torch.float, device=default_device
)
self.headings = torch.tensor(
[[0], [np.pi / 2], [np.pi], [-np.pi / 2]],
dtype=torch.float,
device=default_device,
)
self.orientations = torch.tensor(
[
[torch.cos(self.headings[0]), torch.sin(self.headings[0])],
[torch.cos(self.headings[1]), torch.sin(self.headings[1])],
[torch.cos(self.headings[2]), torch.sin(self.headings[2])],
[torch.cos(self.headings[3]), torch.sin(self.headings[3])],
],
dtype=torch.float,
device=default_device,
)
# Targets state of the robots
self.target_headings = torch.tensor(
[0, np.pi, np.pi / 2, np.pi / 4],
dtype=torch.float,
device=default_device,
)
self.target_positions = torch.tensor(
[[0, 0], [-1, -1], [-2, 2], [-1, -1]],
dtype=torch.float,
device=default_device,
)
# Expected state observations
self.expected_position = torch.tensor(
[[0, 0], [-2, -2], [-4, 0], [0, 0]],
dtype=torch.float,
device=default_device,
)
self.expected_heading = torch.tensor(
[0, np.pi / 2, -np.pi / 2, np.pi * 3 / 4],
dtype=torch.float,
device=default_device,
)
# Recreate the state dict sent to the task
self.current_state = {
"position": torch.tensor(
self.positions, dtype=torch.float, device=default_device
),
"orientation": torch.tensor(
self.orientations, dtype=torch.float, device=default_device
),
"linear_velocity": torch.zeros(
(default_num_envs, 2), dtype=torch.float, device=default_device
),
"angular_velocity": torch.zeros(
(default_num_envs), dtype=torch.float, device=default_device
),
}
# Generate the task
self.obj = GoToPoseTask({}, {}, default_num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._reward_parameters = default_rewards
# Overriding the target positions and headings
self.obj._target_headings = self.target_headings
self.obj._target_positions = self.target_positions
def test_get_compute_reward_goal_logic_1(self):
# Will run 3 steps to check if the condition for goal reached is working
# Tests shifts in position
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 1)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 2)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
self.current_state["position"][0, 0] = 2 # moving away from the goal.
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 0)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
def test_get_compute_reward_goal_logic_2(self):
# Will run 3 steps to check if the condition for goal reached is working
# Tests shifts in heading
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 1)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 2)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
self.current_state["orientation"][0, 0] = np.cos(
np.pi / 2
) # moving away from the goal.
self.current_state["orientation"][0, 1] = np.sin(
np.pi / 2
) # moving away from the goal.
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
self.assertTrue(self.obj._goal_reached[0] == 0)
self.assertTrue(self.obj._goal_reached[1] == 0)
self.assertTrue(self.obj._goal_reached[2] == 0)
self.assertTrue(self.obj._goal_reached[3] == 0)
def test_get_compute_reward_position_dist_is_ok(self):
# Checks if the position distance is being computed correctly
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
expected_dist = torch.sqrt(torch.square(self.expected_position).sum(-1))
self.assertTrue(
torch.allclose(self.obj.position_dist, expected_dist, rtol=1e-3, atol=1e-4)
)
def test_get_compute_reward_heading_dist_is_ok(self):
# Checks if the heading distance is being computed correctly
state_observation = self.obj.get_state_observations(self.current_state)
# Compute the reward
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
expected_dist = torch.abs(self.expected_heading)
self.assertTrue(
torch.allclose(self.obj.heading_dist, expected_dist, rtol=1e-3, atol=1e-4)
)
def test_update_kills_1(self):
# Check if the kill condition is being updated correctly
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die1 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die2 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die3 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die4 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die5 = self.obj.update_kills()
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die6 = self.obj.update_kills()
self.assertTrue(
torch.all(die1 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die2 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die3 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die4 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die5 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die6 == torch.tensor([1, 0, 0, 0], device=default_device))
)
def test_update_kills_2(self):
# Check if the kill condition is being updated correctly
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die1 = self.obj.update_kills()
self.current_state["position"][0, 0] = 20 # moving away from the goal.
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die2 = self.obj.update_kills()
self.current_state["position"][0, 0] = 0 # moving away from the goal.
state_observation = self.obj.get_state_observations(self.current_state)
reward = self.obj.compute_reward(state_observation, torch.zeros(4, 2))
die3 = self.obj.update_kills()
self.assertTrue(
torch.all(die1 == torch.tensor([0, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die2 == torch.tensor([1, 0, 0, 0], device=default_device))
)
self.assertTrue(
torch.all(die3 == torch.tensor([0, 0, 0, 0], device=default_device))
)
class TestGetGoals(unittest.TestCase):
def setUp(self):
self.num_envs = 1000
self.obj = GoToPoseTask({}, {}, self.num_envs, default_device)
self.obj._task_parameters = default_params
self.obj._target_positions = torch.zeros(
(self.num_envs, 2), device=default_device
)
self.obj._target_headings = torch.zeros(self.num_envs, device=default_device)
self.target_positions = torch.zeros((self.num_envs, 2), device=default_device)
self.target_orientations = torch.zeros(
(self.num_envs, 4), device=default_device
)
def test_get_goals(self):
        env_ids = torch.arange(
            0, self.num_envs, device=default_device, dtype=torch.int64
        )
target_positions, target_orientations = self.obj.get_goals(
env_ids, self.target_positions, self.target_orientations
)
# Check if target positions and orientations are updated correctly
self.assertTrue(torch.all(target_positions[env_ids, :2] != 0))
self.assertTrue(torch.all(target_orientations[env_ids, 0] != 1))
self.assertTrue(torch.all(target_orientations[env_ids, 3] != 0))
# Check if target positions and orientations are within the specified range
self.assertTrue(
torch.all(
torch.abs(target_positions[env_ids, :2])
<= self.obj._task_parameters.goal_random_position
)
)
self.assertTrue(
torch.all(
(torch.abs(target_orientations[env_ids, 0]) <= 1)
* (torch.abs(target_orientations[env_ids, 3]) <= 1)
)
)
# Check if target headings are within the range of [0, 2*pi]
self.assertTrue(
torch.all(
(self.obj._target_headings[env_ids] >= 0)
* (self.obj._target_headings[env_ids] <= 2 * math.pi)
)
)
if __name__ == "__main__":
unittest.main()
| 19,306 |
Python
| 39.222917 | 88 | 0.576919 |
elharirymatteo/RANS/omniisaacgymenvs/tasks/utils/fp_utils.py
|
# Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
from typing import Callable
def quantize_tensor_values(tensor, n_values):
"""
    Quantizes the values of a tensor into 2*n_values + 1 discrete values in the range [-1, 1] using PyTorch's quantization functions.
Args:
- tensor: a PyTorch tensor of shape (batch_size, num_features)
- n_values: an integer indicating the number of discrete values to use
Returns:
- a new tensor of the same shape as the input tensor, with each value quantized to a discrete value in the range [-1,1]
"""
assert n_values >= 1, "n_values must be greater than or equal to 1"
assert tensor.min() >= -1 and tensor.max() <= 1, "tensor values must be in the range [-1,1]"
scale = 1.0 / n_values
quantized_tensor = torch.quantize_per_tensor(tensor, scale=scale, zero_point=0,
dtype=torch.qint8)
quantized_tensor = quantized_tensor.dequantize()
return quantized_tensor
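# Illustrative check (not from the source): with n_values=2 the output snaps to
# multiples of 1/2, i.e. the 2*2 + 1 = 5 levels {-1.0, -0.5, 0.0, 0.5, 1.0}.
#
#   t = torch.tensor([-0.9, -0.3, 0.1, 0.4, 0.8])
#   quantize_tensor_values(t, 2)  # tensor([-1.0000, -0.5000, 0.0000, 0.5000, 1.0000])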
def quaternion_to_rotation_matrix(Q):
"""
    Convert a quaternion into a full three-dimensional rotation matrix.
Input
:param Q: A 4 element array representing the quaternion (q0,q1,q2,q3)
Output
:return: A 3x3 element matrix representing the full 3D rotation matrix.
This rotation matrix converts a point in the local reference
frame to a point in the global reference frame.
"""
# Extract the values from Q
q0 = Q[0]
q1 = Q[1]
q2 = Q[2]
q3 = Q[3]
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# 3x3 rotation matrix
rot_matrix = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
return rot_matrix
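# Quick sanity check (illustrative): the identity quaternion (q0, q1, q2, q3) =
# (1, 0, 0, 0) maps to the 3x3 identity matrix.
#
#   quaternion_to_rotation_matrix(np.array([1.0, 0.0, 0.0, 0.0]))  # -> np.eye(3)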
| 3,896 |
Python
| 38.765306 | 125 | 0.669148 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToXY_zero_convergence.yaml
|
name: "position"
run_batch: 1
goals_x: []
goals_y: []
position_distance_threshold: 0.03
save_dir: "mj_runs/position_zero_convergence"
| 133 |
YAML
| 21.33333 | 45 | 0.729323 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToXY_Square.yaml
|
name: "position"
run_batch: 0
goals_x: [ 2, 2, 2, 2, 2, 1, 0,-1,-2,-2,-2,-2,-2,-1, 0, 1, 2]
goals_y: [-2,-1, 0, 1, 2, 2, 2, 2, 2, 1, 0,-1,-2,-2,-2,-2,-2]
position_distance_threshold: 0.03
save_dir: "mj_runs/position_square"
| 223 |
YAML
| 36.333327 | 61 | 0.547085 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Circle.yaml
|
name: "linear_velocity"
run_batch: 0
trajectory_type: "circle"
x_offset: 0
y_offset: 0
radius: 1.5
closed: True
lookahead_dist: 0.10
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_circle"
| 194 |
YAML
| 18.499998 | 30 | 0.742268 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Spiral.yaml
|
name: "linear_velocity"
run_batch: 0
trajectory_type: "spiral"
x_offset: 0
y_offset: 0
start_radius: 0.5
end_radius: 1.5
num_loops: 4
closed: True
lookahead_dist: 0.15
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_spiral"
| 229 |
YAML
| 18.166665 | 30 | 0.737991 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Circle_lab.yaml
|
name: "linear_velocity"
run_batch: 0
trajectory_type: "circle"
x_offset: 2.5
y_offset: -1.5
radius: 0.75
closed: True
lookahead_dist: 0.15
target_tracking_velocity: 0.3
save_dir: "ros_runs/trackXYVel/vel_circle_run_0"
| 217 |
YAML
| 20.799998 | 48 | 0.741935 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_zero_convergence.yaml
|
name: "pose"
run_batch: 1024
goals_x: [0]
goals_y: [0]
goals_theta: [0]
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "mj_runs/pose_zero_convergence"
| 184 |
YAML
| 22.124997 | 41 | 0.73913 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVelHeading_Infinite.yaml
|
name: "linear_velocity_heading"
run_batch: 0
trajectory_type: "infinite"
x_offset: 0
y_offset: 0
a: 1.5 # 'a' controls the size of the lemniscate, analogous to 'radius' for the circle
closed: True
lookahead_dist: 0.1
target_tracking_velocity: 0.2
save_dir: "mj_runs/vel_heading_infinite"
| 290 |
YAML
| 28.099997 | 87 | 0.741379 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVel_Square.yaml
|
name: "linear_velocity"
run_batch: 0
trajectory_type: "square"
x_offset: 0
y_offset: 0
height: 3.0
closed: True
lookahead_dist: 0.3
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_square"
| 193 |
YAML
| 18.399998 | 30 | 0.740933 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToXY_Circle.yaml
|
name: "position"
run_batch: 0
goals_x: [1.5 , 1.37031819, 1.00369591, 0.46352549, -0.15679269, -0.75, -1.21352549, -1.4672214 , -1.4672214 , -1.21352549, -0.75, -0.15679269, 0.46352549, 1.00369591, 1.37031819, 1.5 ]
goals_y: [ 0.00000000, 0.610104965, 1.11471724, 1.42658477, 1.49178284, 1.29903811, 0.881677878, 0.311867536, -0.311867536, -0.881677878, -1.29903811, -1.49178284, -1.42658477, -1.11471724, -0.610104965, 0.0]
position_distance_threshold: 0.03
save_dir: "mj_runs/position_square"
| 509 |
YAML
| 83.999986 | 214 | 0.681729 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_zero_lab.yaml
|
name: "pose"
run_batch: 1024
goals_x: [2.5]
goals_y: [-1.5]
goals_theta: [0]
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "ros_runs/pose_disturbance/run_1"
| 191 |
YAML
| 22.999997 | 43 | 0.727749 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_zero_convergence_lab.yaml
|
name: "pose"
run_batch: 256
goals_x: [2.5]
goals_y: [-1.5]
goals_theta: [0]
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "mj_runs/pose_zero_convergence"
| 188 |
YAML
| 22.624997 | 41 | 0.728723 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/GoToPose_Square.yaml
|
name: "pose"
run_batch: 0
goals_x: [ 2, 2, 2, 2, 2, 1, 0, -1, -2, -2, -2, -2,-2,-1, 0, 1, 2]
goals_y: [ -2, -1, 0, 1, 2, 2, 2, 2, 2, 1, 0, -1,-2,-2,-2,-2,-2]
goals_theta: [1.57,1.57,1.57,1.57,3.14,3.14,3.14,3.14,-1.57,-1.57,-1.57,-1.57, 0, 0, 0, 0, 0]
position_distance_threshold: 0.03
orientation_distance_threshold: 0.03
save_dir: "mj_runs/pose_square"
| 410 |
YAML
| 50.374994 | 93 | 0.478049 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/hl_task/TrackXYVelHeading_Square.yaml
|
name: "linear_velocity_heading"
run_batch: 0
trajectory_type: "square"
x_offset: 0
y_offset: 0
height: 3.0
closed: True
lookahead_dist: 0.3
target_tracking_velocity: 0.25
save_dir: "mj_runs/vel_heading_square"
| 209 |
YAML
| 19.999998 | 38 | 0.751196 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/controller/Optimal_LQR_DAC.yaml
|
name: Discrete_Adaptive_LQR_Controller
# State cost matrix
Q: [1,1,5,5,1,1,1]
# Control cost matrix
R: [0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
# Disturbance weight matrix
W: [0.1,0.1,0.1,0.1,0.1,0.1,0.1]
make_planar_compatible: True
control_type: LQR
update_matrices_every_n_steps: 100
| 288 |
YAML
| 25.272725 | 44 | 0.701389 |
elharirymatteo/RANS/omniisaacgymenvs/cfg/controller/Optimal_LQR_DC.yaml
|
name: Discrete_LQR_Controller
# State cost matrix
# pos_x, pos_y, vel_x, vel_y, qw, qz, wz
Q: [0.0001,0.00001,100,100,0.000001,0.000001,1]
# Control cost matrix
R: [0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
# Disturbance weight matrix
W: [0.1,0.1,0.1,0.1,0.1,0.1,0.1]
make_planar_compatible: True
control_type: LQR
| 314 |
YAML
| 27.636361 | 47 | 0.678344 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/generate_data.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import os
import datetime
import numpy as np
import torch
import hydra
import carb
from omegaconf import DictConfig
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver
from rlgames_train import RLGTrainer
from rl_games.torch_runner import Runner
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames_mfp import VecEnvRLGames
def run_sdg(cfg, horizon, num_ep=1):
"""
Generate synthetic data using the trained agent.
TODO: Discard terminated agents
"""
root_dir = "./sdg/" + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.makedirs(root_dir, exist_ok=True)
rlg_config_dict = omegaconf_to_dict(cfg.train)
runner = Runner(RLGPUAlgoObserver())
runner.load(rlg_config_dict)
runner.reset()
agent = runner.create_player()
agent.restore(cfg.checkpoint)
agent.has_batch_dimension = True
agent.batch_size = 1024
# agent.init_rnn()
env = agent.env
obs = env.reset()
    # if the kill_thrusters randomization flag is set, record which thrusters are
    # disabled so their actions can be masked out when saving each episode
if cfg.task.env.platform.randomization.kill_thrusters:
killed_thrusters_idxs = env._task.virtual_platform.action_masks
for n in range(num_ep):
evaluation_dir = os.path.join(root_dir, str(n))
os.makedirs(evaluation_dir, exist_ok=True)
env._task.reset_idx(env._task.all_indices.long())
obs = env.reset()
ep_data = {
"act": [], "state": [], "task": [],
"rgb": [], "depth": [], "rews": []
}
for _ in range(horizon):
actions = agent.get_action(obs["obs"], is_deterministic=True)
position = env._task.current_state["position"]
obs, reward, _, _ = env.step(actions)
state = obs["obs"]["state"][:, :5]
task_data = obs["obs"]["state"][:, 6:]
state = torch.cat([position, state], dim=-1)
rgb, depth = env._task.get_rgbd_data()
ep_data["act"].append(actions.cpu())
ep_data["state"].append(state.cpu())
ep_data["task"].append(task_data.cpu())
ep_data["rgb"].append(rgb.cpu())
ep_data["depth"].append(depth.cpu())
ep_data["rews"].append(reward.cpu())
ep_data["act"] = torch.stack(ep_data["act"]).transpose(0, 1)
ep_data["state"] = torch.stack(ep_data["state"]).transpose(0, 1)
ep_data["task"] = torch.stack(ep_data["task"]).transpose(0, 1)
ep_data["rews"] = torch.stack(ep_data["rews"]).transpose(0, 1)
ep_data["rgb"] = torch.stack(ep_data["rgb"]).transpose(0, 1)
ep_data["depth"] = torch.stack(ep_data["depth"]).transpose(0, 1)
# if thrusters were killed during the episode, save the action with the mask applied to the thrusters that were killed
if cfg.task.env.platform.randomization.kill_thrusters:
ep_data["act"] = ep_data["act"] * (1 - killed_thrusters_idxs.cpu().numpy())
# save the episode data
torch.save(ep_data["act"], os.path.join(evaluation_dir, "act.pt"))
torch.save(ep_data["state"], os.path.join(evaluation_dir, "state.pt"))
torch.save(ep_data["task"], os.path.join(evaluation_dir, "task.pt"))
torch.save(ep_data["rews"], os.path.join(evaluation_dir, "rews.pt"))
torch.save(ep_data["rgb"], os.path.join(evaluation_dir, "rgb.pt"))
torch.save(ep_data["depth"], os.path.join(evaluation_dir, "depth.pt"))
carb.log_info("Data generation complete")
@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
if cfg.checkpoint is None:
print("No checkpoint specified. Exiting...")
return
    horizon = 250  # 5 s at 50 fps
num_ep = 300
cfg.task.env.maxEpisodeLength = horizon + 2
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# _____Create environment_____
headless = cfg.headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport,
)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
run_sdg(cfg, horizon, num_ep)
env.close()
if __name__ == "__main__":
parse_hydra_configs()
| 5,146 |
Python
| 35.764285 | 126 | 0.629615 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/multi_model_eval.py
|
import numpy as np
import hydra
from omegaconf import DictConfig
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver
from rlgames_train import RLGTrainer
from rl_games.torch_runner import Runner
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from utils.plot_experiment import plot_episode_data_virtual
from utils.eval_metrics import get_GoToPose_success_rate_new
import os
import glob
import pandas as pd
from tqdm import tqdm
# filter out invalid experiments and retrieve valid models
def get_valid_models(load_dir, experiments):
valid_models = []
invalid_experiments = []
for experiment in experiments:
try:
file_pattern = os.path.join(load_dir, experiment, "nn", "last_*ep_2000_rew__*.pth")
model = glob.glob(file_pattern)
if model:
valid_models.append(model[0])
        except Exception:
            invalid_experiments.append(experiment)
if invalid_experiments:
print(f'Invalid experiments: {invalid_experiments}')
else:
print('All experiments are valid')
return valid_models
def eval_multi_agents(cfg, agent, models, horizon, plot_intermediate=False):
evaluation_dir = "./evaluations/" + models[0].split("/")[1] + "/"
os.makedirs(evaluation_dir, exist_ok=True)
store_all_agents = True # store all agents generated data, if false only the first agent is stored
is_done = False
all_success_rate_df = pd.DataFrame()
for i, model in enumerate(tqdm(models)):
agent.restore(model)
env = agent.env
obs = env.reset()
ep_data = {'act': [], 'obs': [], 'rews': []}
        # if conf parameter kill_thrusters is true, retrieve the thruster kill masks for each episode
if cfg.task.env.platform.randomization.kill_thrusters:
killed_thrusters_idxs = env._task.virtual_platform.action_masks
for _ in range(horizon):
actions = agent.get_action(obs['obs'], is_deterministic=True)
obs, reward, done, info = env.step(actions)
if store_all_agents:
ep_data['act'].append(actions.cpu().numpy())
ep_data['obs'].append(obs['obs']['state'].cpu().numpy())
ep_data['rews'].append(reward.cpu().numpy())
else:
ep_data['act'].append(actions[0].cpu().numpy())
ep_data['obs'].append(obs['obs']['state'][0].cpu().numpy())
ep_data['rews'].append(reward[0].cpu().numpy())
is_done = done.any()
ep_data['obs'] = np.array(ep_data['obs'])
ep_data['rews'] = np.array(ep_data['rews'])
ep_data['act'] = np.array(ep_data['act'])
# if thrusters were killed during the episode, save the action with the mask applied to the thrusters that were killed
if cfg.task.env.platform.randomization.kill_thrusters:
ep_data['act'] = ep_data['act'] * (1 - killed_thrusters_idxs.cpu().numpy())
        # Find the episodes where the sum of actions is zero (no action) across all time steps
broken_episodes = [i for i in range(0,ep_data['act'].shape[1]) if ep_data['act'][:,i,:].sum() == 0]
# Remove episodes that are broken by the environment (IsaacGym bug)
if broken_episodes:
print(f'Broken episodes: {broken_episodes}')
print(f'Ep data shape before: {ep_data["act"].shape}')
for key in ep_data.keys():
ep_data[key] = np.delete(ep_data[key], broken_episodes, axis=1)
print(f'Ep data shape after: {ep_data["act"].shape}')
task_flag = ep_data['obs'][0, 0, 5].astype(int)
# if task_flag == 0: # GoToXY
# success_rate = get_GoToXY_success_rate(ep_data, print_intermediate=True)
# success_rate_df = success_rate['position']
# elif task_flag == 1: # GoToPose
# success_rate = get_GoToPose_success_rate(ep_data, print_intermediate=True)
# success_rate_df = pd.concat([success_rate['position'], success_rate['heading']], axis=1)
# elif task_flag == 2: # TrackXYVelocity
# success_rate = get_TrackXYVelocity_success_rate(ep_data, print_intermediate=True)
# success_rate_df = success_rate['xy_velocity']
# elif task_flag == 3: # TrackXYOVelocity
# success_rate = get_TrackXYOVelocity_success_rate(ep_data, print_intermediate=True)
# success_rate_df = pd.concat([success_rate['xy_velocity'], success_rate['omega_velocity']], axis=1)
success_rate = get_GoToPose_success_rate_new(ep_data, print_intermediate=True)
success_rate_df = success_rate['pose']
# Collect the data for the success rate table
#success_rate_df['avg_rew'] = [np.mean(ep_data['rews'])]
        lin_vel_x = ep_data['obs'][:, :, 2:3]  # obs is (steps, envs, features)
        lin_vel_y = ep_data['obs'][:, :, 3:4]
lin_vel = np.linalg.norm(np.array([lin_vel_x, lin_vel_y]), axis=0)
success_rate_df['ALV'] = [np.mean(lin_vel.mean(axis=1))]
ang_vel_z = np.absolute(ep_data['obs'][:, :, 4:5][:,:,0])
success_rate_df['AAV'] = [np.mean(ang_vel_z.mean(axis=1))]
success_rate_df['AAC'] = np.mean(ep_data['act'])
all_success_rate_df = pd.concat([all_success_rate_df, success_rate_df], ignore_index=True)
        # To get a LaTeX version of the table, all_success_rate_df.to_latex() can be used
if plot_intermediate:
save_dir = evaluation_dir + model.split("/")[2] + "/"
plot_episode_data_virtual(ep_data, save_dir, store_all_agents)
# create index for the dataframe and save it
model_names = [model.split("/")[2] for model in models]
all_success_rate_df.insert(loc=0, column="model", value=model_names)
all_success_rate_df.to_csv(evaluation_dir + "/new_xy.csv")
@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
# specify the experiment load directory
load_dir = cfg.checkpoint #"./models/icra24_Pose_new/" #+ "expR_SE/"
print(f'Loading models from: {load_dir} ...')
experiments = os.listdir(load_dir)
print(f'Experiments found in {load_dir} folder: {len(experiments)}')
models = get_valid_models(load_dir, experiments)
#models = [m for m in models if "BB" in m.split("/")[2]]
print(f'Final models: {models}')
if not models:
print('No valid models found')
exit()
# _____Create task_____
# customize environment parameters based on model
if "BB" in models[0]:
print("Using BB model ...")
cfg.train.params.network.mlp.units = [256, 256]
# if "BBB" in models[0]:
# print("Using BBB model ...")
# cfg.train.params.network.mlp.units = [256, 256, 256]
# if "AN" in models[0]:
# print("Adding noise on act ...")
# cfg.task.env.add_noise_on_act = True
# if "AVN" in models[0]:
# print("Adding noise on act and vel ...")
#cfg.task.env.add_noise_on_act = True
#cfg.task.env.add_noise_on_vel = True
# if "UF" in models[0]:
# print("Setting uneven floor in the environment ...")
# cfg.task.env.use_uneven_floor = True
# cfg.task.env.max_floor_force = 0.25
horizon = 250
cfg.task.env.maxEpisodeLength = horizon + 2
cfg.task.env.platform.core.mass = 5.32
cfg.task.env.split_thrust = True
cfg.task.env.clipObservations['state'] = 20.0
cfg.task.env.task_parameters['max_spawn_dist'] = 4.0
cfg.task.env.task_parameters['min_spawn_dist'] = 3.0
cfg.task.env.task_parameters['kill_dist'] = 6.0
cfg.task.env.task_parameters['kill_after_n_steps_in_tolerance'] = 250
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# _____Create environment_____
headless = cfg.headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id, enable_livestream=cfg.enable_livestream, enable_viewport=enable_viewport)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict['seed'] = cfg.seed
task = initialize_task(cfg_dict, env)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
# _____Create players (model)_____
rlg_config_dict = omegaconf_to_dict(cfg.train)
runner = Runner(RLGPUAlgoObserver())
runner.load(rlg_config_dict)
runner.reset()
agent = runner.create_player()
plot_intermediate = False
eval_multi_agents(cfg, agent, models, horizon, plot_intermediate)
env.close()
if __name__ == '__main__':
parse_hydra_configs()
| 8,938 |
Python
| 42.604878 | 142 | 0.628664 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/run_experiments.py
|
import subprocess
import argparse
import json
import sys
import os
parser = argparse.ArgumentParser("Processes one or more experiments.")
parser.add_argument(
"--exps",
type=str,
nargs="+",
default=None,
help="List of path to the experiments' config to be ran.",
)
parser.add_argument(
"--isaac_path", type=str, default=None, help="Path to the python exec of isaac."
)
args, unknown_args = parser.parse_known_args()
WORKINGDIR = os.getcwd()
s = WORKINGDIR.split("/")[:3]
s = "/".join(s)
if args.isaac_path is None:
ov_path = os.path.join(s, ".local/share/ov/pkg/isaac_sim-2022.2.1/python.sh")
else:
ov_path = args.isaac_path
for exp in args.exps:
# Load the configuration file
with open(exp, "r") as f:
experiments = json.load(f)
# Loop through each experiment and execute it
for experiment_name, arguments in experiments.items():
# Construct the command to execute the experiment
cmd = [ov_path, "scripts/rlgames_train_mfp.py"]
        for arg, value in arguments.items():
            cmd.append(f"{arg}={value}")
print(f'Running command: {" ".join(cmd)}')
# Execute the command
subprocess.run(cmd)
| 1,222 |
Python
| 27.44186 | 84 | 0.648118 |
elharirymatteo/RANS/omniisaacgymenvs/scripts/evaluate_policy.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import numpy as np
import hydra
from omegaconf import DictConfig
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver
from rlgames_train import RLGTrainer
from rl_games.torch_runner import Runner
from omniisaacgymenvs.utils.task_util import initialize_task
from omniisaacgymenvs.envs.vec_env_rlgames_mfp import VecEnvRLGames
from omniisaacgymenvs.utils.plot_experiment import plot_episode_data_virtual
from omniisaacgymenvs.utils.eval_metrics import (
get_GoToXY_success_rate,
get_GoToPose_results,
get_TrackXYVelocity_success_rate,
get_TrackXYOVelocity_success_rate,
)
import pandas as pd
import os
def eval_multi_agents(cfg, horizon):
"""
    Evaluate a trained agent for a given number of steps."""
base_dir = "./evaluations/" + cfg.checkpoint.split("/")[1] + "/"
experiment_name = cfg.checkpoint.split("/")[2]
print(f"Experiment name: {experiment_name}")
evaluation_dir = base_dir + experiment_name + "/"
os.makedirs(evaluation_dir, exist_ok=True)
rlg_config_dict = omegaconf_to_dict(cfg.train)
runner = Runner(RLGPUAlgoObserver())
runner.load(rlg_config_dict)
runner.reset()
agent = runner.create_player()
agent.restore(cfg.checkpoint)
agent.has_batch_dimension = True
agent.batch_size = 4096
agent.init_rnn()
store_all_agents = (
True # store all agents generated data, if false only the first agent is stored
)
is_done = False
env = agent.env
obs = env.reset()
    # if conf parameter kill_thrusters is true, retrieve the thruster kill masks for each episode
if cfg.task.env.platform.randomization.kill_thrusters:
killed_thrusters_idxs = env._task.virtual_platform.action_masks
ep_data = {"act": [], "obs": [], "rews": []}
total_reward = 0
num_steps = 0
for _ in range(horizon):
actions = agent.get_action(obs["obs"], is_deterministic=True)
obs, reward, done, info = env.step(actions)
if store_all_agents:
ep_data["act"].append(actions.cpu().numpy())
ep_data["obs"].append(obs["obs"]["state"].cpu().numpy())
ep_data["rews"].append(reward.cpu().numpy())
else:
ep_data["act"].append(actions[0].cpu().numpy())
ep_data["obs"].append(obs["obs"]["state"][0].cpu().numpy())
ep_data["rews"].append(reward[0].cpu().numpy())
total_reward += reward[0]
num_steps += 1
is_done = done.any()
ep_data["obs"] = np.array(ep_data["obs"])
ep_data["rews"] = np.array(ep_data["rews"])
ep_data["act"] = np.array(ep_data["act"])
# if thrusters were killed during the episode, save the action with the mask applied to the thrusters that were killed
if cfg.task.env.platform.randomization.kill_thrusters:
ep_data["act"] = ep_data["act"] * (1 - killed_thrusters_idxs.cpu().numpy())
    # Find the episodes where the sum of actions is zero (no action) across all time steps
broken_episodes = [
i
for i in range(0, ep_data["act"].shape[1])
if ep_data["act"][:, i, :].sum() == 0
]
    # NOTE: episode filtering is currently disabled; the list is cleared so all episodes are kept
    broken_episodes = []
# Remove episodes that are broken by the environment (IsaacGym bug)
if broken_episodes:
print(f"Broken episodes: {broken_episodes}")
        # save the indices of the broken episodes in a csv
        broken_episodes_df = pd.DataFrame({"episode": broken_episodes})
broken_episodes_df.to_csv(evaluation_dir + "broken_episodes.csv", index=False)
print(f'Ep data shape before: {ep_data["act"].shape}')
for key in ep_data.keys():
ep_data[key] = np.delete(ep_data[key], broken_episodes, axis=1)
print(f'Ep data shape after: {ep_data["act"].shape}')
print(f"\n Episode: rew_sum={total_reward:.2f}, tot_steps={num_steps} \n")
print(f'Episode data obs shape: {ep_data["obs"].shape} \n')
task_flag = ep_data["obs"][0, 0, 5].astype(int)
if task_flag == 0: # GoToXY
success_rate = get_GoToXY_success_rate(ep_data, print_intermediate=True)
elif task_flag == 1: # GoToPose
success_rate = get_GoToPose_results(ep_data)
elif task_flag == 2: # TrackXYVelocity
success_rate = get_TrackXYVelocity_success_rate(
ep_data, print_intermediate=True
)
elif task_flag == 3: # TrackXYOVelocity
success_rate = get_TrackXYOVelocity_success_rate(
ep_data, print_intermediate=True
)
if cfg.headless:
plot_episode_data_virtual(ep_data, evaluation_dir, store_all_agents)
@hydra.main(config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
if cfg.checkpoint is None:
print("No checkpoint specified. Exiting...")
return
# customize environment parameters based on model
if "BB" in cfg.checkpoint:
print("Using BB model ...")
cfg.train.params.network.mlp.units = [256, 256]
if "BBB" in cfg.checkpoint:
print("Using BBB model ...")
cfg.train.params.network.mlp.units = [256, 256, 256]
if "AN" in cfg.checkpoint:
print("Adding noise on act ...")
cfg.task.env.add_noise_on_act = True
if "AVN" in cfg.checkpoint:
print("Adding noise on act and vel ...")
cfg.task.env.add_noise_on_act = True
cfg.task.env.add_noise_on_vel = True
if "UF" in cfg.checkpoint:
print("Setting uneven floor in the environment ...")
cfg.task.env.use_uneven_floor = True
cfg.task.env.max_floor_force = 0.25
horizon = 500
cfg.task.env.maxEpisodeLength = horizon + 2
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
# _____Create environment_____
headless = cfg.headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport,
)
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
# _____Create players (model)_____
# eval_single_agent(cfg_dict, cfg, env)
eval_multi_agents(cfg, horizon)
env.close()
if __name__ == "__main__":
parse_hydra_configs()
| 6,987 |
Python
| 34.835897 | 122 | 0.643767 |
elharirymatteo/RANS/omniisaacgymenvs/doc/domain_randomization.md
|
# Domain Randomization
Unlike the regular version of OmniIsaacGymEnv, this modified version applies domain randomization
directly inside the task. This is done so that different parameters can receive different levels of noise.
For instance, the state is composed of unnormalized angular values and linear velocity values, both of which
have largely different scales. Furthermore, the domain randomization we apply here is not limited to noise
on actions or observations: we also offer the possibility to randomize the mass of the system,
or to apply forces and torques directly onto the system.
All the parameters needed to add domain randomization to the system must be set under the `task.env.disturbances`
flag inside the configuration file. As of today, we support the following disturbances:
- `force_disturbance`: applies a random force at the system origin.
- `torque_disturbance`: applies a random torque at the system origin.
- `mass_disturbance`: changes the mass and the center of mass of the system.
- `observations_disturbance`: adds noise onto the observations.
- `actions_disturbance`: adds noise onto the actions.
## Applying disturbances
In the following, we go over the different parameters available for the disturbances and how to set them.
All the disturbances build on top of a scheduler and a sampler.
The scheduler regulates how quickly the disturbances take effect during training.
The sampler randomly picks the amount of disturbance applied to each environment.
A detailed explanation of the schedulers and samplers can be found in the curriculum documentation [LINK].
### Force disturbance
This disturbance applies a force on the system. By default, the force is applied at the root/origin of the body.
This behavior can be adjusted by modifying the body on which the force is applied. When setting the parameters
for the disturbance, the user selects the magnitude of the force, which is then randomly applied in a plane,
or on a sphere. Practically, this is done by sampling a radius value (the magnitude) using the scheduler
and sampler. Then a theta value (for a 2D problem), or theta and phi values (for a 3D problem), are sampled
uniformly, and the force is projected accordingly.
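To make the sampling scheme concrete, here is a minimal sketch of the 2D case; the names `num_envs`, `step`, and the hard-coded magnitude range are illustrative placeholders, not the repository's exact API:
```python
import math
import torch

# Minimal sketch: draw a magnitude from the curriculum sampler, draw a
# direction uniformly, and project the force onto the plane.
num_envs, step = 64, 500
magnitude = torch.rand(num_envs) * 0.5  # stand-in for sampler.sample(num_envs, step)
theta = torch.rand(num_envs) * 2 * math.pi  # uniform direction in the plane
forces = torch.stack(
    [magnitude * torch.cos(theta), magnitude * torch.sin(theta)], dim=-1
)  # (num_envs, 2) planar force vectors
```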
Below is an example configuration; note that all the parameters have default values,
so you do not need to add them unless you want to modify them. In this example, the sampler
follows a `truncated_normal` distribution (a normal distribution with extremas) and the
scheduler uses a sigmoid growth. At the beginning, there will be almost no force applied;
at the end, the force is almost uniformly sampled on the \[0, 0.5\] range.
```yaml
force_disturbance:
enable: False # Setting this to True will enable this disturbance
  use_sinusoidal_patterns: False # Setting this to True will create non-constant forces.
min_freq: 0.25
max_freq: 3
min_offset: -6
max_offset: 6
# Scheduling and sampling of the disturbance
force_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.5
end_std: 0.5
min_value: 0.0
max_value: 0.5
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
Setting the `use_sinusoidal_patterns` flag to `False` means that each environment will have a constant force applied to it.
If this flag is set to `True`, the force magnitude will be modulated depending on the position of the system.
This is meant to recreate attraction and repulsion points. The non-constant force means that recurrent networks will struggle more
to reliably estimate the disturbance.
Sinusoidal pattern, freq = 0.25 | Sinusoidal pattern, freq = 3.0
:------------------------------:|:------------------------------:
 | 
Please note that the values for the sinusoidal patterns and the magnitude of the force are updated on an environment reset only.
This means that the magnitude of the force will not evolve through an episode.
### Torque disturbance
This disturbance applies a torque on the system. By default, the torque is applied at the root/origin of the body.
This behavior can be adjusted by modifying the body on which the torque is applied. When setting the parameters
for the disturbance, the user selects the magnitude of the torque, which is then randomly applied in a plane,
or on a sphere. Practically, this is done by sampling a radius value (the magnitude) using the scheduler
and sampler. For a 2D problem, this is all that is needed, as there is only one rotational DoF. For a 3D problem,
theta and phi values are sampled uniformly, and the torque is projected accordingly.
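For the 3D case, a minimal sketch of the projection could look as follows (names are illustrative; note that we draw `phi` so that directions are uniform on the sphere, whereas the repository may simply sample `phi` uniformly):
```python
import math
import torch

# Sketch: project a sampled torque magnitude onto a sphere.
num_envs = 64
magnitude = torch.rand(num_envs) * 0.1  # stand-in for the curriculum sampler
theta = torch.rand(num_envs) * 2 * math.pi
phi = torch.acos(1 - 2 * torch.rand(num_envs))  # uniform over the sphere
torques = torch.stack(
    [
        magnitude * torch.sin(phi) * torch.cos(theta),
        magnitude * torch.sin(phi) * torch.sin(theta),
        magnitude * torch.cos(phi),
    ],
    dim=-1,
)  # (num_envs, 3) torque vectors
```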
Below is an example configuration; note that all the parameters have default values,
so you do not need to add them unless you want to modify them. In this example, the sampler
follows a `truncated_normal` distribution (a normal distribution with extremas) and the
scheduler uses a sigmoid growth. At the beginning, there will be almost no torque applied;
at the end, the torque is almost uniformly sampled on the [-0.1, 0.1] range.
```yaml
torque_disturbance:
enable: False # Setting this to True will enable this disturbance
  use_sinusoidal_patterns: False # Setting this to True will create non-constant torques.
min_freq: 0.25
max_freq: 3
min_offset: -6
max_offset: 6
# Scheduling and sampling of the disturbance
torque_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.2
min_value: -0.1
max_value: 0.1
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
Setting the `use_sinusoidal_patterns` flag to `False` means that each environment will have a constant torque applied to it.
If this flag is set to `True`, the torque magnitude will be modulated depending on the position of the system.
This is meant to recreate attraction and repulsion points. The non-constant torque means that recurrent networks will struggle more
to reliably estimate the disturbance.
Sinusoidal pattern, freq = 0.25 | Sinusoidal pattern, freq = 3.0
:------------------------------:|:------------------------------:
 | 
Please note that the values for the sinusoidal patterns and the magnitude of the torque are updated on an environment reset only.
This means that the magnitude of the torque will not evolve through an episode.
### Mass disturbance
The mass disturbance allows randomizing the mass and the CoM of a rigid body. While it is not currently
possible to randomize the CoM of a rigid body inside Omniverse, we work around this by adding two prismatic
joints to the system, at the end of which lies a fixed mass. All of the other elements of
the system are changed to have almost no mass, such that the only meaningful contribution to the total system
mass and CoM comes from this movable body.
To randomize the mass value, a scheduler and sampler are used, and the mass is directly sampled from them.
For the CoM, another scheduler and sampler pair is used: from it, a radius is sampled, which can then
be used to move the CoM in a 2D plane by uniformly sampling a theta value, or in 3D by uniformly
sampling theta and phi values.
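A minimal sketch of this randomization for the planar case is given below (names and value ranges are illustrative, not the repository's exact API):
```python
import math
import torch

# Sketch: the mass is sampled directly, while the CoM displacement is
# obtained from a sampled radius and a uniformly drawn angle. The two
# prismatic CoM joints are then driven to this (x, y) offset.
num_envs = 64
mass = torch.clamp(5.32 + torch.randn(num_envs), 3.32, 7.32)  # stand-in for the mass sampler
radius = torch.rand(num_envs) * 0.25  # stand-in for the CoM sampler
theta = torch.rand(num_envs) * 2 * math.pi
com_joint_targets = torch.stack(
    [radius * torch.cos(theta), radius * torch.sin(theta)], dim=-1
)  # (num_envs, 2): one target per prismatic joint
```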
Below is an example configuration; note that all the parameters have default values,
so you do not need to add them unless you want to modify them. In this example, we can see
that the mass and the CoM have independent samplers and rates.
```yaml
mass_disturbance:
enable: False # Setting this to True will enable this disturbance
# Scheduling and sampling of the mass disturbance
mass_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 5.32 # Initial mass
      start_std: 0.0001 # Low std ensures the mass will remain constant during warmup
end_mean: 5.32
end_std: 3.0
min_value: 3.32
max_value: 7.32
# Scheduling and sampling of the CoM disturbance
com_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0 # displacement about the resting position of the CoM joints
      start_std: 0.0001 # Low std ensures the CoM will remain constant during warmup
end_mean: 0.25
end_std: 0.25
min_value: 0.0
max_value: 0.25
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
### Observations disturbance
The observations disturbance adds a given type of noise onto the different constituting elements of the
observation tensor. The noise can be independently controlled and applied (or not) to three types of variables:
- positions (meters)
- velocities (meters/s or radians/s)
- orientations (radians)
For each of them, a scheduler and sampler can be set up, enabling fine control over how the system is exposed
to observation noise during its training.
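As an illustration, the sketch below applies the three noise types to a planar observation tensor; the layout `[x, y, vx, vy, wz, cos(h), sin(h)]` is assumed for the example and is not necessarily the repository's actual tensor layout:
```python
import torch

def add_noise_on_obs(obs, pos_noise, vel_noise, heading_noise):
    """Sketch: obs is (num_envs, 7); pos_noise is (num_envs, 2),
    vel_noise is (num_envs, 3), heading_noise is (num_envs,)."""
    obs = obs.clone()
    obs[:, 0:2] += pos_noise  # position noise (m)
    obs[:, 2:5] += vel_noise  # velocity noise (m/s, rad/s)
    # Orientation noise is applied in radians, then re-encoded as cos/sin.
    heading = torch.atan2(obs[:, 6], obs[:, 5]) + heading_noise
    obs[:, 5] = torch.cos(heading)
    obs[:, 6] = torch.sin(heading)
    return obs
```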
Below is an example configuration; note that all the parameters have default values,
so you do not need to set them unless you want to modify them.
```yaml
observations_disturbance:
enable_position_noise: False # Setting this to True will enable this disturbance
enable_velocity_noise: False # Setting this to True will enable this disturbance
enable_orientation_noise: False # Setting this to True will enable this disturbance
# Scheduling and sampling of the position disturbance
position_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.03
min_value: -0.015
max_value: 0.015
# Scheduling and sampling of the velocity disturbance
velocity_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.03
min_value: -0.015
max_value: 0.015
# Scheduling and sampling of the orientation disturbance
orientation_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.05
min_value: -0.025
max_value: 0.025
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
### Actions disturbance
The actions disturbance adds a given type of noise onto the actions sent by the agent. In our case, this is done by
adding (or removing) some force to the output of the thrusters. This noise should be scaled according to the maximum
thrust that your system is capable of. Similarly to all previous disturbances, it also comes with a scheduler and
a sampler.
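A minimal sketch of this idea is shown below; `max_thrust` and the noise scale are illustrative values, not the repository's defaults:
```python
import torch

# Sketch: add curriculum-sampled noise to the commanded thrusts and keep
# the result within the physically feasible range.
max_thrust = 1.0  # assumed per-thruster maximum (N)
thrusts = torch.rand(64, 8)  # commanded thrusts, (num_envs, num_thrusters)
noise = (torch.rand_like(thrusts) - 0.5) * 0.1 * max_thrust  # stand-in for the sampler
noisy_thrusts = (thrusts + noise).clamp(0.0, max_thrust)
```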
Below is an example configuration; note that all the parameters have default values,
so you do not need to set them unless you want to modify them.
```yaml
actions_disturbance:
enable: False # Setting this to True will enable this disturbance
# Scheduling and sampling of the disturbance
action_curriculum:
rate_parameters:
function: sigmoid
start: 250
end: 1250
extent: 4.5
sampling_parameters:
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.1
min_value: -0.05
max_value: 0.05
```
Setting the `enable` flag to `True` is required for the disturbance to be applied. If the flag is left to `False`,
the disturbance will not be applied to the platform.
## Adding new disturbances
To add new disturbances, we recommend adding them to `tasks/virtual_floating_platform/MFP2D_disturbances.py`,
with their default parameters given inside `tasks/virtual_floating_platform/MFP2D_disturbances_parameters.py`, or their 3D counterparts.
### Creating a new set of parameters
To create a new set of parameters for a disturbance, create a dataclass that comes with a scheduler and a sampler.
An example of such a class is given below:
```python
@dataclass
class NoisyActionsParameters:
"""
This class provides an interface to adjust the hyperparameters of the action noise.
"""
# Note how the default factory is an empty dict. This is leveraged to
# enable automatic building of the CurriculumParameters.
action_curriculum: CurriculumParameters = field(default_factory=dict)
# Here you could add any other parameters as long as they have a
# default type and value.
enable: bool = False
def __post_init__(self):
        # We transform the dictionary into a CurriculumParameters object.
        # This automatically converts the dictionary.
        # Note though that this does not support unknown keys,
        # i.e. if the user adds a key that does not exist, the
        # dataclass will complain.
self.action_curriculum = CurriculumParameters(**self.action_curriculum)
```
This very simple class creates the required parameters for the scheduler.
You can of course add your own parameters; do not forget to specify their type and
assign a default value. This is important, as the disturbance class should be instantiable
even if no parameter is given.
To wrap up the parameters part, you will need to add this disturbance to the list of
allowed disturbances. This can be done by adding it to the `DisturbancesParameters` class
at the end of `tasks/virtual_floating_platform/MFP2D_disturbances_parameters.py`.
```python
@dataclass
class DisturbancesParameters:
[...] # Some more disturbances
actions_disturbance: NoisyActionsParameters = field(default_factory=dict)
def __post_init__(self):
[...] # Some more initializations
# Here we build the object, note that this will automatically retrieve everything under
# `task.env.disturbances.actions_disturbance` inside the configuration file.
self.actions_disturbance = NoisyActionsParameters(**self.actions_disturbance)
```
### Creating a new disturbance
With the parameters created, you can now write the class that implements your new
disturbance. Below, we provide an example of how this could be done.
```python
class NoisyActions:
"""
Adds noise to the actions of the robot."""
def __init__(
self,
parameters: NoisyActionsParameters,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (NoisyActionParameters): The task configuration.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
        # Here the parameters from the sampler are loaded and transformed
        # into an object that can be used to sample different noises.
self.action_sampler = CurriculumSampler(parameters.action_curriculum)
# This is used in case you'd want to access other parameters.
self.parameters = parameters
        # These are used to know how many environments are used, and the
        # device on which the tensors must be stored.
self._num_envs = num_envs
self._device = device
def add_noise_on_act(self, act: torch.Tensor, step: int = 0) -> torch.Tensor:
"""
Adds noise to the actions of the robot.
Args:
act (torch.Tensor): The actions of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
torch.Tensor: The actions of the robot with noise.
"""
if self.parameters.enable:
            # Note that this depends on the current step / epoch of the training;
            # this is particularly important for the scheduler as it uses this
            # information to adjust its rate.
act += self.action_sampler.sample(self._num_envs, step, device=self._device)
return act
```
With this done, we now need to instantiate our disturbance. This can be done by adding it
to the `Disturbances` class at the end of `MFP2D_disturbances.py` (or its 3D counterpart),
as shown below:
```python
class Disturbances:
"""
Class to create disturbances on the platform.
"""
def __init__(
self,
parameters: dict,
num_envs: int,
device: str,
) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
num_envs (int): The number of environments.
device (str): The device on which the tensors are stored.
"""
self._num_envs = num_envs
self._device = device
# Loads all the parameters after `task.env.disturbances` inside the configuration file.
self.parameters = DisturbancesParameters(**parameters)
[...] # Some more initialization
# This builds the object
self.noisy_actions = NoisyActions(
self.parameters.actions_disturbance,
num_envs,
device,
)
```
With all this done, your disturbance can be parametrized, and instantiated!
All that's left to do is to apply it!
### Applying domain randomization
First we need to instantiate the `Disturbances` class.
This can be done as shown below:
```python
# Get all the parameters inside the task config as a dict
self._task_cfg = sim_config.task_config
# Get the dict related to the disturbances
domain_randomization_cfg = self._task_cfg["env"]["disturbances"]
# Initializes the disturbances
self.DR = Disturbances(
domain_randomization_cfg,
num_envs=self._num_envs,
device=self._device,
)
```
This should be done in the `__init__` method of your task.
With this done you can apply it as you see fit.
In this example this would be done like so:
```python
thrusts = self.DR.noisy_actions.add_noise_on_act(thrusts)
```
| 19,160 |
Markdown
| 41.019737 | 143 | 0.719729 |
elharirymatteo/RANS/omniisaacgymenvs/doc/curriculum.md
|
# Curriculum
To prevent penalties, disturbances, or tasks from being too hard from the beginning,
we use simple fixed curriculum strategies. Here, fixed denotes that the rate at which
the task becomes harder does not dynamically adapt to the agent's current capacities.
Instead, it relies on the current step to set the difficulty accordingly.
## Parametrizing the curriculum
In the following, we present how to set up the different components of our curriculum objects.
A curriculum object is always composed of a scheduler and a sampler:
```yaml
curriculum_parameters:
rate_parameters:
[...]
  sampling_parameters:
[...]
```
The objects come with default parameters, which will result in the rate/scheduler always outputting 1.0.
### Setting up the scheduling/rate of the curriculum
To set the schedule, or rate, of the curriculum, we provide three growth functions plus a pass-through:
- a `sigmoid` style growth.
- a `power` style growth.
- a `linear` style growth.
- `none`, the scheduler always returns 1.0.
Below, we provide a sample configuration for each of these functions.
```yaml
rate_parameters: # Sigmoid
function: sigmoid
start: 0
end: 1000
extent: 4.5 # Must be larger than 0.
```
```yaml
rate_parameters: # Power
  function: pow
start: 0
end: 1000
alpha: 2.0 # Can be smaller than 1! Must be larger than 0.
```
```yaml
rate_parameters: # Linear
function: linear
start: 0
end: 1000
```
```yaml
rate_parameters: # None
function: none
```
How the different parameters impact the scheduling of the curriculum is shown in the figure below.
Note that once the scheduler reaches 1.0, the highest difficulty has been reached.
The value output by the scheduler always lies in \[0, 1\].

We can see that for the `sigmoid`, a large extent, for instance 12, generates a sigmoid with a steeper slope,
while a smaller extent gets closer to the `linear` function. Similarly, a `power` function with the parameter `alpha`
set to 1.0 generates exactly the same curve as the `linear` function. An `alpha` larger than 1.0
yields a small slope at the beginning and a steep slope at the end, while an `alpha` smaller than 1.0 yields
the opposite.
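For reference, a sketch of a sigmoid-style growth is given below; the exact form used by the repository may differ, but it illustrates how `extent` controls the steepness while the output is rescaled to span exactly \[0, 1\] between `start` and `end`:
```python
import numpy as np

def sigmoid_growth(step: int, start: int = 0, end: int = 1000, extent: float = 4.5) -> float:
    if step <= start:
        return 0.0
    if step >= end:
        return 1.0
    x = 2.0 * (step - start) / (end - start) - 1.0  # map progress to [-1, 1]
    y = 1.0 / (1.0 + np.exp(-extent * x))
    y0 = 1.0 / (1.0 + np.exp(extent))   # raw value at x = -1
    y1 = 1.0 / (1.0 + np.exp(-extent))  # raw value at x = +1
    return (y - y0) / (y1 - y0)  # rescale so the output spans [0, 1]
```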
### Setting up the sampler of the curriculum
As of now, we provide three basic distributions to sample from:
- `uniform`, a uniform distribution between a max and a min.
- `normal`, a normal distribution around a mean with a given sigma.
- `truncated_normal`, a normal distribution with hard boundaries.
Below, we provide 3 sample configurations:
```yaml
sampling_parameters: # Uniform
distribution: uniform
start_min_value: -0.1
start_max_value: 0.1
end_min_value: -0.3
end_max_value: 0.3
```
```yaml
sampling_parameters: # Normal
distribution: normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.2
```
```yaml
sampling_parameters: # Truncated normal
distribution: truncated_normal
start_mean: 0.0
start_std: 0.0001
end_mean: 0.0
end_std: 0.2
min_value: -0.1
max_value: 0.1
```
In the above examples, we can see that there is always a start and an end parameter, be it for the mean, std,
or max and min values of the uniform distribution. Start denotes the distribution as it will be when the
scheduler/rate output is 0. End denotes the distribution as it will be when the scheduler/rate output is 1.
In between, the distribution transitions from one to the other following the function given to the scheduler.
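As a worked example, assuming a simple linear blend driven by the scheduler rate, the uniform sampler above would transition as follows:
```python
# Bounds of the uniform distribution at rate 0 and rate 1.
start_min_value, start_max_value = -0.1, 0.1
end_min_value, end_max_value = -0.3, 0.3
rate = 0.5  # scheduler output halfway through the curriculum
min_value = start_min_value + (end_min_value - start_min_value) * rate
max_value = start_max_value + (end_max_value - start_max_value) * rate
print(min_value, max_value)  # -0.2 0.2 (up to float rounding)
```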
## Modifying the curriculum
In the following, we explain how to add new samplers and schedulers to the current set
of curricula. In the future, we plan to expand the curriculum to support non-fixed steps.
### Modifying the scheduler
Adding a new scheduler is relatively easy and straightforward.
Create a new function inside `tasks.virtual_floating_platform.curriculum_helpers.py`.
Make sure this function has the following header:
```python
def your_new_function(step: int = 0, start: int = 0, end: int = 1000, **kwargs) -> float
```
Note that in practice `step` can be a float.
Below is our linear function:
```python
def curriculum_linear_growth(step: int = 0, start: int = 0, end: int = 1000, **kwargs) -> float:
"""
Generates a curriculum with a linear growth rate.
Args:
step (int): Current step.
start (int): Start step.
end (int): End step.
**kwargs: Additional arguments.
Returns:
float: Rate of growth.
"""
if step < start:
return 0.0
if step > end:
return 1.0
current = step - start
relative_end = end - start
rate = current / (relative_end)
return rate
```
Then add this function to the `RateFunctionDict`; here is an example:
```python
RateFunctionDict = {
"none": lambda step, start, end, **kwargs: 1.0,
"linear": curriculum_linear_growth,
"sigmoid": curriculum_sigmoid_growth,
"pow": curriculum_pow_growth,
}
```
Finally, to call your own function, use the key you set inside the dictionary as
the `function` parameter in the rate/scheduler config.
But what if you wanted to add more parameters? There is an automatic parameter collector:
as long as you create functions with named variables, and these named variables
match the names of the parameters given to the dataclass, everything should be seamless, with the
notable exception of functions. Below is the automatic parameter collector:
```python
self.kwargs = {
key: value for key, value in self.__dict__.items() if not isfunction(value)
}
```
This is then processed inside the following:
```python
def get(self, step: int) -> float:
"""
Gets the difficulty for the given step.
Args:
step (int): Current step.
Returns:
float: Current difficulty.
"""
return self.function(
step=step,
**self.kwargs,
)
```
### Modifying the sampler
A new sampler can be added in a similar fashion.
Create a new function inside `tasks.virtual_floating_platform.curriculum_helpers.py`.
This function must use the following header style:
```python
def your_new_function(n: int = 1, device: str = "cpu", **kwargs) -> torch.Tensor:
```
You can add arguments as you see fit.
Below is our implementation of the uniform sampling:
```python
def uniform(
n: int = 1,
min_value: float = 0.0,
max_value: float = 1.1,
device: str = "cpu",
**kwargs,
) -> torch.Tensor:
"""
Generates a tensor with values from a uniform distribution.
Args:
n (int, optional): Number of samples to generate.
min_value (float, optional): Minimum value of the uniform distribution.
max_value (float, optional): Maximum value of the uniform distribution.
device (str, optional): Device to use for the tensor.
**kwargs: Additional arguments.
Returns:
torch.Tensor: Tensor with values from a uniform distribution.
"""
return torch.rand((n), device=device) * (max_value - min_value) + min_value
```
Proceed to add this function inside the `SampleFunctionDict`:
```python
SampleFunctionDict = {
"uniform": uniform,
"normal": normal,
"truncated_normal": truncated_normal,
}
```
With this done, all that's left to do is define the routine that updates the different parameters
given the rate. While this operation could be automated, doing so would likely make the overall
code less flexible. Thus, we require updating the `CurriculumSampler` class.
Inside the `sample` function, you will need to add an if statement that matches your distribution's
name. An example is given below:
```python
elif self.sp.distribution == "normal":
mean = self.sp.start_mean + (self.sp.end_mean - self.sp.start_mean) * rate
std = self.sp.start_std + (self.sp.end_std - self.sp.start_std) * rate
return self.sp.function(n=n, mean=mean, std=std, device=device)
```
| 7,926 |
Markdown
| 30.835341 | 113 | 0.713348 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/controllers/discrete_LQR_controller.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Callable, NamedTuple, Optional, Union, List, Dict, Tuple
from scipy.linalg import solve_discrete_are
import numpy as np
import mujoco
import cvxpy as cp
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import (
MuJoCoFloatingPlatform,
)
def parseControllerConfig(
cfg_dict: Dict, env: MuJoCoFloatingPlatform
) -> Dict[str, Union[List[float], int, float, str, MuJoCoFloatingPlatform]]:
"""
Parse the controller configuration.
Args:
cfg_dict (Dict): A dictionary containing the configuration.
env (MuJoCoFloatingPlatform): A MuJoCoFloatingPlatform object.
Returns:
Dict[str, Union[List[float], int, float, str, MuJoCoFloatingPlatform]]: A dictionary containing the parsed configuration.
"""
config = {}
config["target_position"] = [0, 0, 0]
config["target_orientation"] = [1, 0, 0, 0]
config["target_linear_velocity"] = [0, 0, 0]
config["target_angular_velocity"] = [0, 0, 0]
config["thruster_count"] = (
cfg_dict["task"]["env"]["platform"]["configuration"]["num_anchors"] * 2
)
config["dt"] = cfg_dict["task"]["sim"]["dt"]
config["Mod"] = env
config["control_type"] = cfg_dict["controller"]["control_type"]
config["Q"] = cfg_dict["controller"]["Q"]
config["R"] = cfg_dict["controller"]["R"]
config["W"] = cfg_dict["controller"]["W"]
return config
class DiscreteController:
"""
Discrete pose controller for the Floating Platform."""
def __init__(
self,
target_position: List[float] = [0, 0, 0],
target_orientation: List[float] = [1, 0, 0, 0],
target_linear_velocity: List[float] = [0, 0, 0],
target_angular_velocity: List[float] = [0, 0, 0],
thruster_count: int = 8,
dt: float = 0.02,
Mod: MuJoCoFloatingPlatform = None,
control_type: str = "LQR",
Q: List[float] = [1, 1, 5, 5, 1, 1, 1],
W: List[float] = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
R: List[float] = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
**kwargs
) -> None:
"""
Initialize the discrete controller.
Args:
target_position (List[float], optional): A list containing the target position. Defaults to [0,0,0].
target_orientation (List[float], optional): A list containing the target orientation. Defaults to [1,0,0,0].
target_linear_velocity (List[float], optional): A list containing the target linear velocity. Defaults to [0,0,0].
target_angular_velocity (List[float], optional): A list containing the target angular velocity. Defaults to [0,0,0].
thruster_count (int, optional): An integer containing the number of thrusters. Defaults to 8.
dt (float, optional): A float containing the time step. Defaults to 0.02.
Mod (MuJoCoFloatingPlatform, optional): A MuJoCoFloatingPlatform object. Used to compute the linearized system matrices. Defaults to None.
control_type (str, optional): A string containing the type of control. Either 'H-inf' or 'LQR'. Defaults to 'LQR'.
Q (List[float], optional): A list containing the state cost matrix. Defaults to [1,1,5,5,1,1,1].
W (List[float], optional): A list containing the disturbance weight matrix. Defaults to [0.01,0.01,0.01,0.01,0.01,0.01,0.01].
R (List[float], optional): A list containing the control cost matrix. Defaults to [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1].
**kwargs: Additional arguments."""
self.thruster_count = thruster_count
self.thrusters = np.zeros(thruster_count) # Initialize all thrusters to off
self.dt = dt
self.FP = Mod
self.control_type = control_type
self.opti_states = None
# Instantiate goals to be null
self.target_position = target_position
self.target_orientation = target_orientation
self.target_linear_velocity = target_linear_velocity
self.target_angular_velocity = target_angular_velocity
# Control parameters
# State cost matrix
self.Q = np.diag(Q)
# Control cost matrix
self.R = np.diag(R)
# Disturbance weight matrix
self.W = np.diag(W)
self.findGains()
def findGains(self, r0=None) -> None:
"""
Find the gains for the controller.
Args:
r0 (np.ndarray, optional): An array containing the initial state. Defaults to None.
"""
# Compute linearized system matrices A and B based on your system dynamics
self.A, self.B = self.computeLinearizedSystem(
r0
) # Compute linearized system matrices
self.makePlanarCompatible()
if self.control_type == "H-inf":
self.computeHInfinityGains()
elif self.control_type == "LQR":
self.computeLQRGains()
else:
raise ValueError("Invalid control type specified.")
def computeLQRGains(self) -> None:
"""
Compute the LQR gains."""
self.P = solve_discrete_are(self.A, self.B, self.Q, self.R)
self.L = (
np.linalg.inv(self.R + self.B.T @ self.P @ self.B)
@ self.B.T
@ self.P
@ self.A
)
def computeHInfinityGains(self) -> None:
"""
Compute the H-infinity gains."""
X = cp.Variable((self.A.shape[0], self.A.shape[0]), symmetric=True)
gamma = cp.Parameter(nonneg=True) # Define gamma as a parameter
regularization_param = 1e-6
# Regularize matrix using the pseudo-inverse
A_regularized = self.A @ np.linalg.inv(
self.A.T @ self.A + regularization_param * np.eye(self.A.shape[1])
)
B_regularized = self.B @ np.linalg.inv(
self.B.T @ self.B + regularization_param * np.eye(self.B.shape[1])
)
# Define the constraints using regularized matrices
        constraints = [X >> np.eye(A_regularized.shape[1])]  # X >= I (positive definite)
# Define a relaxation factor
relaxation_factor = 1 # Adjust this value based on your experimentation
# Linear matrix inequality constraint with relaxation
constraints += [
cp.bmat(
[
[
A_regularized.T @ X @ A_regularized - X + self.Q,
A_regularized.T @ X @ B_regularized,
],
[
B_regularized.T @ X @ A_regularized,
B_regularized.T @ X @ B_regularized
- (gamma**2)
* relaxation_factor
* np.eye(B_regularized.shape[1]),
],
]
)
<< 0
]
objective = cp.Minimize(gamma)
prob = cp.Problem(objective, constraints)
# Set the value of the parameter gamma
gamma.value = 1.0 # You can set the initial value based on your problem
prob.solve()
if prob.status == cp.OPTIMAL:
self.L = (
np.linalg.inv(
self.B.T @ X.value @ self.B
+ gamma.value**2 * np.eye(self.B.shape[1])
)
@ self.B.T
@ X.value
@ self.A
)
else:
raise Exception("H-infinity control design failed.")
def setTarget(
self,
target_position: List[float] = None,
target_heading: List[float] = None,
target_linear_velocity: List[float] = None,
target_angular_velocity: List[float] = None,
) -> None:
"""
Sets the target position, orientation, and velocities.
Args:
target_position (List[float], optional): A list containing the target position. Defaults to None.
target_heading (List[float], optional): A list containing the target heading. Defaults to None.
target_linear_velocity (List[float], optional): A list containing the target linear velocity. Defaults to None.
target_angular_velocity (List[float], optional): A list containing the target angular velocity. Defaults to None.
"""
if target_position is not None:
self.target_position = np.array(target_position)
if target_heading is not None:
self.target_orientation = np.array(target_heading)
if target_linear_velocity is not None:
self.target_linear_velocity = np.array(target_linear_velocity)
if target_angular_velocity is not None:
self.target_angular_velocity = np.array(target_angular_velocity)
    def computeLinearizedSystem(self, r0: np.ndarray = None) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute linearized system matrices A and B.
With A the state transition matrix.
With B the control input matrix.
Args:
r0 (np.ndarray, optional): An array containing the initial state. Defaults to None.
"""
if r0 is None:
r0 = np.concatenate(
(
self.FP.data.qpos[:3],
self.FP.data.qvel[:3],
self.FP.data.qpos[3:],
self.FP.data.qvel[3:],
),
axis=None,
)
t_int = 0.2 # time-interval at 5Hz
A = self.f_STM(r0, t_int, self.FP.model, self.FP.data, self.FP.body_id)
B = self.f_B(
r0, t_int, self.FP.model, self.FP.data, self.FP.body_id, self.thruster_count
)
return A, B
def makePlanarCompatible(self) -> None:
"""
Remove elements of the STM to make it planar compatible.
        Required states: [x, y, vx, vy, qw, qz, wz]."""
a = self.A
b = self.B
a = np.delete(a, 11, axis=0) # Remove row: wy
a = np.delete(a, 10, axis=0) # Remove row: wx
a = np.delete(a, 8, axis=0) # Remove row: qy
        a = np.delete(a, 7, axis=0)  # Remove row: qx
a = np.delete(a, 5, axis=0) # Remove row: vz
a = np.delete(a, 2, axis=0) # Remove row: z
a = np.delete(a, 11, axis=1) # Remove col: wy
a = np.delete(a, 10, axis=1) # Remove col: wx
a = np.delete(a, 8, axis=1) # Remove col: qy
        a = np.delete(a, 7, axis=1)  # Remove col: qx
a = np.delete(a, 5, axis=1) # Remove col: vz
a = np.delete(a, 2, axis=1) # Remove col: z
b = np.delete(b, 11, axis=0) # Remove row: wy
b = np.delete(b, 10, axis=0) # Remove row: wx
b = np.delete(b, 8, axis=0) # Remove row: qy
        b = np.delete(b, 7, axis=0)  # Remove row: qx
b = np.delete(b, 5, axis=0) # Remove row: vz
b = np.delete(b, 2, axis=0) # Remove row: z
b[b == 0] = 1e-4
self.A = a
self.B = b
return None
    def f_STM(self, r0: np.ndarray, t_int: float, model, data, body_id) -> np.ndarray:
"""
Identify A matrix of linearized system through finite differencing.
Args:
r0 (np.ndarray): An array containing the initial state.
t_int (float): A float containing the time interval.
model: A MuJoCo model object.
data: A MuJoCo data object.
body_id: An integer containing the body id."""
IC_temp0 = r0
force = [0.0, 0.0, 0.0]
torque = [0.0, 0.0, 0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
current_time = data.time
for k in range(np.size(r0)):
delta = max(1e-3, IC_temp0[k] / 100)
delta_vec = np.zeros(np.size(r0))
delta_vec[k] = delta
IC_temp_pos = np.add(IC_temp0, delta_vec)
IC_temp_neg = np.subtract(IC_temp0, delta_vec)
# Positive direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_pos[0:3]
data.qvel[:3] = IC_temp_pos[3:6]
data.qpos[3:] = IC_temp_pos[6:10]
data.qvel[3:] = IC_temp_pos[10:13]
mujoco.mj_applyFT(
model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied
)
mujoco.mj_step(model, data)
ans_pos = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
# print('final_time', data.time)
# Negative direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_neg[0:3]
data.qvel[:3] = IC_temp_neg[3:6]
data.qpos[3:] = IC_temp_neg[6:10]
data.qvel[3:] = IC_temp_neg[10:13]
mujoco.mj_applyFT(
model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied
)
mujoco.mj_step(model, data)
ans_neg = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
# print('final_time', data.time)
if k == 0:
STM = np.subtract(ans_pos, ans_neg) / (2 * delta)
else:
temp = np.subtract(ans_pos, ans_neg) / (2 * delta)
STM = np.vstack((STM, temp))
STM = STM.transpose()
STM[6, 6] = 1.0
data.time = current_time
model.opt.timestep = default_tstep
return STM
def f_STM_analytical(
self, r0: np.ndarray, t_int: float, model, data, body_id
    ) -> np.ndarray:
"""
Identify A matrix of linearized system through finite differencing.
Args:
r0 (np.ndarray): An array containing the initial state.
t_int (float): A float containing the time interval.
model: A MuJoCo model object.
data: A MuJoCo data object.
body_id: An integer containing the body id."""
IC_temp0 = r0
STM = np.eye(np.size(r0))
w1 = IC_temp0[10]
w2 = IC_temp0[11]
w3 = IC_temp0[12]
qw = IC_temp0[6]
qx = IC_temp0[7]
qy = IC_temp0[8]
qz = IC_temp0[9]
STM[0, 3] = t_int
STM[1, 4] = t_int
STM[2, 5] = t_int
STM[6, 6] = 1
STM[6, 7] = -0.5 * w1 * t_int
STM[6, 8] = -0.5 * w2 * t_int
STM[6, 9] = -0.5 * w3 * t_int
STM[6, 10] = -0.5 * qx * t_int
STM[6, 11] = -0.5 * qy * t_int
STM[6, 12] = -0.5 * qz * t_int
STM[7, 6] = 0.5 * w1 * t_int
STM[7, 7] = 1
STM[7, 8] = 0.5 * w3 * t_int
STM[7, 9] = -0.5 * w2 * t_int
STM[7, 10] = 0.5 * qw * t_int
STM[7, 11] = -0.5 * qz * t_int
STM[7, 12] = 0.5 * qy * t_int
STM[8, 6] = 0.5 * w2 * t_int
STM[8, 7] = -0.5 * w3 * t_int
STM[8, 8] = 1
STM[8, 9] = 0.5 * w1 * t_int
STM[8, 10] = 0.5 * qz * t_int
STM[8, 11] = 0.5 * qw * t_int
STM[8, 12] = -0.5 * qx * t_int
STM[9, 6] = 0.5 * w3 * t_int
STM[9, 7] = -0.5 * w2 * t_int
STM[9, 8] = -0.5 * w1 * t_int
STM[9, 9] = 1
STM[9, 10] = -0.5 * qy * t_int
STM[9, 11] = 0.5 * qx * t_int
STM[9, 12] = 0.5 * qw * t_int
return STM
def f_B(
self, r0: np.ndarray, t_int: float, model, data, body_id, number_thrust: int
    ) -> np.ndarray:
"""
Identify B matrix of linearized system through finite differencing.
Args:
r0 (np.ndarray): An array containing the initial state.
t_int (float): A float containing the time interval.
model: A MuJoCo model object.
data: A MuJoCo data object.
body_id: An integer containing the body id.
number_thrust (int): An integer containing the number of thrusters."""
IC_temp0 = r0
force = [0.0, 0.0, 0.0]
torque = [0.0, 0.0, 0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
u = np.zeros(number_thrust)
current_time = data.time
        for k in range(np.size(u)):
delta = 0.01
delta_vec = np.zeros(np.size(u))
delta_vec[k] = delta
# Positive direction
u_plus = np.add(u, delta_vec)
force_plus = u_plus[k] * self.FP.forces[k] # * np.sqrt(0.5)
rmat = data.xmat[body_id].reshape(3, 3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_plus = np.matmul(
rmat, force_plus
) # Rotate the force to the body frame.
p2 = (
np.matmul(rmat, self.FP.positions[k]) + p
) # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(
model, data, force_plus, torque, p2, body_id, data.qfrc_applied
) # Apply the force.
mujoco.mj_step(model, data)
ans_pos = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
# Negative direction
u_minus = np.subtract(u, delta_vec)
            force_minus = u_minus[k] * self.FP.forces[k]  # * np.sqrt(0.5), kept symmetric with the positive perturbation
rmat = data.xmat[body_id].reshape(3, 3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_minus = np.matmul(
rmat, force_minus
) # Rotate the force to the body frame.
p2 = (
np.matmul(rmat, self.FP.positions[k]) + p
) # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(
model, data, force_minus, torque, p2, body_id, data.qfrc_applied
) # Apply the force.
mujoco.mj_step(model, data)
ans_neg = np.concatenate(
(data.qpos[:3], data.qvel[:3], data.qpos[3:], data.qvel[3:]), axis=None
)
if k == 0:
B = np.subtract(ans_pos, ans_neg) / (2 * delta)
else:
temp = np.subtract(ans_pos, ans_neg) / (2 * delta)
B = np.vstack((B, temp))
B = B.transpose()
model.opt.timestep = default_tstep
data.time = current_time
return B
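    # Sketch (illustrative, not part of the original pipeline): together, the
    # finite-difference matrices define a locally linearized discrete model
    #   x_{k+1} ~= STM @ x_k + B @ u_k
    # from which discrete LQR gains can be derived. Q_lqr and R_lqr below are
    # hypothetical weight matrices of matching dimensions.
    #
    #   import scipy.linalg
    #   P = scipy.linalg.solve_discrete_are(STM, B, Q_lqr, R_lqr)
    #   L = np.linalg.inv(R_lqr + B.T @ P @ B) @ (B.T @ P @ STM)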
    def controlCost(self) -> np.ndarray:
        """
        Compute the optimal control input from the current state feedback.
        Returns:
            np.ndarray: An array containing the control input."""
        # Control input computed from the optimized gains
if self.control_type == "H-inf":
control_input = np.array(self.L @ self.state) + self.disturbance
elif self.control_type == "LQR":
self.findGains(r0=self.opti_states)
control_input = np.array(self.L @ self.state)
else:
raise ValueError("Invalid control type specified.")
return control_input
    def makeState4Controller(self, state: Dict[str, np.ndarray]) -> tuple:
        """
        Make the state compatible with the controller.
        Args:
            state (Dict[str, np.ndarray]): A dictionary containing the state.
        Returns:
            tuple: The current position, current orientation, current linear velocity, and current angular velocity.
        """
current_position = state["position"]
current_position[-1] = 0
current_orientation = state["quaternion"]
current_linear_velocity = state["linear_velocity"]
current_angular_velocity = state["angular_velocity"]
return (
current_position,
current_orientation,
current_linear_velocity,
current_angular_velocity,
)
def getAction(
self,
obs_state: Dict[str, np.ndarray],
is_deterministic: bool = True,
        mute: bool = True,
        **kwargs
    ) -> np.ndarray:
"""
Get the action.
Args:
obs_state (Dict[str, np.ndarray]): A dictionary containing the state.
is_deterministic (bool, optional): A boolean containing whether the action is deterministic. Defaults to True.
Returns:
np.ndarray: An array containing the action.
"""
return self.update(*self.makeState4Controller(obs_state))
def update(
self,
current_position: np.ndarray,
current_orientation: np.ndarray,
current_velocity: np.ndarray,
current_angular_velocity: np.ndarray,
disturbance: np.ndarray = None,
) -> None:
"""
Update the controller.
Args:
current_position (np.ndarray): An array containing the current position.
current_orientation (np.ndarray): An array containing the current orientation.
current_velocity (np.ndarray): An array containing the current linear velocity.
current_angular_velocity (np.ndarray): An array containing the current angular velocity.
disturbance (np.ndarray, optional): An array containing the disturbance. Defaults to None.
"""
# Calculate errors
position_error = self.target_position - current_position
orientation_error = self.target_orientation - current_orientation
velocity_error = self.target_linear_velocity - current_velocity
angvel_error = self.target_angular_velocity - current_angular_velocity
self.opti_states = np.concatenate(
(
current_position,
current_velocity,
current_orientation,
current_angular_velocity,
),
axis=None,
)
        if disturbance is None:
            disturbance = np.zeros(8)  # no disturbance by default
self.disturbance = disturbance
# Combine errors into the state vector (planar)
self.state = np.array(
[
position_error[0],
position_error[1],
velocity_error[0],
velocity_error[1],
orientation_error[0],
orientation_error[3],
angvel_error[2],
]
)
# Optimal U
original_u = self.controlCost()
        # Zero out control inputs whose magnitude is below 0.25
        intermediate_u = np.where(np.abs(original_u) < 0.25, 0.0, original_u)
        if np.max(intermediate_u) == 0.0:
            normalized_array = np.zeros(self.thruster_count)
        else:
            normalized_array = (intermediate_u - np.min(intermediate_u)) / (
                np.max(intermediate_u) - np.min(intermediate_u)
            )
        # Round the normalized array to the nearest integer, biasing the center by 0.25
        final_U = np.round(normalized_array - 0.25).astype(int)
self.thrusters = final_U
return self.thrusters
| 23,820 |
Python
| 35.760802 | 150 | 0.540638 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/controllers/RL_games_model_4_mujoco.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Dict
from gym import spaces
import numpy as np
import torch
import yaml
from rl_games.algos_torch.players import (
BasicPpoPlayerContinuous,
BasicPpoPlayerDiscrete,
)
class RLGamesModel:
"""
This class implements a wrapper for the RLGames model.
It is used to interface the RLGames model with the MuJoCo environment.
It currently only supports PPO agents."""
def __init__(
self,
config: Dict = None,
config_path: str = None,
model_path: str = None,
**kwargs
):
"""
Initialize the RLGames model.
Args:
config (Dict, optional): A dictionary containing the configuration of the RLGames model. Defaults to None.
config_path (str, optional): A string containing the path to the configuration file of the RLGames model. Defaults to None.
model_path (str, optional): A string containing the path to the model of the RLGames model. Defaults to None.
**kwargs: Additional arguments."""
self.obs = dict(
{
"state": torch.zeros((1, 10), dtype=torch.float32, device="cuda"),
"transforms": torch.zeros(5, 8, device="cuda"),
"masks": torch.zeros(8, dtype=torch.float32, device="cuda"),
"masses": torch.zeros(3, dtype=torch.float32, device="cuda"),
}
)
# Build model using the configuration files
if config is None:
self.loadConfig(config_path)
else:
self.cfg = config
self.buildModel()
self.restore(model_path)
# Default target and task values
self.mode = 0
self.position_target = [0, 0, 0]
self.orientation_target = [1, 0, 0, 0]
self.linear_velocity_target = [0, 0, 0]
self.angular_velocity_target = [0, 0, 0]
self.obs_state = torch.zeros((1, 10), dtype=torch.float32, device="cuda")
def buildModel(self) -> None:
"""
Build the RLGames model."""
act_space = spaces.Tuple([spaces.Discrete(2)] * 8)
obs_space = spaces.Dict(
{
"state": spaces.Box(np.ones(10) * -np.Inf, np.ones(10) * np.Inf),
"transforms": spaces.Box(low=-1, high=1, shape=(8, 5)),
"masks": spaces.Box(low=0, high=1, shape=(8,)),
"masses": spaces.Box(low=-np.Inf, high=np.Inf, shape=(3,)),
}
)
self.player = BasicPpoPlayerDiscrete(
self.cfg, obs_space, act_space, clip_actions=False, deterministic=True
)
def loadConfig(self, config_name: str) -> None:
"""
Load the configuration file of the RLGames model.
Args:
config_name (str): A string containing the path to the configuration file of the RLGames model.
"""
with open(config_name, "r") as stream:
self.cfg = yaml.safe_load(stream)
def restore(self, model_name: str) -> None:
"""
Restore the weights of the RLGames model.
Args:
            model_name (str): A string containing the path to the checkpoint of an RLGames model matching the configuration file.
"""
self.player.restore(model_name)
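    # Example usage (hypothetical paths, shown for illustration only):
    #   model = RLGamesModel(config_path="cfg/config.yaml", model_path="runs/model.pth")
    #   model.setTarget(target_position=[1.0, 0.0, 0.0], mode=0)
    #   actions = model.getAction(state)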
def setTarget(
self,
target_position=None,
target_heading=None,
target_linear_velocity=None,
target_angular_velocity=None,
mode=None,
) -> None:
"""
Set the targets of the agent. mode is task flag.
Args:
target_position (list, optional): A list containing the target position. Defaults to None.
target_heading (list, optional): A list containing the target heading. Defaults to None.
target_linear_velocity (list, optional): A list containing the target linear velocity. Defaults to None.
target_angular_velocity (list, optional): A list containing the target angular velocity. Defaults to None.
mode (int, optional): An integer indicating the agent's task. Defaults to None.
"""
if mode == 0:
self.position_target = target_position
self.mode = mode
elif mode == 1:
self.position_target = target_position
self.orientation_target = target_heading
self.mode = mode
elif mode == 2:
self.linear_velocity_target = target_linear_velocity
self.mode = mode
elif mode == 3:
self.linear_velocity_target = target_linear_velocity
self.angular_velocity_target = target_angular_velocity
self.mode = mode
elif mode == 4:
self.linear_velocity_target = target_linear_velocity
self.orientation_target = target_heading
self.mode = mode
elif mode == 6:
#TODO: remove hardcoding
            fp_footprint_radius = 0.31 + 0.01
siny_cosp = 2 * target_heading[0] * target_heading[3]
cosy_cosp = 1 - 2 * (target_heading[3] * target_heading[3])
target_heading_angle = np.arctan2(siny_cosp, cosy_cosp)
target_position_clone = target_position.copy()
target_position_clone[0] += fp_footprint_radius * np.cos(target_heading_angle)
target_position_clone[1] += fp_footprint_radius * np.sin(target_heading_angle)
self.position_target = target_position_clone
self.orientation_target = target_heading
self.mode = mode
else:
raise ValueError("Please specify a task flag.")
def generate_task_data(self, state: Dict[str, np.ndarray]) -> None:
"""
Generate the task data used by the agent.
The task flag is used to determine the format of the task data.
Args:
state (Dict[str, np.ndarray]): A dictionary containing the state of the environment.
"""
if self.mode == 0:
self.target = [
self.position_target[0] - state["position"][0],
self.position_target[1] - state["position"][1],
0,
0,
]
elif self.mode == 1:
siny_cosp_target = 2 * (
self.orientation_target[0] * self.orientation_target[3]
+ self.orientation_target[1] * self.orientation_target[2]
)
cosy_cosp_target = 1 - 2 * (
self.orientation_target[2] * self.orientation_target[2]
+ self.orientation_target[3] * self.orientation_target[3]
)
heading_target = np.arctan2(siny_cosp_target, cosy_cosp_target)
siny_cosp_system = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp_system = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
heading_system = np.arctan2(siny_cosp_system, cosy_cosp_system)
heading_error = np.arctan2(
np.sin(heading_target - heading_system),
np.cos(heading_target - heading_system),
)
self.target = [
self.position_target[0] - state["position"][0],
self.position_target[1] - state["position"][1],
np.cos(heading_error),
np.sin(heading_error),
]
elif self.mode == 2:
self.target = [
self.linear_velocity_target[0] - state["linear_velocity"][0],
self.linear_velocity_target[1] - state["linear_velocity"][1],
0,
0,
]
elif self.mode == 3:
self.target = [
self.linear_velocity_target[0] - state["linear_velocity"][0],
self.linear_velocity_target[1] - state["linear_velocity"][1],
self.angular_velocity_target[2] - state["angular_velocity"][2],
0,
]
elif self.mode == 4:
siny_cosp_target = 2 * (
self.orientation_target[0] * self.orientation_target[3]
+ self.orientation_target[1] * self.orientation_target[2]
)
cosy_cosp_target = 1 - 2 * (
self.orientation_target[2] * self.orientation_target[2]
+ self.orientation_target[3] * self.orientation_target[3]
)
heading_target = np.arctan2(siny_cosp_target, cosy_cosp_target)
siny_cosp_system = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp_system = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
heading_system = np.arctan2(siny_cosp_system, cosy_cosp_system)
heading_error = np.arctan2(
np.sin(heading_target - heading_system),
np.cos(heading_target - heading_system),
)
self.target = [
self.linear_velocity_target[0] - state["linear_velocity"][0],
self.linear_velocity_target[1] - state["linear_velocity"][1],
np.cos(heading_error),
np.sin(heading_error),
]
elif self.mode == 6:
#TODO: remove hardcoding
target_to_cone_dist = -2.0
siny_cosp_target = 2 * (
self.orientation_target[0] * self.orientation_target[3]
+ self.orientation_target[1] * self.orientation_target[2]
)
cosy_cosp_target = 1 - 2 * (
self.orientation_target[2] * self.orientation_target[2]
+ self.orientation_target[3] * self.orientation_target[3]
)
heading_target = np.arctan2(siny_cosp_target, cosy_cosp_target)
anchor_positions = self.position_target.copy()
anchor_positions[0] += target_to_cone_dist * np.cos(heading_target)
anchor_positions[1] += target_to_cone_dist * np.sin(heading_target)
goal_headings = np.arctan2(
anchor_positions[1] - state["position"][1],
anchor_positions[0] - state["position"][0]
)
siny_cosp_system = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp_system = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
heading_system = np.arctan2(siny_cosp_system, cosy_cosp_system)
heading_error = np.abs(
np.arctan2(
np.sin(goal_headings - heading_system),
np.cos(goal_headings - heading_system),
)
)
self.target = [
self.position_target[0] - state["position"][0],
self.position_target[1] - state["position"][1],
np.cos(heading_error),
np.sin(heading_error),
]
def makeObservationBuffer(self, state: Dict[str, np.ndarray]) -> None:
"""
Make the observation buffer used by the agent.
Args:
state (Dict[str, np.ndarray]): A dictionary containing the state of the environment.
"""
self.generate_task_data(state)
siny_cosp = 2 * (
state["quaternion"][0] * state["quaternion"][3]
+ state["quaternion"][1] * state["quaternion"][2]
)
cosy_cosp = 1 - 2 * (
state["quaternion"][2] * state["quaternion"][2]
+ state["quaternion"][3] * state["quaternion"][3]
)
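        # For a planar (yaw-only), scalar-first unit quaternion, siny_cosp and
        # cosy_cosp equal sin(yaw) and cos(yaw), i.e.
        #   yaw = arctan2(2 * (qw * qz + qx * qy), 1 - 2 * (qy**2 + qz**2))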
self.obs_state[0, :2] = torch.tensor(
[cosy_cosp, siny_cosp], dtype=torch.float32, device="cuda"
)
self.obs_state[0, 2:4] = torch.tensor(
state["linear_velocity"][:2], dtype=torch.float32, device="cuda"
)
self.obs_state[0, 4] = state["angular_velocity"][2]
self.obs_state[0, 5] = self.mode
self.obs_state[0, 6:] = torch.tensor(
self.target, dtype=torch.float32, device="cuda"
)
def getAction(self, state, is_deterministic=True, **kwargs) -> np.ndarray:
"""
Get the action of the agent.
Args:
state (Dict[str, np.ndarray]): A dictionary containing the state of the environment.
is_deterministic (bool): A boolean indicating whether the action should be deterministic or not.
**kwargs: Additional arguments.
Returns:
np.ndarray: The action of the agent."""
self.makeObservationBuffer(state)
self.obs["state"] = self.obs_state
actions = (
self.player.get_action(self.obs.copy(), is_deterministic=is_deterministic)
.cpu()
.numpy()
)
return actions
| 13,598 |
Python
| 38.64723 | 135 | 0.542874 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/controllers/hl_controllers.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import List, Tuple, Dict, Union
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import os
import datetime
from mujoco_envs.controllers.discrete_LQR_controller import (
DiscreteController,
)
from mujoco_envs.controllers.RL_games_model_4_mujoco import (
RLGamesModel,
)
class BaseController:
"""
Base class for high-level controllers."""
def __init__(self, dt: float, save_dir: str = "mujoco_experiment") -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
"""
self.save_dir = os.path.join(save_dir, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
self.dt = dt
self.time = 0
self.initializeLoggers()
self.csv_datas = []
def initializeLoggers(self) -> None:
"""
Initializes the loggers for the simulation.
Allowing for the simulation to be replayed/plotted."""
self.logs = {}
self.logs["timevals"] = []
self.logs["angular_velocity"] = []
self.logs["linear_velocity"] = []
self.logs["position"] = []
self.logs["quaternion"] = []
self.logs["actions"] = []
def updateLoggers(
self, state: Dict[str, np.ndarray], action: np.ndarray, time: float = None
) -> None:
"""
Updates the loggers for the simulation.
Args:
state (Dict[str, np.ndarray]): State of the system.
action (np.ndarray): Action taken by the controller."""
self.logs["timevals"].append(self.time)
self.logs["position"].append(state["position"])
self.logs["quaternion"].append(state["quaternion"])
self.logs["angular_velocity"].append(state["angular_velocity"])
self.logs["linear_velocity"].append(state["linear_velocity"])
self.logs["actions"].append(action)
if time is not None:
self.time = time
else:
self.time += self.dt
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return False
def getGoal(self) -> None:
"""
Returns the current goal of the controller."""
raise NotImplementedError
def setGoal(self) -> None:
"""
Sets the goal of the controller."""
raise NotImplementedError
def getAction(self, **kwargs) -> np.ndarray:
"""
Gets the action from the controller."""
raise NotImplementedError
def plotSimulation(
self, dpi: int = 120, width: int = 600, height: int = 800
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 120.
width (int, optional): Width of the figure. Defaults to 600.
height (int, optional): Height of the figure. Defaults to 800."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"])
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], np.abs(self.logs["position"]))
ax[0].set_xlabel("time (seconds)")
ax[0].set_ylabel("meters")
_ = ax[0].set_title("position")
ax[0].set_yscale("log")
ax[1].plot(
np.array(self.logs["position"])[:, 0], np.array(self.logs["position"])[:, 1]
)
ax[1].set_xlabel("meters")
ax[1].set_ylabel("meters")
_ = ax[1].set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
def saveSimulationData(self, suffix: str = "") -> None:
"""
Saves the simulation data.
Args:
suffix (str, optional): Suffix to add to the file name. Defaults to ""."""
xyz = ["x", "y", "z"]
wxyz = ["w", "x", "y", "z"]
try:
os.makedirs(self.save_dir, exist_ok=True)
csv_data = pd.DataFrame()
for key in self.logs.keys():
if len(self.logs[key]) != 0:
if key == "actions":
data = np.array(self.logs[key])
for i in range(data.shape[1]):
csv_data["t_" + str(i)] = data[:, i]
else:
data = np.array(self.logs[key])
if len(data.shape) > 1:
if data.shape[1] == 4:
var_name = wxyz
else:
var_name = xyz
for i in range(data.shape[1]):
csv_data[var_name[i] + "_" + key] = data[:, i]
else:
csv_data[key] = data
csv_data.to_csv(os.path.join(self.save_dir, "exp_logs" + suffix + ".csv"))
self.csv_datas.append(csv_data)
except Exception as e:
print("Saving failed: ", e)
def plotBatch(self, dpi: int = 120, width: int = 600, height: int = 800) -> None:
"""
Plots a batch of simulations.
Args:
dpi (int, optional): Dots per inch. Defaults to 120.
width (int, optional): Width of the figure. Defaults to 600.
height (int, optional): Height of the figure. Defaults to 800."""
figsize = (width / dpi, height / dpi)
fig = plt.figure(figsize=figsize)
for csv_data in self.csv_datas:
plt.plot(csv_data["x_position"], csv_data["y_position"])
plt.axis("equal")
plt.xlabel("meters")
plt.ylabel("meters")
plt.tight_layout()
fig.savefig(os.path.join(self.save_dir, "positions.png"))
class PositionController(BaseController):
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
goals_x: List[float],
goals_y: List[float],
position_distance_threshold: float = 0.03,
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
goals_x (List[float]): List of x coordinates of the goals.
goals_y (List[float]): List of y coordinates of the goals.
position_distance_threshold (float, optional): Distance threshold for the position. Defaults to 0.03.
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
self.model = model
self.goals = np.array([goals_x, goals_y, [0] * len(goals_x)]).T
self.current_goal = self.goals[0]
self.distance_threshold = position_distance_threshold
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["position_target"] = []
    def updateLoggers(self, state, actions, time: float = None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["position_target"].append(self.current_goal[:2])
def isGoalReached(self, state: Dict[str, np.ndarray]) -> bool:
"""
Checks if the goal is reached.
Args:
state (Dict[str, np.ndarray]): State of the system.
Returns:
bool: True if the goal is reached, False otherwise."""
        dist = np.linalg.norm(self.current_goal[:2] - state["position"][:2])
        return dist < self.distance_threshold
def getGoal(self) -> np.ndarray:
"""
Returns the current goal."""
return self.current_goal
def setGoal(self, goal) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return len(self.goals) == 0
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
self.model.setTarget(target_position=self.current_goal, mode=0)
def getAction(
self,
state,
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
Returns:
np.ndarray: Action taken by the controller."""
if self.isGoalReached(state):
if not mute:
print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.setTarget()
actions = self.model.getAction(state, is_deterministic=is_deterministic)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 90, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 90.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(
self.logs["timevals"],
self.logs["linear_velocity"],
label="system velocities",
)
ax[1].legend()
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="position goals",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("meters")
ax.set_ylabel("meters")
ax.axis("equal")
_ = ax.set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
self.logs["timevals"], np.array(self.logs["actions"]), label="system action"
)
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "actions.png"))
except Exception as e:
print("Saving failed: ", e)
class PoseController(BaseController):
"""
Controller for the pose of the robot."""
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
goals_x: List[float],
goals_y: List[float],
goals_theta: List[float],
position_distance_threshold: float = 0.03,
orientation_distance_threshold: float = 0.03,
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
goals_x (List[float]): List of x coordinates of the goals.
goals_y (List[float]): List of y coordinates of the goals.
goals_theta (List[float]): List of theta coordinates of the goals.
position_distance_threshold (float, optional): Distance threshold for the position. Defaults to 0.03.
orientation_distance_threshold (float, optional): Distance threshold for the orientation. Defaults to 0.03.
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
# Discrete controller
self.model = model
# Creates an array goals
if goals_theta is None:
goals_theta = np.zeros_like(goals_x)
self.goals = np.array([goals_x, goals_y, goals_theta]).T
self.current_goal = self.goals[0]
        self.current_goal_controller = self.current_goal
self.position_distance_threshold = position_distance_threshold
self.orientation_distance_threshold = orientation_distance_threshold
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["position_target"] = []
self.logs["heading_target"] = []
def updateLoggers(self, state, actions, time: float = None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["position_target"].append(self.current_goal[:2])
self.logs["heading_target"].append(self.current_goal[-1])
def isGoalReached(self, state: Dict[str, np.ndarray]) -> bool:
"""
Checks if the goal is reached.
Args:
state (Dict[str, np.ndarray]): State of the system.
Returns:
bool: True if the goal is reached, False otherwise."""
        dist = np.linalg.norm(self.current_goal[:2] - state["position"][:2])
        return dist < self.position_distance_threshold
def getGoal(self) -> np.ndarray:
"""
Returns the current goal.
Returns:
np.ndarray: Current goal."""
return self.current_goal
def setGoal(self, goal: np.ndarray) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return len(self.goals) == 0
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
position_goal = self.current_goal
yaw = self.current_goal[2]
q = [np.cos(yaw / 2), 0, 0, np.sin(yaw / 2)]
orientation_goal = q
self.model.setTarget(
target_position=position_goal, target_heading=orientation_goal, mode=1
)
def getAction(
self,
state: Dict[str, np.ndarray],
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
Returns:
np.ndarray: Action taken by the controller."""
if self.isGoalReached(state):
if not mute:
print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.setTarget()
actions = self.model.getAction(
state, is_deterministic=is_deterministic, mute=mute
)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 90, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 90.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(
self.logs["timevals"],
self.logs["linear_velocity"],
label="system velocities",
)
ax[1].legend()
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="position goals",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("meters")
ax.set_ylabel("meters")
ax.axis("equal")
_ = ax.set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
self.logs["timevals"], np.array(self.logs["actions"]), label="system action"
)
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "actions.png"))
except Exception as e:
print("Saving failed: ", e)
class DockController(PoseController):
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
position_goal = self.current_goal
yaw = self.current_goal[2]
q = [np.cos(yaw / 2), 0, 0, np.sin(yaw / 2)]
orientation_goal = q
self.model.setTarget(
target_position=position_goal, target_heading=orientation_goal, mode=6
)
def plotSimulation(
self, dpi: int = 90, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 90.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title("angular velocity")
ax[0].set_ylabel("radians / second")
ax[1].plot(
self.logs["timevals"],
self.logs["linear_velocity"],
label="system velocities",
)
ax[1].legend()
ax[1].set_xlabel("time (seconds)")
ax[1].set_ylabel("meters / second")
_ = ax[1].set_title("linear_velocity")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="position goals",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("meters")
ax.set_ylabel("meters")
ax.axis("equal")
_ = ax.set_title("x y coordinates")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
self.logs["timevals"], np.array(self.logs["actions"]), label="system action"
)
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "actions.png"))
except Exception as e:
print("Saving failed: ", e)
class TrajectoryTracker:
"""
A class to generate and track trajectories."""
def __init__(
self, lookahead: float = 0.25, closed: bool = False, offset=(0, 0), **kwargs
):
"""
Initializes the trajectory tracker.
Args:
lookahead (float, optional): Lookahead distance. Defaults to 0.25.
closed (bool, optional): Whether the trajectory is closed or not. Defaults to False.
offset (tuple, optional): Offset of the trajectory. Defaults to (0,0).
**kwargs: Additional arguments."""
self.current_point = -1
        self.lookahead = lookahead
self.closed = closed
self.is_done = False
self.offset = np.array(offset)
def generateCircle(self, radius: float = 2, num_points: int = 360 * 10):
"""
Generates a circle trajectory.
Args:
radius (float, optional): Radius of the circle. Defaults to 2.
num_points (int, optional): Number of points. Defaults to 360*10."""
theta = np.linspace(0, 2 * np.pi, num_points, endpoint=(not self.closed))
self.positions = (
np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
)
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
self.angles = np.arctan2(self.angles[:, 1], self.angles[:, 0])
def generateSquare(self, h: float = 2, num_points: int = 360 * 10) -> None:
"""
Generates a square trajectory.
Args:
h (float, optional): Height of the square. Defaults to 2.
num_points (int, optional): Number of points. Defaults to 360*10."""
points_per_side = num_points // 4
s1y = np.linspace(-h / 2, h / 2, num_points, endpoint=False)
s1x = np.ones_like(s1y) * h / 2
u1 = np.ones_like(s1y)
v1 = np.zeros_like(s1y)
s2x = np.linspace(h / 2, -h / 2, num_points, endpoint=False)
s2y = np.ones_like(s2x) * h / 2
u2 = np.zeros_like(s2x)
v2 = -np.ones_like(s2x)
s3y = np.linspace(h / 2, -h / 2, num_points, endpoint=False)
s3x = np.ones_like(s3y) * (-h / 2)
u3 = -np.ones_like(s3y)
v3 = np.zeros_like(s3y)
s4x = np.linspace(-h / 2, h / 2, num_points, endpoint=False)
s4y = np.ones_like(s4x) * (-h / 2)
u4 = np.zeros_like(s4x)
v4 = np.ones_like(s4x)
self.positions = (
np.vstack(
[np.hstack([s1x, s2x, s3x, s4x]), np.hstack([s1y, s2y, s3y, s4y])]
).T
+ self.offset
)
self.u = np.hstack([u1, u2, u3, u4]).T
self.v = np.hstack([v1, v2, v3, v4]).T
self.angles = np.arctan2(self.u, self.v)
def generateSpiral(
self,
start_radius: float = 0.5,
end_radius: float = 2,
num_loop: float = 5,
num_points: int = 360 * 20,
) -> None:
"""
Generates a spiral trajectory.
Args:
start_radius (float, optional): Start radius of the spiral. Defaults to 0.5.
end_radius (float, optional): End radius of the spiral. Defaults to 2.
num_loop (float, optional): Number of loops. Defaults to 5.
num_points (int, optional): Number of points. Defaults to 360*20."""
radius = np.linspace(
start_radius, end_radius, num_points, endpoint=(not self.closed)
)
theta = np.linspace(
0, 2 * np.pi * num_loop, num_points, endpoint=(not self.closed)
)
self.positions = (
np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
)
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
self.angles = np.arctan2(self.angles[:, 1], self.angles[:, 0])
def generateInfinite(self, a: float = 2, num_points: int = 360 * 10) -> None:
"""
Generates an infinite (lemniscate of Bernoulli) trajectory.
Args:
a (float, optional): Controls the size of the lemniscate. Defaults to 2.
num_points (int, optional): Number of points. Defaults to 360*10.
"""
t = np.linspace(0, 2 * np.pi, num_points, endpoint=(not self.closed))
x = (a * np.cos(t)) / (1 + np.sin(t) ** 2)
y = (a * np.sin(t) * np.cos(t)) / (1 + np.sin(t) ** 2)
self.positions = np.array([x, y]).T + self.offset
# Derive angles based on the direction of movement across points for consistency with other functions
directions = np.diff(self.positions, axis=0, append=self.positions[0:1])
self.angles = (
np.array([directions[:, 1], -directions[:, 0]]).T
/ np.linalg.norm(directions, axis=1)[:, None]
)
self.angles = np.arctan2(self.angles[:, 0], self.angles[:, 1])
def getTrackingPointIdx(self, position: np.ndarray) -> None:
"""
Gets the tracking point index.
The tracking point is the point the robot is currently locked on.
Args:
position (np.ndarray): Current position of the robot."""
distances = np.linalg.norm(self.positions - position, axis=1)
if self.current_point == -1:
self.current_point = 0
else:
            indices = np.where(distances < self.lookahead)[0]
if len(indices) > 0:
indices = indices[indices < 60]
if len(indices) > 0:
self.current_point = np.max(indices)
def rollTrajectory(self) -> None:
"""
Rolls the trajectory, so that the current point is the first point."""
if self.closed:
self.positions = np.roll(self.positions, -self.current_point, axis=0)
self.angles = np.roll(self.angles, -self.current_point, axis=0)
self.current_point = 0
else:
self.positions = self.positions[self.current_point :]
self.angles = self.angles[self.current_point :]
self.current_point = 0
if self.positions.shape[0] <= 1:
self.is_done = True
def isDone(self):
"""
Checks if the trajectory is done."""
return self.is_done
def getPointForTracking(self) -> List[np.ndarray]:
"""
Gets the position the tracker is currently locked on.
Returns:
List[np.ndarray]: Position being tracked."""
position = self.positions[self.current_point]
angle = self.angles[self.current_point]
self.rollTrajectory()
return position, angle
def get_target_position(self) -> np.ndarray:
"""
Gets the target position.
Returns:
np.ndarray: Target position."""
return self.target_position
def computeVelocityVector(
self, target_position: np.ndarray, position: np.ndarray
) -> np.ndarray:
"""
Computes the velocity vector.
That is the vector that will enable the robot to reach the position being tracked.
Args:
target_position (np.ndarray): Position being tracked.
position (np.ndarray): Current position of the robot."""
diff = target_position - position
return diff / np.linalg.norm(diff)
def getVelocityVector(self, position: np.ndarray) -> np.ndarray:
"""
Gets the velocity vector.
Args:
position (np.ndarray): Current position of the robot.
Returns:
np.ndarray: Velocity vector."""
self.getTrackingPointIdx(position)
self.target_position, target_angle = self.getPointForTracking()
velocity_vector = self.computeVelocityVector(self.target_position, position)
return velocity_vector, target_angle
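    # Example (hypothetical values): pure-pursuit style use of the tracker.
    #   tracker = TrajectoryTracker(lookahead=0.25, closed=True)
    #   tracker.generateCircle(radius=1.5)
    #   v_dir, heading = tracker.getVelocityVector(np.array([0.0, 0.0]))
    #   # v_dir is a unit vector toward the locked point; scale it by the
    #   # desired speed to obtain a velocity command.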
class VelocityTracker(BaseController):
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
target_tracking_velocity: float = 0.25,
lookahead_dist: float = 0.15,
closed: bool = True,
x_offset: float = 0,
y_offset: float = 0,
radius: float = 1.5,
height: float = 1.5,
start_radius: float = 0.5,
end_radius: float = 2.0,
num_loops: int = 4,
trajectory_type: str = "circle",
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
target_tracking_velocity (float, optional): Target tracking velocity. Defaults to 0.25.
lookahead_dist (float, optional): Lookahead distance. Defaults to 0.15.
closed (bool, optional): Whether the trajectory is closed or not. Defaults to True.
x_offset (float, optional): x offset of the trajectory. Defaults to 0.
y_offset (float, optional): y offset of the trajectory. Defaults to 0.
radius (float, optional): Radius of the trajectory. Defaults to 1.5.
height (float, optional): Height of the trajectory. Defaults to 1.5.
start_radius (float, optional): Start radius of the trajectory. Defaults to 0.5.
end_radius (float, optional): End radius of the trajectory. Defaults to 2.0.
num_loops (int, optional): Number of loops. Defaults to 4.
trajectory_type (str, optional): Type of trajectory. Defaults to "circle".
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
self.tracker = TrajectoryTracker(
lookahead=lookahead_dist, closed=closed, offset=(x_offset, y_offset)
)
if trajectory_type.lower() == "square":
self.tracker.generateSquare(h=height)
elif trajectory_type.lower() == "circle":
self.tracker.generateCircle(radius=radius)
elif trajectory_type.lower() == "spiral":
self.tracker.generateSpiral(
start_radius=start_radius, end_radius=end_radius, num_loop=num_loops
)
elif trajectory_type.lower() == "infinite":
self.tracker.generateInfinite(a=radius)
else:
            raise ValueError(
                "Unknown trajectory type. Must be square, circle, spiral or infinite."
            )
self.model = model
self.target_tracking_velocity = target_tracking_velocity
self.velocity_goal = [0, 0, 0]
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["velocity_goal"] = []
self.logs["position_target"] = []
def updateLoggers(self, state, actions, time: float = None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["velocity_goal"].append(self.velocity_goal[:2])
self.logs["position_target"].append(self.getTargetPosition())
def getGoal(self) -> np.ndarray:
"""
Returns the current goal.
Returns:
np.ndarray: Current goal."""
return self.velocity_goal
def setGoal(self, goal: np.ndarray) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
self.target_tracking_velocity = goal
def getTargetPosition(self) -> np.ndarray:
"""
Gets the target position.
Returns:
np.ndarray: Target position."""
return self.tracker.get_target_position()
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return self.tracker.is_done
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
self.model.setTarget(target_linear_velocity=self.velocity_goal, mode=2)
def getAction(
self,
state: Dict[str, np.ndarray],
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
"""
self.velocity_vector, _ = self.tracker.getVelocityVector(state["position"][:2])
self.velocity_goal[0] = self.velocity_vector[0] * self.target_tracking_velocity
self.velocity_goal[1] = self.velocity_vector[1] * self.target_tracking_velocity
self.setTarget()
actions = self.model.getAction(
state, is_deterministic=is_deterministic, mute=mute
)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 135, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 135.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
linear_velocity = np.array(self.logs["linear_velocity"])
target_velocity = np.array(self.logs["velocity_goal"])
ax.plot(
self.logs["timevals"],
linear_velocity[:, 0],
label="x linear velocity",
)
ax.plot(
self.logs["timevals"],
linear_velocity[:, 1],
label="y linear velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 0],
label="x target velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 1],
label="y target velocity",
)
ax.legend()
ax.set_xlabel("time (seconds)")
ax.set_ylabel("Linear velocities (m/s)")
_ = ax.set_title("Linear velocity tracking")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="trajectory",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("x (meters)")
ax.set_ylabel("y (meters)")
ax.axis("equal")
_ = ax.set_title("Trajectory in xy plane")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class VelocityHeadingTracker(BaseController):
def __init__(
self,
dt: float,
model: Union[RLGamesModel, DiscreteController],
target_tracking_velocity: float = 0.25,
lookahead_dist: float = 0.15,
closed: bool = True,
x_offset: float = 0,
y_offset: float = 0,
radius: float = 1.5,
height: float = 1.5,
start_radius: float = 0.5,
end_radius: float = 2.0,
num_loops: int = 4,
trajectory_type: str = "circle",
save_dir: str = "mujoco_experiment",
**kwargs
) -> None:
"""
Initializes the controller.
Args:
dt (float): Simulation time step.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
target_tracking_velocity (float, optional): Target tracking velocity. Defaults to 0.25.
lookahead_dist (float, optional): Lookahead distance. Defaults to 0.15.
closed (bool, optional): Whether the trajectory is closed or not. Defaults to True.
x_offset (float, optional): x offset of the trajectory. Defaults to 0.
y_offset (float, optional): y offset of the trajectory. Defaults to 0.
radius (float, optional): Radius of the trajectory. Defaults to 1.5.
height (float, optional): Height of the trajectory. Defaults to 1.5.
start_radius (float, optional): Start radius of the trajectory. Defaults to 0.5.
end_radius (float, optional): End radius of the trajectory. Defaults to 2.0.
num_loops (int, optional): Number of loops. Defaults to 4.
trajectory_type (str, optional): Type of trajectory. Defaults to "circle".
save_dir (str, optional): Directory to save the simulation data. Defaults to "mujoco_experiment".
**kwargs: Additional arguments."""
super().__init__(dt, save_dir)
self.tracker = TrajectoryTracker(
lookahead=lookahead_dist, closed=closed, offset=(x_offset, y_offset)
)
if trajectory_type.lower() == "square":
self.tracker.generateSquare(h=height)
elif trajectory_type.lower() == "circle":
self.tracker.generateCircle(radius=radius)
elif trajectory_type.lower() == "spiral":
self.tracker.generateSpiral(
start_radius=start_radius, end_radius=end_radius, num_loop=num_loops
)
elif trajectory_type.lower() == "infinite":
self.tracker.generateInfinite(a=radius)
else:
            raise ValueError(
                "Unknown trajectory type. Must be square, circle, spiral or infinite."
            )
self.model = model
self.target_tracking_velocity = target_tracking_velocity
self.velocity_goal = [0, 0, 0]
self.target_heading = [0]
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["velocity_goal"] = []
self.logs["heading_target"] = []
self.logs["position_target"] = []
def updateLoggers(self, state, actions, time=None) -> None:
"""
Updates the loggers.
Args:
state (Dict[str, np.ndarray]): State of the system.
actions (np.ndarray): Action taken by the controller."""
super().updateLoggers(state, actions, time=time)
self.logs["velocity_goal"].append(self.velocity_goal[:2])
self.logs["heading_target"].append(self.target_heading[0])
self.logs["position_target"].append(self.getTargetPosition())
def getGoal(self) -> np.ndarray:
"""
Returns the current goal.
Returns:
np.ndarray: Current goal."""
return self.velocity_goal + self.target_heading
def setGoal(self, goal: np.ndarray) -> None:
"""
Sets the goal of the controller.
Args:
goal (np.ndarray): Goal to set."""
self.target_tracking_velocity = goal[:3]
self.target_heading = goal[3]
def getTargetPosition(self) -> np.ndarray:
"""
Gets the target position.
Returns:
np.ndarray: Target position."""
return self.tracker.get_target_position()
def isDone(self) -> bool:
"""
Checks if the simulation is done.
Returns:
bool: True if the simulation is done, False otherwise."""
return self.tracker.is_done
def setTarget(self) -> None:
"""
Sets the target of the low-level controller."""
yaw = self.target_heading[0]
q = [np.cos(yaw / 2), 0, 0, np.sin(yaw / 2)]
orientation_goal = q
self.model.setTarget(
target_linear_velocity=self.velocity_goal, target_heading=orientation_goal, mode=4
)
def getAction(
self,
state: Dict[str, np.ndarray],
is_deterministic: bool = True,
mute: bool = False,
time: float = None,
) -> np.ndarray:
"""
Gets the action from the controller.
Args:
state (Dict[str, np.ndarray]): State of the system.
is_deterministic (bool, optional): Whether the action is deterministic or not. Defaults to True.
mute (bool, optional): Whether to print the goal reached or not. Defaults to False.
"""
self.velocity_vector, target_heading = self.tracker.getVelocityVector(
state["position"][:2]
)
self.velocity_goal[0] = self.velocity_vector[0] * self.target_tracking_velocity
self.velocity_goal[1] = self.velocity_vector[1] * self.target_tracking_velocity
self.target_heading[0] = target_heading
self.setTarget()
actions = self.model.getAction(
state, is_deterministic=is_deterministic, mute=mute
)
self.updateLoggers(state, actions, time=time)
return actions
def plotSimulation(
self, dpi: int = 135, width: int = 1000, height: int = 1000
) -> None:
"""
Plots the simulation.
Args:
dpi (int, optional): Dots per inch. Defaults to 135.
width (int, optional): Width of the figure. Defaults to 1000.
height (int, optional): Height of the figure. Defaults to 1000."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
linear_velocity = np.array(self.logs["linear_velocity"])
target_velocity = np.array(self.logs["velocity_goal"])
ax.plot(
self.logs["timevals"],
linear_velocity[:, 0],
label="x linear velocity",
)
ax.plot(
self.logs["timevals"],
linear_velocity[:, 1],
label="y linear velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 0],
label="x target velocity",
)
ax.plot(
self.logs["timevals"],
target_velocity[:, 1],
label="y target velocity",
)
ax.legend()
ax.set_xlabel("time (seconds)")
ax.set_ylabel("Linear velocities (m/s)")
_ = ax.set_title("Linear velocity tracking")
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(
np.array(self.logs["position_target"])[:, 0],
np.array(self.logs["position_target"])[:, 1],
label="trajectory",
)
ax.plot(
np.array(self.logs["position"])[:, 0],
np.array(self.logs["position"])[:, 1],
label="system position",
)
ax.legend()
ax.set_xlabel("x (meters)")
ax.set_ylabel("y (meters)")
ax.axis("equal")
_ = ax.set_title("Trajectory in xy plane")
plt.tight_layout()
try:
os.makedirs(self.save_dir, exist_ok=True)
fig.savefig(os.path.join(self.save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
orientations = np.array(self.logs["quaternion"])
positions = np.array(self.logs["position"])
target_positions = np.array(self.logs["position_target"])
v = 2 * (
orientations[:, 0] * orientations[:, 3]
+ orientations[:, 1] * orientations[:, 2]
)
u = 1 - 2 * (
orientations[:, 2] * orientations[:, 2]
+ orientations[:, 3] * orientations[:, 3]
)
target_headings = np.array(self.logs["heading_target"])
u_target = np.cos(target_headings)
v_target = np.sin(target_headings)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.quiver(
target_positions[:, 0],
target_positions[:, 1],
u_target,
v_target,
label="reference_trajectory",
color="r",
)
ax.quiver(
positions[:, 0],
positions[:, 1],
u,
v,
label="system trajectory",
color="b",
)
ax.legend()
ax.set_xlabel("x (meters)")
ax.set_ylabel("y (meters)")
ax.axis("equal")
_ = ax.set_title("Trajectory in xy plane")
plt.tight_layout()
        try:
            os.makedirs(self.save_dir, exist_ok=True)
            # Saved under a distinct name so the xy-position figure above is not overwritten
            fig.savefig(os.path.join(self.save_dir, "headings.png"))
except Exception as e:
print("Saving failed: ", e)
class HLControllerFactory:
"""
Factory for high-level controllers."""
def __init__(self):
self.registered_controllers = {}
def registerController(
self,
name: str,
controller: Union[PositionController, PoseController, VelocityTracker],
):
"""
Registers a controller.
Args:
name (str): Name of the controller.
controller (Union[PositionController, PoseController, VelocityTracker]): Controller class.
"""
self.registered_controllers[name] = controller
def parseControllerConfiguration(self, cfg: Dict):
"""
Parses the controller configuration.
Args:
cfg (Dict): Configuration dictionary."""
return cfg["hl_task"], cfg["hl_task"]["name"]
def __call__(
self, cfg: Dict, model: Union[RLGamesModel, DiscreteController], dt: float
):
"""
Creates a controller.
Args:
cfg (Dict): Configuration dictionary.
model (Union[RLGamesModel, DiscreteController]): Low-level controller.
dt (float): Simulation time step."""
new_cfg, mode = self.parseControllerConfiguration(cfg)
assert mode in list(self.registered_controllers.keys()), "Unknown hl_task mode."
return self.registered_controllers[mode](dt, model, **new_cfg)
"""
Register the controllers."""
hlControllerFactory = HLControllerFactory()
hlControllerFactory.registerController("position", PositionController)
hlControllerFactory.registerController("pose", PoseController)
hlControllerFactory.registerController("dock", DockController)
hlControllerFactory.registerController("linear_velocity", VelocityTracker)
hlControllerFactory.registerController(
"linear_velocity_heading", VelocityHeadingTracker
)
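# Example usage (hypothetical cfg, shown for illustration only):
#   cfg = {"hl_task": {"name": "position", "goals_x": [1.0], "goals_y": [0.0]}}
#   controller = hlControllerFactory(cfg, model, dt=0.02)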
| 51,795 |
Python
| 33.692565 | 119 | 0.561 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/environments/disturbances.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Dict, Tuple
import numpy as np
import math
from omniisaacgymenvs.tasks.MFP.MFP2D_disturbances_parameters import (
DisturbancesParameters,
MassDistributionDisturbanceParameters,
ForceDisturbanceParameters,
TorqueDisturbanceParameters,
NoisyObservationsParameters,
NoisyActionsParameters,
)
from omniisaacgymenvs.tasks.MFP.curriculum_helpers import (
CurriculumSampler,
)
class RandomSpawn:
"""
Randomly spawns the robot in the environment."""
def __init__(self, cfg: Dict[str, float]) -> None:
"""
Initialize the random spawn strategy.
Args:
cfg (dict): A dictionary containing the configuration of the random spawn disturbance.
"""
self._rng = np.random.default_rng(seed=cfg["seed"])
self._max_spawn_dist = cfg["max_spawn_dist"]
self._min_spawn_dist = cfg["min_spawn_dist"]
self._kill_dist = cfg["kill_dist"]
def getInitialCondition(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Generates a random initial condition for the robot.
Returns:
Tuple[np.ndarray, np.ndarray]: A tuple containing the initial position and orientation of the robot.
"""
theta = self._rng.uniform(-np.pi, np.pi, 1)
r = self._rng.uniform(self._min_spawn_dist, self._max_spawn_dist)
initial_position = [np.cos(theta) * r, np.sin(theta) * r]
heading = self._rng.uniform(-np.pi, np.pi, 1)
initial_orientation = [np.cos(heading * 0.5), 0, 0, np.sin(heading * 0.5)]
return initial_position, initial_orientation
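

# Quick sanity sketch (the config values here are illustrative, not from the
# repo): spawns are drawn on an annulus, so the initial position always lands
# between the min and max spawn distances.
def _example_random_spawn():
    rs = RandomSpawn(
        {"seed": 0, "max_spawn_dist": 5.0, "min_spawn_dist": 1.0, "kill_dist": 10.0}
    )
    position, orientation = rs.getInitialCondition()
    assert 1.0 <= float(np.linalg.norm(position)) <= 5.0
    return position, orientation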
class RandomKillThrusters:
"""
Randomly kills thrusters."""
def __init__(self, cfg: Dict[str, float]) -> None:
"""
Initialize the random kill thrusters strategy.
Args:
cfg (dict): A dictionary containing the configuration of the random kill thrusters disturbance.
"""
        self._rng = np.random.default_rng(seed=cfg["seed"])
self._num_thrusters_to_kill = cfg["num_thrusters_to_kill"]
self.killed_thrusters_id = []
self.killed_mask = np.ones([8])
def generate_thruster_kills(self) -> None:
"""
Generates the thrusters to kill."""
self.killed_thrusters_id = self._rng.choice(
8, self._num_thrusters_to_kill, replace=False
)
class MassDistributionDisturbances:
"""
Creates disturbances on the platform by simulating a mass distribution on the
platform.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: MassDistributionDisturbanceParameters,
) -> None:
"""
Args:
parameters (MassDistributionDisturbanceParameters): The settings of the domain randomization.
"""
self.rng = rng
self.mass_sampler = CurriculumSampler(parameters.mass_curriculum)
self.CoM_sampler = CurriculumSampler(parameters.com_curriculum)
self.parameters = parameters
self.platforms_mass = 5.32
self.platforms_CoM = np.zeros((2), dtype=np.float32)
def randomize_masses(self, step: int = 100000) -> None:
"""
Randomizes the masses of the platforms.
Args:
step (int): The current step of the learning process.
"""
self.platforms_mass = self.mass_sampler.sample(1, step).numpy()[0]
r = self.CoM_sampler.sample(1, step).numpy()[0]
        theta = self.rng.uniform(0.0, 2.0 * math.pi)
self.platforms_CoM[0] = np.cos(theta) * r
self.platforms_CoM[1] = np.sin(theta) * r
def get_masses(self) -> Tuple[float, np.ndarray]:
"""
Returns the masses and CoM of the platforms.
Returns:
Tuple(float, np.ndarray): The masses and CoM of the platforms.
"""
return (self.platforms_mass, self.platforms_CoM)
class ForceDisturbance:
"""
Creates disturbances by applying random forces.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: ForceDisturbanceParameters,
) -> None:
"""
Args:
parameters (ForceDisturbanceParameters): The settings of the domain randomization.
"""
self.rng = rng
self.parameters = parameters
self.force_sampler = CurriculumSampler(self.parameters.force_curriculum)
self.forces = np.zeros(3, dtype=np.float32)
        self._max_forces = 0.0
self._floor_x_freq = 0
self._floor_y_freq = 0
self._floor_x_offset = 0
self._floor_y_offset = 0
def generate_forces(self, step: int = 100000) -> None:
"""
Generates the forces using a sinusoidal pattern or not.
Args:
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
self._floor_x_freq = self.rng.uniform(
self.parameters.min_freq, self.parameters.max_freq, 1
)
self._floor_y_freq = self.rng.uniform(
self.parameters.min_freq, self.parameters.max_freq, 1
)
self._floor_x_offset = self.rng.uniform(
self.parameters.min_offset, self.parameters.max_offset, 1
)
self._floor_y_offset = self.rng.uniform(
self.parameters.min_offset, self.parameters.max_offset, 1
)
self._max_forces = self.force_sampler.sample(1, step).numpy()[0]
else:
r = self.force_sampler.sample(1, step).numpy()[0]
theta = self.rng.uniform(0, 1, 1) * math.pi * 2
self.forces[0] = np.cos(theta) * r
self.forces[1] = np.sin(theta) * r
def get_floor_forces(self, root_pos: np.ndarray) -> np.ndarray:
"""
Computes the forces given the current state of the robot.
Args:
root_pos (np.ndarray): The position of the root of the robot.
Returns:
np.ndarray: The floor forces.
"""
if self.parameters.use_sinusoidal_patterns:
self.forces[0] = (
np.sin(root_pos[0] * self._floor_x_freq + self._floor_x_offset)
* self._max_forces
)
self.forces[1] = (
np.sin(root_pos[1] * self._floor_y_freq + self._floor_y_offset)
* self._max_forces
)
return self.forces
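

# Illustrative helper (a sketch, not original API): with sinusoidal patterns
# enabled, the floor force is a fixed spatial field F_x(x) = A * sin(f_x * x + o_x)
# (and likewise for y), so the disturbance depends on where the platform is,
# not on time.
def _example_force_field_profile(fd: ForceDisturbance, xs: np.ndarray) -> np.ndarray:
    """Samples the x component of the floor-force field along the x axis.
    Assumes fd.generate_forces() has been called with sinusoidal patterns on."""
    return np.array([fd.get_floor_forces(np.array([x, 0.0]))[0] for x in xs])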
class TorqueDisturbance:
"""
Creates disturbances by applying a torque to its center.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: TorqueDisturbanceParameters,
) -> None:
"""
Args:
parameters (TorqueDisturbanceParameters): The settings of the domain randomization.
"""
self.rng = rng
self.parameters = parameters
self.torque_sampler = CurriculumSampler(self.parameters.torque_curriculum)
self.torques = np.zeros(3, dtype=np.float32)
        self._max_torques = 0.0
self._freq = 0
self._offset = 0
def generate_torques(self, step: int = 100000) -> None:
"""
Generates the torques using a sinusoidal pattern or not.
Args:
step (int, optional): The current training step. Defaults to 0.
"""
if self.parameters.enable:
if self.parameters.use_sinusoidal_patterns:
                self._freq = self.rng.uniform(
                    self.parameters.min_freq, self.parameters.max_freq, 1
                )
                self._offset = self.rng.uniform(
                    self.parameters.min_offset, self.parameters.max_offset, 1
                )
self._max_torques = self.torque_sampler.sample(1, step).numpy()[0]
else:
r = self.torque_sampler.sample(1, step).numpy()[0]
self.torques[2] = r
def get_torque_disturbance(self, root_pos: np.ndarray) -> np.ndarray:
"""
Computes the torques given the current state of the robot.
Args:
            root_pos (np.ndarray): The position of the root of the robot.
        Returns:
            np.ndarray: The torque disturbance."""
        if self.parameters.use_sinusoidal_patterns:
            # The sinusoidal pattern needs a scalar phase; the x coordinate of
            # the root position is used here (an assumption mirroring the force case).
            self.torques[2] = (
                np.sin(root_pos[0] * self._freq + self._offset) * self._max_torques
            )
return self.torques
class NoisyObservations:
"""
Adds noise to the observations of the robot.
"""
def __init__(
self,
rng: np.random.default_rng,
parameters: NoisyObservationsParameters,
) -> None:
"""
Args:
            rng (np.random.default_rng): The random number generator.
            parameters (NoisyObservationsParameters): The settings of the domain randomization.
"""
self.rng = rng
self.position_sampler = CurriculumSampler(parameters.position_curriculum)
self.velocity_sampler = CurriculumSampler(parameters.velocity_curriculum)
self.orientation_sampler = CurriculumSampler(parameters.orientation_curriculum)
self.parameters = parameters
def add_noise_on_pos(self, pos: np.ndarray, step: int = 100000) -> np.ndarray:
"""
Adds noise to the position of the robot.
Args:
pos (np.ndarray): The position of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The position of the robot with noise.
"""
if self.parameters.enable_position_noise:
pos += self.position_sampler.sample(1, step).numpy()[0]
return pos
def add_noise_on_vel(self, vel: np.ndarray, step: int = 100000) -> np.ndarray:
"""
Adds noise to the velocity of the robot.
Args:
vel (np.ndarray): The velocity of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The velocity of the robot with noise.
"""
if self.parameters.enable_velocity_noise:
vel += self.velocity_sampler.sample(1, step).numpy()[0]
return vel
def add_noise_on_heading(self, heading: np.ndarray, step: int = 0) -> np.ndarray:
"""
Adds noise to the heading of the robot.
Args:
heading (np.ndarray): The heading of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The heading of the robot with noise.
"""
if self.parameters.enable_orientation_noise:
heading += self.orientation_sampler.sample(1, step).numpy()[0]
return heading
class NoisyActions:
"""
Adds noise to the actions of the robot."""
def __init__(
self,
rng: np.random.default_rng,
parameters: NoisyActionsParameters,
) -> None:
"""
Args:
parameters (NoisyActionParameters): The task configuration.
"""
self.rng = rng
self.action_sampler = CurriculumSampler(parameters.action_curriculum)
self.parameters = parameters
def add_noise_on_act(self, act: np.ndarray, step: int = 100000) -> np.ndarray:
"""
Adds noise to the actions of the robot.
Args:
act (np.ndarray): The actions of the robot.
step (int, optional): The current step of the learning process. Defaults to 0.
Returns:
np.ndarray: The actions of the robot with noise.
"""
if self.parameters.enable:
act += self.action_sampler.sample(1, step).numpy()[0]
return act
class Disturbances:
"""
Class to create disturbances on the platform.
"""
def __init__(self, parameters: dict, seed: int = 42) -> None:
"""
Args:
parameters (dict): The settings of the domain randomization.
"""
self.rng = np.random.default_rng(seed=seed)
self.parameters = DisturbancesParameters(**parameters)
self.mass_disturbances = MassDistributionDisturbances(
self.rng,
self.parameters.mass_disturbance,
)
self.force_disturbances = ForceDisturbance(
self.rng,
self.parameters.force_disturbance,
)
self.torque_disturbances = TorqueDisturbance(
self.rng,
self.parameters.torque_disturbance,
)
self.noisy_observations = NoisyObservations(
self.rng,
self.parameters.observations_disturbance,
)
self.noisy_actions = NoisyActions(
self.rng,
self.parameters.actions_disturbance,
)
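

# Usage sketch (hedged): `parameters` must match the DisturbancesParameters
# schema defined in MFP2D_disturbances_parameters; no concrete keys are assumed
# here. One generate/query cycle of the force disturbance looks like this.
def _example_disturbance_roundtrip(parameters: dict, root_pos: np.ndarray) -> np.ndarray:
    disturbances = Disturbances(parameters, seed=42)
    disturbances.force_disturbances.generate_forces(step=0)
    return disturbances.force_disturbances.get_floor_forces(root_pos)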
| 13,434 |
Python
| 30.912114 | 112 | 0.585901 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/environments/mujoco_base_env.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Dict, Union, List, Tuple
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import mujoco
import math
import os
from omniisaacgymenvs.mujoco_envs.environments.disturbances import (
Disturbances,
RandomKillThrusters,
RandomSpawn,
)
def parseEnvironmentConfig(
cfg: Dict[str, Union[float, int, Dict]]
) -> Dict[str, Union[float, int, Dict]]:
"""
Parses the environment configuration from the config file.
Args:
cfg (Dict[str, Union[float, int, Dict]]): The configuration dictionary.
Returns:
Dict[str, Union[float, int, Dict]]: The parsed configuration dictionary."""
new_cfg = {}
new_cfg["disturbances"] = cfg["task"]["env"]["disturbances"]
new_cfg["spawn_parameters"] = {}
new_cfg["spawn_parameters"]["seed"] = cfg["seed"]
try:
new_cfg["spawn_parameters"]["max_spawn_dist"] = cfg["task"]["env"][
"task_parameters"
]["max_spawn_dist"]
    except KeyError:
new_cfg["spawn_parameters"]["max_spawn_dist"] = 5.0
try:
new_cfg["spawn_parameters"]["min_spawn_dist"] = cfg["task"]["env"][
"task_parameters"
]["min_spawn_dist"]
    except KeyError:
new_cfg["spawn_parameters"]["min_spawn_dist"] = 5.0
new_cfg["spawn_parameters"]["kill_dist"] = cfg["task"]["env"]["task_parameters"][
"kill_dist"
]
new_cfg["step_time"] = cfg["task"]["sim"]["dt"]
new_cfg["duration"] = (
cfg["task"]["env"]["maxEpisodeLength"] * cfg["task"]["sim"]["dt"] * cfg["task"]["env"]["controlFrequencyInv"]
)
new_cfg["inv_play_rate"] = cfg["task"]["env"]["controlFrequencyInv"]
new_cfg["platform"] = cfg["task"]["env"]["platform"]
new_cfg["platform"]["seed"] = cfg["seed"]
new_cfg["run_batch"] = cfg["hl_task"]["run_batch"]
new_cfg["max_episode_length"] = cfg["task"]["env"]["maxEpisodeLength"]
return new_cfg
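

# Skeleton of the dict parseEnvironmentConfig expects (a hedged sketch with
# placeholder values; real configs come from the repo's Hydra yaml files, and
# the empty sub-dicts below must be filled with the actual schemas).
def _example_minimal_cfg() -> dict:
    return {
        "seed": 42,
        "hl_task": {"run_batch": 1},
        "task": {
            "sim": {"dt": 0.02},
            "env": {
                "disturbances": {},  # see DisturbancesParameters
                "task_parameters": {
                    "max_spawn_dist": 5.0,
                    "min_spawn_dist": 1.0,
                    "kill_dist": 10.0,
                },
                "maxEpisodeLength": 500,
                "controlFrequencyInv": 10,
                "platform": {},  # see the platform section of the task configs
            },
        },
    }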
class MuJoCoFloatingPlatform:
"""
A class for the MuJoCo Floating Platform environment."""
def __init__(
self,
step_time: float = 0.02,
duration: float = 60.0,
inv_play_rate: int = 10,
spawn_parameters: Dict[str, float] = None,
platform: Dict[str, Union[bool, dict, float, str, int]] = None,
disturbances: Dict[str, Union[bool, float]] = None,
run_batch: int = 1,
max_episode_length: int = 500,
**kwargs
) -> None:
"""
Initializes the MuJoCo Floating Platform environment.
Args:
step_time (float, optional): The time between steps in the simulation (seconds). Defaults to 0.02.
duration (float, optional): The duration of the simulation (seconds). Defaults to 60.0.
inv_play_rate (int, optional): The inverse of the play rate. Defaults to 10.
spawn_parameters (Dict[str, float], optional): A dictionary containing the spawn parameters. Defaults to None.
platform (Dict[str, Union[bool,dict,float,str,int]], optional): A dictionary containing the platform parameters. Defaults to None.
disturbances (Dict[str, Union[bool, float]], optional): A dictionary containing the disturbances parameters. Defaults to None.
**kwargs: Additional arguments."""
self.inv_play_rate = inv_play_rate
self.platform = platform
self.run_batch = run_batch
self.max_episode_length = max_episode_length
self.DR = Disturbances(disturbances, platform["seed"])
self.TK = RandomKillThrusters(
{
"num_thrusters_to_kill": platform["randomization"]["max_thruster_kill"]
* platform["randomization"]["kill_thrusters"],
"seed": platform["seed"],
}
)
self.RS = RandomSpawn(spawn_parameters)
self.createModel()
self.initializeModel()
self.setupPhysics(step_time, duration)
self.initForceAnchors()
self.reset()
self.csv_datas = []
def reset(
self,
initial_position: List[float] = [0, 0, 0],
initial_orientation: List[float] = [1, 0, 0, 0],
) -> None:
"""
Resets the simulation.
Args:
initial_position (list, optional): The initial position of the body. Defaults to [0,0,0].
initial_orientation (list, optional): The initial orientation of the body. Defaults to [1,0,0,0].
"""
self.initializeModel()
self.resetPosition(
initial_position=initial_position, initial_orientation=initial_orientation
)
self.DR.force_disturbances.generate_forces()
self.DR.torque_disturbances.generate_torques()
self.TK.generate_thruster_kills()
def initializeModel(self) -> None:
"""
Initializes the mujoco model for the simulation."""
self.data = mujoco.MjData(self.model)
mujoco.mj_forward(self.model, self.data)
self.body_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "top")
def setupPhysics(self, step_time: float, duration: float) -> None:
"""
Sets up the physics parameters for the simulation.
Args:
step_time (float): The time between steps in the simulation (seconds).
duration (float): The duration of the simulation (seconds)."""
self.model.opt.timestep = step_time
self.model.opt.gravity = [0, 0, 0]
self.duration = duration
def createModel(self) -> None:
"""
A YAML style string that defines the MuJoCo model for the simulation.
The mass is set to 5.32 kg, the radius is set to 0.31 m.
The initial position is set to (3, 3, 0.4) m."""
self.radius = self.platform["core"]["radius"]
self.mass = self.platform["core"]["mass"]
sphere_p1 = """
<mujoco model="tippe top">
<option integrator="RK4"/>
<asset>
<texture name="grid" type="2d" builtin="checker" rgb1=".1 .2 .3"
rgb2=".2 .3 .4" width="300" height="300"/>
<material name="grid" texture="grid" texrepeat="8 8" reflectance=".2"/>
</asset>
<worldbody>
<geom size="10.0 10.0 .01" type="plane" material="grid"/>
<light pos="0 0 10.0"/>
<camera name="closeup" pos="0 -3 2" xyaxes="1 0 0 0 1 2"/>
<body name="top" pos="0 0 .4">
<freejoint/>
"""
sphere_p2 = (
'<geom name="ball" type="sphere" size="'
+ str(self.radius)
+ '" mass="'
+ str(self.mass)
+ '"/>'
)
sphere_p3 = """
</body>
</worldbody>
<keyframe>
<key name="idle" qpos="3 3 0.4 1 0 0 0" qvel="0 0 0 0 0 0" />
</keyframe>
</mujoco>
"""
sphere = "\n".join([sphere_p1, sphere_p2, sphere_p3])
self.model = mujoco.MjModel.from_xml_string(sphere)
def initForceAnchors(self) -> None:
""" "
Defines where the forces are applied relatively to the center of mass of the body.
self.forces: 8x3 array of forces, indicating the direction of the force.
self.positions: 8x3 array of positions, indicating the position of the force."""
self.max_thrust = self.platform["configuration"]["thrust_force"]
self.forces = np.array(
[
[1, -1, 0],
[-1, 1, 0],
[1, 1, 0],
[-1, -1, 0],
[-1, 1, 0],
[1, -1, 0],
[-1, -1, 0],
[1, 1, 0],
]
)
# Normalize the forces.
self.forces = self.forces / np.linalg.norm(self.forces, axis=1).reshape(-1, 1)
# Multiply by the max thrust.
self.forces = self.forces * self.max_thrust
self.positions = (
np.array(
[
[1, 1, 0],
[1, 1, 0],
[-1, 1, 0],
[-1, 1, 0],
[-1, -1, 0],
[-1, -1, 0],
[1, -1, 0],
[1, -1, 0],
]
)
* 0.2192
)
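
    # Geometry sanity sketch (added for illustration, not original API): the z
    # torque each thruster can exert about the CoM is the cross product of its
    # lever arm and its force vector.
    def exampleThrusterTorques(self) -> np.ndarray:
        """Returns the per-thruster z torque implied by the anchors above."""
        return np.cross(self.positions, self.forces)[:, 2]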
def resetPosition(
self,
initial_position: List[float] = [0, 0],
initial_orientation: List[float] = [1, 0, 0, 0],
) -> None:
"""
Resets the position of the body and sets its velocity to 0.
Resets the timer as well.
Args:
initial_position (list, optional): The initial position of the body. Defaults to [0,0].
initial_orientation (list, optional): The initial orientation of the body. Defaults to [1,0,0,0].
"""
mujoco.mj_resetDataKeyframe(self.model, self.data, 0)
self.data.qpos[:2] = initial_position[:2]
self.data.qpos[3:7] = initial_orientation
self.data.qvel = 0
def applyForces(self, action: np.ndarray) -> None:
"""
Applies the forces to the body.
Args:
action (np.ndarray): The actions to apply to the body."""
self.data.qfrc_applied[...] = 0 # Clear applied forces.
rmat = self.data.xmat[self.body_id].reshape(3, 3) # Rotation matrix.
p = self.data.xpos[self.body_id] # Position of the body.
# Compute the number of thrusters fired, split the pressure between the nozzles.
factor = max(np.sum(action), 1)
# For each thruster, apply a force if needed.
for i in range(8):
if (
self.TK.killed_thrusters_id is not None
and i in self.TK.killed_thrusters_id
):
continue
            # The force applied is the action value (1 or 0), divided by the
            # number of thrusters fired (factor).
force = self.DR.noisy_actions.add_noise_on_act(action[i])
force = force * (1.0 / factor) * self.forces[i]
# If the force is not zero, apply the force.
if np.sum(np.abs(force)) > 0:
force = np.matmul(rmat, force) # Rotate the force to the global frame.
p2 = (
np.matmul(rmat, self.positions[i]) + p
) # Compute the position of the force.
mujoco.mj_applyFT(
self.model,
self.data,
force,
[0, 0, 0],
p2,
self.body_id,
self.data.qfrc_applied,
) # Apply the force.
uf_forces = self.DR.force_disturbances.get_floor_forces(self.data.qpos[:2])
td_forces = self.DR.torque_disturbances.get_torque_disturbance(
self.data.qpos[:2]
)
mujoco.mj_applyFT(
self.model,
self.data,
uf_forces,
td_forces,
self.data.qpos[:3],
self.body_id,
self.data.qfrc_applied,
        )  # Apply the floor force and torque disturbances.
def getObs(self) -> Dict[str, np.ndarray]:
"""
returns an up to date observation buffer.
Returns:
Dict[str, np.ndarray]: A dictionary containing the state of the simulation.
"""
state = {}
state["angular_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.data.qvel[3:6].copy()
)
state["linear_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.data.qvel[0:3].copy()
)
state["position"] = self.DR.noisy_observations.add_noise_on_pos(
self.data.qpos[0:3].copy()
)
state["quaternion"] = self.data.qpos[3:].copy()
return state
def runLoop(
self,
model,
initial_position: List[float] = [0, 0],
initial_orientation: List[float] = [1, 0, 0, 0],
):
"""
Runs the simulation loop.
Args:
model (object): The model of the controller.
initial_position (list, optional): The initial position of the body. Defaults to [0,0].
initial_orientation (list, optional): The initial orientation of the body. Defaults to [1,0,0,0].
"""
if self.run_batch > 1:
self.run_batch_evaluation(model)
else:
self.run_single_evaluation(model, initial_position, initial_orientation)
def run_single_evaluation(self, model, initial_position, initial_orientation):
self.reset(
initial_position=initial_position, initial_orientation=initial_orientation
)
done = False
while (self.duration > self.data.time) and (not done):
state = self.getObs() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
done = model.isDone()
model.saveSimulationData()
model.plotSimulation()
def run_batch_evaluation(self, model):
"""
Runs the simulation loop.
Args:
model (object): The model of the controller.
"""
print("Running the simulations.")
for i in range(self.run_batch):
# Runs the simulation
print("Running simulation " + str(i) + " of " + str(self.run_batch) + ".")
initial_position, initial_orientation = self.RS.getInitialCondition()
self.reset(
initial_position=initial_position,
initial_orientation=initial_orientation,
)
model.initializeLoggers()
step = 0
while self.max_episode_length > step:
state = self.getObs() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state, mute=True)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
step += 1
# Saves the simulation data
model.saveSimulationData(suffix=str(i))
model.plotBatch()
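

# Usage sketch (hedged): wires a parsed Hydra config into the environment and
# runs one episode. `controller` must expose getAction/isDone/
# saveSimulationData/plotSimulation as used by run_single_evaluation.
def _example_run_single_episode(controller, cfg: dict) -> None:
    env = MuJoCoFloatingPlatform(**parseEnvironmentConfig(cfg))
    env.runLoop(controller, initial_position=[0.0, 0.0, 0.0])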
| 14,827 |
Python
| 34.389021 | 142 | 0.547177 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/position_controller_RL.py
|
from typing import Callable, NamedTuple, Optional, Union, List
import matplotlib.pyplot as plt
import numpy as np
import argparse
import mujoco
import torch
import os
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import RLGamesModel
class MuJoCoPositionControl(MuJoCoFloatingPlatform):
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
super().initializeLoggers()
self.logs["position_target"] = []
def updateLoggers(self, target) -> None:
super().updateLoggers()
self.logs["position_target"].append(target)
def applyFriction(self, fdyn=0.1, fstat=0.1, tdyn=0.05, tstat=0.0):
lin_vel = self.data.qvel[:3]
lin_vel_norm = np.linalg.norm(lin_vel)
ang_vel = self.data.qvel[-1]
forces = self.data.qfrc_applied[:3]
forces_norm = np.linalg.norm(forces)
torques = self.data.qfrc_applied[3:]
torques_norm = np.linalg.norm(torques)
#if (forces_norm > fstat) or (torques_norm > tstat):
if lin_vel_norm > 0.001:
lin_vel_normed = np.array(lin_vel) / lin_vel_norm
force = -lin_vel_normed * fdyn
force[-1] = 0
mujoco.mj_applyFT(self.model, self.data, list(force), [0,0,0], self.data.qpos[:3], self.body_id, self.data.qfrc_applied)
        if np.abs(ang_vel) > 0.001:
torque = - np.sign(ang_vel) * tdyn
mujoco.mj_applyFT(self.model, self.data, [0,0,0], [0,0,torque], self.data.qpos[:3], self.body_id, self.data.qfrc_applied)
#else:
# self.data.qfrc_applied[:3] = 0
def runLoop(self, model, xy: np.ndarray) -> None:
"""
Runs the simulation loop.
model: the agent.
xy: 2D position of the body."""
self.resetPosition() # Resets the position of the body.
self.data.qpos[:2] = xy # Sets the position of the body.
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
def plotSimulation(self, dpi:int = 90, width:int = 1000, height:int = 1000, save:bool = True, save_dir:str = "position_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title('angular velocity')
ax[0].set_ylabel('radians / second')
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax[1].legend()
ax[1].set_xlabel('time (seconds)')
ax[1].set_ylabel('meters / second')
_ = ax[1].set_title('linear_velocity')
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="position goals")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('meters')
ax.set_ylabel('meters')
ax.axis("equal")
_ = ax.set_title('x y coordinates')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class PositionController:
def __init__(self, model: RLGamesModel, goal_x: List[float], goal_y: List[float], distance_threshold: float = 0.03) -> None:
self.model = model
self.goals = np.array([goal_x, goal_y]).T
self.current_goal = self.goals[0]
self.distance_threshold = distance_threshold
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
    def isGoalReached(self, state):
        dist = np.linalg.norm(self.current_goal - state["position"])
        return dist < self.distance_threshold
def getGoal(self):
return self.current_goal
def setGoal(self, goal):
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self):
return len(self.goals) == 0
def getObs(self):
return self.obs_state.cpu().numpy()
def makeObservationBuffer(self, state):
self.obs_state[0,:2] = torch.tensor(state["orientation"], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"]
self.obs_state[0,5] = 0
self.obs_state[0,6:8] = torch.tensor(self.current_goal - state["position"], dtype=torch.float32, device="cuda")
def getAction(self, state, is_deterministic: bool = True):
if self.isGoalReached(state):
print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.makeObservationBuffer(state)
return self.model.getAction(self.obs_state, is_deterministic=is_deterministic)
def parseArgs():
parser = argparse.ArgumentParser("Generates meshes out of Digital Elevation Models (DEMs) or Heightmaps.")
parser.add_argument("--model_path", type=str, default=None, help="The path to the model to be loaded. It must be a velocity tracking model.")
parser.add_argument("--config_path", type=str, default=None, help="The path to the network configuration to be loaded.")
parser.add_argument("--goal_x", type=float, nargs="+", default=None, help="List of x coordinates for the goals to be reached by the platform. In world frame, meters.")
parser.add_argument("--goal_y", type=float, nargs="+", default=None, help="List of y coordinates for the goals to be reached by the platform. In world frame, meters.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--save_dir", type=str, default="position_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert os.path.exists(args.model_path), "The model file does not exist."
assert os.path.exists(args.config_path), "The configuration file does not exist."
assert not args.goal_x is None, "The x coordinates of the goals must be specified."
assert not args.goal_y is None, "The y coordinates of the goals must be specified."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
assert len(args.goal_x) == len(args.goal_y), "The number of x coordinates must be equal to the number of y coordinates."
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
    except OSError:
raise ValueError("Could not create the save directory.")
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
# Creates the velocity tracker
position_controller = PositionController(model, args.goal_x, args.goal_y)
# Creates the environment
env = MuJoCoPositionControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Runs the simulation
env.runLoop(position_controller, [0,0])
# Plots the simulation
env.plotSimulation(save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir)
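
# Example invocation (paths and goals are placeholders):
#   python position_controller_RL.py \
#       --model_path <path/to/checkpoint.pth> \
#       --config_path <path/to/network_config.yaml> \
#       --goal_x 1.0 2.0 --goal_y 0.0 1.0 \
#       --sim_rate 50 --play_rate 5 --save_dir position_exp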
| 10,188 |
Python
| 50.459596 | 318 | 0.642226 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/pose_controller_DC.py
|
from typing import Callable, NamedTuple, Optional, Union, List, Dict
from scipy.linalg import solve_discrete_are
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import numpy as np
import argparse
import scipy.io
import mujoco
import torch
import os
import cvxpy as cp
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
class MuJoCoPoseControl(MuJoCoFloatingPlatform):
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
super().initializeLoggers()
self.logs["position_target"] = []
self.logs["heading_target"] = []
def updateLoggers(self, target) -> None:
super().updateLoggers()
self.logs["position_target"].append(target[:2])
self.logs["heading_target"].append(target[-1])
def updateState(self) -> Dict[str, np.ndarray]:
"""
Updates the loggers with the current state of the simulation."""
state = {}
state["angular_velocity"] = self.ON.add_noise_on_vel(self.data.qvel[3:6].copy())
state["linear_velocity"] = self.ON.add_noise_on_vel(self.data.qvel[0:3].copy())
state["position"] = self.ON.add_noise_on_pos(self.data.qpos[0:3].copy())
state["quaternion"] = self.data.qpos[3:].copy()
return state
def runLoop(self, model, xy: np.ndarray) -> None:
"""
Runs the simulation loop.
model: the agent.
xy: 2D position of the body."""
self.resetPosition() # Resets the position of the body.
self.data.qpos[:2] = xy # Sets the position of the body.
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
def plotSimulation(self, dpi:int = 90, width:int = 1000, height:int = 1000, save:bool = True, save_dir:str = "position_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title('angular velocity')
ax[0].set_ylabel('radians / second')
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax[1].legend()
ax[1].set_xlabel('time (seconds)')
ax[1].set_ylabel('meters / second')
_ = ax[1].set_title('linear_velocity')
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="position goals")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('meters')
ax.set_ylabel('meters')
ax.axis("equal")
_ = ax.set_title('x y coordinates')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(self.logs["timevals"], np.array(self.logs["actions"]), label="system action")
plt.tight_layout()
        if save:
            try:
                os.makedirs(save_dir, exist_ok=True)
                fig.savefig(os.path.join(save_dir, "actions.png"))
            except Exception as e:
                print("Saving failed: ", e)
class DiscreteController:
"""
Discrete pose controller for the Floating Platform."""
def __init__(self, target_position: List[float], target_orientation: List[float], thruster_count:int=8, dt:float=0.02, Mod:MuJoCoFloatingPlatform=None, control_type = 'LQR') -> None:
self.target_position = np.array(target_position)
self.target_orientation = np.array(target_orientation)
self.thruster_count = thruster_count
self.thrusters = np.zeros(thruster_count) # Initialize all thrusters to off
self.dt = dt
self.FP = Mod
self.control_type = control_type
self.opti_states = None
# control parameters
self.Q = np.diag([1,1,5,5,1,1,1]) # State cost matrix
self.R = np.diag([0.01] * self.thruster_count) # Control cost matrix
self.W = np.diag([0.1] * 7) # Disturbance weight matrix
self.find_gains()
def find_gains(self,r0=None):
# Compute linearized system matrices A and B based on your system dynamics
self.A, self.B = self.compute_linearized_system(r0) # Compute linearized system matrices
self.make_planar_compatible()
if self.control_type == 'H-inf':
self.compute_hinfinity_gains()
elif self.control_type == 'LQR':
self.compute_lqr_gains()
else:
raise ValueError("Invalid control type specified.")
def compute_lqr_gains(self):
self.P = solve_discrete_are(self.A, self.B, self.Q, self.R)
self.L = np.linalg.inv(self.R + self.B.T @ self.P @ self.B) @ self.B.T @ self.P @ self.A
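
    # Background for this gain computation (standard discrete-time LQR):
    # P solves the discrete algebraic Riccati equation
    #     P = A'PA - A'PB (R + B'PB)^{-1} B'PA + Q,
    # and L = (R + B'PB)^{-1} B'PA is the optimal state-feedback gain, i.e.
    # u = -L x minimizes sum_k (x'Qx + u'Ru). control_cost() applies L @ state
    # directly; the sign works out because `state` holds target-minus-current
    # errors.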
def compute_hinfinity_gains(self):
X = cp.Variable((self.A.shape[0], self.A.shape[0]), symmetric=True)
gamma = cp.Parameter(nonneg=True) # Define gamma as a parameter
regularization_param = 1e-6
# Regularize matrix using the pseudo-inverse
A_regularized = self.A @ np.linalg.inv(self.A.T @ self.A + regularization_param * np.eye(self.A.shape[1]))
B_regularized = self.B @ np.linalg.inv(self.B.T @ self.B + regularization_param * np.eye(self.B.shape[1]))
# Define the constraints using regularized matrices
constraints = [X >> np.eye(A_regularized.shape[1])] # X >= 0
# Define a relaxation factor
relaxation_factor = 1 # Adjust this value based on your experimentation
# Linear matrix inequality constraint with relaxation
constraints += [cp.bmat([[A_regularized.T @ X @ A_regularized - X + self.Q, A_regularized.T @ X @ B_regularized],
[B_regularized.T @ X @ A_regularized, B_regularized.T @ X @ B_regularized - (gamma**2) * relaxation_factor * np.eye(B_regularized.shape[1])]]) << 0]
objective = cp.Minimize(gamma)
prob = cp.Problem(objective, constraints)
# Set the value of the parameter gamma
gamma.value = 1.0 # You can set the initial value based on your problem
prob.solve()
if prob.status == cp.OPTIMAL:
self.L = np.linalg.inv(self.B.T @ X.value @ self.B + gamma.value**2 * np.eye(self.B.shape[1])) @ self.B.T @ X.value @ self.A
else:
raise Exception("H-infinity control design failed.")
def set_target(self, target_position: List[float], target_orientation: List[float]) -> None:
"""
Sets the target position and orientation."""
self.target_position = np.array(target_position)
self.target_orientation = np.array(target_orientation)
def compute_linearized_system(self, r0=None) -> None:
"""
Compute linearized system matrices A and B.
With A the state transition matrix.
With B the control input matrix."""
if r0 is None:
r0 = np.concatenate((self.FP.data.qpos[:3],self.FP.data.qvel[:3], self.FP.data.qpos[3:], self.FP.data.qvel[3:]),axis =None)
t_int = 0.2 # time-interval at 5Hz
A = self.f_STM(r0,t_int,self.FP.model,self.FP.data,self.FP.body_id)
B = self.f_B(r0,t_int,self.FP.model,self.FP.data,self.FP.body_id,self.thruster_count)
return A, B
def make_planar_compatible(self) -> None:
"""
Remove elements of the STM to make it planar compatible.
Required states #[x,y,vx,vy,qw,qz,wz]."""
a = self.A
b = self.B
        a = np.delete(a, 11, axis=0)  # Remove row: wy
        a = np.delete(a, 10, axis=0)  # Remove row: wx
        a = np.delete(a, 8, axis=0)  # Remove row: qy
        a = np.delete(a, 7, axis=0)  # Remove row: qx
        a = np.delete(a, 5, axis=0)  # Remove row: vz
        a = np.delete(a, 2, axis=0)  # Remove row: z
        a = np.delete(a, 11, axis=1)  # Remove col: wy
        a = np.delete(a, 10, axis=1)  # Remove col: wx
        a = np.delete(a, 8, axis=1)  # Remove col: qy
        a = np.delete(a, 7, axis=1)  # Remove col: qx
        a = np.delete(a, 5, axis=1)  # Remove col: vz
        a = np.delete(a, 2, axis=1)  # Remove col: z
        b = np.delete(b, 11, axis=0)  # Remove row: wy
        b = np.delete(b, 10, axis=0)  # Remove row: wx
        b = np.delete(b, 8, axis=0)  # Remove row: qy
        b = np.delete(b, 7, axis=0)  # Remove row: qx
        b = np.delete(b, 5, axis=0)  # Remove row: vz
        b = np.delete(b, 2, axis=0)  # Remove row: z
        b[b == 0] = 1e-4  # Avoid exactly-zero entries so the gain computation stays well-conditioned.
self.A = a
self.B = b
return None
    def f_STM(self, r0: np.ndarray, t_int: float, model, data, body_id) -> np.ndarray:
"""
Identify A matrix of linearized system through finite differencing."""
IC_temp0 = r0
force = [0.0,0.0,0.0]
torque = [0.0,0.0,0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
current_time = data.time
for k in range(np.size(r0)):
delta = max(1e-3,IC_temp0[k]/100)
delta_vec = np.zeros(np.size(r0))
delta_vec[k] = delta
IC_temp_pos = np.add(IC_temp0,delta_vec)
IC_temp_neg = np.subtract(IC_temp0,delta_vec)
# Positive direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_pos[0:3]
data.qvel[:3] = IC_temp_pos[3:6]
data.qpos[3:] = IC_temp_pos[6:10]
data.qvel[3:] = IC_temp_pos[10:13]
mujoco.mj_applyFT(model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied)
mujoco.mj_step(model, data)
ans_pos = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
#print('final_time', data.time)
# Negative direction
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp_neg[0:3]
data.qvel[:3] = IC_temp_neg[3:6]
data.qpos[3:] = IC_temp_neg[6:10]
data.qvel[3:] = IC_temp_neg[10:13]
mujoco.mj_applyFT(model, data, force, torque, data.qpos[:3], body_id, data.qfrc_applied)
mujoco.mj_step(model, data)
ans_neg = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
#print('final_time', data.time)
if k==0:
STM = np.subtract(ans_pos,ans_neg)/(2*delta)
else :
temp = np.subtract(ans_pos,ans_neg)/(2*delta)
STM = np.vstack((STM,temp))
STM = STM.transpose()
STM[6,6] = 1.0
data.time = current_time
model.opt.timestep = default_tstep
return STM
    def f_STM_analytical(self, r0: np.ndarray, t_int: float, model, data, body_id) -> np.ndarray:
"""
Identify A matrix of linearized system through finite differencing."""
IC_temp0 = r0
STM = np.eye(np.size(r0))
w1 = IC_temp0[10]
w2 = IC_temp0[11]
w3 = IC_temp0[12]
qw = IC_temp0[6]
qx = IC_temp0[7]
qy = IC_temp0[8]
qz = IC_temp0[9]
STM[0,3] = t_int
STM[1,4] = t_int
STM[2,5] = t_int
STM[6,6] = 1
STM[6,7] = -0.5*w1*t_int
STM[6,8] = -0.5*w2*t_int
STM[6,9] = -0.5*w3*t_int
STM[6,10] = -0.5*qx*t_int
STM[6,11] = -0.5*qy*t_int
STM[6,12] = -0.5*qz*t_int
STM[7,6] = 0.5*w1*t_int
STM[7,7] = 1
STM[7,8] = 0.5*w3*t_int
STM[7,9] = -0.5*w2*t_int
STM[7,10] = 0.5*qw*t_int
STM[7,11] = -0.5*qz*t_int
STM[7,12] = 0.5*qy*t_int
STM[8,6] = 0.5*w2*t_int
STM[8,7] = -0.5*w3*t_int
STM[8,8] = 1
STM[8,9] = 0.5*w1*t_int
STM[8,10] = 0.5*qz*t_int
STM[8,11] = 0.5*qw*t_int
STM[8,12] = -0.5*qx*t_int
STM[9,6] = 0.5*w3*t_int
STM[9,7] = -0.5*w2*t_int
STM[9,8] = -0.5*w1*t_int
STM[9,9] = 1
STM[9,10] = -0.5*qy*t_int
STM[9,11] = 0.5*qx*t_int
STM[9,12] = 0.5*qw*t_int
return STM
    def f_B(self, r0: np.ndarray, t_int: float, model, data, body_id, number_thrust: int) -> np.ndarray:
"""
Identify B matrix of linearized system through finite differencing."""
IC_temp0 = r0
force = [0.0,0.0,0.0]
torque = [0.0,0.0,0.0]
default_tstep = model.opt.timestep
model.opt.timestep = t_int
u = np.zeros(number_thrust)
current_time = data.time
for k in range(np.size(u)):
delta = 0.01
delta_vec = np.zeros(np.size(u))
delta_vec[k] = delta
# Positive direction
u_plus = np.add(u,delta_vec)
force_plus = u_plus[k] * self.FP.forces[k]# * np.sqrt(0.5)
rmat = data.xmat[body_id].reshape(3,3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_plus = np.matmul(rmat, force_plus) # Rotate the force to the body frame.
p2 = np.matmul(rmat, self.FP.positions[k]) + p # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(model, data, force_plus, torque, p2, body_id, data.qfrc_applied) # Apply the force.
mujoco.mj_step(model, data)
ans_pos = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
# Negative direction
u_minus = np.subtract(u,delta_vec)
            force_minus = u_minus[k] * self.FP.forces[k]  # Kept consistent with force_plus above (no sqrt(0.5) factor) so the finite difference is symmetric.
rmat = data.xmat[body_id].reshape(3,3) # Rotation matrix.
p = data.xpos[body_id] # Position of the body.
force_minus = np.matmul(rmat, force_minus) # Rotate the force to the body frame.
p2 = np.matmul(rmat, self.FP.positions[k]) + p # Compute the position of the force.
data.time = 0.0
data.qfrc_applied[...] = 0.0
data.qpos[:3] = IC_temp0[0:3]
data.qvel[:3] = IC_temp0[3:6]
data.qpos[3:] = IC_temp0[6:10]
data.qvel[3:] = IC_temp0[10:13]
mujoco.mj_applyFT(model, data, force_minus, torque, p2, body_id, data.qfrc_applied) # Apply the force.
mujoco.mj_step(model, data)
ans_neg = np.concatenate((data.qpos[:3],data.qvel[:3], data.qpos[3:], data.qvel[3:]),axis =None)
if k==0:
B = np.subtract(ans_pos,ans_neg)/(2*delta)
else :
temp = np.subtract(ans_pos,ans_neg)/(2*delta)
B = np.vstack((B,temp))
B = B.transpose()
model.opt.timestep = default_tstep
data.time = current_time
return B
def control_cost(self) -> np.ndarray:
# Cost function to be minimized for control input optimization
if self.control_type == 'H-inf':
control_input = np.array(self.L @ self.state) + self.disturbance
elif self.control_type == 'LQR':
self.find_gains(r0=self.opti_states)
control_input = np.array(self.L @ self.state)
else:
raise ValueError("Invalid control type specified.")
return control_input
def update(self, current_position: np.ndarray, current_orientation: np.ndarray, current_velocity: np.ndarray, current_angular_velocity:np.ndarray, disturbance:np.ndarray = None):
# Calculate errors
position_error = self.target_position - current_position
orientation_error = self.target_orientation - current_orientation
velocity_error = np.array([0.0, 0.0, 0.0]) - current_velocity
angvel_error = np.array([0.0, 0.0, 0.0]) - current_angular_velocity
self.opti_states = np.concatenate((current_position, current_velocity, current_orientation, current_angular_velocity), axis=None)
        if disturbance is None:
            disturbance = np.zeros(8)  # No disturbance by default.
self.disturbance = disturbance
# Combine errors into the state vector
self.state = np.array([position_error[0], position_error[1], velocity_error[0], velocity_error[1], orientation_error[0], orientation_error[3], angvel_error[2]])
# Optimal U
original_u = self.control_cost()
        # Zero out the control inputs whose magnitude is below 0.25.
        intermediate_u = np.where(np.abs(original_u) < 0.25, 0.0, original_u)
if np.max(intermediate_u) == 0.0:
normalized_array = np.zeros(self.thruster_count)
else:
normalized_array = (intermediate_u - np.min(intermediate_u)) / (np.max(intermediate_u) - np.min(intermediate_u))
        # Round the normalized array to the nearest integer, biasing the center to 0.25.
        final_U = np.round(normalized_array - 0.25).astype(int)
        self.thrusters = final_U
return self.thrusters
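

# Usage sketch for DiscreteController (mirrors the __main__ block below): it is
# queried once per control step with the current MuJoCo state, e.g.
#
#     dc = DiscreteController([2.5, -1.5, 0.0], [1, 0, 0, 0], Mod=env, control_type="LQR")
#     thrusters = dc.update(position, quaternion, lin_vel, ang_vel)
#
# where `thrusters` is an integer on/off vector of length thruster_count.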
class PoseController:
"""
Controller for the pose of the robot."""
def __init__(self, model: DiscreteController, goal_x: List[float], goal_y: List[float], goal_theta: List[float], distance_threshold: float = 0.03) -> None:
# Discrete controller
self.model = model
# Creates an array goals
if goal_theta is None:
goal_theta = np.zeros_like(goal_x)
self.goals = np.array([goal_x, goal_y, goal_theta]).T
self.current_goal = self.goals[0]
self.current_goal_controller = np.zeros((3), dtype=np.float32)
self.current_goal_controller[:2] = self.current_goal[:2]
self.distance_threshold = distance_threshold
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
    def isGoalReached(self, state: Dict[str, np.ndarray]) -> bool:
        dist = np.linalg.norm(self.current_goal[:2] - state["position"][:2])
        return dist < self.distance_threshold
def getGoal(self) -> np.ndarray:
return self.current_goal
def setGoal(self, goal:np.ndarray) -> None:
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self) -> bool:
return len(self.goals) == 0
def getObs(self):
return self.obs_state.cpu().numpy()
def makeObservationBuffer(self, state):
q = state["quaternion"]
siny_cosp = 2 * (q[0] * q[3] + q[1] * q[2])
cosy_cosp = 1 - 2 * (q[2] * q[2] + q[3] * q[3])
self.obs_state[0,:2] = torch.tensor([cosy_cosp, siny_cosp], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"][:2], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"][-1]
self.obs_state[0,5] = 1
self.obs_state[0,6:8] = torch.tensor(self.current_goal[:2] - state["position"][:2], dtype=torch.float32, device="cuda")
heading = np.arctan2(siny_cosp, cosy_cosp)
heading_error = np.arctan2(np.sin(self.current_goal[-1] - heading), np.cos(self.current_goal[-1] - heading))
self.obs_state[0,8] = torch.tensor(np.cos(heading_error), dtype=torch.float32, device="cuda")
self.obs_state[0,9] = torch.tensor(np.sin(heading_error), dtype=torch.float32, device="cuda")
def makeState4Controller(self, state: Dict[str, np.ndarray]) -> List[np.ndarray]:
self.makeObservationBuffer(state)
current_position = state["position"]
current_position[-1] = 0
current_orientation = state["quaternion"]
current_linear_velocity = state["linear_velocity"]
current_angular_velocity = state["angular_velocity"]
return current_position, current_orientation, current_linear_velocity, current_angular_velocity
def getAction(self, state: Dict[str, np.ndarray], **kwargs) -> np.ndarray:
if self.isGoalReached(state):
print("Goal reached!")
if len(self.goals) > 1:
                self.current_goal = self.goals[1]
                self.current_goal_controller[:2] = self.current_goal[:2]
self.goals = self.goals[1:]
                self.model.find_gains(r0=self.model.opti_states)
else:
self.goals = []
current_position, current_orientation, current_linear_velocity, current_angular_velocity = self.makeState4Controller(state)
self.model.set_target(self.current_goal_controller, [1,0,0,0])
return self.model.update(current_position, current_orientation, current_linear_velocity, current_angular_velocity)
def parseArgs():
parser = argparse.ArgumentParser("Generates meshes out of Digital Elevation Models (DEMs) or Heightmaps.")
parser.add_argument("--goal_x", type=float, nargs="+", default=None, help="List of x coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_y", type=float, nargs="+", default=None, help="List of y coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_theta", type=float, nargs="+", default=None, help="List of headings for the goals to be reached by the platform. In world frame, radiants.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--save_dir", type=str, default="position_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert not args.goal_x is None, "The x coordinates of the goals must be specified."
assert not args.goal_y is None, "The y coordinates of the goals must be specified."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
assert len(args.goal_x) == len(args.goal_y), "The number of x coordinates must be equal to the number of y coordinates."
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
    except OSError:
raise ValueError("Could not create the save directory.")
# Creates the environment
print(1.0/args.sim_rate)
env = MuJoCoPoseControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Instantiates the Discrete Controller (DC)
model = DiscreteController([2.5,-1.5,0.],[1,0,0,0], Mod=env, control_type='LQR') # control type: 'H-inf' or 'LQR' | H-inf not stable at many locations
# Creates the velocity tracker
position_controller = PoseController(model, args.goal_x, args.goal_y, args.goal_theta)
# Runs the simulation
env.runLoop(position_controller, [0,0])
# Plots the simulation
env.plotSimulation(save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir)
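
# Example invocation (goals are placeholders):
#   python pose_controller_DC.py --goal_x 2.5 --goal_y -1.5 --goal_theta 0.0 \
#       --sim_rate 50 --play_rate 5 --save_dir position_exp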
| 26,942 |
Python
| 45.373494 | 318 | 0.575644 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/linear_velocity_tracker_RL.py
|
from typing import Callable, NamedTuple, Optional, Union, List
import matplotlib.pyplot as plt
import numpy as np
import argparse
import mujoco
import torch
import os
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import RLGamesModel
class MuJoCoVelTracking(MuJoCoFloatingPlatform):
"""
The environment for the velocity tracking task inside Mujoco."""
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
"""
Initializes the loggers."""
super().initializeLoggers()
self.logs["velocity_goal"] = []
self.logs["position_target"] = []
def updateLoggers(self, goal: np.ndarray, target: np.ndarray) -> None:
"""
Updates the loggers."""
super().updateLoggers()
self.logs["velocity_goal"].append(goal)
self.logs["position_target"].append(target)
def runLoop(self, model, xy: np.ndarray) -> None:
"""
Runs the simulation loop."""
self.resetPosition() # Resets the position of the body.
self.data.qpos[:2] = xy # Sets the position of the body.
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal(), model.getTargetPosition())
def plotSimulation(self, dpi:int = 135, width:int = 1000, height:int = 1000, save:bool = False, save_dir:str = "velocity_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax.plot(self.logs["timevals"], self.logs["velocity_goal"], label="target velocities")
ax.legend()
ax.set_xlabel('time (seconds)')
ax.set_ylabel('Linear velocities (m/s)')
_ = ax.set_title('Linear velocity tracking')
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir,"velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.plot(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="trajectory")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('x (meters)')
ax.set_ylabel('y (meters)')
ax.axis("equal")
_ = ax.set_title('Trajectory in xy plane')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class TrajectoryTracker:
"""
A class to generate and track trajectories."""
def __init__(self, lookahead:float = 0.25, closed:bool = False, offset = (0,0)):
self.current_point = -1
self.lookhead = lookahead
self.closed = closed
self.is_done = False
self.offset = np.array(offset)
def generateCircle(self, radius:float = 2, num_points:int = 360*10):
theta = np.linspace(0, 2*np.pi, num_points, endpoint=(not self.closed))
self.positions = np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
    def generateSquare(self, h: float = 2, num_points: int = 360*10) -> None:
        points_per_side = num_points // 4
        s1y = np.linspace(-h/2, h/2, points_per_side, endpoint=False)
        s1x = np.ones_like(s1y) * h/2
        s2x = np.linspace(h/2, -h/2, points_per_side, endpoint=False)
        s2y = np.ones_like(s2x) * h/2
        s3y = np.linspace(h/2, -h/2, points_per_side, endpoint=False)
        s3x = np.ones_like(s3y) * (-h/2)
        s4x = np.linspace(-h/2, h/2, points_per_side, endpoint=False)
        s4y = np.ones_like(s4x) * (-h/2)
        self.positions = np.vstack([np.hstack([s1x, s2x, s3x, s4x]), np.hstack([s1y, s2y, s3y, s4y])]).T + self.offset
        # A square has no smooth tangent at its corners; headings are left as placeholders.
        self.angles = np.ones_like(self.positions)
def generateSpiral(self, start_radius:float = 0.5, end_radius:float = 2, num_loop:float = 5, num_points: int = 360*20) -> None:
radius = np.linspace(start_radius, end_radius, num_points, endpoint=(not self.closed))
theta = np.linspace(0, 2*np.pi*num_loop, num_points, endpoint=(not self.closed))
self.positions = np.array([np.cos(theta) * radius, np.sin(theta) * radius]).T + self.offset
self.angles = np.array([-np.sin(theta), np.cos(theta)]).T
def getTrackingPointIdx(self, position:np.ndarray) -> None:
distances = np.linalg.norm(self.positions - position, axis=1)
if self.current_point == -1:
self.current_point = 0
else:
            indices = np.where(distances < self.lookahead)[0]
if len(indices) > 0:
indices = indices[indices < 60]
if len(indices) > 0:
self.current_point = np.max(indices)
def rollTrajectory(self) -> None:
if self.closed:
self.positions = np.roll(self.positions, -self.current_point, axis=0)
self.angles = np.roll(self.angles, -self.current_point, axis=0)
self.current_point = 0
else:
self.positions = self.positions[self.current_point:]
self.angles = self.angles[self.current_point:]
self.current_point = 0
if self.positions.shape[0] <= 1:
self.is_done = True
def getPointForTracking(self) -> List[np.ndarray]:
position = self.positions[self.current_point]
angle = self.angles[self.current_point]
self.rollTrajectory()
return position, angle
def get_target_position(self) -> np.ndarray:
return self.target_position
def computeVelocityVector(self, target_position:np.ndarray, position:np.ndarray) -> np.ndarray:
diff = target_position - position
return diff / np.linalg.norm(diff)
def getVelocityVector(self, position:np.ndarray) -> np.ndarray:
self.getTrackingPointIdx(position)
self.target_position, target_angle = self.getPointForTracking()
velocity_vector = self.computeVelocityVector(self.target_position, position)
return velocity_vector
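# Minimal usage sketch (illustrative only, never called by this script): build a
# closed circular path and query the unit direction a lookahead-based follower
# would track from a nearby position. All numbers are arbitrary examples.
def _example_trajectory_tracker() -> None:
    tracker = TrajectoryTracker(lookahead=0.25, closed=True)
    tracker.generateCircle(radius=2.0)
    direction = tracker.getVelocityVector(np.array([1.5, 0.0]))  # unit vector toward the tracked point
    target = tracker.get_target_position()  # the trajectory point currently being tracked
    print(direction, target)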
class VelocityTracker:
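    """Follows a trajectory by turning it into velocity goals for a velocity-tracking RL agent."""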
def __init__(self, trajectory_tracker: TrajectoryTracker, model: RLGamesModel, target_tracking_velocity:float = 0.25):
self.trajectory_tracker = trajectory_tracker
self.model = model
self.target_tracking_velocity = target_tracking_velocity
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
def getGoal(self):
return self.velocity_vector*self.target_tracking_velocity
def setGoal(self, goal):
self.target_tracking_velocity = goal
def getObs(self):
return self.obs_state.cpu().numpy()
def getTargetPosition(self):
return self.trajectory_tracker.get_target_position()
def isDone(self):
return self.trajectory_tracker.is_done
def makeObservationBuffer(self, state, velocity_vector):
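        # Observation layout (10 values): [0:2] cos/sin of the platform heading,
        # [2:4] linear velocity, [4] angular velocity, [5] task flag (2 appears
        # to select the velocity-tracking task), [6:8] velocity error to drive
        # to zero; [8:10] are left at zero for this task.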
self.obs_state[0,:2] = torch.tensor(state["orientation"], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"]
self.obs_state[0,5] = 2
self.obs_state[0,6:8] = torch.tensor(velocity_vector, dtype=torch.float32, device="cuda")
def getAction(self, state, is_deterministic=True):
self.velocity_vector = self.trajectory_tracker.getVelocityVector(state["position"])
velocity_goal = self.velocity_vector*self.target_tracking_velocity - state["linear_velocity"]
self.makeObservationBuffer(state, velocity_goal)
action = self.model.getAction(self.obs_state, is_deterministic=is_deterministic)
return action
def parseArgs():
parser = argparse.ArgumentParser("Generates meshes out of Digital Elevation Models (DEMs) or Heightmaps.")
parser.add_argument("--model_path", type=str, default=None, help="The path to the model to be loaded. It must be a velocity tracking model.")
parser.add_argument("--config_path", type=str, default=None, help="The path to the network configuration to be loaded.")
parser.add_argument("--trajectory_type", type=str, default="Circle", help="The type of trajectory to be generated. Options are: Circle, Square, Spiral.")
parser.add_argument("--trajectory_x_offset", type=float, default=0, help="The offset of the trajectory along the x axis. In meters.")
parser.add_argument("--trajectory_y_offset", type=float, default=0, help="The offset of the trajectory along the y axis. In meters.")
parser.add_argument("--radius", type=float, default=1.5, help="The radius of the circle trajectory. In meters.")
parser.add_argument("--height", type=float, default=3.0, help="The height of the square trajectory. In meters.")
parser.add_argument("--start_radius", type=float, default=0.5, help="The starting radius for the spiral for the spiral trajectory. In meters.")
parser.add_argument("--end_radius", type=float, default=2.0, help="The final radius for the spiral trajectory. In meters.")
parser.add_argument("--num_loop", type=float, default=5.0, help="The number of loops the spiral trajectory should make. Must be greater than 0.")
parser.add_argument("--closed", type=bool, default=True, help="Whether the trajectory is closed (it forms a loop) or not.")
parser.add_argument("--lookahead_dist", type=float, default=0.15, help="How far the velocity tracker looks to generate the velocity vector that will track the trajectory. In meters.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--tracking_velocity", type=float, default=0.25, help="The tracking velocity. In meters per second.")
parser.add_argument("--save_dir", type=str, default="velocity_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert os.path.exists(args.model_path), "The model file does not exist."
assert os.path.exists(args.config_path), "The configuration file does not exist."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.num_loop > 0, "The number of loops must be greater than 0."
assert args.lookahead_dist > 0, "The lookahead distance must be greater than 0."
assert args.radius > 0, "The radius must be greater than 0."
assert args.start_radius > 0, "The start radius must be greater than 0."
assert args.end_radius > 0, "The end radius must be greater than 0."
assert args.height > 0, "The height must be greater than 0."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.tracking_velocity > 0, "The tracking velocity must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
except:
raise ValueError("Could not create the save directory.")
# Creates the trajectory tracker
tracker = TrajectoryTracker(lookahead=args.lookahead_dist, closed=args.closed, offset=(args.trajectory_x_offset, args.trajectory_y_offset))
if args.trajectory_type.lower() == "square":
tracker.generateSquare(h=args.height)
elif args.trajectory_type.lower() == "circle":
tracker.generateCircle(radius=args.radius)
elif args.trajectory_type.lower() == "spiral":
tracker.generateSpiral(start_radius=args.start_radius, end_radius=args.end_radius, num_loop=args.num_loop)
else:
raise ValueError("Unknown trajectory type. Must be square, circle or spiral.")
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
# Creates the velocity tracker
velocity_tracker = VelocityTracker(tracker, model)
# Creates the environment
env = MuJoCoVelTracking(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Runs the simulation
env.runLoop(velocity_tracker, [0,0])
# Plots the simulation
env.plotSimulation(save=True, save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(args.save_dir)
| 14,937 |
Python
| 53.123188 | 318 | 0.659035 |
elharirymatteo/RANS/omniisaacgymenvs/mujoco_envs/legacy/pose_controller_RL.py
|
from typing import Callable, NamedTuple, Optional, Union, List
import matplotlib.pyplot as plt
import numpy as np
import argparse
import mujoco
import torch
import os
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import MuJoCoFloatingPlatform
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import RLGamesModel
class MuJoCoPositionControl(MuJoCoFloatingPlatform):
def __init__(self, step_time:float = 0.02, duration:float = 60.0, inv_play_rate:int = 10,
mass:float = 5.32, max_thrust:float = 1.0, radius:float = 0.31) -> None:
super().__init__(step_time, duration, inv_play_rate, mass, max_thrust, radius)
def initializeLoggers(self) -> None:
super().initializeLoggers()
self.logs["position_target"] = []
self.logs["heading_target"] = []
def updateLoggers(self, target) -> None:
super().updateLoggers()
self.logs["position_target"].append(target[:2])
self.logs["heading_target"].append(target[-1])
def runLoop(self, model, initial_position=[0,0], initial_orientation=[1,0,0,0]) -> None:
"""
Runs the simulation loop.
        model: the agent.
        initial_position: initial 2D position of the body.
        initial_orientation: initial orientation quaternion of the body."""
self.reset(initial_position=initial_position, initial_orientation=initial_orientation)
while (self.duration > self.data.time) and (model.isDone() == False):
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
def runLoopForNSteps(self, model, initial_position=[0,0], initial_orientation=[1,0,0,0], max_steps=502) -> None:
"""
Runs the simulation loop.
        model: the agent.
        initial_position: initial 2D position of the body.
        initial_orientation: initial orientation quaternion of the body.
        max_steps: the number of agent steps to run."""
self.reset(initial_position=initial_position, initial_orientation=initial_orientation)
i = 0
while i < max_steps:
state = self.updateState() # Updates the state of the simulation.
# Get the actions from the controller
self.actions = model.getAction(state)
# Plays only once every self.inv_play_rate steps.
for _ in range(self.inv_play_rate):
self.applyForces(self.actions)
mujoco.mj_step(self.model, self.data)
self.updateLoggers(model.getGoal())
i += 1
def plotSimulation(self, dpi:int = 90, width:int = 1000, height:int = 1000, save:bool = True, save_dir:str = "position_exp") -> None:
"""
Plots the simulation."""
figsize = (width / dpi, height / dpi)
fig, ax = plt.subplots(2, 1, figsize=figsize, dpi=dpi)
ax[0].plot(self.logs["timevals"], self.logs["angular_velocity"])
ax[0].set_title('angular velocity')
ax[0].set_ylabel('radians / second')
ax[1].plot(self.logs["timevals"], self.logs["linear_velocity"], label="system velocities")
ax[1].legend()
ax[1].set_xlabel('time (seconds)')
ax[1].set_ylabel('meters / second')
_ = ax[1].set_title('linear_velocity')
print(save_dir)
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "velocities.png"))
except Exception as e:
print("Saving failed: ", e)
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.scatter(np.array(self.logs["position_target"])[:,0], np.array(self.logs["position_target"])[:,1], label="position goals")
ax.plot(np.array(self.logs["position"])[:,0], np.array(self.logs["position"])[:,1], label="system position")
ax.legend()
ax.set_xlabel('meters')
ax.set_ylabel('meters')
ax.axis("equal")
_ = ax.set_title('x y coordinates')
plt.tight_layout()
if save:
try:
os.makedirs(save_dir, exist_ok=True)
fig.savefig(os.path.join(save_dir, "positions.png"))
except Exception as e:
print("Saving failed: ", e)
class PoseController:
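    """Drives the platform through a list of (x, y, theta) goals using a pose-control RL agent."""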
def __init__(self, model: RLGamesModel, goal_x: List[float], goal_y: List[float], goal_theta: List[float], distance_threshold: float = 0.03, heading_threshold: float = 0.03) -> None:
        self.model = model
        if goal_theta is None:
            goal_theta = np.zeros_like(goal_x)
        self.goals = np.array([goal_x, goal_y, goal_theta]).T
self.current_goal = self.goals[0]
self.distance_threshold = distance_threshold
self.heading_threshold = heading_threshold
self.obs_state = torch.zeros((1,10), dtype=torch.float32, device="cuda")
def isGoalReached(self, state):
        dist = np.linalg.norm(self.current_goal[:2] - state["position"])
        heading = np.arctan2(state["orientation"][1], state["orientation"][0])
        ang = np.abs(np.arctan2(np.sin(self.current_goal[-1] - heading), np.cos(self.current_goal[-1] - heading)))
dist_cd = False
ang_cd = False
if dist < self.distance_threshold:
dist_cd = True
if ang < self.heading_threshold:
ang_cd = True
return dist_cd and ang_cd
def getGoal(self):
return self.current_goal
def setGoal(self, goal):
self.current_goal = goal
self.goals = np.array([goal])
def isDone(self):
return len(self.goals) == 0
def getObs(self):
return self.obs_state.cpu().numpy()
def makeObservationBuffer(self, state):
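        # Observation layout (10 values): [0:2] cos/sin of the platform heading,
        # [2:4] linear velocity, [4] angular velocity, [5] task flag (1 appears
        # to select the go-to-pose task), [6:8] position error to the goal,
        # [8:10] cos/sin of the heading error.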
self.obs_state[0,:2] = torch.tensor(state["orientation"], dtype=torch.float32, device="cuda")
self.obs_state[0,2:4] = torch.tensor(state["linear_velocity"], dtype=torch.float32, device="cuda")
self.obs_state[0,4] = state["angular_velocity"]
self.obs_state[0,5] = 1
self.obs_state[0,6:8] = torch.tensor(self.current_goal[:2] - state["position"], dtype=torch.float32, device="cuda")
heading = np.arctan2(state["orientation"][1], state["orientation"][0])
heading_error = np.arctan2(np.sin(self.current_goal[-1] - heading), np.cos(self.current_goal[-1] - heading))
self.obs_state[0,8] = torch.tensor(np.cos(heading_error), dtype=torch.float32, device="cuda")
self.obs_state[0,9] = torch.tensor(np.sin(heading_error), dtype=torch.float32, device="cuda")
def getAction(self, state, is_deterministic: bool = True):
if self.isGoalReached(state):
#print("Goal reached!")
if len(self.goals) > 1:
self.current_goal = self.goals[1]
self.goals = self.goals[1:]
else:
self.goals = []
self.makeObservationBuffer(state)
return self.model.getAction(self.obs_state,is_deterministic=is_deterministic)
def runBatchEvaluation(args, cfg=None):
    if cfg is None:
        cfg = {}
    horizon = 500
#cfg["maxEpisodeLength"] = horizon + 2
#cfg["platform_mass"] = 5.32
#cfg["clipObservations"]["state"] = 20.0
cfg["max_spawn_dist"] = 4.0
cfg["min_spawn_dist"] = 3.0
#cfg["kill_dist"] = 6.0
cfg["num_envs"] = 256
# Try to create the save directory
try:
os.makedirs(args.save_dir, exist_ok=True)
except:
raise ValueError("Could not create the save directory.")
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
    # Creates the pose controller
position_controller = PoseController(model, [0], [0], [0])
# Creates the environment
env = MuJoCoPositionControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
for i in range(cfg["num_envs"]):
# Runs the simulation
initial_position, initial_orientation = env.RS.getInitialCondition()
        env.runLoopForNSteps(position_controller, initial_position=initial_position, initial_orientation=initial_orientation, max_steps=args.num_steps)
# Plots the simulation
# Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir, suffix=str(i))
env.plotBatch(save_dir = args.save_dir)
def runSingleEvaluation(args):
try:
os.makedirs(args.save_dir, exist_ok=True)
except:
raise ValueError("Could not create the save directory.")
# Instantiates the RL agent
model = RLGamesModel(args.config_path, args.model_path)
    # Creates the pose controller
position_controller = PoseController(model, args.goal_x, args.goal_y, args.goal_theta)
# Creates the environment
env = MuJoCoPositionControl(step_time=1.0/args.sim_rate, duration=args.sim_duration, inv_play_rate=int(args.sim_rate/args.play_rate),
mass=args.platform_mass, radius=args.platform_radius, max_thrust=args.platform_max_thrust)
# Runs the simulation
env.runLoop(position_controller, [0,0])
# Plots the simulation
env.plotSimulation(save_dir = args.save_dir)
# Saves the simulation data
env.saveSimulationData(save_dir = args.save_dir)
def parseArgs():
parser = argparse.ArgumentParser("Generates meshes out of Digital Elevation Models (DEMs) or Heightmaps.")
parser.add_argument("--model_path", type=str, default=None, help="The path to the model to be loaded. It must be a velocity tracking model.")
parser.add_argument("--config_path", type=str, default=None, help="The path to the network configuration to be loaded.")
parser.add_argument("--goal_x", type=float, nargs="+", default=None, help="List of x coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_y", type=float, nargs="+", default=None, help="List of y coordinates for the goals to be reached by the platform.")
parser.add_argument("--goal_theta", type=float, nargs="+", default=None, help="List of headings for the goals to be reached by the platform. In world frame, radiants.")
parser.add_argument("--sim_duration", type=float, default=240, help="The length of the simulation. In seconds.")
parser.add_argument("--play_rate", type=float, default=5.0, help="The frequency at which the agent will played. In Hz. Note, that this depends on the sim_rate, the agent my not be able to play at this rate depending on the sim_rate value. To be consise, the agent will play at: sim_rate / int(sim_rate/play_rate)")
parser.add_argument("--sim_rate", type=float, default=50.0, help="The frequency at which the simulation will run. In Hz.")
parser.add_argument("--save_dir", type=str, default="position_exp", help="The path to the folder in which the results will be stored.")
parser.add_argument("--platform_mass", type=float, default=5.32, help="The mass of the floating platform. In Kg.")
parser.add_argument("--platform_radius", type=float, default=0.31, help="The radius of the floating platform. In meters.")
parser.add_argument("--platform_max_thrust", type=float, default=1.0, help="The maximum thrust of the floating platform. In newtons.")
parser.add_argument("--run_batch", type=bool, default=False, help="If mujoco should be run in batch mode, it's useful to evaluate models. True will enable batch mode.")
parser.add_argument("--num_evals", type=int, default=256, help="The number of experiments that should be ran when in batch mode.")
parser.add_argument("--num_steps", type=int, default=502, help="The number of steps the simulation should run for in batch mode.")
args, unknown_args = parser.parse_known_args()
return args, unknown_args
if __name__ == "__main__":
# Collects args
args, _ = parseArgs()
# Checks args
assert os.path.exists(args.model_path), "The model file does not exist."
assert os.path.exists(args.config_path), "The configuration file does not exist."
    if not args.run_batch:
        assert not args.goal_x is None, "The x coordinates of the goals must be specified."
        assert not args.goal_y is None, "The y coordinates of the goals must be specified."
        assert not args.goal_theta is None, "The theta coordinates of the goals must be specified."
assert args.sim_rate > args.play_rate, "The simulation rate must be greater than the play rate."
assert args.sim_duration > 0, "The simulation duration must be greater than 0."
assert args.play_rate > 0, "The play rate must be greater than 0."
assert args.sim_rate > 0, "The simulation rate must be greater than 0."
assert args.platform_mass > 0, "The mass of the platform must be greater than 0."
assert args.platform_radius > 0, "The radius of the platform must be greater than 0."
assert args.platform_max_thrust > 0, "The maximum thrust of the platform must be greater than 0."
    if not args.run_batch:
        assert len(args.goal_x) == len(args.goal_y), "The number of x coordinates must be equal to the number of y coordinates."
        assert len(args.goal_x) == len(args.goal_theta), "The number of x coordinates must be equal to the number of headings."
# Try to create the save directory
if args.run_batch:
runBatchEvaluation(args)
else:
runSingleEvaluation(args)
| 13,511 |
Python
| 50.376426 | 318 | 0.650877 |
elharirymatteo/RANS/omniisaacgymenvs/utils/arrow3D.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Optional, Sequence
import numpy as np
from omni.isaac.core.materials.visual_material import VisualMaterial
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.prims.geometry_prim import GeometryPrim
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.core.materials import PhysicsMaterial
from omni.isaac.core.utils.string import find_unique_string_name
from pxr import UsdGeom, Gf
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from omni.isaac.core.utils.stage import get_current_stage
from omniisaacgymenvs.utils.shape_utils import Arrow3D
class VisualArrow3D(XFormPrim, Arrow3D):
"""_summary_
Args:
prim_path (str): _description_
name (str, optional): _description_. Defaults to "visual_arrow".
position (Optional[Sequence[float]], optional): _description_. Defaults to None.
translation (Optional[Sequence[float]], optional): _description_. Defaults to None.
orientation (Optional[Sequence[float]], optional): _description_. Defaults to None.
scale (Optional[Sequence[float]], optional): _description_. Defaults to None.
visible (Optional[bool], optional): _description_. Defaults to True.
color (Optional[np.ndarray], optional): _description_. Defaults to None.
radius (Optional[float], optional): _description_. Defaults to None.
visual_material (Optional[VisualMaterial], optional): _description_. Defaults to None.
Raises:
Exception: _description_
"""
def __init__(
self,
prim_path: str,
name: str = "visual_arrow",
position: Optional[Sequence[float]] = None,
translation: Optional[Sequence[float]] = None,
orientation: Optional[Sequence[float]] = None,
scale: Optional[Sequence[float]] = None,
visible: Optional[bool] = True,
color: Optional[np.ndarray] = None,
body_radius: Optional[float] = None,
body_length: Optional[float] = None,
head_radius: Optional[float] = None,
head_length: Optional[float] = None,
visual_material: Optional[VisualMaterial] = None,
) -> None:
if visible is None:
visible = True
XFormPrim.__init__(
self,
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
)
Arrow3D.__init__(
self, prim_path, body_radius, body_length, head_radius, head_length
)
self.setBodyRadius(body_radius)
self.setBodyLength(body_length)
self.setHeadRadius(head_radius)
self.setHeadLength(head_length)
self.updateExtent()
return
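# Minimal usage sketch (illustrative only; assumes an open USD stage, e.g. a
# running Isaac Sim session, and a hypothetical prim path):
def _example_visual_arrow(prim_path: str = "/World/example_arrow") -> VisualArrow3D:
    return VisualArrow3D(
        prim_path,
        body_radius=0.02,
        body_length=0.5,
        head_radius=0.05,
        head_length=0.1,
    )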
class FixedArrow3D(VisualArrow3D):
"""_summary_
Args:
prim_path (str): _description_
name (str, optional): _description_. Defaults to "fixed_sphere".
position (Optional[np.ndarray], optional): _description_. Defaults to None.
translation (Optional[np.ndarray], optional): _description_. Defaults to None.
orientation (Optional[np.ndarray], optional): _description_. Defaults to None.
scale (Optional[np.ndarray], optional): _description_. Defaults to None.
visible (Optional[bool], optional): _description_. Defaults to None.
color (Optional[np.ndarray], optional): _description_. Defaults to None.
radius (Optional[np.ndarray], optional): _description_. Defaults to None.
visual_material (Optional[VisualMaterial], optional): _description_. Defaults to None.
physics_material (Optional[PhysicsMaterial], optional): _description_. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "fixed_arrow",
position: Optional[np.ndarray] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
scale: Optional[np.ndarray] = None,
visible: Optional[bool] = None,
color: Optional[np.ndarray] = None,
body_radius: Optional[float] = None,
body_length: Optional[float] = None,
head_radius: Optional[float] = None,
head_length: Optional[float] = None,
visual_material: Optional[VisualMaterial] = None,
physics_material: Optional[PhysicsMaterial] = None,
) -> None:
if not is_prim_path_valid(prim_path):
# set default values if no physics material given
if physics_material is None:
static_friction = 0.2
dynamic_friction = 1.0
restitution = 0.0
physics_material_path = find_unique_string_name(
initial_name="/World/Physics_Materials/physics_material",
is_unique_fn=lambda x: not is_prim_path_valid(x),
)
physics_material = PhysicsMaterial(
prim_path=physics_material_path,
dynamic_friction=dynamic_friction,
static_friction=static_friction,
restitution=restitution,
)
VisualArrow3D.__init__(
self,
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
color=color,
body_radius=body_radius,
body_length=body_length,
head_radius=head_radius,
head_length=head_length,
visual_material=visual_material,
)
# XFormPrim.set_collision_enabled(self, True)
# if physics_material is not None:
# FixedArrow.apply_physics_material(self, physics_material)
return
class DynamicArrow3D(RigidPrim, FixedArrow3D):
"""_summary_
Args:
prim_path (str): _description_
name (str, optional): _description_. Defaults to "dynamic_sphere".
position (Optional[np.ndarray], optional): _description_. Defaults to None.
translation (Optional[np.ndarray], optional): _description_. Defaults to None.
orientation (Optional[np.ndarray], optional): _description_. Defaults to None.
scale (Optional[np.ndarray], optional): _description_. Defaults to None.
visible (Optional[bool], optional): _description_. Defaults to None.
color (Optional[np.ndarray], optional): _description_. Defaults to None.
radius (Optional[np.ndarray], optional): _description_. Defaults to None.
visual_material (Optional[VisualMaterial], optional): _description_. Defaults to None.
physics_material (Optional[PhysicsMaterial], optional): _description_. Defaults to None.
mass (Optional[float], optional): _description_. Defaults to None.
density (Optional[float], optional): _description_. Defaults to None.
linear_velocity (Optional[Sequence[float]], optional): _description_. Defaults to None.
angular_velocity (Optional[Sequence[float]], optional): _description_. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "dynamic_sphere",
position: Optional[np.ndarray] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
scale: Optional[np.ndarray] = None,
visible: Optional[bool] = None,
color: Optional[np.ndarray] = None,
body_radius: Optional[float] = None,
body_length: Optional[float] = None,
head_radius: Optional[float] = None,
head_length: Optional[float] = None,
visual_material: Optional[VisualMaterial] = None,
physics_material: Optional[PhysicsMaterial] = None,
mass: Optional[float] = None,
density: Optional[float] = None,
linear_velocity: Optional[Sequence[float]] = None,
angular_velocity: Optional[Sequence[float]] = None,
) -> None:
if not is_prim_path_valid(prim_path):
if mass is None:
mass = 0.02
FixedArrow3D.__init__(
self,
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
color=color,
body_radius=body_radius,
body_length=body_length,
head_radius=head_radius,
head_length=head_length,
visual_material=visual_material,
physics_material=physics_material,
)
RigidPrim.__init__(
self,
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
mass=mass,
density=density,
linear_velocity=linear_velocity,
angular_velocity=angular_velocity,
)
| 9,523 |
Python
| 40.051724 | 96 | 0.616297 |
elharirymatteo/RANS/omniisaacgymenvs/utils/plot_lab_data.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from matplotlib.ticker import AutoMinorLocator
from pathlib import Path
from utils.plot_experiment import plot_one_episode
import argparse
if __name__ == "__main__":
# Get load dir from arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--load_dir", type=str, default=None, help="Directory to load data from"
)
args = parser.parse_args()
load_dir = Path(args.load_dir)
# load_dir = Path("./ros_lab_exp/7_9_23/dc_controller")
sub_dirs = [d for d in load_dir.iterdir() if d.is_dir()]
# sub_dirs = [d for d in sub_dirs if ("pose" not in str(d) and "kill3" not in str(d) and "new_pose" not in str(d))]
if sub_dirs:
latest_exp = max(sub_dirs, key=os.path.getmtime)
n_episodes = 1
else:
print("No experiments found in", load_dir)
exit()
for d in sub_dirs:
obs_path = os.path.join(d, "obs.npy")
actions_path = os.path.join(d, "act.npy")
if not os.path.exists(obs_path) or not os.path.exists(actions_path):
print("Required files not found in", d)
exit()
obs = np.load(obs_path, allow_pickle=True)
actions = np.load(actions_path)
# if obs is empty, skip this experiment and print warning
if not obs.any():
print(f"Empty obs file in {d}, skipping...")
continue
print("Plotting data for experiment:", d)
# transform the obs numpy array of dictionaries to numpy array of arrays
obs = np.array([o.flatten() for o in obs])
save_to = os.path.join(d, "plots/")
os.makedirs(save_to, exist_ok=True)
ep_data = {"act": actions, "obs": obs, "rews": []}
plot_one_episode(ep_data, save_to, show=False)
print("Done!")
| 2,145 |
Python
| 31.02985 | 119 | 0.614452 |
elharirymatteo/RANS/omniisaacgymenvs/utils/make_latex_table.py
|
import pandas as pd
import numpy as np
import os
RL_root = "RL"
DC_root = "DC-real"
exp_keys = ["AN","VN","UF","TD","RTF"]
ordered_metrics_keys = ["PA1","PA2","PSA","OA1","OA2","OSA","ALV","AAV","AAC","PT5","PT2","PT1","OT5","OT2","OT1"]
metrics_keys = ["PT5","PT2","PT1","OT5","OT2","OT1","ALV","AAV","AAC"]
colored_metrics = ["PT5","PT2","PT1","OT5","OT2","OT1"]
exp_names = os.listdir(RL_root)
exp_names.sort()
#columns = ["Controller","AN","ON","UF","TD","RTK","PA2","PA1","PSA","OA2","OA1","OSA","ALV","AAV","AAC","PT5","PT2","PT1","OT5","OT2","OT1"]
columns = ["Controller","AN","VN","UF","TD","RTF","PT5","PT2","PT1","OT5","OT2","OT1","ALV","AAV","AAC"]
table = pd.DataFrame(0, columns=columns, index=range(len(exp_names)*2))
ctable = pd.DataFrame("none", columns=columns, index=range(len(exp_names)*2))
colors_name = np.array(['ForestGreen',
'LimeGreen',
'Goldenrod',
'Orange',
'OrangeRed'])
cv1 = np.array([0,20,40,60,80])
cv2 = np.array([20,40,60,80,100])
i = 0
exp_name = "ideal"
exp_keys_names = []
exp_keys_values = []
table["Controller"][i] = "RL"
table["Controller"][i+len(exp_names)] = "LQR"
RL_baseline = np.load(os.path.join(RL_root,exp_name,"aggregated_results.npy"))
DC_baseline = np.load(os.path.join(DC_root,exp_name,"aggregated_results.npy"))
for j, metric in enumerate(ordered_metrics_keys):
if metric in metrics_keys:
if metric in colored_metrics:
table[metric][i] = int(RL_baseline[j]*100)
table[metric][i+len(exp_names)] = int(DC_baseline[j]*100)
ctable[metric][i] = 'black'
ctable[metric][i+len(exp_names)] = 'black'
else:
table[metric][i] = RL_baseline[j]
table[metric][i+len(exp_names)] = DC_baseline[j]
i = 1
for exp_name in exp_names:
if exp_name == "ideal":
continue
exp_keys_names = []
exp_keys_values = []
table["Controller"][i] = "RL"
table["Controller"][i+len(exp_names)] = "LQR"
for tmp in exp_name.split("-"):
exp_keys_name = tmp.split("_")[0]
exp_keys_values = float(tmp.split("_")[1])
table[exp_keys_name][i] = exp_keys_values
table[exp_keys_name][i+len(exp_names)] = exp_keys_values
RL_results = np.load(os.path.join(RL_root,exp_name,"aggregated_results.npy"))
DC_results = np.load(os.path.join(DC_root,exp_name,"aggregated_results.npy"))
RL_deltas = (RL_baseline - RL_results) / RL_baseline
DC_deltas = (DC_baseline - DC_results) / DC_baseline
for j, metric in enumerate(ordered_metrics_keys):
if metric in metrics_keys:
if metric in colored_metrics:
table[metric][i] = int(RL_results[j]*100)
table[metric][i+len(exp_names)] = int(DC_results[j]*100)
if RL_deltas[j] < 0:
RL_deltas[j] = 0
b1 = RL_deltas[j]*100 <= cv2
b2 = RL_deltas[j]*100 >= cv1
b = b1*b2
ctable[metric][i] = colors_name[b][0]
if DC_deltas[j] < 0:
DC_deltas[j] = 0
b1 = DC_deltas[j]*100 <= cv2
b2 = DC_deltas[j]*100 >= cv1
b = b1*b2
ctable[metric][i+len(exp_names)] = colors_name[b][0]
else:
table[metric][i] = RL_results[j]
table[metric][i+len(exp_names)] = DC_results[j]
i+=1
print(table)
print(ctable)
latex1 = table.to_latex(float_format="%.2f")
latex2 = ctable.to_latex(float_format="%.2f")
print(latex1)
print(latex2)
l1s = latex1.split("\n")
l2s = latex2.split("\n")
l3s = []
for i in range(4,40):
ll1s = l1s[i].split('&')
ll2s = l2s[i].split('&')
ll1s = [lll1s.strip() for lll1s in ll1s]
print(ll1s)
for j in range(2,7):
if (ll1s[j] == "0.00") or (ll1s[j] == "0"):
ll1s[j] = "-"
for j in range(7,13):
ll1s[j] = '\\textcolor{'+ll2s[j].strip()+'}{'+ll1s[j]+'}'
ll1s = ll1s[1:]
l3s.append('&'.join(ll1s))
l3s = l1s[:4]+l3s+l1s[40:]
header =["\\begin{tabular}{|l|l|ccccc|ccccccccc|}",
"\\toprule",
"\multirow{2}{*}{Conditions}& \multirow{2}{*}{Controllers} & \multicolumn{5}{c|}{Disturbances} & \multicolumn{9}{c|}{Metrics} \\",
"& & AN & VN & UF & TD & RTF & PT5 & PT2 & PT1 & OT5 & OT2 & OT1 & ALV & AAV & AAC \\",
"\midrule\hline",
]
data = [
'\multirow{2}{*}{Ideal} &'+l3s[4],
'&'+l3s[22]+'\hline\hline',
'\multirow{6}{*}{Velocity Noise} &'+l3s[19],
'&'+l3s[20],
'&'+l3s[21]+"\cline{2-16}",
'&'+l3s[37],
'&'+l3s[38],
'&'+l3s[39]+'\hline\hline',
'\multirow{8}{*}{Action Noise}&'+l3s[7],
'&'+l3s[8]+"\cline{2-16}",
'&'+l3s[25],
'&'+l3s[26]+'\hline\hline',
'\multirow{4}{*}{Constant Torque}&'+l3s[12],
'&'+l3s[13]+"\cline{2-16}",
'&'+l3s[30],
'&'+l3s[31]+'\hline\hline',
'\multirow{6}{*}{Constant Force}&'+l3s[14],
'&'+l3s[16],
'&'+l3s[18]+"\cline{2-16}",
'&'+l3s[32],
'&'+l3s[34],
'&'+l3s[36]+'\hline\hline',
'\multirow{4}{*}{Constant Force \& Torque}&'+l3s[15],
'&'+l3s[17]+"\cline{2-16}",
'&'+l3s[33],
'&'+l3s[35]+'\hline\hline',
'&'+l3s[9],
'&'+l3s[10],
'\multirow{6}{*}{Thruster Failures}&'+l3s[11]+"\cline{2-16}",
'&'+l3s[27],
'&'+l3s[28],
'&'+l3s[29]+'\hline\hline']
footer = ["\\bottomrule",
"\end{tabular}",
"}",
"\caption{",
"Description TBD. PT, OT higher",
"}",
"\label{tab:my_label}",
"\end{table*}",
]
latex3 = "\n".join(header+data+footer)
latexs = "\n".join(data)
print(latex3)
#print([ll1s[k] for k in range(7,13)])
#print([ll2s[k] for k in range(7,13)])
#print(['{\color{'+ll2s[k]+'}'+ll1s[k]+'}' for k in range(7,13)])
print(latexs)
| 5,632 |
Python
| 31.005682 | 141 | 0.543146 |
elharirymatteo/RANS/omniisaacgymenvs/utils/eval_metrics.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import pandas as pd
import numpy as np
def compute_average_linear_velocity(ep_data: dict) -> float:
"""Compute the average linear velocity of the agent.
Args:
ep_data (dict): Dictionary containing the data of an episode.
Returns:
float: Average linear velocity of the agent."""
return np.mean(np.linalg.norm(ep_data["obs"][:, :, 2:4], axis=2))
def compute_average_angular_velocity(ep_data: dict) -> float:
"""Compute the average angular velocity of the agent.
Args:
ep_data (dict): Dictionary containing the data of an episode.
Returns:
float: Average angular velocity of the agent."""
return np.mean(np.abs(ep_data["obs"][:, :, 4]))
def compute_average_action_count(ep_data: dict) -> float:
"""Compute the average number of actions taken by the agent.
Args:
ep_data (dict): Dictionary containing the data of an episode.
Returns:
float: Average number of actions taken by the agent."""
return np.mean(np.sum(ep_data["act"] != 0, axis=2))
def build_distance_dataframe(distances: np.ndarray, threshold: float) -> tuple:
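    """Builds the intermediate dataframes used by the success-rate metrics.
    Args:
        distances (np.ndarray): (steps, episodes) array of distances to the target.
        threshold (float): Distance under which the target counts as reached.
    Returns:
        tuple: a boolean dataframe flagging steps within 7.5x the threshold, and
            the per-episode indices of the first step under the threshold and
            under half the threshold (-1 when the threshold is never reached)."""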
distances_df = pd.DataFrame(
distances, columns=[f"Ep_{i}" for i in range(distances.shape[1])]
)
# get a boolean dataframe where True means that the distance is less than the threshold
less_than_thr_df = distances_df.lt(threshold)
threshold_2 = threshold / 2
less_than_thr2_df = distances_df.lt(threshold_2)
# get the index of the first True value for each episode and fill with -1 if there is no True value
first_less_than_thr_idxs = less_than_thr_df.idxmax().where(
less_than_thr_df.any(), -1
)
first_less_than_thr2_idxs = less_than_thr2_df.idxmax().where(
less_than_thr2_df.any(), -1
)
margin = threshold * 7.5
less_than_margin_df = distances_df.lt(margin)
return less_than_margin_df, first_less_than_thr_idxs, first_less_than_thr2_idxs
def check_stay(
less_than_margin_df: pd.DataFrame,
first_less_than_thr_idxs: pd.DataFrame,
first_less_than_thr2_idxs: pd.DataFrame,
) -> tuple:
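    """Computes the success rates and the success-and-stay rate.
    An episode is successful if it ever comes within the threshold; it "stays"
    if every step after that first success remains inside the 7.5x-threshold
    margin computed by build_distance_dataframe.
    Returns:
        tuple: success rate (%), success rate for half the threshold (%), and
            the fraction of episodes that succeed and stay."""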
all_true_after_index = pd.DataFrame(index=less_than_margin_df.columns)
all_true_after_index["all_true"] = less_than_margin_df.apply(
lambda column: column.loc[first_less_than_thr_idxs[column.name] :].all(), axis=0
)
success_and_stay_rate = all_true_after_index.value_counts(normalize=True)
success_and_stay_rate = (
success_and_stay_rate[True] if True in success_and_stay_rate.index else 0
)
success_rate_thr = (first_less_than_thr_idxs > -1).mean() * 100
success_rate_thr2 = (first_less_than_thr2_idxs > -1).mean() * 100
return success_rate_thr, success_rate_thr2, success_and_stay_rate
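# Minimal sketch (illustrative only, never called): shows how the two helpers
# above combine into success rates, using synthetic data standing in for
# 4 episodes of 100 steps each.
def _example_success_rates_on_synthetic_data() -> None:
    rng = np.random.default_rng(0)
    distances = np.abs(rng.normal(0.05, 0.02, size=(100, 4)))  # meters, (steps, episodes)
    margin_df, thr_idxs, thr2_idxs = build_distance_dataframe(distances, threshold=0.02)
    sr, sr2, stay = check_stay(margin_df, thr_idxs, thr2_idxs)
    print("success:", sr, "success (half threshold):", sr2, "success and stay:", stay)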
def print_success(
success_rate_thr: float,
success_rate_thr2: float,
success_and_stay_rate: float,
threshold: float,
print_intermediate: bool = False,
) -> None:
if print_intermediate:
print(f"Success rate with threshold {threshold}: {success_rate_thr}")
print(f"Success rate with threshold {threshold/2}: {success_rate_thr2}")
print(
f"Success rate and stay with margin {threshold*7.5}: {success_and_stay_rate * 100}"
)
def get_GoToPose_success_rate_new(
ep_data: dict, threshold: float = 0.02, print_intermediate: bool = False
) -> dict:
"""Compute the success rate from the distances to the target.
Args:
distances (np.ndarray): Array of distances to the target for N episodes.
precision (float): Distance at which the target is considered reached.
Returns:
float: Success rate."""
distances = np.linalg.norm(ep_data["obs"][:, :, 6:8], axis=2)
dist = distances
avg_p005 = np.mean([dist < 0.05])
avg_p002 = np.mean([dist < 0.02])
avg_p001 = np.mean([dist < 0.01])
heading = np.abs(np.arctan2(ep_data["obs"][:, :, -1], ep_data["obs"][:, :, -2]))
avg_h005 = np.mean([heading < np.pi * 5 / 180])
avg_h002 = np.mean([heading < np.pi * 2 / 180])
avg_h001 = np.mean([heading < np.pi * 1 / 180])
if print_intermediate:
print(
"percentage of time spent under (5cm, 2cm, 1cm):",
avg_p005 * 100,
avg_p002 * 100,
avg_p001 * 100,
)
print(
"percentage of time spent under (5deg, 2deg, 1deg):",
avg_h005 * 100,
avg_h002 * 100,
avg_h001 * 100,
)
success_rate_df = pd.DataFrame(
{
"PT5": [avg_p005],
"PT2": [avg_p002],
"PT1": [avg_p001],
"OT5": [avg_h005],
"OT2": [avg_h002],
"OT1": [avg_h001],
}
)
return {"pose": success_rate_df}
def get_GoToXY_success_rate(
ep_data: dict, threshold: float = 0.02, print_intermediate: bool = False
) -> dict:
"""Compute the success rate from the distances to the target.
Args:
distances (np.ndarray): Array of distances to the target for N episodes.
precision (float): Distance at which the target is considered reached.
Returns:
float: Success rate."""
distances = np.linalg.norm(ep_data["obs"][:, :, 6:8], axis=2)
(
less_than_margin_df,
first_less_than_thr_idxs,
first_less_than_thr2_idxs,
) = build_distance_dataframe(distances, threshold)
success_rate_thr, success_rate_thr2, success_and_stay_rate = check_stay(
less_than_margin_df, first_less_than_thr_idxs, first_less_than_thr2_idxs
)
print_success(
success_rate_thr,
success_rate_thr2,
success_and_stay_rate,
threshold,
print_intermediate,
)
success_rate_df = pd.DataFrame(
{
f"success_rate_{threshold}_m": [success_rate_thr],
f"success_rate_{threshold/2}_m": [success_rate_thr2],
f"success_and_stay_within_{threshold*7.5}_m": [success_and_stay_rate * 100],
}
)
return {"position": success_rate_df}
def get_GoToPose_results(
ep_data: dict,
position_threshold: float = 0.02,
heading_threshold: float = 0.087,
print_intermediate: bool = False,
) -> None:
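    """Computes and prints the full metric suite (success rates, average
    velocities and average action count) for a batch of go-to-pose episodes."""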
new_SR = get_GoToPose_success_rate_new(ep_data, print_intermediate=False)
old_SR = get_GoToPose_success_rate(ep_data, print_intermediate=False)
alv = compute_average_linear_velocity(ep_data)
aav = compute_average_angular_velocity(ep_data)
aac = compute_average_action_count(ep_data) / 8
ordered_metrics_keys = [
"PA1",
"PA2",
"PSA",
"OA1",
"OA2",
"OSA",
"ALV",
"AAV",
"AAC",
"PT5",
"PT2",
"PT1",
"OT5",
"OT2",
"OT1",
]
ordered_metrics_descriptions = [
"Position reached below 0.02 m of the target",
"Position reached below 0.01 m of the target",
"Position success and stay within 0.15 m",
"Orientation reached below 0.087 rad of the target",
"Orientation reached below 0.0435 rad of the target",
"Orientation success and stay within 0.6525 rad",
"Average linear velocity",
"Average angular velocity",
"Average action count",
"Percentage of time spent within 0.05 m of the target",
"Percentage of time spent within 0.02 m of the target",
"Percentage of time spent within 0.01 m of the target",
"Percentage of time spent within 0.05 rad of the target",
"Percentage of time spent within 0.02 rad of the target",
"Percentage of time spent within 0.01 rad of the target",
]
ordered_metrics_units = [
"%",
"%",
"%",
"%",
"%",
"%",
"m/s",
"rad/s",
"N",
"%",
"%",
"%",
"%",
"%",
"%",
]
ordered_metrics_multipliers = [
1,
1,
1,
1,
1,
1,
1,
1,
1,
100,
100,
100,
100,
100,
100,
]
metrics = np.array(
[
old_SR["position"]["success_rate_0.02_m"][0], # PA1
old_SR["position"]["success_rate_0.01_m"][0], # PA2
old_SR["position"]["success_and_stay_within_0.15_m"][0], # PSA
old_SR["heading"]["success_rate_0.087_rad"][0], # OA1
old_SR["heading"]["success_rate_0.0435_rad"][0], # OA2
old_SR["heading"]["success_and_stay_within_0.6525_rad"][0], # OSA
alv, # ALV
aav, # AAV
aac, # AAC
new_SR["pose"]["PT5"][0], # PT5
new_SR["pose"]["PT2"][0], # PT2
new_SR["pose"]["PT1"][0], # PT1
new_SR["pose"]["OT5"][0], # OT5
new_SR["pose"]["OT2"][0], # OT2
new_SR["pose"]["OT1"][0], # OT1
]
)
# Print the metrics line by line
print(f"Metrics acquired using a sample of {ep_data['act'].shape[1]}:")
for i, (metric, unit, mult, desc) in enumerate(
zip(
ordered_metrics_keys,
ordered_metrics_units,
ordered_metrics_multipliers,
ordered_metrics_descriptions,
)
):
print(f" + {metric}: {metrics[i]*mult:.2f}{unit}. {desc}.")
return
def get_GoToPose_success_rate(
ep_data: dict,
position_threshold: float = 0.02,
heading_threshold: float = 0.087,
print_intermediate: bool = False,
) -> dict:
"""Compute the success rate from the distances to the target.
Args:
distances (np.ndarray): Array of distances to the target for N episodes.
precision (float): Distance at which the target is considered reached.
Returns:
float: Success rate."""
position_distances = np.linalg.norm(ep_data["obs"][:, :, 6:8], axis=2)
heading_distances = np.abs(
np.arctan2(ep_data["obs"][:, :, 9], ep_data["obs"][:, :, 8])
)
(
less_than_margin_df,
first_less_than_thr_idxs,
first_less_than_thr2_idxs,
) = build_distance_dataframe(position_distances, position_threshold)
success_rate_thr, success_rate_thr2, success_and_stay_rate = check_stay(
less_than_margin_df, first_less_than_thr_idxs, first_less_than_thr2_idxs
)
print_success(
success_rate_thr,
success_rate_thr2,
success_and_stay_rate,
position_threshold,
print_intermediate,
)
position_success_rate_df = pd.DataFrame(
{
f"success_rate_{position_threshold}_m": [success_rate_thr],
f"success_rate_{position_threshold/2}_m": [success_rate_thr2],
f"success_and_stay_within_{position_threshold*7.5}_m": [
success_and_stay_rate * 100
],
}
)
(
less_than_margin_df,
first_less_than_thr_idxs,
first_less_than_thr2_idxs,
) = build_distance_dataframe(heading_distances, heading_threshold)
success_rate_thr, success_rate_thr2, success_and_stay_rate = check_stay(
less_than_margin_df, first_less_than_thr_idxs, first_less_than_thr2_idxs
)
print_success(
success_rate_thr,
success_rate_thr2,
success_and_stay_rate,
heading_threshold,
print_intermediate,
)
heading_success_rate_df = pd.DataFrame(
{
f"success_rate_{heading_threshold}_rad": [success_rate_thr],
f"success_rate_{heading_threshold/2}_rad": [success_rate_thr2],
f"success_and_stay_within_{heading_threshold*7.5}_rad": [
success_and_stay_rate * 100
],
}
)
return {"position": position_success_rate_df, "heading": heading_success_rate_df}
def get_TrackXYVelocity_success_rate(
ep_data: dict, threshold: float = 0.15, print_intermediate: bool = False
) -> dict:
"""Compute the success rate from the distances to the target.
Args:
distances (np.ndarray): Array of distances to the target for N episodes.
precision (float): Distance at which the target is considered reached.
Returns:
float: Success rate."""
distances = np.linalg.norm(ep_data["obs"][:, :, 6:8], axis=2)
(
less_than_margin_df,
first_less_than_thr_idxs,
first_less_than_thr2_idxs,
) = build_distance_dataframe(distances, threshold)
success_rate_thr, success_rate_thr2, success_and_stay_rate = check_stay(
less_than_margin_df, first_less_than_thr_idxs, first_less_than_thr2_idxs
)
print_success(
success_rate_thr,
success_rate_thr2,
success_and_stay_rate,
threshold,
print_intermediate,
)
success_rate_df = pd.DataFrame(
{
f"success_rate_{threshold}_m/s": [success_rate_thr],
f"success_rate_{threshold/2}_m/s": [success_rate_thr2],
f"success_and_stay_within_{threshold*7.5}_m/s": [
success_and_stay_rate * 100
],
}
)
return {"xy_velocity": success_rate_df}
def get_TrackXYOVelocity_success_rate(
ep_data: dict,
xy_threshold: float = 0.15,
omega_threshold: float = 0.3,
print_intermediate: bool = False,
) -> dict:
    """Compute the success rate from the linear and angular velocity errors.
    Args:
        ep_data (dict): Dictionary containing the data of N episodes.
        xy_threshold (float): Linear velocity error at which the target is considered reached.
        omega_threshold (float): Angular velocity error at which the target is considered reached.
        print_intermediate (bool): Whether to print the intermediate rates.
    Returns:
        dict: Dataframes of success rates under the "xy_velocity" and "omega_velocity" keys."""
xy_distances = np.linalg.norm(ep_data["obs"][:, :, 6:8], axis=2)
omega_distances = np.abs(ep_data["obs"][:, :, 8])
(
less_than_margin_df,
first_less_than_thr_idxs,
first_less_than_thr2_idxs,
) = build_distance_dataframe(xy_distances, xy_threshold)
success_rate_thr, success_rate_thr2, success_and_stay_rate = check_stay(
less_than_margin_df, first_less_than_thr_idxs, first_less_than_thr2_idxs
)
print_success(
success_rate_thr,
success_rate_thr2,
success_and_stay_rate,
xy_threshold,
print_intermediate,
)
xy_success_rate_df = pd.DataFrame(
{
f"success_rate_{xy_threshold}_m/s": [success_rate_thr],
f"success_rate_{xy_threshold/2}_m/s": [success_rate_thr2],
f"success_and_stay_within_{xy_threshold*7.5}_m/s": [
success_and_stay_rate * 100
],
}
)
(
less_than_margin_df,
first_less_than_thr_idxs,
first_less_than_thr2_idxs,
) = build_distance_dataframe(omega_distances, omega_threshold)
success_rate_thr, success_rate_thr2, success_and_stay_rate = check_stay(
less_than_margin_df, first_less_than_thr_idxs, first_less_than_thr2_idxs
)
print_success(
success_rate_thr,
success_rate_thr2,
success_and_stay_rate,
omega_threshold,
print_intermediate,
)
omega_success_rate_df = pd.DataFrame(
{
f"success_rate_{omega_threshold}_rad/s": [success_rate_thr],
f"success_rate_{omega_threshold/2}_rad/s": [success_rate_thr2],
f"success_and_stay_within_{omega_threshold*7.5}_rad/s": [
success_and_stay_rate * 100
],
}
)
return {"xy_velocity": xy_success_rate_df, "omega_velocity": omega_success_rate_df}
def get_success_rate_table(success_rate_df: pd.DataFrame) -> None:
print(
success_rate_df.to_latex(
index=False,
formatters={"name": str.upper},
float_format="{:.1f}".format,
bold_rows=True,
caption="Success rate for each experiment.",
label="tab:success_rate",
)
)
| 16,078 |
Python
| 30.902778 | 103 | 0.582473 |
elharirymatteo/RANS/omniisaacgymenvs/utils/aggregate_and_eval_mujoco_batch_data.py
|
from argparse import ArgumentParser
from omniisaacgymenvs.utils.eval_metrics import (
get_GoToPose_success_rate_new,
get_GoToPose_success_rate,
compute_average_action_count,
compute_average_angular_velocity,
compute_average_linear_velocity,
)
import pandas as pd
import numpy as np
import os
def parse_args():
parser = ArgumentParser()
parser.add_argument("--folder_path", type=str, default=None)
parser.add_argument("--save_metrics", action="store_true")
parser.add_argument("--use_xyzw", action="store_true")
parser.add_argument("--use_wxyz", action="store_true")
parser.add_argument("--display_metrics", action="store_true")
return parser.parse_args()
args = parse_args()
if args.use_xyzw and args.use_wxyz:
raise ValueError("Cannot use both xyzw and wxyz")
if not args.use_xyzw and not args.use_wxyz:
raise ValueError("Must use either xyzw or wxyz")
folder_path = args.folder_path
save_metrics = args.save_metrics
files = os.listdir(folder_path)
csvs = [f for f in files if f.endswith(".csv")]
eps_data = {}
eps_data["obs"] = []
eps_data["act"] = []
obss = []
acts = []
for csv in csvs:
df = pd.read_csv(os.path.join(folder_path, csv))
# Replicate an observation buffer
obs = np.zeros((df.shape[0], 10))
# Position
x = df["x_position"].to_numpy()
y = df["y_position"].to_numpy()
tx = df["x_position_target"].to_numpy()
ty = df["y_position_target"].to_numpy()
# Velocities
vx = df["x_linear_velocity"].to_numpy()
vy = df["y_linear_velocity"].to_numpy()
vrz = df["z_angular_velocity"].to_numpy()
# Heading
if args.use_xyzw:
quat = np.column_stack(
[
df["x_quaternion"],
df["y_quaternion"],
df["z_quaternion"],
df["w_quaternion"],
]
)
elif args.use_wxyz:
quat = np.column_stack(
[
df["w_quaternion"],
df["x_quaternion"],
df["y_quaternion"],
df["z_quaternion"],
]
)
else:
raise ValueError("Must use either xyzw or wxyz")
th = df["heading_target"].to_numpy()
siny_cosp = 2 * (quat[:, 0] * quat[:, 3] + quat[:, 1] * quat[:, 2])
cosy_cosp = 1 - 2 * (quat[:, 2] * quat[:, 2] + quat[:, 3] * quat[:, 3])
orient_z = np.arctan2(siny_cosp, cosy_cosp)
heading_error = np.arctan2(np.sin(th - orient_z), np.cos(th - orient_z))
obs[:, 0] = np.cos(orient_z)
obs[:, 1] = np.sin(orient_z)
obs[:, 2] = vx
obs[:, 3] = vy
obs[:, 4] = vrz
obs[:, 5] = 1
obs[:, 6] = tx - x
obs[:, 7] = ty - y
obs[:, 8] = np.cos(heading_error)
obs[:, 9] = np.sin(heading_error)
act = np.column_stack(
[
df["t_0"].to_numpy(),
df["t_1"].to_numpy(),
df["t_2"].to_numpy(),
df["t_3"].to_numpy(),
df["t_4"].to_numpy(),
df["t_5"].to_numpy(),
df["t_6"].to_numpy(),
df["t_7"].to_numpy(),
]
)
acts.append([act])
obss.append([obs])
eps_data["act"] = np.concatenate(acts, axis=0)
eps_data["obs"] = np.concatenate(obss, axis=0)
new_SR = get_GoToPose_success_rate_new(eps_data, print_intermediate=False)
old_SR = get_GoToPose_success_rate(eps_data, print_intermediate=False)
alv = compute_average_linear_velocity(eps_data)
aav = compute_average_angular_velocity(eps_data)
aac = compute_average_action_count(eps_data) / 8
ordered_metrics_keys = [
"PA1",
"PA2",
"PSA",
"OA1",
"OA2",
"OSA",
"ALV",
"AAV",
"AAC",
"PT5",
"PT2",
"PT1",
"OT5",
"OT2",
"OT1",
]
ordered_metrics_descriptions = [
"Position reached below 0.02 m of the target",
"Position reached below 0.01 m of the target",
"Position success and stay within 0.15 m",
"Orientation reached below 0.087 rad of the target",
"Orientation reached below 0.0435 rad of the target",
"Orientation success and stay within 0.6525 rad",
"Average linear velocity",
"Average angular velocity",
"Average action count",
"Percentage of time spent within 0.05 m of the target",
"Percentage of time spent within 0.02 m of the target",
"Percentage of time spent within 0.01 m of the target",
"Percentage of time spent within 0.05 rad of the target",
"Percentage of time spent within 0.02 rad of the target",
"Percentage of time spent within 0.01 rad of the target",
]
ordered_metrics_units = [
"%",
"%",
"%",
"%",
"%",
"%",
"m/s",
"rad/s",
"N",
"%",
"%",
"%",
"%",
"%",
"%",
]
ordered_metrics_multipliers = [
1,
1,
1,
1,
1,
1,
1,
1,
1,
100,
100,
100,
100,
100,
100,
]
metrics = np.array(
[
old_SR["position"]["success_rate_0.02_m"][0], # PA1
old_SR["position"]["success_rate_0.01_m"][0], # PA2
old_SR["position"]["success_and_stay_within_0.15_m"][0], # PSA
old_SR["heading"]["success_rate_0.087_rad"][0], # OA1
old_SR["heading"]["success_rate_0.0435_rad"][0], # OA2
old_SR["heading"]["success_and_stay_within_0.6525_rad"][0], # OSA
alv, # ALV
aav, # AAV
aac, # AAC
new_SR["pose"]["PT5"][0], # PT5
new_SR["pose"]["PT2"][0], # PT2
new_SR["pose"]["PT1"][0], # PT1
new_SR["pose"]["OT5"][0], # OT5
new_SR["pose"]["OT2"][0], # OT2
new_SR["pose"]["OT1"][0], # OT1
]
)
np.save(os.path.join(folder_path, "aggregated_results.npy"), metrics)
# Print the metrics line by line
print(f"Metrics acquired using a sample of {eps_data['act'].shape[0]}:")
if args.display_metrics:
for i, (metric, unit, mult, desc) in enumerate(
zip(
ordered_metrics_keys,
ordered_metrics_units,
ordered_metrics_multipliers,
ordered_metrics_descriptions,
)
):
print(f" + {metric}: {metrics[i]*mult:.2f}{unit}. {desc}.")
| 6,081 |
Python
| 25.329004 | 76 | 0.551554 |
elharirymatteo/RANS/omniisaacgymenvs/utils/shape_utils.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from pxr import UsdGeom, Gf, UsdShade, Sdf, Usd, UsdPhysics
import omni
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from omni.isaac.core.utils.string import find_unique_string_name
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.materials import PreviewSurface
import numpy as np
def setXformOp(prim, value, property):
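    """Sets the value of the xform op of the given type on the prim, creating the op first if it does not exist."""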
xform = UsdGeom.Xformable(prim)
op = None
for xformOp in xform.GetOrderedXformOps():
if xformOp.GetOpType() == property:
op = xformOp
if op:
xform_op = op
else:
xform_op = xform.AddXformOp(property, UsdGeom.XformOp.PrecisionDouble, "")
xform_op.Set(value)
def setScale(prim, value):
setXformOp(prim, value, UsdGeom.XformOp.TypeScale)
def setTranslate(prim, value):
setXformOp(prim, value, UsdGeom.XformOp.TypeTranslate)
def setRotateXYZ(prim, value):
setXformOp(prim, value, UsdGeom.XformOp.TypeRotateXYZ)
def setOrient(prim, value):
setXformOp(prim, value, UsdGeom.XformOp.TypeOrient)
def setTransform(prim, value: Gf.Matrix4d):
setXformOp(prim, value, UsdGeom.XformOp.TypeTransform)
def applyTransforms(prim, translation, rotation, scale, material=None):
setTranslate(prim, Gf.Vec3d(translation))
setOrient(prim, Gf.Quatd(rotation[-1], Gf.Vec3d(rotation[:3])))
setScale(prim, Gf.Vec3d(scale))
if material is not None:
applyMaterial(prim, material)
def createPrim(prim_path, name="/body", geom_type=UsdGeom.Cylinder):
obj_prim_path = prim_path + name
if is_prim_path_valid(obj_prim_path):
prim = get_prim_at_path(obj_prim_path)
if not prim.IsA(geom_type):
            raise Exception(
                "The prim at path {} cannot be parsed as a {} object".format(
                    obj_prim_path, geom_type.__name__
                )
            )
geom = geom_type(prim)
else:
geom = geom_type.Define(get_current_stage(), obj_prim_path)
prim = get_prim_at_path(obj_prim_path)
return geom, prim
def createColor(stage: Usd.Stage, material_path: str, color: list):
"""
Creates a color material."""
material_path = omni.usd.get_stage_next_free_path(stage, material_path, False)
material = UsdShade.Material.Define(stage, material_path)
shader = UsdShade.Shader.Define(stage, material_path + "/shader")
shader.CreateIdAttr("UsdPreviewSurface")
shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Float3).Set(Gf.Vec3f(color))
material.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface")
return material
def applyMaterial(prim: Usd.Prim, material: UsdShade.Material) -> None:
"""
Applies a material to a prim."""
binder = UsdShade.MaterialBindingAPI.Apply(prim)
binder.Bind(material)
def getCurrentStage():
return omni.usd.get_context().get_stage()
def applyCollider(prim: Usd.Prim, enable: bool = False) -> UsdPhysics.CollisionAPI:
"""
Applies a ColliderAPI to a prim.
Args:
prim (Usd.Prim): The prim to apply the ColliderAPI.
enable (bool): Enable or disable the collider.
Returns:
UsdPhysics.CollisionAPI: The ColliderAPI.
"""
collider = UsdPhysics.CollisionAPI.Apply(prim)
collider.CreateCollisionEnabledAttr(enable)
return collider
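# --- Hedged usage sketch (not part of the original module) ---
# Shows how the helpers above compose when building a simple colored,
# collidable marker. The prim and material paths are illustrative placeholders
# and an open Isaac Sim stage is assumed.
def _example_build_marker(prim_path: str = "/World/marker"):
    """Sketch only: create a sphere prim, color it, and enable collisions."""
    stage = getCurrentStage()
    geom, prim = createPrim(prim_path, name="/body", geom_type=UsdGeom.Sphere)
    material = createColor(stage, "/World/Looks/marker_material", [1.0, 0.5, 0.0])
    applyTransforms(prim, [0, 0, 1], [0, 0, 0, 1], [1, 1, 1], material=material)
    applyCollider(prim, True)
    return geom, prim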
class Pin:
def __init__(self, prim_path, ball_radius, poll_radius, poll_length):
if ball_radius is None:
ball_radius = 0.1
if poll_radius is None:
poll_radius = 0.02
if poll_length is None:
poll_length = 2
self.ball_geom, ball_prim = createPrim(
prim_path, name="/ball", geom_type=UsdGeom.Sphere
)
self.poll_geom, poll_prim = createPrim(
prim_path, name="/poll", geom_type=UsdGeom.Cylinder
)
applyTransforms(poll_prim, [0, 0, -poll_length / 2], [0, 0, 0, 1], [1, 1, 1])
applyTransforms(ball_prim, [0, 0, 0], [0, 0, 0, 1], [1, 1, 1])
def updateExtent(self):
radius = self.getBallRadius()
self.ball_geom.GetExtentAttr().Set(
[Gf.Vec3f([-radius, -radius, -radius]), Gf.Vec3f([radius, radius, radius])]
)
radius = self.getPollRadius()
height = self.getPollLength()
self.poll_geom.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
def setBallRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.ball_geom.GetRadiusAttr().Set(radius)
return
def getBallRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.ball_geom.GetRadiusAttr().Get()
def setPollRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.poll_geom.GetRadiusAttr().Set(radius)
return
def getPollRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.poll_geom.GetRadiusAttr().Get()
    def setPollLength(self, length: float) -> None:
        """[summary]
        Args:
            length (float): [description]
        """
        self.poll_geom.GetHeightAttr().Set(length)
        return
def getPollLength(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.poll_geom.GetHeightAttr().Get()
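# Hedged example (assumes an open Isaac Sim stage): a Pin is a ball sitting on
# top of a vertical poll. updateExtent() should be called after any resize so
# the cached bounds match the new attributes. Not part of the original module.
def _example_make_pin(prim_path: str = "/World/pin"):
    pin = Pin(prim_path, ball_radius=0.1, poll_radius=0.02, poll_length=2.0)
    pin.setBallRadius(0.15)
    pin.updateExtent()
    return pin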
class Pin3D:
def __init__(self, prim_path, ball_radius, poll_radius, poll_length):
if ball_radius is None:
ball_radius = 0.05
if poll_radius is None:
poll_radius = 0.02
if poll_length is None:
poll_length = 2
red_material = createColor(
getCurrentStage(), "/World/Looks/red_material", [1, 0, 0]
)
green_material = createColor(
getCurrentStage(), "/World/Looks/green_material", [0, 1, 0]
)
blue_material = createColor(
getCurrentStage(), "/World/Looks/blue_material", [0, 0, 1]
)
self.ball11_geom, ball11_prim = createPrim(
prim_path, name="/ball_11", geom_type=UsdGeom.Sphere
)
self.ball12_geom, ball12_prim = createPrim(
prim_path, name="/ball_12", geom_type=UsdGeom.Sphere
)
self.ball21_geom, ball21_prim = createPrim(
prim_path, name="/ball_21", geom_type=UsdGeom.Sphere
)
self.ball22_geom, ball22_prim = createPrim(
prim_path, name="/ball_22", geom_type=UsdGeom.Sphere
)
self.ball31_geom, ball31_prim = createPrim(
prim_path, name="/ball_31", geom_type=UsdGeom.Sphere
)
self.ball32_geom, ball32_prim = createPrim(
prim_path, name="/ball_32", geom_type=UsdGeom.Sphere
)
self.poll1_geom, poll1_prim = createPrim(
prim_path, name="/poll_1", geom_type=UsdGeom.Cylinder
)
self.poll2_geom, poll2_prim = createPrim(
prim_path, name="/poll_2", geom_type=UsdGeom.Cylinder
)
self.poll3_geom, poll3_prim = createPrim(
prim_path, name="/poll_3", geom_type=UsdGeom.Cylinder
)
# Z Axis
applyTransforms(poll1_prim, [0, 0, 0], [0, 0, 0, 1], [1, 1, 1], blue_material)
applyTransforms(
ball11_prim, [0, 0, poll_length / 2], [0, 0, 0, 1], [1, 1, 1], blue_material
)
applyTransforms(
ball12_prim,
[0, 0, -poll_length / 2],
[0, 0, 0, 1],
[1, 1, 1],
blue_material,
)
# Y Axis
applyTransforms(
poll2_prim, [0, 0, 0], [0.707, 0, 0, 0.707], [1, 1, 1], green_material
)
applyTransforms(
ball21_prim,
[0, poll_length / 2, 0],
[0, 0, 0, 1],
[1, 1, 1],
green_material,
)
applyTransforms(
ball22_prim,
[0, -poll_length / 2, 0],
[0, 0, 0, 1],
[1, 1, 1],
green_material,
)
# X Axis
applyTransforms(
poll3_prim, [0, 0, 0], [0, 0.707, 0, 0.707], [1, 1, 1], red_material
)
applyTransforms(
ball31_prim,
[poll_length / 2.0, 0, 0],
[0, 0.707, 0, 0.707],
[1, 1, 1],
red_material,
)
applyTransforms(
ball32_prim,
[-poll_length / 2.0, 0, 0],
[0, 0.707, 0, 0.707],
[1, 1, 1],
red_material,
)
def updateExtent(self):
radius = self.getBallRadius()
self.ball11_geom.GetExtentAttr().Set(
[Gf.Vec3f([-radius, -radius, -radius]), Gf.Vec3f([radius, radius, radius])]
)
self.ball21_geom.GetExtentAttr().Set(
[Gf.Vec3f([-radius, -radius, -radius]), Gf.Vec3f([radius, radius, radius])]
)
self.ball31_geom.GetExtentAttr().Set(
[Gf.Vec3f([-radius, -radius, -radius]), Gf.Vec3f([radius, radius, radius])]
)
self.ball12_geom.GetExtentAttr().Set(
[Gf.Vec3f([-radius, -radius, -radius]), Gf.Vec3f([radius, radius, radius])]
)
self.ball22_geom.GetExtentAttr().Set(
[Gf.Vec3f([-radius, -radius, -radius]), Gf.Vec3f([radius, radius, radius])]
)
self.ball32_geom.GetExtentAttr().Set(
[Gf.Vec3f([-radius, -radius, -radius]), Gf.Vec3f([radius, radius, radius])]
)
radius = self.getPollRadius()
height = self.getPollLength()
self.poll1_geom.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
self.poll2_geom.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
self.poll3_geom.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
def setBallRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.ball11_geom.GetRadiusAttr().Set(radius)
self.ball21_geom.GetRadiusAttr().Set(radius)
self.ball31_geom.GetRadiusAttr().Set(radius)
self.ball12_geom.GetRadiusAttr().Set(radius)
self.ball22_geom.GetRadiusAttr().Set(radius)
self.ball32_geom.GetRadiusAttr().Set(radius)
return
def getBallRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.ball11_geom.GetRadiusAttr().Get()
def setPollRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.poll1_geom.GetRadiusAttr().Set(radius)
self.poll2_geom.GetRadiusAttr().Set(radius)
self.poll3_geom.GetRadiusAttr().Set(radius)
return
def getPollRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.poll1_geom.GetRadiusAttr().Get()
    def setPollLength(self, length: float) -> None:
        """[summary]
        Args:
            length (float): [description]
        """
        self.poll1_geom.GetHeightAttr().Set(length)
        self.poll2_geom.GetHeightAttr().Set(length)
        self.poll3_geom.GetHeightAttr().Set(length)
        return
def getPollLength(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.poll1_geom.GetHeightAttr().Get()
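# Hedged note, inferred from the transforms above: Pin3D renders an RGB axis
# triad (X red, Y green, Z blue). Rotations are passed as (x, y, z, w)
# quaternions, with 0.707 ~ sin(pi/4) encoding the 90-degree poll rotations.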
class Arrow:
def __init__(
self,
prim_path,
body_radius,
body_length,
poll_radius,
poll_length,
head_radius,
head_length,
):
if body_radius is None:
body_radius = 0.1
if body_length is None:
body_length = 0.5
if poll_radius is None:
poll_radius = 0.02
if poll_length is None:
poll_length = 2
if head_radius is None:
head_radius = 0.25
if head_length is None:
head_length = 0.5
# createPrim()
self.body_geom, body_prim = createPrim(
prim_path, name="/body", geom_type=UsdGeom.Cylinder
)
self.poll_geom, poll_prim = createPrim(
prim_path, name="/poll", geom_type=UsdGeom.Cylinder
)
self.head_geom, head_prim = createPrim(
prim_path, name="/head", geom_type=UsdGeom.Cone
)
applyTransforms(poll_prim, [0, 0, -poll_length / 2], [0, 0, 0, 1], [1, 1, 1])
applyTransforms(
body_prim, [body_length / 2, 0, 0], [0, 0.707, 0, 0.707], [1, 1, 1]
)
applyTransforms(
head_prim,
[body_length + head_length / 2, 0, 0],
[0, 0.707, 0, 0.707],
[1, 1, 1],
)
def updateExtent(self):
radius = self.getBodyRadius()
height = self.getBodyLength()
self.body_geom.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
radius = self.getPollRadius()
height = self.getPollLength()
self.poll_geom.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
radius = self.getHeadRadius()
height = self.getHeadLength()
self.head_geom.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
return
def setBodyRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.body_geom.GetRadiusAttr().Set(radius)
return
def getBodyRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.body_geom.GetRadiusAttr().Get()
    def setBodyLength(self, length: float) -> None:
        """[summary]
        Args:
            length (float): [description]
        """
        self.body_geom.GetHeightAttr().Set(length)
        return
def getBodyLength(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.body_geom.GetHeightAttr().Get()
def setPollRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.poll_geom.GetRadiusAttr().Set(radius)
return
def getPollRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.poll_geom.GetRadiusAttr().Get()
    def setPollLength(self, length: float) -> None:
        """[summary]
        Args:
            length (float): [description]
        """
        self.poll_geom.GetHeightAttr().Set(length)
        return
def getPollLength(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.poll_geom.GetHeightAttr().Get()
def setHeadRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.head_geom.GetRadiusAttr().Set(radius)
return
def getHeadRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.head_geom.GetRadiusAttr().Get()
    def setHeadLength(self, length: float) -> None:
        """[summary]
        Args:
            length (float): [description]
        """
        self.head_geom.GetHeightAttr().Set(length)
        return
def getHeadLength(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.head_geom.GetHeightAttr().Get()
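# Hedged example (assumes an open Isaac Sim stage): the Arrow combines a
# cylinder body, a support poll and a cone head along +X. Extents must be
# refreshed after any resize. Not part of the original module.
def _example_make_arrow(prim_path: str = "/World/arrow"):
    arrow = Arrow(prim_path, 0.1, 0.5, 0.02, 2.0, 0.25, 0.5)
    arrow.setBodyLength(0.75)
    arrow.updateExtent()
    return arrow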
class Arrow3D:
def __init__(self, prim_path, body_radius, body_length, head_radius, head_length):
if body_radius is None:
body_radius = 0.1
if body_length is None:
body_length = 0.5
if head_radius is None:
head_radius = 0.25
if head_length is None:
head_length = 0.5
red_material = createColor(
getCurrentStage(), "/World/Looks/red_material", [1, 0, 0]
)
green_material = createColor(
getCurrentStage(), "/World/Looks/green_material", [0, 1, 0]
)
blue_material = createColor(
getCurrentStage(), "/World/Looks/blue_material", [0, 0, 1]
)
# createPrim()
self.body_geom1, body_prim1 = createPrim(
prim_path, name="/body1", geom_type=UsdGeom.Cylinder
)
self.body_geom2, body_prim2 = createPrim(
prim_path, name="/body2", geom_type=UsdGeom.Cylinder
)
self.body_geom3, body_prim3 = createPrim(
prim_path, name="/body3", geom_type=UsdGeom.Cylinder
)
self.head_geom1, head_prim1 = createPrim(
prim_path, name="/head1", geom_type=UsdGeom.Cone
)
self.head_geom2, head_prim2 = createPrim(
prim_path, name="/head2", geom_type=UsdGeom.Cone
)
self.head_geom3, head_prim3 = createPrim(
prim_path, name="/head3", geom_type=UsdGeom.Cone
)
# Z Axis
applyTransforms(
body_prim1,
[0, 0, body_length / 2],
[0, 0, 0, 1.0],
[1, 1, 1],
material=blue_material,
)
applyTransforms(
head_prim1,
[0, 0, body_length + head_length / 2],
[0, 0, 0, 1.0],
[1, 1, 1],
material=blue_material,
)
# Y Axis
applyTransforms(
body_prim2,
[0, body_length / 2, 0],
[0.707, 0, 0, 0.707],
[1, 1, 1],
material=green_material,
)
applyTransforms(
head_prim2,
[0, body_length + head_length / 2, 0],
[-0.707, 0, 0, 0.707],
[1, 1, 1],
material=green_material,
)
# X Axis
applyTransforms(
body_prim3,
[body_length / 2, 0, 0],
[0, 0.707, 0, 0.707],
[1, 1, 1],
material=red_material,
)
applyTransforms(
head_prim3,
[body_length + head_length / 2, 0, 0],
[0, 0.707, 0, 0.707],
[1, 1, 1],
material=red_material,
)
def updateExtent(self):
radius = self.getBodyRadius()
height = self.getBodyLength()
self.body_geom1.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
self.body_geom2.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
self.body_geom3.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
radius = self.getHeadRadius()
height = self.getHeadLength()
self.head_geom1.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
self.head_geom2.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
self.head_geom3.GetExtentAttr().Set(
[
Gf.Vec3f([-radius, -radius, -height / 2.0]),
Gf.Vec3f([radius, radius, height / 2.0]),
]
)
return
def setBodyRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.body_geom1.GetRadiusAttr().Set(radius)
self.body_geom2.GetRadiusAttr().Set(radius)
self.body_geom3.GetRadiusAttr().Set(radius)
return
def getBodyRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.body_geom1.GetRadiusAttr().Get()
    def setBodyLength(self, length: float) -> None:
        """[summary]
        Args:
            length (float): [description]
        """
        self.body_geom1.GetHeightAttr().Set(length)
        self.body_geom2.GetHeightAttr().Set(length)
        self.body_geom3.GetHeightAttr().Set(length)
        return
def getBodyLength(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.body_geom1.GetHeightAttr().Get()
def setHeadRadius(self, radius: float) -> None:
"""[summary]
Args:
radius (float): [description]
"""
self.head_geom1.GetRadiusAttr().Set(radius)
self.head_geom2.GetRadiusAttr().Set(radius)
self.head_geom3.GetRadiusAttr().Set(radius)
return
def getHeadRadius(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.head_geom1.GetRadiusAttr().Get()
    def setHeadLength(self, length: float) -> None:
        """[summary]
        Args:
            length (float): [description]
        """
        self.head_geom1.GetHeightAttr().Set(length)
        self.head_geom2.GetHeightAttr().Set(length)
        self.head_geom3.GetHeightAttr().Set(length)
        return
def getHeadLength(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.head_geom1.GetHeightAttr().Get()
class Gate:
def __init__(
self,
prim_path,
gate_width,
gate_thickness,
):
if gate_width is None:
gate_width = 1.0
if gate_thickness is None:
gate_thickness = 0.2
# Front
self.bottom_front_geom, self.bottom_front_prim = createPrim(
prim_path, name="/bottom_front", geom_type=UsdGeom.Cube
)
self.top_front_geom, self.top_front_prim = createPrim(
prim_path, name="/top_front", geom_type=UsdGeom.Cube
)
self.left_front_geom, self.left_front_prim = createPrim(
prim_path, name="/left_front", geom_type=UsdGeom.Cube
)
self.right_front_geom, self.right_front_prim = createPrim(
prim_path, name="/right_ftont", geom_type=UsdGeom.Cube
)
# Back
self.bottom_back_geom, self.bottom_back_prim = createPrim(
prim_path, name="/bottom_back", geom_type=UsdGeom.Cube
)
self.top_back_geom, self.top_back_prim = createPrim(
prim_path, name="/top_back", geom_type=UsdGeom.Cube
)
self.left_back_geom, self.left_back_prim = createPrim(
prim_path, name="/left_back", geom_type=UsdGeom.Cube
)
self.right_back_geom, self.right_back_prim = createPrim(
prim_path, name="/right_back", geom_type=UsdGeom.Cube
)
# Corners
self.top_right_corner_geom, self.top_right_corner_prim = createPrim(
prim_path, name="/top_right_corner", geom_type=UsdGeom.Cube
)
self.top_left_corner_geom, self.top_left_corner_prim = createPrim(
prim_path, name="/top_left_corner", geom_type=UsdGeom.Cube
)
self.bottom_left_corner_geom, self.bottom_left_corner_prim = createPrim(
prim_path, name="/bottom_left_corner", geom_type=UsdGeom.Cube
)
self.bottom_right_corner_geom, self.bottom_right_corner_prim = createPrim(
prim_path, name="/bottom_right_corner", geom_type=UsdGeom.Cube
)
# Colors
self.red_material = createColor(
getCurrentStage(), "/World/Looks/red_material", [1, 0, 0]
)
self.blue_material = createColor(
getCurrentStage(), "/World/Looks/blue_material", [0, 0, 1]
)
self.white_material = createColor(
getCurrentStage(), "/World/Looks/white_material", [1, 1, 1]
)
self.gate_thickness = gate_thickness
self.gate_width = gate_width
self.setThicknessInternal(gate_thickness)
self.applyTransformsInternal(gate_thickness, gate_width)
def applyTransformsInternal(self, gate_thickness, gate_width):
ratio = gate_width / gate_thickness
# Front (Red)
applyTransforms(
self.top_front_prim,
[gate_thickness / 4, 0, gate_width / 2 + gate_thickness / 2],
[0, 0, 0, 1],
[0.5, ratio, 1],
material=self.red_material,
)
applyTransforms(
self.bottom_front_prim,
[gate_thickness / 4, 0, -gate_width / 2 - gate_thickness / 2],
[0, 0, 0, 1],
[0.5, ratio, 1],
material=self.red_material,
)
applyTransforms(
self.left_front_prim,
[gate_thickness / 4, -gate_width / 2 - gate_thickness / 2, 0],
[0, 0, 0, 1],
[0.5, 1, ratio],
material=self.red_material,
)
applyTransforms(
self.right_front_prim,
[gate_thickness / 4, gate_width / 2 + gate_thickness / 2, 0],
[0, 0, 0, 1],
[0.5, 1, ratio],
material=self.red_material,
)
# Back (Blue)
applyTransforms(
self.top_back_prim,
[-gate_thickness / 4, 0, gate_width / 2 + gate_thickness / 2],
[0, 0, 0, 1],
[0.5, ratio, 1],
material=self.blue_material,
)
applyTransforms(
self.bottom_back_prim,
[-gate_thickness / 4, 0, -gate_width / 2 - gate_thickness / 2],
[0, 0, 0, 1],
[0.5, ratio, 1],
material=self.blue_material,
)
applyTransforms(
self.left_back_prim,
[-gate_thickness / 4, -gate_width / 2 - gate_thickness / 2, 0],
[0, 0, 0, 1],
[0.5, 1, ratio],
material=self.blue_material,
)
applyTransforms(
self.right_back_prim,
[-gate_thickness / 4, gate_width / 2 + gate_thickness / 2, 0],
[0, 0, 0, 1],
[0.5, 1, ratio],
material=self.blue_material,
)
# Corners (White)
applyTransforms(
self.top_right_corner_prim,
[
0,
gate_width / 2 + gate_thickness / 2,
gate_width / 2 + gate_thickness / 2,
],
[0, 0, 0, 1],
[1, 1, 1],
material=self.white_material,
)
applyTransforms(
self.top_left_corner_prim,
[
0,
-gate_width / 2 - gate_thickness / 2,
gate_width / 2 + gate_thickness / 2,
],
[0, 0, 0, 1],
[1, 1, 1],
material=self.white_material,
)
applyTransforms(
self.bottom_left_corner_prim,
[
0,
-gate_width / 2 - gate_thickness / 2,
-gate_width / 2 - gate_thickness / 2,
],
[0, 0, 0, 1],
[1, 1, 1],
material=self.white_material,
)
applyTransforms(
self.bottom_right_corner_prim,
[
0,
gate_width / 2 + gate_thickness / 2,
-gate_width / 2 - gate_thickness / 2,
],
[0, 0, 0, 1],
[1, 1, 1],
material=self.white_material,
)
def setThicknessInternal(self, thickness):
# Front
self.top_front_geom.GetSizeAttr().Set(thickness)
self.bottom_front_geom.GetSizeAttr().Set(thickness)
self.left_front_geom.GetSizeAttr().Set(thickness)
self.right_front_geom.GetSizeAttr().Set(thickness)
# Back
self.top_back_geom.GetSizeAttr().Set(thickness)
self.bottom_back_geom.GetSizeAttr().Set(thickness)
self.left_back_geom.GetSizeAttr().Set(thickness)
self.right_back_geom.GetSizeAttr().Set(thickness)
# Corners
self.top_right_corner_geom.GetSizeAttr().Set(thickness)
self.top_left_corner_geom.GetSizeAttr().Set(thickness)
self.bottom_left_corner_geom.GetSizeAttr().Set(thickness)
self.bottom_right_corner_geom.GetSizeAttr().Set(thickness)
def updateExtent(self):
return
def applyCollisions(self):
applyCollider(self.top_front_prim, True)
applyCollider(self.bottom_front_prim, True)
applyCollider(self.left_front_prim, True)
applyCollider(self.right_front_prim, True)
applyCollider(self.top_back_prim, True)
applyCollider(self.bottom_back_prim, True)
applyCollider(self.left_back_prim, True)
applyCollider(self.right_back_prim, True)
applyCollider(self.top_right_corner_prim, True)
applyCollider(self.top_left_corner_prim, True)
applyCollider(self.bottom_left_corner_prim, True)
applyCollider(self.bottom_right_corner_prim, True)
def setGateThickness(self, thickness: float) -> None:
"""[summary]
Args:
thickness (float): [description]
"""
self.gate_thickness = thickness
self.setThicknessInternal(self.gate_thickness)
self.applyTransformsInternal(self.gate_thickness, self.gate_width)
return
def getGateThickness(self) -> float:
"""[summary]
Returns:
            float: [description]
        """
        return self.gate_thickness
    def setGateWidth(self, width: float) -> None:
"""[summary]
Args:
width (float): [description]
"""
self.gate_width = width
self.setThicknessInternal(self.gate_thickness)
self.applyTransformsInternal(self.gate_thickness, self.gate_width)
return
def getGateWidth(self) -> float:
"""[summary]
Returns:
float: [description]
"""
return self.gate_width
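# Hedged example (assumes an open Isaac Sim stage): the gate front is red, the
# back blue and the corners white, so a camera can infer the crossing
# direction. Not part of the original module.
def _example_make_gate(prim_path: str = "/World/gate"):
    gate = Gate(prim_path, gate_width=1.0, gate_thickness=0.2)
    gate.applyCollisions()
    return gate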
| 31,432 |
Python
| 29.39942 | 88 | 0.530892 |
elharirymatteo/RANS/omniisaacgymenvs/utils/dock.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import os
from typing import Optional, Sequence
from dataclasses import dataclass
from omniisaacgymenvs.robots.articulations.utils.MFP_utils import *
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.articulations import Articulation, ArticulationView
@dataclass
class DockParameters:
"""
Docking station parameters.
Args:
usd_path (str): path to the usd file
show_axis (bool): show the axis of the docking station
mass (float): mass of the docking station
"""
usd_path: str = None
show_axis: bool = False
mass: float = 5.0
enable_collision: bool = True
class Dock(Articulation):
"""
Class to create xform prim for a docking station.
See parent class for more details about the arguments.
Args:
prim_path (str): path to the prim
name (str): name of the prim
position (Optional[Sequence[float]], optional): _description_. Defaults to None.
translation (Optional[Sequence[float]], optional): _description_. Defaults to None.
orientation (Optional[Sequence[float]], optional): _description_. Defaults to None.
scale (Optional[Sequence[float]], optional): _description_. Defaults to None.
visible (Optional[bool], optional): _description_. Defaults to True.
dock_params (dict, optional): dictionary of DockParameters. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "dock",
position: Optional[Sequence[float]] = None,
translation: Optional[Sequence[float]] = None,
orientation: Optional[Sequence[float]] = None,
scale: Optional[Sequence[float]] = None,
visible: Optional[bool] = True,
dock_params: dict = None,
):
self.dock_params = DockParameters(**dock_params)
self.stage = get_current_stage()
self.joints_path = "joints"
self.create_articulation_root(prim_path)
super().__init__(
prim_path=prim_path,
name=name,
position=position,
translation=translation,
orientation=orientation,
scale=scale,
visible=visible,
)
self.build()
return
    def create_articulation_root(self, prim_path) -> None:
"""
Create a root xform and link usd to it.
Args:
prim_path (str): path to the prim
"""
createArticulation(self.stage, prim_path)
add_reference_to_stage(os.path.join(os.getcwd(), self.dock_params.usd_path), prim_path)
axis_prim = get_prim_at_path(prim_path+"/dock/axis")
if self.dock_params.show_axis:
axis_prim.GetAttribute("visibility").Set("visible")
else:
axis_prim.GetAttribute("visibility").Set("invisible")
    def build(self) -> None:
"""
Apply RigidBody API, Collider, mass, and PlaneLock joints.
"""
self.joints_path, self.joints_prim = createXform(
self.stage, self.prim_path + "/" + self.joints_path
)
self.configure_core_prim()
self.createXYPlaneLock()
def createXYPlaneLock(self) -> None:
"""
Creates a set of joints to constrain the platform to the XY plane.
3DoF: translation on X and Y, rotation on Z.
"""
# Create anchor to world. It's fixed.
anchor_path, anchor_prim = createXform(
self.stage, self.prim_path + "/world_anchor"
)
setTranslate(anchor_prim, Gf.Vec3d(0, 0, 0))
setOrient(anchor_prim, Gf.Quatd(1, Gf.Vec3d(0, 0, 0)))
applyRigidBody(anchor_prim)
applyMass(anchor_prim, 0.0000001)
fixed_joint = createFixedJoint(
self.stage, self.joints_path, body_path2=anchor_path
)
# Create the bodies & joints allowing translation
x_tr_path, x_tr_prim = createXform(
self.stage, self.prim_path + "/x_translation_body"
)
y_tr_path, y_tr_prim = createXform(
self.stage, self.prim_path + "/y_translation_body"
)
setTranslate(x_tr_prim, Gf.Vec3d(0, 0, 0))
setOrient(x_tr_prim, Gf.Quatd(1, Gf.Vec3d(0, 0, 0)))
applyRigidBody(x_tr_prim)
applyMass(x_tr_prim, 0.0000001)
setTranslate(y_tr_prim, Gf.Vec3d(0, 0, 0))
setOrient(y_tr_prim, Gf.Quatd(1, Gf.Vec3d(0, 0, 0)))
applyRigidBody(y_tr_prim)
applyMass(y_tr_prim, 0.0000001)
tr_joint_x = createPrismaticJoint(
self.stage,
self.joints_path + "/dock_world_joint_x",
body_path1=anchor_path,
body_path2=x_tr_path,
axis="X",
enable_drive=False,
)
tr_joint_y = createPrismaticJoint(
self.stage,
self.joints_path + "/dock_world_joint_y",
body_path1=x_tr_path,
body_path2=y_tr_path,
axis="Y",
enable_drive=False,
)
# Adds the joint allowing for rotation
rv_joint_z = createRevoluteJoint(
self.stage,
self.joints_path + "/dock_world_joint_z",
body_path1=y_tr_path,
body_path2=self.core_path,
axis="Z",
enable_drive=False,
)
def configure_core_prim(self):
"""
Configures the body of the platform.
"""
self.core_path = self.prim_path+"/dock"
core = get_prim_at_path(self.core_path)
applyRigidBody(core)
applyCollider(core, self.dock_params.enable_collision)
applyMass(core, self.dock_params.mass)
class DockView(ArticulationView):
def __init__(
        self,
        prim_paths_expr: str,
name: Optional[str] = "DockView",
) -> None:
"""[summary]"""
super().__init__(
prim_paths_expr=prim_paths_expr,
name=name,
)
self.base = RigidPrimView(
prim_paths_expr=f"/World/envs/.*/dock/dock",
name="dock_base_view",
)
def get_plane_lock_indices(self):
self.lock_indices = [
self.get_dof_index("dock_world_joint_x"),
self.get_dof_index("dock_world_joint_y"),
self.get_dof_index("dock_world_joint_z"),
]
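# --- Hedged usage sketch (not part of the original module) ---
# A Dock is typically referenced once per environment and then wrapped in a
# DockView for vectorized access. The usd_path below is an illustrative
# placeholder; an Isaac Sim stage must be open.
def _example_spawn_dock(env_path: str = "/World/envs/env_0/dock"):
    params = {
        "usd_path": "assets/dock.usd",  # placeholder asset path
        "show_axis": False,
        "mass": 5.0,
        "enable_collision": True,
    }
    return Dock(prim_path=env_path, name="dock", translation=[0, 0, 0],
                dock_params=params)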
| 6,804 |
Python
| 34.815789 | 95 | 0.585097 |
elharirymatteo/RANS/omniisaacgymenvs/utils/plot_experiment.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
import seaborn as sns
from matplotlib.collections import LineCollection
def plot_episode_data_virtual(
ep_data: dict, save_dir: str, all_agents: bool = False
) -> None:
"""
Plots the evaluation data for a single agent across a set of evaluation episodes.
    The following metrics are aggregated across all episodes:
- distance to the goal
- reward
- velocities (angular and linear)
- actions
- trajectories: XY positions, no heading.
Args:
ep_data: dict: dictionary containing episode data
save_dir: str: directory where to save the plots
all_agents: bool: if True, plot average results over all agents, if False only the first agent is plotted
"""
reward_history = ep_data["rews"]
control_history = ep_data["act"]
state_history = ep_data["obs"]
    # Override the user arg if there is only one episode
all_agents = False if reward_history.shape[1] == 1 else all_agents
fig_count = 0
if all_agents:
best_agent = np.argmax(reward_history.sum(axis=0))
worst_agent = np.argmin(reward_history.sum(axis=0))
rand_agent = np.random.choice(
list(
set(range(0, reward_history.shape[1])) - set([best_agent, worst_agent])
)
)
print(
"Best agent: ",
best_agent,
"| Worst agent: ",
worst_agent,
"| Random Agent",
rand_agent,
)
# plot best and worst episodes data
plot_one_episode(
{k: np.array([v[best_agent] for v in vals]) for k, vals in ep_data.items()},
save_dir + "best_ep/",
)
plot_one_episode(
{
k: np.array([v[worst_agent] for v in vals])
for k, vals in ep_data.items()
},
save_dir + "worst_ep/",
)
plot_one_episode(
{k: np.array([v[rand_agent] for v in vals]) for k, vals in ep_data.items()},
save_dir + f"rand_ep_{rand_agent}/",
)
tgrid = np.linspace(0, len(reward_history), len(control_history))
args = {
"best_agent": best_agent,
"worst_agent": worst_agent,
"rand_agent": rand_agent,
"fig_count": fig_count,
"save_dir": save_dir,
"reward_history": reward_history,
"control_history": control_history,
"state_history": state_history,
"tgrid": tgrid,
}
shared_metrics = [plot_reward, plot_velocities, plot_actions_box_plot]
        task_flag = state_history[0, 0, 5].astype(int)
        task_metrics = []
if task_flag == 0: # GoToXY
task_metrics = [
plot_trajectories_GoToXY,
plot_distance_GoToXY,
plot_all_distances_GoToXY,
]
elif task_flag == 1: # GoToPose
task_metrics = [
plot_trajectories_GoToXY,
plot_distance_GoToPose,
plot_all_distances_GoToPose,
]
elif task_flag == 2: # TrackXYVelocity
task_metrics = [
plot_distance_TrackXYVelocity,
plot_all_distances_TrackXYVelocity,
]
elif task_flag == 3: # TrackXYOVelocity
task_metrics = [
plot_distance_TrackXYOVelocity,
plot_all_distances_TrackXYOVelocity,
]
else:
task_metrics = []
metrics = shared_metrics + task_metrics
for metric in metrics:
fig_count = metric(**args)
args["fig_count"] = fig_count
else:
        # plot_one_episode returns None, so there is no fig_count to capture here
        plot_one_episode(
{k: np.array([v[0] for v in vals]) for k, vals in ep_data.items()},
save_dir + "first_ep/",
)
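# Note on the observation layout assumed throughout this module (inferred from
# the indexing below; not stated explicitly in the original source):
#   obs[..., 0:2] -> cos(theta), sin(theta) of the heading
#   obs[..., 2:4] -> linear velocity (x, y)
#   obs[..., 4]   -> angular velocity around z
#   obs[..., 5]   -> task flag (0: GoToXY, 1: GoToPose,
#                    2: TrackXYVelocity, 3: TrackXYOVelocity)
#   obs[..., 6:]  -> task-specific error terms (position/heading/velocity)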
def plot_distance_GoToXY(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
best_agent: int,
worst_agent: int,
fig_count: int,
**kwargs,
) -> int:
"""
Plot mean, std_dev, best and worst distance over all episodes."""
all_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
fig_count += 1
fig, ax = plt.subplots()
ax.plot(
tgrid,
all_distances.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
all_distances.mean(axis=1) - all_distances.std(axis=1),
all_distances.mean(axis=1) + all_distances.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
all_distances.mean(axis=1) - 2 * all_distances.std(axis=1),
all_distances.mean(axis=1) + 2 * all_distances.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
all_distances[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
all_distances[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m]")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(f"Mean, best and worst distances over {all_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "mean_best_worst_position_distances")
return fig_count
def plot_all_distances_GoToXY(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
**kwargs,
) -> int:
"""
Plot all distances over all episodes."""
all_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
fig_count += 1
fig, ax = plt.subplots()
cmap = cm.get_cmap("tab20")
for j in range(all_distances.shape[1]):
ax.plot(
tgrid, all_distances[:, j], alpha=1.0, color=cmap(j % cmap.N), linewidth=1.0
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m]")
plt.title(f"All distances over {all_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "all_position_distances")
return fig_count
def plot_distance_GoToPose(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
best_agent: int,
worst_agent: int,
fig_count: int,
**kwargs,
) -> int:
"""
Plot mean, std_dev, best and worst distance over all episodes."""
all_position_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
shape = state_history.shape[:-1]
all_heading_distances = np.arctan2(
state_history[:, :, 9].flatten(), state_history[:, :, 8].flatten()
)
all_heading_distances = all_heading_distances.reshape(shape)
fig_count += 1
fig, ax = plt.subplots()
ax.plot(
tgrid,
all_position_distances.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
all_position_distances.mean(axis=1) - all_position_distances.std(axis=1),
all_position_distances.mean(axis=1) + all_position_distances.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
all_position_distances.mean(axis=1) - 2 * all_position_distances.std(axis=1),
all_position_distances.mean(axis=1) + 2 * all_position_distances.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
all_position_distances[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
all_position_distances[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m]")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(
f"Mean, best and worst distances over {all_position_distances.shape[1]} episodes"
)
plt.grid()
plt.savefig(save_dir + "mean_best_worst_position_distances")
fig_count += 1
fig, ax = plt.subplots()
ax.plot(
tgrid,
all_heading_distances.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
all_heading_distances.mean(axis=1) - all_heading_distances.std(axis=1),
all_heading_distances.mean(axis=1) + all_heading_distances.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
all_heading_distances.mean(axis=1) - 2 * all_heading_distances.std(axis=1),
all_heading_distances.mean(axis=1) + 2 * all_heading_distances.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
all_heading_distances[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
all_heading_distances[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [rad]")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(
f"Mean, best and worst distances over {all_heading_distances.shape[1]} episodes"
)
plt.grid()
plt.savefig(save_dir + "mean_best_worst_heading_distances")
return fig_count
def plot_all_distances_GoToPose(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
**kwargs,
) -> int:
"""
Plot all distances over all episodes."""
all_position_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
shape = state_history.shape[:-1]
all_heading_distances = np.arctan2(
state_history[:, :, 9].flatten(), state_history[:, :, 8].flatten()
)
all_heading_distances = all_heading_distances.reshape(shape)
fig_count += 1
fig, ax = plt.subplots()
cmap = cm.get_cmap("tab20")
for j in range(all_position_distances.shape[1]):
ax.plot(
tgrid,
all_position_distances[:, j],
alpha=1.0,
color=cmap(j % cmap.N),
linewidth=1.0,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m]")
plt.title(f"All distances over {all_position_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "all_position_distances")
fig_count += 1
fig, ax = plt.subplots()
cmap = cm.get_cmap("tab20")
for j in range(all_heading_distances.shape[1]):
ax.plot(
tgrid,
all_heading_distances[:, j],
alpha=1.0,
color=cmap(j % cmap.N),
linewidth=1.0,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [rad]")
plt.title(f"All distances over {all_heading_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "all_heading_distances")
return fig_count
def plot_distance_TrackXYVelocity(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
best_agent: int,
worst_agent: int,
fig_count: int,
**kwargs,
) -> int:
"""
Plot mean, std_dev, best and worst distance over all episodes."""
all_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
fig_count += 1
fig, ax = plt.subplots()
ax.plot(
tgrid,
all_distances.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
all_distances.mean(axis=1) - all_distances.std(axis=1),
all_distances.mean(axis=1) + all_distances.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
all_distances.mean(axis=1) - 2 * all_distances.std(axis=1),
all_distances.mean(axis=1) + 2 * all_distances.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
all_distances[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
all_distances[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m/s]")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(f"Mean, best and worst distances over {all_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "mean_best_worst_velocity_distances")
return fig_count
def plot_all_distances_TrackXYVelocity(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
**kwargs,
) -> int:
"""
Plot all distances over all episodes."""
all_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
fig_count += 1
fig, ax = plt.subplots()
cmap = cm.get_cmap("tab20")
for j in range(all_distances.shape[1]):
ax.plot(
tgrid, all_distances[:, j], alpha=1.0, color=cmap(j % cmap.N), linewidth=1.0
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m/s]")
plt.title(f"All distances over {all_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "all_velocity_distances")
return fig_count
def plot_distance_TrackXYOVelocity(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
best_agent: int,
worst_agent: int,
fig_count: int,
**kwargs,
) -> int:
"""
Plot mean, std_dev, best and worst distance over all episodes."""
all_xy_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
    # state_history[:, :, 8] is already 2D, so take the absolute error directly
    all_omega_distances = np.abs(state_history[:, :, 8])
fig_count += 1
fig, ax = plt.subplots()
ax.plot(
tgrid,
all_xy_distances.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
all_xy_distances.mean(axis=1) - all_xy_distances.std(axis=1),
all_xy_distances.mean(axis=1) + all_xy_distances.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
all_xy_distances.mean(axis=1) - 2 * all_xy_distances.std(axis=1),
all_xy_distances.mean(axis=1) + 2 * all_xy_distances.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
all_xy_distances[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
all_xy_distances[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m/s]")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(
f"Mean, best and worst distances over {all_xy_distances.shape[1]} episodes"
)
plt.grid()
plt.savefig(save_dir + "mean_best_worst_velocity_distances")
fig_count += 1
fig, ax = plt.subplots()
ax.plot(
tgrid,
all_omega_distances.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
all_omega_distances.mean(axis=1) - all_omega_distances.std(axis=1),
all_omega_distances.mean(axis=1) + all_omega_distances.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
all_omega_distances.mean(axis=1) - 2 * all_omega_distances.std(axis=1),
all_omega_distances.mean(axis=1) + 2 * all_omega_distances.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
all_omega_distances[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
all_omega_distances[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m/s]")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(
f"Mean, best and worst distances over {all_omega_distances.shape[1]} episodes"
)
plt.grid()
plt.savefig(save_dir + "mean_best_worst_velocity_distances")
return fig_count
def plot_all_distances_TrackXYOVelocity(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
**kwargs,
) -> int:
"""
Plot all distances over all episodes."""
all_xy_distances = np.linalg.norm(state_history[:, :, 6:8], axis=2)
    # state_history[:, :, 8] is already 2D, so take the absolute error directly
    all_omega_distances = np.abs(state_history[:, :, 8])
fig_count += 1
fig, ax = plt.subplots()
cmap = cm.get_cmap("tab20")
for j in range(all_xy_distances.shape[1]):
ax.plot(
tgrid,
all_xy_distances[:, j],
alpha=1.0,
color=cmap(j % cmap.N),
linewidth=1.0,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m/s]")
plt.title(f"All distances over {all_xy_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "all_velocity_distances")
fig_count += 1
fig, ax = plt.subplots()
cmap = cm.get_cmap("tab20")
for j in range(all_omega_distances.shape[1]):
ax.plot(
tgrid,
all_omega_distances[:, j],
alpha=1.0,
color=cmap(j % cmap.N),
linewidth=1.0,
)
plt.xlabel("Time steps")
plt.ylabel("Distance [rad/s]")
plt.title(f"All distances over {all_omega_distances.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "all_velocity_distances")
return fig_count
def plot_reward(
reward_history: np.ndarray,
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
best_agent: int,
worst_agent: int,
fig_count: int,
**kwargs,
) -> int:
"""
Plot mean, std_dev, best and worst reward over all episodes."""
fig_count += 1
fig, ax = plt.subplots()
ax.plot(
tgrid,
reward_history.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
reward_history.mean(axis=1) - reward_history.std(axis=1),
reward_history.mean(axis=1) + reward_history.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
reward_history.mean(axis=1) - 2 * reward_history.std(axis=1),
reward_history.mean(axis=1) + 2 * reward_history.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
reward_history[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
reward_history[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Reward")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(f"Mean, best and worst reward over {state_history.shape[1]} episodes")
plt.grid()
plt.savefig(save_dir + "mean_best_worst_rewards")
return fig_count
def plot_velocities(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
best_agent: int,
worst_agent: int,
fig_count: int,
**kwargs,
) -> int:
"""
Plot mean, std_dev, best and worst velocities over all episodes."""
fig_count += 1
fig, ax = plt.subplots()
ang_vel_z = state_history[:, :, 4:5][:, :, 0] # getting rid of the extra dimension
ax.plot(
tgrid,
ang_vel_z.mean(axis=1),
alpha=0.5,
color="blue",
label="mean_dist",
linewidth=1.5,
)
ax.fill_between(
tgrid,
ang_vel_z.mean(axis=1) - ang_vel_z.std(axis=1),
ang_vel_z.mean(axis=1) + ang_vel_z.std(axis=1),
color="blue",
alpha=0.4,
)
ax.fill_between(
tgrid,
ang_vel_z.mean(axis=1) - 2 * ang_vel_z.std(axis=1),
ang_vel_z.mean(axis=1) + 2 * ang_vel_z.std(axis=1),
color="blue",
alpha=0.2,
)
ax.plot(
tgrid,
ang_vel_z[:, best_agent],
alpha=0.5,
color="green",
label="best",
linewidth=1.5,
)
ax.plot(
tgrid,
ang_vel_z[:, worst_agent],
alpha=0.5,
color="red",
label="worst",
linewidth=1.5,
)
plt.xlabel("Time steps")
plt.ylabel("Angular speed [rad/s]")
plt.legend(["mean", "best", "worst", "1-std", "2-std"], loc="best")
plt.title(
f"Angular speed of mean, best and worst agents {ang_vel_z.shape[1]} episodes"
)
plt.grid()
plt.savefig(save_dir + "mean_best_worst_ang_velocities")
return fig_count
def plot_actions_histogram(
control_history: np.ndarray, save_dir: str, fig_count: int, **kwargs
) -> int:
"""
Plot mean number of thrusts over all episodes."""
fig_count += 1
plt.figure(fig_count)
plt.clf()
    # Reorder to (episodes, timesteps, thrusters); a plain reshape would scramble the data
    control_history = np.transpose(control_history, (1, 0, 2))
freq = pd.DataFrame(
data=np.array(
[control_history[i].sum(axis=0) for i in range(control_history.shape[0])]
),
columns=[f"T{i+1}" for i in range(control_history.shape[2])],
)
mean_freq = freq.mean()
plt.bar(mean_freq.index, mean_freq.values)
plt.title(f"Mean number of thrusts in {control_history.shape[0]} episodes")
plt.savefig(save_dir + "mean_actions_histogram")
return fig_count
def plot_actions_box_plot(
control_history: np.ndarray, save_dir: str, fig_count: int, **kwargs
) -> int:
"""
Plot box plot of actions over all episodes."""
fig_count += 1
plt.figure(fig_count)
plt.clf()
    # Reorder to (episodes, timesteps, thrusters); a plain reshape would scramble the data
    control_history = np.transpose(control_history, (1, 0, 2))
freq = pd.DataFrame(
data=np.array(
[control_history[i].sum(axis=0) for i in range(control_history.shape[0])]
),
columns=[f"T{i+1}" for i in range(control_history.shape[2])],
)
sns.boxplot(data=freq, orient="h")
plt.title(f"Mean number of thrusts in {control_history.shape[0]} episodes")
plt.savefig(save_dir + "actions_boxplot")
return fig_count
def plot_trajectories_GoToXY(
state_history: np.ndarray, save_dir: str, fig_count: int, **kwargs
) -> int:
"""
Plot trajectories of all agents in 2D space."""
fig_count += 1
plt.figure(fig_count)
plt.clf()
positions = state_history[:, :, 6:8]
cmap = cm.get_cmap("tab20")
for j in range(positions.shape[1]):
col = cmap(
j % cmap.N
) # Select a color from the colormap based on the current index
plt.plot(
positions[:, j, 0], positions[:, j, 1], color=col, alpha=1.0, linewidth=0.75
)
plt.xlabel("X [m]")
plt.ylabel("Y [m]")
plt.grid(alpha=0.3)
plt.title(f"Trajectories in 2D space [{positions.shape[1]} episodes]")
plt.gcf().dpi = 200
plt.savefig(save_dir + "multi_trajectories")
return fig_count
def plot_one_episode(
ep_data: dict,
save_dir: str = None,
show: bool = False,
debug: bool = False,
fig_count: int = 0,
) -> None:
"""
Plot episode metrics for a single agent.
ep_data: dictionary containing episode data
save_dir: directory where to save the plots
    show: if True, display each figure interactively
    debug: if True, also produce debug plots (e.g. raw cos/sin heading)
    """
os.makedirs(save_dir, exist_ok=True)
control_history = ep_data["act"]
reward_history = ep_data["rews"]
state_history = ep_data["obs"]
# save data to csv file
pd.DataFrame.to_csv(pd.DataFrame(control_history), save_dir + "actions.csv")
shared_metrics = [
plot_single_linear_vel,
plot_single_angular_vel,
plot_single_absolute_heading,
plot_single_rewards,
plot_single_action_histogram,
plot_single_actions,
]
if debug:
debug_metrics = [plot_single_heading_cos_sin]
else:
debug_metrics = []
    # setting the right task_data labels based on the task flag.
task_flag = state_history[0, 5].astype(int)
task_metrics = []
if task_flag == 0: # GoToXY
task_data_label = ["error_x", "error_y"]
task_metrics = [
plot_single_xy_position,
plot_single_xy_position_error,
plot_single_GoToXY_distance_to_target,
plot_single_GoToXY_log_distance_to_target,
]
elif task_flag == 1: # GoToPose
task_data_label = [
"error_x",
"error_y",
"cos_error_heading",
"sin_error_heading",
]
task_metrics = [
plot_single_xy_position,
plot_single_xy_pose,
plot_single_xy_position_error,
plot_single_heading_error,
plot_single_xy_position_heading,
plot_single_GoToPose_distance_to_target,
plot_single_GoToPose_log_distance_to_target,
]
elif task_flag == 2: # TrackXYVelocity
task_data_label = ["error_vx", "error_vy"]
task_metrics = [
plot_single_TrackXYVelocity_distance_to_target,
plot_single_TrackXYVelocity_log_distance_to_target,
]
elif task_flag == 3: # TrackXYOVelocity
task_data_label = ["error_vx", "error_vy", "error_omega"]
task_metrics = [
plot_single_TrackXYOVelocity_distance_to_target,
plot_single_TrackXYOVelocity_log_distance_to_target,
]
else:
task_data_label = []
task_metrics = []
# Generate plots
metrics = shared_metrics + task_metrics + debug_metrics
tgrid = np.linspace(0, len(control_history), len(control_history))
args = {
"state_history": state_history,
"control_history": control_history,
"reward_history": reward_history,
"save_dir": save_dir,
"fig_count": fig_count,
"show": show,
"tgrid": tgrid,
}
for metric in metrics:
fig_count = metric(**args)
args["fig_count"] = fig_count
df_cols = [
"cos_theta",
"sin_theta",
"lin_vel_x",
"lin_vel_y",
"ang_vel_z",
"task_flag",
] + task_data_label
pd.DataFrame.to_csv(
pd.DataFrame(state_history[:, : len(df_cols)], columns=df_cols),
save_dir + "states_episode.csv",
)
fig_count = 0
def plot_single_linear_vel(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot linear velocity of a single agent."""
lin_vels = state_history[:, 2:4]
# plot linear velocity
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, lin_vels[:, 0], color=cm.get_cmap("tab20")(0))
plt.plot(tgrid, lin_vels[:, 1], color=cm.get_cmap("tab20")(2))
plt.xlabel("Time steps")
plt.ylabel("Velocity [m/s]")
plt.legend(["x", "y"], loc="best")
plt.title("Velocity state history")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_linear_velocity")
if show:
plt.show()
return fig_count
def plot_single_angular_vel(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot angular velocity of a single agent."""
ang_vel_z = state_history[:, 4:5]
# plot angular speed (z coordinate)
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, ang_vel_z, color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Angular speed [rad/s]")
plt.legend(["z"], loc="best")
plt.title("Angular speed state history")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_angular_velocity")
if show:
plt.show()
return fig_count
def plot_single_heading_cos_sin(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot heading of a single agent with cos and sin representation."""
headings = state_history[:, :2]
# plot position (x, y coordinates)
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, headings[:, 0], color=cm.get_cmap("tab20")(0)) # cos
plt.plot(tgrid, headings[:, 1], color=cm.get_cmap("tab20")(2)) # sin
plt.xlabel("Time steps")
plt.ylabel("Heading")
plt.legend(["cos(${\\theta}$)", "sin(${\\theta}$)"], loc="best")
plt.title("Heading state history")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_heading_cos_sin")
if show:
plt.show()
return fig_count
def plot_single_absolute_heading(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot heading of a single agent."""
headings = state_history[:, :2]
angles = np.arctan2(headings[:, 1], headings[:, 0])
# plot position (x, y coordinates)
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, angles, color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Angle [rad]")
plt.legend(["${\\theta}$"], loc="best")
plt.title("Angle state history")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_heading")
if show:
plt.show()
return fig_count
def plot_single_actions(
control_history: np.ndarray, save_dir: str, fig_count: int, show: bool, **kwargs
) -> int:
"""
Plot actions of a single agent."""
fig_count += 1
plt.figure(fig_count)
plt.clf()
control_history_df = pd.DataFrame(data=control_history)
fig, axes = plt.subplots(
len(control_history_df.columns), 1, sharex=True, figsize=(8, 6)
)
# Select subset of colors from a colormap
colormap = cm.get_cmap("tab20")
num_colors = len(control_history_df.columns)
colors = [colormap(i) for i in range(0, num_colors * 2, 2)]
for i, column in enumerate(control_history_df.columns):
control_history_df[column].plot(ax=axes[i], color=colors[i])
axes[i].set_ylabel(f"T{column}")
plt.xlabel("Time steps")
if save_dir:
fig.savefig(save_dir + "single_actions")
if show:
plt.show()
return fig_count
def plot_single_action_histogram(
control_history: np.ndarray, save_dir: str, fig_count: int, show: bool, **kwargs
) -> int:
"""
Plot histogram of actions of a single agent."""
fig_count += 1
plt.figure(fig_count)
plt.clf()
control_history = np.array(control_history)
actions_df = pd.DataFrame(
control_history, columns=[f"T{i+1}" for i in range(control_history.shape[1])]
)
freq = actions_df.sum()
plt.bar(freq.index, freq.values, color=cm.get_cmap("tab20")(0))
plt.title("Number of thrusts in episode")
plt.tight_layout()
if save_dir:
plt.savefig(save_dir + "single_actions_hist")
if show:
plt.show()
return fig_count
def plot_single_rewards(
reward_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot rewards of a single agent."""
if any(reward_history):
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, reward_history, color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Reward")
plt.legend(["reward"], loc="best")
plt.title("Reward history")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_reward")
if show:
plt.show()
return fig_count
def plot_single_xy_position_error(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot position error of a single agent."""
pos_error = state_history[:, 6:8]
# plot position (x, y coordinates)
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, pos_error[:, 0], color=cm.get_cmap("tab20")(0))
plt.plot(tgrid, pos_error[:, 1], color=cm.get_cmap("tab20")(2))
plt.xlabel("Time steps")
plt.ylabel("Position [m]")
plt.legend(["x position", "y position"], loc="best")
plt.title("Position Error")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_xy_position_error")
if show:
plt.show()
return fig_count
def plot_single_heading_error(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot heading error of a single agent."""
heading_error = state_history[:, 8:]
heading_error = np.arctan2(heading_error[:, 1], heading_error[:, 0])
# plot position (x, y coordinates)
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, heading_error, color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Heading [rad]")
plt.title("Heading error")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_heading_heading_error")
if show:
plt.show()
return fig_count
def plot_single_xy_position(
state_history: np.ndarray, save_dir: str, fig_count: int, show: bool, **kwargs
) -> int:
"""
Plot position of a single agent."""
pos_error = state_history[:, 6:8]
# plot position (x, y coordinates)
fig_count += 1
plt.figure(fig_count)
plt.clf()
# Set aspect ratio to be equal
plt.gca().set_aspect("equal", adjustable="box")
x, y = pos_error[:, 0], pos_error[:, 1]
fig, ax = plt.subplots(figsize=(6, 6))
    # Set the x and y limits that define the zoomed-in inset region
x1, x2, y1, y2 = -0.07, 0.07, -0.08, 0.08
if y[0] > 0 and x[0] > 0:
location = 4
else:
location = 2 if (y[0] < 0 and x[0] < 0) else 1
axins = inset_axes(ax, width=1.5, height=1.25, loc=location)
ax.plot(x, y, color=cm.get_cmap("tab20")(0))
ax.set_xlabel("X [m]")
ax.set_ylabel("Y [m]")
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
axins.plot(x, y)
if save_dir:
fig.savefig(save_dir + "single_xy_trajectory")
if show:
plt.show()
return fig_count
def plot_single_xy_pose(
state_history: np.ndarray, save_dir: str, fig_count: int, show: bool, **kwargs
) -> int:
"""
Plot position of a single agent."""
pos_error = state_history[:, 6:8]
# plot position (x, y coordinates)
fig_count += 1
plt.figure(fig_count)
plt.clf()
# Set aspect ratio to be equal
plt.gca().set_aspect("equal", adjustable="box")
# Get the heading error values
heading_error = state_history[:, 8:]
heading_error = np.abs(np.arctan2(heading_error[:, 1], heading_error[:, 0]))
x, y = pos_error[:, 0], pos_error[:, 1]
segments = [
np.column_stack([x[i : i + 2], y[i : i + 2]]) for i in range(len(x) - 1)
]
fig, ax = plt.subplots(figsize=(7, 6))
    # Derive the axis limits from the x, y data plus a margin, since adding a LineCollection does not autoscale the axes
margin = 0.08
ax.set_xlim(min(x) - margin, max(x) + margin)
ax.set_ylim(min(y) - margin, max(y) + margin)
lc = LineCollection(segments, cmap="jet", array=heading_error)
line = ax.add_collection(lc)
plt.colorbar(line, label="heading error [rad]")
# ax.plot(x, y, color=cm.get_cmap('tab20')(0))
plt.grid(alpha=0.3)
ax.set_xlabel("X [m]")
ax.set_ylabel("Y [m]")
plt.grid(alpha=0.3)
if save_dir:
fig.savefig(save_dir + "single_pose_trajectory")
if show:
plt.show()
return fig_count
def plot_single_xy_position_heading(
state_history: np.ndarray, save_dir: str, fig_count: int, show: bool, **kwargs
) -> int:
"""
Plot position of a single agent."""
pos_error = state_history[:, 6:8]
heading = state_history[:, :2]
# plot position (x, y coordinates)
fig_count += 1
plt.figure(fig_count)
plt.clf()
# Set aspect ratio to be equal
plt.gca().set_aspect("equal", adjustable="box")
x, y = pos_error[:, 0], pos_error[:, 1]
c, s = heading[:, 0], heading[:, 1]
fig, ax = plt.subplots(figsize=(6, 6))
    # Set the x and y limits that define the zoomed-in inset region
x1, x2, y1, y2 = -0.07, 0.07, -0.08, 0.08
if y[0] > 0 and x[0] > 0:
location = 4
else:
location = 2 if (y[0] < 0 and x[0] < 0) else 1
axins = inset_axes(ax, width=1.5, height=1.25, loc=location)
ax.plot(x, y, color=cm.get_cmap("tab20")(0))
ax.quiver(x[::10], y[::10], s[::10], c[::10], color=cm.get_cmap("tab20")(0))
ax.set_xlabel("X [m]")
ax.set_ylabel("Y [m]")
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
axins.plot(x, y)
if save_dir:
fig.savefig(save_dir + "single_xy_trajectory_with_heading")
if show:
plt.show()
return fig_count
def plot_single_GoToXY_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot distance to target of a single agent."""
pos_error = state_history[:, 6:8]
    # plot distance to target over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(
tgrid,
np.linalg.norm(np.array([pos_error[:, 0], pos_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m]")
plt.legend(["abs dist"], loc="best")
plt.title("Distance to target")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_distance_to_position_target")
if show:
plt.show()
return fig_count
def plot_single_GoToXY_log_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot log distance to target of a single agent."""
pos_error = state_history[:, 6:8]
    # plot log distance to target over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.yscale("log")
plt.plot(
tgrid,
np.linalg.norm(np.array([pos_error[:, 0], pos_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Log distance [m]")
plt.legend(["x-y dist"], loc="best")
plt.title("Log distance to target")
plt.grid(True)
if save_dir:
plt.savefig(save_dir + "single_log_distance_to_position_target")
if show:
plt.show()
return fig_count
def plot_single_GoToPose_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot distance to target of a single agent."""
pos_error = state_history[:, 6:8]
heading_error = state_history[:, 8:]
heading_error = np.arctan2(heading_error[:, 1], heading_error[:, 0])
    # plot distance to target over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(
tgrid,
np.linalg.norm(np.array([pos_error[:, 0], pos_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m]")
plt.legend(["abs dist"], loc="best")
plt.title("Distance to target")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_distance_to_position_target")
if show:
plt.show()
# plot heading
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, np.abs(heading_error), color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Heading [rad]")
plt.legend(["abs dist"], loc="best")
plt.title("Distance to target")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_distance_to_heading_target")
if show:
plt.show()
return fig_count
def plot_single_GoToPose_log_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot log distance to target of a single agent."""
pos_error = state_history[:, 6:8]
heading_error = state_history[:, 8:]
heading_error = np.arctan2(heading_error[:, 1], heading_error[:, 0])
    # plot log distance to target over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.yscale("log")
plt.plot(
tgrid,
np.linalg.norm(np.array([pos_error[:, 0], pos_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Log distance [m]")
plt.legend(["x-y dist"], loc="best")
plt.title("Log distance to target")
plt.grid(True)
if save_dir:
plt.savefig(save_dir + "single_log_distance_to_position_target")
if show:
plt.show()
# plot heading
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.yscale("log")
plt.plot(tgrid, np.abs(heading_error), color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Log distance [rad]")
plt.legend(["x-y dist"], loc="best")
plt.title("Log distance to target")
plt.grid(True)
if save_dir:
plt.savefig(save_dir + "single_log_distance_to_heading_target")
if show:
plt.show()
return fig_count
def plot_single_TrackXYVelocity_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot distance to target of a single agent."""
vel_error = state_history[:, 6:8]
    # plot velocity error magnitude over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(
tgrid,
np.linalg.norm(np.array([vel_error[:, 0], vel_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m/s]")
plt.legend(["abs dist"], loc="best")
plt.title("Distance to target")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_distance_to_velocity_target")
if show:
plt.show()
return fig_count
def plot_single_TrackXYVelocity_log_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot log distance to target of a single agent."""
vel_error = state_history[:, 6:8]
    # plot log velocity error magnitude over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.yscale("log")
plt.plot(
tgrid,
np.linalg.norm(np.array([vel_error[:, 0], vel_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Log distance [m/s]")
plt.legend(["x-y dist"], loc="best")
plt.title("Log distance to target")
plt.grid(True)
if save_dir:
plt.savefig(save_dir + "single_log_distance_to_velocity_target")
if show:
plt.show()
return fig_count
def plot_single_TrackXYOVelocity_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot distance to target of a single agent."""
vel_error = state_history[:, 6:8]
omega_error = state_history[:, 8]
    # plot linear velocity error magnitude over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(
tgrid,
np.linalg.norm(np.array([vel_error[:, 0], vel_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Distance [m/s]")
plt.legend(["abs dist"], loc="best")
plt.title("Distance to target")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_distance_to_linear_velocity_target")
if show:
plt.show()
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.plot(tgrid, np.abs(omega_error), color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Distance [rad/s]")
plt.legend(["abs dist"], loc="best")
plt.title("Distance to target")
plt.grid()
if save_dir:
plt.savefig(save_dir + "single_distance_to_angular_velocity_target")
if show:
plt.show()
return fig_count
def plot_single_TrackXYOVelocity_log_distance_to_target(
state_history: np.ndarray,
tgrid: np.ndarray,
save_dir: str,
fig_count: int,
show: bool,
**kwargs,
) -> int:
"""
Plot log distance to target of a single agent."""
vel_error = state_history[:, 6:8]
omega_error = state_history[:, 8]
    # plot log linear velocity error magnitude over time
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.yscale("log")
plt.plot(
tgrid,
np.linalg.norm(np.array([vel_error[:, 0], vel_error[:, 1]]), axis=0),
color=cm.get_cmap("tab20")(0),
)
plt.xlabel("Time steps")
plt.ylabel("Log distance [m/s]")
plt.legend(["x-y dist"], loc="best")
plt.title("Log distance to target")
plt.grid(True)
if save_dir:
plt.savefig(save_dir + "single_log_distance_to_linear_velocity_target")
if show:
plt.show()
fig_count += 1
plt.figure(fig_count)
plt.clf()
plt.yscale("log")
plt.plot(tgrid, np.abs(omega_error), color=cm.get_cmap("tab20")(0))
plt.xlabel("Time steps")
plt.ylabel("Log distance [rad/s]")
plt.legend(["x-y dist"], loc="best")
plt.title("Log distance to target")
plt.grid(True)
if save_dir:
plt.savefig(save_dir + "single_log_distance_to_angular_velocity_target")
if show:
plt.show()
return fig_count
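if __name__ == "__main__":
    # Minimal usage sketch on synthetic data (illustrative only). Assumption:
    # `state_history` follows the slicing used by the helpers above, i.e.
    # columns 6:8 hold the position (or velocity) error and columns 8: hold
    # the (cos, sin) of the heading error.
    T = 200
    t = np.linspace(0.0, 1.0, T)
    state_history = np.zeros((T, 10))
    state_history[:, 6] = np.exp(-3 * t) * np.cos(8 * t)  # decaying x error
    state_history[:, 7] = np.exp(-3 * t) * np.sin(8 * t)  # decaying y error
    state_history[:, 8] = np.cos(0.5 * np.exp(-3 * t))  # cos of heading error
    state_history[:, 9] = np.sin(0.5 * np.exp(-3 * t))  # sin of heading error
    tgrid = np.arange(T)
    fig_count = 0
    # An empty save_dir skips saving; show=True displays the figures.
    fig_count = plot_single_heading_error(state_history, tgrid, "", fig_count, True)
    fig_count = plot_single_GoToXY_distance_to_target(state_history, tgrid, "", fig_count, True)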
| 47,201 |
Python
| 26.946714 | 130 | 0.571513 |
elharirymatteo/RANS/omniisaacgymenvs/robots/sensors/proprioceptive/base_sensor.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import numpy as np
from dataclasses import asdict
from omniisaacgymenvs.robots.sensors.proprioceptive.Type import *
class BaseSensorInterface:
"""
Base sensor class
"""
def __init__(self, sensor_cfg: Sensor_T):
"""
dt: float
inertial_to_sensor_frame: List[float]
sensor_frame_to_optical_frame: List[float]
"""
self.sensor_cfg = asdict(sensor_cfg)
self.dt = self.sensor_cfg["dt"]
self.body_to_sensor_frame = self.sensor_cfg["body_to_sensor_frame"]
self.sensor_frame_to_optical_frame = self.sensor_cfg[
"sensor_frame_to_optical_frame"
]
self._sensor_state = None
def update(self, state: State):
"""
state is the state of the rigid body to be simulated
Args:
state (State): state of the rigid body to be simulated
"""
raise NotImplementedError
def reset_idx(self, env_ids:torch.Tensor) -> None:
"""
reset sensor state of specified env.
Args:
env_ids (torch.Tensor): list of env ids to reset
"""
raise NotImplementedError
@property
def state(self):
"""
return sensor state
"""
raise NotImplementedError
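# A minimal concrete-subclass sketch (hypothetical, for illustration only):
# it shows the interface contract: `update` consumes a batched State,
# `reset_idx` zeroes the buffers of the selected envs, and `state` exposes
# the latest reading.
class PassthroughGyro(BaseSensorInterface):
    def __init__(self, sensor_cfg: Sensor_T, num_envs: int = 1):
        super().__init__(sensor_cfg)
        self._sensor_state = torch.zeros(num_envs, 3)

    def update(self, state: State) -> None:
        # No noise model: copy the body angular velocity straight through.
        self._sensor_state = state.angular_velocity.clone()

    def reset_idx(self, env_ids: torch.Tensor) -> None:
        self._sensor_state[env_ids.long()] = 0

    @property
    def state(self) -> torch.Tensor:
        return self._sensor_state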
| 1,589 |
Python
| 27.90909 | 82 | 0.601007 |
elharirymatteo/RANS/omniisaacgymenvs/robots/sensors/proprioceptive/Type.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import numpy as np
import torch
import dataclasses
from typing import List
EPS = 1e-5
@dataclasses.dataclass
class Gyroscope_T:
"""
Gyroscope typing class.
Args:
noise_density (float): noise density of the gyroscope.
random_walk (float): random walk of the gyroscope.
bias_correlation_time (float): bias correlation time of the gyroscope.
turn_on_bias_sigma (float): turn on bias sigma of the gyroscope.
"""
noise_density: float = 0.0003393695767766752
random_walk: float = 3.878509448876288e-05
bias_correlation_time: float = 1.0e3
turn_on_bias_sigma: float = 0.008726646259971648
@dataclasses.dataclass
class Accelometer_T:
"""
    Accelerometer typing class.
    Args:
        noise_density (float): noise density of the accelerometer.
        random_walk (float): random walk of the accelerometer.
        bias_correlation_time (float): bias correlation time of the accelerometer.
        turn_on_bias_sigma (float): turn on bias sigma of the accelerometer.
"""
noise_density: float = 0.004
random_walk: float = 0.006
bias_correlation_time: float = 300.0
turn_on_bias_sigma: float = 0.196
@dataclasses.dataclass
class Sensor_T:
"""
    Sensor typing class.
    Args:
        dt (float): physics time resolution
        body_to_sensor_frame (List[List[float]]): homogeneous transform from body frame to sensor frame (FLU)
        sensor_frame_to_optical_frame (List[List[float]]): homogeneous transform from sensor frame (FLU) to sensor optical frame (OPENCV)
"""
dt: float = 0.01
body_to_sensor_frame: List[float] = dataclasses.field(default_factory=list)
sensor_frame_to_optical_frame: List[float] = dataclasses.field(default_factory=list)
def __post_init__(self):
assert len(self.body_to_sensor_frame) == 4
assert len(self.sensor_frame_to_optical_frame) == 4
self.body_to_sensor_frame = torch.tensor(self.body_to_sensor_frame).to(torch.float32)
self.sensor_frame_to_optical_frame = torch.tensor(self.sensor_frame_to_optical_frame).to(torch.float32)
@dataclasses.dataclass
class IMU_T(Sensor_T):
"""
    IMU typing class.
    Args:
        dt (float): physics time resolution
        body_to_sensor_frame (List[List[float]]): homogeneous transform from body frame to sensor frame (FLU)
        sensor_frame_to_optical_frame (List[List[float]]): homogeneous transform from sensor frame (FLU) to sensor optical frame (OPENCV)
        gravity_vector (List[float]): gravity vector in inertial frame
        accel_param (Accelometer_T): accelerometer parameters
        gyro_param (Gyroscope_T): gyroscope parameters
"""
    gyro_param: Gyroscope_T = dataclasses.field(default_factory=Gyroscope_T)
    accel_param: Accelometer_T = dataclasses.field(default_factory=Accelometer_T)
gravity_vector: List[float] = dataclasses.field(default_factory=list)
def __post_init__(self):
super().__post_init__()
assert len(self.gravity_vector) == 3
self.gravity_vector = torch.tensor(self.gravity_vector).to(torch.float32)
@dataclasses.dataclass
class GPS_T(Sensor_T):
"""
GPS typing class.
Not implemented yet.
Args:
        dt (float): physics time resolution
        body_to_sensor_frame (List[List[float]]): homogeneous transform from body frame to sensor frame (FLU)
        sensor_frame_to_optical_frame (List[List[float]]): homogeneous transform from sensor frame (FLU) to sensor optical frame (OPENCV)
"""
def __post_init__(self):
super().__post_init__()
@dataclasses.dataclass
class State:
"""
    State typing class of any rigid body (to be simulated) with respect to the inertial frame.
Args:
position (torch.float32): position of the body in inertial frame.
orientation (torch.float32): orientation of the body in inertial frame.
linear_velocity (torch.float32): linear velocity of the body in inertial frame.
angular_velocity (torch.float32): angular velocity of the body in inertial frame.
"""
position: torch.float32
orientation: torch.float32
linear_velocity: torch.float32
angular_velocity: torch.float32
def __post_init__(self):
        assert len(self.position.shape) == 2, "position must be a batched tensor."
        assert len(self.orientation.shape) == 2, "orientation must be a batched tensor."
        assert len(self.linear_velocity.shape) == 2, "linear_velocity must be a batched tensor."
        assert len(self.angular_velocity.shape) == 2, "angular_velocity must be a batched tensor."
@staticmethod
def quat_to_mat(quat: torch.Tensor) -> torch.Tensor:
"""
Convert batched quaternion to batched rotation matrix.
Args:
quat (torch.Tensor): batched quaternion.(..., 4)
"""
w, x, y, z = torch.unbind(quat, -1)
two_s = 2.0 / ((quat * quat).sum(-1) + EPS)
R = torch.stack(
(
1 - two_s * (y * y + z * z),
two_s * (x * y - z * w),
two_s * (x * z + y * w),
two_s * (x * y + z * w),
1 - two_s * (x * x + z * z),
two_s * (y * z - x * w),
two_s * (x * z - y * w),
two_s * (y * z + x * w),
1 - two_s * (x * x + y * y),
),
-1,
)
return R.reshape(quat.shape[:-1] + (3, 3))
@property
def body_transform(self) -> torch.float32:
"""
Return transform from inertial frame to body frame(= inverse of body pose).
T[:, :3, :3] = orientation.T
T[:, :3, 3] = - orientation.T @ position
Returns:
transform (torch.float32): transform matrix from inertial frame to body frame.
"""
transform = torch.zeros(self.position.shape[0], 4, 4).to(self.orientation.device)
orientation = self.quat_to_mat(self.orientation)
transform[:, :3, :3] = orientation.transpose(1, 2)
transform[:, :3, 3] = - 1 * torch.bmm(orientation.transpose(1, 2), self.position[:, :, None]).squeeze()
return transform
@dataclasses.dataclass
class ImuState:
"""
IMU state typing class.
Args:
angular_velocity (torch.float32): angular velocity of the body in body frame.
linear_acceleration (torch.float32): linear acceleration of the body in body frame.
"""
angular_velocity: torch.float32 = torch.zeros(1, 3)
linear_acceleration: torch.float32 = torch.zeros(1, 3)
def update(self, angular_velocity:torch.float32, linear_acceleration:torch.float32) -> None:
"""
Update internal attribute from arguments.
Args:
angular_velocity (torch.float32): angular velocity of the body in body frame.
linear_acceleration (torch.float32): linear acceleration of the body in body frame.
"""
self.angular_velocity = angular_velocity
self.linear_acceleration = linear_acceleration
def reset_idx(self, env_ids:torch.Tensor) -> None:
"""
Reset internal attribute of specified env to zero.
"""
self.angular_velocity[env_ids] = 0
self.linear_acceleration[env_ids] = 0
@property
def unite_imu(self) -> torch.float32:
"""
Return IMU state as a single tensor.
Returns:
imu (torch.float32): IMU state as a single tensor.
"""
return torch.cat([self.angular_velocity, self.linear_acceleration], dim=1)
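if __name__ == "__main__":
    # Minimal sketch: build a batched State and check that body_transform
    # maps the body position to the origin of the body frame (first three
    # components ~0; the last row of the 4x4 is left at zero by design).
    N = 4
    position = torch.randn(N, 3)
    orientation = torch.zeros(N, 4)
    orientation[:, 0] = 1.0  # identity quaternion, (w, x, y, z) convention
    state = State(position, orientation, torch.zeros(N, 3), torch.zeros(N, 3))
    T = state.body_transform  # (N, 4, 4)
    homogeneous = torch.cat([position, torch.ones(N, 1)], dim=-1)
    print(torch.bmm(T, homogeneous[:, :, None]).squeeze(-1))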
| 7,593 |
Python
| 37.548223 | 127 | 0.632951 |
elharirymatteo/RANS/omniisaacgymenvs/robots/sensors/proprioceptive/gps.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import numpy as np
from omniisaacgymenvs.robots.sensors.proprioceptive.base_sensor import BaseSensorInterface
from omniisaacgymenvs.robots.sensors.proprioceptive.Type import *
class GPSInterface(BaseSensorInterface):
"""
GPS sensor class to simulate GPS based on pegasus simulator
(https://github.com/PegasusSimulator/PegasusSimulator)
"""
def __init__(self, sensor_cfg: GPS_T):
"""
Args:
sensor_cfg (GPS_T): GPS sensor configuration.
"""
super().__init__(sensor_cfg)
| 850 |
Python
| 33.039999 | 90 | 0.678824 |
elharirymatteo/RANS/omniisaacgymenvs/robots/sensors/proprioceptive/imu.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import numpy as np
import torch
from omniisaacgymenvs.robots.sensors.proprioceptive.base_sensor import BaseSensorInterface
from omniisaacgymenvs.robots.sensors.proprioceptive.Type import IMU_T, Accelometer_T, Gyroscope_T, State, ImuState
class IMUInterface(BaseSensorInterface):
"""
    IMU sensor class to simulate an accelerometer and a gyroscope, based on the pegasus simulator
    (https://github.com/PegasusSimulator/PegasusSimulator).
    It takes the state information published directly by the physics engine,
    then adds IMU noise (white noise and a time-diffusing random walk) to it.
    Since it is an "interface", you do not need to call an initialize method as seen in omni.isaac.sensor.IMUSensor.
"""
def __init__(self, sensor_cfg: IMU_T, num_envs: int = 1):
"""
Args:
sensor_cfg (IMU_T): imu sensor configuration.
num_envs (int): number of environments.
"""
super().__init__(sensor_cfg)
self.gravity_vector = self.sensor_cfg["gravity_vector"]
self._gyroscope_bias = torch.zeros(3, 1)
self._gyroscope_noise_density = self.sensor_cfg["gyro_param"]["noise_density"]
self._gyroscope_random_walk = self.sensor_cfg["gyro_param"]["random_walk"]
self._gyroscope_bias_correlation_time = self.sensor_cfg["gyro_param"][
"bias_correlation_time"
]
self._gyroscope_turn_on_bias_sigma = self.sensor_cfg["gyro_param"][
"turn_on_bias_sigma"
]
self._accelerometer_bias = torch.zeros(3, 1)
self._accelerometer_noise_density = self.sensor_cfg["accel_param"][
"noise_density"
]
self._accelerometer_random_walk = self.sensor_cfg["accel_param"]["random_walk"]
self._accelerometer_bias_correlation_time = self.sensor_cfg["accel_param"][
"bias_correlation_time"
]
self._accelerometer_turn_on_bias_sigma = self.sensor_cfg["accel_param"][
"turn_on_bias_sigma"
]
self._prev_linear_velocity = torch.zeros(num_envs, 3).to(torch.float32)
self._sensor_state = ImuState(angular_velocity=torch.zeros(num_envs, 3).to(torch.float32),
linear_acceleration=torch.zeros(num_envs, 3).to(torch.float32))
def update(self, state: State):
"""
gyroscope and accelerometer simulation (https://ieeexplore.ieee.org/document/7487628)
gyroscope = angular_velocity + white noise + random walk.
accelerometer = -1 * (acceleration + white noise + random walk).
NOTE that accelerometer measures inertial acceleration. Thus, the reading is the negative of body acceleration.
"""
device = state.angular_velocity.device
# gyroscope term
tau_g = self._gyroscope_bias_correlation_time
sigma_g_d = 1 / torch.sqrt(torch.tensor(self.dt)) * self._gyroscope_noise_density
sigma_b_g = self._gyroscope_random_walk
sigma_b_g_d = torch.sqrt(-sigma_b_g * sigma_b_g * tau_g / 2.0 * (torch.exp(torch.tensor(-2.0 * self.dt / tau_g)) - 1.0))
phi_g_d = torch.exp(torch.tensor(-1.0/tau_g * self.dt))
angular_velocity = torch.bmm(state.body_transform[:, :3, :3], state.angular_velocity[:, :, None]).squeeze()
for i in range(3):
self._gyroscope_bias[i] = phi_g_d * self._gyroscope_bias[i] + sigma_b_g_d * torch.randn(1)
angular_velocity[:, i] = angular_velocity[:, i] + sigma_g_d * torch.randn(1).to(device) + self._gyroscope_bias[i].to(device)
# accelerometer term
self._prev_linear_velocity = self._prev_linear_velocity.to(device)
tau_a = self._accelerometer_bias_correlation_time
sigma_a_d = 1.0 / torch.sqrt(torch.tensor(self.dt)) * self._accelerometer_noise_density
sigma_b_a = self._accelerometer_random_walk
sigma_b_a_d = torch.sqrt(-sigma_b_a * sigma_b_a * tau_a / 2.0 * (torch.exp(torch.tensor(-2.0 * self.dt / tau_a)) - 1.0))
phi_a_d = torch.exp(torch.tensor(-1.0 / tau_a * self.dt))
linear_acceleration_inertial = (state.linear_velocity - self._prev_linear_velocity) / self.dt + self.gravity_vector.to(device)
self._prev_linear_velocity = state.linear_velocity
linear_acceleration = torch.bmm(state.body_transform[:, :3, :3], linear_acceleration_inertial[:, :, None]).squeeze()
for i in range(3):
self._accelerometer_bias[i] = phi_a_d * self._accelerometer_bias[i] + sigma_b_a_d * torch.randn(1)
linear_acceleration[:, i] = (
linear_acceleration[:, i] + sigma_a_d * torch.randn(1).to(device)
) #+ self._accelerometer_bias[i]
# transform accel/gyro from body frame to sensor optical frame
angular_velocity = torch.bmm(self.sensor_frame_to_optical_frame[None, :3, :3].expand(angular_velocity.shape[0], 3, 3).to(device),
torch.bmm(
self.body_to_sensor_frame[None, :3, :3].expand(angular_velocity.shape[0], 3, 3).to(device), angular_velocity[:, :, None]
)).squeeze()
linear_acceleration = torch.bmm(self.sensor_frame_to_optical_frame[None, :3, :3].expand(linear_acceleration.shape[0], 3, 3).to(device),
torch.bmm(
self.body_to_sensor_frame[None, :3, :3].expand(linear_acceleration.shape[0], 3, 3).to(device), linear_acceleration[:, :, None]
)).squeeze()
self._sensor_state.update(angular_velocity, -1*linear_acceleration)
def reset_idx(self, env_ids: torch.Tensor):
"""
reset sensor state of specified env.
Args:
env_ids (torch.Tensor): environment indices to reset.
"""
env_long = env_ids.long()
self._sensor_state.reset_idx(env_ids=env_long)
self._prev_linear_velocity[env_long] = 0
@property
def state(self):
"""
return sensor state.
"""
return self._sensor_state
if __name__ == "__main__":
## comes from yaml parsed by hydra ##########
BODY_TO_SENSOR_FRAME = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]
SENSOR_FRAME_TO_OPTICAL_FRAME = [[-0, -1, 0, 0],
[0, 0, -1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]]
GRAVITY_VECTOR = [0, 0, -9.81]
dt = 0.01
ACCEL_PARAM = {"noise_density": 0.004,
"random_walk": 0.006,
"bias_correlation_time": 300.0,
"turn_on_bias_sigma": 0.196
}
GYRO_PARAM = {"noise_density": 0.0003393695767766752,
"random_walk": 3.878509448876288e-05,
"bias_correlation_time": 1.0e3,
"turn_on_bias_sigma": 0.008726646259971648
}
#############################################
imu_t = IMU_T(
body_to_sensor_frame=BODY_TO_SENSOR_FRAME,
sensor_frame_to_optical_frame=SENSOR_FRAME_TO_OPTICAL_FRAME,
gravity_vector=GRAVITY_VECTOR,
dt=dt,
accel_param=Accelometer_T(**ACCEL_PARAM),
gyro_param=Gyroscope_T(**GYRO_PARAM),
)
imu = IMUInterface(imu_t)
while True:
N = 16
position = torch.zeros(N, 3).to(torch.float32)
orientation = torch.zeros(N, 4).to(torch.float32)
orientation[:, 0] = 1.0
linear_velocity = torch.zeros(N, 3).to(torch.float32)
angular_velocity = torch.zeros(N, 3).to(torch.float32)
state = State(position, orientation, linear_velocity, angular_velocity)
imu.update(state)
print(imu.state)
| 8,278 |
Python
| 49.481707 | 167 | 0.57526 |
elharirymatteo/RANS/omniisaacgymenvs/robots/sensors/exteroceptive/camera.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omniisaacgymenvs.robots.sensors.exteroceptive.camera_interface import camera_interface_factory
from typing import List
from dataclasses import dataclass, field
from omni.isaac.core.utils.stage import get_current_stage
from pxr import Gf
import carb
## Replicator hack
carb_settings = carb.settings.get_settings()
carb_settings.set_bool(
"rtx/raytracing/cached/enabled",
False,
)
carb_settings.set_int(
"rtx/descriptorSets",
8192,
)
@dataclass
class CameraCalibrationParam:
"""
Camera calibration params class.
Args:
focalLength (float): focal length of the camera.
focusDistance (float): focus distance of the camera.
clippingRange (List[float]): clipping range of the camera.
horizontalAperture (float): horizontal aperture of the camera.
verticalAperture (float): vertical aperture of the camera.
"""
focalLength: float = None
focusDistance: float = None
clippingRange: List[float] = None
horizontalAperture: float = None
verticalAperture: float = None
@dataclass
class RLCameraParams:
"""
RLCamera params class.
Args:
prim_path (str): path to the prim that the sensor is attached to.
resolution (List[int]): resolution of the sensor.
        is_override (bool): if True, the sensor parameters will be overridden.
params (dict): parameters for the sensor.
"""
prim_path: str
resolution: List[int]
is_override: bool
params: CameraCalibrationParam = field(default_factory=dict)
def __post_init__(self):
assert len(self.resolution) == 2, f"resolution should be a list of 2 ints, got {self.resolution}"
self.params = CameraCalibrationParam(**self.params)
class RLCamera:
"""
RLCamera is a sensor that can be used in RL tasks.
    It uses Replicator to record synthetic data (mostly images).
"""
def __init__(self, sensor_cfg:dict, rep:object)->None:
"""
Args:
            sensor_cfg (dict): configuration for the sensor with the following keys:
                prim_path (str): path to the prim that the sensor is attached to
                resolution (List[int]): resolution of the rendered images
                is_override (bool): if True, the camera parameters will be overridden
                params (dict): calibration parameters (see CameraCalibrationParam)
rep (object): omni.replicator.core object
"""
self.sensor_cfg = RLCameraParams(**sensor_cfg)
self.prim_path = self.sensor_cfg.prim_path
self.is_override = self.sensor_cfg.is_override
self.rep = rep
if self.is_override:
assert "params" in sensor_cfg.keys(), "params must be provided if override is True."
self.override_params(get_current_stage(), self.prim_path, self.sensor_cfg.params)
self.render_product = self.rep.create.render_product(
self.prim_path,
resolution=[*self.sensor_cfg.resolution])
self.annotators = {}
self.camera_interfaces = {}
self.enable_rgb()
self.enable_depth()
def override_params(self, stage, prim_path:str, sensor_param:CameraCalibrationParam)->None:
"""
Override the sensor parameters if override=True
Args:
stage (Stage): stage object
prim_path (str): path to the prim that the sensor is attached to
sensor_param (CameraCalibrationParam): parameters for the sensor
"""
camera = stage.DefinePrim(prim_path, 'Camera')
camera.GetAttribute('focalLength').Set(sensor_param.focalLength)
camera.GetAttribute('focusDistance').Set(sensor_param.focusDistance)
camera.GetAttribute("clippingRange").Set(Gf.Vec2f(*sensor_param.clippingRange))
camera.GetAttribute("horizontalAperture").Set(sensor_param.horizontalAperture)
camera.GetAttribute("verticalAperture").Set(sensor_param.verticalAperture)
def enable_rgb(self) -> None:
"""
Enable RGB as a RL observation
"""
rgb_annot = self.rep.AnnotatorRegistry.get_annotator("rgb")
rgb_annot.attach([self.render_product])
self.annotators.update({"rgb":rgb_annot})
self.camera_interfaces.update({"rgb":camera_interface_factory.get("RGBInterface")()})
def enable_depth(self) -> None:
"""
Enable depth as a RL observation
"""
depth_annot = self.rep.AnnotatorRegistry.get_annotator("distance_to_image_plane")
depth_annot.attach([self.render_product])
self.annotators.update({"depth":depth_annot})
self.camera_interfaces.update({"depth":camera_interface_factory.get("DepthInterface")()})
def get_observation(self) -> dict:
"""
Returns a dict of observations
"""
obs_buf = {}
for modality, annotator in self.annotators.items():
camera_interface = self.camera_interfaces[modality]
data_pt = camera_interface(annotator.get_data())
obs_buf.update({modality:data_pt})
return obs_buf
class CameraFactory:
"""
Factory class to create sensors.
"""
def __init__(self):
self.creators = {}
def register(self, name: str, sensor):
"""
Registers a new sensor.
Args:
name (str): name of the sensor.
sensor (object): sensor object.
"""
self.creators[name] = sensor
    def get(self, name: str) -> object:
"""
Returns a sensor.
Args:
name (str): name of the sensor.
"""
assert name in self.creators.keys(), f"{name} not in {self.creators.keys()}"
return self.creators[name]
camera_factory = CameraFactory()
camera_factory.register("RLCamera", RLCamera)
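if __name__ == "__main__":
    # Usage sketch (hypothetical prim path and resolution): requires a live
    # Isaac Sim stage that already contains a camera prim at `prim_path`,
    # and at least one rendered frame before the annotators return data.
    import omni.replicator.core as rep

    cfg = {
        "prim_path": "/World/Camera",
        "resolution": [128, 128],
        "is_override": False,
        "params": {},
    }
    camera = camera_factory.get("RLCamera")(cfg, rep)
    obs = camera.get_observation()
    print(obs["rgb"].shape, obs["depth"].shape)  # (3, H, W) and (1, H, W)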
| 6,082 |
Python
| 34.782353 | 105 | 0.637455 |
elharirymatteo/RANS/omniisaacgymenvs/robots/sensors/exteroceptive/camera_module_generator.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import os
from dataclasses import dataclass, field
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path
from pxr import Gf
from omniisaacgymenvs.robots.articulations.utils.MFP_utils import *
@dataclass
class RootPrimParams:
"""
Root prim params class.
Args:
prim_path (str): path to the prim.
translation (List[float]): translation of the prim.
rotation (List[float]): rotation of the prim.
"""
prim_path: str
translation: List[float]
rotation: List[float]
def __post_init__(self):
assert len(self.translation) == 3, f"translation should be a list of 3 floats, got {self.translation}"
assert len(self.rotation) == 3, f"rotation should be a list of 3 floats, got {self.rotation}"
@dataclass
class SensorBaseParams:
"""
Sensor base params class.
Args:
prim_name (str): name of the prim.
        usd_path (str): path to the usd file. None if you do not link a usd file.
"""
prim_name: str = None
usd_path: str = None
@dataclass
class CameraCalibrationParam:
"""
Camera calibration params class.
Args:
focalLength (float): focal length of the camera.
focusDistance (float): focus distance of the camera.
clippingRange (List[float]): clipping range of the camera.
horizontalAperture (float): horizontal aperture of the camera.
verticalAperture (float): vertical aperture of the camera.
"""
focalLength: float
focusDistance: float
clippingRange: List[float]
horizontalAperture: float
verticalAperture: float
@dataclass
class CameraParams:
"""
Camera params class.
Args:
prim_path (str): path to the prim.
rotation (List[float]): rotation of the prim.
params (CameraCalibrationParam): camera calibration params.
"""
prim_path: str
rotation: List[float]
params: CameraCalibrationParam = field(default_factory=dict)
def __post_init__(self):
assert len(self.rotation) == 3, f"rotation should be a list of 3 floats, got {self.rotation}"
self.params = CameraCalibrationParam(**self.params)
@dataclass
class CameraModuleParams:
"""
Camera module params class.
Args:
module_name (str): name of the module.
root_prim (RootPrimParams): root prim params.
sensor_base (SensorBaseParams): sensor base params.
links (list): list of links and their transforms.
camera_sensor (CameraParams): camera params.
"""
module_name: str
root_prim: RootPrimParams = field(default_factory=dict)
sensor_base: SensorBaseParams = field(default_factory=dict)
links: list = field(default_factory=list)
camera_sensor: CameraParams = field(default_factory=dict)
def __post_init__(self):
self.root_prim = RootPrimParams(**self.root_prim)
self.sensor_base = SensorBaseParams(**self.sensor_base)
self.camera_sensor = CameraParams(**self.camera_sensor)
class D435_Sensor:
"""
D435 sensor module class.
It handles the creation of sensor links(body) and joints between them.
"""
def __init__(self, cfg:dict):
"""
Args:
cfg (dict): configuration for the sensor
"""
self.cfg = CameraModuleParams(**cfg)
self.root_prim_path = self.cfg.root_prim.prim_path
self.sensor_base = self.cfg.sensor_base
self.links = self.cfg.links
self.stage = get_current_stage()
def _add_root_prim(self) -> None:
"""
Add root prim.
"""
_, prim = createXform(self.stage, self.root_prim_path)
setTranslate(prim, Gf.Vec3d(*self.cfg.root_prim.translation))
setRotateXYZ(prim, Gf.Vec3d(*self.cfg.root_prim.rotation))
def _add_sensor_link(self) -> None:
"""
Add sensor link(body).
If usd file is given, it will be linked to the sensor link.
"""
_, prim = createXform(self.stage, os.path.join(self.root_prim_path, self.sensor_base.prim_name))
setTranslate(prim, Gf.Vec3d((0, 0, 0)))
setRotateXYZ(prim, Gf.Vec3d((0, 0, 0)))
if self.sensor_base.usd_path is not None:
sensor_body_usd = os.path.join(os.getcwd(), self.sensor_base.usd_path)
camera_body_prim = add_reference_to_stage(sensor_body_usd,
os.path.join(self.root_prim_path,
self.sensor_base.prim_name,
"base_body"))
setTranslate(camera_body_prim, Gf.Vec3d((0, 0, 0)))
setRotateXYZ(camera_body_prim, Gf.Vec3d((0, 0, 0)))
def _add_link(self, link_name:str) -> None:
"""
Add link(body).
Args:
link_name (str): name of the link.
"""
createXform(self.stage, os.path.join(self.root_prim_path, link_name))
def _add_transform(self, link_name:str, transform:list) -> None:
"""
Add transform to the link(body) relative to its parent prim.
Args:
link_name (str): name of the link.
transform (list): transform of the link.
"""
prim = get_prim_at_path(os.path.join(self.root_prim_path, link_name))
setTranslate(prim, Gf.Vec3f(*transform[:3]))
setRotateXYZ(prim, Gf.Vec3f(*transform[3:]))
def _add_camera(self) -> None:
"""
Add usd camera to camera optical link.
"""
camera = self.stage.DefinePrim(self.cfg.camera_sensor.prim_path, 'Camera')
setTranslate(camera, Gf.Vec3d((0, 0, 0)))
setRotateXYZ(camera, Gf.Vec3f(*self.cfg.camera_sensor.rotation))
camera.GetAttribute('focalLength').Set(self.cfg.camera_sensor.params.focalLength)
camera.GetAttribute('focusDistance').Set(self.cfg.camera_sensor.params.focusDistance)
camera.GetAttribute("clippingRange").Set(Gf.Vec2f(*self.cfg.camera_sensor.params.clippingRange))
camera.GetAttribute("horizontalAperture").Set(self.cfg.camera_sensor.params.horizontalAperture)
camera.GetAttribute("verticalAperture").Set(self.cfg.camera_sensor.params.verticalAperture)
def _build_prim_structure(self) -> None:
"""
Build the sensor prim structure.
"""
self._add_root_prim()
self._add_sensor_link()
for link in self.links:
self._add_link(link[0])
self._add_transform(link[0], link[1])
def build(self) -> None:
"""
Initialize the sensor prim structure.
"""
self._build_prim_structure()
self._add_camera()
class D455_Sensor(D435_Sensor):
"""
D455 sensor module class.
    It is identical to D435 except for its extrinsics.
"""
def __init__(self, cfg:dict):
"""
Args:
cfg (dict): configuration for the sensor
"""
super().__init__(cfg)
class SensorModuleFactory:
"""
    Factory class to create sensor modules.
"""
def __init__(self):
self.creators = {}
def register(self, name: str, sensor):
"""
        Registers a new sensor module.
        Args:
            name (str): name of the sensor module.
            sensor (object): sensor module class.
"""
self.creators[name] = sensor
    def get(self, name: str) -> object:
"""
        Returns a sensor module.
        Args:
            name (str): name of the sensor module.
"""
assert name in self.creators.keys(), f"{name} not in {self.creators.keys()}"
return self.creators[name]
sensor_module_factory = SensorModuleFactory()
sensor_module_factory.register("D435", D435_Sensor)
sensor_module_factory.register("D455", D455_Sensor)
| 8,204 |
Python
| 32.627049 | 110 | 0.607021 |
elharirymatteo/RANS/omniisaacgymenvs/robots/sensors/exteroceptive/camera_interface.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import numpy as np
import torch
class BaseCameraInterface:
"""
Base camera interface class.
"""
def __call__(self, data):
"""
Get data from the sensor in torch tensor.
Args:
data (Any): data from rep.annotator.get_data()
"""
raise NotImplementedError
class RGBInterface(BaseCameraInterface):
"""
RGB camera interface class."""
def __call__(self, data):
"""
Get rgb data from the sensor in torch tensor.
Args:
data (Any): rgb data from rep.annotator.get_data()
"""
rgb_image = np.frombuffer(data, dtype=np.uint8).reshape(*data.shape, -1)
rgb_image = np.squeeze(rgb_image)[:, :, :3].transpose((2, 0, 1))
rgb_image = (rgb_image/255.0).astype(np.float32)
return torch.from_numpy(rgb_image)
class DepthInterface(BaseCameraInterface):
"""
Depth camera interface class.
"""
def __call__(self, data):
"""
Get depth data from the sensor in torch tensor.
Args:
data (Any): depth data from rep.annotator.get_data()
"""
depth_image = np.frombuffer(data, dtype=np.float32).reshape(*data.shape, -1).transpose((2, 0, 1))
return torch.from_numpy(depth_image)
class SemanticSegmentationInterface(BaseCameraInterface):
"""
Semantic segmentation camera interface class.
"""
def __call__(self, data):
"""
Get semantic segmentation data from the sensor in torch tensor.
Args:
data (Any): semantic segmentation data from rep.annotator.get_data()
"""
raise NotImplementedError
class InstanceSegmentationInterface(BaseCameraInterface):
"""
Instance segmentation camera interface class.
"""
def __call__(self, data):
"""
Get instance segmentation data from the sensor in torch tensor.
Args:
data (Any): instance segmentation data from rep.annotator.get_data()
"""
raise NotImplementedError
class ObjectDetectionInterface(BaseCameraInterface):
"""
Object detection camera interface class."""
def __call__(self, data):
"""
Get object detection data from the sensor in torch tensor.
Args:
data (Any): object detection data from rep.annotator.get_data()"""
raise NotImplementedError
class CameraInterfaceFactory:
"""
    Factory class to create camera interfaces.
"""
def __init__(self):
"""
        Initialize factory attributes.
"""
self.creators = {}
def register(self, name: str, sensor):
"""
        Registers a new camera interface.
        Args:
            name (str): name of the camera interface.
            sensor (object): camera interface class.
"""
self.creators[name] = sensor
    def get(self, name: str) -> object:
"""
        Returns a camera interface.
        Args:
            name (str): name of the camera interface.
"""
assert name in self.creators.keys(), f"{name} not in {self.creators.keys()}"
return self.creators[name]
camera_interface_factory = CameraInterfaceFactory()
camera_interface_factory.register("RGBInterface", RGBInterface)
camera_interface_factory.register("DepthInterface", DepthInterface)
camera_interface_factory.register("SemanticSegmentationInterface", SemanticSegmentationInterface)
camera_interface_factory.register("InstanceSegmentationInterface", InstanceSegmentationInterface)
camera_interface_factory.register("ObjectDetectionInterface", ObjectDetectionInterface)
| 3,892 |
Python
| 30.144 | 105 | 0.622302 |
elharirymatteo/RANS/omniisaacgymenvs/robots/articulations/AMR_4WheelsSkidSteer.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omni.isaac.core.robots.robot import Robot
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from pxr import Gf
import torch
import omni
import carb
import math
import os
from omniisaacgymenvs.robots.articulations.utils.MFP_utils import *
from omniisaacgymenvs.tasks.MFP.MFP2D_thruster_generator import (
compute_actions,
)
from omniisaacgymenvs.tasks.MFP.MFP2D_thruster_generator import (
ConfigurationParameters,
)
from omniisaacgymenvs.robots.sensors.exteroceptive.camera_module_generator import (
sensor_module_factory,
)
from omniisaacgymenvs.robots.articulations.utils.Types import (
Sphere,
DirectDriveWheel,
GeometricPrimitive,
PhysicsMaterial,
GeometricPrimitiveFactory,
PassiveWheelFactory,
)
@dataclass
class SkidSteerParameters:
shape: GeometricPrimitive = field(default_factory=dict)
front_left_wheel: DirectDriveWheel = field(default_factory=dict)
front_right_wheel: DirectDriveWheel = field(default_factory=dict)
rear_left_wheel: DirectDriveWheel = field(default_factory=dict)
rear_right_wheel: DirectDriveWheel = field(default_factory=dict)
passive_wheels: list = field(default_factory=list)
mass: float = 5.0
CoM: tuple = (0, 0, 0)
def __post_init__(self):
self.shape = GeometricPrimitiveFactory.get_item(self.shape)
self.front_left_wheel = DirectDriveWheel(**self.front_left_wheel)
self.front_right_wheel = DirectDriveWheel(**self.front_right_wheel)
self.rear_left_wheel = DirectDriveWheel(**self.rear_left_wheel)
self.rear_right_wheel = DirectDriveWheel(**self.rear_right_wheel)
class CreateAMR4WheelsSkidSteer:
"""
    Creates a 4-wheeled skid-steer robot."""
def __init__(self, path: str, cfg: dict) -> None:
self.platform_path = path
self.joints_path = "joints"
self.materials_path = "materials"
self.core_path = None
self.stage = omni.usd.get_context().get_stage()
        # Reads the skid-steer configuration.
self.settings = SkidSteerParameters(**cfg["system"])
self.camera_cfg = cfg.get("camera", None)
def build(self) -> None:
"""
Builds the platform."""
# Creates articulation root and the Xforms to store materials/joints.
self.platform_path, self.platform_prim = createArticulation(
self.stage, self.platform_path
)
self.joints_path, self.joints_prim = createXform(
self.stage, self.platform_path + "/" + self.joints_path
)
self.materials_path, self.materials_prim = createXform(
self.stage, self.platform_path + "/" + self.materials_path
)
# Creates a set of basic materials
self.createBasicColors()
# Creates the main body element and adds the position & heading markers.
self.createCore()
self.createDrivingWheels()
self.createPassiveWheels()
def createCore(self) -> None:
"""
Creates the core of the AMR.
"""
self.core_path, self.core_prim = self.settings.shape.build(
self.stage, self.platform_path + "/core"
)
applyMass(self.core_prim, self.settings.mass, Gf.Vec3d(0, 0, 0))
if self.camera_cfg is not None:
self.createCamera()
else:
self.settings.shape.add_orientation_marker(
self.stage, self.core_path + "/arrow", self.colors["red"]
)
self.settings.shape.add_positional_marker(
self.stage, self.core_path + "/marker", self.colors["green"]
)
def createDrivingWheels(self) -> None:
"""
        Creates the driving wheels of the AMR.
"""
# Creates the front left wheel
front_left_wheel_path, front_left_wheel_prim = (
self.settings.front_left_wheel.build(
self.stage,
joint_path=self.joints_path + "/front_left_wheel",
wheel_path=self.platform_path + "/front_left_wheel",
body_path=self.core_path,
)
)
# Creates the front right wheel
front_right_wheel_path, front_right_wheel_prim = (
self.settings.front_right_wheel.build(
self.stage,
joint_path=self.joints_path + "/front_right_wheel",
wheel_path=self.platform_path + "/front_right_wheel",
body_path=self.core_path,
)
)
# Creates the rear left wheel
rear_left_wheel_path, rear_left_wheel_prim = (
self.settings.rear_left_wheel.build(
self.stage,
joint_path=self.joints_path + "/rear_left_wheel",
wheel_path=self.platform_path + "/rear_left_wheel",
body_path=self.core_path,
)
)
# Creates the rear right wheel
rear_right_wheel_path, rear_right_wheel_prim = (
self.settings.rear_right_wheel.build(
self.stage,
joint_path=self.joints_path + "/rear_right_wheel",
wheel_path=self.platform_path + "/rear_right_wheel",
body_path=self.core_path,
)
)
def createPassiveWheels(self) -> None:
"""
        Creates the passive wheels of the AMR.
"""
for i, wheel in enumerate(self.settings.passive_wheels):
wheel_path, wheel_prim = wheel.build(
self.stage,
joint_path=self.joints_path + f"/passive_wheel_{i}",
material_path=self.materials_path + "/zero_friction",
path=self.platform_path + f"/passive_wheel_{i}",
body_path=self.core_path,
)
def createBasicColors(self) -> None:
"""
Creates a set of basic colors."""
self.colors = {}
self.colors["red"] = createColor(
self.stage, self.materials_path + "/red", [1, 0, 0]
)
self.colors["green"] = createColor(
self.stage, self.materials_path + "/green", [0, 1, 0]
)
self.colors["blue"] = createColor(
self.stage, self.materials_path + "/blue", [0, 0, 1]
)
self.colors["white"] = createColor(
self.stage, self.materials_path + "/white", [1, 1, 1]
)
self.colors["grey"] = createColor(
self.stage, self.materials_path + "/grey", [0.5, 0.5, 0.5]
)
self.colors["dark_grey"] = createColor(
self.stage, self.materials_path + "/dark_grey", [0.25, 0.25, 0.25]
)
self.colors["black"] = createColor(
self.stage, self.materials_path + "/black", [0, 0, 0]
)
def createCamera(self) -> None:
"""
Creates a camera module prim.
"""
self.camera = sensor_module_factory.get(self.camera_cfg["module_name"])(
self.camera_cfg
)
self.camera.build()
class AMR_4W_SS(Robot):
def __init__(
self,
prim_path: str,
cfg: dict,
name: Optional[str] = "AMR_2W_SS",
usd_path: Optional[str] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
scale: Optional[np.array] = None,
) -> None:
"""[summary]"""
self._usd_path = usd_path
self._name = name
AMR = CreateAMR4WheelsSkidSteer(prim_path, cfg)
AMR.build()
super().__init__(
prim_path=prim_path,
name=name,
translation=translation,
orientation=orientation,
scale=scale,
)
| 8,046 |
Python
| 32.115226 | 88 | 0.589113 |
elharirymatteo/RANS/omniisaacgymenvs/robots/articulations/MFP3D_thrusters.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omni.isaac.core.robots.robot import Robot
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from pxr import Gf
import torch
import omni
import carb
import math
import os
from omniisaacgymenvs.robots.articulations.utils.MFP_utils import *
from omniisaacgymenvs.tasks.MFP.MFP3D_thruster_generator import (
compute_actions,
)
from omniisaacgymenvs.tasks.MFP.MFP3D_thruster_generator import (
ConfigurationParameters,
)
from omniisaacgymenvs.robots.sensors.exteroceptive.camera_module_generator import (
sensor_module_factory,
)
@dataclass
class PlatformParameters:
shape: str = "sphere"
radius: float = 0.31
height: float = 0.5
mass: float = 5.32
CoM: tuple = (0, 0, 0)
refinement: int = 2
usd_asset_path: str = "/None"
enable_collision: bool = False
def __post_init__(self):
assert self.shape in [
"cylinder",
"sphere",
"asset",
], "The shape must be 'cylinder', 'sphere' or 'asset'."
assert self.radius > 0, "The radius must be larger than 0."
assert self.height > 0, "The height must be larger than 0."
assert self.mass > 0, "The mass must be larger than 0."
assert len(self.CoM) == 3, "The length of the CoM coordinates must be 3."
assert self.refinement > 0, "The refinement level must be larger than 0."
assert type(self.enable_collision) == bool, "The enable_collision must be a bool."
self.refinement = int(self.refinement)
class CreatePlatform:
"""
Creates a floating platform with a core body and a set of thrusters."""
def __init__(self, path: str, cfg: dict) -> None:
self.platform_path = path
self.joints_path = "joints"
self.materials_path = "materials"
self.core_path = None
self.stage = omni.usd.get_context().get_stage()
# Reads the thruster configuration and computes the number of virtual thrusters.
self.settings = PlatformParameters(**cfg["core"])
thruster_cfg = ConfigurationParameters(**cfg["configuration"])
self.num_virtual_thrusters = compute_actions(thruster_cfg)
self.camera_cfg = cfg.get("camera", None)
def build(self) -> None:
"""
Builds the platform."""
# Creates articulation root and the Xforms to store materials/joints.
self.platform_path, self.platform_prim = createArticulation(
self.stage, self.platform_path
)
self.joints_path, self.joints_prim = createXform(
self.stage, self.platform_path + "/" + self.joints_path
)
self.materials_path, self.materials_prim = createXform(
self.stage, self.platform_path + "/" + self.materials_path
)
# Creates a set of basic materials
self.createBasicColors()
# Creates the main body element and adds the position & heading markers.
if self.settings.shape == "sphere":
self.core_path = self.createRigidSphere(
self.platform_path + "/core",
"body",
self.settings.radius,
Gf.Vec3d([0, 0, 0]),
0.0001,
)
elif self.settings.shape == "cylinder":
self.core_path = self.createRigidCylinder(
self.platform_path + "/core",
"body",
self.settings.radius,
self.settings.height,
Gf.Vec3d([0, 0, 0]),
0.0001,
)
# Creates the movable CoM and the joints to control it.
self.createMovableCoM(
self.platform_path + "/movable_CoM",
"CoM",
self.settings.radius / 2,
self.settings.CoM,
self.settings.mass,
)
if self.camera_cfg is not None:
self.createCamera()
else:
self.createArrowXform(self.core_path + "/arrow")
self.createPositionMarkerXform(self.core_path + "/marker")
# Adds virtual anchors for the thrusters
for i in range(self.num_virtual_thrusters):
self.createVirtualThruster(
self.platform_path + "/v_thruster_" + str(i),
self.joints_path + "/v_thruster_joint_" + str(i),
self.core_path,
0.0001,
Gf.Vec3d([0, 0, 0]),
)
def createMovableCoM(
self, path: str, name: str, radius: float, CoM: Gf.Vec3d, mass: float
    ) -> str:
"""
Creates a movable Center of Mass (CoM).
Args:
path (str): The path to the movable CoM.
name (str): The name of the sphere used as CoM.
radius (float): The radius of the sphere used as CoM.
CoM (Gf.Vec3d): The resting position of the center of mass.
mass (float): The mass of the Floating Platform.
Returns:
str: The path to the movable CoM.
"""
# Create Xform
CoM_path, CoM_prim = createXform(self.stage, path)
# Add shapes
cylinder_path, cylinder_geom = createCylinder(
self.stage, CoM_path + "/" + name, radius, radius, self.settings.refinement
)
cylinder_prim = self.stage.GetPrimAtPath(cylinder_geom.GetPath())
applyRigidBody(cylinder_prim)
# Sets the collider
applyCollider(cylinder_prim)
# Sets the mass and CoM
applyMass(cylinder_prim, mass, Gf.Vec3d(0, 0, 0))
# Add dual prismatic joint
CoM_path, CoM_prim = createXform(
self.stage, os.path.join(self.joints_path, "/CoM_joints")
)
createP3Joint(
self.stage,
os.path.join(self.joints_path, "CoM_joints"),
self.core_path,
cylinder_path,
damping=1e6,
stiffness=1e12,
prefix="com_",
enable_drive=True,
)
return cylinder_path
def createBasicColors(self) -> None:
"""
Creates a set of basic colors."""
self.colors = {}
self.colors["red"] = createColor(
self.stage, self.materials_path + "/red", [1, 0, 0]
)
self.colors["green"] = createColor(
self.stage, self.materials_path + "/green", [0, 1, 0]
)
self.colors["blue"] = createColor(
self.stage, self.materials_path + "/blue", [0, 0, 1]
)
self.colors["white"] = createColor(
self.stage, self.materials_path + "/white", [1, 1, 1]
)
self.colors["grey"] = createColor(
self.stage, self.materials_path + "/grey", [0.5, 0.5, 0.5]
)
self.colors["dark_grey"] = createColor(
self.stage, self.materials_path + "/dark_grey", [0.25, 0.25, 0.25]
)
self.colors["black"] = createColor(
self.stage, self.materials_path + "/black", [0, 0, 0]
)
def createArrowXform(self, path: str) -> None:
"""
Creates an Xform to store the arrow indicating the platform heading."""
self.arrow_path, self.arrow_prim = createXform(self.stage, path)
createArrow(
self.stage,
self.arrow_path,
0.1,
0.5,
[self.settings.radius, 0, 0],
self.settings.refinement,
)
applyMaterial(self.arrow_prim, self.colors["red"])
def createPositionMarkerXform(self, path: str) -> None:
"""
Creates an Xform to store the position marker."""
self.marker_path, self.marker_prim = createXform(self.stage, path)
sphere_path, sphere_geom = createSphere(
self.stage,
self.marker_path + "/marker_sphere_z_plus",
0.05,
self.settings.refinement,
)
setTranslate(sphere_geom, Gf.Vec3d([0, 0, self.settings.radius]))
applyMaterial(self.stage.GetPrimAtPath(sphere_path), self.colors["blue"])
sphere_path, sphere_geom = createSphere(
self.stage,
self.marker_path + "/marker_sphere_z_minus",
0.05,
self.settings.refinement,
)
setTranslate(sphere_geom, Gf.Vec3d([0, 0, -self.settings.radius]))
applyMaterial(self.stage.GetPrimAtPath(sphere_path), self.colors["blue"])
sphere_path, sphere_geom = createSphere(
self.stage,
self.marker_path + "/marker_sphere_y_plus",
0.05,
self.settings.refinement,
)
setTranslate(sphere_geom, Gf.Vec3d([0, self.settings.radius, 0]))
applyMaterial(self.stage.GetPrimAtPath(sphere_path), self.colors["green"])
sphere_path, sphere_geom = createSphere(
self.stage,
self.marker_path + "/marker_sphere_y_minus",
0.05,
self.settings.refinement,
)
setTranslate(sphere_geom, Gf.Vec3d([0, -self.settings.radius, 0]))
applyMaterial(self.stage.GetPrimAtPath(sphere_path), self.colors["green"])
sphere_path, sphere_geom = createSphere(
self.stage,
self.marker_path + "/marker_sphere_x_plus",
0.05,
self.settings.refinement,
)
setTranslate(sphere_geom, Gf.Vec3d([self.settings.radius, 0, 0]))
applyMaterial(self.stage.GetPrimAtPath(sphere_path), self.colors["red"])
sphere_path, sphere_geom = createSphere(
self.stage,
self.marker_path + "/marker_sphere_x_minus",
0.05,
self.settings.refinement,
)
setTranslate(sphere_geom, Gf.Vec3d([-self.settings.radius, 0, 0]))
applyMaterial(self.stage.GetPrimAtPath(sphere_path), self.colors["red"])
def createRigidSphere(
self, path: str, name: str, radius: float, CoM: list, mass: float
) -> str:
"""
Creates a rigid sphere. The sphere is a RigidBody, a Collider, and has a mass and CoM.
It is used to create the main body of the platform."""
# Creates an Xform to store the core body
path, prim = createXform(self.stage, path)
# Creates a sphere
sphere_path, sphere_geom = createSphere(
self.stage, path + "/" + name, radius, self.settings.refinement
)
sphere_prim = self.stage.GetPrimAtPath(sphere_geom.GetPath())
applyRigidBody(sphere_prim)
# Sets the collider
applyCollider(sphere_prim, self.settings.enable_collision)
# Sets the mass and CoM
applyMass(sphere_prim, mass, CoM)
return sphere_path
def createRigidCylinder(
self, path: str, name: str, radius: float, height: float, CoM: list, mass: float
) -> str:
"""
Creates a rigid cylinder. The cylinder is a RigidBody, a Collider, and has a mass and CoM.
It is used to create the main body of the platform."""
# Creates an Xform to store the core body
path, prim = createXform(self.stage, path)
        # Creates a cylinder
        cylinder_path, cylinder_geom = createCylinder(
            self.stage, path + "/" + name, radius, height, self.settings.refinement
        )
        cylinder_prim = self.stage.GetPrimAtPath(cylinder_geom.GetPath())
        applyRigidBody(cylinder_prim)
        # Sets the collider
        applyCollider(cylinder_prim, self.settings.enable_collision)
        # Sets the mass and CoM
        applyMass(cylinder_prim, mass, CoM)
        return cylinder_path
def createVirtualThruster(
self, path: str, joint_path: str, parent_path: str, thruster_mass, thruster_CoM
) -> str:
"""
Creates a virtual thruster. The thruster is a RigidBody, a Collider, and has a mass and CoM.
It is used to create the thrusters of the platform."""
# Create Xform
thruster_path, thruster_prim = createXform(self.stage, path)
# Add shapes
setTranslate(thruster_prim, Gf.Vec3d([0, 0, 0]))
setOrient(thruster_prim, Gf.Quatd(1, Gf.Vec3d([0, 0, 0])))
# Make rigid
applyRigidBody(thruster_prim)
# Add mass
applyMass(thruster_prim, thruster_mass, thruster_CoM)
# Create joint
createFixedJoint(self.stage, joint_path, parent_path, thruster_path)
return thruster_path
def createCamera(self) -> None:
"""
Creates a camera module prim.
"""
self.camera = sensor_module_factory.get(
self.camera_cfg["module_name"]
)(self.camera_cfg)
self.camera.build()
class ModularFloatingPlatform(Robot):
def __init__(
self,
prim_path: str,
cfg: dict,
name: Optional[str] = "modular_floating_platform",
usd_path: Optional[str] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
scale: Optional[np.array] = None,
) -> None:
"""[summary]"""
self._usd_path = usd_path
self._name = name
fp = CreatePlatform(prim_path, cfg)
fp.build()
super().__init__(
prim_path=prim_path,
name=name,
translation=translation,
orientation=orientation,
scale=scale,
)
| 13,720 |
Python
| 34.918848 | 100 | 0.581268 |
elharirymatteo/RANS/omniisaacgymenvs/robots/articulations/test/test_AMR_4W_SS.py
|
if __name__ == "__main__":
from omni.isaac.kit import SimulationApp
cfg = {
"headless": False,
}
simulation_app = SimulationApp(cfg)
from omni.isaac.core import World
import omni
from omniisaacgymenvs.robots.articulations.AMR_4WheelsSkidSteer import (
AMR_4W_SS,
SkidSteerParameters,
)
from pxr import UsdLux
timeline = omni.timeline.get_timeline_interface()
world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
light = UsdLux.DistantLight.Define(world.stage, "/DistantLight")
light.CreateIntensityAttr(3000.0)
physics_ctx = world.get_physics_context()
physics_ctx.set_solver_type("PGS")
# Clearpath Robotics' Husky
Husky = {
"shape": {
"name": "Cube",
"width": 0.670,
"depth": 0.990,
"height": 0.260,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 50.0,
"front_left_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [
0.544 / 2,
-0.670 / 2 - 0.125 / 2,
-0.260 / 2 + 0.330 / 2 - 0.130,
],
"orientation": [-90, 0, 0],
},
"front_right_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [
0.544 / 2,
0.670 / 2 + 0.125 / 2,
-0.260 / 2 + 0.330 / 2 - 0.130,
],
"orientation": [-90, 0, 0],
},
"rear_left_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [
-0.544 / 2,
-0.670 / 2 - 0.125 / 2,
-0.260 / 2 + 0.330 / 2 - 0.130,
],
"orientation": [-90, 0, 0],
},
"rear_right_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.330 / 2,
"height": 0.125,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [
-0.544 / 2,
0.670 / 2 + 0.125 / 2,
-0.260 / 2 + 0.330 / 2 - 0.130,
],
"orientation": [-90, 0, 0],
},
}
AMR_4W_SS("/Husky", cfg={"system": Husky}, translation=[0, 0, 0.3])
world.reset()
for i in range(100):
world.step(render=True)
timeline.play()
while simulation_app.is_running():
world.step(render=True)
timeline.stop()
simulation_app.close()
| 5,701 |
Python
| 29.010526 | 76 | 0.349412 |
elharirymatteo/RANS/omniisaacgymenvs/robots/articulations/test/test_AMR_2W_SS.py
|
if __name__ == "__main__":
from omni.isaac.kit import SimulationApp
cfg = {
"headless": False,
}
simulation_app = SimulationApp(cfg)
from omni.isaac.core import World
import omni
from omniisaacgymenvs.robots.articulations.AMR_2WheelsSkidSteer import (
AMR_2W_SS,
SkidSteerParameters,
)
from pxr import UsdLux
timeline = omni.timeline.get_timeline_interface()
world = World(stage_units_in_meters=1.0)
world.scene.add_default_ground_plane()
light = UsdLux.DistantLight.Define(world.stage, "/DistantLight")
light.CreateIntensityAttr(3000.0)
physics_ctx = world.get_physics_context()
physics_ctx.set_solver_type("PGS")
# Kobuki's Turtlebot 2
Turtlebot2 = {
"shape": {
"name": "Cylinder",
"radius": 0.354 / 2,
"height": 0.420,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 6.5,
"left_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [0.0, -0.24 / 2, -0.420 / 2 + 0.076 / 2 - 0.015],
"orientation": [-90, 0, 0],
},
"right_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [0.0, 0.24 / 2, -0.420 / 2 + 0.076 / 2 - 0.015],
"orientation": [-90, 0, 0],
},
"passive_wheels": [
{
"name": "ZeroFrictionSphere",
"radius": 0.076 / 2,
"offset": [-0.24 / 2, 0.0, -0.420 / 2 + 0.076 / 2 - 0.015],
},
{
"name": "ZeroFrictionSphere",
"radius": 0.076 / 2,
"offset": [0.24 / 2, 0.0, -0.420 / 2 + 0.076 / 2 - 0.015],
},
],
}
AMR_2W_SS("/Turtlebot2", cfg={"system": Turtlebot2}, translation=[0, 0, 0.3])
# Kobuki's Turtlebot 2
Turtlebot2_caster = {
"shape": {
"name": "Cylinder",
"radius": 0.354 / 2,
"height": 0.420,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 6.5,
"left_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [0.0, -0.24 / 2, -0.420 / 2 + 0.076 / 2 - 0.015],
"orientation": [-90, 0, 0],
},
"right_wheel": {
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"actuator": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": True,
"damping": 1e10,
"stiffness": 0.0,
},
"offset": [0.0, 0.24 / 2, -0.420 / 2 + 0.076 / 2 - 0.015],
"orientation": [-90, 0, 0],
},
"passive_wheels": [
{
"name": "CasterWheel",
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"wheel_joint": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": False,
},
"caster_joint": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": False,
},
"caster_offset": [-0.24 / 2, 0.0, -0.420 / 2 + 0.076 - 0.015],
"wheel_offset": [-0.24 / 2, 0.0, -0.420 / 2 + 0.076 / 2 - 0.015],
"wheel_orientation": [-90, 0, 0],
},
{
"name": "CasterWheel",
"wheel": {
"visual_shape": {
"name": "Cylinder",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": False,
"is_rigid": False,
"refinement": 2,
},
"collider_shape": {
"name": "Capsule",
"radius": 0.076 / 2,
"height": 0.04,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
},
"mass": 0.05,
},
"wheel_joint": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": False,
},
"caster_joint": {
"name": "RevoluteJoint",
"axis": "Z",
"enable_drive": False,
},
"caster_offset": [0.24 / 2, 0.0, -0.420 / 2 + 0.076 - 0.015],
"wheel_offset": [0.24 / 2, 0.0, -0.420 / 2 + 0.076 / 2 - 0.015],
"wheel_orientation": [-90, 0, 0],
},
],
}
AMR_2W_SS(
"/Turtlebot2_Caster",
cfg={"system": Turtlebot2_caster},
translation=[1.0, 0, 0.3],
)
world.reset()
for i in range(100):
world.step(render=True)
timeline.play()
while simulation_app.is_running():
world.step(render=True)
timeline.stop()
simulation_app.close()
| 8,904 |
Python
| 31.264493 | 81 | 0.336478 |
elharirymatteo/RANS/omniisaacgymenvs/robots/articulations/views/MFP3D_view.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class ModularFloatingPlatformView(ArticulationView):
def __init__(
self, prim_paths_expr: str, name: Optional[str] = "ModularFloatingPlatformView"
) -> None:
"""[summary]"""
super().__init__(
prim_paths_expr=prim_paths_expr,
name=name,
)
self.base = RigidPrimView(
prim_paths_expr=f"/World/envs/.*/Modular_floating_platform/core/body",
name="base_view",
)
self.CoM = RigidPrimView(
prim_paths_expr=f"/World/envs/.*/Modular_floating_platform/movable_CoM/CoM",
name="CoM_view",
)
self.thrusters = RigidPrimView(
prim_paths_expr=f"/World/envs/.*/Modular_floating_platform/v_thruster_*",
name="thrusters",
)
def get_CoM_indices(self):
self.CoM_shifter_indices = [
self.get_dof_index("com_x_axis_joint"),
self.get_dof_index("com_y_axis_joint"),
self.get_dof_index("com_z_axis_joint"),
]
def get_plane_lock_indices(self):
self.lock_indices = []
| 1,526 |
Python
| 30.163265 | 88 | 0.599607 |
elharirymatteo/RANS/omniisaacgymenvs/robots/articulations/utils/Types.py
|
import omniisaacgymenvs.robots.articulations.utils.MFP_utils as pxr_utils
from pxr import Usd, Gf, UsdShade, UsdPhysics
from scipy.spatial.transform import Rotation
from dataclasses import dataclass, field
from typing import Tuple
class TypeFactoryBuilder:
def __init__(self):
self.creators = {}
def register_instance(self, type):
self.creators[type.__name__] = type
    def get_item(self, params):
        assert "name" in params, "The name of the type must be provided."
        assert params["name"] in self.creators, f"Unknown type: {params['name']}."
        return self.creators[params["name"]](**params)
####################################################################################################
## Define the types of the geometric primitives
####################################################################################################
@dataclass
class GeometricPrimitive:
refinement: int = 2
has_collider: bool = False
is_rigid: bool = False
def __post_init__(self):
assert self.refinement > 0, "The refinement level must be larger than 0."
self.refinement = int(self.refinement)
def build(self, stage: Usd.Stage, path: str = None) -> Tuple[str, Usd.Prim]:
raise NotImplementedError
def add_positional_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
raise NotImplementedError
def add_orientation_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
raise NotImplementedError
@dataclass
class Cylinder(GeometricPrimitive):
name: str = "Cylinder"
radius: float = 0.1
height: float = 0.1
    def __post_init__(self):
        assert self.radius > 0, "The radius must be larger than 0."
        assert self.height > 0, "The height must be larger than 0."
        assert self.refinement > 0, "The refinement level must be larger than 0."
        self.refinement = int(self.refinement)
def build(self, stage: Usd.Stage, path: str = None) -> Tuple[str, Usd.Prim]:
path, geom = pxr_utils.createCylinder(
stage, path, self.radius, self.height, self.refinement
)
prim = stage.GetPrimAtPath(path)
if self.has_collider:
pxr_utils.applyCollider(prim, enable=True)
if self.is_rigid:
pxr_utils.applyRigidBody(prim)
return path, prim
def add_positional_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
marker_path, marker_prim = pxr_utils.createXform(stage, path)
sphere_path, sphere_geom = pxr_utils.createSphere(
stage,
marker_path + "/marker_sphere",
0.05,
self.refinement,
)
pxr_utils.setTranslate(sphere_geom, Gf.Vec3d([0, 0, self.height / 2]))
pxr_utils.applyMaterial(marker_prim, color)
def add_orientation_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
pxr_utils.createArrow(
stage,
path,
0.1,
0.5,
[self.radius, 0, 0],
self.refinement,
)
marker_prim = stage.GetPrimAtPath(path)
pxr_utils.applyMaterial(marker_prim, color)
@dataclass
class Sphere(GeometricPrimitive):
name: str = "Sphere"
radius: float = 0.1
def __post_init__(self):
assert self.radius > 0, "The radius must be larger than 0."
assert self.refinement > 0, "The refinement level must be larger than 0."
self.refinement = int(self.refinement)
def build(self, stage: Usd.Stage, path: str = None) -> Tuple[str, Usd.Prim]:
path, geom = pxr_utils.createSphere(stage, path, self.radius, self.refinement)
prim = stage.GetPrimAtPath(path)
if self.has_collider:
pxr_utils.applyCollider(prim, enable=True)
if self.is_rigid:
pxr_utils.applyRigidBody(prim)
return path, prim
def add_positional_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
marker_path, marker_prim = pxr_utils.createXform(stage, path)
sphere_path, sphere_geom = pxr_utils.createSphere(
stage,
marker_path + "/marker_sphere",
0.05,
self.refinement,
)
pxr_utils.setTranslate(sphere_geom, Gf.Vec3d([0, 0, self.radius]))
pxr_utils.applyMaterial(marker_prim, color)
def add_orientation_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
marker_path, marker_prim = pxr_utils.createXform(stage, path)
pxr_utils.createArrow(
stage,
marker_path + "/marker_arrow",
0.1,
0.5,
[self.radius, 0, 0],
self.refinement,
)
pxr_utils.applyMaterial(marker_prim, color)
@dataclass
class Capsule(GeometricPrimitive):
name: str = "Capsule"
radius: float = 0.1
height: float = 0.1
    def __post_init__(self):
        assert self.radius > 0, "The radius must be larger than 0."
        assert self.height > 0, "The height must be larger than 0."
        assert self.refinement > 0, "The refinement level must be larger than 0."
        self.refinement = int(self.refinement)
def build(self, stage: Usd.Stage, path: str = None) -> Tuple[str, Usd.Prim]:
path, geom = pxr_utils.createCapsule(
stage, path, self.radius, self.height, self.refinement
)
prim = stage.GetPrimAtPath(path)
if self.has_collider:
pxr_utils.applyCollider(prim, enable=True)
if self.is_rigid:
pxr_utils.applyRigidBody(prim)
return path, prim
def add_positional_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
marker_path, marker_prim = pxr_utils.createXform(stage, path)
sphere_path, sphere_geom = pxr_utils.createSphere(
stage,
marker_path + "/marker_sphere",
0.05,
self.refinement,
)
pxr_utils.setTranslate(
sphere_geom, Gf.Vec3d([0, 0, self.height / 2 + self.radius])
)
pxr_utils.applyMaterial(marker_prim, color)
def add_orientation_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
marker_path, marker_prim = pxr_utils.createXform(stage, path)
pxr_utils.createArrow(
stage,
marker_path + "/marker_arrow",
0.1,
0.5,
[self.radius, 0, 0],
self.refinement,
)
pxr_utils.applyMaterial(marker_prim, color)
@dataclass
class Cube(GeometricPrimitive):
name: str = "Cube"
depth: float = 0.1
width: float = 0.1
height: float = 0.1
def __post_init__(self):
assert self.depth > 0, "The depth must be larger than 0."
assert self.width > 0, "The width must be larger than 0."
assert self.height > 0, "The height must be larger than 0."
assert self.refinement > 0, "The refinement level must be larger than 0."
self.refinement = int(self.refinement)
def build(self, stage: Usd.Stage, path: str = None) -> Tuple[str, Usd.Prim]:
path, prim = pxr_utils.createXform(stage, path)
body_path, body_geom = pxr_utils.createCube(
stage, path + "/body", self.depth, self.width, self.height, self.refinement
)
if self.has_collider:
prim = stage.GetPrimAtPath(body_path)
pxr_utils.applyCollider(prim, enable=True)
if self.is_rigid:
prim = stage.GetPrimAtPath(path)
pxr_utils.applyRigidBody(prim)
return path, prim
def add_positional_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
marker_path, marker_prim = pxr_utils.createXform(stage, path)
sphere_path, sphere_geom = pxr_utils.createSphere(
stage,
marker_path + "/marker_sphere",
0.05,
self.refinement,
)
pxr_utils.setTranslate(sphere_geom, Gf.Vec3d([0, 0, self.height / 2]))
pxr_utils.applyMaterial(marker_prim, color)
def add_orientation_marker(
self, stage: Usd.Stage, path: str, color: UsdShade.Material
) -> None:
marker_path, marker_prim = pxr_utils.createXform(stage, path)
pxr_utils.createArrow(
stage,
marker_path + "/marker_arrow",
0.1,
0.5,
[self.depth / 2, 0, 0],
self.refinement,
)
pxr_utils.applyMaterial(marker_prim, color)
GeometricPrimitiveFactory = TypeFactoryBuilder()
GeometricPrimitiveFactory.register_instance(Cylinder)
GeometricPrimitiveFactory.register_instance(Sphere)
GeometricPrimitiveFactory.register_instance(Capsule)
GeometricPrimitiveFactory.register_instance(Cube)
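# A minimal sketch of building a primitive through the factory (illustrative
# only; assumes an open USD stage, e.g. omni.usd.get_context().get_stage(),
# and a free "/World/example_capsule" path). The result is a collidable,
# rigid capsule refined twice for display.
def _geometric_primitive_sketch(stage: Usd.Stage) -> Tuple[str, Usd.Prim]:
    capsule = GeometricPrimitiveFactory.get_item(
        {
            "name": "Capsule",
            "radius": 0.1,
            "height": 0.2,
            "has_collider": True,
            "is_rigid": True,
            "refinement": 2,
        }
    )
    return capsule.build(stage, "/World/example_capsule")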
####################################################################################################
## Define the type of physics materials
####################################################################################################
@dataclass
class SimpleColorTexture:
r: float = 0.0
g: float = 0.0
b: float = 0.0
roughness: float = 0.5
def __post_init__(self):
assert 0 <= self.r <= 1, "The red channel must be between 0 and 1."
assert 0 <= self.g <= 1, "The green channel must be between 0 and 1."
assert 0 <= self.b <= 1, "The blue channel must be between 0 and 1."
assert 0 <= self.roughness <= 1, "The roughness must be between 0 and 1."
@dataclass
class PhysicsMaterial:
static_friction: float = 0.5
dynamic_friction: float = 0.5
restitution: float = 0.5
friction_combine_mode: str = "average"
restitution_combine_mode: str = "average"
def __post_init__(self):
combine_modes = ["average", "min", "max", "multiply"]
assert (
0 <= self.static_friction <= 1
), "The static friction must be between 0 and 1."
assert (
0 <= self.dynamic_friction <= 1
), "The dynamic friction must be between 0 and 1."
assert 0 <= self.restitution <= 1, "The restitution must be between 0 and 1."
assert (
self.friction_combine_mode in combine_modes
), "The friction combine mode must be one of 'average', 'min', 'max', or 'multiply'."
assert (
self.restitution_combine_mode in combine_modes
), "The restitution combine mode must be one of 'average', 'min', 'max', or 'multiply'."
def build(self, stage, material_path):
material = pxr_utils.createPhysicsMaterial(
stage,
material_path,
static_friction=self.static_friction,
dynamic_friction=self.dynamic_friction,
restitution=self.restitution,
friction_combine_mode=self.friction_combine_mode,
restitution_combine_mode=self.restitution_combine_mode,
)
return material
####################################################################################################
## Define the type of joint actuators
####################################################################################################
@dataclass
class PrismaticJoint:
name: str = "PrismaticActuator"
axis: str = "X"
lower_limit: float = None
upper_limit: float = None
velocity_limit: float = None
enable_drive: bool = False
force_limit: float = None
damping: float = 1e10
stiffness: float = 0.0
def __post_init__(self):
if (self.lower_limit is not None) and (self.upper_limit is not None):
assert (
self.lower_limit < self.upper_limit
), "The lower limit must be smaller than the upper limit."
if self.velocity_limit is not None:
assert self.velocity_limit > 0, "The velocity limit must be larger than 0."
if self.force_limit is not None:
assert self.force_limit > 0, "The force limit must be larger than 0."
assert self.damping >= 0, "The damping must be larger than 0."
assert self.stiffness >= 0, "The stiffness must be larger than or equal to 0."
def build(
self,
stage: Usd.Stage,
joint_path: str,
body1_path: str,
body2_path: str,
) -> UsdPhysics.PrismaticJoint:
joint = pxr_utils.createPrismaticJoint(
stage,
joint_path,
body1_path,
body2_path,
axis=self.axis,
limit_low=self.lower_limit,
limit_high=self.upper_limit,
enable_drive=self.enable_drive,
damping=self.damping,
stiffness=self.stiffness,
force_limit=self.force_limit,
)
return joint
@dataclass
class RevoluteJoint:
name: str = "RevoluteActuator"
axis: str = "X"
lower_limit: float = None
upper_limit: float = None
velocity_limit: float = None
enable_drive: bool = False
force_limit: float = None
damping: float = 1e10
stiffness: float = 0.0
def __post_init__(self):
if (self.lower_limit is not None) and (self.upper_limit is not None):
assert (
self.lower_limit < self.upper_limit
), "The lower limit must be smaller than the upper limit."
if self.velocity_limit is not None:
assert self.velocity_limit > 0, "The velocity limit must be larger than 0."
if self.force_limit is not None:
assert self.force_limit > 0, "The force limit must be larger than 0."
assert self.damping >= 0, "The damping must be larger than 0."
assert self.stiffness >= 0, "The stiffness must be larger than or equal to 0."
def build(
self,
stage: Usd.Stage,
joint_path: str,
body1_path: str,
body2_path: str,
) -> UsdPhysics.RevoluteJoint:
joint = pxr_utils.createRevoluteJoint(
stage,
joint_path,
body1_path,
body2_path,
axis=self.axis,
limit_low=self.lower_limit,
limit_high=self.upper_limit,
enable_drive=self.enable_drive,
damping=self.damping,
stiffness=self.stiffness,
force_limit=self.force_limit,
)
return joint
JointActuatorFactory = TypeFactoryBuilder()
JointActuatorFactory.register_instance(PrismaticJoint)
JointActuatorFactory.register_instance(RevoluteJoint)
####################################################################################################
## Define different type of dynamics
####################################################################################################
@dataclass
class ZeroOrderDynamics:
name: str = "zero_order"
@dataclass
class FirstOrderDynamics:
name: str = "first_order"
time_constant: float = 0.1
delay: float = 0.0
def __post_init__(self):
assert self.time_constant > 0, "The time constant must be larger than 0."
assert self.delay >= 0, "The delay must be larger than or equal to 0."
@dataclass
class SecondOrderDynamics:
name: str = "second_order"
damping_ratio: float = 0.7
natural_frequency: float = 1.0
delay: float = 0.0
def __post_init__(self):
assert (
0 <= self.damping_ratio <= 1
), "The damping ratio must be between 0 and 1."
assert (
self.natural_frequency > 0
), "The natural frequency must be larger than 0."
assert self.delay >= 0, "The delay must be larger than or equal to 0."
DynamicsFactory = TypeFactoryBuilder()
DynamicsFactory.register_instance(ZeroOrderDynamics)
DynamicsFactory.register_instance(FirstOrderDynamics)
DynamicsFactory.register_instance(SecondOrderDynamics)
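# The dynamics dataclasses above only carry parameters; the integration itself
# lives in the actuator models that consume them. For reference, a first-order
# lag with time constant tau is commonly discretized as
#   x[k+1] = x[k] + (dt / tau) * (u[k] - x[k]).
# A minimal sketch of that update (illustrative only, not the integrator used
# by the simulation):
def _first_order_lag_sketch(
    x: float, u: float, dt: float, dyn: FirstOrderDynamics
) -> float:
    return x + (dt / dyn.time_constant) * (u - x)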
####################################################################################################
## Define the type of high level actuators
####################################################################################################
@dataclass
class Wheel:
visual_shape: GeometricPrimitive = field(default_factory=dict)
collider_shape: GeometricPrimitive = field(default_factory=dict)
mass: float = 1.0
# physics_material: PhysicsMaterial = field(default_factory=dict)
# visual_material: SimpleColorTexture = field(default_factory=dict)
def __post_init__(self):
# Force the collision shape to have a collider
self.collider_shape["has_collider"] = True
# Force the visual and collision shapes to be non-rigid
self.collider_shape["is_rigid"] = False
self.visual_shape["is_rigid"] = False
self.visual_shape = GeometricPrimitiveFactory.get_item(self.visual_shape)
self.collider_shape = GeometricPrimitiveFactory.get_item(self.collider_shape)
# self.physics_material = PhysicsMaterial(**self.physics_material)
# self.visual_material = SimpleColorTexture(**self.visual_material)
def build(self, stage: Usd.Stage, path: str = None) -> Tuple[str, Usd.Prim]:
wheel_path, wheel_prim = pxr_utils.createXform(stage, path)
visual_path, visual_prim = self.visual_shape.build(stage, path + "/visual")
collider_path, collider_prim = self.collider_shape.build(
stage, path + "/collision"
)
collider_prim.GetAttribute("visibility").Set("invisible")
pxr_utils.applyRigidBody(wheel_prim)
pxr_utils.applyMass(wheel_prim, self.mass)
# pxr_utils.applyMaterial(visual_prim, self.visual_material)
# pxr_utils.applyMaterial(collision_prim, self.visual_material)
return wheel_path, wheel_prim
@dataclass
class DirectDriveWheel:
wheel: Wheel = field(default_factory=dict)
actuator: RevoluteJoint = field(default_factory=dict)
# dynamics: dict = field(default_factory=dict)
offset: Tuple = (0, 0, 0)
orientation: Tuple = (0, 90, 0)
def __post_init__(self):
self.wheel = Wheel(**self.wheel)
self.actuator = JointActuatorFactory.get_item(self.actuator)
# self.dynamics = DynamicsFactory.get_item(self.dynamics)
def build(
self,
stage: Usd.Stage,
joint_path: str = None,
wheel_path: str = None,
body_path: str = None,
) -> Tuple[str, Usd.Prim]:
# Create the wheel
wheel_path, wheel_prim = self.wheel.build(stage, wheel_path)
pxr_utils.setTranslate(wheel_prim, Gf.Vec3d(*self.offset))
q_xyzw = Rotation.from_euler("xyz", self.orientation, degrees=True).as_quat()
pxr_utils.setOrient(
wheel_prim, Gf.Quatd(q_xyzw[3], Gf.Vec3d([q_xyzw[0], q_xyzw[1], q_xyzw[2]]))
)
# Create the joint
self.actuator.build(stage, joint_path, body_path, wheel_path)
return wheel_path, wheel_prim
@dataclass
class ZeroFrictionSphere:
name: str = "ZeroFrictionSphere"
radius: float = 0.1
mass: float = 1.0
offset: Tuple = (0, 0, 0)
def __post_init__(self):
assert self.radius > 0, "The radius must be larger than 0."
assert self.mass > 0, "The mass must be larger than 0."
self.zero_friction = {
"static_friction": 0.0,
"dynamic_friction": 0.0,
"restitution": 0.8,
"friction_combine_mode": "min",
"restitution_combine_mode": "average",
}
shape = {
"name": "Sphere",
"radius": self.radius,
"has_collider": True,
"is_rigid": True,
"refinement": 2,
}
self.shape = GeometricPrimitiveFactory.get_item(shape)
def build(
self,
stage: Usd.Stage,
joint_path: str = None,
material_path: str = None,
path: str = None,
body_path: str = None,
) -> Tuple[str, Usd.Prim]:
path, prim = self.shape.build(stage, path)
pxr_utils.applyMass(prim, self.mass)
pxr_utils.setTranslate(prim, Gf.Vec3d(*self.offset))
pxr_utils.createFixedJoint(stage, joint_path, body_path, path)
        if not stage.GetPrimAtPath(material_path).IsValid():
            PhysicsMaterial(**self.zero_friction).build(stage, material_path)
        mat = UsdShade.Material.Get(stage, material_path)
pxr_utils.applyMaterial(prim, mat, purpose="physics")
return path, prim
@dataclass
class CasterWheel:
name: str = "CasterWheel"
wheel: Wheel = field(default_factory=dict)
wheel_joint: RevoluteJoint = field(default_factory=dict)
caster_joint: RevoluteJoint = field(default_factory=dict)
caster_offset: Tuple = (0, 0, 0)
wheel_offset: Tuple = (0, 0, 0)
wheel_orientation: Tuple = (90, 0, 0)
def __post_init__(self):
self.wheel = Wheel(**self.wheel)
self.caster_joint["name"] = "RevoluteJoint"
self.wheel_joint["name"] = "RevoluteJoint"
self.caster_joint["enable_drive"] = False
self.wheel_joint["enable_drive"] = False
self.caster_joint = JointActuatorFactory.get_item(self.caster_joint)
self.wheel_joint = JointActuatorFactory.get_item(self.wheel_joint)
def build(
self,
stage: Usd.Stage,
joint_path: str = None,
material_path: str = None,
path: str = None,
body_path: str = None,
) -> Tuple[str, Usd.Prim]:
# Create the xform that will hold the caster wheel
caster_wheel_path, caster_wheel_prim = pxr_utils.createXform(stage, path)
# Create the wheel
wheel_path, wheel_prim = self.wheel.build(stage, caster_wheel_path + "/wheel")
pxr_utils.setTranslate(wheel_prim, Gf.Vec3d(*self.wheel_offset))
q_xyzw = Rotation.from_euler(
"xyz", self.wheel_orientation, degrees=True
).as_quat()
pxr_utils.setOrient(
wheel_prim, Gf.Quatd(q_xyzw[3], Gf.Vec3d([q_xyzw[0], q_xyzw[1], q_xyzw[2]]))
)
# Create the caster
caster_path, caster_prim = pxr_utils.createXform(
stage, caster_wheel_path + "/caster"
)
pxr_utils.applyRigidBody(caster_prim)
pxr_utils.applyMass(caster_prim, 0.0005)
pxr_utils.setTranslate(caster_prim, Gf.Vec3d(*self.caster_offset))
# Create the joints
self.caster_joint.build(stage, joint_path + "_caster", body_path, caster_path)
self.wheel_joint.build(stage, joint_path + "_wheel", caster_path, wheel_path)
return wheel_path, wheel_prim
PassiveWheelFactory = TypeFactoryBuilder()
PassiveWheelFactory.register_instance(ZeroFrictionSphere)
PassiveWheelFactory.register_instance(CasterWheel)
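# A minimal sketch tying the passive-wheel factory to the config dicts used by
# the robot test scripts (illustrative only; assumes an open USD stage, a free
# set of paths under "/World/robot", and an existing rigid body at `body_path`
# to attach the wheel to).
def _passive_wheel_sketch(stage: Usd.Stage, body_path: str) -> Tuple[str, Usd.Prim]:
    wheel = PassiveWheelFactory.get_item(
        {
            "name": "ZeroFrictionSphere",
            "radius": 0.076 / 2,
            "offset": (0.12, 0.0, -0.18),
        }
    )
    return wheel.build(
        stage,
        joint_path="/World/robot/joints/passive_wheel",
        material_path="/World/Looks/zero_friction_material",
        path="/World/robot/passive_wheel",
        body_path=body_path,
    )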
| 22,789 |
Python
| 34.665102 | 100 | 0.586643 |
elharirymatteo/RANS/omniisaacgymenvs/robots/articulations/utils/MFP_utils.py
|
__author__ = "Antoine Richard, Matteo El Hariry, Junnosuke Kamohara"
__copyright__ = (
"Copyright 2023-24, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "2.1.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
import omni
from typing import List, Tuple
from pxr import Gf, UsdPhysics, UsdGeom, UsdShade, Sdf, Usd, PhysxSchema
import numpy as np
# ==================================================================================================
# Utils for Xform manipulation
# ==================================================================================================
def setXformOp(prim: Usd.Prim, value, property: UsdGeom.XformOp.Type) -> None:
"""
    Sets a transform operation on a prim.
Args:
prim (Usd.Prim): The prim to set the transform operation.
value: The value of the transform operation.
property (UsdGeom.XformOp.Type): The type of the transform operation.
"""
xform = UsdGeom.Xformable(prim)
op = None
for xformOp in xform.GetOrderedXformOps():
if xformOp.GetOpType() == property:
op = xformOp
if op:
xform_op = op
else:
xform_op = xform.AddXformOp(property, UsdGeom.XformOp.PrecisionDouble, "")
xform_op.Set(value)
def setScale(prim: Usd.Prim, value: Gf.Vec3d) -> None:
"""
Sets the scale of a prim.
Args:
prim (Usd.Prim): The prim to set the scale.
value (Gf.Vec3d): The value of the scale.
"""
setXformOp(prim, value, UsdGeom.XformOp.TypeScale)
def setTranslate(prim: Usd.Prim, value: Gf.Vec3d) -> None:
"""
Sets the translation of a prim.
Args:
prim (Usd.Prim): The prim to set the translation.
value (Gf.Vec3d): The value of the translation.
"""
setXformOp(prim, value, UsdGeom.XformOp.TypeTranslate)
def setRotateXYZ(prim: Usd.Prim, value: Gf.Vec3d) -> None:
"""
Sets the rotation of a prim.
Args:
prim (Usd.Prim): The prim to set the rotation.
value (Gf.Vec3d): The value of the rotation.
"""
setXformOp(prim, value, UsdGeom.XformOp.TypeRotateXYZ)
def setOrient(prim: Usd.Prim, value: Gf.Quatd) -> None:
"""
Sets the rotation of a prim.
Args:
prim (Usd.Prim): The prim to set the rotation.
value (Gf.Quatd): The value of the rotation.
"""
setXformOp(prim, value, UsdGeom.XformOp.TypeOrient)
def setTransform(prim, value: Gf.Matrix4d) -> None:
"""
Sets the transform of a prim.
Args:
prim (Usd.Prim): The prim to set the transform.
value (Gf.Matrix4d): The value of the transform.
"""
setXformOp(prim, value, UsdGeom.XformOp.TypeTransform)
def setXformOps(
prim,
translate: Gf.Vec3d = Gf.Vec3d([0, 0, 0]),
orient: Gf.Quatd = Gf.Quatd(1, Gf.Vec3d([0, 0, 0])),
scale: Gf.Vec3d = Gf.Vec3d([1, 1, 1]),
) -> None:
"""
Sets the transform of a prim.
Args:
prim (Usd.Prim): The prim to set the transform.
translate (Gf.Vec3d): The value of the translation.
orient (Gf.Quatd): The value of the rotation.
scale (Gf.Vec3d): The value of the scale.
"""
setTranslate(prim, translate)
setOrient(prim, orient)
setScale(prim, scale)
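# A minimal usage sketch (illustrative only): placing a prim one meter along X
# and half a meter up, with identity rotation and unit scale.
def _set_xform_ops_sketch(prim: Usd.Prim) -> None:
    setXformOps(
        prim,
        translate=Gf.Vec3d([1.0, 0.0, 0.5]),
        orient=Gf.Quatd(1, Gf.Vec3d([0, 0, 0])),
        scale=Gf.Vec3d([1.0, 1.0, 1.0]),
    )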
def getTransform(prim: Usd.Prim, parent: Usd.Prim) -> Gf.Matrix4d:
"""
Gets the transform of a prim relative to its parent.
Args:
prim (Usd.Prim): The prim to get the transform.
parent (Usd.Prim): The parent of the prim.
"""
return UsdGeom.XformCache(0).ComputeRelativeTransform(prim, parent)[0]
# ==================================================================================================
# Utils for API manipulation
# ==================================================================================================
def applyMaterial(
prim: Usd.Prim,
material: UsdShade.Material,
purpose: str = None,
weaker_than_descendants=False,
) -> UsdShade.MaterialBindingAPI:
"""
Applies a material to a prim.
Args:
prim (Usd.Prim): The prim to apply the material.
material (UsdShade.Material): The material to apply.
purpose (None): The purpose of the material.
weaker_than_descendants (bool): The material is weaker than its descendants.
Returns:
UsdShade.MaterialBindingAPI: The MaterialBindingAPI.
"""
binder = UsdShade.MaterialBindingAPI.Apply(prim)
if purpose is None:
if weaker_than_descendants:
binder.Bind(
material,
bindingStrength=UsdShade.Tokens.weakerThanDescendants,
)
else:
binder.Bind(
material,
bindingStrength=UsdShade.Tokens.strongerThanDescendants,
)
else:
assert purpose in [
"allPurpose",
"all",
"preview",
"physics",
], "Purpose must be 'allPurpose', 'all', 'preview' or 'physics'."
if weaker_than_descendants:
binder.Bind(
material,
materialPurpose=purpose,
bindingStrength=UsdShade.Tokens.weakerThanDescendants,
)
else:
binder.Bind(
material,
materialPurpose=purpose,
bindingStrength=UsdShade.Tokens.strongerThanDescendants,
)
return binder
def applyRigidBody(prim: Usd.Prim) -> UsdPhysics.RigidBodyAPI:
"""
Applies a RigidBodyAPI to a prim.
Args:
prim (Usd.Prim): The prim to apply the RigidBodyAPI.
Returns:
UsdPhysics.RigidBodyAPI: The RigidBodyAPI.
"""
rigid = UsdPhysics.RigidBodyAPI.Apply(prim)
return rigid
def applyCollider(prim: Usd.Prim, enable: bool = False) -> UsdPhysics.CollisionAPI:
"""
Applies a ColliderAPI to a prim.
Args:
prim (Usd.Prim): The prim to apply the ColliderAPI.
enable (bool): Enable or disable the collider.
Returns:
UsdPhysics.CollisionAPI: The ColliderAPI.
"""
collider = UsdPhysics.CollisionAPI.Apply(prim)
collider.CreateCollisionEnabledAttr(enable)
return collider
def applyMass(
prim: Usd.Prim, mass: float, CoM: Gf.Vec3d = Gf.Vec3d([0, 0, 0])
) -> UsdPhysics.MassAPI:
"""
Applies a MassAPI to a prim.
Sets the mass and the center of mass of the prim.
Args:
prim (Usd.Prim): The prim to apply the MassAPI.
mass (float): The mass of the prim.
CoM (Gf.Vec3d): The center of mass of the prim.
Returns:
UsdPhysics.MassAPI: The MassAPI.
"""
massAPI = UsdPhysics.MassAPI.Apply(prim)
massAPI.CreateMassAttr().Set(mass)
massAPI.CreateCenterOfMassAttr().Set(CoM)
return massAPI
def createDrive(
joint: Usd.Prim,
token: str = "transX",
damping: float = 1e3,
stiffness: float = 1e6,
max_force: float = None,
) -> UsdPhysics.DriveAPI:
"""
Creates a DriveAPI on a joint.
List of allowed tokens:
"transX", "transY", "transZ", "linear"
"rotX", "rotY", "rotZ", "angular"
Args:
joint (Usd.Prim): The joint to apply the DriveAPI.
token (str, optional): The type of the drive.
damping (float, optional): The damping of the drive.
stiffness (float, optional): The stiffness of the drive.
max_force (float, optional): The maximum force of the drive.
Returns:
UsdPhysics.DriveAPI: The DriveAPI.
"""
driveAPI = UsdPhysics.DriveAPI.Apply(joint, token)
driveAPI.CreateTypeAttr("force")
driveAPI.CreateDampingAttr(damping)
driveAPI.CreateStiffnessAttr(stiffness)
if max_force is not None:
driveAPI.CreateMaxForceAttr(max_force)
return driveAPI
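# A minimal usage sketch (illustrative only): attaching an angular drive to an
# existing revolute joint prim, assuming an open stage and a valid joint path.
# High damping with zero stiffness yields a velocity drive; a non-zero
# stiffness turns it into a position drive.
def _drive_usage_sketch(stage: Usd.Stage, joint_path: str) -> UsdPhysics.DriveAPI:
    joint_prim = stage.GetPrimAtPath(joint_path)
    return createDrive(joint_prim, token="angular", damping=1e10, stiffness=0.0)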
def createLimit(
joint: Usd.Prim,
token: str = "transX",
low: float = None,
high: float = None,
) -> UsdPhysics.LimitAPI:
"""
Creates a LimitAPI on a joint.
List of allowed tokens:
"transX", "transY", "transZ", "linear"
"rotX", "rotY", "rotZ", "angular"
Args:
joint (Usd.Prim): The joint to apply the LimitAPI.
token (str, optional): The type of the limit.
low (float, optional): The lower limit of the joint.
high (float, optional): The upper limit of the joint.
Returns:
UsdPhysics.LimitAPI: The LimitAPI.
"""
limitAPI = UsdPhysics.LimitAPI.Apply(joint, token)
    if low is not None:
        limitAPI.CreateLowAttr(low)
    if high is not None:
        limitAPI.CreateHighAttr(high)
return limitAPI
# ==================================================================================================
# Utils for Geom manipulation
# ==================================================================================================
def createXform(
stage: Usd.Stage,
path: str,
) -> Tuple[str, Usd.Prim]:
"""
Creates an Xform prim.
And sets the default transform operations.
Args:
stage (Usd.Stage): The stage to create the Xform prim.
path (str): The path of the Xform prim.
Returns:
Tuple[str, Usd.Prim]: The path and the prim of the Xform prim.
"""
path = omni.usd.get_stage_next_free_path(stage, path, False)
prim = stage.DefinePrim(path, "Xform")
setXformOps(prim)
return path, prim
def refineShape(stage: Usd.Stage, path: str, refinement: int) -> None:
"""
Refines the geometry of a shape.
This operation is purely visual, it does not affect the physics simulation.
Args:
stage (Usd.Stage): The stage to refine the shape.
path (str): The path of the shape.
refinement (int): The number of times to refine the shape.
"""
prim = stage.GetPrimAtPath(path)
prim.CreateAttribute("refinementLevel", Sdf.ValueTypeNames.Int)
prim.GetAttribute("refinementLevel").Set(refinement)
prim.CreateAttribute("refinementEnableOverride", Sdf.ValueTypeNames.Bool)
prim.GetAttribute("refinementEnableOverride").Set(True)
def createSphere(
stage: Usd.Stage,
path: str,
radius: float,
refinement: int,
) -> Tuple[str, UsdGeom.Sphere]:
"""
Creates a sphere.
Args:
stage (Usd.Stage): The stage to create the sphere.
path (str): The path of the sphere.
radius (float): The radius of the sphere.
refinement (int): The number of times to refine the sphere.
Returns:
Tuple[str, UsdGeom.Sphere]: The path and the prim of the sphere.
"""
path = omni.usd.get_stage_next_free_path(stage, path, False)
sphere_geom = UsdGeom.Sphere.Define(stage, path)
sphere_geom.GetRadiusAttr().Set(radius)
setXformOps(sphere_geom)
refineShape(stage, path, refinement)
return path, sphere_geom
def createCylinder(
stage: Usd.Stage,
path: str,
radius: float,
height: float,
refinement: int,
) -> Tuple[str, UsdGeom.Cylinder]:
"""
Creates a cylinder.
Args:
stage (Usd.Stage): The stage to create the cylinder.
path (str): The path of the cylinder.
radius (float): The radius of the cylinder.
height (float): The height of the cylinder.
refinement (int): The number of times to refine the cylinder.
Returns:
Tuple[str, UsdGeom.Cylinder]: The path and the prim of the cylinder.
"""
path = omni.usd.get_stage_next_free_path(stage, path, False)
cylinder_geom = UsdGeom.Cylinder.Define(stage, path)
cylinder_geom.GetRadiusAttr().Set(radius)
cylinder_geom.GetHeightAttr().Set(height)
setXformOps(cylinder_geom)
refineShape(stage, path, refinement)
return path, cylinder_geom
def createCapsule(
stage: Usd.Stage,
path: str,
radius: float,
height: float,
refinement: int,
) -> Tuple[str, UsdGeom.Capsule]:
"""
Creates a capsule.
Args:
stage (Usd.Stage): The stage to create the capsule.
path (str): The path of the capsule.
radius (float): The radius of the capsule.
height (float): The height of the capsule.
refinement (int): The number of times to refine the capsule.
Returns:
Tuple[str, UsdGeom.Capsule]: The path and the prim of the capsule.
"""
path = omni.usd.get_stage_next_free_path(stage, path, False)
capsule_geom = UsdGeom.Capsule.Define(stage, path)
capsule_geom.GetRadiusAttr().Set(radius)
capsule_geom.GetHeightAttr().Set(height)
setXformOps(capsule_geom)
refineShape(stage, path, refinement)
return path, capsule_geom
def createCube(
stage: Usd.Stage,
path: str,
depth: float,
width: float,
height: float,
refinement: int,
) -> Tuple[str, UsdGeom.Cube]:
"""
Creates a cube.
Args:
stage (Usd.Stage): The stage to create the cube.
path (str): The path of the cube.
depth (float): The depth of the cube.
width (float): The width of the cube.
height (float): The height of the cube.
refinement (int): The number of times to refine the cube.
Returns:
Tuple[str, UsdGeom.Cube]: The path and the prim of the cube.
"""
path = omni.usd.get_stage_next_free_path(stage, path, False)
cube_geom = UsdGeom.Cube.Define(stage, path)
cube_geom.GetSizeAttr().Set(1)
setXformOps(cube_geom, scale=Gf.Vec3d([depth, width, height]))
refineShape(stage, path, refinement)
return path, cube_geom
def createCone(
stage: Usd.Stage,
path: str,
radius: float,
height: float,
refinement: int,
) -> Tuple[str, UsdGeom.Cone]:
"""
Creates a cone.
Args:
stage (Usd.Stage): The stage to create the cone.
path (str): The path of the cone.
radius (float): The radius of the cone.
height (float): The height of the cone.
refinement (int): The number of times to refine the cone.
Returns:
Tuple[str, UsdGeom.Cone]: The path and the prim of the cone.
"""
path = omni.usd.get_stage_next_free_path(stage, path, False)
cone_geom = UsdGeom.Cone.Define(stage, path)
cone_geom.GetRadiusAttr().Set(radius)
cone_geom.GetHeightAttr().Set(height)
setXformOps(cone_geom)
refineShape(stage, path, refinement)
return path, cone_geom
def createArrow(
stage: Usd.Stage,
    path: str,
radius: float,
length: float,
offset: list,
refinement: int,
) -> None:
"""
Creates an arrow.
Args:
stage (Usd.Stage): The stage to create the arrow.
path (str): The path of the arrow.
radius (float): The radius of the arrow.
length (float): The length of the arrow.
offset (list): The offset of the arrow.
        refinement (int): The number of times to refine the arrow.
    """
length = length / 2
body_path, body_geom = createCylinder(
stage, path + "/arrow_body", radius, length, refinement
)
setTranslate(body_geom, Gf.Vec3d([offset[0] + length * 0.5, 0, offset[2]]))
setOrient(body_geom, Gf.Quatd(0.707, Gf.Vec3d(0, 0.707, 0)))
head_path, head_geom = createCone(
stage, path + "/arrow_head", radius * 1.5, length, refinement
)
setTranslate(head_geom, Gf.Vec3d([offset[0] + length * 1.5, 0, offset[2]]))
setOrient(head_geom, Gf.Quatd(0.707, Gf.Vec3d(0, 0.707, 0)))
def createThrusterShape(
stage: Usd.Stage,
path: str,
radius: float,
height: float,
refinement: int,
) -> None:
"""
Creates a thruster.
Args:
stage (Usd.Stage): The stage to create the thruster.
path (str): The path of the thruster.
radius (float): The radius of the thruster.
height (float): The height of the thruster.
refinement (int): The number of times to refine the thruster.
    """
height /= 2
# Creates a cylinder
cylinder_path, cylinder_geom = createCylinder(
stage, path + "/cylinder", radius, height, refinement
)
cylinder_prim = stage.GetPrimAtPath(cylinder_geom.GetPath())
applyCollider(cylinder_prim)
setTranslate(cylinder_geom, Gf.Vec3d([0, 0, height * 0.5]))
setScale(cylinder_geom, Gf.Vec3d([1, 1, 1]))
# Create a cone
cone_path, cone_geom = createCone(stage, path + "/cone", radius, height, refinement)
cone_prim = stage.GetPrimAtPath(cone_geom.GetPath())
applyCollider(cone_prim)
setTranslate(cone_geom, Gf.Vec3d([0, 0, height * 1.5]))
setRotateXYZ(cone_geom, Gf.Vec3d([0, 180, 0]))
def createColor(
stage: Usd.Stage,
material_path: str,
color: list,
) -> UsdShade.Material:
"""
Creates a color material.
Args:
stage (Usd.Stage): The stage to create the color material.
material_path (str): The path of the material.
color (list): The color of the material
Returns:
UsdShade.Material: The material.
"""
material_path = omni.usd.get_stage_next_free_path(stage, material_path, False)
material = UsdShade.Material.Define(stage, material_path)
shader = UsdShade.Shader.Define(stage, material_path + "/shader")
shader.CreateIdAttr("UsdPreviewSurface")
shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Float3).Set(Gf.Vec3f(color))
material.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface")
return material
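# A minimal usage sketch (illustrative only): creating a red preview material
# and binding it to an existing prim, assuming an open stage and a free
# "/World/Looks/red_material" path.
def _color_usage_sketch(stage: Usd.Stage, prim_path: str) -> UsdShade.MaterialBindingAPI:
    red = createColor(stage, "/World/Looks/red_material", [1.0, 0.0, 0.0])
    prim = stage.GetPrimAtPath(prim_path)
    return applyMaterial(prim, red)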
def createPhysicsMaterial(
stage: Usd.Stage,
material_path: str,
static_friction: float,
dynamic_friction: float,
restitution: float,
friction_combine_mode: str = "average",
restitution_combine_mode: str = "average",
) -> UsdPhysics.MaterialAPI:
"""
Creates a physics material.
Args:
stage (Usd.Stage): The stage to create the physics material.
material_path (str): The path of the material.
static_friction (float): The static friction of the material.
dynamic_friction (float): The dynamic friction of the material.
restitution (float): The restitution of the material.
friction_combine_mode (str, optional): The way the friction between two surfaces is combined.
        restitution_combine_mode (str, optional): The way the restitution between two surfaces is combined.
Returns:
UsdPhysics.MaterialAPI: The physics material.
"""
    if friction_combine_mode not in ["multiply", "average", "min", "max"]:
        raise ValueError("friction_combine_mode must be average, multiply, min or max")
    if restitution_combine_mode not in ["multiply", "average", "min", "max"]:
        raise ValueError(
            "restitution_combine_mode must be average, multiply, min or max"
        )
material_path = omni.usd.get_stage_next_free_path(stage, material_path, False)
visual_material = UsdShade.Material.Define(stage, material_path)
prim = stage.GetPrimAtPath(material_path)
material = UsdPhysics.MaterialAPI.Apply(prim)
material.CreateStaticFrictionAttr().Set(static_friction)
material.CreateDynamicFrictionAttr().Set(dynamic_friction)
material.CreateRestitutionAttr().Set(restitution)
physx_material = PhysxSchema.PhysxMaterialAPI.Apply(prim)
physx_material.CreateFrictionCombineModeAttr().Set(friction_combine_mode)
physx_material.CreateRestitutionCombineModeAttr().Set(restitution_combine_mode)
return material
def createArticulation(
stage: Usd.Stage,
path: str,
) -> Tuple[str, Usd.Prim]:
"""
Creates an ArticulationRootAPI on a prim.
Args:
stage (Usd.Stage): The stage to create the ArticulationRootAPI.
path (str): The path of the ArticulationRootAPI.
Returns:
Tuple[str, Usd.Prim]: The path and the prim of the ArticulationRootAPI.
"""
# Creates the Xform of the platform
path, prim = createXform(stage, path)
setXformOps(prim)
# Creates the Articulation root
root = UsdPhysics.ArticulationRootAPI.Apply(prim)
return path, prim
def createFixedJoint(
stage: Usd.Stage,
path: str,
body_path1: str = None,
body_path2: str = None,
) -> UsdPhysics.FixedJoint:
"""
Creates a fixed joint between two bodies.
Args:
stage (Usd.Stage): The stage to create the fixed joint.
path (str): The path of the fixed joint.
body_path1 (str, optional): The path of the first body.
body_path2 (str, optional): The path of the second body.
Returns:
UsdPhysics.FixedJoint: The fixed joint.
"""
# Create fixed joint
joint = UsdPhysics.FixedJoint.Define(stage, path)
# Set body targets
if body_path1 is not None:
joint.CreateBody0Rel().SetTargets([body_path1])
if body_path2 is not None:
joint.CreateBody1Rel().SetTargets([body_path2])
if (body_path1 is not None) and (body_path2 is not None):
# Get from the simulation the position/orientation of the bodies
body_1_prim = stage.GetPrimAtPath(body_path1)
body_2_prim = stage.GetPrimAtPath(body_path2)
xform_body_1 = UsdGeom.Xformable(body_1_prim)
xform_body_2 = UsdGeom.Xformable(body_2_prim)
transform_body_1 = xform_body_1.ComputeLocalToWorldTransform(0.0)
transform_body_2 = xform_body_2.ComputeLocalToWorldTransform(0.0)
t12 = np.matmul(
np.linalg.inv(transform_body_1).T, np.array(transform_body_2).T
).T
translate_body_12 = Gf.Vec3f([t12[3][0], t12[3][1], t12[3][2]])
Q_body_12 = Gf.Transform(Gf.Matrix4d(t12.tolist())).GetRotation().GetQuat()
# Set the transform between the bodies inside the joint
joint.CreateLocalPos0Attr().Set(translate_body_12)
joint.CreateLocalPos1Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalRot0Attr().Set(Gf.Quatf(Q_body_12))
joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
else:
# Set the transform between the bodies inside the joint
joint.CreateLocalPos0Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalPos1Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalRot0Attr().Set(Gf.Quatf(1, 0, 0, 0))
joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
return joint
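# Gf matrices follow the row-vector convention (p_world = p_local * M), so the
# pose of body2 expressed in body1's frame is T12 = T2 * inv(T1). The transpose
# dance above computes exactly that with numpy's column-vector matmul. A
# minimal standalone check of the identity (illustrative only):
def _relative_transform_sketch(T1: np.ndarray, T2: np.ndarray) -> np.ndarray:
    t12 = np.matmul(np.linalg.inv(T1).T, T2.T).T
    assert np.allclose(t12, np.matmul(T2, np.linalg.inv(T1)))
    return t12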
def createRevoluteJoint(
stage: Usd.Stage,
path: str,
body_path1: str = None,
body_path2: str = None,
axis: str = "Z",
limit_low: float = None,
limit_high: float = None,
enable_drive: bool = False,
damping: float = 1e3,
stiffness: float = 1e6,
force_limit: float = None,
) -> UsdPhysics.RevoluteJoint:
"""
Creates a revolute joint between two bodies.
Args:
stage (Usd.Stage): The stage to create the revolute joint.
path (str): The path of the revolute joint.
body_path1 (str, optional): The path of the first body.
body_path2 (str, optional): The path of the second body.
axis (str, optional): The axis of rotation.
limit_low (float, optional): The lower limit of the joint.
limit_high (float, optional): The upper limit of the joint.
enable_drive (bool, optional): Enable or disable the drive.
damping (float, optional): The damping of the drive.
stiffness (float, optional): The stiffness of the drive.
force_limit (float, optional): The force limit of the drive.
Returns:
UsdPhysics.RevoluteJoint: The revolute joint.
"""
# Create revolute joint
joint = UsdPhysics.RevoluteJoint.Define(stage, path)
# Set body targets
    if body_path1 is not None:
        joint.CreateBody0Rel().SetTargets([body_path1])
    if body_path2 is not None:
        joint.CreateBody1Rel().SetTargets([body_path2])
if (body_path1 is not None) and (body_path2 is not None):
# Get from the simulation the position/orientation of the bodies
body_1_prim = stage.GetPrimAtPath(body_path1)
body_2_prim = stage.GetPrimAtPath(body_path2)
xform_body_1 = UsdGeom.Xformable(body_1_prim)
xform_body_2 = UsdGeom.Xformable(body_2_prim)
transform_body_1 = xform_body_1.ComputeLocalToWorldTransform(0.0)
transform_body_2 = xform_body_2.ComputeLocalToWorldTransform(0.0)
t12 = np.matmul(
np.linalg.inv(transform_body_1).T, np.array(transform_body_2).T
).T
translate_body_12 = Gf.Vec3f([t12[3][0], t12[3][1], t12[3][2]])
Q_body_12 = Gf.Transform(Gf.Matrix4d(t12.tolist())).GetRotation().GetQuat()
# Set the transform between the bodies inside the joint
joint.CreateLocalPos0Attr().Set(translate_body_12)
joint.CreateLocalPos1Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalRot0Attr().Set(Gf.Quatf(Q_body_12))
joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
else:
# Set the transform between the bodies inside the joint
joint.CreateLocalPos0Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalPos1Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalRot0Attr().Set(Gf.Quatf(1, 0, 0, 0))
joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
if axis in ["X", "Y", "Z"]:
joint.CreateAxisAttr(axis)
else:
raise ValueError("Axis must be X, Y or Z")
if limit_low is not None:
joint.CreateLowerLimitAttr(limit_low)
if limit_high is not None:
joint.CreateUpperLimitAttr(limit_high)
if enable_drive:
joint_prim = stage.GetPrimAtPath(joint.GetPath())
createDrive(
joint_prim,
token="angular",
damping=damping,
stiffness=stiffness,
max_force=force_limit,
)
return joint
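# A minimal usage sketch (illustrative only): a driven hinge between two
# existing rigid bodies, assuming an open stage and valid body paths. The
# damping/stiffness pair mirrors the velocity-drive settings used by the
# wheeled-robot configs (damping=1e10, stiffness=0.0).
def _revolute_joint_sketch(stage: Usd.Stage) -> UsdPhysics.RevoluteJoint:
    return createRevoluteJoint(
        stage,
        "/World/robot/joints/wheel_joint",
        body_path1="/World/robot/body",
        body_path2="/World/robot/wheel",
        axis="Z",
        enable_drive=True,
        damping=1e10,
        stiffness=0.0,
    )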
def createPrismaticJoint(
stage: Usd.Stage,
path: str,
body_path1: str = None,
body_path2: str = None,
axis: str = "Z",
limit_low: float = None,
limit_high: float = None,
enable_drive: bool = False,
damping: float = 1e3,
stiffness: float = 1e6,
force_limit: float = None,
) -> UsdPhysics.PrismaticJoint:
"""
Creates a prismatic joint between two bodies.
Args:
        stage (Usd.Stage): The stage to create the prismatic joint.
        path (str): The path of the prismatic joint.
body_path1 (str, optional): The path of the first body.
body_path2 (str, optional): The path of the second body.
axis (str, optional): The axis of rotation.
limit_low (float, optional): The lower limit of the joint.
limit_high (float, optional): The upper limit of the joint.
enable_drive (bool, optional): Enable or disable the drive.
damping (float, optional): The damping of the drive.
stiffness (float, optional): The stiffness of the drive.
force_limit (float, optional): The force limit of the drive.
Returns:
UsdPhysics.PrismaticJoint: The prismatic joint.
"""
    # Create prismatic joint
joint = UsdPhysics.PrismaticJoint.Define(stage, path)
# Set body targets
if body_path1 is not None:
joint.CreateBody0Rel().SetTargets([body_path1])
if body_path2 is not None:
joint.CreateBody1Rel().SetTargets([body_path2])
if (body_path1 is not None) and (body_path2 is not None):
# Get from the simulation the position/orientation of the bodies
body_1_prim = stage.GetPrimAtPath(body_path1)
body_2_prim = stage.GetPrimAtPath(body_path2)
xform_body_1 = UsdGeom.Xformable(body_1_prim)
xform_body_2 = UsdGeom.Xformable(body_2_prim)
transform_body_1 = xform_body_1.ComputeLocalToWorldTransform(0.0)
transform_body_2 = xform_body_2.ComputeLocalToWorldTransform(0.0)
t12 = np.matmul(
np.linalg.inv(transform_body_1).T, np.array(transform_body_2).T
).T
translate_body_12 = Gf.Vec3f([t12[3][0], t12[3][1], t12[3][2]])
Q_body_12 = Gf.Transform(Gf.Matrix4d(t12.tolist())).GetRotation().GetQuat()
# Set the transform between the bodies inside the joint
joint.CreateLocalPos0Attr().Set(translate_body_12)
joint.CreateLocalPos1Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalRot0Attr().Set(Gf.Quatf(Q_body_12))
joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
else:
# Set the transform between the bodies inside the joint
joint.CreateLocalPos0Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalPos1Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalRot0Attr().Set(Gf.Quatf(1, 0, 0, 0))
joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
if axis in ["X", "Y", "Z"]:
joint.CreateAxisAttr(axis)
else:
raise ValueError("Axis must be X, Y or Z")
if limit_low is not None:
joint.CreateLowerLimitAttr(limit_low)
if limit_high is not None:
joint.CreateUpperLimitAttr(limit_high)
if enable_drive:
joint_prim = stage.GetPrimAtPath(joint.GetPath())
createDrive(
joint_prim,
token="linear",
damping=damping,
stiffness=stiffness,
max_force=force_limit,
)
return joint
def createP3Joint(
stage: Usd.Stage,
path: str,
body_path1: str,
body_path2: str,
damping: float = 1e3,
stiffness: float = 1e6,
articulation_root: str = None,
prefix: str = "",
enable_drive: bool = False,
) -> Tuple[
UsdPhysics.PrismaticJoint, UsdPhysics.PrismaticJoint, UsdPhysics.PrismaticJoint
]:
"""
    Creates 3 prismatic joints between two bodies, one per axis (X, Y, Z).
    To build this compound joint, two dummy bodies are created at the same
    position as the 1st body, each with a RigidBodyAPI and a MassAPI applied.
    The creation of these bodies is automated and can fail to recover the
    position of the 1st body correctly.
Args:
stage (Usd.Stage): The stage to create the prismatic joint.
path (str): The path of the prismatic joint.
body_path1 (str): The path of the first body.
body_path2 (str): The path of the second body.
damping (float, optional): The damping of the drive.
stiffness (float, optional): The stiffness of the drive.
        articulation_root (str, optional): The path of the articulation root.
        prefix (str, optional): The prefix prepended to the joint names.
        enable_drive (bool, optional): Enable or disable the drive.
Returns:
Tuple[UsdPhysics.PrismaticJoint, UsdPhysics.PrismaticJoint, UsdPhysics.PrismaticJoint]: The prismatic joints.
"""
# Get the position/orientation of the two bodies
body_1_prim = stage.GetPrimAtPath(body_path1)
body_2_prim = stage.GetPrimAtPath(body_path2)
if articulation_root is not None:
root_prim = stage.GetPrimAtPath(articulation_root)
transform_body_1 = getTransform(body_1_prim, root_prim)
transform_body_2 = getTransform(body_2_prim, root_prim)
else:
xform_body_1 = UsdGeom.Xformable(body_1_prim)
xform_body_2 = UsdGeom.Xformable(body_2_prim)
transform_body_1 = xform_body_1.ComputeLocalToWorldTransform(0.0)
transform_body_2 = xform_body_2.ComputeLocalToWorldTransform(0.0)
translate_body_1 = Gf.Vec3f(
[transform_body_1[3][0], transform_body_1[3][1], transform_body_1[3][2]]
)
Q_body_1d = Gf.Transform(transform_body_1).GetRotation().GetQuat()
# Generates dummy bodies for the joints at the position of the 1st body
xaxis_body_path, xaxis_body_prim = createXform(stage, path + "/x_axis_body")
yaxis_body_path, yaxis_body_prim = createXform(stage, path + "/y_axis_body")
setTranslate(xaxis_body_prim, translate_body_1)
setTranslate(yaxis_body_prim, translate_body_1)
setOrient(xaxis_body_prim, Q_body_1d)
setOrient(yaxis_body_prim, Q_body_1d)
applyRigidBody(xaxis_body_prim)
applyRigidBody(yaxis_body_prim)
applyMass(xaxis_body_prim, 0.0000001)
applyMass(yaxis_body_prim, 0.0000001)
# Create the 3 prismatic joints
xaxis_joint = createPrismaticJoint(
stage, path + "/" + prefix + "x_axis_joint", body_path1, xaxis_body_path, "X"
)
yaxis_joint = createPrismaticJoint(
stage,
path + "/" + prefix + "y_axis_joint",
xaxis_body_path,
yaxis_body_path,
"Y",
)
zaxis_joint = createPrismaticJoint(
stage, path + "/" + prefix + "z_axis_joint", yaxis_body_path, body_path2, "Z"
)
# Get the delta transform between the 1st and 2nd body
t12 = np.matmul(np.linalg.inv(transform_body_1), transform_body_2)
translate_body_12 = Gf.Vec3f([t12[3][0], t12[3][1], t12[3][2]])
Q_body_12 = Gf.Transform(Gf.Matrix4d(t12.tolist())).GetRotation().GetQuat()
# Set the transform between the bodies inside the joints
xaxis_joint.CreateLocalPos0Attr().Set(Gf.Vec3f([0, 0, 0]))
yaxis_joint.CreateLocalPos0Attr().Set(Gf.Vec3f([0, 0, 0]))
zaxis_joint.CreateLocalPos0Attr().Set(translate_body_12)
xaxis_joint.CreateLocalRot0Attr().Set(Gf.Quatf(1, 0, 0, 0))
yaxis_joint.CreateLocalRot0Attr().Set(Gf.Quatf(1, 0, 0, 0))
zaxis_joint.CreateLocalRot0Attr().Set(Gf.Quatf(Q_body_12))
xaxis_joint.CreateLocalPos1Attr().Set(Gf.Vec3f([0, 0, 0]))
yaxis_joint.CreateLocalPos1Attr().Set(Gf.Vec3f([0, 0, 0]))
zaxis_joint.CreateLocalPos1Attr().Set(Gf.Vec3f([0, 0, 0]))
xaxis_joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
yaxis_joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
zaxis_joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
# Add drives to the joints
if enable_drive:
xaxis_drive = createDrive(
stage.GetPrimAtPath(path + "/" + prefix + "x_axis_joint"),
token="linear",
damping=damping,
stiffness=stiffness,
)
yaxis_drive = createDrive(
stage.GetPrimAtPath(path + "/" + prefix + "y_axis_joint"),
token="linear",
damping=damping,
stiffness=stiffness,
)
zaxis_drive = createDrive(
stage.GetPrimAtPath(path + "/" + prefix + "z_axis_joint"),
token="linear",
damping=damping,
stiffness=stiffness,
)
return (xaxis_joint, yaxis_joint, zaxis_joint)
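# Minimal usage sketch for the compound joint (paths illustrative):
#
#   jx, jy, jz = createP3Joint(
#       stage, "/World/p3_joint", "/World/bodyA", "/World/bodyB",
#       enable_drive=True,
#   )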
def createP2Joint(
stage: Usd.Stage,
path: str,
body_path1: str,
body_path2: str,
damping: float = 1e3,
stiffness: float = 1e6,
articulation_root: str = None,
prefix: str = "",
enable_drive: bool = False,
) -> Tuple[UsdPhysics.PrismaticJoint, UsdPhysics.PrismaticJoint]:
"""
Creates 2 Prismatic joints between two bodies. One for each axis (X,Y).
To create this joint, one dummy body must be added. It is created
at the same position as the 1st body, and a RigidBodyAPI and a MassAPI
are applied to it. The addition of this body is automated, and it
can fail to recover the position of the 1st body correctly.
Args:
stage (Usd.Stage): The stage to create the prismatic joint.
path (str): The path of the prismatic joint.
body_path1 (str): The path of the first body.
body_path2 (str): The path of the second body.
damping (float, optional): The damping of the drive.
stiffness (float, optional): The stiffness of the drive.
articulation_root (str, optional): The path of the articulation root.
enable_drive (bool, optional): Enable or disable the drive.
Returns:
Tuple[UsdPhysics.PrismaticJoint, UsdPhysics.PrismaticJoint]: The prismatic joints.
"""
# Get the position/orientation of the two bodies
body_1_prim = stage.GetPrimAtPath(body_path1)
body_2_prim = stage.GetPrimAtPath(body_path2)
if articulation_root is not None:
root_prim = stage.GetPrimAtPath(articulation_root)
transform_body_1 = getTransform(body_1_prim, root_prim)
transform_body_2 = getTransform(body_2_prim, root_prim)
else:
xform_body_1 = UsdGeom.Xformable(body_1_prim)
xform_body_2 = UsdGeom.Xformable(body_2_prim)
transform_body_1 = xform_body_1.ComputeLocalToWorldTransform(0.0)
transform_body_2 = xform_body_2.ComputeLocalToWorldTransform(0.0)
translate_body_1 = Gf.Vec3f(
[transform_body_1[3][0], transform_body_1[3][1], transform_body_1[3][2]]
)
Q_body_1d = Gf.Transform(transform_body_1).GetRotation().GetQuat()
# Generates dummy body for the joints at the position of the 1st body
xaxis_body_path, xaxis_body_prim = createXform(stage, path + "/x_axis_body")
setTranslate(xaxis_body_prim, translate_body_1)
setOrient(xaxis_body_prim, Q_body_1d)
applyRigidBody(xaxis_body_prim)
applyMass(xaxis_body_prim, 0.0000001)
# Create the 2 prismatic joints
xaxis_joint = createPrismaticJoint(
stage, path + "/" + prefix + "x_axis_joint", body_path1, xaxis_body_path, "X"
)
yaxis_joint = createPrismaticJoint(
stage, path + "/" + prefix + "y_axis_joint", xaxis_body_path, body_path2, "Y"
)
# Get the delta transform between the 1st and 2nd body
t12 = np.matmul(np.linalg.inv(transform_body_1), transform_body_2)
translate_body_12 = Gf.Vec3f([t12[3][0], t12[3][1], t12[3][2]])
Q_body_12 = Gf.Transform(Gf.Matrix4d(t12.tolist())).GetRotation().GetQuat()
# Set the transform between the bodies inside the joints
xaxis_joint.CreateLocalPos0Attr().Set(Gf.Vec3f([0, 0, 0]))
yaxis_joint.CreateLocalPos0Attr().Set(translate_body_12)
xaxis_joint.CreateLocalRot0Attr().Set(Gf.Quatf(1, 0, 0, 0))
yaxis_joint.CreateLocalRot0Attr().Set(Gf.Quatf(Q_body_12))
xaxis_joint.CreateLocalPos1Attr().Set(Gf.Vec3f([0, 0, 0]))
yaxis_joint.CreateLocalPos1Attr().Set(Gf.Vec3f([0, 0, 0]))
xaxis_joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
yaxis_joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
# Add drives to the joints
if enable_drive:
xaxis_drive = createDrive(
stage.GetPrimAtPath(path + "/" + prefix + "x_axis_joint"),
token="linear",
damping=damping,
stiffness=stiffness,
)
yaxis_drive = createDrive(
stage.GetPrimAtPath(path + "/" + prefix + "y_axis_joint"),
token="linear",
damping=damping,
stiffness=stiffness,
)
return (xaxis_joint, yaxis_joint)
def create3DOFJoint(
stage: Usd.Stage,
path: str,
body_path1: str,
body_path2: str,
) -> UsdPhysics.Joint:
"""
Creates a D6 joint with limits between two bodies to constrain motion to the 2D plane.
Args:
stage (Usd.Stage): The stage to create the joint in.
path (str): The path of the joint.
body_path1 (str): The path of the first body.
body_path2 (str): The path of the second body.
Returns:
UsdPhysics.Joint: The D6 joint.
"""
# Create the D6 joint
joint = UsdPhysics.Joint.Define(stage, path)
# Set body targets
joint.CreateBody0Rel().SetTargets([body_path1])
joint.CreateBody1Rel().SetTargets([body_path2])
# Get from the simulation the position/orientation of the bodies
translate = Gf.Vec3d(
stage.GetPrimAtPath(body_path2).GetAttribute("xformOp:translate").Get()
)
Q = stage.GetPrimAtPath(body_path2).GetAttribute("xformOp:orient").Get()
quat0 = Gf.Quatf(
Q.GetReal(), Q.GetImaginary()[0], Q.GetImaginary()[1], Q.GetImaginary()[2]
)
# Set the transform between the bodies inside the joint
joint.CreateLocalPos0Attr().Set(translate)
joint.CreateLocalPos1Attr().Set(Gf.Vec3d([0, 0, 0]))
joint.CreateLocalRot0Attr().Set(quat0)
joint.CreateLocalRot1Attr().Set(Gf.Quatf(1, 0, 0, 0))
d6prim = stage.GetPrimAtPath(path)
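# PhysX convention: a limit with low > high marks that DOF as locked, so
# locking transZ, rotX and rotY below constrains motion to the XY plane.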
for dof in ["transX", "transY", "transZ", "rotX", "rotY", "rotZ"]:
if dof in ["transZ", "rotX", "rotY"]:
limitAPI = UsdPhysics.LimitAPI.Apply(d6prim, dof)
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
return joint
| 40,156 |
Python
| 33.06022 | 117 | 0.633006 |
williamjsmith15/OmniFlow/docker-compose-openmc.yml
|
version: "3.9"
services:
openmc:
build:
context: .
dockerfile: OpenMC/Dockerfile
volumes:
- ../OmniFlow/:/OmniFlow/
- /tmp/:/tmp/
tty: true
stdin_open: true
# docker compose -f "docker-compose-openmc.yml" build
# docker compose -f "docker-compose-openmc.yml" up
| 307 |
YAML
| 17.117646 | 53 | 0.615635 |
williamjsmith15/OmniFlow/Install_Win_wip.md
|
# Setup
## Environment Setup
Download and install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) and follow all the instructions given by Microsoft. The default installation options here are fine (the system was tested running with an Ubuntu distro install).
Download and install [Docker Desktop](https://docs.docker.com/desktop/install/windows-install/), follow the installation steps and set up Docker with the WSL 2 install from the previous step.
You can verify the version and installation of both of these by running these commands in PowerShell or the Command Prompt:
```
wsl -l -v
docker version
```
Next, in WSL, install cwltool and the toil-cwl-runner. First, update the package installer:
```
sudo apt-get update
```
followed by
```
sudo apt-get install cwltool
sudo apt-get install python3-pip
pip install toil[cwl]
```
Both of the workflow runners can be tested properly later, once the repository is cloned, on the test OpenMC cases.
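As a quick smoke test in the meantime, both runners can report their versions (standard `--version` flags, shown here as an illustration):
```
cwltool --version
toil-cwl-runner --version
```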
To save time later when running the Monte Carlo solver, the Docker image for this can be downloaded now by running
```
docker run -it williamjsmith15/omniflow-openmc:latest
```
and then, once the image is downloaded and running, run the following at the container's command line:
```
python
import openmc
```
If this throws no errors, then the container has been downloaded and the packages inside are set up correctly. The container can now be exited by running:
```
exit()
exit
```
For visualisation, ParaView is the viewer of choice and comes with a connector that can be installed to allow operation with Omniverse. ParaView can be downloaded from [here](https://www.paraview.org/download/). ParaView versions 5.9, 5.10 and 5.11 are all currently supported by Omniverse.
Finally, download and install [NVIDIA Omniverse](https://www.nvidia.com/en-sg/omniverse/download/). The extension should work in most apps but, to start, Omniverse Create is recommended.
## Extension Download & Testing
Clone the repository with
```
git clone https://github.com/williamjsmith15/OmniFlow
```
After cloning, test that all the environments are working so far. This can be done from the main OmniFlow directory with the following commands:
```
python test/cwltool_test.py [CREATE THESE TEST MODULES]
python test/toil_test.py
```
Verify that there are no errors in the terminal output and that the output files are correctly saved in the folders /test/cwltool/toy/, /test/cwltool/simple/, /test/toil/toy/ and /test/toil/simple/. The VTK files in the two simple tests can both be opened in ParaView, both as a visual check and to confirm the correct install of ParaView [SCREENSHOTS]
## Connect Extension to Omniverse
Open Omniverse, go to Window -> Extensions -> Settings and add the <installation-folder>/OmniFlow/OpenMC/omni-kit-extension/exts/ folder to the extension search paths, then enable the extension from the Extensions manager.
TODO:
Screenshots for everything
Create test modules
Create a win and linux file to autoinstall most things
| 2,732 |
Markdown
| 38.042857 | 336 | 0.777452 |
williamjsmith15/OmniFlow/README.md
|
# OmniFlow
NVIDIA Omniverse Simulation Integration System
To start the docker container:
First time only to build - docker compose -f "docker-compose-toil.yml" build
Every time after to launch - docker compose -f "docker-compose-toil.yml" up
Set the paths to the individual extensions in Omniverse, for example in the case of OpenMC:
Launch the Omniverse app, go to Window > Extensions > Settings and add the <installation-folder>/OmniFlow/OpenMC/omni-kit-extension/exts/ folder to the filepaths
Back in the extensions manager, search for the extension name (or use the button to filter for just 3rd Party Exts) and find the extension
Click on the toggle switch to launch the extension (you can also select autoload so next time it will already be loaded on launch)
To Install:
pip install docker
pip install cwltool
pip install toil
sudo apt install git
sudo apt install python3-pip
sudo apt install libfuse2 # Needed by Omniverse
git clone https://github.com/williamjsmith15/OmniFlow
Point Omniverse to the extension path as described above
Get onto the correct branch (feature branch)
| 1,083 |
Markdown
| 44.166665 | 162 | 0.78024 |
williamjsmith15/OmniFlow/OpenMC/README.md
|
# MScDIssertation
Collection of files and scripts that contain the work performed for an MSc Structural Engineering dissertation
Extra Dependencies:
pip install cwltool
For OpenMC Conda Env:
conda create -n <env_name> # Create the new conda environment to install into
conda activate <env_name> # Activate the environment
conda install -c conda-forge mamba # Install the package manager mamba
mamba install openmc # Install OpenMC and all its dependencies through mamba
To run:
cwltool needs the argument --no-match-user when using the OpenMC Docker container, so that the CWL tool can run as the root user and not user ID 1000:
cwl-runner --no-match-user workflows/openmc_workflow.cwl workflows/script_loc.yml # Can also use toil-cwl-runner instead
To run the Docker container directly:
docker run -it -v <parent_folder>/MScDIssertation/:/home/MScDissertation/ openmc/openmc:develop-dagmc-libmesh # Links the git repo folder to folder /home/MScDissertation on the Docker container
To install extension into Omniverse:
Launch the app, go to Window > Extensions > Settings and add the <parent_folder>/omni-kit-extension/exts folder to the filepaths
Back in the extensions manager, search for OpenMC and it should be the only result
Click on the toggle switch (you can also select autoload so next time it will already be loaded on launch)
| 1,394 |
Markdown
| 59.652171 | 198 | 0.761119 |
williamjsmith15/OmniFlow/OpenMC/tools/dagmc_material_name/extract_mats.py
|
# Short Python script to obtain materials from a list given by MOAB using the mbsize command
import os
sep = os.sep
path_py = os.path.realpath(__file__)
list_path = ''
if "MScDIssertation" in path_py:
cwl_folder = path_py.split(f"{sep}MScDIssertation", 1)[0]
elif "cwl" in path_py:
cwl_folder = path_py.split(f"{sep}cwl", 1)[0]
for root, dirs, files in os.walk(cwl_folder):
for file in files:
if file.endswith("mat_list.txt"):
list_path = os.path.join(root, file)
mats = []
check_str = 'NAME = mat:'
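# e.g. a matching line in mat_list.txt looks like (illustrative):
# NAME = mat:Iron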
with open(list_path) as old, open('materials.txt', 'w') as new:
for line in old: # Loop through lines in old txt
if check_str in line: # check against check string
if not any(material in line for material in mats): # check against existing materials
new.write(line.replace('NAME = mat:', ''))
| 869 |
Python
| 28.999999 | 97 | 0.642117 |
williamjsmith15/OmniFlow/OpenMC/tools/dagmc_material_name/dagmc_materials.yml
|
usd_CAD:
class: File
path: ../../output/omni/dagmc.usd
extract_script:
class: File
path: extract_mats.py
usd_h5m_script:
class: File
path: ../file_converters/usd_h5m.py
settings:
class: File
path: ../../output/omni/settings.txt
| 243 |
YAML
| 19.333332 | 38 | 0.679012 |
williamjsmith15/OmniFlow/OpenMC/tools/main/script_loc.yml
|
script:
class: File
path: general_CAD.py
str: dagmc.vtk
usd_CAD:
class: File
path: ../../output/omni/dagmc.usd
settings:
class: File
path: ../../output/omni/settings.txt
usd_h5m_script:
class: File
path: ../file_converters/usd_h5m.py
# Test running:
# cwltool --outdir /home/williamjsmith15/PhD/OmniFlow/TEST/Test_USD/output /home/williamjsmith15/PhD/OmniFlow/OpenMC/tools/file_converters/usd_h5m_convert.cwl /home/williamjsmith15/PhD/OmniFlow/OpenMC/tools/file_converters/usd_h5m_convert.yml
# cwltool --no-match-user --outdir /home/williamjsmith15/PhD/OmniFlow/TEST/Test_USD/output/ /home/williamjsmith15/PhD/OmniFlow/OpenMC/tools/main/openmc_workflow.cwl /home/williamjsmith15/PhD/OmniFlow/OpenMC/tools/main/script_loc.yml
| 751 |
YAML
| 40.777776 | 244 | 0.76032 |
williamjsmith15/OmniFlow/OpenMC/tools/main/general_CAD.py
|
# Steps for this workflow:
# CAD through Cubit and then into h5m format (through mb convert)
# Cubit adds materials etc etc
# Then send the CAD file along with this script into the DOCKER container
import openmc
import os
import math
import openmc_plasma_source as ops
import numpy as np
# Find the settings file
sep = os.sep
path_py = os.path.realpath(__file__)
settings_path = ''
geometry_path = ''
# Find parent folder path
if "MScDIssertation" in path_py:
cwl_folder = path_py.split(f"{sep}MScDIssertation", 1)[0]
elif "cwl" in path_py:
cwl_folder = path_py.split(f"{sep}cwl", 1)[0]
# Find settings and dagmc files
for root, dirs, files in os.walk(cwl_folder):
for file in files:
if file.endswith("settings.txt"):
settings_path = os.path.join(root, file)
if file.endswith("dagmc.h5m"):
geometry_path = os.path.join(root, file)
# Get all settings out
materials_input = []
sources_input = []
settings_input = []
ex_settings = []
position = 0
with open(settings_path) as f:
for line in f:
if position == 0:
if "MATERIALS" in line:
position = 1
elif position == 1:
if "SOURCES" in line:
position = 2
else:
materials_input.append(line.split())
elif position == 2:
if "SETTINGS" in line:
position = 3
else:
sources_input.append(line.split())
elif position == 3:
if "EXT_SETTINGS" in line:
position = 4
else:
settings_input.append(line.split())
elif position == 4:
ex_settings.append(line.split())
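# A sketch of the settings.txt layout this parser expects (values are
# illustrative, not from a real run):
#
#   MATERIALS
#   Steel Fe 7.87
#   SOURCES
#   14.1e6 0.0 0.0 0.0
#   SETTINGS
#   batches 10
#   particles 5000
#   run_mode fixed source
#   EXT_SETTINGS
#   source_type Point Source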
##################
# DEFINE MATERIALS
##################
tmp_material_array = []
# Temp for testing
# for material in materials_input:
# tmp_material = openmc.Material(name = material[0])
# tmp_material.add_element('Fe', 1, 'ao')
# tmp_material.set_density("g/cm3", 7.7)
# tmp_material_array.append(tmp_material)
for material in materials_input:
tmp_material = openmc.Material(name = material[0])
tmp_material.add_element(material[1], 1, "ao")
tmp_material.set_density("g/cm3", float(material[2]))
tmp_material_array.append(tmp_material)
materials = openmc.Materials(tmp_material_array)
materials.export_to_xml()
##################
# DEFINE GEOMETRY
##################
# Hack to handle the boundaries for the geometry (for now) - in future, look at how to handle this properly
# Taken from the paramak examples https://github.com/fusion-energy/magnetic_fusion_openmc_dagmc_paramak_example/blob/main/2_run_openmc_dagmc_simulation.py
dagmc_univ = openmc.DAGMCUniverse(filename=geometry_path)
# geometry = openmc.Geometry(root=dagmc_univ)
# geometry.export_to_xml()
# creates an edge of universe boundary surface
vac_surf = openmc.Sphere(r=1000, surface_id=9999, boundary_type="vacuum") # Normally like 100000
# lead_surf = -openmc.Sphere(r=60000) & + openmc.Sphere(r=50000)
# lead = openmc.Material(name='lead')
# lead.set_density('g/cc', 11.4)
# lead.add_element('Pb', 1)
# lead_cell = openmc.Cell(fill=lead, region=lead_surf)
# adds reflective surface for the sector model at 0 degrees
reflective_1 = openmc.Plane(
a=math.sin(0),
b=-math.cos(0),
c=0.0,
d=0.0,
surface_id=9991,
boundary_type="reflective",
)
# adds reflective surface for the sector model at 90 degrees
reflective_2 = openmc.Plane(
a=math.sin(math.radians(90)),
b=-math.cos(math.radians(90)),
c=0.0,
d=0.0,
surface_id=9990,
boundary_type="reflective",
)
# specifies the region as below the universe boundary and inside the reflective surfaces
region = -vac_surf # & -reflective_1 & +reflective_2 DEBUGGING
# creates a cell from the region and fills the cell with the dagmc geometry
containing_cell = openmc.Cell(cell_id=9999, region=region, fill=dagmc_univ)
geometry = openmc.Geometry(root=[containing_cell])
geometry.export_to_xml()
##################
# DEFINE SETTINGS
##################
settings = openmc.Settings()
source_type = ''
for ex_setting in ex_settings:
if ex_setting[0] == "source_type":
source_type = " ".join(ex_setting[1:])
else:
print(f"Don't know what to do with {ex_setting}")
# Sources
sources = []
angle_conversion = (2*np.pi)/360
if source_type == 'Point Source': # If a point source
for source in sources_input:
source_pnt = openmc.stats.Point(xyz=(float(source[1]), float(source[2]), float(source[3])))
source = openmc.Source(space=source_pnt, energy=openmc.stats.Discrete(x=[float(source[0]),], p=[1.0,]))
sources.append(source)
source_str = 1.0 / len(sources)
for source in sources:
source.strength = source_str
elif source_type == 'Fusion Point Source':
for source in sources_input:
source_single = ops.FusionPointSource(
)
sources.append(source_single)
elif source_type == 'Fusion Ring Source':
for source in sources_input:
source_single = ops.FusionRingSource(
angles = (float(source[2])*angle_conversion, float(source[3])*angle_conversion),
radius = float(source[0]),
temperature = float(source[4]),
fuel = str(source[1]),
z_placement = float(source[5])
)
sources.append(source_single)
elif source_type == 'Tokamak Source':
for source in sources_input:
source_single = ops.TokamakSource(
).make_openmc_sources()
sources.append(source_single)
else:
print(f"I don't know what to do with {source_type}")
settings.source = sources
# Settings
for setting in settings_input:
try:
if setting[0] == "batches": # Apparently the version of python being used is not new enough for swtich statements... :(
settings.batches = int(setting[1])
elif setting[0] == "particles":
settings.particles = int(setting[1])
elif setting[0] == "run_mode":
settings.run_mode = str(" ".join(setting[1:]))
else:
print(f"Setting: {setting} did not match one of the expected cases.")
except Exception:
print(f"There was an error with setting {setting} somewhere...")
settings.export_to_xml()
openmc.run(tracks=True) # Run in tracking mode for visualisation of tracks through CAD
| 6,382 |
Python
| 31.902062 | 153 | 0.632404 |
williamjsmith15/OmniFlow/OpenMC/tools/tests/simple/simple_CAD.py
|
#From https://nbviewer.org/github/openmc-dev/openmc-notebooks/blob/main/cad-based-geometry.ipynb
import urllib.request
import openmc
from matplotlib import pyplot as plt
##################
# DEFINE MATERIALS
##################
water = openmc.Material(name="water")
water.add_nuclide('H1', 2.0, 'ao')
water.add_nuclide('O16', 1.0, 'ao')
water.set_density('g/cc', 1.0)
#water.add_s_alpha_beta('c_H_in_H2O') Have to remove due to issue in new docker container - see OmniFlow doc 03/01/23
water.id = 41
iron = openmc.Material(name="iron")
iron.add_nuclide("Fe54", 0.0564555822608)
iron.add_nuclide("Fe56", 0.919015287728)
iron.add_nuclide("Fe57", 0.0216036861685)
iron.add_nuclide("Fe58", 0.00292544384231)
iron.set_density("g/cm3", 7.874)
mats = openmc.Materials([iron, water])
mats.export_to_xml()
##################
# DEFINE GEOMETRY
##################
teapot_url = 'https://tinyurl.com/y4mcmc3u' # 29 MB
def download(url):
"""
Helper function for retrieving dagmc models
"""
u = urllib.request.urlopen(url)
if u.status != 200:
raise RuntimeError("Failed to download file.")
# save file as dagmc.h5m
with open("dagmc.h5m", 'wb') as f:
f.write(u.read())
download(teapot_url)
dagmc_univ = openmc.DAGMCUniverse(filename="dagmc.h5m")
geometry = openmc.Geometry(root=dagmc_univ)
geometry.export_to_xml()
##################
# DEFINE SETTINGS
##################
settings = openmc.Settings()
settings.batches = 10
settings.particles = 5000
settings.run_mode = "fixed source"
src_locations = ((-4.0, 0.0, -2.0),
( 4.0, 0.0, -2.0),
( 4.0, 0.0, -6.0),
(-4.0, 0.0, -6.0),
(10.0, 0.0, -4.0),
(-8.0, 0.0, -4.0))
# we'll use the same energy for each source
src_e = openmc.stats.Discrete(x=[12.0,], p=[1.0,])
# create source for each location
sources = []
for loc in src_locations:
src_pnt = openmc.stats.Point(xyz=loc)
src = openmc.Source(space=src_pnt, energy=src_e)
sources.append(src)
src_str = 1.0 / len(sources)
for source in sources:
source.strength = src_str
settings.source = sources
settings.export_to_xml()
mesh = openmc.RegularMesh()
mesh.dimension = (120, 1, 40)
mesh.lower_left = (-20.0, 0.0, -10.0)
mesh.upper_right = (20.0, 1.0, 4.0)
mesh_filter = openmc.MeshFilter(mesh)
pot_filter = openmc.CellFilter([1])
pot_tally = openmc.Tally()
pot_tally.filters = [mesh_filter, pot_filter]
pot_tally.scores = ['flux']
water_filter = openmc.CellFilter([5])
water_tally = openmc.Tally()
water_tally.filters = [mesh_filter, water_filter]
water_tally.scores = ['flux']
tallies = openmc.Tallies([pot_tally, water_tally])
tallies.export_to_xml()
openmc.run(tracks=True) # Run in tracking mode for visualisation of tracks through CAD
##################
# PLOTTING
##################
sp = openmc.StatePoint("statepoint.10.h5")
water_tally = sp.get_tally(scores=['flux'], id=water_tally.id)
water_flux = water_tally.mean
water_flux.shape = (40, 120)
water_flux = water_flux[::-1, :]
pot_tally = sp.get_tally(scores=['flux'], id=pot_tally.id)
pot_flux = pot_tally.mean
pot_flux.shape = (40, 120)
pot_flux = pot_flux[::-1, :]
del sp
p = openmc.Plot()
p.basis = 'xz'
p.origin = (0.0, 0.0, 0.0)
p.width = (30.0, 20.0)
p.pixels = (450, 300)
p.color_by = 'material'
p.colors = {iron: 'gray', water: 'blue'}
openmc.plot_inline(p)
plt.savefig('Plot_1.png')
plt.clf()
fig = plt.figure(figsize=(18, 16))
sub_plot1 = plt.subplot(121, title="Kettle Flux")
sub_plot1.imshow(pot_flux)
sub_plot2 = plt.subplot(122, title="Water Flux")
sub_plot2.imshow(water_flux)
plt.savefig('Flux.png')
plt.clf()
| 3,690 |
Python
| 19.853107 | 117 | 0.630352 |
williamjsmith15/OmniFlow/OpenMC/tools/tests/toy/toy.py
|
# From https://nbviewer.org/github/openmc-dev/openmc-notebooks/blob/main/pincell.ipynb
# An OpenMC python script that runs a toy problem example
import openmc
import matplotlib.pyplot as plt # Extra to save plots produced in the process
import openmc_data_downloader as odd # Removes need to have the --no-match-user in the CWL call, this downloads the data files needed for the neutronics code automatically
import os
# #Set cross sections XML path
# os.environ["OPENMC_CROSS_SECTIONS"] = str('/home/nndc_hdf5/cross_sections.xml')
####################
# DEFINING MATERIALS
####################
# Uranium Dioxide Fuel
uo2 = openmc.Material(name="uo2") # Create material variable with name uo2
uo2.add_nuclide('U235', 0.03) # Add nuclides to material
uo2.add_nuclide('U238', 0.97)
uo2.add_nuclide('O16', 2.0)
uo2.set_density('g/cm3', 10.0) # Set density of material
# Zirchonium Casing
zirconium = openmc.Material(name="zirconium")
zirconium.add_element('Zr', 1.0) # Use of add element as elemental material
zirconium.set_density('g/cm3', 6.6)
# Water Coolant
water = openmc.Material(name="h2o") # Same process as uo2
water.add_nuclide('H1', 2.0)
water.add_nuclide('O16', 1.0)
water.set_density('g/cm3', 1.0)
# water.add_s_alpha_beta('c_H_in_H2O') # So the bound-atom cross section is used at thermal energies rather than free-atom
mats = openmc.Materials([uo2, zirconium, water]) # Add all materials to a group of materials
# odd.just_in_time_library_generator(
# libraries = 'ENDFB-7.1-NNDC',
# materials = mats
# )
# os.environ["OPENMC_CROSS_SECTIONS"] = str('/home/nndc_hdf5/cross_sections.xml')
mats.export_to_xml() # Export the material data to a .xml file that the solver will use later on
os.system('cat materials.xml')
####################
# DEFINING GEOMETRY
####################
# Set cylinders to define regions and then define regions from those cylinders
fuel_outer_radius = openmc.ZCylinder(r=0.39)
clad_inner_radius = openmc.ZCylinder(r=0.40)
clad_outer_radius = openmc.ZCylinder(r=0.46)
fuel_region = -fuel_outer_radius
gap_region = +fuel_outer_radius & -clad_inner_radius
clad_region = +clad_inner_radius & -clad_outer_radius
# Assign materials and regions to cells
fuel = openmc.Cell(name='fuel')
fuel.fill = uo2
fuel.region = fuel_region
gap = openmc.Cell(name='air gap')
gap.region = gap_region
clad = openmc.Cell(name='clad')
clad.fill = zirconium
clad.region = clad_region
# Create a box around the cylinders and fill with water as coolant
pitch = 1.26
box = openmc.rectangular_prism(width=pitch, height=pitch, boundary_type='reflective')
water_region = box & +clad_outer_radius
moderator = openmc.Cell(name='moderator')
moderator.fill = water
moderator.region = water_region
# Add all cells to the overall universe and again push to .xml for use by the solver
root_universe = openmc.Universe(cells=(fuel, gap, clad, moderator))
geometry = openmc.Geometry(root_universe)
geometry.export_to_xml()
####################
# DEFINING SETTINGS
####################
# Create a point source
point = openmc.stats.Point((0, 0, 0))
source = openmc.Source(space=point)
# Set settings
settings = openmc.Settings()
settings.source = source
settings.batches = 100
settings.inactive = 10
settings.particles = 1000
# Push settings to .xml for solver
settings.export_to_xml()
####################
# DEFINING TALLIES
####################
cell_filter = openmc.CellFilter(fuel) # What space the tallies take place in
tally = openmc.Tally(1)
tally.filters = [cell_filter]
# Tell tally what to collect info on
tally.nuclides = ['U235']
tally.scores = ['total', 'fission', 'absorption', '(n,gamma)']
# Export to .xml for solver
tallies = openmc.Tallies([tally])
tallies.export_to_xml()
####################
# RUN
####################
openmc.run(tracks=True) # Run in tracking mode for visualisation of tracks through CAD
# Plot geometries
plot = openmc.Plot()
plot.filename = 'pinplot'
plot.width = (pitch, pitch)
plot.pixels = (200, 200)
plot.color_by = 'material'
plot.colors = {uo2: 'yellow', water: 'blue'}
plots = openmc.Plots([plot])
plots.export_to_xml()
openmc.plot_geometry()
| 4,142 |
Python
| 26.805369 | 174 | 0.697006 |
williamjsmith15/OmniFlow/OpenMC/tools/file_converters/vtk_obj.py
|
# Convert vtk to obj file format
# Credit: https://github.com/lodeguns/VTK-OBJ/blob/master/vtk_to_obj_converter.py
import pyvista as pv
import os
from pyvista import examples
# System separator
sep = os.sep
this_path = os.path.realpath(__file__)
parent_folder = this_path.split(f"{sep}file_converters", 1)[0]
paths = {
"input" : f"{parent_folder}{sep}test_output",
"output" : f"{parent_folder}{sep}test_output"
}
def convert(indir, outdir) :
files = os.listdir(indir)
files = [ os.path.join(indir,f) for f in files if f.endswith('.vtk') ]
for f in files:
mesh = pv.read(f)
basename = os.path.basename(f)
print("Copying file:", basename)
basename = os.path.splitext(basename)[0]
print("File name:", basename)
othermesh = examples.load_uniform()
legend_entries = []
legend_entries.append(['Liver converted', 'w'])
legend_entries.append(['External marker', 'k'])
plotter = pv.Plotter()
_ = plotter.add_mesh(mesh)
_ = plotter.add_mesh(othermesh, 'k')
_ = plotter.add_legend(legend_entries)
_ = plotter.export_obj(os.path.join(outdir, f"conv_{basename}.obj"))
convert(paths['input'], paths['output'])
| 1,269 |
Python
| 30.749999 | 81 | 0.627266 |
williamjsmith15/OmniFlow/OpenMC/tools/file_converters/usd_h5m_convert.yml
|
usd_h5m_script:
class: File
path: usd_h5m.py
usd_CAD:
class: File
path: ../../output/omni/dagmc.usd # Need to reset this to where the usd file will output to
# path: ../../../TEST/Test_USD/Test_4_DonutOnCube.usd
# Test running:
# cwltool --outdir /home/williamjsmith15/PhD/OmniFlow/TEST/Test_USD/output /home/williamjsmith15/PhD/OmniFlow/OpenMC/tools/file_converters/usd_h5m_convert.cwl /home/williamjsmith15/PhD/OmniFlow/OpenMC/tools/file_converters/usd_h5m_convert.yml
| 487 |
YAML
| 47.799995 | 244 | 0.741273 |
williamjsmith15/OmniFlow/OpenMC/tools/file_converters/usd_h5m.py
|
from typing import Iterable
from pxr import Usd, UsdShade
from vertices_to_h5m import vertices_to_h5m
import numpy as np
import os, tempfile
sep = os.sep # System separator
ext_path = os.path.realpath(__file__) # File path of ext
parent_folder = ext_path.split(f"{sep}omni-kit", 1)[0] # File path of parent folder to extension
tmp = tempfile.gettempdir()
path_py = os.path.realpath(__file__)
# Name of file changeable for ease of testing... default should be 'dagmc.usd'
fname_root = 'dagmc' # Default
# fname_root = 'Test_1_Bucket' # TESTING
# fname_root = 'Test_2_MilkJug' # TESTING
# fname_root = 'Test_3_RubixCube' # TESTING
# fname_root = 'Test_4_DonutOnCube' # TESTING
# Grab the filepath of the usd file
def find_files(filename): # TODO: find a better way to search for this rather than search from root (lazy implementation)
search_path = os.path.abspath("/")
result = []
# Walking top-down from the root
for root, dir, files in os.walk(search_path):
if filename in files:
result.append(os.path.join(root, filename))
return result
# USD Helper Functions
def getValidProperty (primative, parameterName):
# Get param
prop = primative.GetProperty(parameterName)
# Test validity
if ( type(prop) == type(Usd.Attribute())): # is valid
return prop.Get()
else: # is not
print("Requested parameter is not valid!")
return None
#raise Exception("Requested parameter is not valid!")
def getProperty (primative, parameterName): # Unsafe
# Get param
prop = primative.GetProperty(parameterName).Get()
return prop
def propertyIsValid (primative, parameterName):
# Get param
prop = primative.GetProperty(parameterName)
# Test validity
if ( type(prop) == type(Usd.Attribute())): # is valid
return True
else:
return False
def get_rot(rotation):
# Calculates rotation matrix given a x,y,z rotation in degrees
factor = 2.0 * np.pi / 360.0 # Convert to radians
x_angle, y_angle, z_angle = rotation[0]*factor, rotation[1]*factor, rotation[2]*factor
x_rot = np.array([[1,0,0],[0,np.cos(x_angle),-np.sin(x_angle)],[0,np.sin(x_angle),np.cos(x_angle)]], dtype='float64')
y_rot = np.array([[np.cos(y_angle),0,np.sin(y_angle)],[0,1,0],[-np.sin(y_angle),0,np.cos(y_angle)]], dtype='float64')
z_rot = np.array([[np.cos(z_angle),-np.sin(z_angle),0],[np.sin(z_angle),np.cos(z_angle),0],[0,0,1]], dtype='float64')
rot_mat = np.dot(np.dot(x_rot,y_rot),z_rot)
return rot_mat
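# Quick sanity check (illustrative): a 90 degree rotation about Z maps the
# x unit vector onto y, applied the same way as below with np.dot:
#   np.allclose(np.dot(get_rot([0.0, 0.0, 90.0]), [1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])  # -> True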
_ALLOWED_MATERIAL_PURPOSES = (
UsdShade.Tokens.full,
UsdShade.Tokens.preview,
UsdShade.Tokens.allPurpose,
)
def get_bound_material(
prim, material_purpose=UsdShade.Tokens.allPurpose, collection=""
):
# From https://github.com/ColinKennedy/USD-Cookbook/blob/master/tricks/bound_material_finder/python/material_binding_api.py 30/01/23
"""Find the strongest material for some prim / purpose / collection.
If no material is found for `prim`, this function will check every
ancestor of Prim for a bound material and return that, instead.
Reference:
https://graphics.pixar.com/usd/docs/UsdShade-Material-Assignment.html#UsdShadeMaterialAssignment-MaterialResolve:DeterminingtheBoundMaterialforanyGeometryPrim
Args:
prim (`pxr.Usd.Prim`):
The path to begin looking for material bindings.
material_purpose (str, optional):
A specific name to filter materials by. Available options
are: `UsdShade.Tokens.full`, `UsdShade.Tokens.preview`,
or `UsdShade.Tokens.allPurpose`.
Default: `UsdShade.Tokens.allPurpose`
collection (str, optional):
The name of a collection to filter by, for any found
collection bindings. If no collection name is given then
the strongest collection is used, instead. Though generally,
it's recommended to always provide a collection name if you
can. Default: "".
Raises:
ValueError:
If `prim` is invalid or if `material_purpose` is not an allowed purpose.
Returns:
`pxr.UsdShade.Material` or NoneType:
The strongest bound material, if one is assigned.
"""
def is_collection_binding_stronger_than_descendents(binding):
return (
UsdShade.MaterialBindingAPI.GetMaterialBindingStrength(
binding.GetBindingRel()
)
== "strongerThanDescendents"
)
def is_binding_stronger_than_descendents(binding, purpose):
"""bool: Check if the given binding/purpose is allowed to override any descendent bindings."""
return (
UsdShade.MaterialBindingAPI.GetMaterialBindingStrength(
binding.GetDirectBindingRel(materialPurpose=purpose)
)
== "strongerThanDescendents"
)
def get_collection_material_bindings_for_purpose(binding, purpose):
"""Find the closest ancestral collection bindings for some `purpose`.
Args:
binding (`pxr.UsdShade.MaterialBindingAPI`):
The material binding that will be used to search
for a direct binding.
purpose (str):
The name of some direct-binding purpose to filter by. If
no name is given, any direct-binding that is found gets
returned.
Returns:
list[`pxr.UsdShade.MaterialBindingAPI.CollectionBinding`]:
The found bindings, if any could be found.
"""
# XXX : Note, Normally I'd just do
# `UsdShadeMaterialBindingAPI.GetCollectionBindings` but, for
# some reason, `binding.GetCollectionBindings(purpose)` does not
# yield the same result as parsing the relationships, manually.
# Maybe it's a bug?
#
# return binding.GetCollectionBindings(purpose)
#
parent = binding.GetPrim()
# TODO : We're doing quadratic work here... not sure how to improve this section
while not parent.IsPseudoRoot():
binding = binding.__class__(parent)
material_bindings = [
UsdShade.MaterialBindingAPI.CollectionBinding(relationship)
for relationship in binding.GetCollectionBindingRels(purpose)
if relationship.IsValid()
]
if material_bindings:
return material_bindings
parent = parent.GetParent()
return []
def get_direct_bound_material_for_purpose(binding, purpose):
"""Find the bound material, using direct binding, if it exists.
Args:
binding (`pxr.UsdShade.MaterialBindingAPI`):
The material binding that will be used to search
for a direct binding.
purpose (str):
The name of some direct-binding purpose to filter by. If
no name is given, any direct-binding that is found gets
returned.
Returns:
`pxr.UsdShade.Material` or NoneType: The found material, if one could be found.
"""
relationship = binding.GetDirectBindingRel(materialPurpose=purpose)
direct = UsdShade.MaterialBindingAPI.DirectBinding(relationship)
if not direct.GetMaterial():
return None
material = direct.GetMaterialPath()
prim = binding.GetPrim().GetStage().GetPrimAtPath(material)
if not prim.IsValid():
return None
return UsdShade.Material(prim)
if not prim.IsValid():
raise ValueError('Prim "{prim}" is not valid.'.format(prim=prim))
if material_purpose not in _ALLOWED_MATERIAL_PURPOSES:
raise ValueError(
'Purpose "{material_purpose}" is not valid. Options were, "{options}".'.format(
material_purpose=material_purpose,
options=sorted(_ALLOWED_MATERIAL_PURPOSES),
)
)
purposes = {material_purpose, UsdShade.Tokens.allPurpose}
for purpose in purposes:
material = None
parent = prim
while not parent.IsPseudoRoot():
binding = UsdShade.MaterialBindingAPI(parent)
if not material or is_binding_stronger_than_descendents(binding, purpose):
material = get_direct_bound_material_for_purpose(binding, purpose)
for collection_binding in get_collection_material_bindings_for_purpose(
binding, purpose
):
binding_collection = collection_binding.GetCollection()
if collection and binding_collection.GetName() != collection:
continue
membership = binding_collection.ComputeMembershipQuery()
if membership.IsPathIncluded(parent.GetPath()) and (
not material
or is_collection_binding_stronger_than_descendents(
collection_binding
)
):
material = collection_binding.GetMaterial()
# Keep searching ancestors until we hit the scene root
parent = parent.GetParent()
if material:
return material
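# Usage note: the converter class below calls get_bound_material(prim) on each
# Mesh prim and parses the material name out of the string form of the returned
# UsdShade.Material, e.g. UsdShade.Material(Usd.Prim(</World/Looks/Steel>))
# (path illustrative).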
class USDtoDAGMC:
'''
Class to convert USD to h5m file format usable for DAGMC, for use with OpenMC
'''
def __init__(self):
# Initialise with blank numpy arrays
self.vertices = np.array([])
self.triangles = []
self.material_tags = []
def add_USD_file(self, filename: str = fname_root + '.usd'):
'''
Load parts from the USD file into the class with their associated material tags, then convert to triangles for use
in the conversion script
Args:
filename: filename used to import the USD file with
'''
stage_file = filename
stage = Usd.Stage.Open(stage_file)
volumeOffset = 0 # Required as all vertices have to be in a global 1D array (all volumes) => this offsets the indexing
# for each individual volume as all vertices for new volumes are added into the same array as previous
# volumes (vertices is 1D, triangles is 2D with 2nd dimension have the number of volumes in)
material_count = 0 # For materials that 'fall through the net'
# Change as NVIDIA is annoying with its choice of up axis (they choose Y whereas it should be Z for OpenMC...)
# Find parent folder path
if "PhD" in path_py:
cwl_folder = path_py.split(f"{sep}PhD", 1)[0]
elif "cwl" in path_py:
cwl_folder = path_py.split(f"{sep}cwl", 1)[0]
# Find settings and dagmc files
for root, dirs, files in os.walk(cwl_folder):
for file in files:
if file.endswith("settings.txt"):
settings_path = os.path.join(root, file)
with open(settings_path, 'r') as file:
for line in file:
if "up_axis" in line:
up_axis = line.split()[1]
for primID, x in enumerate(stage.Traverse()):
primType = x.GetTypeName()
print(f"PRIM: {str(primType)}")
print(f'PrimID is {primID}')
if str(primType) == 'Mesh':
material_count += 1
# Get the material type of the meshes
material_name = str(get_bound_material(x))
try:
material_name = material_name.split('<')[1] # Just get material name from between <>
material_name = material_name.split('>')[0] # In form of UsdShade.Material(Usd.Prim(</World/Looks/Aluminum_Anodized>))
material_name = material_name.split('/')[-1] # Get the last name from file path
print(f"Material name is: {material_name}")
except:
material_name = f"mesh_{material_count}"
print('No USD material found')
print(f'Setting material name to default: {material_name}')
# Get number of vertices in elements
allVertexCounts = np.array(getValidProperty(x,"faceVertexCounts"))
allVertexIndices = np.array(getValidProperty(x,"faceVertexIndices"))
# Get if there is rotation or translation of the meshes
rotation = [0.0,0.0,0.0] if not propertyIsValid(x,"xformOp:rotateXYZ") else list(getProperty(x,"xformOp:rotateXYZ"))
translation = np.array([0,0,0]) if not propertyIsValid(x,"xformOp:translate") else np.array(list(getProperty(x,"xformOp:translate")))
print(f'Rotation is {rotation}')
print(f'Translation is {translation}')
# Handling for changing the up axis
if up_axis == 'X':
rotation[1] -= 90.0 # TODO: Check this is correct...
elif up_axis == 'Y':
rotation[0] += 90.0
elif up_axis == 'Z':
rotation = rotation
else:
print('Something went wrong with up_axis')
rot_matrix = get_rot(rotation)
# TODO: Make the rotation matrix multiplication better! Lazy coding for now...
newVertices = np.array(getValidProperty(x,"points"), dtype='float64') # Assign vertices here and add rotation and translation
newVertices = np.array([np.dot(rot_matrix,xyz) for xyz in newVertices]) # Have to rotate first before translating as it rotates around the origin
newVertices = newVertices + translation
if self.vertices.size == 0: # For the first run through, just set vertices to the newVertices array
self.vertices = newVertices
else:
self.vertices = np.append(self.vertices, newVertices, axis=0)
globalCount = 0
extraPointCount = 0
endOfVolumeIdx = np.size(self.vertices,0)
trianglesForVolume = np.array([], dtype="int")
if np.all(allVertexCounts == 3): # Case where mesh is already in triangles (makes program run much faster - hopefully!)
trianglesForVolume = allVertexIndices.reshape((allVertexCounts.size,3)) + volumeOffset
else:
for Count in allVertexCounts:
if Count == 3: # Triangle
a, b, c = globalCount, globalCount+1, globalCount+2
# For explanation of +volumeOffset see initialisation of volumeOffset variable
if trianglesForVolume.size == 0: # This whole shenanigans is because I don't know how to use numpy arrays properly.... LEARN
trianglesForVolume = np.array([[allVertexIndices[a]+volumeOffset, allVertexIndices[b]+volumeOffset, allVertexIndices[c]+volumeOffset]])
else:
trianglesForVolume = np.append(trianglesForVolume, np.array([[allVertexIndices[a]+volumeOffset, allVertexIndices[b]+volumeOffset, allVertexIndices[c]+volumeOffset]]), axis=0)
elif Count == 4: # Quadrilateral => Split into 2 triangles
a, b, c, d = globalCount, globalCount+1, globalCount+2, globalCount+3
if trianglesForVolume.size == 0:
trianglesForVolume = np.array([[allVertexIndices[a]+volumeOffset, allVertexIndices[b]+volumeOffset, allVertexIndices[c]+volumeOffset]])
else:
trianglesForVolume = np.append(trianglesForVolume, np.array([[allVertexIndices[a]+volumeOffset, allVertexIndices[b]+volumeOffset, allVertexIndices[c]+volumeOffset]]), axis=0)
#Think this may cause issues with some quadrilaterals being split into 2 triangles that overlap and leave a gap - see latex doc
trianglesForVolume = np.append(trianglesForVolume, np.array([[allVertexIndices[a]+volumeOffset, allVertexIndices[c]+volumeOffset, allVertexIndices[d]+volumeOffset]]), axis=0)
elif Count > 4: # n points to triangles
indices = np.array([allVertexIndices[globalCount+i]+volumeOffset for i in range(Count)]) # Get array of indices of points
points = np.array([self.vertices[idx] for idx in indices]) # Get points that match those indices
# Find middle of n-sided polygon => can make triangles from every edge to centre point and add to end of vertices matrix
self.vertices = np.append(self.vertices, np.array([[np.average(points[:,dir]) for dir in range(3)]]), axis=0)
centrePointIdx = endOfVolumeIdx + extraPointCount
extraPointCount += 1 # Just added an extra point into the vertices array
for triangleNum in range(Count):
if triangleNum == Count - 1: # Last triangle
trianglesForVolume = np.append(trianglesForVolume, np.array([[indices[0], indices[triangleNum], centrePointIdx]]), axis=0)
else:
if trianglesForVolume.size == 0:
trianglesForVolume = np.array([[indices[triangleNum], indices[triangleNum+1], centrePointIdx]])
else:
trianglesForVolume = np.append(trianglesForVolume, np.array([[indices[triangleNum], indices[triangleNum+1], centrePointIdx]]), axis=0)
else:
print(f"I don't know what to do with a {Count} count yet...")
globalCount += Count
self.triangles.append(trianglesForVolume)
self.material_tags.append(material_name)
shapeVertices = np.shape(newVertices)
volumeOffset += shapeVertices[0] + extraPointCount # Account for all points plus any extras added in from Counts>4
else:
print(f"I don't know what to do with a {str(primType)} yet...")
print("\n\n")
def save_to_h5m(self, filename: str = fname_root + '.h5m'):
'''
Use the vertices saved in the class to convert to h5m using the vertices_to_h5m mini package
https://github.com/fusion-energy/vertices_to_h5m
Args:
filename: Filename to save the h5m file, default will be dagmc.h5m (as this is the format required by DAGMC)
'''
vertices_to_h5m(
vertices=self.vertices,
triangles=self.triangles,
material_tags=self.material_tags,
h5m_filename=filename,
)
filepath = find_files(fname_root + '.usd')
convert = USDtoDAGMC()
convert.add_USD_file(filename = filepath[0])
convert.save_to_h5m()
| 19,319 |
Python
| 44.352113 | 206 | 0.59589 |
williamjsmith15/OmniFlow/OpenMC/omni-kit-extension/exts/omni.openmc.runner/config/extension.toml
|
[package]
title = "OpenMC Workflow Runner"
description = "OpenMC Workflow Runner built as part of MSc Dissertation Project"
version = "1.0"
category = "Example"
authors = ["William Smith"]
keywords = ["openmc", "runner"]
readme = "docs/README.md"
repository = ""
[dependencies]
"omni.kit.uiapp" = {}
"omni.ui" = {}
"omni.kit.menu.utils" = {}
[[python.module]]
name = "omni.openmc.runner"
| 390 |
TOML
| 21.999999 | 80 | 0.684615 |
williamjsmith15/OmniFlow/OpenMC/omni-kit-extension/exts/omni.openmc.runner/omni/openmc/runner/ui_helpers.py
|
import omni.ui as ui
from .functions import *
import numpy as np
import os
class MinimalItem(ui.AbstractItem):
def __init__(self,text):
super().__init__()
self.model = ui.SimpleStringModel(text)
class MinimalModel(ui.AbstractItemModel):
def __init__(self, items, value=0):
# Items is a 1D array of strings that are the options for the dropdown
super().__init__()
self._current_index = ui.SimpleIntModel()
self._current_index.add_value_changed_fn(
lambda a:self._item_changed(None))
self._items = [
MinimalItem(text)
for text in items
]
self._current_index.set_value(value)
def get_item_children(self, item):
return self._items
def get_item_value_model(self, item, column_id):
if item is None:
return self._current_index
return item.model
def set_model_state(self, value=0):
self._current_index.set_value(value)
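# Minimal usage sketch (option strings illustrative): back an omni.ui.ComboBox
# with a MinimalModel built from a list of options, as window.py does:
#
#   model = MinimalModel(items=['A', 'B', 'C'], value=1)
#   ui.ComboBox(model)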
| 994 |
Python
| 26.638888 | 78 | 0.608652 |
williamjsmith15/OmniFlow/OpenMC/omni-kit-extension/exts/omni.openmc.runner/omni/openmc/runner/test_cwlcall.py
|
import os
import subprocess, sys # Alternative to os to run shell commands - don't know why it wasn't working before...
import tempfile
###################################
## Set Paths & Get Temp Folders
###################################
sep = os.sep # System separator
ext_path = os.path.realpath(__file__) # File path of ext
parent_folder = ext_path.split(f"{sep}omni-kit", 1)[0] # File path of parent folder to extension
tmp = tempfile.gettempdir()
paths = {
"workflow" : f"{parent_folder}{sep}tools",
"output_container" : f"{sep}output", # IN container
"output_omni" : f"{parent_folder}{sep}output{sep}omni",
"output_sim" : f"{parent_folder}{sep}output{sep}simulation",
"output_test" : f"{parent_folder}{sep}output{sep}test",
"general_CAD" : f"{sep}paramak{sep}dagmc.h5m",
"sep" : sep,
"tmp" : tmp,
"share" : f"{tmp}{sep}share",
"usdTmp" : f"{tmp}{sep}usd",
"outTmpOpenMC" : f"{tmp}{sep}outOpenMC",
"workflowDest" : "/" # In container
}
cmd = f"toil-cwl-runner --outdir {paths['output_omni']} {paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.cwl {paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.yml"
# output = subprocess.run(["toil-cwl-runner", "--outdir", paths['output_omni'], f"{paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.cwl", f"{paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.yml"], capture_output=True, text=True)
output = subprocess.run([i for i in cmd.split(' ')], capture_output=True, text=True)
print(f'stdout:\n\n{output.stdout}\n\n')
print(f'stderr:\n\n{output.stderr}\n\n')
| 1,774 |
Python
| 48.305554 | 256 | 0.588501 |
williamjsmith15/OmniFlow/OpenMC/omni-kit-extension/exts/omni.openmc.runner/omni/openmc/runner/__init__.py
|
__all__ = ["OpenMCExt", "Window"]
from .extension import OpenMCExt
from .window import Window
| 95 |
Python
| 18.199996 | 33 | 0.715789 |
williamjsmith15/OmniFlow/OpenMC/omni-kit-extension/exts/omni.openmc.runner/omni/openmc/runner/functions.py
|
import os, tarfile, tempfile, pathlib # System packages
import omni
import subprocess, sys # Alternative to os to run shell commands - don't know why it wasn't working before...
#################################################################
## Set Paths for Folders & Create Cases for Linux vs Windows OS
#################################################################
sep = os.sep # System separator
ext_path = os.path.realpath(__file__) # File path of ext
parent_folder = ext_path.split(f"{sep}omni-kit", 1)[0] # File path of parent folder to extension
tmp = tempfile.gettempdir()
paths = {
"workflow" : f"{parent_folder}{sep}tools",
"output_container" : f"{sep}output", # IN container
"output_omni" : f"{parent_folder}{sep}output{sep}omni",
"output_sim" : f"{parent_folder}{sep}output{sep}simulation",
"output_test" : f"{parent_folder}{sep}output{sep}test",
"general_CAD" : f"{sep}paramak{sep}dagmc.h5m",
"tmp" : tmp,
"share" : f"{tmp}{sep}share",
"usdTmp" : f"{tmp}{sep}usd",
"outTmpOpenMC" : f"{tmp}{sep}outOpenMC",
"workflowDest" : "/" # In container
}
# Allow choice between toil runner and cwltool - useful for testing
toil_runner = True
if toil_runner:
runner = 'toil-cwl-runner'
else:
runner = 'cwltool'
# Get operating system and set a prefix for the workflow commands
platform = sys.platform
if platform == 'linux':
prefix_cmd = f'{runner}'
elif platform == 'win32':
prefix_cmd = f'wsl.exe {runner}'
else: # Unaccounted for OS
print(f"I don't know what to do with operating system type: {platform}")
###################################
## Helper Functions
###################################
def t_f(string):
"""
Convert string to bool with error handling
Parameters
----------
string: String
String to be tested if True or False
Returns
-------
bool: Default value of False
True or False
"""
if string == 'True':
return True
elif string == 'False':
return False
else:
print("I don't know what this is, returning default of false")
return False
def export_stage():
"""
Exports the current USD stage to an output file
"""
path = f"{paths['output_omni']}{sep}dagmc.usd"
print(f"Exporting stage to: {path}")
stage = omni.usd.get_context().get_stage()
stage.Export(path)
print("Successfully exported USD stage!")
def wsl_file_convert(win_path):
"""
Converts a windows file path, ie C:\PhD\OmniFlow....
to a wsl mount file path, ie /mnt/c/PhD/OmniFlow....
Parameters
----------
win_path: string
file path in normal windows format
Returns
-------
wsl_file: string
file path in wsl format
"""
step_in_path = win_path.split('\\')
drive_letter = step_in_path[0].replace(':', '').lower() # WSL mounts drives at lowercase /mnt/<letter>, as in the docstring
wsl_file = f"/mnt/{drive_letter}"
for step in step_in_path[1:]:
wsl_file = f"{wsl_file}/{step}"
return wsl_file
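# e.g. wsl_file_convert("C:\\PhD\\OmniFlow\\output") -> "/mnt/c/PhD/OmniFlow/output" (path illustrative)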
###################################
## Workflows
###################################
def toy_test():
"""
Run a test module on a toy problem, validate base system is working
Writes files into test output folder
"""
print("Running Toy Test Workflow")
# Handling of different operating systems
if platform == 'linux':
out_dir = paths['output_test']
cwl_loc = f"{paths['workflow']}{sep}tests{sep}toy{sep}openmc_tool_toy.cwl"
yml_loc = f"{paths['workflow']}{sep}tests{sep}toy{sep}script_loc_toy.yml"
elif platform == 'win32':
out_dir = wsl_file_convert(paths['output_test'])
cwl_loc = wsl_file_convert(f"{paths['workflow']}{sep}tests{sep}toy{sep}openmc_tool_toy.cwl")
yml_loc = wsl_file_convert(f"{paths['workflow']}{sep}tests{sep}toy{sep}script_loc_toy.yml")
else:
print(f"Don't know how to handle platform: {platform} yet")
# Run the workflow
cmd = f"{prefix_cmd} --outdir {out_dir} {cwl_loc} {yml_loc}"
print(cmd)
output = subprocess.run([i for i in cmd.split(' ')], capture_output=True, text=True)
print(f'stdout:\n\n{output.stdout}\n\n')
print(f'stderr:\n\n{output.stderr}\n\n')
print(f"Toy Test Complete! Your files will be in: {paths['output_test']}")
def simple_CAD_test():
"""
Run a test module on a simple CAD problem, validate CAD system is working
Writes files into test output folder
"""
print("Running Simple CAD Test Workflow")
# Handling of different operating systems
if platform == 'linux':
out_dir = paths['output_test']
cwl_loc = f"{paths['workflow']}{sep}tests{sep}simple{sep}simple_CAD_workflow.cwl"
yml_loc = f"{paths['workflow']}{sep}tests{sep}simple{sep}script_loc_simple_CAD.yml"
elif platform == 'win32':
out_dir = wsl_file_convert(paths['output_test'])
cwl_loc = wsl_file_convert(f"{paths['workflow']}{sep}tests{sep}simple{sep}simple_CAD_workflow.cwl")
yml_loc = wsl_file_convert(f"{paths['workflow']}{sep}tests{sep}simple{sep}script_loc_simple_CAD.yml")
else:
print(f"Don't know how to handle platform: {platform} yet")
# Run the workflow
cmd = f"{prefix_cmd} --outdir {out_dir} {cwl_loc} {yml_loc}"
print(cmd)
output = subprocess.run([i for i in cmd.split(' ')], capture_output=True, text=True)
print(f'stdout:\n\n{output.stdout}\n\n')
print(f'stderr:\n\n{output.stderr}\n\n')
print(f"Simple CAD Test Complete! Your files will be in: {paths['output_test']}")
def run_workflow():
"""
Main OpenMC Workflow runner
Runs workflow with the settings file written to in the extension
Writes files to simulation folder
"""
print('Running OpenMC Workflow')
print("Exporting USD Stage")
export_stage()
print("Running Workflow")
# Handling of different operating systems
if platform == 'linux':
out_dir = paths['output_sim']
cwl_loc = f"{paths['workflow']}{sep}main{sep}openmc_workflow.cwl"
yml_loc = f"{paths['workflow']}{sep}main{sep}script_loc.yml"
elif platform == 'win32':
out_dir = wsl_file_convert(paths['output_sim'])
cwl_loc = wsl_file_convert(f"{paths['workflow']}{sep}main{sep}openmc_workflow.cwl")
yml_loc = wsl_file_convert(f"{paths['workflow']}{sep}main{sep}script_loc.yml")
else:
print(f"Don't know how to handle platform: {platform} yet")
# Run the workflow
cmd = f"{prefix_cmd} --outdir {out_dir} {cwl_loc} {yml_loc}"
print(cmd)
output = subprocess.run([i for i in cmd.split(' ')], capture_output=True, text=True)
print(f'stdout:\n\n{output.stdout}\n\n')
print(f'stderr:\n\n{output.stderr}\n\n')
print(f"Done! Your files will be in: {paths['output_sim']}")
def get_materials():
"""
Gets material names from the USD file in the current stage
Returns
-------
materials: 1D String Array
All material names present in the stage
"""
print("Getting Material Names")
print('Exporting File')
export_stage()
print(ext_path)
print('Running materials getter')
# Handling of different operating systems
if platform == 'linux':
out_dir = paths['output_omni']
cwl_loc = f"{paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.cwl"
yml_loc = f"{paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.yml"
elif platform == 'win32':
out_dir = wsl_file_convert(paths['output_omni'])
cwl_loc = wsl_file_convert(f"{paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.cwl")
yml_loc = wsl_file_convert(f"{paths['workflow']}{sep}dagmc_material_name{sep}dagmc_materials.yml")
else:
print(f"Don't know how to handle platform: {platform} yet")
# Run the workflow
cmd = f"{prefix_cmd} --outdir {out_dir} {cwl_loc} {yml_loc}"
print(cmd)
output = subprocess.run([i for i in cmd.split(' ')], capture_output=True, text=True)
print(f'stdout:\n\n{output.stdout}\n\n')
print(f'stderr:\n\n{output.stderr}\n\n')
mat_file_path = f"{paths['output_omni']}{sep}materials.txt"
materials = []
if os.path.exists(mat_file_path):
with open(mat_file_path) as file:
for line in file:
materials.append(line)
print("Materials Getter Finished")
return materials
| 8,561 |
Python
| 31.804598 | 109 | 0.598762 |
williamjsmith15/OmniFlow/OpenMC/omni-kit-extension/exts/omni.openmc.runner/omni/openmc/runner/window.py
|
__all__ = ["Window"]
import omni.ui as ui
from .functions import *
from .ui_helpers import *
import numpy as np
import os
LABEL_WIDTH = 120
SPACING = 4
default_dict = {
'sources' : [],
'materials' : [],
# Run settings
'batches' : 0,
'particles' : 0,
'run_mode' : 'fixed source',
# All system / extension settings
'num_sources' : 1,
'source_type' : 'Point Source', # 0=point, 1=plasma
'up_axis' : 'Z', # 0=X 1=Y 2=Z
'test_dropdown' : False,
'mats_dropdown' : False,
'sets_dropdown' : True,
'srcs_dropdown' : False
}
class Window(ui.Window):
"""The class that represents the window"""
# Create dict to store variables
settings_dict = default_dict
previous_settings = default_dict
# Options for dropdowns
run_type_options = np.array(['fixed source','eigenvalue','volume','plot','particle restart'])
source_type_options = np.array(['Point Source', 'Fusion Point Source', 'Fusion Ring Source', 'Tokamak Source'])
up_axis_choice = np.array(['X','Y','Z'])
def __init__(self, title: str, delegate=None, **kwargs):
self.__label_width = LABEL_WIDTH
super().__init__(title, **kwargs)
# Set the function that is called to build widgets when the window is
# visible
self.frame.set_build_fn(self._build_fn)
def destroy(self):
# It will destroy all the children
super().destroy()
@property
def label_width(self):
"""The width of the attribute label"""
return self.__label_width
@label_width.setter
def label_width(self, value):
"""The width of the attribute label"""
self.__label_width = value
self.frame.rebuild()
##########################
# --- BUILD FRAMES ---
##########################
def _build_run(self):
# Build the widgets of the Run group
with ui.VStack(height=0, spacing=SPACING):
ui.Label("OpenMC Workflow Run and Settings")
ui.Button("Run Workflow", clicked_fn=lambda: self._run_workflow_button())
ui.Button("Save State", clicked_fn=lambda: self._save_state_button())
self.settings_dict['test_dropdown'] = ui.CollapsableFrame("Test", collapsed = t_f(self.previous_settings['test_dropdown']))
with self.settings_dict['test_dropdown']:
with ui.HStack():
ui.Button("Run Toy Test", clicked_fn=lambda: toy_test())
ui.Button("Run Simple CAD Test", clicked_fn=lambda: simple_CAD_test())
def _build_materials(self):
        # Takes the materials.txt file and reads all the material names into the materials list
mat_file_path = f"{paths['output_omni']}{sep}materials.txt"
materials = []
if os.path.exists(mat_file_path):
with open(mat_file_path) as file:
for line in file:
materials.append(line)
# Build the widgets of the Materials
self.settings_dict['mats_dropdown'] = ui.CollapsableFrame("Materials", collapsed = t_f(self.previous_settings['mats_dropdown']))
with self.settings_dict['mats_dropdown']:
with ui.VStack(height=0, spacing=SPACING):
ui.Button("Get Materials", clicked_fn=lambda: self._save_state_button(get_mats=True))
                # Uses the materials list to build one row of editable properties per material
self.settings_dict['materials'] = []
for i in range(len(self.previous_settings['materials'])):
self.settings_dict['materials'].append([None]*3)
self.settings_dict['materials'][i][0] = self.previous_settings['materials'][i][0]
with ui.HStack(spacing = SPACING):
ui.Label(self.previous_settings['materials'][i][0], width=self.label_width)
ui.Label("Element")
self.settings_dict['materials'][i][1] = ui.StringField().model
if str(self.previous_settings['materials'][i][1]) != 'None':
self.settings_dict['materials'][i][1].set_value(str(self.previous_settings['materials'][i][1]))
# ui.Label("Atom Percent", width=self.label_width)
# tmp_array[1] = ui.FloatField().model
ui.Label("Density (g/cm^3)")
self.settings_dict['materials'][i][2] = ui.FloatField().model
self.settings_dict['materials'][i][2].set_value(str(self.previous_settings['materials'][i][2]))
def _build_sources(self):
self.settings_dict['srcs_dropdown'] = ui.CollapsableFrame("Sources", collapsed = t_f(self.previous_settings['srcs_dropdown']))
with self.settings_dict['srcs_dropdown']:
with ui.VStack(height=0, spacing=SPACING):
with ui.HStack():
ui.Label("Source Type", width=self.label_width)
self.settings_dict['source_type'] = MinimalModel(items=self.source_type_options, value=int(np.where(self.source_type_options==str(self.previous_settings['source_type']))[0]))
ui.ComboBox(self.settings_dict['source_type'])
ui.Button("Enter", clicked_fn=lambda: self._save_state_button())
with ui.HStack():
ui.Label("Number of Sources", width=self.label_width)
self.settings_dict['num_sources'] = ui.IntField().model
self.settings_dict['num_sources'].set_value(int(self.previous_settings['num_sources']))
ui.Button("Enter", clicked_fn=lambda: self._save_state_button())
# Point source case
if self.settings_dict['source_type'].get_item_value_model(None, 1).get_value_as_int() == 0:
self.settings_dict['sources'] = []
for i in range(int(self.previous_settings['num_sources'])):
self.settings_dict['sources'].append([None]*4)
with ui.VStack(height=0, spacing=SPACING):
with ui.HStack(spacing=SPACING):
ui.Label(f"Source {i+1}", width=self.label_width)
ui.Label("Energy:", width=self.label_width)
self.settings_dict['sources'][i][0] = ui.FloatField().model
ui.Label("Location:", width=self.label_width)
self.settings_dict['sources'][i][1] = ui.FloatField().model
self.settings_dict['sources'][i][2] = ui.FloatField().model
self.settings_dict['sources'][i][3] = ui.FloatField().model
try:
self.settings_dict['sources'][i][0].set_value(float(self.previous_settings['sources'][i][0]))
self.settings_dict['sources'][i][1].set_value(float(self.previous_settings['sources'][i][1]))
self.settings_dict['sources'][i][2].set_value(float(self.previous_settings['sources'][i][2]))
self.settings_dict['sources'][i][3].set_value(float(self.previous_settings['sources'][i][3]))
                            except (IndexError, TypeError, ValueError): # Handling of sources that don't have data
print(f"No source data found for source {i+1}")
# Fusion Point Source Case
elif self.settings_dict['source_type'].get_item_value_model(None, 1).get_value_as_int() == 1:
self.settings_dict['sources'] = []
for i in range(int(self.previous_settings['num_sources'])):
self.settings_dict['sources'].append([None]*5)
with ui.HStack(spacing=SPACING):
ui.Label(f"Source {i+1}", width=self.label_width)
with ui.HStack(spacing=SPACING):
ui.Label("Fuel Type (DT, DD)", width=self.label_width)
self.settings_dict['sources'][i][4] = ui.StringField().model
ui.Label("Temoerature (eV)", width=self.label_width)
self.settings_dict['sources'][i][3] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Coordinate:")
ui.Label("x")
self.settings_dict['sources'][i][0] = ui.FloatField().model
ui.Label("y")
self.settings_dict['sources'][i][1] = ui.FloatField().model
ui.Label("z")
self.settings_dict['sources'][i][2] = ui.FloatField().model
for j in range(5):
try:
if j == 4:
self.settings_dict['sources'][i][j].set_value(str(self.previous_settings['sources'][i][j]))
else:
self.settings_dict['sources'][i][j].set_value(float(self.previous_settings['sources'][i][j]))
                            except (IndexError, TypeError, ValueError): # Handling of sources that don't have data
print(f"No float data found for source {i+1}")
# Fusion Ring Source Case
elif self.settings_dict['source_type'].get_item_value_model(None, 1).get_value_as_int() == 2:
self.settings_dict['sources'] = []
for i in range(int(self.previous_settings['num_sources'])):
self.settings_dict['sources'].append([None]*6)
with ui.HStack(spacing=SPACING):
ui.Label(f"Source {i+1}", width=self.label_width)
ui.Label("Radius (inside, cm)", width=self.label_width)
self.settings_dict['sources'][i][0] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Angle (deg) start:", width=self.label_width)
self.settings_dict['sources'][i][2] = ui.FloatField().model
ui.Label("end:")
self.settings_dict['sources'][i][3] = ui.FloatField().model
ui.Label("Temp (eV)")
self.settings_dict['sources'][i][4] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Fuel Type (DT, DD)")
self.settings_dict['sources'][i][1] = ui.StringField().model
ui.Label("Vert Offset")
self.settings_dict['sources'][i][5] = ui.FloatField().model
for j in range(6):
try:
if j == 1:
self.settings_dict['sources'][i][j].set_value(str(self.previous_settings['sources'][i][j]))
else:
self.settings_dict['sources'][i][j].set_value(float(self.previous_settings['sources'][i][j]))
                            except (IndexError, TypeError, ValueError): # Handling of sources that don't have data
print(f"No float data found for source {i+1}")
# Tokamak Source Case
elif self.settings_dict['source_type'].get_item_value_model(None, 1).get_value_as_int() == 3:
self.settings_dict['sources'] = []
for i in range(int(self.previous_settings['num_sources'])):
                        self.settings_dict['sources'].append([None]*19) # 19 fields (indices 0-18), matching the fields built below
with ui.HStack(spacing=SPACING):
ui.Label(f"Source {i+1}", width=self.label_width)
ui.Label("Major Radius (m)", width=self.label_width)
self.settings_dict['sources'][i][0] = ui.FloatField().model
ui.Label("Minor Radius (m)", width=self.label_width)
self.settings_dict['sources'][i][1] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Elongation", width=self.label_width)
self.settings_dict['sources'][i][2] = ui.FloatField().model
ui.Label("Triangularity")
self.settings_dict['sources'][i][3] = ui.FloatField().model
ui.Label("Confinement Mode (L,H,A)")
self.settings_dict['sources'][i][4] = ui.StringField().model
with ui.HStack(spacing=SPACING):
ui.Label("Ion Density (m^-3) at:")
ui.Label("Ion Density Peaking Factor")
self.settings_dict['sources'][i][6] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Centre", width=self.label_width)
self.settings_dict['sources'][i][5] = ui.FloatField().model
ui.Label("Pedestal")
self.settings_dict['sources'][i][7] = ui.FloatField().model
ui.Label("Seperatrix")
self.settings_dict['sources'][i][8] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Ion Temperature (KeV) at:")
ui.Label("Peaking Factor")
self.settings_dict['sources'][i][10] = ui.FloatField().model
ui.Label("Beta")
self.settings_dict['sources'][i][11] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Centre", width=self.label_width)
self.settings_dict['sources'][i][9] = ui.FloatField().model
ui.Label("Pedestal")
self.settings_dict['sources'][i][12] = ui.FloatField().model
ui.Label("Seperatrix")
self.settings_dict['sources'][i][13] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Pedestal Raduis (m)", width=self.label_width)
self.settings_dict['sources'][i][14] = ui.FloatField().model
ui.Label("Shafranov Factor")
self.settings_dict['sources'][i][15] = ui.FloatField().model
ui.Label("Sample Size")
self.settings_dict['sources'][i][18] = ui.FloatField().model
with ui.HStack(spacing=SPACING):
ui.Label("Angle (deg) start:", width=self.label_width)
self.settings_dict['sources'][i][16] = ui.FloatField().model
ui.Label("end:")
self.settings_dict['sources'][i][17] = ui.FloatField().model
for j in range(19):
try:
if j == 4:
self.settings_dict['sources'][i][j].set_value(str(self.previous_settings['sources'][i][j]))
else:
self.settings_dict['sources'][i][j].set_value(float(self.previous_settings['sources'][i][j]))
                            except (IndexError, TypeError, ValueError): # Handling of sources that don't have data
print(f"No float data found for source {i+1}")
else:
print('There was an error, unknown source type detected')
def _build_settings(self):
# Build the widgets of the Settings group
self.settings_dict['sets_dropdown'] = ui.CollapsableFrame("Settings", collapsed = t_f(self.previous_settings['sets_dropdown']))
with self.settings_dict['sets_dropdown']:
with ui.VStack(height=0, spacing=SPACING):
with ui.HStack():
ui.Label("Batches", width=self.label_width)
self.settings_dict['batches'] = ui.IntField().model
self.settings_dict['batches'].set_value(int(self.previous_settings['batches']))
with ui.HStack():
ui.Label("Particles", width=self.label_width)
self.settings_dict['particles'] = ui.IntField().model
self.settings_dict['particles'].set_value(int(self.previous_settings['particles']))
with ui.HStack():
ui.Label("Run Mode", width=self.label_width)
self.settings_dict['run_mode'] = MinimalModel(items=self.run_type_options, value=int(np.where(self.run_type_options == self.previous_settings['run_mode'])[0]))
ui.ComboBox(self.settings_dict['run_mode'])
with ui.HStack():
ui.Label("Up Axis", width=self.label_width)
self.settings_dict['up_axis'] = MinimalModel(items=self.up_axis_choice, value=int(np.where(self.up_axis_choice == self.previous_settings['up_axis'])[0]))
ui.ComboBox(self.settings_dict['up_axis'])
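    # Note on the dropdown round-trip: generate() writes each combo box's
    # *string* label (looked up in run_type_options / source_type_options /
    # up_axis_choice), and load_state() maps that string back to the integer
    # index MinimalModel expects via np.where().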
def _build_export(self):
with ui.VStack(height=0, spacing=SPACING):
ui.Button("Export USD Stage", clicked_fn=lambda: export_stage())
def _build_fn(self):
"""
The method that is called to build all the UI once the window is
visible.
"""
self.load_state()
with ui.ScrollingFrame():
with ui.VStack(height=0):
self._build_run()
self._build_materials()
self._build_sources()
self._build_settings()
self._build_export()
##########################
# --- BUTTONS ---
##########################
def _run_workflow_button(self):
self.generate()
run_workflow()
def _save_state_button(self, get_mats=False):
# Saves the state of the extension
self.generate(get_mats)
print('Refreshing screen')
self.frame.rebuild()
##########################
# --- EXTRA FUNCTIONS ---
##########################
def generate(self, get_mats=False):
# Converts settings and materials into a txt file for the general CAD py script to use
print("Saving Materials and Settings")
already_used_materials = []
if get_mats:
materials = get_materials()
with open(f"{paths['output_omni']}{sep}settings.txt", 'w') as file:
# Materials
file.write('MATERIALS\n')
# count = 0 # DEBUGGING
if get_mats: # Just write the materials to the settings file, no element or density
                for mat in materials:
                    if mat not in already_used_materials:
                        file.write(f"{mat}")
                        already_used_materials.append(mat)
else: # Write the material settings set in omni
for mat in self.settings_dict['materials']:
# count += 1
# file.write(f"mesh_{count} Fe 7.7\n")
if 'Irangon' in mat[0]:
file.write(f"{mat[0].replace(' ', '')} Fe 7.7\n")
else:
file.write(f"{mat[0].replace(' ', '')} {mat[1].get_value_as_string()} {mat[2].get_value_as_float()}\n")
# Sources
file.write('SOURCES\n')
for src in self.settings_dict['sources']:
tmp_src = []
for field in src:
                    # Each field is a float model or a string model; try both
                    # getters and keep the last one that succeeds (the values
                    # are joined as text below anyway). Default to '' so a
                    # field with neither getter cannot raise a NameError.
                    tmp_field = ''
                    try:
                        tmp_field = field.get_value_as_float()
                    except Exception:
                        pass
                    try:
                        tmp_field = field.get_value_as_string()
                    except Exception:
                        pass
                    tmp_src.append(str(tmp_field))
file.write(f"{' '.join(tmp_src)}\n")
# Settings
file.write('SETTINGS\n')
file.write(f"batches {self.settings_dict['batches'].get_value_as_int()}\n")
file.write(f"particles {self.settings_dict['particles'].get_value_as_int()}\n")
file.write(f"run_mode {self.run_type_options[self.settings_dict['run_mode'].get_item_value_model(None, 1).get_value_as_int()]}\n")
file.write('EXT_SETTINGS\n')
file.write(f"source_type {self.source_type_options[self.settings_dict['source_type'].get_item_value_model(None, 1).get_value_as_int()]}\n")
file.write(f"num_sources {self.settings_dict['num_sources'].get_value_as_int()}\n")
file.write(f"up_axis {self.up_axis_choice[self.settings_dict['up_axis'].get_item_value_model(None, 1).get_value_as_int()]}\n")
file.write(f"test_dropdown {self.settings_dict['test_dropdown'].collapsed}\n")
file.write(f"mats_dropdown {self.settings_dict['mats_dropdown'].collapsed}\n")
file.write(f"sets_dropdown {self.settings_dict['sets_dropdown'].collapsed}\n")
file.write(f"srcs_dropdown {self.settings_dict['srcs_dropdown'].collapsed}\n")
print("Finished Converting")
    def load_state(self):
        position = 0
        self.previous_settings = {}
        settings_path = f"{paths['output_omni']}{sep}settings.txt"
        if not os.path.exists(settings_path):
            # No saved state yet (e.g. first run): fall back to the defaults
            self.previous_settings = dict(default_dict)
            return
        with open(settings_path, 'r') as file:
for line in file:
split_line = line.split()
if position == 0:
if "MATERIALS" in line:
position = 1
self.previous_settings['materials'] = []
elif position == 1:
if "SOURCES" in line:
position = 2
self.previous_settings['sources'] = []
else:
if len(split_line) == 3:
self.previous_settings['materials'].append(split_line)
else:
self.previous_settings['materials'].append([split_line[0], None, 0.0])
elif position == 2:
if "SETTINGS" in line:
position = 3
else:
self.previous_settings['sources'].append(split_line)
elif position == 3:
if "EXT_SETTINGS" in line:
position = 4
else:
self.previous_settings[split_line[0]] = ' '.join(split_line[1:])
elif position == 4:
self.previous_settings[split_line[0]] = ' '.join(split_line[1:])
                else:
                    print(f"I don't know what position {position} is")
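# A minimal example of the settings.txt layout that generate() writes and
# load_state() parses back (all values below are illustrative assumptions):
#
#   MATERIALS
#   mesh_1 Fe 7.7
#   mesh_2 W 19.3
#   SOURCES
#   14100000.0 0.0 0.0 0.0
#   SETTINGS
#   batches 10
#   particles 1000
#   run_mode fixed source
#   EXT_SETTINGS
#   source_type Point Source
#   num_sources 1
#   up_axis Z
#   test_dropdown True
#   mats_dropdown True
#   sets_dropdown False
#   srcs_dropdown True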
| 23,792 |
Python
| 49.408898 | 194 | 0.49214 |
williamjsmith15/OmniFlow/ToilRunner/py_test_docker_compose.py
|
import docker
import os
import tempfile, pathlib, tarfile
###################################
## Set Paths & Get Temp Folders
###################################
sep = os.sep # System separator
ext_path = os.path.realpath(__file__) # File path of ext
parent_folder = ext_path.split(f"{sep}OmniFlow", 1)[0] # File path of parent folder to extension
parent_folder = f"{parent_folder}{sep}OmniFlow{sep}OpenMC"
tmp = tempfile.gettempdir()
paths = {
"workflow" : f"{parent_folder}{sep}tools",
"output_container" : f"{sep}output", # IN container
"output_omni" : f"{parent_folder}{sep}output{sep}omni",
"output_sim" : f"{parent_folder}{sep}output{sep}simulation",
"output_test" : f"{parent_folder}{sep}output{sep}test",
"tmp" : tmp,
"share" : f"{tmp}{sep}share",
"usdTmp" : f"{tmp}{sep}usd",
"outTmpOpenMC" : f"{tmp}{sep}outOpenMC",
"workflowDest" : "/" # In container
}
pathlib.Path(paths["share"]).mkdir(parents=True, exist_ok=True)
pathlib.Path(paths["usdTmp"]).mkdir(parents=True, exist_ok=True)
pathlib.Path(paths["outTmpOpenMC"]).mkdir(parents=True, exist_ok=True)
###################################
## Helper Functions
###################################
def make_tarfile(source_dir, output_filename):
with tarfile.open(output_filename, "w:gz") as tar:
print(f"{source_dir} contains: {os.listdir(source_dir)}")
tar.add(source_dir, arcname=os.path.basename(source_dir))
return tar
def send_files(container, source, temp, destination):
make_tarfile(source, temp)
with open(temp, 'rb') as bundle:
ok = container.put_archive(path=destination, data=bundle)
if not ok:
raise Exception(f'Put {source} to {destination} failed')
else:
print(f'Uploaded {source} ({os.path.getsize(temp)} B) to {destination} successfully')
def get_files(container, source, destination, fname):
    # Stream a tar archive of `source` out of the container onto the host
    bits, stat = container.get_archive(path=source)
    with open(f"{destination}{sep}{fname}.tar", 'wb') as f:
        for chunk in bits:
            f.write(chunk)
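# Hypothetical usage sketch for the helpers above (assumes a running Toil
# container named "omniflow-toil-1", as in the commented block further down):
#
#   client = docker.from_env()
#   toil = client.containers.get("omniflow-toil-1")
#   send_files(toil, paths["workflow"], f"{tmp}{sep}workflow.tar.gz", paths["workflowDest"])
#   get_files(toil, paths["output_container"], paths["outTmpOpenMC"], "results")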
print(f"{paths['workflow']}{sep}tests{sep}toy{sep}openmc_tool_toy.cwl")
runner = 'cwltool' # Testing
# runner = 'toil-cwl-runner' # Testing
container = ' '
# container = ' --no-container '
# os.system(f"wsl.exe {runner}{container}--debug --outdir /mnt/d/PhD/OmniFlow/OpenMC/output/tests /mnt/d/PhD/OmniFlow/OpenMC/tools/tests/toy/openmc_tool_toy.cwl /mnt/d/PhD/OmniFlow/OpenMC/tools/tests/toy/script_loc_toy.yml")
os.system(f"wsl.exe {runner}{container}--debug --outdir /mnt/d/phd/omniflow/openmc/output/tests /mnt/d/PhD/OmniFlow/OpenMC/tools/tests/simple/simple_CAD_workflow.cwl /mnt/d/PhD/OmniFlow/OpenMC/tools/tests/simple/script_loc_simple_CAD.yml")
# client = docker.from_env()
# toilContainer = client.containers.get("omniflow-toil-1")
# # var = toilContainer.exec_run(["ls", "OmniFlow/OpenMC/tools/tests/toy"])
# var = toilContainer.exec_run(["toil-cwl-runner", "--debug", "--outdir", "OmniFlow/OpenMC/output/tests", "OmniFlow/OpenMC/tools/tests/toy/openmc_tool_toy.cwl", "OmniFlow/OpenMC/tools/tests/toy/script_loc_toy.yml"])
# for msg in str(var[1]).split("\\n"):
# for i in msg.split("\\x1b"):
# print(i)
# To look at:
# https://cwl.discourse.group/t/working-offline-with-singularity/246
| 3,441 |
Python
| 37.674157 | 239 | 0.622784 |